openvino/inference-engine/samples/hello_classification/main.cpp

// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <memory>
#include <string>

#include <samples/common.hpp>

#ifdef UNICODE
#include <tchar.h>
#endif

#include <inference_engine.hpp>
#include <samples/ocv_common.hpp>
#include <samples/classification_results.h>

using namespace InferenceEngine;

#ifndef UNICODE
#define tcout std::cout
#define _T(STR) STR
#else
#define tcout std::wcout
#endif
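
// The sample reads an IR model, classifies one image on the chosen device and
// prints the results. Example invocation (model and image names below are
// illustrative placeholders, substitute your own files):
//     ./hello_classification alexnet.xml cat.png CPU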
#ifndef UNICODE
int main(int argc, char *argv[]) {
#else
int wmain(int argc, wchar_t *argv[]) {
#endif
    try {
        // ------------------------------ Parsing and validation of input args ---------------------------------
        if (argc != 4) {
            tcout << _T("Usage : ./hello_classification <path_to_model> <path_to_image> <device_name>") << std::endl;
            return EXIT_FAILURE;
        }

        const file_name_t input_model{argv[1]};
        const file_name_t input_image_path{argv[2]};
        const std::string device_name{argv[3]};
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 1. Load inference engine instance -------------------------------------
        Core ie;
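        // The Core object manages the available device plugins (CPU, GPU, MYRIAD, ...);
        // a plugin is loaded on demand when a network is compiled for that device.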
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
        CNNNetReader network_reader;
        network_reader.ReadNetwork(fileNameToString(input_model));
        network_reader.ReadWeights(fileNameToString(input_model).substr(0, input_model.size() - 4) + ".bin");
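        // Note: the weights path is derived from the model path by swapping the
        // ".xml" extension for ".bin", so both IR files must sit side by side.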
        network_reader.getNetwork().setBatchSize(1);
        CNNNetwork network = network_reader.getNetwork();
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 3. Configure input & output ---------------------------------------------
        // --------------------------- Prepare input blobs -----------------------------------------------------
        InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
        std::string input_name = network.getInputsInfo().begin()->first;

        /* Mark the input as resizable by setting a resize algorithm.
         * This lets us set an input blob of any shape on the infer request;
         * resize and layout conversions are executed automatically during inference. */
        input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
        input_info->setLayout(Layout::NHWC);
        input_info->setPrecision(Precision::U8);
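        // U8 precision and NHWC layout match how OpenCV stores an 8-bit BGR image,
        // which lets the cv::Mat buffer below be wrapped without any conversion.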
        // --------------------------- Prepare output blobs ----------------------------------------------------
        DataPtr output_info = network.getOutputsInfo().begin()->second;
        std::string output_name = network.getOutputsInfo().begin()->first;
        output_info->setPrecision(Precision::FP32);
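        // FP32 output returns the class scores as 32-bit floats regardless of the
        // precision the network executes with internally.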
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 4. Loading model to the device ------------------------------------------
        ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name);
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 5. Create infer request -------------------------------------------------
        InferRequest infer_request = executable_network.CreateInferRequest();
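        // The request is used synchronously below; the same object also supports
        // asynchronous execution via StartAsync()/Wait().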
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 6. Prepare input --------------------------------------------------------
        /* Read the input image into a blob and set it on the infer request without resize or layout conversion. */
        cv::Mat image = cv::imread(fileNameToString(input_image_path));
        Blob::Ptr imgBlob = wrapMat2Blob(image);     // just wrap the Mat data in a Blob::Ptr without allocating new memory
        infer_request.SetBlob(input_name, imgBlob);  // infer_request accepts an input blob of any size
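        // Because the blob only wraps the cv::Mat data, 'image' must stay alive
        // (and unmodified) until inference has finished.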
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 7. Do inference ----------------------------------------------------------
        /* Running the request synchronously */
        infer_request.Infer();
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 8. Process output --------------------------------------------------------
        Blob::Ptr output = infer_request.GetBlob(output_name);
        // Print classification results
        ClassificationResult classificationResult(output, {fileNameToString(input_image_path)});
        classificationResult.print();
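        // ClassificationResult prints the top-N (10 by default) class IDs and
        // their scores for the processed image.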
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception & ex) {
        std::cerr << ex.what() << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << "This sample is an API example, for any performance measurements "
                 "please use the dedicated benchmark_app tool" << std::endl;
    return EXIT_SUCCESS;
}