ov2.0 IE samples modification (#8340)

* ov2.0 IE samples modification

apply code style

turn off clang style check for headers order

unify samples a bit

add yuv nv12 reader to format_reader, hello_nv12 sample

hello_reshape_ssd ov2.0

* sync with PR 8629 preprocessing api changes

* fix for slog << vector<int>

* add operator<< for ov::Version from PR-8687

* Update samples/cpp/hello_nv12_input_classification/main.cpp

Co-authored-by: Mikhail Nosov <mikhail.nosov@intel.com>

* apply code style

* change according to review comments

* add const qualifier

* apply code style

* std::ostream for old inference engine version to make VPU plugin tests happy

* apply code style

* revert changes in print version for old api samples

* keep inference_engine.hpp for not ov2.0 yet samples

* fix merge artifacts

* fix compilation

* apply code style

* Fixed classification sample test

* Revert changes in hello_reshape_ssd sample

* rebase to master, sync with PR-9054

* fix issues found by C++ tests

* rebased and sync with PR-9051

* fix test result parsers for classification tests (except unicode one)

* fix mismatches after merge

* rebase and sync with PR-9144

Co-authored-by: Mikhail Nosov <mikhail.nosov@intel.com>
Co-authored-by: antonrom23 <anton.romanov@intel.com>
This commit is contained in:
Vladimir Dudnik
2021-12-13 11:30:58 +03:00
committed by GitHub
parent 4e8a6d5a4b
commit 5b25dbee22
45 changed files with 1033 additions and 825 deletions

View File

@@ -4,12 +4,12 @@
#pragma once
#include <gflags/gflags.h>
#include <iostream>
#include <string>
#include <vector>
#include "gflags/gflags.h"
/// @brief message for help argument
static const char help_message[] = "Print a usage message.";
@@ -27,17 +27,6 @@ static const char target_device_message[] =
"Default value is CPU. Use \"-d HETERO:<comma_separated_devices_list>\" format to specify HETERO plugin. "
"Sample will look for a suitable plugin for device specified.";
/// @brief message for top results number
static const char ntop_message[] = "Optional. Number of top results. Default value is 10.";
/// @brief message for plugin custom kernels desc
static const char custom_plugin_cfg_message[] = "Required for GPU, MYRIAD, HDDL custom kernels. "
"Absolute path to the .xml config file with the kernels descriptions.";
/// @brief message for user library argument
static const char custom_ex_library_message[] = "Required for CPU plugin custom layers. "
"Absolute path to a shared library with the kernels implementations.";
/// @brief Define flag for showing help message <br>
DEFINE_bool(h, false, help_message);
@@ -53,18 +42,6 @@ DEFINE_string(m, "", model_message);
/// It is an optional parameter
DEFINE_string(d, "CPU", target_device_message);
/// @brief Top results number (default 10) <br>
/// It is an optional parameter
DEFINE_uint32(nt, 10, ntop_message);
/// @brief Define parameter for plugin custom kernels path <br>
/// It is an optional parameter
DEFINE_string(c, "", custom_plugin_cfg_message);
/// @brief Absolute path to CPU library with user layers <br>
/// It is an optional parameter
DEFINE_string(l, "", custom_ex_library_message);
/**
* @brief This function show a help message
*/
@@ -75,10 +52,5 @@ static void showUsage() {
std::cout << std::endl;
std::cout << " -h " << help_message << std::endl;
std::cout << " -m \"<path>\" " << model_message << std::endl;
std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -l \"<absolute_path>\" " << custom_ex_library_message << std::endl;
std::cout << " Or" << std::endl;
std::cout << " -c \"<absolute_path>\" " << custom_plugin_cfg_message << std::endl;
std::cout << " -d \"<device>\" " << target_device_message << std::endl;
std::cout << " -nt \"<integer>\" " << ntop_message << std::endl;
}

View File

@@ -8,25 +8,32 @@
* @example classification_sample_async/main.cpp
*/
#include <format_reader_ptr.h>
#include <samples/classification_results.h>
#include <sys/stat.h>
#include <condition_variable>
#include <fstream>
#include <inference_engine.hpp>
#include <map>
#include <memory>
#include <mutex>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <vector>
#include "classification_sample_async.h"
// clang-format off
#include "openvino/openvino.hpp"
#include "samples/args_helper.hpp"
#include "samples/common.hpp"
#include "samples/classification_results.h"
#include "samples/slog.hpp"
#include "format_reader_ptr.h"
#include "classification_sample_async.h"
// clang-format on
constexpr auto N_TOP_RESULTS = 10;
using namespace ov::preprocess;
/**
* @brief Checks input args
* @param argc number of args
@@ -42,10 +49,6 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) {
}
slog::info << "Parsing input parameters" << slog::endl;
if (FLAGS_nt <= 0) {
throw std::logic_error("Incorrect value for nt argument. It should be greater than 0.");
}
if (FLAGS_m.empty()) {
showUsage();
throw std::logic_error("Model is required but not set. Please set -m option.");
@@ -79,32 +82,20 @@ int main(int argc, char* argv[]) {
// -------- Step 1. Initialize OpenVINO Runtime Core --------
ov::runtime::Core core;
if (!FLAGS_l.empty()) {
auto extension_ptr = std::make_shared<InferenceEngine::Extension>(FLAGS_l);
core.add_extension(extension_ptr);
slog::info << "Extension loaded: " << FLAGS_l << slog::endl;
}
if (!FLAGS_c.empty() && (FLAGS_d == "GPU" || FLAGS_d == "MYRIAD" || FLAGS_d == "HDDL")) {
// Config for device plugin custom extension is loaded from an .xml
// description
core.set_config({{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d);
slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c
<< slog::endl;
}
// -------- Step 2. Read a model --------
slog::info << "Loading model files:" << slog::endl << FLAGS_m << slog::endl;
std::shared_ptr<ov::Model> model = core.read_model(FLAGS_m);
printInputAndOutputsInfo(*model);
OPENVINO_ASSERT(model->get_parameters().size() == 1, "Sample supports models with 1 input only");
OPENVINO_ASSERT(model->get_results().size() == 1, "Sample supports models with 1 output only");
// -------- Step 3. Apply preprocessing --------
// -------- Step 3. Configure preprocessing --------
const ov::Layout tensor_layout{"NHWC"};
ov::preprocess::PrePostProcessor proc(model);
ov::preprocess::PrePostProcessor ppp(model);
// 1) input() with no args assumes a model has a single input
ov::preprocess::InputInfo& input_info = proc.input();
ov::preprocess::InputInfo& input_info = ppp.input();
// 2) Set input tensor information:
// - precision of tensor is supposed to be 'u8'
// - layout of data is 'NHWC'
@@ -114,10 +105,11 @@ int main(int argc, char* argv[]) {
// 4) output() with no args assumes a model has a single result
// - output() with no args assumes a model has a single result
// - precision of tensor is supposed to be 'f32'
proc.output().tensor().set_element_type(ov::element::f32);
ppp.output().tensor().set_element_type(ov::element::f32);
// 5) Once the build() method is called, the pre(post)processing steps
// for layout and precision conversions are inserted automatically
model = proc.build();
model = ppp.build();
// -------- Step 4. read input images --------
slog::info << "Read input images" << slog::endl;
@@ -134,7 +126,7 @@ int main(int argc, char* argv[]) {
slog::warn << "Image " + i + " cannot be read!" << slog::endl;
continue;
}
// Store image data
// Collect image data
std::shared_ptr<unsigned char> data(reader->getData(width, height));
if (data != nullptr) {
images_data.push_back(data);
@@ -147,74 +139,69 @@ int main(int argc, char* argv[]) {
// -------- Step 5. Loading model to the device --------
// Setting batch size using image count
const size_t batchSize = images_data.size();
input_shape[ov::layout::batch_idx(tensor_layout)] = batchSize;
model->reshape({{model->input().get_any_name(), input_shape}});
slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;
slog::info << "Set batch size " << std::to_string(batchSize) << slog::endl;
ov::set_batch(model, batchSize);
printInputAndOutputsInfo(*model);
// -------- Step 6. Loading model to the device --------
slog::info << "Loading model to the device " << FLAGS_d << slog::endl;
ov::runtime::CompiledModel compiled_model = core.compile_model(model, FLAGS_d);
// -------- Step 6. Create infer request --------
// -------- Step 7. Create infer request --------
slog::info << "Create infer request" << slog::endl;
ov::runtime::InferRequest infer_request = compiled_model.create_infer_request();
// -------- Step 7. Combine multiple input images as batch --------
// -------- Step 8. Combine multiple input images as batch --------
ov::runtime::Tensor input_tensor = infer_request.get_input_tensor();
for (size_t image_id = 0; image_id < images_data.size(); ++image_id) {
const size_t image_size = shape_size(input_shape) / batchSize;
const size_t image_size = shape_size(model->input().get_shape()) / batchSize;
std::memcpy(input_tensor.data<std::uint8_t>() + image_id * image_size,
images_data[image_id].get(),
image_size);
}
// -------- Step 8. Do asynchronous inference --------
// -------- Step 9. Do asynchronous inference --------
size_t num_iterations = 10;
size_t cur_iteration = 0;
std::condition_variable condVar;
std::mutex mutex;
// -------- Step 10. Do asynchronous inference --------
infer_request.set_callback([&](std::exception_ptr ex) {
if (ex)
throw ex;
std::lock_guard<std::mutex> l(mutex);
cur_iteration++;
slog::info << "Completed " << cur_iteration << " async request execution" << slog::endl;
if (cur_iteration < num_iterations) {
/* here a user can read output containing inference results and put new
input to repeat async request again */
// here a user can read output containing inference results and put new
// input to repeat async request again
infer_request.start_async();
} else {
/* continue sample execution after last Asynchronous inference request
* execution */
// continue sample execution after last Asynchronous inference request
// execution
condVar.notify_one();
}
});
/* Start async request for the first time */
slog::info << "Start inference (" << num_iterations << " asynchronous executions)" << slog::endl;
// Start async request for the first time
slog::info << "Start inference (asynchronous executions)" << slog::endl;
infer_request.start_async();
/* Wait all iterations of the async request */
// Wait all iterations of the async request
std::unique_lock<std::mutex> lock(mutex);
condVar.wait(lock, [&] {
return cur_iteration == num_iterations;
});
// -------- Step 9. Process output --------
slog::info << "Completed async requests execution" << slog::endl;
// -------- Step 11. Process output --------
ov::runtime::Tensor output = infer_request.get_output_tensor();
/** Validating -nt value **/
const size_t resultsCnt = output.get_size() / batchSize;
if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) {
slog::warn << "-nt " << FLAGS_nt << " is not available for this model (-nt should be less than "
<< resultsCnt + 1 << " and more than 0)\n Maximal value " << resultsCnt
<< " will be used." << slog::endl;
FLAGS_nt = resultsCnt;
}
/** Read labels from file (e.x. AlexNet.labels) **/
// Read labels from file (e.x. AlexNet.labels)
std::string labelFileName = fileNameNoExt(FLAGS_m) + ".labels";
std::vector<std::string> labels;
@@ -229,20 +216,15 @@ int main(int argc, char* argv[]) {
}
// Prints formatted classification results
ClassificationResult classificationResult(output, valid_image_names, batchSize, FLAGS_nt, labels);
ClassificationResult classificationResult(output, valid_image_names, batchSize, N_TOP_RESULTS, labels);
classificationResult.show();
} catch (const std::exception& error) {
slog::err << error.what() << slog::endl;
} catch (const std::exception& ex) {
slog::err << ex.what() << slog::endl;
return EXIT_FAILURE;
} catch (...) {
slog::err << "Unknown/internal exception happened." << slog::endl;
return EXIT_FAILURE;
}
slog::info << "Execution successful" << slog::endl;
slog::info << slog::endl
<< "This sample is an API example, for any performance measurements "
"please use the dedicated benchmark_app tool"
<< slog::endl;
return EXIT_SUCCESS;
}