ov2.0 IE samples modification (#8340)

* ov2.0 IE samples modification

apply code style

turn off clang style check for headers order

unify samples a bit

add yuv nv12 reader to format_reader, hello_nv12 sample

hello_reshape_ssd ov2.0

* sync with PR 8629 preprocessing api changes

* fix for slog << vector<int>

* add operator<< for ov::Version from PR-8687

* Update samples/cpp/hello_nv12_input_classification/main.cpp

Co-authored-by: Mikhail Nosov <mikhail.nosov@intel.com>

* apply code style

* change according to review comments

* add const qualifier

* apply code style

* std::ostream for old inference engine version to make VPU plugin tests happy

* apply code style

* revert changes in print version for old api samples

* keep inference_engine.hpp for samples not yet ported to ov2.0

* fix merge artifacts

* fix compilation

* apply code style

* Fixed classification sample test

* Revert changes in hello_reshape_ssd sample

* rebase to master, sync with PR-9054

* fix issues found by C++ tests

* rebased and sync with PR-9051

* fix test result parsers for classification tests (except unicode one)

* fix mismatches after merge

* rebase and sync with PR-9144

Co-authored-by: Mikhail Nosov <mikhail.nosov@intel.com>
Co-authored-by: antonrom23 <anton.romanov@intel.com>
This commit is contained in:
Vladimir Dudnik
2021-12-13 11:30:58 +03:00
committed by GitHub
parent 4e8a6d5a4b
commit 5b25dbee22
45 changed files with 1033 additions and 825 deletions

View File

@@ -8,12 +8,12 @@
# define HAVE_DEVICE_MEM_SUPPORT
#endif
#include <gflags/gflags.h>
#include <iostream>
#include <string>
#include <vector>
#include "gflags/gflags.h"
/// @brief message for help argument
static const char help_message[] = "Print a usage message";

View File

@@ -8,7 +8,6 @@
#include <chrono>
#include <condition_variable>
#include <functional>
#include <inference_engine.hpp>
#include <map>
#include <memory>
#include <mutex>
@@ -16,7 +15,11 @@
#include <string>
#include <vector>
// clang-format off
#include "inference_engine.hpp"
#include "statistics_report.hpp"
// clang-format on
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;

View File

@@ -2,17 +2,19 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "inputs_filling.hpp"
#include <format_reader_ptr.h>
#include <algorithm>
#include <memory>
#include <samples/slog.hpp>
#include <string>
#include <utility>
#include <vector>
// clang-format off
#include "samples/slog.hpp"
#include "format_reader_ptr.h"
#include "inputs_filling.hpp"
// clang-format on
using namespace InferenceEngine;
#ifdef USE_OPENCV

View File

@@ -4,12 +4,15 @@
#pragma once
#include <inference_engine.hpp>
#include <string>
#include <vector>
// clang-format off
#include "inference_engine.hpp"
#include "infer_request_wrap.hpp"
#include "utils.hpp"
// clang-format on
void fillBlobs(const std::vector<std::string>& inputFiles,
const size_t& batchSize,

View File

@@ -4,18 +4,22 @@
#include <algorithm>
#include <chrono>
#include <gna/gna_config.hpp>
#include <gpu/gpu_config.hpp>
#include <inference_engine.hpp>
#include <map>
#include <memory>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <utility>
#include <vector>
#include <vpu/vpu_plugin_config.hpp>
// clang-format off
#include "inference_engine.hpp"
#include "gna/gna_config.hpp"
#include "gpu/gpu_config.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include "samples/args_helper.hpp"
#include "samples/common.hpp"
#include "samples/slog.hpp"
#include "benchmark_app.hpp"
#include "infer_request_wrap.hpp"
@@ -24,6 +28,7 @@
#include "remote_blobs_filling.hpp"
#include "statistics_report.hpp"
#include "utils.hpp"
// clang-format on
using namespace InferenceEngine;

View File

@@ -5,7 +5,8 @@
#pragma once
#include <memory>
#include <samples/console_progress.hpp>
#include "samples/console_progress.hpp"
/// @brief Responsible for progress bar handling within the benchmark_app
class ProgressBar {

View File

@@ -2,13 +2,15 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "remote_blobs_filling.hpp"
// clang-format off
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "remote_blobs_filling.hpp"
// clang-format on
namespace gpu {
template <typename T>

View File

@@ -4,13 +4,15 @@
#if defined(HAVE_GPU_DEVICE_MEM_SUPPORT)
# define HAVE_DEVICE_MEM_SUPPORT
# include <gpu/gpu_context_api_ocl.hpp>
# include "gpu/gpu_context_api_ocl.hpp"
#endif
#include <inference_engine.hpp>
// clang-format off
#include "inference_engine.hpp"
#include "infer_request_wrap.hpp"
#include "utils.hpp"
// clang-format on
namespace gpu {

View File

@@ -2,14 +2,16 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "statistics_report.hpp"
// clang-format off
#include <algorithm>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "statistics_report.hpp"
// clang-format on
void StatisticsReport::addParameters(const Category& category, const Parameters& parameters) {
if (_parameters.count(category) == 0)
_parameters[category] = parameters;

View File

@@ -4,15 +4,18 @@
#pragma once
#include <inference_engine.hpp>
#include <map>
#include <samples/common.hpp>
#include <samples/csv_dumper.hpp>
#include <samples/slog.hpp>
#include <string>
#include <utility>
#include <vector>
// clang-format off
#include "inference_engine.hpp"
#include "samples/common.hpp"
#include "samples/csv_dumper.hpp"
#include "samples/slog.hpp"
// clang-format on
// @brief statistics reports types
static constexpr char noCntReport[] = "no_counters";
static constexpr char averageCntReport[] = "average_counters";

View File

@@ -6,12 +6,13 @@
#include <algorithm>
#include <map>
#include <regex>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <utility>
#include <vector>
#include "samples/common.hpp"
#include "samples/slog.hpp"
#include "utils.hpp"
// clang-format on

View File

@@ -4,12 +4,12 @@
#pragma once
#include <gflags/gflags.h>
#include <iostream>
#include <string>
#include <vector>
#include "gflags/gflags.h"
/// @brief message for help argument
static const char help_message[] = "Print a usage message.";
@@ -27,17 +27,6 @@ static const char target_device_message[] =
"Default value is CPU. Use \"-d HETERO:<comma_separated_devices_list>\" format to specify HETERO plugin. "
"Sample will look for a suitable plugin for device specified.";
/// @brief message for top results number
static const char ntop_message[] = "Optional. Number of top results. Default value is 10.";
/// @brief message for plugin custom kernels desc
static const char custom_plugin_cfg_message[] = "Required for GPU, MYRIAD, HDDL custom kernels. "
"Absolute path to the .xml config file with the kernels descriptions.";
/// @brief message for user library argument
static const char custom_ex_library_message[] = "Required for CPU plugin custom layers. "
"Absolute path to a shared library with the kernels implementations.";
/// @brief Define flag for showing help message <br>
DEFINE_bool(h, false, help_message);
@@ -53,18 +42,6 @@ DEFINE_string(m, "", model_message);
/// It is an optional parameter
DEFINE_string(d, "CPU", target_device_message);
/// @brief Top results number (default 10) <br>
/// It is an optional parameter
DEFINE_uint32(nt, 10, ntop_message);
/// @brief Define parameter for plugin custom kernels path <br>
/// It is an optional parameter
DEFINE_string(c, "", custom_plugin_cfg_message);
/// @brief Absolute path to CPU library with user layers <br>
/// It is an optional parameter
DEFINE_string(l, "", custom_ex_library_message);
/**
* @brief This function show a help message
*/
@@ -75,10 +52,5 @@ static void showUsage() {
std::cout << std::endl;
std::cout << " -h " << help_message << std::endl;
std::cout << " -m \"<path>\" " << model_message << std::endl;
std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -l \"<absolute_path>\" " << custom_ex_library_message << std::endl;
std::cout << " Or" << std::endl;
std::cout << " -c \"<absolute_path>\" " << custom_plugin_cfg_message << std::endl;
std::cout << " -d \"<device>\" " << target_device_message << std::endl;
std::cout << " -nt \"<integer>\" " << ntop_message << std::endl;
}

View File

@@ -8,25 +8,32 @@
* @example classification_sample_async/main.cpp
*/
#include <format_reader_ptr.h>
#include <samples/classification_results.h>
#include <sys/stat.h>
#include <condition_variable>
#include <fstream>
#include <inference_engine.hpp>
#include <map>
#include <memory>
#include <mutex>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <vector>
#include "classification_sample_async.h"
// clang-format off
#include "openvino/openvino.hpp"
#include "samples/args_helper.hpp"
#include "samples/common.hpp"
#include "samples/classification_results.h"
#include "samples/slog.hpp"
#include "format_reader_ptr.h"
#include "classification_sample_async.h"
// clang-format on
constexpr auto N_TOP_RESULTS = 10;
using namespace ov::preprocess;
/**
* @brief Checks input args
* @param argc number of args
@@ -42,10 +49,6 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) {
}
slog::info << "Parsing input parameters" << slog::endl;
if (FLAGS_nt <= 0) {
throw std::logic_error("Incorrect value for nt argument. It should be greater than 0.");
}
if (FLAGS_m.empty()) {
showUsage();
throw std::logic_error("Model is required but not set. Please set -m option.");
@@ -79,32 +82,20 @@ int main(int argc, char* argv[]) {
// -------- Step 1. Initialize OpenVINO Runtime Core --------
ov::runtime::Core core;
if (!FLAGS_l.empty()) {
auto extension_ptr = std::make_shared<InferenceEngine::Extension>(FLAGS_l);
core.add_extension(extension_ptr);
slog::info << "Extension loaded: " << FLAGS_l << slog::endl;
}
if (!FLAGS_c.empty() && (FLAGS_d == "GPU" || FLAGS_d == "MYRIAD" || FLAGS_d == "HDDL")) {
// Config for device plugin custom extension is loaded from an .xml
// description
core.set_config({{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d);
slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c
<< slog::endl;
}
// -------- Step 2. Read a model --------
slog::info << "Loading model files:" << slog::endl << FLAGS_m << slog::endl;
std::shared_ptr<ov::Model> model = core.read_model(FLAGS_m);
printInputAndOutputsInfo(*model);
OPENVINO_ASSERT(model->get_parameters().size() == 1, "Sample supports models with 1 input only");
OPENVINO_ASSERT(model->get_results().size() == 1, "Sample supports models with 1 output only");
// -------- Step 3. Apply preprocessing --------
// -------- Step 3. Configure preprocessing --------
const ov::Layout tensor_layout{"NHWC"};
ov::preprocess::PrePostProcessor proc(model);
ov::preprocess::PrePostProcessor ppp(model);
// 1) input() with no args assumes a model has a single input
ov::preprocess::InputInfo& input_info = proc.input();
ov::preprocess::InputInfo& input_info = ppp.input();
// 2) Set input tensor information:
// - precision of tensor is supposed to be 'u8'
// - layout of data is 'NHWC'
@@ -114,10 +105,11 @@ int main(int argc, char* argv[]) {
// 4) output() with no args assumes a model has a single result
// - output() with no args assumes a model has a single result
// - precision of tensor is supposed to be 'f32'
proc.output().tensor().set_element_type(ov::element::f32);
ppp.output().tensor().set_element_type(ov::element::f32);
// 5) Once the build() method is called, the pre(post)processing steps
// for layout and precision conversions are inserted automatically
model = proc.build();
model = ppp.build();
// -------- Step 4. read input images --------
slog::info << "Read input images" << slog::endl;
@@ -134,7 +126,7 @@ int main(int argc, char* argv[]) {
slog::warn << "Image " + i + " cannot be read!" << slog::endl;
continue;
}
// Store image data
// Collect image data
std::shared_ptr<unsigned char> data(reader->getData(width, height));
if (data != nullptr) {
images_data.push_back(data);
@@ -147,74 +139,69 @@ int main(int argc, char* argv[]) {
// -------- Step 5. Loading model to the device --------
// Setting batch size using image count
const size_t batchSize = images_data.size();
input_shape[ov::layout::batch_idx(tensor_layout)] = batchSize;
model->reshape({{model->input().get_any_name(), input_shape}});
slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;
slog::info << "Set batch size " << std::to_string(batchSize) << slog::endl;
ov::set_batch(model, batchSize);
printInputAndOutputsInfo(*model);
// -------- Step 6. Loading model to the device --------
slog::info << "Loading model to the device " << FLAGS_d << slog::endl;
ov::runtime::CompiledModel compiled_model = core.compile_model(model, FLAGS_d);
// -------- Step 6. Create infer request --------
// -------- Step 7. Create infer request --------
slog::info << "Create infer request" << slog::endl;
ov::runtime::InferRequest infer_request = compiled_model.create_infer_request();
// -------- Step 7. Combine multiple input images as batch --------
// -------- Step 8. Combine multiple input images as batch --------
ov::runtime::Tensor input_tensor = infer_request.get_input_tensor();
for (size_t image_id = 0; image_id < images_data.size(); ++image_id) {
const size_t image_size = shape_size(input_shape) / batchSize;
const size_t image_size = shape_size(model->input().get_shape()) / batchSize;
std::memcpy(input_tensor.data<std::uint8_t>() + image_id * image_size,
images_data[image_id].get(),
image_size);
}
// -------- Step 8. Do asynchronous inference --------
// -------- Step 9. Do asynchronous inference --------
size_t num_iterations = 10;
size_t cur_iteration = 0;
std::condition_variable condVar;
std::mutex mutex;
// -------- Step 10. Do asynchronous inference --------
infer_request.set_callback([&](std::exception_ptr ex) {
if (ex)
throw ex;
std::lock_guard<std::mutex> l(mutex);
cur_iteration++;
slog::info << "Completed " << cur_iteration << " async request execution" << slog::endl;
if (cur_iteration < num_iterations) {
/* here a user can read output containing inference results and put new
input to repeat async request again */
// here a user can read output containing inference results and put new
// input to repeat async request again
infer_request.start_async();
} else {
/* continue sample execution after last Asynchronous inference request
* execution */
// continue sample execution after last Asynchronous inference request
// execution
condVar.notify_one();
}
});
/* Start async request for the first time */
slog::info << "Start inference (" << num_iterations << " asynchronous executions)" << slog::endl;
// Start async request for the first time
slog::info << "Start inference (asynchronous executions)" << slog::endl;
infer_request.start_async();
/* Wait all iterations of the async request */
// Wait all iterations of the async request
std::unique_lock<std::mutex> lock(mutex);
condVar.wait(lock, [&] {
return cur_iteration == num_iterations;
});
// -------- Step 9. Process output --------
slog::info << "Completed async requests execution" << slog::endl;
// -------- Step 11. Process output --------
ov::runtime::Tensor output = infer_request.get_output_tensor();
/** Validating -nt value **/
const size_t resultsCnt = output.get_size() / batchSize;
if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) {
slog::warn << "-nt " << FLAGS_nt << " is not available for this model (-nt should be less than "
<< resultsCnt + 1 << " and more than 0)\n Maximal value " << resultsCnt
<< " will be used." << slog::endl;
FLAGS_nt = resultsCnt;
}
/** Read labels from file (e.x. AlexNet.labels) **/
// Read labels from file (e.x. AlexNet.labels)
std::string labelFileName = fileNameNoExt(FLAGS_m) + ".labels";
std::vector<std::string> labels;
@@ -229,20 +216,15 @@ int main(int argc, char* argv[]) {
}
// Prints formatted classification results
ClassificationResult classificationResult(output, valid_image_names, batchSize, FLAGS_nt, labels);
ClassificationResult classificationResult(output, valid_image_names, batchSize, N_TOP_RESULTS, labels);
classificationResult.show();
} catch (const std::exception& error) {
slog::err << error.what() << slog::endl;
} catch (const std::exception& ex) {
slog::err << ex.what() << slog::endl;
return EXIT_FAILURE;
} catch (...) {
slog::err << "Unknown/internal exception happened." << slog::endl;
return EXIT_FAILURE;
}
slog::info << "Execution successful" << slog::endl;
slog::info << slog::endl
<< "This sample is an API example, for any performance measurements "
"please use the dedicated benchmark_app tool"
<< slog::endl;
return EXIT_SUCCESS;
}

View File

@@ -2,12 +2,14 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <MnistUbyte.h>
// clang-format off
#include <fstream>
#include <iostream>
#include <string>
#include "MnistUbyte.h"
// clang-format on
using namespace FormatReader;
int MnistUbyte::reverseInt(int i) {

View File

@@ -8,12 +8,13 @@
*/
#pragma once
#include <format_reader.h>
#include <memory>
#include <string>
// clang-format off
#include "format_reader.h"
#include "register.h"
// clang-format on
namespace FormatReader {
/**

View File

@@ -2,11 +2,13 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "bmp.h"
// clang-format off
#include <fstream>
#include <iostream>
#include "bmp.h"
// clang-format on
using namespace std;
using namespace FormatReader;

View File

@@ -8,12 +8,13 @@
*/
#pragma once
#include <format_reader.h>
#include <memory>
#include <string>
// clang-format off
#include "format_reader.h"
#include "register.h"
// clang-format on
namespace FormatReader {
/**

View File

@@ -2,19 +2,22 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <format_reader.h>
#include <iostream>
#include "MnistUbyte.h"
// clang-format off
#include "bmp.h"
#include "MnistUbyte.h"
#include "yuv_nv12.h"
#include "opencv_wrapper.h"
#include "format_reader.h"
// clang-format on
using namespace FormatReader;
std::vector<Registry::CreatorFunction> Registry::_data;
Register<MnistUbyte> MnistUbyte::reg;
Register<YUV_NV12> YUV_NV12::reg;
#ifdef USE_OPENCV
Register<OCVReader> OCVReader::reg;
#else

View File

@@ -81,4 +81,4 @@ public:
* \brief Function for create reader
* @return FormatReader pointer
*/
FORMAT_READER_API(FormatReader::Reader*) CreateFormatReader(const char* filename);
FORMAT_READER_API(FormatReader::Reader*) CreateFormatReader(const char* filename);

View File

@@ -3,12 +3,15 @@
//
#ifdef USE_OPENCV
# include "opencv_wrapper.h"
# include <fstream>
# include <iostream>
// clang-format off
# include <opencv2/opencv.hpp>
# include <samples/slog.hpp>
# include "samples/slog.hpp"
# include "opencv_wrapper.h"
// clang-format on
using namespace std;
using namespace FormatReader;

View File

@@ -9,13 +9,15 @@
#pragma once
#ifdef USE_OPENCV
# include <format_reader.h>
# include <memory>
# include <opencv2/opencv.hpp>
# include <string>
// clang-format off
# include <opencv2/opencv.hpp>
# include "format_reader.h"
# include "register.h"
// clang-format on
namespace FormatReader {
/**

View File

@@ -7,12 +7,12 @@
*/
#pragma once
#include <format_reader.h>
#include <functional>
#include <string>
#include <vector>
#include "format_reader.h"
namespace FormatReader {
/**
* \class Registry
@@ -55,4 +55,4 @@ public:
});
}
};
} // namespace FormatReader
} // namespace FormatReader

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// clang-format off
#include <fstream>
#include <iostream>
#include <string>
#include "yuv_nv12.h"
// clang-format on
using namespace FormatReader;
/**
 * @brief Loads a raw NV12 frame from a ".yuv" file into memory.
 *
 * On any failure (wrong extension, file not found, empty file, short read)
 * the reader is left in its empty state: _size == 0 and _data unset.
 *
 * @param filename path to the input data file
 */
YUV_NV12::YUV_NV12(const std::string& filename) {
    // Accept only files with the ".yuv" extension.
    const auto pos = filename.rfind('.');
    if (pos == std::string::npos || filename.substr(pos + 1) != "yuv")
        return;

    // std::ios::ate opens the stream positioned at the end, so tellg()
    // yields the file size directly without an extra seek.
    std::ifstream file(filename, std::ios::binary | std::ios::ate);
    if (!file.is_open())
        return;

    const std::streampos file_size = file.tellg();
    // tellg() returns -1 on failure; guard before converting to size_t.
    if (file_size <= 0)
        return;
    _size = static_cast<size_t>(file_size);
    file.seekg(0, file.beg);

    _data.reset(new unsigned char[_size], std::default_delete<unsigned char[]>());
    if (!file.read(reinterpret_cast<char*>(_data.get()), _size)) {
        // Failed or partial read: reset to the empty state so callers never
        // observe a size that does not match the buffer contents.
        _data.reset();
        _size = 0;
    }
    // No explicit close(): std::ifstream releases the handle on destruction (RAII).
}

View File

@@ -0,0 +1,54 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* \brief YUV NV12 reader
* \file yuv_nv12.h
*/
#pragma once
#include <iostream>
#include <memory>
#include <string>
// clang-format off
#include "format_reader.h"
#include "register.h"
// clang-format on
namespace FormatReader {
/**
* \class YUV_NV12
* \brief Reader for YUV NV12 files
*/
class YUV_NV12 : public Reader {
private:
static Register<YUV_NV12> reg;
size_t _size = 0;
public:
/**
* \brief Constructor of YUV NV12 reader
* @param filename - path to input data
* @return YUV_NV12 reader object
*/
explicit YUV_NV12(const std::string& filename);
virtual ~YUV_NV12() {}
/**
* \brief Get size
* @return size
*/
size_t size() const override {
return _size;
}
std::shared_ptr<unsigned char> getData(size_t width, size_t height) override {
if ((width * height * 3 / 2 != size())) {
std::cout << "Image dimensions not match with NV12 file size \n";
return nullptr;
}
return _data;
}
};
} // namespace FormatReader

View File

@@ -9,10 +9,14 @@
#pragma once
#include <inference_engine.hpp>
// clang-format off
#include <string>
#include <vector>
#include "inference_engine.hpp"
#include "openvino/openvino.hpp"
// clang-format on
/**
* @brief This function checks input args and existence of specified files in a given folder
* @param arg path to a file to be checked for existence

View File

@@ -15,24 +15,21 @@
#include <utility>
#include <vector>
#include "inference_engine.hpp"
#include "openvino/openvino.hpp"
/**
* @class ClassificationResult
* @brief A ClassificationResult creates an output table with results
*/
template <class strType = std::string>
class ClassificationResultT {
class ClassificationResult {
private:
const std::string _classidStr = "classid";
const std::string _probabilityStr = "probability";
const std::string _labelStr = "label";
size_t _nTop;
ov::runtime::Tensor _outTensor;
InferenceEngine::Blob::Ptr _outBlob;
const std::vector<std::string> _labels;
const std::vector<strType> _imageNames;
const std::vector<std::string> _imageNames;
const size_t _batchSize;
std::vector<unsigned> _results;
@@ -85,43 +82,6 @@ private:
}
}
template <class T>
void topResults(unsigned int n, InferenceEngine::Blob::Ptr& input, std::vector<unsigned>& output) {
InferenceEngine::SizeVector dims = input->getTensorDesc().getDims();
size_t input_rank = dims.size();
if (!input_rank || !dims[0])
IE_THROW() << "Input blob has incorrect dimensions!";
size_t batchSize = dims[0];
std::vector<unsigned> indexes(input->size() / batchSize);
n = static_cast<unsigned>(std::min<size_t>((size_t)n, input->size()));
output.resize(n * batchSize);
InferenceEngine::MemoryBlob::CPtr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
if (!moutput) {
IE_THROW() << "Output blob should be inherited from MemoryBlob";
}
// locked memory holder should be alive all time while access to its buffer happens
auto moutputHolder = moutput->rmap();
for (size_t i = 0; i < batchSize; i++) {
size_t offset = i * (input->size() / batchSize);
T* batchData = moutputHolder.as<T*>();
batchData += offset;
std::iota(std::begin(indexes), std::end(indexes), 0);
std::partial_sort(std::begin(indexes),
std::begin(indexes) + n,
std::end(indexes),
[&batchData](unsigned l, unsigned r) {
return batchData[l] > batchData[r];
});
for (unsigned j = 0; j < n; j++) {
output.at(i * n + j) = indexes.at(j);
}
}
}
/**
* @brief Gets the top n results from a blob
*
@@ -156,57 +116,12 @@ private:
#undef TENSOR_TOP_RESULT
}
void topResults(unsigned int n, InferenceEngine::Blob::Ptr& input, std::vector<unsigned>& output) {
#define TBLOB_TOP_RESULT(precision) \
case InferenceEngine::Precision::precision: { \
using myBlobType = InferenceEngine::PrecisionTrait<InferenceEngine::Precision::precision>::value_type; \
topResults<myBlobType>(n, input, output); \
break; \
}
switch (input->getTensorDesc().getPrecision()) {
TBLOB_TOP_RESULT(FP32);
TBLOB_TOP_RESULT(FP64);
TBLOB_TOP_RESULT(FP16);
TBLOB_TOP_RESULT(Q78);
TBLOB_TOP_RESULT(I16);
TBLOB_TOP_RESULT(U8);
TBLOB_TOP_RESULT(I8);
TBLOB_TOP_RESULT(U16);
TBLOB_TOP_RESULT(I32);
TBLOB_TOP_RESULT(U32);
TBLOB_TOP_RESULT(U64);
TBLOB_TOP_RESULT(I64);
default:
IE_THROW() << "cannot locate blob for precision: " << input->getTensorDesc().getPrecision();
}
#undef TBLOB_TOP_RESULT
}
public:
explicit ClassificationResultT(InferenceEngine::Blob::Ptr output_blob,
std::vector<strType> image_names = {},
size_t batch_size = 1,
size_t num_of_top = 10,
std::vector<std::string> labels = {})
: _nTop(num_of_top),
_outBlob(std::move(output_blob)),
_labels(std::move(labels)),
_imageNames(std::move(image_names)),
_batchSize(batch_size),
_results() {
if (_imageNames.size() != _batchSize) {
throw std::logic_error("Batch size should be equal to the number of images.");
}
topResults(_nTop, _outBlob, _results);
}
explicit ClassificationResultT(const ov::runtime::Tensor& output_tensor,
const std::vector<strType>& image_names = {},
size_t batch_size = 1,
size_t num_of_top = 10,
const std::vector<std::string>& labels = {})
explicit ClassificationResult(const ov::runtime::Tensor& output_tensor,
const std::vector<std::string>& image_names = {},
size_t batch_size = 1,
size_t num_of_top = 10,
const std::vector<std::string>& labels = {})
: _nTop(num_of_top),
_outTensor(output_tensor),
_labels(labels),
@@ -225,17 +140,17 @@ public:
/** Print the result iterating over each batch **/
std::ios::fmtflags fmt(std::cout.flags());
std::cout << std::endl << "Top " << _nTop << " results:" << std::endl << std::endl;
for (unsigned int image_id = 0; image_id < _batchSize; ++image_id) {
std::wstring out(_imageNames[image_id].begin(), _imageNames[image_id].end());
std::wcout << L"Image " << out;
std::wcout.flush();
std::wcout.clear();
std::wcout << std::endl << std::endl;
for (size_t image_id = 0; image_id < _batchSize; ++image_id) {
std::string out(_imageNames[image_id].begin(), _imageNames[image_id].end());
std::cout << "Image " << out;
std::cout.flush();
std::cout.clear();
std::cout << std::endl << std::endl;
printHeader();
for (size_t id = image_id * _nTop, cnt = 0; id < (image_id + 1) * _nTop; ++cnt, ++id) {
std::cout.precision(7);
/** Getting probability for resulting class **/
// Getting probability for resulting class
const auto index = _results.at(id) + image_id * (_outTensor.get_size() / _batchSize);
const auto result = _outTensor.data<const float>()[index];
@@ -256,30 +171,18 @@ public:
/** Print the result iterating over each batch **/
std::ios::fmtflags fmt(std::cout.flags());
std::cout << std::endl << "Top " << _nTop << " results:" << std::endl << std::endl;
for (unsigned int image_id = 0; image_id < _batchSize; ++image_id) {
std::wstring out(_imageNames[image_id].begin(), _imageNames[image_id].end());
std::wcout << L"Image " << out;
std::wcout.flush();
std::wcout.clear();
std::wcout << std::endl << std::endl;
for (size_t image_id = 0; image_id < _batchSize; ++image_id) {
std::string out(_imageNames[image_id].begin(), _imageNames[image_id].end());
std::cout << "Image " << out;
std::cout.flush();
std::cout.clear();
std::cout << std::endl << std::endl;
printHeader();
InferenceEngine::MemoryBlob::CPtr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(_outBlob);
auto moutputHolder = moutput->rmap();
for (size_t id = image_id * _nTop, cnt = 0; id < (image_id + 1) * _nTop; ++cnt, ++id) {
std::cout.precision(7);
/** Getting probability for resulting class **/
if (!moutput) {
throw std::logic_error("We expect _outBlob to be inherited from MemoryBlob in "
"ClassificationResult::print, "
"but by fact we were not able to cast _outBlob to MemoryBlob");
}
// locked memory holder should be alive all time while access to its buffer happens
const auto result =
moutputHolder
.as<const InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type*>()
[_results.at(id) + image_id * (_outBlob->size() / _batchSize)];
// Getting probability for resulting class
const auto result = _outTensor.data<float>();
std::cout << std::setw(static_cast<int>(_classidStr.length())) << std::left << _results.at(id) << " ";
std::cout << std::left << std::setw(static_cast<int>(_probabilityStr.length())) << std::fixed << result;
@@ -300,6 +203,3 @@ public:
return _results;
}
};
using ClassificationResult = ClassificationResultT<>;
using ClassificationResultW = ClassificationResultT<std::wstring>;

View File

@@ -12,7 +12,6 @@
#include <algorithm>
#include <fstream>
#include <functional>
#include <inference_engine.hpp>
#include <iomanip>
#include <iostream>
#include <limits>
@@ -23,8 +22,11 @@
#include <utility>
#include <vector>
// clang-format off
#include "inference_engine.hpp"
#include "openvino/openvino.hpp"
#include "slog.hpp"
// clang-format on
#ifndef UNUSED
# if defined(_MSC_VER) && !defined(__clang__)
@@ -411,6 +413,7 @@ static UNUSED void addRectangles(unsigned char* data,
{180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0},
{100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111},
{81, 0, 81}};
if (rectangles.size() % 4 != 0 || rectangles.size() / 4 != classes.size()) {
return;
}
@@ -433,21 +436,21 @@ static UNUSED void addRectangles(unsigned char* data,
h = 0;
if (static_cast<std::size_t>(x) >= width) {
x = width - 1;
x = static_cast<int>(width - 1);
w = 0;
thickness = 1;
}
if (static_cast<std::size_t>(y) >= height) {
y = height - 1;
y = static_cast<int>(height - 1);
h = 0;
thickness = 1;
}
if (static_cast<std::size_t>(x + w) >= width) {
w = width - x - 1;
w = static_cast<int>(width - x - 1);
}
if (static_cast<std::size_t>(y + h) >= height) {
h = height - y - 1;
h = static_cast<int>(height - y - 1);
}
thickness = std::min(std::min(thickness, w / 2 + 1), h / 2 + 1);
@@ -951,7 +954,7 @@ public:
rec.push_back(recall);
}
int num = rec.size();
int num = static_cast<int>(rec.size());
// 11point from Caffe
double ap = 0;
@@ -1123,8 +1126,8 @@ inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) {
}
inline void showAvailableDevices() {
InferenceEngine::Core ie;
std::vector<std::string> devices = ie.GetAvailableDevices();
ov::runtime::Core core;
std::vector<std::string> devices = core.get_available_devices();
std::cout << std::endl;
std::cout << "Available target devices:";

View File

@@ -4,14 +4,14 @@
#pragma once
#include <time.h>
#include <ctime>
#include <fstream>
#include <iostream>
#include <samples/slog.hpp>
#include <sstream>
#include <string>
#include "samples/slog.hpp"
/**
* @class CsvDumper
* @brief A CsvDumper class provides functionality for dumping the values in CSV files

View File

@@ -10,9 +10,9 @@
#pragma once
#include <opencv2/opencv.hpp>
#include <samples/common.hpp>
#include "openvino/openvino.hpp"
#include "samples/common.hpp"
/**
* @brief Sets image data stored in cv::Mat object to a given Blob object.

View File

@@ -11,6 +11,7 @@
#include <ostream>
#include <string>
#include <vector>
namespace slog {
/**
@@ -29,6 +30,14 @@ class LogStreamBoolAlpha {};
static constexpr LogStreamBoolAlpha boolalpha;
/**
* @class LogStreamFlush
* @brief The LogStreamFlush class implements flushing for a log stream
*/
class LogStreamFlush {};
static constexpr LogStreamFlush flush;
/**
* @class LogStream
* @brief The LogStream class implements a stream for sample logging
@@ -60,11 +69,30 @@ public:
return *this;
}
/**
* @brief Overload output stream operator to print vectors in pretty form
* [value1, value2, ...]
*/
template <typename T>
LogStream& operator<<(const std::vector<T>& v) {
(*_log_stream) << "[ ";
for (auto&& value : v)
(*_log_stream) << value << " ";
(*_log_stream) << "]";
return *this;
}
// Specializing for LogStreamEndLine to support slog::endl
LogStream& operator<<(const LogStreamEndLine&);
// Specializing for LogStreamBoolAlpha to support slog::boolalpha
LogStream& operator<<(const LogStreamBoolAlpha&);
// Specializing for LogStreamFlush to support slog::flush
LogStream& operator<<(const LogStreamFlush&);
};
extern LogStream info;

View File

@@ -4,6 +4,7 @@
#pragma once
#include <fstream>
#include <map>
#include <string>

View File

@@ -2,20 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "samples/args_helper.hpp"
#include <gflags/gflags.h>
// clang-format off
#include <sys/stat.h>
#include <iostream>
#include <samples/slog.hpp>
#ifdef _WIN32
# include <samples/os/windows/w_dirent.h>
# include "samples/os/windows/w_dirent.h"
#else
# include <dirent.h>
#endif
#include "openvino/openvino.hpp"
#include "gflags/gflags.h"
#include "samples/args_helper.hpp"
#include "samples/slog.hpp"
// clang-format on
/**
* @brief Checks input file argument and add it to files vector
* @param files reference to vector to store file names

View File

@@ -2,10 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "samples/slog.hpp"
// clang-format off
#include <iostream>
#include "samples/slog.hpp"
// clang-format on
namespace slog {
LogStream info("INFO", std::cout);
@@ -32,4 +34,10 @@ LogStream& LogStream::operator<<(const LogStreamBoolAlpha& /*arg*/) {
return *this;
}
} // namespace slog
// Specializing for LogStreamFlush to support slog::flush
LogStream& LogStream::operator<<(const LogStreamFlush& /*arg*/) {
(*_log_stream) << std::flush;
return *this;
}
} // namespace slog

View File

@@ -41,7 +41,7 @@ int tmain(int argc, tchar* argv[]) {
// -------- Step 2. Read a model --------
slog::info << "Loading model files: " << model_path << slog::endl;
auto model = core.read_model(model_path);
std::shared_ptr<ov::Model> model = core.read_model(model_path);
printInputAndOutputsInfo(*model);
OPENVINO_ASSERT(model->get_parameters().size() == 1, "Sample supports models with 1 input only");
@@ -67,15 +67,16 @@ int tmain(int argc, tchar* argv[]) {
const ov::Shape tensor_shape = input_tensor.get_shape();
const ov::Layout tensor_layout{"NHWC"};
// -------- Step 4. Apply preprocessing --------
// -------- Step 4. Configure preprocessing --------
ov::preprocess::PrePostProcessor ppp(model);
ov::preprocess::PrePostProcessor preproc(model);
// 1) Set input tensor information:
// - input() provides information about a single model input
// - precision of tensor is supposed to be 'u8'
// - layout of data is 'NHWC'
// - set static spatial dimensions to input tensor to resize from
preproc.input()
ppp.input()
.tensor()
.set_element_type(ov::element::u8)
.set_layout(tensor_layout)
@@ -84,14 +85,15 @@ int tmain(int argc, tchar* argv[]) {
// 2) Adding explicit preprocessing steps:
// - convert layout to 'NCHW' (from 'NHWC' specified above at tensor layout)
// - apply linear resize from tensor spatial dims to model spatial dims
preproc.input().preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
ppp.input().preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
// 4) Here we suppose model has 'NCHW' layout for input
preproc.input().model().set_layout("NCHW");
ppp.input().model().set_layout("NCHW");
// 5) Set output tensor information:
// - precision of tensor is supposed to be 'f32'
preproc.output().tensor().set_element_type(ov::element::f32);
ppp.output().tensor().set_element_type(ov::element::f32);
// 6) Apply preprocessing modifing the original 'model'
model = preproc.build();
model = ppp.build();
// -------- Step 5. Loading a model to the device --------
ov::runtime::CompiledModel compiled_model = core.compile_model(model, device_name);

View File

@@ -4,4 +4,4 @@
ie_add_sample(NAME hello_nv12_input_classification
SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
DEPENDENCIES ie_samples_utils)
DEPENDENCIES format_reader ie_samples_utils)

View File

@@ -2,30 +2,38 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <samples/classification_results.h>
#include <sys/stat.h>
#include <cassert>
#include <fstream>
#include <inference_engine.hpp>
#include <iostream>
#include <memory>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#ifdef _WIN32
# include <samples/os/windows/w_dirent.h>
# include "samples/os/windows/w_dirent.h"
#else
# include <dirent.h>
#endif
using namespace InferenceEngine;
// clang-format off
#include "openvino/openvino.hpp"
#include "samples/args_helper.hpp"
#include "samples/common.hpp"
#include "samples/slog.hpp"
#include "samples/classification_results.h"
#include "format_reader_ptr.h"
// clang-format on
constexpr auto N_TOP_RESULTS = 10;
using namespace ov::preprocess;
/**
* \brief Parse image size provided as string in format WIDTHxHEIGHT
* @brief Parse image size provided as string in format WIDTHxHEIGHT
* @param string of image size in WIDTHxHEIGHT format
* @return parsed width and height
*/
@@ -54,258 +62,96 @@ std::pair<size_t, size_t> parseImageSize(const std::string& size_string) {
return {width, height};
}
// Comparing to samples/args_helper.hpp, this version filters files by ".yuv"
// extension
/**
 * @brief Collect input file names from a given path.
 *
 * If @p path is a directory, only entries ending in ".yuv" are collected;
 * otherwise @p path itself is returned as the single entry.
 *
 * @param path path to a file or a directory to be checked for existence
 * @return vector of verified input file paths (empty on access error)
 */
std::vector<std::string> readInputFileNames(const std::string& path) {
    struct stat sb;
    if (stat(path.c_str(), &sb) != 0) {
        slog::warn << "File " << path << " cannot be opened!" << slog::endl;
        return {};
    }

    std::vector<std::string> files;

    if (S_ISDIR(sb.st_mode)) {
        DIR* dp = opendir(path.c_str());
        if (dp == nullptr) {
            slog::warn << "Directory " << path << " cannot be opened!" << slog::endl;
            return {};
        }

        const std::string ext = ".yuv";
        for (struct dirent* ep = readdir(dp); ep != nullptr; ep = readdir(dp)) {
            const std::string fileName = ep->d_name;
            // Keep only "*.yuv" files. The length check also rejects "." and
            // "..". NOTE: the previous code used
            // `fileName.substr(fileName.size() - 4)`, which threw
            // std::out_of_range for names shorter than 4 characters because
            // `size() - 4` wraps around on the unsigned size_t.
            if (fileName.size() < ext.size() ||
                fileName.compare(fileName.size() - ext.size(), ext.size(), ext) != 0)
                continue;
            files.push_back(path + "/" + ep->d_name);
        }
        closedir(dp);
    } else {
        files.push_back(path);
    }

    const size_t max_files = 20;
    if (files.size() < max_files) {
        slog::info << "Files were added: " << files.size() << slog::endl;
        // Const reference: avoid copying each path just to print it.
        for (const std::string& filePath : files) {
            slog::info << " " << filePath << slog::endl;
        }
    } else {
        slog::info << "Files were added: " << files.size() << ". Too many to display each of them." << slog::endl;
    }

    return files;
}
using UString = std::basic_string<uint8_t>;
/**
* \brief Read image data from file
* @param vector files paths
* @param size of file paths vector
* @return buffers containing the images data
*/
std::vector<UString> readImagesDataFromFiles(const std::vector<std::string>& files, size_t size) {
std::vector<UString> result;
for (const auto& image_path : files) {
std::ifstream file(image_path, std::ios_base::ate | std::ios_base::binary);
if (!file.good() || !file.is_open()) {
std::stringstream err;
err << "Cannot access input image file. File path: " << image_path;
throw std::runtime_error(err.str());
}
const size_t file_size = file.tellg();
if (file_size < size) {
std::stringstream err;
err << "Invalid read size provided. File size: " << file_size << ", to read: " << size;
throw std::runtime_error(err.str());
}
file.seekg(0);
UString data(size, 0);
file.read(reinterpret_cast<char*>(&data[0]), size);
result.push_back(std::move(data));
}
return result;
}
/**
* @brief Read input image to blob
* @param ref to input image data
* @param width input image
* @param height input image
* @return blob point to hold the NV12 input data
*/
std::vector<Blob::Ptr> readInputBlobs(std::vector<UString>& data, size_t width, size_t height) {
// read image with size converted to NV12 data size: height(NV12) = 3 / 2 *
// logical height
// Create tensor descriptors for Y and UV blobs
const InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8,
{1, 1, height, width},
InferenceEngine::Layout::NHWC);
const InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8,
{1, 2, height / 2, width / 2},
InferenceEngine::Layout::NHWC);
const size_t offset = width * height;
std::vector<Blob::Ptr> blobs;
for (auto& buf : data) {
// --------------------------- Create a blob to hold the NV12 input data
// -------------------------------
auto ptr = &buf[0];
// Create blob for Y plane from raw data
Blob::Ptr y_blob = make_shared_blob<uint8_t>(y_plane_desc, ptr);
// Create blob for UV plane from raw data
Blob::Ptr uv_blob = make_shared_blob<uint8_t>(uv_plane_desc, ptr + offset);
// Create NV12Blob from Y and UV blobs
blobs.emplace_back(make_shared_blob<NV12Blob>(y_blob, uv_blob));
}
return blobs;
}
/**
 * @brief Check whether a device reports support for batched blobs.
 * @param ie Inference Engine core object
 * @param device_name name of the device to query
 * @return true when BATCHED_BLOB is among the device optimization capabilities
 */
bool isBatchedBlobSupported(const Core& ie, const std::string& device_name) {
    // Small helper: linear membership test over a metric value list.
    const auto contains = [](const std::vector<std::string>& values, const std::string& key) {
        return std::find(values.begin(), values.end(), key) != values.end();
    };

    const std::vector<std::string> metrics = ie.GetMetric(device_name, METRIC_KEY(SUPPORTED_METRICS));
    if (!contains(metrics, METRIC_KEY(OPTIMIZATION_CAPABILITIES))) {
        return false;
    }

    const std::vector<std::string> capabilities = ie.GetMetric(device_name, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
    return contains(capabilities, METRIC_VALUE(BATCHED_BLOB));
}
/**
* @brief The entry point of the Inference Engine sample application
* @brief The entry point of the OpenVINO Runtime sample application
*/
int main(int argc, char* argv[]) {
try {
// ------------------------------ Parsing and validation input
// arguments------------------------------
// -------- Get OpenVINO runtime version --------
slog::info << ov::get_openvino_version() << slog::endl;
// -------- Parsing and validation input arguments --------
if (argc != 5) {
std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image(s)> <image_size> <device_name>"
std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <image_size> <device_name>"
<< std::endl;
return EXIT_FAILURE;
}
const std::string input_model{argv[1]};
const std::string input_image_path{argv[2]};
size_t input_width = 0, input_height = 0;
const std::string model_path{argv[1]};
const std::string image_path{argv[2]};
size_t input_width = 0;
size_t input_height = 0;
std::tie(input_width, input_height) = parseImageSize(argv[3]);
const std::string device_name{argv[4]};
// -----------------------------------------------------------------------------------------------------
// ------------------------------ Read image names
// -----------------------------------------------------
auto image_names = readInputFileNames(input_image_path);
if (image_names.empty()) {
throw std::invalid_argument("images not found");
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- Step 1. Initialize inference engine core
// ------------------------------------------------
Core ie;
// -----------------------------------------------------------------------------------------------------
// Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
// .bin files) or ONNX (.onnx file) format
CNNNetwork network = ie.ReadNetwork(input_model);
// -----------------------------------------------------------------------------------------------------
// --------------------------- Reshape model
// -------------------------------------------------
size_t netInputSize = isBatchedBlobSupported(ie, device_name) ? image_names.size() : 1;
ICNNNetwork::InputShapes inputShapes = network.getInputShapes();
for (auto& shape : inputShapes) {
auto& dims = shape.second;
if (dims.empty()) {
throw std::runtime_error("Network's input shapes have empty dimensions");
}
dims[0] = netInputSize;
}
network.reshape(inputShapes);
size_t batchSize = network.getBatchSize();
std::cout << "Batch size is " << batchSize << std::endl;
// -----------------------------------------------------------------------------------------------------
// --------------------------- Step 3. Configure input and output
// -------------------------------------------
// --------------------------- Prepare input blobs
// -----------------------------------------------------
if (network.getInputsInfo().empty()) {
std::cerr << "Network inputs info is empty" << std::endl;
return EXIT_FAILURE;
}
InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
std::string input_name = network.getInputsInfo().begin()->first;
input_info->setLayout(Layout::NCHW);
input_info->setPrecision(Precision::U8);
// set input resize algorithm to enable input autoresize
input_info->getPreProcess().setResizeAlgorithm(ResizeAlgorithm::RESIZE_BILINEAR);
// set input color format to ColorFormat::NV12 to enable automatic input
// color format pre-processing
input_info->getPreProcess().setColorFormat(ColorFormat::NV12);
// --------------------------- Prepare output blobs
// ----------------------------------------------------
if (network.getOutputsInfo().empty()) {
std::cerr << "Network outputs info is empty" << std::endl;
return EXIT_FAILURE;
}
DataPtr output_info = network.getOutputsInfo().begin()->second;
std::string output_name = network.getOutputsInfo().begin()->first;
output_info->setPrecision(Precision::FP32);
// -----------------------------------------------------------------------------------------------------
// --------------------------- Step 4. Loading a model to the device
// ----------------------------------------
ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name);
// -----------------------------------------------------------------------------------------------------
// --------------------------- Step 5. Create an infer request
// ----------------------------------------------
InferRequest infer_request = executable_network.CreateInferRequest();
// -----------------------------------------------------------------------------------------------------
// --------------------------- Step 6. Prepare input
// --------------------------------------------------------
auto image_bufs = readImagesDataFromFiles(image_names, input_width * (input_height * 3 / 2));
auto inputs = readInputBlobs(image_bufs, input_width, input_height);
// If batch_size > 1 => batched blob supported => replace all inputs by a
// BatchedBlob
if (netInputSize > 1) {
assert(netInputSize == inputs.size());
std::cout << "Infer using BatchedBlob of NV12 images." << std::endl;
Blob::Ptr batched_input = make_shared_blob<BatchedBlob>(inputs);
inputs = {batched_input};
// -------- Read image names --------
FormatReader::ReaderPtr reader(image_path.c_str());
if (reader.get() == nullptr) {
std::string msg = "Image " + image_path + " cannot be read!";
throw std::logic_error(msg);
}
/** Read labels from file (e.x. AlexNet.labels) **/
std::string labelFileName = fileNameNoExt(input_model) + ".labels";
size_t batch = 1;
// -----------------------------------------------------------------------------------------------------
// -------- Step 1. Initialize OpenVINO Runtime Core ---------
ov::runtime::Core core;
// -------- Step 2. Read a model --------
slog::info << "Loading model files: " << model_path << slog::endl;
std::shared_ptr<ov::Model> model = core.read_model(model_path);
printInputAndOutputsInfo(*model);
OPENVINO_ASSERT(model->get_parameters().size() == 1, "Sample supports models with 1 input only");
OPENVINO_ASSERT(model->get_results().size() == 1, "Sample supports models with 1 output only");
std::string input_tensor_name = model->input().get_any_name();
std::string output_tensor_name = model->output().get_any_name();
// -------- Step 3. Configure preprocessing --------
PrePostProcessor ppp = PrePostProcessor(model);
// 1) Select input with 'input_tensor_name' tensor name
InputInfo& input_info = ppp.input(input_tensor_name);
// 2) Set input type
// - as 'u8' precision
// - set color format to NV12 (single plane)
// - static spatial dimensions for resize preprocessing operation
input_info.tensor()
.set_element_type(ov::element::u8)
.set_color_format(ColorFormat::NV12_SINGLE_PLANE)
.set_spatial_static_shape(input_height, input_width);
// 3) Pre-processing steps:
// a) Convert to 'float'. This is to have color conversion more accurate
// b) Convert to BGR: Assumes that model accepts images in BGR format. For RGB, change it manually
// c) Resize image from tensor's dimensions to model ones
input_info.preprocess()
.convert_element_type(ov::element::f32)
.convert_color(ColorFormat::BGR)
.resize(ResizeAlgorithm::RESIZE_LINEAR);
// 4) Set model data layout (Assuming model accepts images in NCHW layout)
input_info.model().set_layout("NCHW");
// 5) Apply preprocessing to an input with 'input_tensor_name' name of loaded model
model = ppp.build();
// -------- Step 4. Loading a model to the device --------
ov::runtime::CompiledModel compiled_model = core.compile_model(model, device_name);
// -------- Step 5. Create an infer request --------
ov::runtime::InferRequest infer_request = compiled_model.create_infer_request();
// -------- Step 6. Prepare input data --------
std::shared_ptr<unsigned char> image_data = reader->getData(input_width, input_height);
ov::runtime::Tensor input_tensor{ov::element::u8,
{batch, input_height * 3 / 2, input_width, 1},
image_data.get()};
// Read labels from file (e.x. AlexNet.labels)
std::string labelFileName = fileNameNoExt(model_path) + ".labels";
std::vector<std::string> labels;
std::ifstream inputFile;
@@ -318,37 +164,26 @@ int main(int argc, char* argv[]) {
}
}
for (size_t i = 0; i < inputs.size(); i++) {
const auto& input = inputs[i];
// --------------------------- Set the input blob to the InferRequest
// ------------------------------
infer_request.SetBlob(input_name, input);
// -------------------------------------------------------------------------------------------------
// -------- Step 6. Set input tensor --------
// Set the input tensor by tensor name to the InferRequest
infer_request.set_tensor(input_tensor_name, input_tensor);
// --------------------------- Step 7. Do inference
// -----------------------------------------------------
/* Running the request synchronously */
infer_request.Infer();
// -------------------------------------------------------------------------------------------------
// -------- Step 7. Do inference --------
// Running the request synchronously
infer_request.infer();
// --------------------------- Step 8. Process output
// ---------------------------------------------------
Blob::Ptr output = infer_request.GetBlob(output_name);
// -------- Step 8. Process output --------
ov::runtime::Tensor output = infer_request.get_tensor(output_tensor_name);
// Print classification results
const auto names_offset = image_names.begin() + netInputSize * i;
std::vector<std::string> names(names_offset, names_offset + netInputSize);
// Print classification results
ClassificationResult classification_result(output, {image_path}, batch, N_TOP_RESULTS, labels);
classification_result.show();
ClassificationResult classificationResult(output, names, netInputSize, 10, labels);
classificationResult.print();
// -------------------------------------------------------------------------------------------------
}
} catch (const std::exception& ex) {
std::cerr << ex.what() << std::endl;
return EXIT_FAILURE;
}
std::cout << "This sample is an API example, for any performance measurements "
"please use the dedicated benchmark_app tool"
<< std::endl;
return EXIT_SUCCESS;
}

View File

@@ -5,28 +5,18 @@
#include <cstdlib>
#include <iomanip>
#include <memory>
#include <samples/common.hpp>
#include <set>
#include <string>
#include <tuple>
#include <vector>
#include "ie_plugin_config.hpp"
// clang-format off
#include "openvino/openvino.hpp"
#include "samples/common.hpp"
#include "samples/slog.hpp"
// clang-format on
namespace {
/**
 * @brief Stream a vector as a bracketed, space-separated list:
 * [ value1 value2 ... ]
 */
template <typename T>
std::ostream& operator<<(std::ostream& stream, const std::vector<T>& v) {
    stream << "[ ";
    for (std::size_t i = 0; i < v.size(); ++i) {
        stream << v[i] << " ";
    }
    stream << "]";
    return stream;
}
/**
* @brief Print IE Parameters
* @param reference on IE Parameter
@@ -34,54 +24,54 @@ std::ostream& operator<<(std::ostream& stream, const std::vector<T>& v) {
*/
void printAnyValue(const ov::Any& value) {
if (value.empty()) {
std::cout << "EMPTY VALUE" << std::endl;
slog::info << "EMPTY VALUE" << slog::endl;
} else if (value.is<bool>()) {
std::cout << std::boolalpha << value.as<bool>() << std::noboolalpha << std::endl;
slog::info << std::boolalpha << value.as<bool>() << std::noboolalpha << slog::endl;
} else if (value.is<int>()) {
std::cout << value.as<int>() << std::endl;
slog::info << value.as<int>() << slog::endl;
} else if (value.is<unsigned int>()) {
std::cout << value.as<unsigned int>() << std::endl;
slog::info << value.as<unsigned int>() << slog::endl;
} else if (value.is<uint64_t>()) {
std::cout << value.as<uint64_t>() << std::endl;
slog::info << value.as<uint64_t>() << slog::endl;
} else if (value.is<float>()) {
std::cout << value.as<float>() << std::endl;
slog::info << value.as<float>() << slog::endl;
} else if (value.is<std::string>()) {
std::string stringValue = value.as<std::string>();
std::cout << (stringValue.empty() ? "\"\"" : stringValue) << std::endl;
slog::info << (stringValue.empty() ? "\"\"" : stringValue) << slog::endl;
} else if (value.is<std::vector<std::string>>()) {
std::cout << value.as<std::vector<std::string>>() << std::endl;
slog::info << value.as<std::vector<std::string>>() << slog::endl;
} else if (value.is<std::vector<int>>()) {
std::cout << value.as<std::vector<int>>() << std::endl;
slog::info << value.as<std::vector<int>>() << slog::endl;
} else if (value.is<std::vector<float>>()) {
std::cout << value.as<std::vector<float>>() << std::endl;
slog::info << value.as<std::vector<float>>() << slog::endl;
} else if (value.is<std::vector<unsigned int>>()) {
std::cout << value.as<std::vector<unsigned int>>() << std::endl;
slog::info << value.as<std::vector<unsigned int>>() << slog::endl;
} else if (value.is<std::tuple<unsigned int, unsigned int, unsigned int>>()) {
auto values = value.as<std::tuple<unsigned int, unsigned int, unsigned int>>();
std::cout << "{ ";
std::cout << std::get<0>(values) << ", ";
std::cout << std::get<1>(values) << ", ";
std::cout << std::get<2>(values);
std::cout << " }";
std::cout << std::endl;
slog::info << "{ ";
slog::info << std::get<0>(values) << ", ";
slog::info << std::get<1>(values) << ", ";
slog::info << std::get<2>(values);
slog::info << " }";
slog::info << slog::endl;
} else if (value.is<InferenceEngine::Metrics::DeviceType>()) {
auto v = value.as<InferenceEngine::Metrics::DeviceType>();
std::cout << v << std::endl;
slog::info << v << slog::endl;
} else if (value.is<std::map<InferenceEngine::Precision, float>>()) {
auto values = value.as<std::map<InferenceEngine::Precision, float>>();
std::cout << "{ ";
slog::info << "{ ";
for (auto& kv : values) {
std::cout << kv.first << ": " << kv.second << "; ";
slog::info << kv.first << ": " << kv.second << "; ";
}
std::cout << " }";
std::cout << std::endl;
slog::info << " }";
slog::info << slog::endl;
} else if (value.is<std::tuple<unsigned int, unsigned int>>()) {
auto values = value.as<std::tuple<unsigned int, unsigned int>>();
std::cout << "{ ";
std::cout << std::get<0>(values) << ", ";
std::cout << std::get<1>(values);
std::cout << " }";
std::cout << std::endl;
slog::info << "{ ";
slog::info << std::get<0>(values) << ", ";
slog::info << std::get<1>(values);
slog::info << " }";
slog::info << slog::endl;
} else {
std::stringstream strm;
value.print(strm);
@@ -98,6 +88,9 @@ void printAnyValue(const ov::Any& value) {
int main(int argc, char* argv[]) {
try {
// -------- Get OpenVINO runtime version --------
slog::info << ov::get_openvino_version() << slog::endl;
// -------- Parsing and validation of input arguments --------
if (argc != 1) {
std::cout << "Usage : " << argv[0] << std::endl;
@@ -105,25 +98,22 @@ int main(int argc, char* argv[]) {
}
// -------- Step 1. Initialize OpenVINO Runtime Core --------
std::cout << "Loading OpenVINO Runtime" << std::endl;
ov::runtime::Core core;
// -------- Step 2. Get list of available devices --------
std::vector<std::string> availableDevices = core.get_available_devices();
// -------- Step 3. Query and print supported metrics and config keys --------
std::cout << "Available devices: " << std::endl;
slog::info << "Available devices: " << slog::endl;
for (auto&& device : availableDevices) {
std::cout << device << std::endl;
slog::info << device << slog::endl;
// Query supported metrics and print all of them
std::cout << "\tSUPPORTED_METRICS: " << std::endl;
slog::info << "\tSUPPORTED_METRICS: " << slog::endl;
std::vector<std::string> supportedMetrics = core.get_metric(device, METRIC_KEY(SUPPORTED_METRICS));
for (auto&& metricName : supportedMetrics) {
if (metricName != METRIC_KEY(SUPPORTED_METRICS) && metricName != METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::cout << "\t\t" << metricName << " : " << std::flush;
slog::info << "\t\t" << metricName << " : " << slog::flush;
printAnyValue(core.get_metric(device, metricName));
}
}
@@ -131,16 +121,16 @@ int main(int argc, char* argv[]) {
// Query supported config keys and print all of them
if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) !=
supportedMetrics.end()) {
std::cout << "\tSUPPORTED_CONFIG_KEYS (default values): " << std::endl;
slog::info << "\tSUPPORTED_CONFIG_KEYS (default values): " << slog::endl;
std::vector<std::string> supportedConfigKeys =
core.get_metric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
for (auto&& configKey : supportedConfigKeys) {
std::cout << "\t\t" << configKey << " : " << std::flush;
slog::info << "\t\t" << configKey << " : " << slog::flush;
printAnyValue(core.get_config(device, configKey));
}
}
std::cout << std::endl;
slog::info << slog::endl;
}
} catch (const std::exception& ex) {
std::cerr << std::endl << "Exception occurred: " << ex.what() << std::endl << std::flush;

View File

@@ -2,6 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <format_reader_ptr.h>
#include <inference_engine.hpp>
#include <memory>
#include <string>
#include <vector>
@@ -202,6 +205,9 @@ int main(int argc, char* argv[]) {
std::cerr << ex.what() << std::endl;
return EXIT_FAILURE;
}
std::cout << std::endl
<< "This sample is an API example, for any performance measurements "
"please use the dedicated benchmark_app tool"
<< std::endl;
return EXIT_SUCCESS;
}

View File

@@ -10,53 +10,25 @@
#include <string>
#include <vector>
#include "format_reader_ptr.h"
#include "gflags/gflags.h"
#include "ngraph/util.hpp"
#include "ngraph_function_creation_sample.hpp"
// clang-format off
#include "openvino/openvino.hpp"
#include "openvino/opsets/opset8.hpp"
#include "ngraph/util.hpp"
#include "samples/args_helper.hpp"
#include "samples/classification_results.h"
#include "samples/common.hpp"
#include "samples/classification_results.h"
#include "samples/slog.hpp"
#include "ngraph_function_creation_sample.hpp"
// clang-format on
constexpr auto N_TOP_RESULTS = 1;
constexpr auto LENET_WEIGHTS_SIZE = 1724336;
constexpr auto LENET_NUM_CLASSES = 10;
using namespace ov;
/**
 * @brief Parses and validates command line arguments
 * @param argc number of args
 * @param argv list of input arguments
 * @return true when execution should continue, false when only help was requested
 */
bool ParseAndCheckCommandLine(int argc, char* argv[]) {
    slog::info << "Parsing input parameters" << slog::endl;
    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);

    if (FLAGS_h) {
        // Help requested: show usage and the device list, then stop execution.
        showUsage();
        showAvailableDevices();
        return false;
    }

    if (!(FLAGS_nt > 0 && FLAGS_nt <= 10)) {
        throw std::logic_error("Incorrect value for nt argument. It should be greater than 0 and less than 10.");
    }

    if (FLAGS_m.empty()) {
        showUsage();
        throw std::logic_error("Path to a .bin file with weights for the trained model is required but not set. Please set -m option.");
    }

    if (FLAGS_i.empty()) {
        showUsage();
        throw std::logic_error("Path to an image is required but not set. Please set -i option.");
    }

    return true;
}
using namespace ov::preprocess;
/**
* @brief Read file to the buffer
@@ -90,7 +62,7 @@ ov::runtime::Tensor ReadWeights(const std::string& filepath) {
std::ifstream weightFile(filepath, std::ifstream::ate | std::ifstream::binary);
int64_t fileSize = weightFile.tellg();
OPENVINO_ASSERT(fileSize == 1724336,
OPENVINO_ASSERT(fileSize == LENET_WEIGHTS_SIZE,
"Incorrect weights file. This sample works only with LeNet "
"classification model.");
@@ -104,8 +76,8 @@ ov::runtime::Tensor ReadWeights(const std::string& filepath) {
* @brief Create ngraph function
* @return Ptr to ngraph function
*/
std::shared_ptr<ov::Model> createNgraphFunction() {
auto weights = ReadWeights(FLAGS_m);
std::shared_ptr<ov::Model> createNgraphFunction(const std::string& path_to_weights) {
const ov::runtime::Tensor weights = ReadWeights(path_to_weights);
const std::uint8_t* data = weights.data<std::uint8_t>();
// -------input------
@@ -252,147 +224,103 @@ int main(int argc, char* argv[]) {
slog::info << ov::get_openvino_version() << slog::endl;
// -------- Parsing and validation of input arguments --------
if (!ParseAndCheckCommandLine(argc, argv)) {
return EXIT_SUCCESS;
if (argc != 3) {
std::cout << "Usage : " << argv[0] << " <path_to_lenet_weights> <device>" << std::endl;
return EXIT_FAILURE;
}
// -------- Read input --------
std::vector<std::string> images;
parseInputFilesArguments(images);
OPENVINO_ASSERT(!images.empty(), "No suitable images were found");
const std::string weights_path{argv[1]};
const std::string device_name{argv[2]};
// -------- Step 1. Initialize OpenVINO Runtime Core object --------
slog::info << "Loading OpenVINO runtime" << slog::endl;
runtime::Core core;
slog::info << "Device info: " << slog::endl;
slog::info << core.get_versions(FLAGS_d) << slog::endl;
slog::info << core.get_versions(device_name) << slog::endl;
// -------- Step 2. Create network using ov::Model --------
// -------- Step 2. Create network using ov::Function --------
slog::info << "Create model from weights: " << weights_path << slog::endl;
std::shared_ptr<ov::Model> model = createNgraphFunction(weights_path);
printInputAndOutputsInfo(*model);
auto model = createNgraphFunction();
OPENVINO_ASSERT(model->inputs().size() == 1, "Incorrect number of inputs for LeNet");
OPENVINO_ASSERT(model->outputs().size() == 1, "Incorrect number of outputs for LeNet");
ov::Shape input_shape = model->input().get_shape();
OPENVINO_ASSERT(input_shape.size() == 4, "Incorrect input dimensions for LeNet");
const ov::Shape output_shape = model->output().get_shape();
OPENVINO_ASSERT(output_shape.size() == 2, "Incorrect output dimensions for LeNet");
const auto classCount = output_shape[1];
OPENVINO_ASSERT(classCount <= LENET_NUM_CLASSES, "Incorrect number of output classes for LeNet model");
// -------- Step 3. Apply preprocessing --------
const Layout tensor_layout{"NHWC"};
// apply preprocessing
auto proc = ov::preprocess::PrePostProcessor(model);
ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
// 1) InputInfo() with no args assumes a model has a single input
auto& input_info = proc.input();
ov::preprocess::InputInfo& input_info = ppp.input();
// 2) Set input tensor information:
// - layout of data is 'NHWC'
// - precision of tensor is supposed to be 'u8'
input_info.tensor().set_layout(tensor_layout).set_element_type(element::u8);
// 3) Here we suppose model has 'NCHW' layout for input
input_info.model().set_layout("NCHW");
// 4) Once the build() method is called, the preprocessing steps
// for layout and precision conversions are inserted automatically
model = proc.build();
model = ppp.build();
// -------- Step 4. Read input images --------
// Set batch size using images count
const size_t batch_size = digits.size();
const auto input = model->input();
auto input_shape = input.get_shape();
const size_t width = input_shape[layout::width_idx(tensor_layout)];
const size_t height = input_shape[layout::height_idx(tensor_layout)];
std::vector<std::shared_ptr<unsigned char>> imagesData;
for (auto& i : images) {
FormatReader::ReaderPtr reader(i.c_str());
if (reader.get() == nullptr) {
slog::warn << "Image " + i + " cannot be read!" << slog::endl;
continue;
}
if (reader->size() != width * height) {
throw std::logic_error("Not supported format. Only MNist ubyte images supported.");
}
// Store image data
std::shared_ptr<unsigned char> data(reader->getData(width, height));
if (data.get() != nullptr) {
imagesData.push_back(data);
}
}
OPENVINO_ASSERT(!imagesData.empty(), "Valid input images were not found");
// -------- Step 4. Reshape a model --------
// -------- Step 4. Reshape a model to new batch size --------
// Setting batch size using image count
const auto batch_size = static_cast<int64_t>(imagesData.size());
ov::set_batch(model, batch_size);
slog::info << "Batch size is " << std::to_string(batch_size) << slog::endl;
printInputAndOutputsInfo(*model);
const auto outputShape = model->output().get_shape();
OPENVINO_ASSERT(outputShape.size() == 2, "Incorrect output dimensions for LeNet");
// -------- Step 5. Compiling model for the device --------
slog::info << "Compiling a model for the " << device_name << " device" << slog::endl;
runtime::CompiledModel compiled_model = core.compile_model(model, device_name);
const auto classCount = outputShape[1];
OPENVINO_ASSERT(classCount <= 10, "Incorrect number of output classes for LeNet model");
// -------- Step 4. Compiling model for the device --------
slog::info << "Compiling a model for the " << FLAGS_d << " device" << slog::endl;
runtime::CompiledModel exeNetwork = core.compile_model(model, FLAGS_d);
// -------- Step 5. Create infer request --------
// -------- Step 6. Create infer request --------
slog::info << "Create infer request" << slog::endl;
runtime::InferRequest infer_request = exeNetwork.create_infer_request();
runtime::InferRequest infer_request = compiled_model.create_infer_request();
// -------- Step 6. Combine multiple input images as batch --------
slog::info << "Combining a batch and set input tensor" << slog::endl;
// -------- Step 7. Combine multiple input images as batch --------
slog::info << "Combine images in batch and set to input tensor" << slog::endl;
runtime::Tensor input_tensor = infer_request.get_input_tensor();
// Iterate over all input images
for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
const size_t image_size = shape_size(input_shape) / batch_size;
std::memcpy(input_tensor.data<std::uint8_t>() + image_id * image_size,
imagesData[image_id].get(),
image_size);
// Iterate over all input images and copy data to input tensor
for (size_t image_id = 0; image_id < digits.size(); ++image_id) {
const size_t image_size = shape_size(model->input().get_shape()) / batch_size;
std::memcpy(input_tensor.data<std::uint8_t>() + image_id * image_size, digits[image_id], image_size);
}
// -------- Step 7. Do sync inference --------
// -------- Step 8. Do sync inference --------
slog::info << "Start sync inference" << slog::endl;
infer_request.infer();
// -------- Step 8. Process output --------
// -------- Step 9. Process output --------
slog::info << "Processing output tensor" << slog::endl;
const runtime::Tensor output_tensor = infer_request.get_output_tensor();
// Validating -nt value
const size_t results_cnt = output_tensor.get_size() / batch_size;
if (FLAGS_nt > results_cnt || FLAGS_nt < 1) {
slog::warn << "-nt " << FLAGS_nt << " is not available for this model (-nt should be less than "
<< results_cnt + 1 << " and more than 0).\n Maximal value " << results_cnt
<< " will be used.";
FLAGS_nt = results_cnt;
}
// Read labels from file (e.x. LeNet.labels) **/
std::string label_file_name = fileNameNoExt(FLAGS_m) + ".labels";
std::vector<std::string> labels;
std::ifstream input_file;
input_file.open(label_file_name, std::ios::in);
if (input_file.is_open()) {
std::string strLine;
while (std::getline(input_file, strLine)) {
trim(strLine);
labels.push_back(strLine);
}
input_file.close();
}
const std::vector<std::string> lenet_labels{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
// Prints formatted classification results
ClassificationResult classification_result(output_tensor, images, batch_size, FLAGS_nt, labels);
ClassificationResult classification_result(output_tensor,
lenet_labels, // in this sample images have the same names as labels
batch_size,
N_TOP_RESULTS,
lenet_labels);
classification_result.show();
} catch (const std::exception& ex) {
slog::err << ex.what() << slog::endl;
return EXIT_FAILURE;
}
slog::info << "This sample is an API example, for performance measurements, "
"use the dedicated benchmark_app tool"
<< slog::endl;
return EXIT_SUCCESS;
}

View File

@@ -4,61 +4,445 @@
#pragma once
#include <gflags/gflags.h>
#include <iostream>
#include <string>
#include <vector>
/// @brief message for help argument
static const char help_message[] = "Print a usage message.";
static const unsigned char digit_0[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0b, 0x96, 0xfd, 0xca, 0x1f, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x25, 0xfb, 0xfb, 0xfd, 0x6b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0x15, 0xc5, 0xfb, 0xfb, 0xfd, 0x6b, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x6e, 0xbe, 0xfb,
0xfb, 0xfb, 0xfd, 0xa9, 0x6d, 0x3e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0xfd, 0xfb, 0xfb, 0xfb, 0xfb, 0xfd, 0xfb, 0xfb, 0xdc, 0x33, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xb6, 0xff, 0xfd, 0xfd, 0xfd,
0xfd, 0xea, 0xde, 0xfd, 0xfd, 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0x3f, 0xdd, 0xfd, 0xfb, 0xfb, 0xfb, 0x93, 0x4d, 0x3e, 0x80, 0xfb, 0xfb, 0x69, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x20, 0xe7, 0xfb, 0xfd, 0xfb, 0xdc, 0x89, 0x0a,
0, 0, 0x1f, 0xe6, 0xfb, 0xf3, 0x71, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0x25, 0xfb, 0xfb, 0xfd, 0xbc, 0x14, 0, 0, 0, 0, 0, 0x6d, 0xfb, 0xfd, 0xfb, 0x23, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x25, 0xfb, 0xfb, 0xc9, 0x1e, 0, 0, 0, 0,
0, 0, 0x1f, 0xc8, 0xfd, 0xfb, 0x23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x25, 0xfd, 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0x20, 0xca, 0xff, 0xfd, 0xa4, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x8c, 0xfb, 0xfb, 0, 0, 0, 0, 0, 0, 0,
0, 0x6d, 0xfb, 0xfd, 0xfb, 0x23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xd9,
0xfb, 0xfb, 0, 0, 0, 0, 0, 0, 0x15, 0x3f, 0xe7, 0xfb, 0xfd, 0xe6, 0x1e, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0xd9, 0xfb, 0xfb, 0, 0, 0, 0, 0, 0, 0x90, 0xfb,
0xfb, 0xfb, 0xdd, 0x3d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xd9, 0xfb,
0xfb, 0, 0, 0, 0, 0, 0xb6, 0xdd, 0xfb, 0xfb, 0xfb, 0xb4, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0xda, 0xfd, 0xfd, 0x49, 0x49, 0xe4, 0xfd, 0xfd, 0xff, 0xfd, 0xfd, 0xfd,
0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x71, 0xfb, 0xfb,
0xfd, 0xfb, 0xfb, 0xfb, 0xfb, 0xfd, 0xfb, 0xfb, 0xfb, 0x93, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x1f, 0xe6, 0xfb, 0xfd, 0xfb, 0xfb, 0xfb, 0xfb, 0xfd, 0xe6, 0xbd, 0x23, 0x0a,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3e, 0x8e, 0xfd,
0xfb, 0xfb, 0xfb, 0xfb, 0xfd, 0x6b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0x48, 0xae, 0xfb, 0xad, 0x47, 0x48, 0x1e, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// @brief message for images argument
static const char input_message[] =
"Required. Path to a folder with images or path to image files. Support ubyte files only.";
static const unsigned char digit_1[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x26, 0xfe, 0x6d, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x57, 0xfc, 0x52, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x87, 0xf1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2d, 0xf4, 0x96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x54, 0xfe, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xca, 0xdf, 0x0b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x20, 0xfe, 0xd8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x5f, 0xfe, 0xc3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x8c, 0xfe, 0x4d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x39, 0xed, 0xcd, 0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7c, 0xff, 0xa5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xab, 0xfe, 0x51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x18, 0xe8, 0xd7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x78, 0xfe, 0x9f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x97, 0xfe, 0x8e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xe4, 0xfe, 0x42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3d, 0xfb, 0xfe, 0x42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x8d, 0xfe, 0xcd, 0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0a, 0xd7, 0xfe, 0x79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x05, 0xc6, 0xb0, 0x0a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
/// @brief message for model argument
static const char model_message[] = "Required. Path to a .bin file with weights for the trained model.";
static const unsigned char digit_2[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x74,
0x7d, 0xab, 0xff, 0xff, 0x96, 0x5d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0xa9, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xda, 0x1e, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xa9, 0xfd, 0xfd, 0xfd,
0xd5, 0x8e, 0xb0, 0xfd, 0xfd, 0x7a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x34, 0xfa, 0xfd, 0xd2, 0x20, 0x0c, 0, 0x06, 0xce, 0xfd, 0x8c, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x4d, 0xfb, 0xd2, 0x19, 0, 0,
0, 0x7a, 0xf8, 0xfd, 0x41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x1f, 0x12, 0, 0, 0, 0, 0xd1, 0xfd, 0xfd, 0x41, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x75,
0xf7, 0xfd, 0xc6, 0x0a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x4c, 0xf7, 0xfd, 0xe7, 0x3f, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0xfd, 0xfd,
0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0xb0, 0xf6, 0xfd, 0x9f, 0x0c, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x19, 0xea, 0xfd, 0xe9, 0x23, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0xc6, 0xfd, 0xfd, 0x8d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x4e, 0xf8, 0xfd, 0xbd, 0x0c, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x13, 0xc8, 0xfd, 0xfd, 0x8d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x86, 0xfd, 0xfd, 0xad, 0x0c, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xf8,
0xfd, 0xfd, 0x19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0xf8, 0xfd, 0xfd, 0x2b, 0x14, 0x14, 0x14, 0x14, 0x05, 0, 0x05,
0x14, 0x14, 0x25, 0x96, 0x96, 0x96, 0x93, 0x0a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xf8, 0xfd,
0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xa8, 0x8f, 0xa6, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0x7b, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0xae, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd,
0xf9, 0xf7, 0xf7, 0xa9, 0x75, 0x75, 0x39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x76, 0x7b,
0x7b, 0x7b, 0xa6, 0xfd, 0xfd, 0xfd, 0x9b, 0x7b, 0x7b, 0x29, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// @brief message for assigning cnn calculation to device
static const char target_device_message[] =
"Optional. Specify the target device to infer on (the list of available devices is shown below). "
"Default value is CPU. Use \"-d HETERO:<comma_separated_devices_list>\" format to specify HETERO plugin. "
"Sample will look for a suitable plugin for device specified.";
static const unsigned char digit_3[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x15, 0x71, 0xc1, 0xfe, 0xfd, 0xfe, 0xfd, 0xfe, 0xac, 0x52, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xb7, 0xfd, 0xfc, 0xfd, 0xfc, 0xfd,
0xfc, 0xfd, 0xfc, 0xf3, 0x28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0xcb, 0xff, 0xe9, 0xb7, 0x66, 0xcb, 0xcb, 0xea, 0xfd, 0xfe, 0x97, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x51, 0x97, 0x32, 0, 0, 0, 0x29,
0xc1, 0xfc, 0xfd, 0x6f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x0b, 0xd5, 0xfe, 0xfd, 0xcb, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7b, 0xd5, 0xfc, 0xfd,
0xfc, 0x51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x33, 0xfd, 0xfe, 0xfd, 0xfe, 0x97, 0x15, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0a, 0xd4, 0xfd, 0xfc, 0xfd, 0xe8,
0xdf, 0x7a, 0x52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0x7b, 0xdf, 0xfe, 0xfd, 0xfe, 0xfd, 0xfe, 0x47, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x14, 0x32, 0x83, 0xd5,
0xfc, 0xfd, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0x15, 0xa2, 0xfe, 0xfd, 0x66, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x15, 0xcb,
0xfd, 0xfc, 0x3d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x84, 0xfd, 0xfe, 0x5b, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x15, 0x8e, 0xfd, 0xfc, 0xe9,
0x1e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0x29, 0xd6, 0xfd, 0xfe, 0xd5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xa3, 0xf3, 0xfd, 0xfc, 0xac, 0x0a, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0b, 0xad, 0xad,
0xfd, 0xff, 0xfd, 0xe0, 0x51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x84, 0xfc, 0xfd, 0xfc, 0xfd, 0xab, 0x14, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x99, 0xfd, 0xf4, 0xcb,
0x52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0x5c, 0xc0, 0x7a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// @brief message for top results number
static const char ntop_message[] = "Number of top results. The default value is 10.";
static const unsigned char digit_4[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x32, 0xe0,
0, 0, 0, 0, 0, 0, 0, 0x46, 0x1d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x79, 0xe7, 0, 0, 0, 0, 0, 0, 0, 0x94, 0xa8, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x04, 0xc3, 0xe7, 0,
0, 0, 0, 0, 0, 0, 0x60, 0xd2, 0x0b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0x45, 0xfc, 0x86, 0, 0, 0, 0, 0, 0, 0, 0x72, 0xfc, 0x15, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2d, 0xec, 0xd9, 0x0c, 0, 0,
0, 0, 0, 0, 0, 0xc0, 0xfc, 0x15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0xa8, 0xf7, 0x35, 0, 0, 0, 0, 0, 0, 0, 0x12, 0xff, 0xfd, 0x15, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x54, 0xf2, 0xd3, 0, 0, 0, 0, 0,
0, 0, 0, 0x8d, 0xfd, 0xbd, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0xa9, 0xfc, 0x6a, 0, 0, 0, 0, 0, 0, 0, 0x20, 0xe8, 0xfa, 0x42, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0f, 0xe1, 0xfc, 0, 0, 0, 0, 0, 0, 0,
0, 0x86, 0xfc, 0xd3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x16,
0xfc, 0xa4, 0, 0, 0, 0, 0, 0, 0, 0, 0xa9, 0xfc, 0xa7, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0x09, 0xcc, 0xd1, 0x12, 0, 0, 0, 0, 0, 0, 0x16,
0xfd, 0xfd, 0x6b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xa9,
0xfc, 0xc7, 0x55, 0x55, 0x55, 0x55, 0x81, 0xa4, 0xc3, 0xfc, 0xfc, 0x6a, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0x29, 0xaa, 0xf5, 0xfc, 0xfc, 0xfc, 0xfc, 0xe8, 0xe7, 0xfb, 0xfc,
0xfc, 0x09, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x31, 0x54, 0x54, 0x54, 0x54, 0, 0, 0xa1, 0xfc, 0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7f, 0xfc, 0xfc,
0x2d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x80, 0xfd, 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7f, 0xfc, 0xfc, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0x87, 0xfc, 0xf4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xe8, 0xec, 0x6f, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0xb3, 0x42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// \brief Define flag for showing help message <br>
DEFINE_bool(h, false, help_message);
static const unsigned char digit_5[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x33, 0x84, 0xd6, 0xfd, 0xfe, 0xfd, 0xcb, 0xa2, 0x29,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x66, 0x8e, 0xcb, 0xcb,
0xfd, 0xfc, 0xfd, 0xfc, 0x97, 0x46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0xfe, 0xfd, 0xf4, 0xcb, 0x8e, 0x66, 0x52, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xac, 0xfc, 0xcb, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x15, 0xdf, 0xea, 0x1e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7a, 0xfd, 0x32, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x7b, 0xfe, 0x5b, 0x33, 0x33, 0x33, 0x0a, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x15, 0xdf, 0xfd, 0xfc, 0xfd, 0xfc, 0xfd,
0xac, 0x52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0x15, 0xd6, 0xfd, 0xcb, 0xa2, 0x66, 0x66, 0xcb, 0xdf, 0xfe, 0xfd, 0x33, 0x0a, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3d, 0xfd, 0xab, 0, 0, 0, 0, 0, 0x14,
0x70, 0xc0, 0xfd, 0xd4, 0x29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x66, 0xcb, 0xea, 0x33, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0x14, 0xd5, 0xe8, 0x52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3e, 0xcb, 0xea, 0x70, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0x14, 0xd5, 0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x99, 0xfd, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0x29, 0xe9, 0xd4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x71, 0x5c,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1f, 0xad, 0xf4, 0x28, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x52, 0xfd, 0x97, 0, 0, 0, 0, 0, 0, 0x15, 0x66, 0x66, 0xb7,
0xe9, 0xd4, 0x51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x52, 0xff, 0xfd, 0xea,
0x98, 0x99, 0xc1, 0xad, 0xfd, 0xfe, 0xfd, 0xfe, 0xd5, 0x8e, 0x14, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x47, 0x97, 0x97, 0xe8, 0xfd, 0xd4, 0xc0, 0x97, 0x83, 0x32, 0x32, 0x0a, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// \brief Define parameter for set weight file <br>
/// It is a required parameter
DEFINE_string(m, "", model_message);
static const unsigned char digit_6[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x07,
0xcc, 0xfd, 0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x07, 0x96, 0xfc, 0xfc, 0x7d, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x75, 0xfc, 0xba,
0x38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x8d, 0xfc, 0x76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x9a, 0xf7, 0x32, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x1a, 0xfd, 0xc4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x96, 0xfd, 0xc4, 0, 0, 0,
0, 0, 0, 0, 0x39, 0x55, 0x55, 0x26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0xe1, 0xfd, 0x60, 0, 0, 0, 0, 0, 0x97, 0xe2, 0xf3, 0xfc, 0xfc, 0xee, 0x7d, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0a, 0xe5, 0xe2, 0, 0, 0, 0x04, 0x36,
0xe5, 0xfd, 0xff, 0xea, 0xaf, 0xe1, 0xff, 0xe4, 0x1f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0x6e, 0xfc, 0x96, 0, 0, 0x1a, 0x80, 0xfc, 0xfc, 0xe3, 0x86, 0x1c, 0, 0, 0xb2, 0xfc, 0x38, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x9f, 0xfc, 0x71, 0, 0, 0x96, 0xfd, 0xfc, 0xba,
0x2b, 0, 0, 0, 0, 0x8d, 0xfc, 0x38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0xb9, 0xfc, 0x71, 0, 0x26, 0xed, 0xfd, 0x97, 0x06, 0, 0, 0, 0, 0, 0x8d, 0xca, 0x06, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0xc6, 0xfd, 0x72, 0, 0x93, 0xfd, 0xa3, 0, 0, 0,
0, 0, 0, 0, 0x9a, 0xc5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xc5,
0xfc, 0x71, 0, 0xac, 0xfc, 0xbc, 0, 0, 0, 0, 0, 0, 0x1a, 0xfd, 0xab, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0xc5, 0xfc, 0x71, 0, 0x13, 0xe7, 0xf7, 0x7a, 0x13, 0, 0,
0, 0, 0xc8, 0xf4, 0x38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1a, 0xde, 0xfc,
0x71, 0, 0, 0x19, 0xcb, 0xfc, 0xc1, 0x0d, 0, 0x4c, 0xc8, 0xf9, 0x7d, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0xb9, 0xfd, 0xb3, 0x0a, 0, 0, 0, 0x4c, 0x23, 0x1d, 0x9a, 0xfd,
0xf4, 0x7d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1c, 0xd1, 0xfd,
0xc4, 0x52, 0x39, 0x39, 0x83, 0xc5, 0xfc, 0xfd, 0xd6, 0x51, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x19, 0xd8, 0xfc, 0xfc, 0xfc, 0xfd, 0xfc, 0xfc, 0xfc, 0x9c, 0x13, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x10, 0x67,
0x8b, 0xf0, 0x8c, 0x8b, 0x8b, 0x28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// \brief Define parameter for set image file <br>
/// It is a required parameter
DEFINE_string(i, "", input_message);
static const unsigned char digit_7[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x54, 0xb9, 0x9f, 0x97, 0x3c, 0x24, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0xde, 0xfe, 0xfe, 0xfe, 0xfe, 0xf1, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xaa, 0x34, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x43, 0x72, 0x48, 0x72, 0xa3, 0xe3, 0xfe, 0xe1,
0xfe, 0xfe, 0xfe, 0xfa, 0xe5, 0xfe, 0xfe, 0x8c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0x11, 0x42, 0x0e, 0x43, 0x43, 0x43, 0x3b, 0x15, 0xec, 0xfe, 0x6a, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0x53, 0xfd, 0xd1, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x16, 0xe9, 0xff, 0x53, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0x81, 0xfe, 0xee, 0x2c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3b, 0xf9, 0xfe, 0x3e, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x85,
0xfe, 0xbb, 0x05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x09, 0xcd, 0xf8, 0x3a, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7e, 0xfe, 0xb6,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0x4b, 0xfb, 0xf0, 0x39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x13, 0xdd, 0xfe, 0xa6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0x03, 0xcb, 0xfe, 0xdb, 0x23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x26, 0xfe, 0xfe, 0x4d, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x1f, 0xe0, 0xfe, 0x73, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x85, 0xfe, 0xfe, 0x34, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3d, 0xf2,
0xfe, 0xfe, 0x34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0x79, 0xfe, 0xfe, 0xdb, 0x28, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x79, 0xfe, 0xcf,
0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// \brief device the target device to infer on <br>
/// It is an optional parameter
DEFINE_string(d, "CPU", target_device_message);
static const unsigned char digit_8[28 * 28] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x06, 0x4b, 0, 0x62, 0xb9, 0xb2, 0x5e, 0x13, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0f, 0x6f, 0xc3, 0xee, 0x5e, 0, 0xd0, 0xf9, 0xfe,
0xfe, 0x74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x14, 0x32, 0x6b,
0xc5, 0xf6, 0xb7, 0x19, 0, 0, 0x51, 0xf5, 0xfe, 0xf9, 0x5b, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x12, 0x54, 0xe6, 0xfe, 0xfe, 0xdd, 0x56, 0, 0, 0x01, 0x7d, 0xfd, 0xfe, 0xb2, 0x35,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x85, 0xfe, 0xfe, 0xd9, 0x76, 0x04,
0, 0, 0x3e, 0xca, 0xfe, 0xf1, 0x83, 0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0x6b, 0xf4, 0xfe, 0xd5, 0x2d, 0, 0, 0, 0x3e, 0xf0, 0xfe, 0xdc, 0x1d, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2c, 0xf6, 0xfe, 0xd1, 0x31, 0, 0, 0, 0x1f,
0xf1, 0xfe, 0xdd, 0x1b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x5f, 0xfe, 0xfe, 0x37, 0, 0, 0, 0x11, 0xc6, 0xfe, 0xda, 0x1c, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1b, 0xdb, 0xfe, 0xe9, 0x90, 0x27, 0x2a, 0xcc, 0xfe, 0xcd,
0x0a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x73, 0xf8, 0xfe, 0xfe, 0xf4, 0xe9, 0xfe, 0xdf, 0x18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x26, 0x54, 0xa8, 0xf5, 0xfe, 0xfe, 0xfe, 0xcf, 0x73,
0x09, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0x0e, 0xec, 0xfe, 0xe6, 0xa3, 0xed, 0xf4, 0xd3, 0x50, 0x01, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x63, 0xfe, 0xfe, 0x63, 0, 0, 0x25, 0xe1,
0xfe, 0x82, 0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0x47, 0xfe, 0xe5, 0x0c, 0, 0, 0, 0x02, 0xaa, 0xfe, 0x33, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x60, 0xfe, 0xfe, 0x12, 0, 0, 0, 0, 0x51,
0xfe, 0xc6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x50, 0xfe, 0xfe, 0x12, 0, 0, 0, 0, 0x83, 0xfe, 0x77, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0x05, 0xd6, 0xfe, 0x4e, 0, 0, 0, 0x01, 0xb7, 0xf4,
0x3f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x40, 0xfd, 0xdf, 0x18, 0, 0x02, 0x7e, 0xfe, 0xb2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x57, 0xee, 0xde, 0x77, 0xb1, 0xfe, 0xd9, 0x1b, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0x12, 0x9a, 0xc4, 0xc4, 0x65, 0x19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
};
/// @brief Top results number (default 10) <br>
/// It is an optional parameter
DEFINE_uint32(nt, 10, ntop_message);
/// @brief 28x28 8-bit grayscale raster of a handwritten digit "9" (MNIST-style),
/// stored row-major; each value is a pixel intensity (0 = background).
/// Used as a built-in input image for the classification sample.
static const unsigned char digit_9[28 * 28] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0x24, 0x38, 0x89, 0xc9, 0xc7, 0x5f, 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0x2d, 0x98, 0xea, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfa, 0xd3, 0x97, 0x06,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2e, 0x99, 0xf0, 0xfe, 0xfe,
    0xe3, 0xa6, 0x85, 0xfb, 0xc8, 0xfe, 0xe5, 0xe1, 0x68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0x99, 0xea, 0xfe, 0xfe, 0xbb, 0x8e, 0x08, 0, 0, 0xbf, 0x28, 0xc6, 0xf6, 0xdf, 0xfd, 0x15,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x7e, 0xfd, 0xfe, 0xe9, 0x80, 0x0b, 0, 0,
    0, 0, 0xd2, 0x2b, 0x46, 0xfe, 0xfe, 0xfe, 0x15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0x48, 0xf3, 0xfe, 0xe4, 0x36, 0, 0, 0, 0, 0x03, 0x20, 0x74, 0xe1, 0xf2, 0xfe, 0xff, 0xa2, 0x05, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0x4b, 0xf0, 0xfe, 0xdf, 0x6d, 0x8a, 0xb2, 0xb2, 0xa9, 0xd2,
    0xfb, 0xe7, 0xfe, 0xfe, 0xfe, 0xe8, 0x26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x09,
    0xaf, 0xf4, 0xfd, 0xff, 0xfe, 0xfe, 0xfb, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfc, 0xab, 0x19, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x10, 0x88, 0xc3, 0xb0, 0x92, 0x99, 0xc8, 0xfe, 0xfe,
    0xfe, 0xfe, 0x96, 0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0xa2, 0xfe, 0xfe, 0xf1, 0x63, 0x03, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x76, 0xfa, 0xfe, 0xfe, 0x5a,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0x64, 0xf2, 0xfe, 0xfe, 0xd3, 0x07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x36, 0xf1, 0xfe, 0xfe, 0xf2, 0x3b, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0x83, 0xfe, 0xfe, 0xf4, 0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0d, 0xf9, 0xfe, 0xfe, 0x98, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0c, 0xe4,
    0xfe, 0xfe, 0xd0, 0x08, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0x4e, 0xff, 0xfe, 0xfe, 0x42, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xd1, 0xfe, 0xfe,
    0x89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0xe3, 0xff, 0xe9, 0x19, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x71, 0xff, 0x6c, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0,
};
/**
 * \brief Prints the sample's command-line usage/help text to standard output.
 */
static void showUsage() {
    // Single chained insertion: same output (and the same std::endl flushes)
    // as emitting each line with a separate statement.
    std::cout << std::endl
              << "ngraph_function_creation_sample [OPTION]" << std::endl
              << "Options:" << std::endl
              << std::endl
              << "    -h                      " << help_message << std::endl
              << "    -m \"<path>\"             " << model_message << std::endl
              << "    -i \"<path>\"             " << input_message << std::endl
              << "    -d \"<device>\"           " << target_device_message << std::endl
              << "    -nt \"<integer>\"         " << ntop_message << std::endl;
}
/// @brief Lookup table mapping a digit value (index 0-9) to its 28x28 raster image.
static const std::vector<const unsigned char*> digits =
    {digit_0, digit_1, digit_2, digit_3, digit_4, digit_5, digit_6, digit_7, digit_8, digit_9};

View File

@@ -3,6 +3,9 @@
//
#pragma once
#include <map>
#include <ostream>
#include "openvino/core/core_visibility.hpp"
/**
@@ -40,6 +43,12 @@ struct Version {
};
#pragma pack(pop)
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const Version& version);
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const std::map<std::string, Version>& versions);
/**
* @brief Gets the current OpenVINO version
* @return The current OpenVINO version

View File

@@ -8,6 +8,25 @@
const char* NGRAPH_VERSION_NUMBER = CI_BUILD_NUMBER;
using namespace std;
// Streams a human-readable description of a single Version: the description
// string, the compile-time OpenVINO semantic version (major.minor.patch
// macros, not parsed from the struct), and the runtime build number.
std::ostream& ov::operator<<(std::ostream& s, const Version& version) {
    s << version.description << std::endl;
    s << "    Version : ";
    s << OPENVINO_VERSION_MAJOR << "." << OPENVINO_VERSION_MINOR << "." << OPENVINO_VERSION_PATCH;
    s << std::endl;
    s << "    Build : ";
    s << version.buildNumber << std::endl;
    return s;
}
// Streams every Version in a device-name -> Version map, one entry per line,
// reusing the single-Version operator<< above. The map keys (device names)
// are intentionally not printed; each Version carries its own description.
std::ostream& ov::operator<<(std::ostream& s, const std::map<std::string, Version>& versions) {
    for (auto&& version : versions) {
        s << version.second << std::endl;
    }
    return s;
}
namespace ov {
const Version get_openvino_version() noexcept {

View File

@@ -22,7 +22,6 @@ log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=s
test_data_fp32 = get_tests(cmd_params={'i': [os.path.join('227x227', 'dog.bmp')],
'm': [os.path.join('squeezenet1.1', 'FP32', 'squeezenet1.1.xml')],
'nt': ['1'],
'sample_type': ['C++','Python'],
'batch': [1, 2, 4],
'd': ['CPU']},
@@ -30,8 +29,7 @@ test_data_fp32 = get_tests(cmd_params={'i': [os.path.join('227x227', 'dog.bmp')]
)
test_data_fp16 = get_tests(cmd_params={'i': [os.path.join('227x227', 'dog.bmp')],
'm': [os.path.join('squeezenet1.1', 'FP32', 'squeezenet1.1.xml')],
'nt': ['1'],
'm': [os.path.join('squeezenet1.1', 'FP16', 'squeezenet1.1.xml')],
'sample_type': ['C++','Python'],
'batch': [1, 2, 4],
'd': ['CPU']},
@@ -63,16 +61,17 @@ def _check_output(self, param):
stdout = self._test(param)
if not stdout:
return 0
stdout = stdout.split('\n')
is_ok = 0
is_ok = True
for line in range(len(stdout)):
if re.match("\d+ +\d+.\d+$", stdout[line].replace('[ INFO ]', '').strip()) is not None:
if is_ok == 0:
is_ok = True
if re.match('\\d+ +\\d+.\\d+$', stdout[line].replace('[ INFO ]', '').strip()) is not None:
top1 = stdout[line].replace('[ INFO ]', '').strip().split(' ')[0]
top1 = re.sub("\D", "", top1)
top1 = re.sub('\\D', '', top1)
if '215' not in top1:
is_ok = False
log.info("Detected class {}".format(top1))
assert is_ok, "Wrong top1 class"
log.error("Expected class 215, Detected class {}".format(top1))
break
assert is_ok, 'Wrong top1 class'
log.info('Accuracy passed')

View File

@@ -56,17 +56,20 @@ class TestHello(SamplesCommonTestClass):
stdout = self._test(param, use_preffix=False, get_cmd_func=self.get_hello_cmd_line)
if not stdout:
return 0
stdout = stdout.split('\n')
is_ok = 0
for line in stdout:
if re.match(r"\d+ +\d+.\d+$", line.strip()) is not None:
is_ok = True
top1 = line.strip().split(' ')[0]
top1 = re.sub(r"\D", "", top1)
assert '215' in top1, "Wrong top1 class"
log.info('Accuracy passed')
is_ok = True
for line in range(len(stdout)):
if re.match('\\d+ +\\d+.\\d+$', stdout[line].replace('[ INFO ]', '').strip()) is not None:
top1 = stdout[line].replace('[ INFO ]', '').strip().split(' ')[0]
top1 = re.sub('\\D', '', top1)
if '215' not in top1:
is_ok = False
log.error('Expected class 215, Detected class {}'.format(top1))
break
assert is_ok != 0, "Accuracy check didn't passed, probably format of output has changes"
assert is_ok, 'Wrong top1 class'
log.info('Accuracy passed')
@pytest.mark.parametrize("param", test_data_fp32_unicode)
def test_hello_classification_check_unicode_path_support(self, param):
@@ -123,7 +126,7 @@ class TestHello(SamplesCommonTestClass):
ref_probs = []
for line in ref_stdout.split(sep='\n'):
if re.match(r"\d+\s+\d+.\d+", line):
if re.match(r"\\d+\\s+\\d+.\\d+", line):
prob_class = int(line.split()[0])
prob = float(line.split()[1])
ref_probs.append((prob_class, prob))
@@ -170,7 +173,7 @@ class TestHello(SamplesCommonTestClass):
probs = []
for line in stdout.split(sep='\n'):
if re.match(r"^\d+\s+\d+.\d+", line):
if re.match(r"^\\d+\\s+\\d+.\\d+", line):
prob_class = int(line.split()[0])
prob = float(line.split()[1])
probs.append((prob_class, prob))

View File

@@ -49,14 +49,17 @@ def _check_output(self, param):
stdout = self._test(param, use_preffix=False, get_cmd_func=self.get_hello_nv12_cmd_line)
if not stdout:
return 0
stdout = stdout.split('\n')
is_ok = 0
for line in stdout:
if re.match("\d+ +\d+.\d+$", line.strip()) is not None:
is_ok = True
top1 = line.strip().split(' ')[0]
top1 = re.sub("\D", "", top1)
assert '215' in top1, "Wrong top1 class"
log.info('[INFO] Accuracy passed')
is_ok = True
for line in range(len(stdout)):
if re.match('\\d+ +\\d+.\\d+$', stdout[line].replace('[ INFO ]', '').strip()) is not None:
top1 = stdout[line].replace('[ INFO ]', '').strip().split(' ')[0]
top1 = re.sub('\\D', '', top1)
if '215' not in top1:
is_ok = False
log.error('Expected class 215, Detected class {}'.format(top1))
break
assert is_ok != 0, "Accuracy check didn't passed, probably format of output has changes"
assert is_ok, 'Wrong top1 class'
log.info('Accuracy passed')