DOCS: added code snippets compilation and fixes (#2606)

Ilya Lavrenov 2020-10-10 11:19:16 +03:00 committed by GitHub
parent 00faee86e0
commit daf8bc6164
33 changed files with 189 additions and 158 deletions


@@ -3,7 +3,7 @@
#
if(NOT ENABLE_DOCKER)
add_subdirectory(examples)
add_subdirectory(snippets)
# Detect nGraph
find_package(ngraph QUIET)
@@ -20,7 +20,7 @@ if(NOT ENABLE_DOCKER)
add_subdirectory(template_extension)
set(all_docs_targets
ie_docs_examples
ie_docs_snippets
template_extension
templatePlugin TemplateBehaviorTests TemplateFunctionalTests)
foreach(target_name IN LISTS all_docs_targets)


@@ -868,13 +868,13 @@ EXCLUDE_SYMBOLS =
# command).
EXAMPLE_PATH = ../template_plugin/src \
../template_plugin/include \
../template_plugin/include \
../template_plugin/src/CMakeLists.txt \
../template_plugin/tests/functional/CMakeLists.txt \
../template_plugin/tests/functional/transformations \
../template_plugin/tests/functional/shared_tests_instances/ \
../../inference-engine/tests/functional/plugin/shared/include \
../examples
../snippets
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and


@@ -1,13 +0,0 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME ie_docs_examples)
file(GLOB SOURCES *.cpp)
add_library(ie_docs_examples STATIC ${SOURCES})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph)
#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})


@@ -4,7 +4,6 @@ int main() {
using namespace InferenceEngine;
//! [part0]
InferenceEngine::Core core;
auto cpuOptimizationCapabilities = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>();
//! [part0]
return 0;


@@ -4,11 +4,8 @@ int main() {
using namespace InferenceEngine;
//! [part1]
InferenceEngine::Core core;
auto network = core.ReadNetwork("sample.xml");
auto exeNetwork = core.LoadNetwork(network, "CPU");
auto enforceBF16 = exeNetwork.GetConfig(PluginConfigParams::KEY_ENFORCE_BF16).as<std::string>();
//! [part1]


@@ -4,7 +4,6 @@ int main() {
using namespace InferenceEngine;
//! [part2]
InferenceEngine::Core core;
core.SetConfig({ { CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO) } }, "CPU");
//! [part2]
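
For context, the CPU bfloat16 snippets above (capability query, config read-back, and explicit opt-out) can be combined into one compilable program. A minimal sketch, assuming the "BF16" capability string and the "sample.xml" model path as placeholders:

#include <inference_engine.hpp>
#include <algorithm>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core core;
    // query what the CPU plugin reports it can accelerate; "BF16" may or may not be listed
    auto cpuCapabilities = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>();
    bool hasBF16 = std::find(cpuCapabilities.begin(), cpuCapabilities.end(), "BF16") != cpuCapabilities.end();
    if (!hasBF16) {
        // keep inference in FP32 on platforms without native bfloat16 support
        core.SetConfig({ { CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO) } }, "CPU");
    }
    auto network = core.ReadNetwork("sample.xml");   // placeholder IR path
    auto exeNetwork = core.LoadNetwork(network, "CPU");
    // read back the effective setting ("YES" or "NO")
    auto enforceBF16 = exeNetwork.GetConfig(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16).as<std::string>();
    return 0;
}
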


@@ -0,0 +1,57 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME ie_docs_snippets)
file(GLOB SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp")
# remove OpenCL related sources
# TODO: fix compilation of OpenCL files
if(NOT CLDNN__IOCL_ICD_INCDIRS OR TRUE)
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/GPU_RemoteBlob_API0.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/GPU_RemoteBlob_API1.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/GPU_RemoteBlob_API2.cpp")
endif()
# remove OpenCV related sources
find_package(OpenCV QUIET)
if(NOT OpenCV_FOUND)
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide5.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/ShapeInference.cpp")
endif()
# ONNX importer related files
if(NOT NGRAPH_ONNX_IMPORT_ENABLE)
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial0.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial1.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial2.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial3.cpp")
endif()
# remove snippets for deprecated / removed API
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/Migration_CoreAPI.cpp")
# requires mfxFrameSurface1 and MSS API
list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide2.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide3.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide4.cpp")
# create a static library
add_library(${TARGET_NAME} STATIC ${SOURCES})
if(CLDNN__IOCL_ICD_INCDIRS)
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE ${CLDNN__IOCL_ICD_INCDIRS})
endif()
if(OpenCV_FOUND)
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE ${OpenCV_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} PRIVATE opencv_core)
endif()
if(NGRAPH_ONNX_IMPORT_ENABLE)
target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer)
endif()
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph)


@@ -2,43 +2,36 @@
#include <vector>
int main() {
using namespace InferenceEngine;
int FLAGS_bl = 1;
auto imagesData = std::vector<std::string>(2);
auto imagesData2 = std::vector<std::string>(4);
//! [part0]
int dynBatchLimit = FLAGS_bl; //take dynamic batch limit from command line option
int dynBatchLimit = FLAGS_bl; //take dynamic batch limit from command line option
// Read network model
Core core;
CNNNetwork network = core.ReadNetwork("sample.xml");
InferenceEngine::Core core;
InferenceEngine::CNNNetwork network = core.ReadNetwork("sample.xml");
// enable dynamic batching and prepare for setting max batch limit
const std::map<std::string, std::string> dyn_config =
{ { PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES } };
{ { InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES } };
network.setBatchSize(dynBatchLimit);
// create executable network and infer request
auto executable_network = core.LoadNetwork(network, "CPU", dyn_config);
auto infer_request = executable_network.CreateInferRequest();
// ...
// process a set of images
// dynamically set batch size for subsequent Infer() calls of this request
size_t batchSize = imagesData.size();
infer_request.SetBatch(batchSize);
infer_request.Infer();
// ...
// process another set of images
batchSize = imagesData2.size();
infer_request.SetBatch(batchSize);


@@ -14,16 +14,17 @@ using namespace InferenceEngine;
// ...
// initialize the plugin and load the network
// initialize the core and load the network
InferenceEngine::Core ie;
auto exec_net = ie.LoadNetwork(net, "GPU", config);
auto net = ie.ReadNetwork("network.xml");
auto exec_net = ie.LoadNetwork(net, "GPU");
// obtain the RemoteContext pointer from the executable network object
auto cldnn_context = exec_net.GetContext();
// obtain the OpenCL context handle from the RemoteContext,
// get device info and create a queue
cl::Context ctx = std::dynamic_pointer_cast<ClContext>(cldnn_context);
cl::Context ctx = std::dynamic_pointer_cast<cl::Context>(cldnn_context);
_device = cl::Device(_context.getInfo<CL_CONTEXT_DEVICES>()[0].get(), true);
cl::CommandQueue _queue;
cl_command_queue_properties props = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE;
@@ -41,11 +42,11 @@ auto shared_blob = gpu::make_shared_blob(input_info->getTensorDesc(), cldnn_cont
cl::Kernel kernel(program, kernelName.c_str());
kernel.setArg(0, shared_buffer);
queue.enqueueNDRangeKernel(kernel,
cl::NDRange(0),
cl::NDRange(image_size),
cl::NDRange(1),
0, // wait events *
&profileEvent);
cl::NDRange(0),
cl::NDRange(image_size),
cl::NDRange(1),
0, // wait events *
&profileEvent);
queue.finish();
// ...


@@ -3,14 +3,12 @@
#include "hetero/hetero_plugin_config.hpp"
int main() {
using namespace InferenceEngine;
using namespace ngraph;
Core core;
InferenceEngine::Core core;
auto network = core.ReadNetwork("sample.xml");
auto function = network.getFunction();
//! [part0]
for (auto && op : function->get_ops())
op->get_rt_info()["affinity"] = std::shared_ptr<ngraph::VariantWrapper<std::string>>("CPU");
op->get_rt_info()["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
//! [part0]
return 0;
}


@@ -1,19 +1,20 @@
#include <inference_engine.hpp>
int main() {
using namespace InferenceEngine;
const std::string output_name = "output_name";
const std::string input_name = "input_name";
//! [part0]
InferenceEngine::Core core;
InferenceEngine::CNNNetwork network;
InferenceEngine::ExecutableNetwork executable_network;
//! [part0]
//! [part1]
auto network = core.ReadNetwork("Model.xml");
network = core.ReadNetwork("Model.xml");
//! [part1]
//! [part2]
auto network = core.ReadNetwork("model.onnx");
network = core.ReadNetwork("model.onnx");
//! [part2]
//! [part3]
@@ -27,27 +28,27 @@ InferenceEngine::OutputsDataMap output_info = network.getOutputsInfo();
/** Iterate over all input info**/
for (auto &item : input_info) {
auto input_data = item.second;
input_data->setPrecision(Precision::U8);
input_data->setLayout(Layout::NCHW);
input_data->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
input_data->setPrecision(InferenceEngine::Precision::U8);
input_data->setLayout(InferenceEngine::Layout::NCHW);
input_data->getPreProcess().setResizeAlgorithm(InferenceEngine::RESIZE_BILINEAR);
input_data->getPreProcess().setColorFormat(InferenceEngine::ColorFormat::RGB);
}
/** Iterate over all output info**/
for (auto &item : output_info) {
auto output_data = item.second;
output_data->setPrecision(Precision::FP32);
output_data->setLayout(Layout::NC);
output_data->setPrecision(InferenceEngine::Precision::FP32);
output_data->setLayout(InferenceEngine::Layout::NC);
}
//! [part4]
//! [part5]
auto executable_network = core.LoadNetwork(network, "CPU");
executable_network = core.LoadNetwork(network, "CPU");
//! [part5]
//! [part6]
/** Optional config. E.g. this enables profiling of performance counters. **/
std::map<std::string, std::string> config = {{ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES }};
auto executable_network = core.LoadNetwork(network, "CPU", config);
std::map<std::string, std::string> config = {{ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES }};
executable_network = core.LoadNetwork(network, "CPU", config);
//! [part6]
//! [part7]
@@ -93,7 +94,9 @@ for (auto & item : input_info) {
/** Create input blob **/
InferenceEngine::TBlob<unsigned char>::Ptr input;
// assuming input precision was asked to be U8 in prev step
input = InferenceEngine::make_shared_blob<unsigned char, InferenceEngine::SizeVector>(InferenceEngine::Precision::U8, input_data->getDims());
input = InferenceEngine::make_shared_blob<unsigned char>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, input_data->getTensorDesc().getDims(),
input_data->getTensorDesc().getLayout()));
input->allocate();
infer_request.SetBlob(item.first, input);
@@ -104,7 +107,7 @@ for (auto & item : input_info) {
//! [part12]
infer_request.StartAsync();
infer_request.Wait(IInferRequest::WaitMode::RESULT_READY);
infer_request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
//! [part12]
auto sync_infer_request = executable_network.CreateInferRequest();
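
The part12 snippet stops right after the wait. As a hedged follow-up, this is roughly how a result could be read back afterwards; the model path, the choice of the first output, and the FP32 precision (set in part4) are assumptions:

#include <inference_engine.hpp>
#include <string>

int main() {
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork("Model.xml");   // placeholder, as in part1
    InferenceEngine::ExecutableNetwork executable_network = core.LoadNetwork(network, "CPU");
    auto infer_request = executable_network.CreateInferRequest();

    infer_request.StartAsync();
    infer_request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);

    // fetch the first output and view its buffer as FP32
    std::string output_name = network.getOutputsInfo().begin()->first;
    InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(output_name);
    const float* output_data = output_blob->buffer()
        .as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type*>();
    (void)output_data;   // post-processing would go here
    return 0;
}
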


@@ -3,18 +3,17 @@
int main() {
using namespace InferenceEngine;
//! [part3]
Core ie;
InferenceEngine::Core ie;
auto cnnNetwork = ie.ReadNetwork("sample.xml");
std::string allDevices = "MULTI:";
std::vector<std::string> myriadDevices = ie.GetMetric("MYRIAD", METRIC_KEY(myriadDevices));
std::vector<std::string> myriadDevices = ie.GetMetric("MYRIAD", METRIC_KEY(AVAILABLE_DEVICES));
for (int i = 0; i < myriadDevices.size(); ++i) {
allDevices += std::string("MYRIAD.")
+ myriadDevices[i]
+ std::string(i < (myriadDevices.size() -1) ? "," : "");
}
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, allDevices, {});
InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, allDevices, {});
//! [part3]
return 0;
}


@@ -3,18 +3,17 @@
int main() {
using namespace InferenceEngine;
const std::map<std::string, std::string> hddl_config = { { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } };
const std::map<std::string, std::string> gpu_config = { { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } };
const std::map<std::string, std::string> hddl_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };
const std::map<std::string, std::string> gpu_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };
//! [part4]
// configure the HDDL device first
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
InferenceEngine::Core ie;
InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
ie.SetConfig(hddl_config, "HDDL");
// configure the GPU device
ie.SetConfig(gpu_config, "GPU");
// load the network to the multi-device, while specifying the configuration (devices along with priorities):
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, "MULTI", {{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "HDDL,GPU"}});
InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, "MULTI", {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "HDDL,GPU"}});
// new metric allows to query the optimal number of requests:
uint32_t nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
//! [part4]
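
The part4 snippet only queries OPTIMAL_NUMBER_OF_INFER_REQUESTS. A minimal sketch of actually using the value to size a request pool; the device string, model path, and the bare StartAsync/Wait loop are illustrative only:

#include <inference_engine.hpp>
#include <cstdint>
#include <vector>

int main() {
    InferenceEngine::Core ie;
    InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");   // placeholder IR
    InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, "MULTI:HDDL,GPU", {});

    // size the request pool from the metric instead of hard-coding a number
    uint32_t nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
    std::vector<InferenceEngine::InferRequest> requests;
    for (uint32_t i = 0; i < nireq; ++i)
        requests.push_back(exeNetwork.CreateInferRequest());

    // a real application would also fill inputs and set completion callbacks
    for (auto & request : requests)
        request.StartAsync();
    for (auto & request : requests)
        request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
    return 0;
}

Keeping that many requests in flight is what allows the MULTI device to keep both underlying devices busy.
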


@@ -3,14 +3,13 @@
int main() {
using namespace InferenceEngine;
std::string device_name = "MULTI:HDDL,GPU";
const std::map< std::string, std::string > full_config = {};
//! [part5]
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
InferenceEngine::Core ie;
InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
// 'device_name' can be "MULTI:HDDL,GPU" to configure the multi-device to use HDDL and GPU
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device_name, full_config);
InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device_name, full_config);
// new metric allows to query the optimal number of requests:
uint32_t nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
//! [part5]


@@ -1,8 +1,6 @@
#include <inference_engine.hpp>
#include <ie_cnn_network.h>
int main() {
using namespace InferenceEngine;
std::string deviceName = "Device name";
//! [part0]
InferenceEngine::InferencePlugin plugin = InferenceEngine::PluginDispatcher({ FLAGS_pp }).getPluginByDevice(FLAGS_d);
@@ -13,18 +11,18 @@ InferenceEngine::Core core;
//! [part1]
//! [part2]
CNNNetReader network_reader;
InferenceEngine::CNNNetReader network_reader;
network_reader.ReadNetwork(fileNameToString(input_model));
network_reader.ReadWeights(fileNameToString(input_model).substr(0, input_model.size() - 4) + ".bin");
CNNNetwork network = network_reader.getNetwork();
InferenceEngine::CNNNetwork network = network_reader.getNetwork();
//! [part2]
//! [part3]
CNNNetwork network = core.ReadNetwork(input_model);
InferenceEngine::CNNNetwork network = core.ReadNetwork(input_model);
//! [part3]
//! [part4]
CNNNetwork network = core.ReadNetwork("model.onnx");
InferenceEngine::CNNNetwork network = core.ReadNetwork("model.onnx");
//! [part4]
//! [part5]


@@ -1,11 +1,10 @@
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "onnx/onnx-ml.pb.h"
#include "onnx_import/onnx.hpp"
#include <iostream>
#include <set>
int main() {
using namespace InferenceEngine;
//! [part0]
const std::int64_t version = 12;
const std::string domain = "ai.onnx";


@@ -1,9 +1,8 @@
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "ngraph/frontend/onnx_import/onnx_utils.hpp"
#include "onnx_import/onnx.hpp"
int main() {
using namespace InferenceEngine;
//! [part1]
const std::string op_name = "Abs";
const std::int64_t version = 12;


@@ -1,15 +1,14 @@
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "ngraph/frontend/onnx_import/onnx.hpp"
#include "onnx_import/onnx.hpp"
#include <iostream>
#include <fstream>
int main() {
using namespace InferenceEngine;
using namespace ngraph;
//! [part2]
const std::string resnet50_path = "resnet50/model.onnx";
const char * resnet50_path = "resnet50/model.onnx";
std::ifstream resnet50_stream(resnet50_path);
if(resnet50_stream.is_open())
if (resnet50_stream.is_open())
{
try
{


@@ -1,12 +1,11 @@
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "ngraph/frontend/onnx_import/onnx.hpp"
#include "onnx_import/onnx.hpp"
#include <iostream>
int main() {
using namespace InferenceEngine;
using namespace ngraph;
//! [part3]
const char * resnet50_path = "resnet50/model.onnx";
const std::shared_ptr<ngraph::Function> ng_function = ngraph::onnx_import::import_onnx_model(resnet50_path);
//! [part3]
return 0;
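
As a hedged continuation of part3, the imported function can be handed to the Inference Engine the same way the nGraph tutorial later in this commit does it (a CNNNetwork built from an ngraph::Function); the device name and model path are assumptions:

#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "onnx_import/onnx.hpp"
#include <memory>

int main() {
    // same placeholder path as in part3
    const char * resnet50_path = "resnet50/model.onnx";
    std::shared_ptr<ngraph::Function> ng_function = ngraph::onnx_import::import_onnx_model(resnet50_path);

    // wrap the imported nGraph function into a CNNNetwork and load it as usual
    InferenceEngine::CNNNetwork network(ng_function);
    InferenceEngine::Core core;
    auto executable_network = core.LoadNetwork(network, "CPU");
    auto infer_request = executable_network.CreateInferRequest();
    return 0;
}
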


@@ -1,17 +1,15 @@
#include <inference_engine.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
int main() {
using namespace InferenceEngine;
using namespace cv;
int batch_size = 1;
//! [part0]
InferenceEngine::Core core;
// ------------- 0. Read IR and image ----------------------------------------------
CNNNetwork network = core.ReadNetwork("path/to/IR/xml");
InferenceEngine::CNNNetwork network = core.ReadNetwork("path/to/IR/xml");
cv::Mat image = cv::imread("path/to/image");
// ---------------------------------------------------------------------------------
@@ -21,7 +19,7 @@ int batch_size = 1;
// ------------- 2. Set new input shapes -------------------------------------------
std::string input_name;
SizeVector input_shape;
InferenceEngine::SizeVector input_shape;
std::tie(input_name, input_shape) = *input_shapes.begin(); // let's consider first input only
input_shape[0] = batch_size; // set batch size to the first input dimension
input_shape[2] = image.rows; // changes input height to the image one
@@ -37,10 +35,9 @@ int batch_size = 1;
// ------------- 4. Loading model to the device ------------------------------------
std::string device = "CPU";
ExecutableNetwork executable_network = core.LoadNetwork(network, device);
InferenceEngine::ExecutableNetwork executable_network = core.LoadNetwork(network, device);
// ---------------------------------------------------------------------------------
//! [part0]
return 0;


@@ -7,12 +7,12 @@ int main() {
using namespace InferenceEngine;
//! [part3]
InferenceEngine::SizeVector dims_src = {
1 /* batch, N*/,
(size_t) frame_in->Info.Height /* Height */,
(size_t) frame_in->Info.Width /* Width */,
3 /*Channels,*/,
};
TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
1 /* batch, N*/,
(size_t) frame_in->Info.Height /* Height */,
(size_t) frame_in->Info.Width /* Width */,
3 /*Channels,*/,
};
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
/* wrapping the surface data, as RGB is interleaved, need to pass only ptr to the R, notice that this wouldnt work with planar formats as these are 3 separate planes/pointers*/
InferenceEngine::TBlob<uint8_t>::Ptr p = InferenceEngine::make_shared_blob<uint8_t>( desc, (uint8_t*) frame_in->Data.R);
inferRequest.SetBlob("input", p);


@@ -7,8 +7,8 @@ int main() {
using namespace InferenceEngine;
//! [part4]
InferenceEngine::SizeVector dims_src = {
1 /* batch, N*/,
3 /*Channels,*/,
1 /* batch, N*/,
3 /*Channels,*/,
(size_t) frame_in->Info.Height /* Height */,
(size_t) frame_in->Info.Width /* Width */,
};


@@ -5,27 +5,28 @@
int main() {
using namespace InferenceEngine;
InferenceEngine::InferRequest inferRequest;
//! [part5]
cv::Mat frame = ...; // regular CV_8UC3 image, interleaved
cv::Mat frame(cv::Size(100, 100), CV_8UC3); // regular CV_8UC3 image, interleaved
// creating blob that wraps the OpenCVs Mat
// (the data it points should persists until the blob is released):
InferenceEngine::SizeVector dims_src = {
1 /* batch, N*/,
(size_t)frame.rows /* Height */,
(size_t)frame.cols /* Width */,
(size_t)frame.channels() /*Channels,*/,
};
TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
1 /* batch, N*/,
(size_t)frame.rows /* Height */,
(size_t)frame.cols /* Width */,
(size_t)frame.channels() /*Channels,*/,
};
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
InferenceEngine::TBlob<uint8_t>::Ptr p = InferenceEngine::make_shared_blob<uint8_t>( desc, (uint8_t*)frame.data, frame.step[0] * frame.rows);
inferRequest.SetBlob("input", p);
inferRequest.Infer();
// …
// similarly, you can wrap the output tensor (lets assume it is FP32)
// notice that the output should be also explicitly stated as NHWC with setLayout
const float* output_data = output_blob->buffer().
as<PrecisionTrait<Precision::FP32>::value_type*>();
cv::Mat res (rows, cols, CV_32FC3, output_data, CV_AUTOSTEP);
auto output_blob = inferRequest.GetBlob("output");
const float* output_data = output_blob->buffer().as<float*>();
auto dims = output_blob->getTensorDesc().getDims();
cv::Mat res (dims[2], dims[3], CV_32FC3, (void *)output_data);
//! [part5]
return 0;
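
The comment in part5 notes that the output should be explicitly declared as NHWC with setLayout, but the snippet never shows that step. A minimal sketch of it, with a placeholder model and the first output picked arbitrarily:

#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("sample.xml");   // placeholder IR

    // declare the output as FP32/NHWC so its buffer can later be wrapped into an
    // interleaved cv::Mat without an extra layout conversion
    auto output_info = network.getOutputsInfo();
    auto output_data = output_info.begin()->second;
    output_data->setPrecision(InferenceEngine::Precision::FP32);
    output_data->setLayout(InferenceEngine::Layout::NHWC);

    auto executable_network = core.LoadNetwork(network, "CPU");
    return 0;
}
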


@@ -6,23 +6,21 @@
int main() {
using namespace InferenceEngine;
//! [part6]
Core ie;
InferenceEngine::Core ie;
auto network = ie.ReadNetwork("Model.xml", "Model.bin");
InferenceEngine::InputsDataMap input_info(network.getInputsInfo());
auto executable_network = ie.LoadNetwork(network, "GPU");
auto infer_request = executable_network.CreateInferRequest();
for (auto & item : input_info) {
std::string input_name = item.first;
auto input = infer_request.GetBlob(input_name);
/** Lock/Fill input tensor with data **/
unsigned char* data = input->buffer().as<PrecisionTrait<Precision::U8>::value_type*>();
// ...
std::string input_name = item.first;
auto input = infer_request.GetBlob(input_name);
/** Lock/Fill input tensor with data **/
unsigned char* data = input->buffer().as<PrecisionTrait<Precision::U8>::value_type*>();
// ...
}
infer_request.Infer();
//! [part6]
return 0;


@@ -4,14 +4,15 @@
int main() {
using namespace InferenceEngine;
Core plugin;
auto network0 = plugin.ReadNetwork("sample.xml");
auto network1 = plugin.ReadNetwork("sample.xml");
InferenceEngine::Core core;
auto network0 = core.ReadNetwork("sample.xml");
auto network1 = core.ReadNetwork("sample.xml");
//! [part7]
//these two networks go thru same plugin (aka device) and their requests will not overlap.
auto executable_network0 = plugin.LoadNetwork(network0, "CPU", {{PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES}});
auto executable_network1 = plugin.LoadNetwork(network1, "GPU", {{PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES}});
auto executable_network0 = core.LoadNetwork(network0, "CPU",
{{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}});
auto executable_network1 = core.LoadNetwork(network1, "GPU",
{{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}});
//! [part7]
return 0;
}


@@ -4,13 +4,12 @@
int main() {
using namespace InferenceEngine;
//! [part8]
while() {
capture frame
populate CURRENT InferRequest
Infer CURRENT InferRequest //this call is synchronous
display CURRENT result
while(true) {
// capture frame
// populate CURRENT InferRequest
// Infer CURRENT InferRequest //this call is synchronous
// display CURRENT result
}
//! [part8]
return 0;


@@ -6,13 +6,13 @@
int main() {
using namespace InferenceEngine;
//! [part9]
while() {
capture frame
populate NEXT InferRequest
start NEXT InferRequest //this call is async and returns immediately
wait for the CURRENT InferRequest //processed in a dedicated thread
display CURRENT result
swap CURRENT and NEXT InferRequests
while(true) {
// capture frame
// populate NEXT InferRequest
// start NEXT InferRequest //this call is async and returns immediately
// wait for the CURRENT InferRequest //processed in a dedicated thread
// display CURRENT result
// swap CURRENT and NEXT InferRequests
}
//! [part9]
return 0;
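
The part9 pseudocode describes the double-buffered CURRENT/NEXT pattern in comments only. Below is one possible concrete sketch of it with two infer requests; the model path, frame count, capture and display steps are all placeholders:

#include <inference_engine.hpp>
#include <utility>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("sample.xml");               // placeholder IR
    auto executable_network = core.LoadNetwork(network, "CPU");

    auto current_request = executable_network.CreateInferRequest();
    auto next_request = executable_network.CreateInferRequest();

    bool first_frame = true;
    for (int frame = 0; frame < 10; ++frame) {                   // stand-in for "while frames arrive"
        // capture a frame and populate the inputs of next_request here
        next_request.StartAsync();                               // returns immediately
        if (!first_frame) {
            // wait for the frame submitted on the previous iteration
            current_request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
            // display / post-process the CURRENT result here
        }
        first_frame = false;
        std::swap(current_request, next_request);                // CURRENT <-> NEXT
    }
    // drain the last in-flight request
    current_request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
    return 0;
}

The swap is the whole trick: while the device works on the NEXT frame, the application overlaps capture and display of the CURRENT one instead of blocking on Wait.
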


@@ -10,13 +10,13 @@ using namespace InferenceEngine;
class AcceleratorSyncRequest : public InferRequestInternal {
public:
using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
void Preprocess();
void WriteToDevice();
void RunOnDevice();
void ReadFromDevice();
void PostProcess();
void Preprocess();
void WriteToDevice();
void RunOnDevice();
void ReadFromDevice();
void PostProcess();
};
// ! [async_infer_request:define_pipeline]


@@ -1,8 +1,8 @@
#include <inference_engine.hpp>
int main() {
using namespace InferenceEngine;
Core core;
InferenceEngine::Core core;
InferenceEngine::IInferRequest::CompletionCallback callback;
int numRequests = 42;
int i = 1;
auto network = core.ReadNetwork("sample.xml");
@@ -30,7 +30,7 @@ request[i].inferRequest->StartAsync();
//! [part3]
//! [part4]
request[i].inferRequest->SetCompletionCallback(InferenceEngine::IInferRequest::Ptr context);
request[i].inferRequest->SetCompletionCallback(callback);
//! [part4]
return 0;


@@ -4,7 +4,6 @@
int main() {
using namespace InferenceEngine;
//! [part0]
using namespace std;
@@ -34,7 +33,7 @@ auto ng_function = make_shared<Function>(OutputVector{add1}, ParameterVector{arg
//! [part0]
//! [part1]
CNNNetwork net (ng_function);
InferenceEngine::CNNNetwork net (ng_function);
//! [part1]
return 0;


@@ -1,21 +1,32 @@
#include <inference_engine.hpp>
#include <fstream>
#include <vector>
void decrypt_file(std::ifstream & stream,
const std::string & pass,
std::vector<uint8_t> & result) {
}
int main() {
using namespace InferenceEngine;
//! [part0]
std::vector<uint8_t> model;
std::vector<uint8_t> weights;
std::string password; // taken from an user
std::ifstream model_file("model.xml"), weights_file("model.bin");
// Read model files and decrypt them into temporary memory block
decrypt_file(model_file, password, model);
decrypt_file(weights_file, password, weights);
//! [part0]
//! [part1]
Core core;
InferenceEngine::Core core;
// Load model from temporary memory block
std::string strModel(model.begin(), model.end());
CNNNetwork network = core.ReadNetwork(strModel, make_shared_blob<uint8_t>({Precision::U8, {weights.size()}, C}, weights.data()));
InferenceEngine::CNNNetwork network = core.ReadNetwork(strModel,
InferenceEngine::make_shared_blob<uint8_t>({InferenceEngine::Precision::U8,
{weights.size()}, InferenceEngine::C}, weights.data()));
//! [part1]
return 0;