From cf8ccb590a6d8753bb7e081d0403a098c762b234 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Mon, 21 Mar 2022 09:27:43 +0300 Subject: [PATCH] Removed obsolete code snippets (#11061) * Removed obsolete code snippets * NCC style * Fixed NCC for BA --- .../ncc_naming_style/ncc_naming_style.cmake | 7 +++- docs/Extensibility_UG/Intro.md | 2 +- .../low_precision_transformations/lpt.md | 2 +- docs/OV_Runtime_UG/Int8Inference.md | 2 +- docs/OV_Runtime_UG/PythonPackage_Overview.md | 14 ------- docs/OV_Runtime_UG/openvino_intro.md | 4 +- docs/documentation.md | 2 +- .../installing-openvino-linux.md | 2 +- .../installing-openvino-macos.md | 2 +- .../installing-openvino-windows.md | 2 +- docs/snippets/CMakeLists.txt | 32 ++++++++-------- docs/snippets/Graph_debug_capabilities0.cpp | 13 ------- docs/snippets/Graph_debug_capabilities1.cpp | 13 ------- docs/snippets/InferenceEngine_QueryAPI0.cpp | 10 ----- docs/snippets/InferenceEngine_QueryAPI1.cpp | 10 ----- docs/snippets/InferenceEngine_QueryAPI2.cpp | 10 ----- docs/snippets/InferenceEngine_QueryAPI3.cpp | 12 ------ docs/snippets/InferenceEngine_QueryAPI4.cpp | 12 ------ docs/snippets/InferenceEngine_QueryAPI5.cpp | 12 ------ docs/snippets/dldt_optimization_guide1.cpp | 16 -------- docs/snippets/dldt_optimization_guide2.cpp | 14 ------- docs/snippets/dldt_optimization_guide3.cpp | 22 ----------- docs/snippets/dldt_optimization_guide4.cpp | 20 ---------- docs/snippets/dldt_optimization_guide5.cpp | 30 --------------- docs/snippets/dldt_optimization_guide6.cpp | 24 ------------ docs/snippets/dldt_optimization_guide7.cpp | 15 -------- docs/snippets/example_async_infer_request.cpp | 20 +++++----- docs/snippets/movidius-programming-guide.cpp | 36 ------------------ docs/snippets/nGraphTutorial.cpp | 38 ------------------- samples/cpp/CMakeLists.txt | 2 +- samples/cpp/benchmark_app/README.md | 12 ++---- samples/cpp/benchmark_app/main.cpp | 10 ++--- samples/cpp/build_samples.sh | 4 +- samples/cpp/build_samples_msvc.bat | 4 +- 
samples/cpp/hello_reshape_ssd/main.cpp | 2 +- samples/cpp/speech_sample/main.cpp | 6 +-- .../model_creation_sample.py | 2 +- samples/scripts/README.txt | 8 ++-- samples/scripts/run_sample_benchmark_app.bat | 8 ++-- samples/scripts/run_sample_benchmark_app.sh | 6 +-- samples/scripts/run_sample_squeezenet.bat | 6 +-- samples/scripts/run_sample_squeezenet.sh | 4 +- 42 files changed, 75 insertions(+), 397 deletions(-) delete mode 100644 docs/OV_Runtime_UG/PythonPackage_Overview.md delete mode 100644 docs/snippets/Graph_debug_capabilities0.cpp delete mode 100644 docs/snippets/Graph_debug_capabilities1.cpp delete mode 100644 docs/snippets/InferenceEngine_QueryAPI0.cpp delete mode 100644 docs/snippets/InferenceEngine_QueryAPI1.cpp delete mode 100644 docs/snippets/InferenceEngine_QueryAPI2.cpp delete mode 100644 docs/snippets/InferenceEngine_QueryAPI3.cpp delete mode 100644 docs/snippets/InferenceEngine_QueryAPI4.cpp delete mode 100644 docs/snippets/InferenceEngine_QueryAPI5.cpp delete mode 100644 docs/snippets/dldt_optimization_guide1.cpp delete mode 100644 docs/snippets/dldt_optimization_guide2.cpp delete mode 100644 docs/snippets/dldt_optimization_guide3.cpp delete mode 100644 docs/snippets/dldt_optimization_guide4.cpp delete mode 100644 docs/snippets/dldt_optimization_guide5.cpp delete mode 100644 docs/snippets/dldt_optimization_guide6.cpp delete mode 100644 docs/snippets/dldt_optimization_guide7.cpp delete mode 100644 docs/snippets/movidius-programming-guide.cpp delete mode 100644 docs/snippets/nGraphTutorial.cpp diff --git a/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake b/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake index 53b14c02c89..296552323b0 100644 --- a/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake +++ b/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake @@ -23,7 +23,7 @@ execute_process( ERROR_VARIABLE error_var) if(NOT clang_find_result EQUAL "0") - message(WARNING "Please, install 
libclang-[N]-dev package (required for ncc naming style check)") + message(WARNING "Please, install clang-[N] libclang-[N]-dev package (required for ncc naming style check)") message(WARNING "find_package(Clang) output: ${output_var}") message(WARNING "find_package(Clang) error: ${error_var}") set(ENABLE_NCC_STYLE OFF) @@ -107,8 +107,11 @@ function(ov_ncc_naming_style) list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES "${NCC_STYLE_SOURCE_DIRECTORY}") + # without it sources with same name from different directories will map to same .ncc_style target + file(RELATIVE_PATH source_dir_rel ${CMAKE_SOURCE_DIR} ${NCC_STYLE_SOURCE_DIRECTORY}) + foreach(source IN LISTS sources) - set(output_file "${ncc_style_bin_dir}/${source}.ncc_style") + set(output_file "${ncc_style_bin_dir}/${source_dir_rel}/${source}.ncc_style") set(full_source_path "${NCC_STYLE_SOURCE_DIRECTORY}/${source}") add_custom_command( diff --git a/docs/Extensibility_UG/Intro.md b/docs/Extensibility_UG/Intro.md index f0df72daf47..1291060b2c5 100644 --- a/docs/Extensibility_UG/Intro.md +++ b/docs/Extensibility_UG/Intro.md @@ -116,5 +116,5 @@ After the build you can use path to your extension library to load your extensio ## See Also * [OpenVINO Transformations](./ov_transformations.md) -* [Using Inference Engine Samples](../OV_Runtime_UG/Samples_Overview.md) +* [Using OpenVINO Runtime Samples](../OV_Runtime_UG/Samples_Overview.md) * [Hello Shape Infer SSD sample](../../samples/cpp/hello_reshape_ssd/README.md) diff --git a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/lpt.md b/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/lpt.md index dc7e2db4f78..28796f93d6e 100644 --- a/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/lpt.md +++ b/docs/IE_PLUGIN_DG/plugin_transformation_pipeline/low_precision_transformations/lpt.md @@ -259,7 +259,7 @@ Result model depends on different factors: Information about layer precision is 
stored in the performance counters that are -available from the Inference Engine API. For example, the part of performance counters table for quantized [TensorFlow* implementation of ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model inference on CPU Plugin looks as follows: +available from the OpenVINO Runtime API. For example, the part of performance counters table for quantized [TensorFlow* implementation of ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model inference on CPU Plugin looks as follows: | layerName | execStatus | layerType | execType | realTime (ms) | cpuTime (ms) | diff --git a/docs/OV_Runtime_UG/Int8Inference.md b/docs/OV_Runtime_UG/Int8Inference.md index dc3806e0586..bdfc41781b4 100644 --- a/docs/OV_Runtime_UG/Int8Inference.md +++ b/docs/OV_Runtime_UG/Int8Inference.md @@ -62,7 +62,7 @@ For 8-bit integer computations, a model must be quantized. Quantized models can ## Performance Counters Information about layer precision is stored in the performance counters that are -available from the Inference Engine API. For example, the part of performance counters table for quantized [TensorFlow* implementation of ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model inference on [CPU Plugin](supported_plugins/CPU.md) looks as follows: +available from the OpenVINO Runtime API. 
For example, the part of performance counters table for quantized [TensorFlow* implementation of ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model inference on [CPU Plugin](supported_plugins/CPU.md) looks as follows: | layerName | execStatus | layerType | execType | realTime (ms) | cpuTime (ms) | diff --git a/docs/OV_Runtime_UG/PythonPackage_Overview.md b/docs/OV_Runtime_UG/PythonPackage_Overview.md deleted file mode 100644 index 5e03eb3295c..00000000000 --- a/docs/OV_Runtime_UG/PythonPackage_Overview.md +++ /dev/null @@ -1,14 +0,0 @@ -# OpenVINO™ Python* Package - -OpenVINO™ Python\* package includes types to measure model and calibrate to low precision. - -The OpenVINO™ Python\* package available in the `/python/python3.X` directory. - -The OpenVINO™ Python\* package includes the following sub-packages: - - - [openvino.inference_engine](../../src/bindings/python/docs/api_overview.md) - Python\* wrapper on OpenVINO™ Inference Engine. - - `openvino.tools.accuracy_checker` - Measure accuracy. - - `openvino.tools.benchmark` - Measure latency and throughput. - -## See Also -* [Integrate with Customer Application New API](integrate_with_your_application.md) diff --git a/docs/OV_Runtime_UG/openvino_intro.md b/docs/OV_Runtime_UG/openvino_intro.md index 01ee85e2434..82dd1379c3f 100644 --- a/docs/OV_Runtime_UG/openvino_intro.md +++ b/docs/OV_Runtime_UG/openvino_intro.md @@ -2,7 +2,7 @@ @sphinxdirective -.. _deep learning inference engine: +.. _deep learning openvino runtime: .. toctree:: :maxdepth: 1 @@ -45,6 +45,6 @@ The scheme below illustrates the typical workflow for deploying a trained deep l - * - **Inference Engine Concept**. Duration: 3:43 + * - **OpenVINO Runtime Concept**. 
Duration: 3:43 @endsphinxdirective diff --git a/docs/documentation.md b/docs/documentation.md index 6324d435dfa..4730497de18 100644 --- a/docs/documentation.md +++ b/docs/documentation.md @@ -73,7 +73,7 @@ openvino_docs_Extensibility_UG_Intro openvino_docs_transformations - Inference Engine Plugin Developer Guide + OpenVINO Plugin Developer Guide groupie_dev_api Plugin Transformation Pipeline diff --git a/docs/install_guides/installing-openvino-linux.md b/docs/install_guides/installing-openvino-linux.md index 77c4462c93c..a29b45377b3 100644 --- a/docs/install_guides/installing-openvino-linux.md +++ b/docs/install_guides/installing-openvino-linux.md @@ -219,7 +219,7 @@ To uninstall the toolkit, follow the steps on the [Uninstalling page](uninstalli .. dropdown:: Additional Resources * Converting models for use with OpenVINO™: :ref:`Model Optimizer Developer Guide ` - * Writing your own OpenVINO™ applications: :ref:`OpenVINO™ Runtime User Guide ` + * Writing your own OpenVINO™ applications: :ref:`OpenVINO™ Runtime User Guide ` * Sample applications: :ref:`OpenVINO™ Toolkit Samples Overview ` * Pre-trained deep learning models: :ref:`Overview of OpenVINO™ Toolkit Pre-Trained Models ` * IoT libraries and code samples in the GitHUB repository: `Intel® IoT Developer Kit`_ diff --git a/docs/install_guides/installing-openvino-macos.md b/docs/install_guides/installing-openvino-macos.md index 383e56524b3..81e4f01c25e 100644 --- a/docs/install_guides/installing-openvino-macos.md +++ b/docs/install_guides/installing-openvino-macos.md @@ -143,7 +143,7 @@ To uninstall the toolkit, follow the steps on the [Uninstalling page](uninstalli .. 
dropdown:: Additional Resources * Converting models for use with OpenVINO™: :ref:`Model Optimizer Developer Guide ` - * Writing your own OpenVINO™ applications: :ref:`OpenVINO™ Runtime User Guide ` + * Writing your own OpenVINO™ applications: :ref:`OpenVINO™ Runtime User Guide ` * Sample applications: :ref:`OpenVINO™ Toolkit Samples Overview ` * Pre-trained deep learning models: :ref:`Overview of OpenVINO™ Toolkit Pre-Trained Models ` * IoT libraries and code samples in the GitHUB repository: `Intel® IoT Developer Kit`_ diff --git a/docs/install_guides/installing-openvino-windows.md b/docs/install_guides/installing-openvino-windows.md index 0aa6244de66..b3f5f7a0a32 100644 --- a/docs/install_guides/installing-openvino-windows.md +++ b/docs/install_guides/installing-openvino-windows.md @@ -180,7 +180,7 @@ To uninstall the toolkit, follow the steps on the [Uninstalling page](uninstalli .. dropdown:: Additional Resources * Converting models for use with OpenVINO™: :ref:`Model Optimizer Developer Guide ` - * Writing your own OpenVINO™ applications: :ref:`OpenVINO™ Runtime User Guide ` + * Writing your own OpenVINO™ applications: :ref:`OpenVINO™ Runtime User Guide ` * Sample applications: :ref:`OpenVINO™ Toolkit Samples Overview ` * Pre-trained deep learning models: :ref:`Overview of OpenVINO™ Toolkit Pre-Trained Models ` * IoT libraries and code samples in the GitHUB repository: `Intel® IoT Developer Kit`_ diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index b9908896c8b..8df0cc3f287 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -4,6 +4,13 @@ set(TARGET_NAME ie_docs_snippets) +if(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) + ie_add_compiler_flags(-Wno-unused-variable) + if(CMAKE_COMPILER_IS_GNUCXX) + ie_add_compiler_flags(-Wno-unused-variable -Wno-unused-but-set-variable) + endif() +endif() + file(GLOB SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/gpu/*.cpp") @@ -57,9 +64,9 @@ 
endif() # remove OpenCV related sources if (ENABLE_OPENCV) - find_package(OpenCV QUIET) + find_package(OpenCV QUIET) else() - set(OpenCV_FOUND FALSE) + set(OpenCV_FOUND OFF) endif() if(NOT OpenCV_FOUND) @@ -102,30 +109,23 @@ if(ENABLE_OV_ONNX_FRONTEND) target_link_libraries(${TARGET_NAME} PRIVATE openvino_onnx_frontend) endif() -if(NOT MSVC) - target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-variable) - if(CMAKE_COMPILER_IS_GNUCXX) - target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-but-set-variable) - endif() -endif() - target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime openvino::runtime::dev) +ov_ncc_naming_style(FOR_TARGET "${TARGET_NAME}" + SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") + +# +# Example +# + set(TARGET_NAME "ov_integration_snippet") # [cmake:integration_example] cmake_minimum_required(VERSION 3.10) set(CMAKE_CXX_STANDARD 11) - find_package(OpenVINO REQUIRED) add_executable(${TARGET_NAME} src/main.cpp) target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) # [cmake:integration_example] -if(NOT MSVC) - target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-variable) - if(CMAKE_COMPILER_IS_GNUCXX) - target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-but-set-variable) - endif() -endif() diff --git a/docs/snippets/Graph_debug_capabilities0.cpp b/docs/snippets/Graph_debug_capabilities0.cpp deleted file mode 100644 index 02c6a2c153b..00000000000 --- a/docs/snippets/Graph_debug_capabilities0.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include -#include -#include - -int main() { -using namespace InferenceEngine; -//! [part0] -std::shared_ptr model; -// ... -ov::pass::VisualizeTree("after.png").run_on_model(model); // Visualize the nGraph function to an image -//! 
[part0] -return 0; -} diff --git a/docs/snippets/Graph_debug_capabilities1.cpp b/docs/snippets/Graph_debug_capabilities1.cpp deleted file mode 100644 index 5649ed5abfb..00000000000 --- a/docs/snippets/Graph_debug_capabilities1.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include -#include - -int main() { -using namespace InferenceEngine; -//! [part1] -std::shared_ptr nGraph; -// ... -CNNNetwork network(nGraph); -network.serialize("test_ir.xml", "test_ir.bin"); -//! [part1] -return 0; -} diff --git a/docs/snippets/InferenceEngine_QueryAPI0.cpp b/docs/snippets/InferenceEngine_QueryAPI0.cpp deleted file mode 100644 index ebd1fdf30a3..00000000000 --- a/docs/snippets/InferenceEngine_QueryAPI0.cpp +++ /dev/null @@ -1,10 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part0] -InferenceEngine::Core core; -std::vector availableDevices = core.GetAvailableDevices(); -//! [part0] -return 0; -} diff --git a/docs/snippets/InferenceEngine_QueryAPI1.cpp b/docs/snippets/InferenceEngine_QueryAPI1.cpp deleted file mode 100644 index 26c75cab072..00000000000 --- a/docs/snippets/InferenceEngine_QueryAPI1.cpp +++ /dev/null @@ -1,10 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part1] -InferenceEngine::Core core; -bool dumpDotFile = core.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as(); -//! [part1] -return 0; -} diff --git a/docs/snippets/InferenceEngine_QueryAPI2.cpp b/docs/snippets/InferenceEngine_QueryAPI2.cpp deleted file mode 100644 index 473f217fa1f..00000000000 --- a/docs/snippets/InferenceEngine_QueryAPI2.cpp +++ /dev/null @@ -1,10 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part2] -InferenceEngine::Core core; -std::string cpuDeviceName = core.GetMetric("GPU", METRIC_KEY(FULL_DEVICE_NAME)).as(); -//! 
[part2] -return 0; -} diff --git a/docs/snippets/InferenceEngine_QueryAPI3.cpp b/docs/snippets/InferenceEngine_QueryAPI3.cpp deleted file mode 100644 index afd9f36948d..00000000000 --- a/docs/snippets/InferenceEngine_QueryAPI3.cpp +++ /dev/null @@ -1,12 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part3] -InferenceEngine::Core core; -auto network = core.ReadNetwork("sample.xml"); -auto exeNetwork = core.LoadNetwork(network, "CPU"); -auto nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as(); -//! [part3] -return 0; -} diff --git a/docs/snippets/InferenceEngine_QueryAPI4.cpp b/docs/snippets/InferenceEngine_QueryAPI4.cpp deleted file mode 100644 index ee7476a76ee..00000000000 --- a/docs/snippets/InferenceEngine_QueryAPI4.cpp +++ /dev/null @@ -1,12 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part4] -InferenceEngine::Core core; -auto network = core.ReadNetwork("sample.xml"); -auto exeNetwork = core.LoadNetwork(network, "MYRIAD"); -float temperature = exeNetwork.GetMetric(METRIC_KEY(DEVICE_THERMAL)).as(); -//! [part4] -return 0; -} diff --git a/docs/snippets/InferenceEngine_QueryAPI5.cpp b/docs/snippets/InferenceEngine_QueryAPI5.cpp deleted file mode 100644 index 4297c886699..00000000000 --- a/docs/snippets/InferenceEngine_QueryAPI5.cpp +++ /dev/null @@ -1,12 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part5] -InferenceEngine::Core core; -auto network = core.ReadNetwork("sample.xml"); -auto exeNetwork = core.LoadNetwork(network, "CPU"); -auto ncores = exeNetwork.GetConfig(PluginConfigParams::KEY_CPU_THREADS_NUM).as(); -//! [part5] -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide1.cpp b/docs/snippets/dldt_optimization_guide1.cpp deleted file mode 100644 index 91b44081351..00000000000 --- a/docs/snippets/dldt_optimization_guide1.cpp +++ /dev/null @@ -1,16 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! 
[part1] -Core ie; -auto netReader = ie.ReadNetwork("sample.xml"); -InferenceEngine::InputsDataMap info(netReader.getInputsInfo()); -auto& inputInfoFirst = info.begin()->second; -for (auto& it : info) { - it.second->setPrecision(Precision::U8); -} -//! [part1] - -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide2.cpp b/docs/snippets/dldt_optimization_guide2.cpp deleted file mode 100644 index 97f6e28e3ee..00000000000 --- a/docs/snippets/dldt_optimization_guide2.cpp +++ /dev/null @@ -1,14 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part2] -//Lock Intel MSS surface -mfxFrameSurface1 *frame_in; //Input MSS surface. -mfxFrameAllocator* pAlloc = &m_mfxCore.FrameAllocator(); -pAlloc->Lock(pAlloc->pthis, frame_in->Data.MemId, &frame_in->Data); -//Inference Engine code -//! [part2] - -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide3.cpp b/docs/snippets/dldt_optimization_guide3.cpp deleted file mode 100644 index e3be0da706d..00000000000 --- a/docs/snippets/dldt_optimization_guide3.cpp +++ /dev/null @@ -1,22 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part3] -InferenceEngine::SizeVector dims_src = { - 1 /* batch, N*/, - (size_t) frame_in->Info.Height /* Height */, - (size_t) frame_in->Info.Width /* Width */, - 3 /*Channels,*/, - }; -InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC); -/* wrapping the surface data, as RGB is interleaved, need to pass only ptr to the R, notice that this wouldn’t work with planar formats as these are 3 separate planes/pointers*/ -InferenceEngine::TBlob::Ptr p = InferenceEngine::make_shared_blob( desc, (uint8_t*) frame_in->Data.R); -inferRequest.SetBlob("input", p); -inferRequest.Infer(); -//Make sure to unlock the surface upon inference completion, to return the ownership back to the Intel MSS -pAlloc->Unlock(pAlloc->pthis, frame_in->Data.MemId, &frame_in->Data); -//! 
[part3] - -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide4.cpp b/docs/snippets/dldt_optimization_guide4.cpp deleted file mode 100644 index 52396aa268b..00000000000 --- a/docs/snippets/dldt_optimization_guide4.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part4] -InferenceEngine::SizeVector dims_src = { - 1 /* batch, N*/, - 3 /*Channels,*/, - (size_t) frame_in->Info.Height /* Height */, - (size_t) frame_in->Info.Width /* Width */, - }; -TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NCHW); -/* wrapping the RGBP surface data*/ -InferenceEngine::TBlob::Ptr p = InferenceEngine::make_shared_blob( desc, (uint8_t*) frame_in->Data.R); -inferRequest.SetBlob("input", p); -// … -//! [part4] - -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide5.cpp b/docs/snippets/dldt_optimization_guide5.cpp deleted file mode 100644 index a9226ecce3a..00000000000 --- a/docs/snippets/dldt_optimization_guide5.cpp +++ /dev/null @@ -1,30 +0,0 @@ -#include -#include - -int main() { -InferenceEngine::InferRequest inferRequest; -//! 
[part5] -cv::Mat frame(cv::Size(100, 100), CV_8UC3); // regular CV_8UC3 image, interleaved -// creating blob that wraps the OpenCV’s Mat -// (the data it points should persists until the blob is released): -InferenceEngine::SizeVector dims_src = { - 1 /* batch, N*/, - (size_t)frame.rows /* Height */, - (size_t)frame.cols /* Width */, - (size_t)frame.channels() /*Channels,*/, - }; -InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC); -InferenceEngine::TBlob::Ptr p = InferenceEngine::make_shared_blob( desc, (uint8_t*)frame.data, frame.step[0] * frame.rows); -inferRequest.SetBlob("input", p); -inferRequest.Infer(); -// … -// similarly, you can wrap the output tensor (let’s assume it is FP32) -// notice that the output should be also explicitly stated as NHWC with setLayout -auto output_blob = inferRequest.GetBlob("output"); -const float* output_data = output_blob->buffer().as(); -auto dims = output_blob->getTensorDesc().getDims(); -cv::Mat res (dims[2], dims[3], CV_32FC3, (void *)output_data); -//! [part5] - -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide6.cpp b/docs/snippets/dldt_optimization_guide6.cpp deleted file mode 100644 index 5e5cd2de485..00000000000 --- a/docs/snippets/dldt_optimization_guide6.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include - -int main() { -using namespace InferenceEngine; -//! [part6] -InferenceEngine::Core ie; -auto network = ie.ReadNetwork("Model.xml", "Model.bin"); -InferenceEngine::InputsDataMap input_info(network.getInputsInfo()); - -auto executable_network = ie.LoadNetwork(network, "GPU"); -auto infer_request = executable_network.CreateInferRequest(); - -for (auto & item : input_info) { - std::string input_name = item.first; - auto input = infer_request.GetBlob(input_name); - /** Lock/Fill input tensor with data **/ - unsigned char* data = input->buffer().as::value_type*>(); - // ... -} - -infer_request.Infer(); -//! 
[part6] -return 0; -} diff --git a/docs/snippets/dldt_optimization_guide7.cpp b/docs/snippets/dldt_optimization_guide7.cpp deleted file mode 100644 index c2fdc529c37..00000000000 --- a/docs/snippets/dldt_optimization_guide7.cpp +++ /dev/null @@ -1,15 +0,0 @@ -#include - -int main() { -InferenceEngine::Core core; -auto network0 = core.ReadNetwork("sample.xml"); -auto network1 = core.ReadNetwork("sample.xml"); -//! [part7] -//these two networks go thru same plugin (aka device) and their requests will not overlap. -auto executable_network0 = core.LoadNetwork(network0, "CPU", - {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}}); -auto executable_network1 = core.LoadNetwork(network1, "GPU", - {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}}); -//! [part7] -return 0; -} diff --git a/docs/snippets/example_async_infer_request.cpp b/docs/snippets/example_async_infer_request.cpp index 782182f5caa..ed3f880e4a8 100644 --- a/docs/snippets/example_async_infer_request.cpp +++ b/docs/snippets/example_async_infer_request.cpp @@ -12,11 +12,11 @@ class AcceleratorSyncRequest : public IInferRequestInternal { public: using Ptr = std::shared_ptr; - void Preprocess(); - void WriteToDevice(); - void RunOnDevice(); - void ReadFromDevice(); - void PostProcess(); + void preprocess(); + void write_to_device(); + void run_on_device(); + void read_from_device(); + void post_process(); }; // ! 
[async_infer_request:define_pipeline] @@ -40,19 +40,19 @@ class AcceleratorAsyncInferRequest : public AsyncInferRequestThreadSafeDefault { // Five pipeline stages of synchronous infer request are run by different executors _pipeline = { { _preprocessExecutor , [this] { - _accSyncRequest->Preprocess(); + _accSyncRequest->preprocess(); }}, { _writeToDeviceExecutor , [this] { - _accSyncRequest->WriteToDevice(); + _accSyncRequest->write_to_device(); }}, { _runOnDeviceExecutor , [this] { - _accSyncRequest->RunOnDevice(); + _accSyncRequest->run_on_device(); }}, { _readFromDeviceExecutor , [this] { - _accSyncRequest->ReadFromDevice(); + _accSyncRequest->read_from_device(); }}, { _postProcessExecutor , [this] { - _accSyncRequest->PostProcess(); + _accSyncRequest->post_process(); }}, }; } diff --git a/docs/snippets/movidius-programming-guide.cpp b/docs/snippets/movidius-programming-guide.cpp deleted file mode 100644 index 39f28ae254d..00000000000 --- a/docs/snippets/movidius-programming-guide.cpp +++ /dev/null @@ -1,36 +0,0 @@ -#include - -int main() { -InferenceEngine::Core core; -int numRequests = 42; -int i = 1; -auto network = core.ReadNetwork("sample.xml"); -auto executable_network = core.LoadNetwork(network, "CPU"); -//! [part0] -struct Request { - InferenceEngine::InferRequest inferRequest; - int frameidx; -}; -//! [part0] - -//! [part1] -// numRequests is the number of frames (max size, equal to the number of VPUs in use) -std::vector request(numRequests); -//! [part1] - -//! [part2] -// initialize infer request pointer – Consult IE API for more detail. -request[i].inferRequest = executable_network.CreateInferRequest(); -//! [part2] - -//! [part3] -// Run inference -request[i].inferRequest.StartAsync(); -//! [part3] - -//! [part4] -request[i].inferRequest.SetCompletionCallback([] () {}); -//! 
[part4] - -return 0; -} diff --git a/docs/snippets/nGraphTutorial.cpp b/docs/snippets/nGraphTutorial.cpp deleted file mode 100644 index e39e783d5eb..00000000000 --- a/docs/snippets/nGraphTutorial.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include -#include "ngraph/opsets/opset.hpp" -#include "ngraph/opsets/opset3.hpp" - - -int main() { -//! [part0] - -using namespace std; -using namespace ngraph; - -auto arg0 = make_shared(element::f32, Shape{7}); -auto arg1 = make_shared(element::f32, Shape{7}); -// Create an 'Add' operation with two inputs 'arg0' and 'arg1' -auto add0 = make_shared(arg0, arg1); -auto abs0 = make_shared(add0); -// Create a node whose inputs/attributes will be specified later -auto acos0 = make_shared(); -// Create a node using opset factories -auto add1 = shared_ptr(get_opset3().create("Add")); -// Set inputs to nodes explicitly -acos0->set_argument(0, add0); -add1->set_argument(0, acos0); -add1->set_argument(1, abs0); - -// Create a graph with one output (add1) and four inputs (arg0, arg1) -auto ng_function = make_shared(OutputVector{add1}, ParameterVector{arg0, arg1}); -// Run shape inference on the nodes -ng_function->validate_nodes_and_infer_types(); - -//! [part0] - -//! [part1] -InferenceEngine::CNNNetwork net (ng_function); -//! 
[part1] - -return 0; -} diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 3433dee0e5e..9a73582a73e 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -233,7 +233,7 @@ macro(ie_add_sample) endif() if(COMMAND ov_ncc_naming_style AND NOT c_sample) ov_ncc_naming_style(FOR_TARGET "${IE_SAMPLE_NAME}" - SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") + SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") endif() endmacro() diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md index 11a3239a362..64be241a142 100644 --- a/samples/cpp/benchmark_app/README.md +++ b/samples/cpp/benchmark_app/README.md @@ -10,11 +10,7 @@ Performance can be measured for two inference modes: latency- and throughput-ori Upon start-up, the application reads command-line parameters and loads a network and inputs (images/binary files) to the specified device. - **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order. - If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application - or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. - For more information about the argument, refer to **When to Reverse Input Channels** section of - [Converting a Model](../../../docs/MO_DG/prepare_model/convert_model/Converting_Model.md). +> **NOTE**: By default, OpenVINO Runtime samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. 
For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model](../../../docs/MO_DG/prepare_model/convert_model/Converting_Model.md). Device-specific execution parameters (number of streams, threads, and so on) can be either explicitly specified through the command line or left default. In the last case, the sample logic will select the values for the optimal throughput. @@ -156,7 +152,7 @@ If a model has mixed input types, input folder should contain all required files To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's](@ref omz_models_group_intel) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader). -> **NOTE**: Before running the tool with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). +> **NOTE**: Before running the tool with a trained model, make sure the model is converted to the OpenVINO IR (\*.xml + \*.bin) using the [Model Optimizer tool](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). > > The sample accepts models in ONNX format (.onnx) that do not require preprocessing. @@ -173,7 +169,7 @@ This section provides step-by-step instructions on how to run the Benchmark Tool ```sh python3 downloader.py --name googlenet-v1 -o ``` -2. Convert the model to the Inference Engine IR format. Run the Model Optimizer using the `mo` command with the path to the model, model format (which must be FP32 for CPU and FPG) and output directory to generate the IR files: +2. Convert the model to the OpenVINO IR format. 
Run the Model Optimizer using the `mo` command with the path to the model, model format (which must be FP32 for CPU and FPGA) and output directory to generate the IR files: ```sh mo --input_model /public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir ``` @@ -243,6 +239,6 @@ Below are fragments of sample output static and dynamic networks: ``` ## See Also -* [Using Inference Engine Samples](../../../docs/OV_Runtime_UG/Samples_Overview.md) +* [Using OpenVINO Runtime Samples](../../../docs/OV_Runtime_UG/Samples_Overview.md) * [Model Optimizer](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) * [Model Downloader](@ref omz_tools_downloader) diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index 3518ffc5eee..f6fded1b22c 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ b/samples/cpp/benchmark_app/main.cpp @@ -32,7 +32,7 @@ static const size_t progressBarDefaultTotalCount = 1000; -bool ParseAndCheckCommandLine(int argc, char* argv[]) { +bool parse_and_check_command_line(int argc, char* argv[]) { // ---------------------------Parsing and validating input // arguments-------------------------------------- slog::info << "Parsing input parameters" << slog::endl; @@ -88,7 +88,7 @@ static void next_step(const std::string additional_info = "") { static size_t step_id = 0; static const std::map step_names = { {1, "Parsing and validating input arguments"}, - {2, "Loading Inference Engine"}, + {2, "Loading OpenVINO Runtime"}, {3, "Setting device configuration"}, {4, "Reading network files"}, {5, "Resizing network to match image sizes and given batch"}, @@ -151,7 +151,7 @@ int main(int argc, char* argv[]) { // ------------------------------------------------- next_step(); - if (!ParseAndCheckCommandLine(argc, argv)) { + if (!parse_and_check_command_line(argc, argv)) { return 0; } @@ -203,7 +203,7 @@ int main(int argc, char* argv[]) { /** This vector stores paths to the processed images with input names**/ auto
inputFiles = parse_input_arguments(gflags::GetArgvs()); - // ----------------- 2. Loading the Inference Engine + // ----------------- 2. Loading the OpenVINO Runtime // ----------------------------------------------------------- next_step(); @@ -1089,7 +1089,7 @@ int main(int argc, char* argv[]) { if (!FLAGS_dump_config.empty()) { dump_config(FLAGS_dump_config, config); - slog::info << "Inference Engine configuration settings were dumped to " << FLAGS_dump_config << slog::endl; + slog::info << "OpenVINO Runtime configuration settings were dumped to " << FLAGS_dump_config << slog::endl; } if (!FLAGS_exec_graph_path.empty()) { diff --git a/samples/cpp/build_samples.sh b/samples/cpp/build_samples.sh index e3e97c7d599..c1c9536b5e1 100755 --- a/samples/cpp/build_samples.sh +++ b/samples/cpp/build_samples.sh @@ -4,7 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 usage() { - echo "Build inference engine samples" + echo "Build OpenVINO Runtime samples" echo echo "Options:" echo " -h Print the help message" @@ -70,7 +70,7 @@ else fi if ! command -v cmake &>/dev/null; then - printf "\n\nCMAKE is not installed. It is required to build Inference Engine samples. Please install it. \n\n" + printf "\n\nCMAKE is not installed. It is required to build OpenVINO Runtime samples. Please install it. \n\n" exit 1 fi diff --git a/samples/cpp/build_samples_msvc.bat b/samples/cpp/build_samples_msvc.bat index bfa707d958c..259906a9420 100644 --- a/samples/cpp/build_samples_msvc.bat +++ b/samples/cpp/build_samples_msvc.bat @@ -52,7 +52,7 @@ if exist "%SAMPLE_BUILD_DIR%\CMakeCache.txt" del "%SAMPLE_BUILD_DIR%\CMakeCache. cd /d "%ROOT_DIR%" && cmake -E make_directory "%SAMPLE_BUILD_DIR%" && cd /d "%SAMPLE_BUILD_DIR%" && cmake -G "Visual Studio 16 2019" -A %PLATFORM% "%ROOT_DIR%" echo. 
-echo ###############^|^| Build Inference Engine samples using MS Visual Studio (MSBuild.exe) ^|^|############### +echo ###############^|^| Build OpenVINO Runtime samples using MS Visual Studio (MSBuild.exe) ^|^|############### echo. echo cmake --build . --config Release @@ -65,7 +65,7 @@ echo Done. exit /b :usage -echo Build inference engine samples +echo Build OpenVINO Runtime samples echo. echo Options: echo -h Print the help message diff --git a/samples/cpp/hello_reshape_ssd/main.cpp b/samples/cpp/hello_reshape_ssd/main.cpp index 70646b03330..ad3dfdbea07 100644 --- a/samples/cpp/hello_reshape_ssd/main.cpp +++ b/samples/cpp/hello_reshape_ssd/main.cpp @@ -36,7 +36,7 @@ int main(int argc, char* argv[]) { const std::string device_name{argv[3]}; // ------------------------------------------------------------------- - // Step 1. Initialize inference engine core + // Step 1. Initialize OpenVINO Runtime core ov::Core core; // ------------------------------------------------------------------- diff --git a/samples/cpp/speech_sample/main.cpp b/samples/cpp/speech_sample/main.cpp index 7ebc4adde8c..861058bf94c 100644 --- a/samples/cpp/speech_sample/main.cpp +++ b/samples/cpp/speech_sample/main.cpp @@ -32,13 +32,13 @@ using namespace ov::preprocess; /** - * @brief The entry point for inference engine automatic speech recognition sample + * @brief The entry point for OpenVINO Runtime automatic speech recognition sample * @file speech_sample/main.cpp * @example speech_sample/main.cpp */ int main(int argc, char* argv[]) { try { - // ------------------------------ Get Inference Engine version ---------------------------------------------- + // ------------------------------ Get OpenVINO Runtime version ---------------------------------------------- slog::info << "OpenVINO runtime: " << ov::get_openvino_version() << slog::endl; // ------------------------------ Parsing and validation of input arguments --------------------------------- @@ -79,7 +79,7 @@ int main(int argc, char* 
argv[]) { } size_t numInputFiles(inputFiles.size()); - // --------------------------- Step 1. Initialize inference engine core and read model + // --------------------------- Step 1. Initialize OpenVINO Runtime core and read model // ------------------------------------- ov::Core core; slog::info << "Loading model files:" << slog::endl << FLAGS_m << slog::endl; diff --git a/samples/python/model_creation_sample/model_creation_sample.py b/samples/python/model_creation_sample/model_creation_sample.py index b652f06fae3..33b796d83da 100755 --- a/samples/python/model_creation_sample/model_creation_sample.py +++ b/samples/python/model_creation_sample/model_creation_sample.py @@ -133,7 +133,7 @@ def main(): device_name = sys.argv[2] labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] number_top = 1 - # ---------------------------Step 1. Initialize inference engine core-------------------------------------------------- + # ---------------------------Step 1. Initialize OpenVINO Runtime Core-------------------------------------------------- log.info('Creating OpenVINO Runtime Core') core = Core() diff --git a/samples/scripts/README.txt b/samples/scripts/README.txt index a013aad62cc..7d32960dfa0 100644 --- a/samples/scripts/README.txt +++ b/samples/scripts/README.txt @@ -39,12 +39,12 @@ The sample illustrates the general workflow of using the Intel(R) Deep Learning - Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py) - Installs all prerequisites required for running the Model Optimizer using the scripts from the "tools\model_optimizer\install_prerequisites" folder - Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py) - - Builds the Inference Engine classification_sample (samples\cpp\classification_sample) + - Builds the OpenVINO Runtime classification_sample (samples\cpp\classification_sample) 
- Runs the sample with the car.png picture located in the demo folder The sample application prints top-10 inference results for the picture. -For more information about the Inference Engine classification sample, refer to the documentation available in the sample folder. +For more information about the OpenVINO Runtime classification sample, refer to the documentation available in the sample folder. Benchmark Sample Using SqueezeNet =============================== @@ -56,9 +56,9 @@ The sample script does the following: - Downloads a public SqueezeNet model using the Model Downloader (extras\open_model_zoo\tools\downloader\downloader.py) - Installs all prerequisites required for running the Model Optimizer using the scripts from the "tools\model_optimizer\install_prerequisites" folder - Converts SqueezeNet to an IR using the Model Optimizer (tools\model_optimizer\mo.py) via the Model Converter (extras\open_model_zoo\tools\downloader\converter.py) - - Builds the Inference Engine benchmark tool (samples\benchmark_app) + - Builds the OpenVINO Runtime benchmark tool (samples\benchmark_app) - Runs the tool with the car.png picture located in the demo folder The benchmark app prints performance counters, resulting latency, and throughput values. -For more information about the Inference Engine benchmark app, refer to the documentation available in the sample folder. +For more information about the OpenVINO Runtime benchmark app, refer to the documentation available in the sample folder. diff --git a/samples/scripts/run_sample_benchmark_app.bat b/samples/scripts/run_sample_benchmark_app.bat index e92737076be..c3c975251fe 100644 --- a/samples/scripts/run_sample_benchmark_app.bat +++ b/samples/scripts/run_sample_benchmark_app.bat @@ -155,7 +155,7 @@ CALL :delay 7 :buildSample echo. 
-echo ###############^|^| Generate VS solution for Inference Engine samples using cmake ^|^|############### +echo ###############^|^| Generate VS solution for OpenVINO Runtime samples using cmake ^|^|############### echo. CALL :delay 3 @@ -173,7 +173,7 @@ if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 echo. -echo ###############^|^| Build Inference Engine samples using cmake ^|^|############### +echo ###############^|^| Build OpenVINO Runtime samples using cmake ^|^|############### echo. CALL :delay 3 @@ -186,7 +186,7 @@ CALL :delay 7 :runSample echo. -echo ###############^|^| Run Inference Engine benchmark app ^|^|############### +echo ###############^|^| Run OpenVINO Runtime benchmark app ^|^|############### echo. CALL :delay 3 copy /Y "%ROOT_DIR%%model_name%.labels" "%ir_dir%" @@ -198,7 +198,7 @@ benchmark_app.exe -i "%target_image_path%" -m "%ir_dir%\%model_name%.xml" -pc - if ERRORLEVEL 1 GOTO errorHandling echo. -echo ###############^|^| Inference Engine benchmark app completed successfully ^|^|############### +echo ###############^|^| OpenVINO Runtime benchmark app completed successfully ^|^|############### CALL :delay 10 cd /d "%ROOT_DIR%" diff --git a/samples/scripts/run_sample_benchmark_app.sh b/samples/scripts/run_sample_benchmark_app.sh index 65f9b7154e3..d4f9875a5c9 100755 --- a/samples/scripts/run_sample_benchmark_app.sh +++ b/samples/scripts/run_sample_benchmark_app.sh @@ -158,7 +158,7 @@ else fi # Step 3. Build samples -echo -ne "\n###############|| Build Inference Engine samples ||###############\n\n" +echo -ne "\n###############|| Build OpenVINO Runtime samples ||###############\n\n" OS_PATH=$(uname -m) NUM_THREADS="-j2" @@ -181,7 +181,7 @@ cmake -DCMAKE_BUILD_TYPE=Release "$samples_path" make $NUM_THREADS benchmark_app # Step 4. 
Run samples -echo -ne "\n###############|| Run Inference Engine benchmark app ||###############\n\n" +echo -ne "\n###############|| Run OpenVINO Runtime benchmark app ||###############\n\n" cd "$binaries_dir" @@ -189,4 +189,4 @@ cp -f "$ROOT_DIR/${model_name}.labels" "${ir_dir}/" print_and_run ./benchmark_app -d "$target" -i "$target_image_path" -m "${ir_dir}/${model_name}.xml" -pc "${sampleoptions[@]}" -echo -ne "\n###############|| Inference Engine benchmark app completed successfully ||###############\n\n" +echo -ne "\n###############|| OpenVINO Runtime benchmark app completed successfully ||###############\n\n" diff --git a/samples/scripts/run_sample_squeezenet.bat b/samples/scripts/run_sample_squeezenet.bat index 034d300feb3..944d3b21edb 100644 --- a/samples/scripts/run_sample_squeezenet.bat +++ b/samples/scripts/run_sample_squeezenet.bat @@ -151,7 +151,7 @@ CALL :delay 7 :buildSample echo. -echo ###############^|^| Generate VS solution for Inference Engine samples using cmake ^|^|############### +echo ###############^|^| Generate VS solution for OpenVINO Runtime samples using cmake ^|^|############### echo. CALL :delay 3 @@ -169,7 +169,7 @@ if ERRORLEVEL 1 GOTO errorHandling CALL :delay 7 echo. -echo ###############^|^| Build Inference Engine samples using cmake ^|^|############### +echo ###############^|^| Build OpenVINO Runtime samples using cmake ^|^|############### echo. CALL :delay 3 @@ -182,7 +182,7 @@ CALL :delay 7 :runSample echo. -echo ###############^|^| Run Inference Engine classification sample ^|^|############### +echo ###############^|^| Run OpenVINO Runtime classification sample ^|^|############### echo. CALL :delay 3 copy /Y "%ROOT_DIR%%model_name%.labels" "%ir_dir%" diff --git a/samples/scripts/run_sample_squeezenet.sh b/samples/scripts/run_sample_squeezenet.sh index e429326f6d9..01472e81b77 100755 --- a/samples/scripts/run_sample_squeezenet.sh +++ b/samples/scripts/run_sample_squeezenet.sh @@ -154,7 +154,7 @@ else fi # Step 3. 
Build samples -echo -ne "\n###############|| Build Inference Engine samples ||###############\n\n" +echo -ne "\n###############|| Build OpenVINO Runtime samples ||###############\n\n" OS_PATH=$(uname -m) NUM_THREADS="-j2" @@ -178,7 +178,7 @@ cmake -DCMAKE_BUILD_TYPE=Release "$samples_path" make $NUM_THREADS classification_sample_async # Step 4. Run sample -echo -ne "\n###############|| Run Inference Engine classification sample ||###############\n\n" +echo -ne "\n###############|| Run OpenVINO Runtime classification sample ||###############\n\n" cd "$binaries_dir"