diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index 15eabf321ab..4d3135903de 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -223,7 +223,11 @@ function(build_docs)
         "${OpenVINO_SOURCE_DIR}/inference-engine/*.md"
         "${OpenVINO_SOURCE_DIR}/inference-engine/*.png"
         "${OpenVINO_SOURCE_DIR}/inference-engine/*.gif"
-        "${OpenVINO_SOURCE_DIR}/inference-engine/*.jpg")
+        "${OpenVINO_SOURCE_DIR}/inference-engine/*.jpg"
+        "${OpenVINO_SOURCE_DIR}/tools/*.md"
+        "${OpenVINO_SOURCE_DIR}/tools/*.png"
+        "${OpenVINO_SOURCE_DIR}/tools/*.gif"
+        "${OpenVINO_SOURCE_DIR}/tools/*.jpg")

     foreach(source_file ${ovino_doc_files})
         list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy
diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md
index db39cbfc5b4..f9e21cf5e4d 100644
--- a/docs/IE_DG/Samples_Overview.md
+++ b/docs/IE_DG/Samples_Overview.md
@@ -14,7 +14,7 @@ Inference Engine sample applications include the following:
    - [Automatic Speech Recognition Python Sample](../../inference-engine/ie_bridges/python/sample/speech_sample/README.md)
 - **Benchmark Application** – Estimates deep learning inference performance on supported devices for synchronous and asynchronous modes.
    - [Benchmark C++ Tool](../../inference-engine/samples/benchmark_app/README.md)
-   - [Benchmark Python Tool](../../inference-engine/tools/benchmark_tool/README.md)
+   - [Benchmark Python Tool](../../tools/benchmark_tool/README.md)
 - **Hello Classification Sample** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API. Input of any size and layout can be set to an infer request which will be pre-processed automatically during inference (the sample supports only images as inputs and supports Unicode paths).
    - [Hello Classification C++ Sample](../../inference-engine/samples/hello_classification/README.md)
    - [Hello Classification C Sample](../../inference-engine/ie_bridges/c/samples/hello_classification/README.md)
diff --git a/docs/benchmarks/performance_benchmarks_faq.md b/docs/benchmarks/performance_benchmarks_faq.md
index a89d0fc07c3..2ff33612097 100644
--- a/docs/benchmarks/performance_benchmarks_faq.md
+++ b/docs/benchmarks/performance_benchmarks_faq.md
@@ -15,7 +15,7 @@ The models used in the performance benchmarks were chosen based on general adopt
 CF means Caffe*, while TF means TensorFlow*.

 #### 5. How can I run the benchmark results on my own?
-All of the performance benchmarks were generated using the open-sourced tool within the Intel® Distribution of OpenVINO™ toolkit called `benchmark_app`, which is available in both [C++](../../inference-engine/samples/benchmark_app/README.md) and [Python](../../inference-engine/tools/benchmark_tool/README.md).
+All of the performance benchmarks were generated using the open-sourced tool within the Intel® Distribution of OpenVINO™ toolkit called `benchmark_app`, which is available in both [C++](../../inference-engine/samples/benchmark_app/README.md) and [Python](../../tools/benchmark_tool/README.md).

 #### 6. What image sizes are used for the classification network models?
 The image size used in the inference depends on the network being benchmarked. The following table shows the list of input sizes for each network model.
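The FAQ answer updated above says the published numbers can be reproduced with `benchmark_app`, which ships in C++ and Python variants, but it stops short of an example invocation. As a rough sketch only — assuming the repository root is the working directory so the relocated `tools/benchmark_tool/benchmark_app.py` from this patch is reachable, and using placeholder model/input paths — the documented `-m`, `-i`, and `-d` flags can be driven from a small Python wrapper:

```
# Hedged sketch: the model/input paths and device below are placeholders,
# not values taken from this patch.
import subprocess
import sys

def run_benchmark(model_xml: str, input_path: str, device: str = "CPU") -> None:
    """Call the Python benchmark_app with the flags its README documents."""
    cmd = [
        sys.executable, "tools/benchmark_tool/benchmark_app.py",  # location after this patch
        "-m", model_xml,    # IR model (.xml with the matching .bin next to it)
        "-i", input_path,   # image or binary input file/folder
        "-d", device,       # target device, e.g. CPU or GPU
    ]
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    run_benchmark("model.xml", "images/")
```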
diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md
index 2d5076a60c6..c705d592ce3 100644
--- a/inference-engine/samples/benchmark_app/README.md
+++ b/inference-engine/samples/benchmark_app/README.md
@@ -2,7 +2,7 @@

 This topic demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices. Performance can be measured for two inference modes: synchronous (latency-oriented) and asynchronous (throughput-oriented).

-> **NOTE:** This topic describes usage of C++ implementation of the Benchmark Tool. For the Python* implementation, refer to [Benchmark Python* Tool](../../tools/benchmark_tool/README.md).
+> **NOTE:** This topic describes usage of C++ implementation of the Benchmark Tool. For the Python* implementation, refer to [Benchmark Python* Tool](../../../tools/benchmark_tool/README.md).

 > **TIP**: You also can work with the Benchmark Tool inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench).
 > [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare
diff --git a/inference-engine/tools/package_BOM.txt b/inference-engine/tools/package_BOM.txt
index fcff3b75a0f..b1d58875c48 100644
--- a/inference-engine/tools/package_BOM.txt
+++ b/inference-engine/tools/package_BOM.txt
@@ -1,6 +1,3 @@
-benchmark_tool/benchmark_app.py
-benchmark_tool/requirements.txt
-benchmark_tool/README.md
 cross_check_tool/__init__.py
 cross_check_tool/utils.py
 cross_check_tool/requirements.txt
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 5469ac09336..fc468719d16 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -29,8 +29,10 @@ if(ENABLE_PYTHON)
     ie_cpack_add_component(python_tools_${PYTHON_VERSION})
     ie_cpack_add_component(python_tools)

-    install(DIRECTORY ../inference-engine/tools/benchmark_tool
-            DESTINATION deployment_tools/tools
+    install(FILES benchmark_tool/benchmark_app.py
+                  benchmark_tool/README.md
+                  benchmark_tool/requirements.txt
+            DESTINATION deployment_tools/tools/benchmark_tool
             COMPONENT python_tools)

     install(DIRECTORY ../inference-engine/tools/cross_check_tool
diff --git a/inference-engine/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md
similarity index 96%
rename from inference-engine/tools/benchmark_tool/README.md
rename to tools/benchmark_tool/README.md
index 1eacb8f56ad..4c7608b6cce 100644
--- a/inference-engine/tools/benchmark_tool/README.md
+++ b/tools/benchmark_tool/README.md
@@ -2,7 +2,7 @@

 This topic demonstrates how to run the Benchmark Python* Tool, which performs inference using convolutional networks. Performance can be measured for two inference modes: synchronous (latency-oriented) and asynchronous (throughput-oriented).

-> **NOTE:** This topic describes usage of Python implementation of the Benchmark Tool. For the C++ implementation, refer to [Benchmark C++ Tool](../../samples/benchmark_app/README.md).
+> **NOTE:** This topic describes usage of Python implementation of the Benchmark Tool. For the C++ implementation, refer to [Benchmark C++ Tool](../../inference-engine/samples/benchmark_app/README.md).

 > **TIP**: You also can work with the Benchmark Tool inside the OpenVINO™ [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction) (DL Workbench).
 > [DL Workbench](@ref workbench_docs_Workbench_DG_Introduction) is a platform built upon OpenVINO™ and provides a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare
@@ -15,7 +15,7 @@ This topic demonstrates how to run the Benchmark Python* Tool, which performs in

 Upon start-up, the application reads command-line parameters and loads a network and images/binary files to the Inference Engine plugin, which is chosen depending on a specified device. The number of infer requests and execution approach depend on the mode defined with the `-api` command-line parameter.

-> **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
+> **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).

 ### Synchronous API

@@ -54,7 +54,7 @@ Notice that the benchmark_app usually produces optimal performance for any devic
 python3 benchmark_app.py -m <model> -i <input> -d CPU
 ```

-But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../../docs/IE_DG/Intro_to_Performance.md).
+But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../docs/IE_DG/Intro_to_Performance.md).

 Running the application with the `-h` or `--help`' option yields the following usage message:

@@ -147,7 +147,7 @@ If a model has mixed input types, input folder should contain all required files

 To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's](@ref omz_models_group_intel) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader).

-> **NOTE**: Before running the tool with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
+> **NOTE**: Before running the tool with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).

 ## Examples of Running the Tool
@@ -211,6 +211,6 @@ Below are fragments of sample output for CPU and FPGA devices:
 ```

 ## See Also
-* [Using Inference Engine Samples](../../../docs/IE_DG/Samples_Overview.md)
-* [Model Optimizer](../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
+* [Using Inference Engine Samples](../../docs/IE_DG/Samples_Overview.md)
+* [Model Optimizer](../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
 * [Model Downloader](@ref omz_tools_downloader)
diff --git a/inference-engine/tools/benchmark_tool/benchmark_app.py b/tools/benchmark_tool/benchmark_app.py
similarity index 100%
rename from inference-engine/tools/benchmark_tool/benchmark_app.py
rename to tools/benchmark_tool/benchmark_app.py
diff --git a/tools/benchmark_tool/openvino/tools/benchmark/README.md b/tools/benchmark_tool/openvino/tools/benchmark/README.md
index 16491d6c9b2..5bef2ffe498 100644
--- a/tools/benchmark_tool/openvino/tools/benchmark/README.md
+++ b/tools/benchmark_tool/openvino/tools/benchmark/README.md
@@ -8,7 +8,7 @@ Upon start-up, the application reads command-line parameters and loads a network
 plugin, which is chosen depending on a specified device. The number of infer requests and execution approach depend on the mode defined with the `-api` command-line parameter.

-> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
+> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../../../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).

 ### Synchronous API


@@ -47,7 +47,7 @@ Notice that the benchmark_app usually produces optimal performance for any devic
 $benchmark_app -m <model> -i <input> -d CPU
 ```

-But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](./docs/IE_DG/Intro_to_Performance.md).
+But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../../../../docs/IE_DG/Intro_to_Performance.md).

 Running the application with the `-h` or `--help`' option yields the following usage message:

@@ -158,7 +158,7 @@ If a model has mixed input types, input folder should contain all required files

 To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's](@ref omz_models_group_intel) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader).

-> **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
+> **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).

 For example, to do inference of an image using a trained network with multiple outputs on CPU, run the following command:

@@ -187,6 +187,6 @@ Throughput: 73.28 FPS
 ```

 ## See Also
-* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
-* [Model Optimizer](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
+* [Using Inference Engine Samples](../../../../../docs/IE_DG/Samples_Overview.md)
+* [Model Optimizer](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
 * [Model Downloader](https://github.com/openvinotoolkit/open_model_zoo/tree/2018/model_downloader)
diff --git a/tools/benchmark_tool/openvino/tools/benchmark/requirements.txt b/tools/benchmark_tool/openvino/tools/benchmark/requirements.txt
deleted file mode 100644
index 5fbd19cbc89..00000000000
--- a/tools/benchmark_tool/openvino/tools/benchmark/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-py-cpuinfo>=7.0.0
-numpy>=1.16.6,<1.20
-progress>=1.5
-opencv-python==4.5.*
\ No newline at end of file
diff --git a/inference-engine/tools/benchmark_tool/requirements.txt b/tools/benchmark_tool/requirements.txt
similarity index 100%
rename from inference-engine/tools/benchmark_tool/requirements.txt
rename to tools/benchmark_tool/requirements.txt
diff --git a/tools/benchmark_tool/setup.py b/tools/benchmark_tool/setup.py
index 31ce11e4a36..c696a999459 100644
--- a/tools/benchmark_tool/setup.py
+++ b/tools/benchmark_tool/setup.py
@@ -10,10 +10,10 @@ $ python setup.py sdist bdist_wheel
 """
 from setuptools import setup, find_packages

-with open('openvino/tools/benchmark/README.md', 'r', encoding='utf-8') as f:
+with open('README.md', 'r', encoding='utf-8') as f:
     long_description = f.read()

-with open('openvino/tools/benchmark/requirements.txt') as f:
+with open('requirements.txt') as f:
     required = f.read().splitlines()

 required.extend(['openvino'])
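Both README files touched by this patch describe the synchronous (latency-oriented) and asynchronous (throughput-oriented) modes selected with `-api`, but only in prose. The following is a minimal sketch of that distinction, assuming the Inference Engine Python API of this generation (`openvino.inference_engine`) and placeholder model files, device, and request count; it is not the benchmark_app implementation:

```
# Hedged sketch of sync vs. async inference; model paths, device, and request
# count are assumptions for illustration, not values taken from this patch.
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")
input_name = next(iter(net.input_info))
dummy = np.zeros(net.input_info[input_name].input_data.shape, dtype=np.float32)

# Synchronous (latency-oriented): a single request, each call blocks until done.
sync_net = ie.load_network(network=net, device_name="CPU", num_requests=1)
sync_net.infer({input_name: dummy})

# Asynchronous (throughput-oriented): several requests kept in flight at once.
async_net = ie.load_network(network=net, device_name="CPU", num_requests=4)
for request in async_net.requests:
    request.async_infer({input_name: dummy})
for request in async_net.requests:
    request.wait()  # block until this request finishes
```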