From 7e709a0ecceb3348d399505abfe4c389a9e3ca6f Mon Sep 17 00:00:00 2001
From: Zlobin Vladimir
Date: Mon, 2 Oct 2023 13:50:49 +0400
Subject: [PATCH] benchmark: remove deprecation notice (#20175)

Python version didn't mark -api as deprecated
---
 samples/cpp/benchmark_app/README.md         | 2 +-
 samples/cpp/benchmark_app/benchmark_app.hpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md
index 76a1697aec4..168d3b85ba8 100644
--- a/samples/cpp/benchmark_app/README.md
+++ b/samples/cpp/benchmark_app/README.md
@@ -190,7 +190,7 @@ Running the application with the ``-h`` or ``--help`` option yields the followin
     -c                        Required for GPU custom kernels. Absolute path to an .xml file with the kernels description.
     -cache_dir                Optional. Enables caching of loaded models to specified directory. List of devices which support caching is shown at the end of this message.
     -load_from_file           Optional. Loads model from file directly without read_model. All CNNNetwork options (like re-shape) will be ignored
-    -api                      Optional (deprecated). Enable Sync/Async API. Default value is "async".
+    -api                      Optional. Enable Sync/Async API. Default value is "async".
     -nireq                    Optional. Number of infer requests. Default value is determined automatically for device.
     -nstreams                 Optional. Number of streams to use for inference on the CPU or GPU devices (for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>). Default value is determined automatically for a device. Please note that although the automatic selection usually provides a reasonable performance, it still may be non-optimal for some cases, especially for very small models. See sample's README for more details. Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams should be set to 1.
     -inference_only           Optional. Measure only inference stage. Default option for static models. Dynamic models are measured in full mode which includes inputs setup stage, inference only mode available for them with single input data shape only. To enable full mode for static models pass "false" value to this argument: ex. "-inference_only=false".
diff --git a/samples/cpp/benchmark_app/benchmark_app.hpp b/samples/cpp/benchmark_app/benchmark_app.hpp
index e168445c61b..6ae1ec072c3 100644
--- a/samples/cpp/benchmark_app/benchmark_app.hpp
+++ b/samples/cpp/benchmark_app/benchmark_app.hpp
@@ -96,7 +96,7 @@ static const char layout_message[] =
     "For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size.";
 
 /// @brief message for execution mode
-static const char api_message[] = "Optional (deprecated). Enable Sync/Async API. Default value is \"async\".";
+static const char api_message[] = "Optional. Enable Sync/Async API. Default value is \"async\".";
 
 /// @brief message for #streams for CPU inference
 static const char infer_num_streams_message[] =
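
For context on what the now-undeprecated ``-api`` switch selects, below is a minimal sketch (not part of the patch) of sync versus async inference in the OpenVINO 2.0 C++ API; the model path and device name are placeholders:

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // "model.xml" and "CPU" are placeholders, not values from the patch.
    auto model = core.read_model("model.xml");
    auto compiled = core.compile_model(model, "CPU");
    auto request = compiled.create_infer_request();

    // -api sync: each inference call blocks until its result is ready.
    request.infer();

    // -api async (the default): start the request and collect the result
    // later, which allows several requests (see -nireq) to be in flight.
    request.start_async();
    request.wait();
    return 0;
}
```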