From 18d6ece4e4044d56bfe06b667e8e18f9bd8d7bf6 Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Thu, 1 Jun 2023 12:50:56 +0400
Subject: [PATCH] Deprecate ExecutableNetwork and InferRequest API (#17801)

* Deprecate ExecutableNetwork and InferRequest API
* Fixed some warnings
* Fixed some warnings
* Try to fix documentation
* Try to skip documentation warnings
---
 docs/snippets/gpu/custom_kernels_api.cpp      |  2 +
 docs/suppress_warnings.txt                    |  2 +
 samples/cpp/benchmark_app/main.cpp            |  4 +
 ...nfer_async_request_thread_safe_default.hpp |  3 +
 .../include/ie/cpp/ie_executable_network.hpp  | 16 ++-
 .../include/ie/cpp/ie_infer_request.hpp       | 19 +++-
 .../include/ie/cpp/ie_memory_state.hpp        | 15 ++-
 .../include/ie/ie_iexecutable_network.hpp     | 17 +++-
 .../include/ie/ie_iinfer_request.hpp          | 17 +++-
 src/inference/include/ie/ie_parallel.hpp      | 10 ++
 src/inference/include/ie/ie_plugin_config.hpp | 97 ++++++++++++++++---
 .../src/dev/threading/istreams_executor.cpp   |  1 +
 .../functional/async_infer_request_test.cpp   |  2 +
 .../tests/functional/variable_state.cpp       |  3 +
 14 files changed, 178 insertions(+), 30 deletions(-)

diff --git a/docs/snippets/gpu/custom_kernels_api.cpp b/docs/snippets/gpu/custom_kernels_api.cpp
index ccadfaa6abf..e244c6d47a4 100644
--- a/docs/snippets/gpu/custom_kernels_api.cpp
+++ b/docs/snippets/gpu/custom_kernels_api.cpp
@@ -13,11 +13,13 @@
 #endif
 
 int main() {
+    IE_SUPPRESS_DEPRECATED_START
     //! [part0]
     ov::Core core;
     // Load GPU Extensions
     core.set_property("GPU", {{ CONFIG_KEY(CONFIG_FILE), "" }});
     //! [part0]
+    IE_SUPPRESS_DEPRECATED_END
     return 0;
 }
diff --git a/docs/suppress_warnings.txt b/docs/suppress_warnings.txt
index ace3d1ce5d0..b741960b8fc 100644
--- a/docs/suppress_warnings.txt
+++ b/docs/suppress_warnings.txt
@@ -103,3 +103,5 @@ unexpected unindent
 failed to import object
 autosummary: stub file not found
 failed to parse name
+for undefined enum \'waitmode\' found
+internal inconsistency: scope for class inferenceengine
diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp
index 7e00e221fa7..d93462a81a9 100644
--- a/samples/cpp/benchmark_app/main.cpp
+++ b/samples/cpp/benchmark_app/main.cpp
@@ -309,6 +309,7 @@ int main(int argc, char* argv[]) {
         slog::info << "Extensions are loaded: " << FLAGS_extensions << slog::endl;
     }
 
+    OPENVINO_SUPPRESS_DEPRECATED_START
     // Load clDNN Extensions
     if ((FLAGS_d.find("GPU") != std::string::npos) && !FLAGS_c.empty()) {
         // Override config if command line parameter is specified
@@ -321,6 +322,7 @@
         core.set_property("GPU", {{CONFIG_KEY(CONFIG_FILE), ext}});
         slog::info << "GPU extensions are loaded: " << ext << slog::endl;
     }
+    OPENVINO_SUPPRESS_DEPRECATED_END
 
     slog::info << "OpenVINO:" << slog::endl;
     slog::info << ov::get_openvino_version() << slog::endl;
@@ -842,10 +844,12 @@ int main(int argc, char* argv[]) {
     for (auto& item : devices_properties) {
         slog::info << " " << item.first << ": " << slog::endl;
         for (auto& item2 : item.second.as()) {
+            OPENVINO_SUPPRESS_DEPRECATED_START
             if (item2.first == ov::supported_properties ||
                 item2.first == METRIC_KEY(SUPPORTED_CONFIG_KEYS) ||
                 item2.first == METRIC_KEY(SUPPORTED_METRICS))
                 continue;
+            OPENVINO_SUPPRESS_DEPRECATED_END
             slog::info << " " << item2.first << ": " << item2.second.as() << slog::endl;
         }
     }
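Note on the suppression macros used above: IE_SUPPRESS_DEPRECATED_START/END and OPENVINO_SUPPRESS_DEPRECATED_START/END bracket individual legacy calls so that builds with -Werror keep compiling while 1.0 API references remain. A minimal sketch of the intended usage (the body between the macros is a placeholder for any legacy call):

    #include <openvino/core/deprecated.hpp>
    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        OPENVINO_SUPPRESS_DEPRECATED_START
        // Calls into deprecated Inference Engine APIs go here; the macros
        // silence only deprecation diagnostics, other warnings stay active.
        OPENVINO_SUPPRESS_DEPRECATED_END
        return 0;
    }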
diff --git a/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp b/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp
index e0899c431ec..01c4b4a7342 100644
--- a/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp
+++ b/src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp
@@ -15,11 +15,13 @@
 #include 
 
 #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp"
+#include "ie_api.h"
 #include "threading/ie_immediate_executor.hpp"
 #include "threading/ie_istreams_executor.hpp"
 #include "threading/ie_itask_executor.hpp"
 
 namespace InferenceEngine {
+IE_SUPPRESS_DEPRECATED_START
 
 /**
  * @ingroup ie_dev_api_async_infer_request_api
@@ -459,4 +461,5 @@ private:
     Futures _futures;
     InferState _state = InferState::Idle;
 };
+IE_SUPPRESS_DEPRECATED_END
 }  // namespace InferenceEngine
diff --git a/src/inference/include/ie/cpp/ie_executable_network.hpp b/src/inference/include/ie/cpp/ie_executable_network.hpp
index b6f220eac42..bcdfd013000 100644
--- a/src/inference/include/ie/cpp/ie_executable_network.hpp
+++ b/src/inference/include/ie/cpp/ie_executable_network.hpp
@@ -10,6 +10,16 @@
 
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include 
 #include 
 #include 
@@ -32,7 +42,7 @@ class IExecutableNetworkInternal;
 /**
  * @brief This is an interface of an executable network
  */
-class INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) {
+class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) {
     std::shared_ptr _impl;
     std::shared_ptr _so;
 
@@ -180,7 +190,6 @@ public:
      */
     explicit operator bool() const noexcept;
 
-    IE_SUPPRESS_DEPRECATED_START
     /**
      * @deprecated The method Will be removed
      * @brief reset owned object to new pointer.
@@ -188,7 +197,6 @@ public:
      * Essential for cases when simultaneously loaded networks not expected.
     * @param newActual actual pointed object
      */
-    INFERENCE_ENGINE_DEPRECATED("The method will be removed")
     void reset(std::shared_ptr newActual);
 
     /**
@@ -196,7 +204,6 @@ public:
      * @brief cast operator is used when this wrapper initialized by LoadNetwork
     * @return A shared pointer to IExecutableNetwork interface.
      */
-    INFERENCE_ENGINE_DEPRECATED("The method will be removed. Use operator bool")
     operator std::shared_ptr();
 
     /**
@@ -206,7 +213,6 @@ public:
      * Wraps IExecutableNetwork::CreateInferRequest.
     * @return shared pointer on InferenceEngine::InferRequest object
      */
-    INFERENCE_ENGINE_DEPRECATED("Use ExecutableNetwork::CreateInferRequest instead")
     InferRequest::Ptr CreateInferRequestPtr();
 };
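For readers migrating off the wrapper deprecated above: ov::CompiledModel is the 2.0 counterpart of ExecutableNetwork, and create_infer_request() replaces CreateInferRequest/CreateInferRequestPtr. A minimal sketch, assuming a placeholder "model.xml" on disk:

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");  // placeholder path
        ov::CompiledModel compiled = core.compile_model(model, "CPU");
        ov::InferRequest request = compiled.create_infer_request();
        request.infer();
        return 0;
    }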
diff --git a/src/inference/include/ie/cpp/ie_infer_request.hpp b/src/inference/include/ie/cpp/ie_infer_request.hpp
index 70e4cd40a2f..c4154dd2dbc 100644
--- a/src/inference/include/ie/cpp/ie_infer_request.hpp
+++ b/src/inference/include/ie/cpp/ie_infer_request.hpp
@@ -9,6 +9,16 @@
  */
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include 
 #include 
 #include 
@@ -31,7 +41,7 @@ class ICompletionCallbackWrapper;
  * Wraps IInferRequest
  * It can throw exceptions safely for the application, where it is properly handled.
  */
-class INFERENCE_ENGINE_API_CLASS(InferRequest) {
+class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(InferRequest) {
     std::shared_ptr _impl;
     std::shared_ptr _so;
 
@@ -49,7 +59,7 @@ public:
     /**
      * @enum WaitMode
      * @brief Enumeration to hold wait mode for IInferRequest
      */
-    enum WaitMode : int64_t {
+    enum INFERENCE_ENGINE_1_0_DEPRECATED WaitMode : int64_t {
         /** Wait until inference result becomes available */
         RESULT_READY = -1,
         /** IInferRequest doesn't block or interrupt current thread and immediately returns inference status */
@@ -219,6 +229,7 @@ public:
         SetCallback{*this}(std::move(callbackToSet));
     }
 
+    IE_SUPPRESS_DEPRECATED_START
     /**
      * @brief Gets state control interface for given infer request.
      *
@@ -227,7 +238,6 @@ public:
      */
     std::vector QueryState();
 
-    IE_SUPPRESS_DEPRECATED_START
     /**
      * @brief IInferRequest pointer to be used directly in CreateInferRequest functions
      * @return A shared pointer to IInferRequest interface
@@ -261,6 +271,7 @@ public:
     bool operator==(const InferRequest&) const noexcept;
 };
 
+IE_SUPPRESS_DEPRECATED_START
 /**
  * @private
  */
@@ -272,8 +283,6 @@ struct InferRequest::SetCallback>
     InferRequest& _this;
 };
 
-IE_SUPPRESS_DEPRECATED_START
-
 /**
  * @private
 */
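The WaitMode enum deprecated above has no direct 2.0 equivalent; instead, ov::InferRequest exposes explicit calls: wait() blocks like RESULT_READY, wait_for() with a zero timeout approximates STATUS_ONLY polling, and completion callbacks receive a std::exception_ptr. A sketch under those assumptions (the request is created as in the previous snippet):

    #include <chrono>
    #include <exception>
    #include <openvino/openvino.hpp>

    void run_async(ov::InferRequest& request) {
        request.set_callback([](std::exception_ptr ex) {
            if (ex)
                std::rethrow_exception(ex);  // inference finished with an error
        });
        request.start_async();
        bool done = request.wait_for(std::chrono::milliseconds(0));  // ~STATUS_ONLY
        (void)done;
        request.wait();  // ~RESULT_READY
    }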
diff --git a/src/inference/include/ie/cpp/ie_memory_state.hpp b/src/inference/include/ie/cpp/ie_memory_state.hpp
index 504d4adf473..bef706632d4 100644
--- a/src/inference/include/ie/cpp/ie_memory_state.hpp
+++ b/src/inference/include/ie/cpp/ie_memory_state.hpp
@@ -10,6 +10,16 @@
 
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include 
 #include 
 
@@ -19,11 +29,12 @@ namespace InferenceEngine {
 
 class IVariableStateInternal;
+IE_SUPPRESS_DEPRECATED_START
 
 /**
  * @brief VariableState class
 */
-class INFERENCE_ENGINE_API_CLASS(VariableState) {
+class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(VariableState) {
     std::shared_ptr _impl;
     std::shared_ptr _so;
 
@@ -95,4 +106,6 @@ public:
  */
 using MemoryState = VariableState;
 
+IE_SUPPRESS_DEPRECATED_END
+
 }  // namespace InferenceEngine
diff --git a/src/inference/include/ie/ie_iexecutable_network.hpp b/src/inference/include/ie/ie_iexecutable_network.hpp
index c1299fea0c1..be826d107aa 100644
--- a/src/inference/include/ie/ie_iexecutable_network.hpp
+++ b/src/inference/include/ie/ie_iexecutable_network.hpp
@@ -9,6 +9,16 @@
  */
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include 
 #include 
 #include 
@@ -24,13 +34,12 @@
 
 namespace InferenceEngine {
 
-_IE_SUPPRESS_DEPRECATED_START_GCC
+IE_SUPPRESS_DEPRECATED_START
 
 /**
  * @brief This is an interface of an executable network
 */
-class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::ExecutableNetwork instead") IExecutableNetwork
-    : public std::enable_shared_from_this {
+class INFERENCE_ENGINE_1_0_DEPRECATED IExecutableNetwork : public std::enable_shared_from_this {
 public:
     IE_SUPPRESS_DEPRECATED_START
     /**
@@ -162,6 +171,6 @@ protected:
     virtual ~IExecutableNetwork() = default;
 };
 
-_IE_SUPPRESS_DEPRECATED_END_GCC
+IE_SUPPRESS_DEPRECATED_END
 
 }  // namespace InferenceEngine
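VariableState, deprecated above, maps directly onto ov::VariableState: ov::InferRequest::query_state() returns the states, with the same get_name()/reset() semantics. A minimal sketch:

    #include <iostream>
    #include <openvino/openvino.hpp>

    void reset_states(ov::InferRequest& request) {
        for (ov::VariableState& state : request.query_state()) {
            std::cout << "resetting state: " << state.get_name() << '\n';
            state.reset();  // returns the variable to its initial value
        }
    }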
diff --git a/src/inference/include/ie/ie_iinfer_request.hpp b/src/inference/include/ie/ie_iinfer_request.hpp
index 99371c31a9c..7d5a33b4513 100644
--- a/src/inference/include/ie/ie_iinfer_request.hpp
+++ b/src/inference/include/ie/ie_iinfer_request.hpp
@@ -10,6 +10,16 @@
 
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include 
 #include 
 #include 
@@ -20,14 +30,13 @@
 
 namespace InferenceEngine {
 
-_IE_SUPPRESS_DEPRECATED_START_GCC
+IE_SUPPRESS_DEPRECATED_START
 
 /**
  * @deprecated Use InferenceEngine::InferRequest C++ wrapper
 * @brief This is an interface of asynchronous infer request
 */
-class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::InferRequest C++ wrapper") IInferRequest
-    : public std::enable_shared_from_this {
+class INFERENCE_ENGINE_1_0_DEPRECATED IInferRequest : public std::enable_shared_from_this {
 public:
     /**
      * @enum WaitMode
@@ -207,6 +216,6 @@ protected:
     virtual ~IInferRequest() = default;
 };
 
-_IE_SUPPRESS_DEPRECATED_END_GCC
+IE_SUPPRESS_DEPRECATED_END
 
 }  // namespace InferenceEngine
diff --git a/src/inference/include/ie/ie_parallel.hpp b/src/inference/include/ie/ie_parallel.hpp
index 9bad1d7b8c3..21dfc6d0e1c 100644
--- a/src/inference/include/ie/ie_parallel.hpp
+++ b/src/inference/include/ie/ie_parallel.hpp
@@ -14,6 +14,16 @@
 
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include "openvino/core/parallel.hpp"
 
 #define IE_THREAD_TBB OV_THREAD_TBB
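Since ie_parallel.hpp is now a thin forwarder to openvino/core/parallel.hpp, backend checks can target the OV_* macros directly; only the OV_THREAD_TBB mapping is shown in the hunk above, so treat the other OV_THREAD_* names below as assumptions mirroring their IE_THREAD_* counterparts:

    #include <openvino/core/parallel.hpp>

    const char* threading_backend() {
    #if OV_THREAD == OV_THREAD_TBB
        return "TBB";
    #elif OV_THREAD == OV_THREAD_OMP
        return "OpenMP";
    #else
        return "sequential";
    #endif
    }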
diff --git a/src/inference/include/ie/ie_plugin_config.hpp b/src/inference/include/ie/ie_plugin_config.hpp
index 8894fe5acd7..ec513f2d5ee 100644
--- a/src/inference/include/ie/ie_plugin_config.hpp
+++ b/src/inference/include/ie/ie_plugin_config.hpp
@@ -10,6 +10,16 @@
  */
 #pragma once
 
+#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
+#    define IE_LEGACY_HEADER_INCLUDED
+#    ifdef _MSC_VER
+#        pragma message( \
+            "The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    else
+#        warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
+#    endif
+#endif
+
 #include 
 #include 
 #include 
@@ -18,6 +28,7 @@
 #include "ie_precision.hpp"
 
 namespace InferenceEngine {
+IE_SUPPRESS_DEPRECATED_START
 
 /**
  * @brief %Metrics
 */
 namespace Metrics {
 
 /**
  * @brief Metric to get a std::vector of available device IDs. String value is "AVAILABLE_DEVICES"
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(AVAILABLE_DEVICES, std::vector);
 
 /**
@@ -67,6 +79,7 @@ DECLARE_METRIC_KEY(AVAILABLE_DEVICES, std::vector);
  * can be passed to ExecutableNetwork::GetMetric.
 *
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(SUPPORTED_METRICS, std::vector);
 
 /**
@@ -79,11 +92,13 @@ DECLARE_METRIC_KEY(SUPPORTED_METRICS, std::vector);
  * ExecutableNetwork::GetConfig.
 *
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(SUPPORTED_CONFIG_KEYS, std::vector);
 
 /**
  * @brief Metric to get a std::string value representing a full device name. String value is "FULL_DEVICE_NAME"
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(FULL_DEVICE_NAME, std::string);
 
 /**
@@ -99,14 +114,22 @@ DECLARE_METRIC_KEY(FULL_DEVICE_NAME, std::string);
  * - "WINOGRAD" - device can support models where convolution implemented via Winograd transformations
 * - "BATCHED_BLOB" - device can support BatchedBlob
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(OPTIMIZATION_CAPABILITIES, std::vector);
 
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(FP32);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(BF16);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(FP16);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(INT8);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(BIN);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(WINOGRAD);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_VALUE(BATCHED_BLOB);
 
 /**
@@ -117,6 +140,7 @@ DECLARE_METRIC_VALUE(BATCHED_BLOB);
  * - Second value is upper bound.
 * String value for metric name is "RANGE_FOR_STREAMS".
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(RANGE_FOR_STREAMS, std::tuple);
 /**
  * @brief Metric to query information optimal batch size for the given device and the network
@@ -129,6 +153,7 @@
  * so that the result (>1) governs the automatic batching (transparently to the application).
 * The automatic batching can be disabled with ALLOW_AUTO_BATCHING set to NO
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(OPTIMAL_BATCH_SIZE, unsigned int);
 
 /**
@@ -139,6 +164,7 @@ DECLARE_METRIC_KEY(OPTIMAL_BATCH_SIZE, unsigned int);
  * Also, MODEL_PTR is the required option for this metric since the available max batch size depends on the model size.
 * If the MODEL_PTR is not given, it will return 1.
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(MAX_BATCH_SIZE, unsigned int);
 
 /**
@@ -151,6 +177,7 @@ DECLARE_METRIC_KEY(MAX_BATCH_SIZE, unsigned int);
  * - Third value is step inside this range.
 * String value for metric name is "RANGE_FOR_ASYNC_INFER_REQUESTS".
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS, std::tuple);
 
 /**
@@ -158,6 +185,7 @@ DECLARE_METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS, std::tuple);
 /**
  * @brief Metric which defines support of import/export functionality by plugin
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(IMPORT_EXPORT_SUPPORT, bool);
 
 /**
  * @brief Metric to get a name of network. String value is "NETWORK_NAME".
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_EXEC_NETWORK_METRIC_KEY(NETWORK_NAME, std::string);
 
 /**
  * @brief Metric to get a float of device thermal. String value is "DEVICE_THERMAL"
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_METRIC_KEY(DEVICE_THERMAL, float);
 
 /**
  * @brief Metric to get an unsigned integer value of optimal number of executable network infer requests.
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS, unsigned int);
 
 }  // namespace Metrics
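Each metric marked INFERENCE_ENGINE_1_0_DEPRECATED above has a typed property in the 2.0 API, so string-keyed GetMetric lookups become get_property calls. An illustrative sketch with "CPU" as the example device:

    #include <openvino/openvino.hpp>

    void query_device(ov::Core& core, ov::CompiledModel& compiled) {
        auto name = core.get_property("CPU", ov::device::full_name);     // FULL_DEVICE_NAME
        auto caps = core.get_property("CPU", ov::device::capabilities);  // OPTIMIZATION_CAPABILITIES
        auto streams = core.get_property("CPU", ov::range_for_streams);  // RANGE_FOR_STREAMS
        auto nireq = compiled.get_property(ov::optimal_number_of_infer_requests);
        (void)name; (void)caps; (void)streams; (void)nireq;
    }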
@@ -255,9 +293,13 @@ namespace PluginConfigParams {
 /**
  * @brief (Optional) config key that defines what model should be provided with more performant bounded resource first
 * It provides 3 types of levels: High, Medium and Low. The default value is Medium
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(MODEL_PRIORITY);
 
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(MODEL_PRIORITY_HIGH);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(MODEL_PRIORITY_MED);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(MODEL_PRIORITY_LOW);
 
 /**
@@ -265,40 +307,52 @@ DECLARE_CONFIG_VALUE(MODEL_PRIORITY_LOW);
  * unlike low-level config keys that are individual (per-device), the hints are smth that every device accepts
 * and turns into device-specific settings
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(PERFORMANCE_HINT);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(LATENCY);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(THROUGHPUT);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(UNDEFINED);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(CUMULATIVE_THROUGHPUT);
 
 /**
  * @brief (Optional) config key that backs the (above) Performance Hints
 * by giving additional information on how many inference requests the application will be keeping in flight
 * usually this value comes from the actual use-case (e.g. number of video-cameras, or other sources of inputs)
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS);
 
 /**
  * @brief (Optional) config key that governs Auto-Batching (with YES/NO values, below)
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(ALLOW_AUTO_BATCHING);
 
 /**
  * @brief generic boolean values
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(YES);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(NO);
 
 /**
  * @brief Auto-batching configuration, string for the device + batch size, e.g. "GPU(4)"
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG);
 
 /**
  * @brief Auto-batching configuration: string with timeout (in ms), e.g. "100"
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(AUTO_BATCH_TIMEOUT);
 
 /**
  * @brief Limit `#threads` that are used by Inference Engine for inference on the CPU.
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(CPU_THREADS_NUM);
 
 /**
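The PERFORMANCE_HINT family deprecated above corresponds to the ov::hint properties, which can be passed straight to compile_model. A sketch (the device and the values are illustrative):

    #include <memory>
    #include <openvino/openvino.hpp>

    ov::CompiledModel compile_for_throughput(ov::Core& core, const std::shared_ptr<ov::Model>& model) {
        return core.compile_model(model, "CPU",
                                  ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
                                  ov::hint::num_requests(4),              // PERFORMANCE_HINT_NUM_REQUESTS
                                  ov::hint::allow_auto_batching(false));  // ALLOW_AUTO_BATCHING
    }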
@@ -318,8 +372,11 @@ DECLARE_CONFIG_KEY(CPU_THREADS_NUM);
  * Also, the settings are ignored, if the OpenVINO compiled with OpenMP and any affinity-related OpenMP's
 * environment variable is set (as affinity is configured explicitly)
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(CPU_BIND_THREAD);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(NUMA);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(HYBRID_AWARE);
 
 /**
@@ -332,8 +389,11 @@ DECLARE_CONFIG_VALUE(HYBRID_AWARE);
  * (and what is the optimal number of streams)
 * - finally, specifying the positive integer value creates the requested number of streams
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(CPU_THROUGHPUT_STREAMS);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_NUMA);
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_AUTO);
 
 /**
@@ -342,6 +402,7 @@ DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_AUTO);
  * It is passed to Core::SetConfig(), this option should be used with values:
 * PluginConfigParams::YES or PluginConfigParams::NO
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(PERF_COUNT);
 
 /**
@@ -373,6 +434,7 @@ DECLARE_CONFIG_KEY(DYN_BATCH_ENABLED);
  *
 * The value should be a file name with the plugin specific configuration
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(CONFIG_FILE);
 
 /**
@@ -382,23 +444,31 @@ DECLARE_CONFIG_KEY(CONFIG_FILE);
  * PluginConfigParams::LOG_ERROR, PluginConfigParams::LOG_WARNING,
 * PluginConfigParams::LOG_INFO, PluginConfigParams::LOG_DEBUG, PluginConfigParams::LOG_TRACE
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(LOG_LEVEL);
 
-DECLARE_CONFIG_VALUE(LOG_NONE);   // turn off logging
-DECLARE_CONFIG_VALUE(LOG_ERROR);  // error events that might still allow the
-                                  // application to continue running
+INFERENCE_ENGINE_1_0_DEPRECATED
+DECLARE_CONFIG_VALUE(LOG_NONE);   // turn off logging
+INFERENCE_ENGINE_1_0_DEPRECATED
+DECLARE_CONFIG_VALUE(LOG_ERROR);  // error events that might still allow the
+                                  // application to continue running
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_VALUE(LOG_WARNING);  // potentially harmful situations which may
                                     // further lead to ERROR
-DECLARE_CONFIG_VALUE(LOG_INFO);   // informational messages that display the progress of the
-                                  // application at coarse-grained level
-DECLARE_CONFIG_VALUE(LOG_DEBUG);  // fine-grained events that are most useful to
-                                  // debug an application.
-DECLARE_CONFIG_VALUE(LOG_TRACE);  // finer-grained informational events than the DEBUG
+INFERENCE_ENGINE_1_0_DEPRECATED
+DECLARE_CONFIG_VALUE(LOG_INFO);   // informational messages that display the progress of the
+                                  // application at coarse-grained level
+INFERENCE_ENGINE_1_0_DEPRECATED
+DECLARE_CONFIG_VALUE(LOG_DEBUG);  // fine-grained events that are most useful to
+                                  // debug an application.
+INFERENCE_ENGINE_1_0_DEPRECATED
+DECLARE_CONFIG_VALUE(LOG_TRACE);  // finer-grained informational events than the DEBUG
 
 /**
  * @brief the key for setting of required device to execute on
 * values: device id starts from "0" - first device, "1" - second device, etc
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(DEVICE_ID);
 
 /**
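Likewise, the CPU and logging keys in this stretch have 2.0 property equivalents; which of them a particular plugin actually accepts varies, so treat the set below as illustrative rather than guaranteed:

    #include <openvino/openvino.hpp>

    void configure_cpu(ov::Core& core) {
        core.set_property("CPU", ov::num_streams(4));            // CPU_THROUGHPUT_STREAMS
        core.set_property("CPU", ov::inference_num_threads(8));  // CPU_THREADS_NUM
        core.set_property("CPU", ov::enable_profiling(true));    // PERF_COUNT
        core.set_property("CPU", ov::log::level(ov::log::Level::WARNING));  // LOG_LEVEL, if supported
    }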
@@ -411,6 +481,7 @@ DECLARE_CONFIG_KEY(DEVICE_ID);
  * By default, the option is set to YES for hetero cases, and to NO for conventional (single-plugin) cases
 * Notice that setting YES disables the CPU streams feature (see another config key in this file)
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS);
 
 /**
@@ -421,7 +492,7 @@ DECLARE_CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS);
  * corresponding configuration information. Value is a name of output dot file without extension.
 * Files `_init.dot` and `_perf.dot` will be produced.
 */
-INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::ExecutableNetwork::GetExecGraphInfo::serialize method")
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(DUMP_EXEC_GRAPH_AS_DOT);
 
 /**
@@ -433,6 +504,7 @@ DECLARE_CONFIG_KEY(DUMP_EXEC_GRAPH_AS_DOT);
  * verified separately by the user and basing on performance and accuracy results it should be
 * user's decision to use this option or not to use
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(ENFORCE_BF16);
 
 /**
@@ -453,6 +525,7 @@ DECLARE_CONFIG_KEY(ENFORCE_BF16);
  * ie.SetConfig({{CONFIG_KEY(CACHE_DIR), "cache/"}}); // enables models cache
 * @endcode
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(CACHE_DIR);
 
 /**
@@ -466,6 +539,7 @@ DECLARE_CONFIG_KEY(CACHE_DIR);
  * ie.SetConfig({{CONFIG_KEY(FORCE_TBB_TERMINATE), CONFIG_VALUE(YES)}}); // enable
 * @endcode
 */
+INFERENCE_ENGINE_1_0_DEPRECATED
 DECLARE_CONFIG_KEY(FORCE_TBB_TERMINATE);
 
 }  // namespace PluginConfigParams
@@ -478,6 +552,7 @@ DECLARE_CONFIG_KEY(FORCE_TBB_TERMINATE);
 
 #define DECLARE_AUTO_CONFIG_KEY(name) DECLARE_CONFIG_KEY(AUTO_##name)
 
+IE_SUPPRESS_DEPRECATED_END
 }  // namespace InferenceEngine
 
 #include "gpu/gpu_config.hpp"
diff --git a/src/inference/src/dev/threading/istreams_executor.cpp b/src/inference/src/dev/threading/istreams_executor.cpp
index e7858614ad0..12d6cd281b3 100644
--- a/src/inference/src/dev/threading/istreams_executor.cpp
+++ b/src/inference/src/dev/threading/istreams_executor.cpp
@@ -19,6 +19,7 @@
 
 namespace ov {
 namespace threading {
+OPENVINO_SUPPRESS_DEPRECATED_START
 
 IStreamsExecutor::~IStreamsExecutor() {}
 
diff --git a/src/inference/tests/functional/async_infer_request_test.cpp b/src/inference/tests/functional/async_infer_request_test.cpp
index 540c7c0b725..b51ec2b4095 100644
--- a/src/inference/tests/functional/async_infer_request_test.cpp
+++ b/src/inference/tests/functional/async_infer_request_test.cpp
@@ -11,6 +11,8 @@
 using namespace std;
 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
 
+IE_SUPPRESS_DEPRECATED_START
+
 TEST(InferRequestCPPTests, throwsOnUninitializedSetBlob) {
     InferRequest req;
     ASSERT_THROW(req.SetBlob({}, {}), InferenceEngine::NotAllocated);
 }
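The CACHE_DIR and DEVICE_ID keys deprecated above translate to ov::cache_dir and to device addressing by name in the 2.0 API. A sketch where "GPU.0" and "model.xml" are placeholders:

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        core.set_property(ov::cache_dir("cache/"));                // CONFIG_KEY(CACHE_DIR)
        auto compiled = core.compile_model("model.xml", "GPU.0");  // device id 0 via the device name
        return 0;
    }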
diff --git a/src/inference/tests/functional/variable_state.cpp b/src/inference/tests/functional/variable_state.cpp
index 25b9c553131..5d23b4b7132 100644
--- a/src/inference/tests/functional/variable_state.cpp
+++ b/src/inference/tests/functional/variable_state.cpp
@@ -6,10 +6,13 @@
 
 #include 
 
+#include "openvino/core/deprecated.hpp"
+
 using namespace ::testing;
 using namespace std;
 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
 
+OPENVINO_SUPPRESS_DEPRECATED_START
 
 TEST(VariableStateCPPTests, throwsOnUninitializedReset) {
     VariableState req;