Deprecate legacy Core and Allocator (#17646)

* Deprecate legacy Core and Allocator

* Suppress blob warnings

* Suppress some warnings

* Suppress more warnings

* Suppress warnings in blob allocator

* Suppress more warnings

* Suppress more warnings

* Fixed compilation issues for Template plugin

* Fixed some warnings

* Fixed tests

* Add workaround for benchmark_app

* Suppress #warning for developer package

* Rename define

* Disable warnings for compile_tool and benchmark_app

* Suppress Windows warnings

* Suppress more warnings for Windows

* Fixed compile_tool install

* Added deprecation message for Visual Studio

* Fixed snippets and throw only the first error
Ilya Churaev 2023-05-26 07:06:03 +04:00 committed by GitHub
parent ef041565a8
commit dd0060a582
54 changed files with 294 additions and 277 deletions
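
The change relies on one guard pattern throughout: a legacy header emits a deprecation #warning (or a #pragma message under MSVC) unless IN_OV_COMPONENT or IE_LEGACY_HEADER_INCLUDED is defined, and the deprecated classes themselves are marked with INFERENCE_ENGINE_1_0_DEPRECATED. A minimal sketch of how a consumer that still compiles against the legacy API can keep building warning-free, assuming an ordinary translation unit outside the OpenVINO tree:

#ifndef IN_OV_COMPONENT
#    define IN_OV_COMPONENT        // silence the "header is deprecated" #warning / #pragma message
#    define WAS_OV_LIBRARY_DEFINED // remember that this translation unit defined the guard itself
#endif
#include <ie_core.hpp>
#ifdef WAS_OV_LIBRARY_DEFINED
#    undef IN_OV_COMPONENT
#    undef WAS_OV_LIBRARY_DEFINED
#endif

int main() {
    IE_SUPPRESS_DEPRECATED_START   // silence -Wdeprecated-declarations for the deprecated classes
    InferenceEngine::Core core;    // deprecated since this change; ov::Core is the replacement
    IE_SUPPRESS_DEPRECATED_END
    return 0;
}

The same idea is applied below to the documentation snippets, the Template plugin, benchmark_app, and compile_tool, either with this include guard or with add_definitions(-DIN_OV_COMPONENT) on the CMake side.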

View File

@ -54,6 +54,8 @@ macro(ov_deprecated_no_errors)
endif()
elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
# Suppress #warning messages
set(ie_c_cxx_deprecated_no_errors "${ie_c_cxx_deprecated_no_errors} -Wno-cpp")
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()

View File

@ -1,30 +0,0 @@
#include <ie_core.hpp>
int main() {
{
//! [part1]
// Inference Engine API
InferenceEngine::Core ie;
// Read a network in IR, PaddlePaddle, or ONNX format:
InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
// Load a network to AUTO using the default list of device candidates.
// The following lines are equivalent:
InferenceEngine::ExecutableNetwork exec0 = ie.LoadNetwork(network);
InferenceEngine::ExecutableNetwork exec1 = ie.LoadNetwork(network, "AUTO");
InferenceEngine::ExecutableNetwork exec2 = ie.LoadNetwork(network, "AUTO", {});
// Optional
// You can also specify the devices to be used by AUTO in its selection process.
// The following lines are equivalent:
InferenceEngine::ExecutableNetwork exec3 = ie.LoadNetwork(network, "AUTO:GPU,CPU");
InferenceEngine::ExecutableNetwork exec4 = ie.LoadNetwork(network, "AUTO", {{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}});
// Optional
// the AUTO plugin is pre-configured (globally) with the explicit option:
ie.SetConfig({{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}, "AUTO");
//! [part1]
}
return 0;
}
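
The deleted snippet above shows the legacy AUTO flow; a hedged sketch of its OpenVINO 2.0 counterpart (the file name and device list are placeholders carried over from the deleted code):

#include <openvino/openvino.hpp>
int main() {
    ov::Core core;
    std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
    // Compile with the default device selection or explicitly on AUTO:
    ov::CompiledModel compiled0 = core.compile_model(model);
    ov::CompiledModel compiled1 = core.compile_model(model, "AUTO");
    // Optionally restrict the devices AUTO selects from:
    ov::CompiledModel compiled2 = core.compile_model(model, "AUTO", ov::device::priorities("GPU,CPU"));
    // Or pre-configure the AUTO plugin globally:
    core.set_property("AUTO", ov::device::priorities("GPU,CPU"));
    return 0;
}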

View File

@ -1,12 +0,0 @@
#include <ie_core.hpp>
int main() {
{
//! [part2]
InferenceEngine::Core ie;
InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "AUTO");
//! [part2]
}
return 0;
}

View File

@ -1,14 +0,0 @@
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;
//! [part0]
InferenceEngine::Core core;
// Load CPU extension as a shared library
auto extension_ptr = std::make_shared<InferenceEngine::Extension>(std::string{"<shared lib path>"});
// Add extension to the CPU device
core.AddExtension(extension_ptr, "CPU");
//! [part0]
return 0;
}

View File

@ -1,27 +0,0 @@
#include <openvino/openvino.hpp>
int main() {
//! [part1]
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
ov::CompiledModel compileModel = core.compile_model(model, "MULTI:CPU,GPU");
// Once the priority list is set, you can alter it on the fly:
// reverse the order of priorities
compileModel.set_property(ov::device::priorities("GPU,CPU"));
// exclude some devices (in this case, CPU)
compileModel.set_property(ov::device::priorities("GPU"));
// bring back the excluded devices
compileModel.set_property(ov::device::priorities("GPU,CPU"));
// You cannot add new devices on the fly!
// Attempting to do so will trigger the following exception:
// [ ERROR ] [NOT_FOUND] You can only change device
// priorities but not add new devices with the model's
// ov::device::priorities. CPU device was not in the original device list!
//! [part1]
return 0;
}

View File

@ -1,7 +1,6 @@
#include <openvino/runtime/core.hpp>
int main() {
using namespace InferenceEngine;
//! [part1]
ov::Core core;
auto network = core.read_model("sample.xml");

View File

@ -1,7 +1,6 @@
#include <openvino/runtime/core.hpp>
int main() {
using namespace InferenceEngine;
//! [part2]
ov::Core core;
core.set_property("CPU", ov::hint::inference_precision(ov::element::f32));

View File

@ -1,5 +1,3 @@
#include <ie_core.hpp>
int main() {
//! [part8]
while(true) {

View File

@ -1,5 +1,3 @@
#include <ie_core.hpp>
int main() {
//! [part9]
while(true) {

View File

@ -2,10 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <threading/ie_itask_executor.hpp>
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include <memory>
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
using namespace InferenceEngine;
class AcceleratorSyncRequest : public IInferRequestInternal {

View File

@ -2,8 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <threading/ie_cpu_streams_executor.hpp>
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
#include <memory>
#include <future>
#include <iostream>

View File

@ -1,155 +0,0 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <ngraph/rt_info.hpp>
#include <openvino/pass/pattern/op/wrap_type.hpp>
#include <openvino/pass/manager.hpp>
#include <openvino/pass/visualize_tree.hpp>
#include <openvino/pass/serialize.hpp>
#include <transformations/common_optimizations/common_optimizations.hpp>
#include <transformations/op_conversions/convert_gelu.hpp>
#include <transformations/op_conversions/convert_space_to_depth.hpp>
#include <transformations/op_conversions/convert_depth_to_space.hpp>
#include <transformations/op_conversions/convert_pad_to_group_conv.hpp>
// ! [ov:include]
#include <openvino/core/model.hpp>
#include <openvino/opsets/opset8.hpp>
// ! [ov:include]
bool ngraph_api_examples(std::shared_ptr<ov::Node> node) {
{
// ! [ngraph:ports_example]
// Let's suppose that node is an opset8::Convolution operation;
// as we know, opset8::Convolution has two input ports (data, weights) and one output port
ov::Input<ov::Node> data = node->input(0);
ov::Input<ov::Node> weights = node->input(1);
ov::Output<ov::Node> output = node->output(0);
// Getting shape and type
auto pshape = data.get_partial_shape();
auto el_type = data.get_element_type();
// Getting parent for input port
ov::Output<ov::Node> parent_output;
parent_output = data.get_source_output();
// Another short way to get the parent for the input port
parent_output = node->input_value(0);
// Getting all consumers for output port
auto consumers = output.get_target_inputs();
// ! [ngraph:ports_example]
(void)el_type;
(void)pshape;
}
{
// ! [ngraph:shape_check]
auto partial_shape = node->input(0).get_partial_shape(); // get zero input partial shape
// Check that input shape rank is static
if (!partial_shape.rank().is_static()) {
return false;
}
auto rank_size = partial_shape.rank().get_length();
// Check that second dimension is not dynamic
if (rank_size < 2 || partial_shape[1].is_dynamic()) {
return false;
}
auto dim = partial_shape[1].get_length();
// ! [ngraph:shape_check]
}
return true;
}
// ! [ov:serialize]
void serialize_example(std::shared_ptr<ov::Model> f) {
ov::pass::Manager manager;
// Serialize ov::Function to before.svg file before transformation
manager.register_pass<ov::pass::VisualizeTree>("/path/to/file/before.svg");
// Run your transformation
// manager.register_pass<ov::pass::MyTransformation>();
// Serialize ov::Function to after.svg file after transformation
manager.register_pass<ov::pass::VisualizeTree>("/path/to/file/after.svg");
manager.run_passes(f);
}
// ! [ov:serialize]
// ! [ov:visualize]
void visualization_example(std::shared_ptr<ov::Model> f) {
ov::pass::Manager manager;
// Serialize ov::Function to IR
manager.register_pass<ov::pass::Serialize>("/path/to/file/model.xml", "/path/to/file/model.bin");
manager.run_passes(f);
}
// ! [ov:visualize]
void pass_manager_example1(std::shared_ptr<ov::Model> f) {
// ! [ngraph:disable_gelu]
ov::pass::Manager manager;
manager.register_pass<ov::pass::CommonOptimizations>();
auto pass_config = manager.get_pass_config();
pass_config->disable<ov::pass::ConvertGELU>();
manager.run_passes(f);
// ! [ngraph:disable_gelu]
}
void pass_manager_example2(std::shared_ptr<ov::Model> f) {
ov::pass::Manager manager;
std::function<bool(const std::shared_ptr<const ov::Node>)> transformation_callback;
// ! [ngraph:disable_callback]
// Set callback to particular transformation with specific condition
auto pass_config = manager.get_pass_config();
pass_config->set_callback<ov::pass::ConvertSpaceToDepth,
ov::pass::ConvertDepthToSpace>(
[](const std::shared_ptr<const ov::Node> &node) -> bool {
return node->input_value(0).get_shape().size() <= 5lu &&
node->input_value(0).get_shape().size() == node->get_output_shape(0).size();
});
// Update transformation to call callback
ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher &m) {
auto node = m.get_match_root();
if (transformation_callback(node)) {
return false;
}
// transformation code
return false;
};
// ! [ngraph:disable_callback]
}
void pass_manager_example3(std::shared_ptr<ov::Model> f) {
std::function<bool(const std::shared_ptr<const ov::Node>)> transformation_callback;
// ! [ngraph:disabled_by_default]
// Example of disabled by default transformation
{
ov::pass::Manager manager;
manager.register_pass<ov::pass::ConvertPadToGroupConvolution, false>();
manager.run_passes(f);
}
// Enable disabled by default transformation inside plugin
{
ov::pass::Manager manager;
manager.register_pass<ov::pass::CommonOptimizations>();
auto pass_config = manager.get_pass_config();
pass_config->enable<ov::pass::ConvertPadToGroupConvolution>();
manager.run_passes(f);
}
// ! [ngraph:disabled_by_default]
}

View File

@ -2,8 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <ie_core.hpp>
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
int main() {
//! [ie:create_core]
InferenceEngine::Core core;

View File

@ -1,5 +1,3 @@
#include <ie_core.hpp>
#include <transformations/low_precision/mark_dequantization_subgraph.hpp>
#include <low_precision/common/quantization_granularity_restriction.hpp>

View File

@ -1,8 +1,18 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <ie_extension.h>
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
#include <openvino/core/core.hpp>
#include <openvino/runtime/runtime.hpp>

View File

@ -5,8 +5,18 @@
#include <openvino/opsets/opset8.hpp>
#include <openvino/core/preprocess/pre_post_process.hpp>
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include "inference_engine.hpp"
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
int main_new() {
std::string model_path;
std::string tensor_name;

View File

@ -1,6 +1,16 @@
#include <openvino/runtime/core.hpp>
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <inference_engine.hpp>
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
int main_new() {
ov::Core core;

View File

@ -2,6 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
#
add_definitions(-DIN_OV_COMPONENT)
add_definitions(-DPROJECT_ROOT_DIR="${OpenVINO_SOURCE_DIR}")
include(cmake/install_tbb.cmake)

View File

@ -10,6 +10,7 @@
#include "openvino/runtime/common.hpp"
#include "system_allocator.hpp" // IE private header
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
struct BlobAllocator : public IAllocator {
BlobAllocator(const ov::Allocator& impl) : _impl{impl} {}
@ -79,3 +80,4 @@ struct BlobAllocator {
std::shared_ptr<ie::IAllocator> _impl;
};
} // namespace ov
IE_SUPPRESS_DEPRECATED_END

View File

@ -18,6 +18,7 @@
#include "ie_memcpy.h"
#include "ie_preprocess.hpp"
IE_SUPPRESS_DEPRECATED_START
/**
* @private
*/
@ -147,3 +148,4 @@ void CopyVectorToBlob(const InferenceEngine::Blob::Ptr outputBlob, const std::ve
IE_THROW() << "Element size mismatch between blob and vector";
ie_memcpy(outputBlob->buffer().as<T*>(), outputBlob->byteSize(), &inputVector[0], inputVector.size() * sizeof(T));
}
IE_SUPPRESS_DEPRECATED_END

View File

@ -18,6 +18,7 @@
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
class IExecutableNetworkInternal;
class IVariableStateInternal;
@ -368,4 +369,6 @@ private:
*/
using SoIInferRequestInternal = ov::SoPtr<IInferRequestInternal>;
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <map>
#include <memory>
#include <string>
@ -28,7 +38,7 @@ class IExtension;
/**
* @brief This class contains all the information about the Neural Network and the related binary information
*/
class INFERENCE_ENGINE_API_CLASS(CNNNetwork) {
class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(CNNNetwork) {
public:
/**
* @brief A default constructor

View File

@ -10,17 +10,29 @@
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <utility>
#include "ie_api.h"
#include "ie_locked_memory.hpp"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
namespace details {
/**
* @brief This class provides range loops support for TBlob objects
*/
template <class T>
class BlobIterator {
class INFERENCE_ENGINE_1_0_DEPRECATED BlobIterator {
LockedMemory<T> _mem;
size_t _offset;
@ -85,3 +97,4 @@ public:
};
} // namespace details
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END

View File

@ -4,4 +4,14 @@
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include "ie_common.h"

View File

@ -9,16 +9,27 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <memory>
#include "ie_allocator.hpp"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
namespace details {
/*
* @brief This is a helper class to wrap external memory
*/
class PreAllocator final : public IAllocator {
class INFERENCE_ENGINE_1_0_DEPRECATED PreAllocator final : public IAllocator {
void* _actualData;
size_t _sizeInBytes;
@ -67,9 +78,10 @@ public:
* @return A new allocator
*/
template <class T>
std::shared_ptr<IAllocator> make_pre_allocator(T* ptr, size_t size) {
std::shared_ptr<IAllocator> INFERENCE_ENGINE_1_0_DEPRECATED make_pre_allocator(T* ptr, size_t size) {
return std::make_shared<PreAllocator>(ptr, size * sizeof(T));
}
} // namespace details
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <memory>
#include "ie_api.h"
@ -20,8 +30,7 @@ namespace details {
* @deprecated This is internal stuff. Use Inference Engine Plugin API
* @brief This class provides an OS shared module abstraction
*/
class INFERENCE_ENGINE_DEPRECATED("This is internal stuff. Use Inference Engine Plugin API")
INFERENCE_ENGINE_API_CLASS(SharedObjectLoader) {
class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(SharedObjectLoader) {
std::shared_ptr<void> _so;
public:

View File

@ -8,6 +8,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <cassert>
#include <functional>
#include <memory>
@ -24,7 +34,7 @@ namespace details {
* parameter
*/
template <class T>
class SOCreatorTrait {};
class INFERENCE_ENGINE_1_0_DEPRECATED SOCreatorTrait {};
/**
* @brief Enables only `char` or `wchar_t` template specializations
@ -40,7 +50,7 @@ using enableIfSupportedChar =
* @tparam T An type of object SOPointer can hold
*/
template <class T>
class INFERENCE_ENGINE_DEPRECATED("This is internal stuff. Use Inference Engine Plugin API") SOPointer {
class INFERENCE_ENGINE_1_0_DEPRECATED SOPointer {
template <class U>
friend class SOPointer;

View File

@ -10,6 +10,12 @@
*/
#pragma once
// TODO: Remove after migration to new API in the benchmark app
#ifndef IN_OV_COMPONENT
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <ie_remote_context.hpp>
#include <memory>
#include <string>
@ -362,3 +368,8 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::
} // namespace gpu
} // namespace InferenceEngine
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif

View File

@ -9,16 +9,27 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <memory>
#include "ie_api.h"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
/**
* @brief Allocator handle mapping type
*/
enum LockOp {
enum INFERENCE_ENGINE_1_0_DEPRECATED LockOp {
LOCK_FOR_READ = 0, //!< A flag to lock data for read
LOCK_FOR_WRITE //!< A flag to lock data for write
};
@ -27,7 +38,7 @@ enum LockOp {
* @interface IAllocator
* @brief Allocator concept to be used for memory management and is used as part of the Blob.
*/
class IAllocator : public std::enable_shared_from_this<IAllocator> {
class INFERENCE_ENGINE_1_0_DEPRECATED IAllocator : public std::enable_shared_from_this<IAllocator> {
public:
/**
* @brief Maps handle to heap memory accessible by any memory manipulation routines.
@ -69,6 +80,8 @@ protected:
*
* @return The Inference Engine IAllocator* instance
*/
INFERENCE_ENGINE_API_CPP(std::shared_ptr<InferenceEngine::IAllocator>) CreateDefaultAllocator() noexcept;
INFERENCE_ENGINE_API_CPP(std::shared_ptr<InferenceEngine::IAllocator>)
INFERENCE_ENGINE_1_0_DEPRECATED CreateDefaultAllocator() noexcept;
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END
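
With IAllocator, LockOp, and CreateDefaultAllocator now carrying INFERENCE_ENGINE_1_0_DEPRECATED, the 2.0 replacement is ov::Allocator together with ov::Tensor, as already used by the BlobAllocator wrapper earlier in this change. A hedged sketch, assuming the default allocator is sufficient:

#include <openvino/runtime/allocator.hpp>
#include <openvino/runtime/tensor.hpp>

int main() {
    ov::Allocator allocator;                  // default-constructed: uses the default allocation routine
    void* buffer = allocator.allocate(1024);  // roughly what IAllocator::alloc + lock provided
    allocator.deallocate(buffer, 1024);       // roughly IAllocator::free
    // For the common Blob use case, let the tensor own its memory directly:
    ov::Tensor tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});
    return 0;
}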

View File

@ -28,6 +28,7 @@
#include "ie_precision.hpp"
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
/**
* @brief This class represents a universal container in the Inference Engine
@ -921,4 +922,5 @@ INFERENCE_ENGINE_API_CPP(Blob::Ptr) make_shared_blob(const Blob::Ptr& inputBlob,
INFERENCE_ENGINE_API_CPP(Blob::Ptr)
make_shared_blob(const Blob::Ptr& inputBlob, const std::vector<size_t>& begin, const std::vector<size_t>& end);
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine

View File

@ -15,6 +15,7 @@
#include "ie_blob.h"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
/**
* @brief This class represents a blob that contains other blobs
@ -315,3 +316,4 @@ public:
explicit BatchedBlob(std::vector<Blob::Ptr>&& blobs);
};
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <istream>
#include <map>
#include <memory>
@ -29,7 +39,7 @@ namespace InferenceEngine {
*
* It can throw exceptions safely for the application, where it is properly handled.
*/
class INFERENCE_ENGINE_API_CLASS(Core) {
class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(Core) {
class Impl;
std::shared_ptr<Impl> _impl;
@ -365,5 +375,5 @@ public:
* You might want to use this function if you are developing a dynamically-loaded library which should clean up all
* resources after itself when the library is unloaded.
*/
INFERENCE_ENGINE_API_CPP(void) shutdown();
INFERENCE_ENGINE_API_CPP(void) INFERENCE_ENGINE_1_0_DEPRECATED shutdown();
} // namespace InferenceEngine

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <map>
#include <memory>
#include <string>

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <map>
#include <memory>
#include <string>

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <map>
#include <memory>
#include <string>
@ -21,22 +31,21 @@
namespace InferenceEngine {
_IE_SUPPRESS_DEPRECATED_START_GCC
IE_SUPPRESS_DEPRECATED_START
/**
* @deprecated Use InferenceEngine::CNNNetwork wrapper instead
* @interface ICNNNetwork
* @brief This is the main interface to describe the NN topology
*/
class INFERENCE_ENGINE_API_CLASS(ICNNNetwork) : public std::enable_shared_from_this<ICNNNetwork> {
class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(ICNNNetwork)
: public std::enable_shared_from_this<ICNNNetwork> {
public:
IE_SUPPRESS_DEPRECATED_START
/**
* @deprecated Use InferenceEngine::CNNNetwork wrapper instead
* @brief A shared pointer to a ICNNNetwork interface
*/
using Ptr = std::shared_ptr<ICNNNetwork>;
IE_SUPPRESS_DEPRECATED_END
/**
* @deprecated Use InferenceEngine::CNNNetwork wrapper instead
@ -257,14 +266,12 @@ public:
}
protected:
IE_SUPPRESS_DEPRECATED_START
/**
* @brief Default destructor.
*/
~ICNNNetwork() = default;
IE_SUPPRESS_DEPRECATED_END
};
_IE_SUPPRESS_DEPRECATED_END_GCC
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <map>
#include <memory>
#include <string>

View File

@ -9,6 +9,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <map>
#include <memory>
#include <string>
@ -26,7 +36,7 @@ IE_SUPPRESS_DEPRECATED_START
/**
* @brief This class contains information about each input of the network
*/
class InputInfo {
class INFERENCE_ENGINE_1_0_DEPRECATED InputInfo {
public:
/** @brief A smart pointer to the InputInfo instance */
using Ptr = std::shared_ptr<InputInfo>;
@ -130,11 +140,12 @@ public:
}
/**
* @brief Initializes the pointer to the input data that stores the main input parameters like dims, etc
* @brief Initializes the pointer to the input data that stores the main input parameters like dims,
* etc
*
* This method initializes the precision with the information from the inputPtr if it was not set
* explicitly through InputInfo::setPrecision. If InputInfo::setPrecision is called, this method does not overwrite
* the precision.
* explicitly through InputInfo::setPrecision. If InputInfo::setPrecision is called, this method does
* not overwrite the precision.
* @param inputPtr Pointer to the input data to set
*/
void setInputData(DataPtr inputPtr) {

View File

@ -9,17 +9,28 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <utility>
#include "ie_allocator.hpp"
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
namespace details {
/**
* @brief This class is a LockedMemory concept for hardware memory
*/
template <class T>
class LockedMemoryBase {
class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemoryBase {
IAllocator* _allocator = nullptr;
void* _handle = nullptr;
mutable T* _locked = nullptr;
@ -114,7 +125,7 @@ protected:
* @brief This class represents locked memory for read/write memory
*/
template <class T>
class LockedMemory : public details::LockedMemoryBase<T> {
class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemory : public details::LockedMemoryBase<T> {
using base = details::LockedMemoryBase<T>;
public:
@ -222,7 +233,7 @@ public:
* @brief This class is for <void*> data and allows casting to any pointers
*/
template <>
class LockedMemory<void> : public details::LockedMemoryBase<void> {
class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemory<void> : public details::LockedMemoryBase<void> {
using base = details::LockedMemoryBase<void>;
public:
@ -291,6 +302,7 @@ public:
return base::isEqualTo(lm.as<void*>());
}
IE_SUPPRESS_DEPRECATED_START
/**
* @brief Compares the object with the one stored in the memory
* @param pointer A pointer to compare with
@ -300,6 +312,7 @@ public:
friend bool operator==(const void* pointer, const LockedMemory<void>& lm) {
return lm.operator==(pointer);
}
IE_SUPPRESS_DEPRECATED_END
/**
* @brief Casts stored object to any given type
@ -332,7 +345,7 @@ public:
* @brief This class is for read-only segments
*/
template <class T>
class LockedMemory<const T> : public details::LockedMemoryBase<T> {
class INFERENCE_ENGINE_1_0_DEPRECATED LockedMemory<const T> : public details::LockedMemoryBase<T> {
using base = details::LockedMemoryBase<T>;
public:
@ -411,4 +424,5 @@ public:
return reinterpret_cast<S>(base::dereference());
}
};
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine
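
LockedMemory and LockedMemoryBase have no direct counterpart in the 2.0 API: ov::Tensor exposes its mapped memory as a typed pointer rather than a lock/unlock handle. A hedged sketch of the replacement for the typical blob->buffer() usage:

#include <openvino/runtime/tensor.hpp>

void fill_with_zeros(ov::Tensor& tensor) {
    // No LockedMemory object is needed; data<T>() returns the mapped pointer directly
    // (for host tensors; remote tensors need to be mapped or imported first).
    float* data = tensor.data<float>();
    for (size_t i = 0; i < tensor.get_size(); ++i)
        data[i] = 0.0f;
}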

View File

@ -8,6 +8,16 @@
*/
#pragma once
#if !defined(IN_OV_COMPONENT) && !defined(IE_LEGACY_HEADER_INCLUDED)
# define IE_LEGACY_HEADER_INCLUDED
# ifdef _MSC_VER
# pragma message( \
"The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# else
# warning("The Inference Engine API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html")
# endif
#endif
#include <algorithm>
#include <cctype>
#include <iterator>

View File

@ -6,6 +6,8 @@
#include <memory>
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::Blob::Ptr make_blob_with_precision(const InferenceEngine::TensorDesc& desc) {
return make_blob_with_precision(desc.getPrecision(), desc);
}

View File

@ -14,6 +14,8 @@
//----------------------------------------------------------------------
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
template <InferenceEngine::Precision::ePrecision PRC>

View File

@ -10,6 +10,7 @@
#include "system_allocator.hpp"
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
void Blob::setShape(const SizeVector& dims) {
// we don't want to allow setShape for:

View File

@ -5,6 +5,7 @@
#include "system_allocator.hpp"
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
INFERENCE_ENGINE_API_CPP(std::shared_ptr<IAllocator>) CreateDefaultAllocator() noexcept {
try {

View File

@ -6,6 +6,7 @@
#include "ie_allocator.hpp"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
class SystemMemoryAllocator : public InferenceEngine::IAllocator {
public:
@ -32,5 +33,6 @@ public:
return true;
}
};
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine

View File

@ -12,6 +12,8 @@
using namespace ::testing;
using namespace InferenceEngine;
IE_SUPPRESS_DEPRECATED_START
using ChannelNum = size_t;
using BatchNum = size_t;
using PrecisionType = InferenceEngine::Precision::ePrecision;

View File

@ -13,6 +13,8 @@ using namespace ::testing;
using namespace std;
using namespace InferenceEngine;
IE_SUPPRESS_DEPRECATED_START
class PreallocatorTests : public ::testing::Test {
protected:
std::vector<float> mybuf;

View File

@ -8,6 +8,8 @@
#include "unit_test_utils/mocks/mock_allocator.hpp"
IE_SUPPRESS_DEPRECATED_START
class BlobTests : public ::testing::Test {
protected:
std::shared_ptr<MockAllocator> createMockAllocator() {

View File

@ -12,6 +12,8 @@ using namespace ::testing;
using namespace std;
using namespace InferenceEngine;
IE_SUPPRESS_DEPRECATED_START
class CompoundBlobTests : public ::testing::Test {
protected:
Blob::Ptr _test_blob;

View File

@ -9,6 +9,8 @@
using namespace InferenceEngine;
using namespace ::testing;
IE_SUPPRESS_DEPRECATED_START
TEST(LockedMemoryTest, canUnlockMemoryAfterUsage) {
std::unique_ptr<MockAllocator> allocator(new MockAllocator());
char array[] = {1, 2, 3};

View File

@ -7,7 +7,6 @@
#include <memory>
#include "async_infer_request.hpp"
#include "ie_ngraph_utils.hpp"
#include "ie_plugin_config.hpp"
#include "itt.hpp"
#include "openvino/op/util/op_types.hpp"

View File

@ -6,7 +6,6 @@
#include <memory>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "ie_plugin_config.hpp"
#include "itt.hpp"
#include "openvino/pass/manager.hpp"

View File

@ -12,6 +12,7 @@
#include "ie_allocator.hpp"
IE_SUPPRESS_DEPRECATED_START
class MockAllocator : public InferenceEngine::IAllocator {
public:
MOCK_METHOD(void*, lock, (void*, InferenceEngine::LockOp), (noexcept));
@ -19,3 +20,4 @@ public:
MOCK_METHOD(void*, alloc, (size_t), (noexcept));
MOCK_METHOD(bool, free, (void*), (noexcept)); // NOLINT(readability/casting)
};
IE_SUPPRESS_DEPRECATED_END

View File

@ -3,6 +3,7 @@
#
set(TARGET_NAME compile_tool)
add_definitions(-DIN_OV_COMPONENT)
file(GLOB SRCS
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp

View File

@ -3,6 +3,7 @@
#
set(TARGET_NAME benchmark_app_legacy)
add_definitions(-DIN_OV_COMPONENT)
file (GLOB SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file (GLOB HDR ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)