Removed ie:: namespace (#20172)

Ilya Churaev 2023-10-02 14:02:14 +04:00 committed by GitHub
parent 78ef7e85c9
commit ea37126ea5
11 changed files with 53 additions and 51 deletions
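
The change itself is mechanical: the alias namespace ie = InferenceEngine; that one of the plugin-facing headers declared inside namespace ov is deleted (see the hunk below that touches OPENVINO_PLUGIN_API), and every remaining ie:: spelling is written out as InferenceEngine::. A minimal before/after sketch of the pattern, assuming a hypothetical wrap() helper and a forward declaration that are illustrative only and not part of this commit:

    #include <memory>

    namespace InferenceEngine { class Blob; }  // forward declaration, enough for this sketch

    // Before this commit: an alias inside namespace ov allowed the short ie:: spelling.
    namespace ov {
    namespace ie = InferenceEngine;                  // the alias this commit removes
    std::shared_ptr<ie::Blob> wrap(ie::Blob* blob);  // old spelling
    }  // namespace ov

    // After this commit: the alias is gone, so the namespace is written out in full.
    // Redeclaring wrap() is valid here because ie::Blob and InferenceEngine::Blob name the same type.
    namespace ov {
    std::shared_ptr<InferenceEngine::Blob> wrap(InferenceEngine::Blob* blob);
    }  // namespace ov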

@@ -47,7 +47,7 @@ struct BlobAllocator : public IAllocator {
namespace ov {
struct BlobAllocator {
BlobAllocator() : _impl{std::make_shared<ie::SystemMemoryAllocator>()} {}
BlobAllocator() : _impl{std::make_shared<InferenceEngine::SystemMemoryAllocator>()} {}
void* allocate(const size_t bytes, const size_t alignment) {
OPENVINO_ASSERT(alignment == alignof(max_align_t),
@@ -70,14 +70,15 @@ struct BlobAllocator {
bool is_equal(const BlobAllocator& other) const {
if (other._impl == _impl)
return true;
auto other_system_memory_allocator = dynamic_cast<const ie::SystemMemoryAllocator*>(other._impl.get());
auto system_allocator = dynamic_cast<const ie::SystemMemoryAllocator*>(_impl.get());
auto other_system_memory_allocator =
dynamic_cast<const InferenceEngine::SystemMemoryAllocator*>(other._impl.get());
auto system_allocator = dynamic_cast<const InferenceEngine::SystemMemoryAllocator*>(_impl.get());
if (system_allocator != nullptr && other_system_memory_allocator != nullptr)
return true;
return false;
}
std::shared_ptr<ie::IAllocator> _impl;
std::shared_ptr<InferenceEngine::IAllocator> _impl;
};
} // namespace ov
IE_SUPPRESS_DEPRECATED_END

@@ -14,7 +14,7 @@ namespace legacy_convert {
INFERENCE_ENGINE_API_CPP(ov::SoPtr<ov::IRemoteContext>)
convert_remote_context(const std::shared_ptr<InferenceEngine::RemoteContext>& context);
INFERENCE_ENGINE_API_CPP(ie::Blob*) get_hardware_blob(ie::Blob* blob);
INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob*) get_hardware_blob(InferenceEngine::Blob* blob);
class INFERENCE_ENGINE_API_CLASS(TensorHolder) {
public:
@@ -42,7 +42,7 @@ class INFERENCE_ENGINE_API_CLASS(RemoteBlobTensor) : public IRemoteTensor {
mutable std::string m_dev_name;
public:
std::shared_ptr<ie::RemoteBlob> blob;
std::shared_ptr<InferenceEngine::RemoteBlob> blob;
RemoteBlobTensor(const InferenceEngine::RemoteBlob::Ptr& blob) : blob{blob} {
OPENVINO_ASSERT(blob);
@@ -99,10 +99,12 @@ public:
/**
* @brief Create InferenceEngine::RemoteBlob from the Tensor
*/
class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) : public ie::RemoteBlob, public ov::legacy_convert::TensorHolder {
class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob)
: public InferenceEngine::RemoteBlob,
public ov::legacy_convert::TensorHolder {
public:
TensorRemoteBlob(const ov::SoPtr<ITensor>& tensor, ie::TensorDesc desc)
: ie::RemoteBlob{desc},
TensorRemoteBlob(const ov::SoPtr<ITensor>& tensor, InferenceEngine::TensorDesc desc)
: InferenceEngine::RemoteBlob{desc},
ov::legacy_convert::TensorHolder(tensor) {
OPENVINO_ASSERT(this->get_tensor());
}
@@ -121,7 +123,7 @@ public:
return {};
}
}
std::shared_ptr<ie::RemoteContext> getContext() const noexcept override {
std::shared_ptr<InferenceEngine::RemoteContext> getContext() const noexcept override {
return {};
}
@@ -129,22 +131,22 @@ public:
bool deallocate() noexcept override {
return true;
}
ie::LockedMemory<void> buffer() noexcept override {
InferenceEngine::LockedMemory<void> buffer() noexcept override {
return {nullptr, nullptr, 0};
}
ie::LockedMemory<const void> cbuffer() const noexcept override {
InferenceEngine::LockedMemory<const void> cbuffer() const noexcept override {
return {nullptr, nullptr, 0};
}
ie::LockedMemory<void> rwmap() noexcept override {
InferenceEngine::LockedMemory<void> rwmap() noexcept override {
return {nullptr, nullptr, 0};
}
ie::LockedMemory<const void> rmap() const noexcept override {
InferenceEngine::LockedMemory<const void> rmap() const noexcept override {
return {nullptr, nullptr, 0};
}
ie::LockedMemory<void> wmap() noexcept override {
InferenceEngine::LockedMemory<void> wmap() noexcept override {
return {nullptr, nullptr, 0};
}
const std::shared_ptr<ie::IAllocator>& getAllocator() const noexcept override {
const std::shared_ptr<InferenceEngine::IAllocator>& getAllocator() const noexcept override {
return m_allocator;
}
void* getHandle() const noexcept override {
@@ -154,7 +156,7 @@ public:
using TensorHolder::get_tensor;
private:
std::shared_ptr<ie::IAllocator> m_allocator;
std::shared_ptr<InferenceEngine::IAllocator> m_allocator;
};
} // namespace ov

@@ -39,10 +39,7 @@
# define OPENVINO_PLUGIN_API OPENVINO_EXTERN_C
#endif
namespace InferenceEngine {}
namespace ov {
namespace ie = InferenceEngine;
/**
* @brief This type of map is used for result of Core::query_model

@@ -160,7 +160,7 @@ CompiledModel Core::compile_model(const std::shared_ptr<const ov::Model>& model,
});
}
void Core::add_extension(const ie::IExtensionPtr& extension) {
void Core::add_extension(const InferenceEngine::IExtensionPtr& extension) {
OV_CORE_CALL_STATEMENT(_impl->AddExtension(extension););
}

@@ -346,7 +346,7 @@ public:
const std::map<std::string, std::string>& config) const override {
auto res = m_plugin->query_model(ov::legacy_convert::convert_model(network, m_plugin->is_new_api()),
ov::any_copy(config));
ie::QueryNetworkResult ret;
InferenceEngine::QueryNetworkResult ret;
if (!network.getFunction() || res.empty()) {
ret.rc = InferenceEngine::GENERAL_ERROR;
return ret;

@@ -944,7 +944,7 @@ std::vector<std::string> ov::CoreImpl::get_available_devices() const {
try {
const ov::Any p = GetMetric(deviceName, propertyName);
devicesIDs = p.as<std::vector<std::string>>();
} catch (const ie::Exception&) {
} catch (const InferenceEngine::Exception&) {
// plugin is not created by e.g. invalid env
} catch (const ov::Exception&) {
// plugin is not created by e.g. invalid env

@@ -143,7 +143,7 @@ InferenceEngine::QueryNetworkResult ov::CoreImpl::QueryNetwork(const InferenceEn
const std::string& deviceName,
const std::map<std::string, std::string>& config) const {
OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::QueryNetwork");
ie::QueryNetworkResult ret;
InferenceEngine::QueryNetworkResult ret;
if (!network.getFunction()) {
ret.rc = InferenceEngine::GENERAL_ERROR;
return ret;

@@ -307,7 +307,7 @@ class BlobTensor : public ITensor {
}
public:
std::shared_ptr<ie::Blob> blob;
std::shared_ptr<InferenceEngine::Blob> blob;
BlobTensor(const InferenceEngine::Blob::Ptr& blob) : blob{blob} {
auto remote_impl = dynamic_cast<InferenceEngine::RemoteBlob*>(blob.get());
@@ -349,7 +349,7 @@ public:
void* data(const element::Type& element_type) const override {
OPENVINO_ASSERT(blob != nullptr, "Tensor was not initialized.");
#define TYPE_CHECK(TYPE) (dynamic_cast<const ie::TBlob<TYPE>*>(blob.get()) != nullptr)
#define TYPE_CHECK(TYPE) (dynamic_cast<const InferenceEngine::TBlob<TYPE>*>(blob.get()) != nullptr)
auto host_accesable_implementation = TYPE_CHECK(bool) || TYPE_CHECK(int8_t) || TYPE_CHECK(uint8_t) ||
TYPE_CHECK(int16_t) || TYPE_CHECK(uint16_t) || TYPE_CHECK(int32_t) ||
TYPE_CHECK(uint32_t) || TYPE_CHECK(int64_t) || TYPE_CHECK(uint64_t) ||
@@ -379,10 +379,10 @@ public:
* @tparam T Blob data type
*/
template <typename T>
class TensorMemoryBlob : public ie::TBlob<T> {
class TensorMemoryBlob : public InferenceEngine::TBlob<T> {
public:
~TensorMemoryBlob() override = default;
explicit TensorMemoryBlob(const ov::SoPtr<ITensor>& tensor_, ie::TensorDesc desc) try : ie
explicit TensorMemoryBlob(const ov::SoPtr<ITensor>& tensor_, InferenceEngine::TensorDesc desc) try : InferenceEngine
::TBlob<T>{desc, static_cast<T*>(tensor_->data()), tensor_->get_byte_size()}, tensor{tensor_} {
OPENVINO_ASSERT(!std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr));
}
@@ -390,24 +390,24 @@ public:
OPENVINO_THROW(ex.what());
}
void setShape(const ie::SizeVector& dims) override {
void setShape(const InferenceEngine::SizeVector& dims) override {
tensor->set_shape(dims);
ie::TBlob<T>::getTensorDesc().setDims(dims);
InferenceEngine::TBlob<T>::getTensorDesc().setDims(dims);
allocate();
}
void allocate() noexcept override {
if (ie::TBlob<T>::buffer() != tensor->data()) {
ie::TBlob<T>::_allocator =
ie::details::make_pre_allocator(static_cast<T*>(tensor->data()), tensor->get_byte_size());
ie::TBlob<T>::allocate();
if (InferenceEngine::TBlob<T>::buffer() != tensor->data()) {
InferenceEngine::TBlob<T>::_allocator =
InferenceEngine::details::make_pre_allocator(static_cast<T*>(tensor->data()), tensor->get_byte_size());
InferenceEngine::TBlob<T>::allocate();
}
}
ov::SoPtr<ITensor> tensor;
};
ov::SoPtr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob, bool unwrap) {
ov::SoPtr<ITensor> make_tensor(const std::shared_ptr<InferenceEngine::Blob>& blob, bool unwrap) {
#define ELSE_IF(type) \
else if (auto tblob = dynamic_cast<const TensorMemoryBlob<type>*>(blob.get())) { \
return tblob->tensor; \
@@ -440,7 +440,7 @@ ov::SoPtr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob, bool unwra
#undef IF
}
ie::Blob* get_hardware_blob(ie::Blob* blob) {
InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob) {
#ifdef PROXY_PLUGIN_ENABLED
if (auto remote_blob = dynamic_cast<TensorRemoteBlob*>(blob)) {
const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor());
@@ -455,7 +455,7 @@ ie::Blob* get_hardware_blob(ie::Blob* blob) {
return blob;
}
const ie::Blob* get_hardware_blob(const ie::Blob* blob) {
const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob) {
#ifdef PROXY_PLUGIN_ENABLED
if (auto remote_blob = dynamic_cast<const TensorRemoteBlob*>(blob)) {
const auto& tensor = ov::proxy::get_hardware_tensor(remote_blob->get_tensor());
@@ -470,7 +470,9 @@ const ie::Blob* get_hardware_blob(const ie::Blob* blob) {
return blob;
}
ie::Blob::Ptr tensor_to_blob(const ov::SoPtr<ITensor>& orig_tensor, bool unwrap, InferenceEngine::TensorDesc desc) {
InferenceEngine::Blob::Ptr tensor_to_blob(const ov::SoPtr<ITensor>& orig_tensor,
bool unwrap,
InferenceEngine::TensorDesc desc) {
auto create_desc = [](const ov::SoPtr<ov::ITensor>& tensor,
const InferenceEngine::TensorDesc& desc) -> InferenceEngine::TensorDesc {
if (desc.getLayout() != InferenceEngine::ANY ||
@@ -479,10 +481,10 @@ ie::Blob::Ptr tensor_to_blob(const ov::SoPtr<ITensor>& orig_tensor, bool unwrap,
}
auto element_type = tensor->get_element_type();
auto shape = tensor->get_shape();
ie::SizeVector blk_order(shape.size());
InferenceEngine::SizeVector blk_order(shape.size());
std::iota(blk_order.begin(), blk_order.end(), 0);
ie::SizeVector dim_offset(shape.size(), 0);
ie::SizeVector blk_strides;
InferenceEngine::SizeVector dim_offset(shape.size(), 0);
InferenceEngine::SizeVector blk_strides;
auto byte_strides = element_type.bitwidth() >= 8 ? tensor->get_strides() : Strides{};
if (byte_strides.empty()) {
blk_strides = ov::row_major_strides(shape);
@@ -500,9 +502,9 @@ ie::Blob::Ptr tensor_to_blob(const ov::SoPtr<ITensor>& orig_tensor, bool unwrap,
return byte_stride / element_type.size();
});
}
return ie::TensorDesc{ie::details::convertPrecision(element_type),
shape,
ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}};
return InferenceEngine::TensorDesc{InferenceEngine::details::convertPrecision(element_type),
shape,
InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}};
};
#ifdef PROXY_PLUGIN_ENABLED
const auto& tensor = unwrap ? ov::proxy::get_hardware_tensor(orig_tensor) : orig_tensor;

@@ -52,7 +52,7 @@ const ov::Version ov::Plugin::get_version() const {
OV_PLUGIN_CALL_STATEMENT(return m_ptr->get_version());
}
void ov::Plugin::add_extension(const ie::IExtensionPtr& extension) {
void ov::Plugin::add_extension(const InferenceEngine::IExtensionPtr& extension) {
OPENVINO_SUPPRESS_DEPRECATED_START
OV_PLUGIN_CALL_STATEMENT(m_ptr->add_extension(extension));
OPENVINO_SUPPRESS_DEPRECATED_END
@@ -116,7 +116,7 @@ ov::Any ov::Plugin::get_property(const std::string& name, const AnyMap& argument
if (ov::supported_properties == name) {
try {
return {m_ptr->get_property(name, arguments), {m_so}};
} catch (const ie::Exception&) {
} catch (const InferenceEngine::Exception&) {
std::vector<ov::PropertyName> supported_properties;
try {
auto ro_properties =
@@ -128,7 +128,7 @@ ov::Any ov::Plugin::get_property(const std::string& name, const AnyMap& argument
}
}
} catch (const ov::Exception&) {
} catch (const ie::Exception&) {
} catch (const InferenceEngine::Exception&) {
}
try {
auto rw_properties = m_ptr->get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS), arguments)
@@ -137,7 +137,7 @@ ov::Any ov::Plugin::get_property(const std::string& name, const AnyMap& argument
supported_properties.emplace_back(rw_property, PropertyMutability::RW);
}
} catch (const ov::Exception&) {
} catch (const ie::Exception&) {
} catch (const InferenceEngine::Exception&) {
}
supported_properties.emplace_back(ov::supported_properties.name(), PropertyMutability::RO);
return supported_properties;

@@ -41,7 +41,7 @@ public:
const ov::Version get_version() const;
void add_extension(const ie::IExtensionPtr& extension);
void add_extension(const InferenceEngine::IExtensionPtr& extension);
void set_property(const ov::AnyMap& config);

@@ -248,7 +248,7 @@ void InferRequest::wait() {
_impl->wait();
} catch (const ov::Cancelled&) {
throw;
} catch (const ie::InferCancelled& e) {
} catch (const InferenceEngine::InferCancelled& e) {
Cancelled::create(e.what());
} catch (const std::exception& ex) {
OPENVINO_THROW(ex.what());
@@ -263,7 +263,7 @@ bool InferRequest::wait_for(const std::chrono::milliseconds timeout) {
OPENVINO_SUPPRESS_DEPRECATED_START
try {
return _impl->wait_for(timeout);
} catch (const ie::InferCancelled& e) {
} catch (const InferenceEngine::InferCancelled& e) {
Cancelled::create(e.what());
} catch (const std::exception& ex) {
OPENVINO_THROW(ex.what());