diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt
index 73ef940bd16..cd4d765f5f5 100644
--- a/inference-engine/src/inference_engine/CMakeLists.txt
+++ b/inference-engine/src/inference_engine/CMakeLists.txt
@@ -20,7 +20,7 @@ file (GLOB LIBRARY_SRC
 )
 
 # TODO: WA for OneHot pass usage in reshape
-set(LEGACY_SRC_ROOT "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src/")
+set(LEGACY_SRC_ROOT "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src")
 set(LEGACY_LIBRARY_SHARED_SRCS
     "${LEGACY_SRC_ROOT}/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp"
     "${LEGACY_SRC_ROOT}/transformations/convert_opset1_to_legacy/convert_nms_5_to_legacy.cpp"
diff --git a/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h b/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h
index 25d527b7919..8f9aee7481e 100644
--- a/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h
+++ b/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h
@@ -18,12 +18,13 @@
 #include "ie_blob.h"
 #include "ie_common.h"
 #include "ie_data.h"
-#include "ie_extension.h"
 #include "ie_icnn_network.hpp"
 #include "ngraph/function.hpp"
 
 namespace InferenceEngine {
 
+class IExtension;
+
 /**
  * @brief This class contains all the information about the Neural Network and the related binary information
  */
@@ -52,7 +53,8 @@ public:
      * @param network Pointer to the ngraph::Function object
      * @param exts Vector of pointers to IE extension objects
      */
-    explicit CNNNetwork(const std::shared_ptr<ngraph::Function>& network, const std::vector<IExtensionPtr>& exts = {});
+    explicit CNNNetwork(const std::shared_ptr<ngraph::Function>& network,
+                        const std::vector<std::shared_ptr<IExtension>>& exts = {});
 
     /**
      * @brief Gets the network output Data node information. The received info is stored in the given Data node.
diff --git a/inference-engine/src/inference_engine/include/ie/ie_data.h b/inference-engine/src/inference_engine/include/ie/ie_data.h
index 95776398a8d..281bbaa2dec 100644
--- a/inference-engine/src/inference_engine/include/ie/ie_data.h
+++ b/inference-engine/src/inference_engine/include/ie/ie_data.h
@@ -11,7 +11,7 @@
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp
index 6dd5a143ab6..ec971d53ecd 100644
--- a/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp
+++ b/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp
@@ -15,11 +15,11 @@
 
 #include "openvino/runtime/common.hpp"
 #include "openvino/runtime/profiling_info.hpp"
+#include "openvino/runtime/tensor.hpp"
 #include "openvino/runtime/variable_state.hpp"
 
 namespace InferenceEngine {
 class IInferRequestInternal;
-class Blob;
 }  // namespace InferenceEngine
 
 namespace ov {
@@ -55,20 +55,20 @@ public:
      * @brief Sets input/output data to infer
      *
      * @note Memory allocation does not happen
-     * @param name Name of input or output blob.
-     * @param data Reference to input or output blob. The type of a blob must match the network input precision and
-     * size.
+     * @param name Name of input or output tensor.
+     * @param tensor Reference to input or output tensor. The type of a tensor must match the network input precision
+     * and size.
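+     *
+     * A minimal usage sketch (the `request` object and the tensor name "data" are illustrative, not part of this API):
+     * @code
+     * ov::runtime::Tensor input{ov::element::f32, {1, 3, 224, 224}};
+     * request.set_tensor("data", input);  // no allocation or copy: the request references this tensor
+     * @endcode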
*/ - void set_blob(const std::string& name, const std::shared_ptr& data); + void set_tensor(const std::string& name, const Tensor& tensor); /** * @brief Gets input/output data for inference * * @note Memory allocation does not happen - * @param name A name of Blob to get - * @return A shared pointer to a Blob with a name @p name. If a blob is not found, an exception is thrown. + * @param name A name of tensor to get + * @return A Tensor with a name @p name. If a tensor is not found, an exception is thrown. */ - std::shared_ptr get_blob(const std::string& name); + Tensor get_tensor(const std::string& name); /** * @brief Infers specified input(s) in synchronous mode @@ -91,31 +91,6 @@ public: */ std::vector get_profiling_info() const; - /** - * @brief Sets input data to infer - * - * @note Memory allocation doesn't happen - * @param inputs A reference to a map of input blobs accessed by input names. - * The type of Blob must correspond to the network input precision and size. - */ - void set_input(const std::map>& inputs); - - /** - * @brief Sets data that will contain result of the inference - * - * @note Memory allocation doesn't happen - * @param results - a reference to a map of result blobs accessed by output names. - * The type of Blob must correspond to the network output precision and size. - */ - void set_output(const std::map>& results); - - /** - * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request. - * - * @param batch new batch size to be used by all the following inference calls for this request. - */ - void set_batch(const int batch); - /** * @brief Start inference of specified input(s) in asynchronous mode * diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/parameter.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/parameter.hpp index e1d7cc1728e..026b0fd4dff 100644 --- a/inference-engine/src/inference_engine/include/openvino/runtime/parameter.hpp +++ b/inference-engine/src/inference_engine/include/openvino/runtime/parameter.hpp @@ -19,18 +19,17 @@ #include #include -#include "ie_api.h" #include "openvino/core/except.hpp" +#include "openvino/runtime/common.hpp" namespace ov { - namespace runtime { /** * @brief This class represents an object to work with different parameters * */ -class INFERENCE_ENGINE_API_CLASS(Parameter) { +class OPENVINO_RUNTIME_API Parameter { public: /** * @brief Default constructor @@ -342,19 +341,18 @@ private: using ParamMap = std::map; #ifdef __ANDROID__ -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData>); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData>); -extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData>); -extern template struct INFERENCE_ENGINE_API_CLASS( - ov::runtime::Parameter::RealData>); -extern template struct INFERENCE_ENGINE_API_CLASS( - ov::runtime::Parameter::RealData>); +extern template struct OPENVINO_RUNTIME_API 
ov::runtime::Parameter::RealData;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData>;
+extern template struct OPENVINO_RUNTIME_API
+    ov::runtime::Parameter::RealData>;
 #endif
 
 }  // namespace runtime
diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/remote_context.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/remote_context.hpp
index b4c9848f913..6b5e6065c7f 100644
--- a/inference-engine/src/inference_engine/include/openvino/runtime/remote_context.hpp
+++ b/inference-engine/src/inference_engine/include/openvino/runtime/remote_context.hpp
@@ -18,6 +18,7 @@
 #include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/common.hpp"
 #include "openvino/runtime/parameter.hpp"
+#include "openvino/runtime/remote_tensor.hpp"
 
 namespace InferenceEngine {
 class RemoteBlob;
@@ -108,7 +109,7 @@ public:
     /**
      * @brief Returns name of the device on which underlying object is allocated.
      * Abstract method.
-     * @return A device name string in the same format as that in plugin metric.
+     * @return A device name string in fully specified format `<device_name>[.<device_id>[.<tile_id>]]`.
      */
     std::string get_device_name() const;
 
     /**
      * @brief Allocates memory tensor in device memory or wraps user-supplied memory handle
      * using the specified tensor description and low-level device-specific parameters.
      * Returns a pointer to the object which implements RemoteTensor interface.
-     * @param element_type Defines the element type of the tensor
+     * @param type Defines the element type of the tensor
      * @param shape Defines the shape of the tensor
      * @param params Map of the low-level tensor object parameters.
      * Abstract method.
      * @return A pointer to plugin object that implements RemoteTensor interface.
      */
-    std::shared_ptr<ie::RemoteBlob> create_blob(element::Type element_type,
-                                                const Shape& shape,
-                                                const ParamMap& params = {});
+    RemoteTensor create_tensor(const element::Type& type, const Shape& shape, const ParamMap& params = {});
 
     /**
      * @brief Returns a map of device-specific parameters required for low-level
      * operations with underlying object.
      * Parameters include device/context handles, access flags,
-     * etc. Contents of the map returned depend on remote execution context that is
+     * etc. Content of the returned map depends on remote execution context that is
      * currently set on the device (working scenario).
      * Abstract method.
      * @return A map of name/parameter elements.
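+     * Example of enumerating the returned parameters (a sketch; `context` stands for an initialized RemoteContext,
+     * and the available keys are device-specific):
+     * @code
+     * ov::runtime::ParamMap params = context.get_params();
+     * for (auto&& kv : params)
+     *     std::cout << kv.first << std::endl;  // prints device-specific parameter names
+     * @endcode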
diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/remote_tensor.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/remote_tensor.hpp
new file mode 100644
index 00000000000..738a2c61c77
--- /dev/null
+++ b/inference-engine/src/inference_engine/include/openvino/runtime/remote_tensor.hpp
@@ -0,0 +1,104 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief This is a header file for the OpenVINO Runtime tensor API
+ *
+ * @file openvino/runtime/remote_tensor.hpp
+ */
+#pragma once
+
+#include "ie_remote_blob.hpp"
+#include "openvino/runtime/parameter.hpp"
+#include "openvino/runtime/tensor.hpp"
+
+namespace ov {
+namespace runtime {
+class RemoteContext;
+
+/**
+ * @brief Remote memory access and interpretation API
+ *
+ * It can throw exceptions safely for the application, where they can be properly handled.
+ */
+class OPENVINO_RUNTIME_API RemoteTensor : public Tensor {
+    using Tensor::Tensor;
+    friend class ov::runtime::RemoteContext;
+
+public:
+    void* data(const element::Type) = delete;
+
+    template <typename T>
+    T* data() = delete;
+
+    /**
+     * @brief Returns a map of device-specific parameters required for low-level
+     * operations with underlying object.
+     * Parameters include device/context/surface/buffer handles, access flags,
+     * etc. Content of the returned map depends on remote execution context that is
+     * currently set on the device (working scenario).
+     * Abstract method.
+     * @return A map of name/parameter elements.
+     */
+    runtime::ParamMap get_params() const;
+
+    /**
+     * @brief Returns name of the device on which underlying object is allocated.
+     * Abstract method.
+     * @return A device name string in fully specified format `<device_name>[.<device_id>[.<tile_id>]]`.
+     */
+    std::string get_device_name() const;
+
+    /**
+     * @brief Checks if the RemoteTensor object can be cast to the type T*
+     *
+     * @tparam T Type to be checked. Must represent a class derived from the RemoteTensor
+     * @return true if this object can be dynamically cast to the type T*. Otherwise, false
+     */
+    template <typename T,
+              typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
+              typename std::enable_if<std::is_base_of<RemoteTensor, T>::value, int>::type = 0>
+    bool is() noexcept {
+        return dynamic_cast<T*>(_impl.get()) != nullptr;
+    }
+
+    /**
+     * @brief Checks if the RemoteTensor object can be cast to the type const T*
+     *
+     * @tparam T Type to be checked. Must represent a class derived from the RemoteTensor
+     * @return true if this object can be dynamically cast to the type const T*. Otherwise, false
+     */
+    template <typename T>
+    bool is() const noexcept {
+        return dynamic_cast<const T*>(_impl.get()) != nullptr;
+    }
+
+    /**
+     * @brief Casts this RemoteTensor object to the type T*.
+     *
+     * @tparam T Type to cast to. Must represent a class derived from the RemoteTensor
+     * @return Raw pointer to the object of the type T or nullptr on error
+     */
+    template <typename T,
+              typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
+              typename std::enable_if<std::is_base_of<RemoteTensor, T>::value, int>::type = 0>
+    T* as() noexcept {
+        return dynamic_cast<T*>(_impl.get());
+    }
+
+    /**
+     * @brief Casts this RemoteTensor object to the type const T*.
+     *
+     * @tparam T Type to cast to. Must represent a class derived from the RemoteTensor
+     * @return Raw pointer to the object of the type const T or nullptr on error
+     */
+    template <typename T,
+              typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
+              typename std::enable_if<std::is_base_of<RemoteTensor, T>::value, int>::type = 0>
+    const T* as() const noexcept {
+        return dynamic_cast<const T*>(_impl.get());
+    }
+};
+}  // namespace runtime
+}  // namespace ov
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/variable_state.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/variable_state.hpp
index 93144180488..dc065671db0 100644
--- a/inference-engine/src/inference_engine/include/openvino/runtime/variable_state.hpp
+++ b/inference-engine/src/inference_engine/include/openvino/runtime/variable_state.hpp
@@ -14,10 +14,10 @@
 #include 
 
 #include "openvino/runtime/common.hpp"
+#include "openvino/runtime/tensor.hpp"
 
 namespace InferenceEngine {
 class IVariableStateInternal;
-class Blob;
 }  // namespace InferenceEngine
 
 namespace ov {
@@ -65,13 +65,13 @@ public:
      * @brief Returns the value of the variable state.
      * @return A blob representing a state
      */
-    std::shared_ptr<const ie::Blob> get_state() const;
+    Tensor get_state() const;
 
     /**
      * @brief Sets the new state for the next inference.
      * @param state The current state to set
      */
-    void set_state(const std::shared_ptr<ie::Blob>& state);
+    void set_state(const Tensor& state);
 };
 }  // namespace runtime
 }  // namespace ov
diff --git a/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp b/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp
index 0253c23321f..271cd710b5d 100644
--- a/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp
+++ b/inference-engine/src/inference_engine/src/cpp/ie_infer_request.cpp
@@ -8,13 +8,13 @@
 #include 
 #include 
 
-#include "cpp/exception2status.hpp"
 #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp"
-#include "details/ie_so_loader.h"
 #include "ie_infer_async_request_base.hpp"
+#include "ie_ngraph_utils.hpp"
 #include "ie_remote_context.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/runtime/infer_request.hpp"
+#include "transformations/utils/utils.hpp"
 
 namespace InferenceEngine {
 
@@ -213,20 +213,23 @@ InferRequest::InferRequest(const std::shared_ptr& so, const ie::IInferRequ
     OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized.");
 }
 
-void InferRequest::set_blob(const std::string& name, const ie::Blob::Ptr& data) {
-    OV_INFER_REQ_CALL_STATEMENT(_impl->SetBlob(name, data);)
-}
+void InferRequest::set_tensor(const std::string& name, const Tensor& tensor) {
+    OV_INFER_REQ_CALL_STATEMENT({ _impl->SetBlob(name, tensor._impl); })
+}
 
-ie::Blob::Ptr InferRequest::get_blob(const std::string& name) {
-    ie::Blob::Ptr blobPtr;
-    OV_INFER_REQ_CALL_STATEMENT(blobPtr = _impl->GetBlob(name);)
-    std::string error = "Internal error: blob with name `" + name + "` is not allocated!";
-    const bool remoteBlobPassed = blobPtr->is<ie::RemoteBlob>();
-    if (blobPtr == nullptr)
-        IE_THROW() << error;
-    if (!remoteBlobPassed && blobPtr->buffer() == nullptr)
-        IE_THROW() << error;
-    return blobPtr;
+Tensor InferRequest::get_tensor(const std::string& name) {
+    OV_INFER_REQ_CALL_STATEMENT({
+        auto blob = _impl->GetBlob(name);
+        if (blob == nullptr) {
+            IE_THROW(NotAllocated) << "Internal tensor implementation with name `" << name << "` is not allocated!";
+        }
+        const bool remoteBlobPassed = blob->is<ie::RemoteBlob>();
+        if (!remoteBlobPassed && blob->buffer() == nullptr) {
+            IE_THROW(NotAllocated) << "Internal tensor implementation with name `" << name << "` 
is not allocated!"; + } + auto tensorDesc = blob->getTensorDesc(); + auto dims = tensorDesc.getDims(); + return {_so, blob}; + }) } void InferRequest::infer() { @@ -275,18 +278,6 @@ std::vector InferRequest::get_profiling_info() const { }) } -void InferRequest::set_input(const ie::BlobMap& inputs) { - OV_INFER_REQ_CALL_STATEMENT(for (auto&& input : inputs) { _impl->SetBlob(input.first, input.second); }) -} - -void InferRequest::set_output(const ie::BlobMap& results) { - OV_INFER_REQ_CALL_STATEMENT(for (auto&& result : results) { _impl->SetBlob(result.first, result.second); }) -} - -void InferRequest::set_batch(const int batch) { - OV_INFER_REQ_CALL_STATEMENT(_impl->SetBatch(batch);) -} - void InferRequest::start_async() { OV_INFER_REQ_CALL_STATEMENT(_impl->StartAsync();) } diff --git a/inference-engine/src/inference_engine/src/cpp/ie_remote_context.cpp b/inference-engine/src/inference_engine/src/cpp/ie_remote_context.cpp index 5e41d30dd1c..90e7b48597a 100644 --- a/inference-engine/src/inference_engine/src/cpp/ie_remote_context.cpp +++ b/inference-engine/src/inference_engine/src/cpp/ie_remote_context.cpp @@ -17,14 +17,16 @@ __VA_ARGS__; \ } catch (const std::exception& ex) { \ throw ov::Exception(ex.what()); \ + } catch (...) { \ + OPENVINO_ASSERT(false, "Unexpected exception"); \ } namespace ov { namespace runtime { RemoteContext::RemoteContext(const std::shared_ptr& so, const ie::RemoteContext::Ptr& impl) - : _so(so), - _impl(impl) { + : _so{so}, + _impl{impl} { OPENVINO_ASSERT(_impl != nullptr, "RemoteContext was not initialized."); } @@ -32,13 +34,15 @@ std::string RemoteContext::get_device_name() const { OV_REMOTE_CONTEXT_STATEMENT(return _impl->getDeviceName()); } -std::shared_ptr RemoteContext::create_blob(element::Type type, - const Shape& shape, - const ie::ParamMap& params) { - ie::TensorDesc tensorDesc(ie::details::convertPrecision(type), - shape, - ie::TensorDesc::getLayoutByRank(shape.size())); - OV_REMOTE_CONTEXT_STATEMENT(return _impl->CreateBlob(tensorDesc, params)); +RemoteTensor RemoteContext::create_tensor(const element::Type& element_type, + const Shape& shape, + const ie::ParamMap& params) { + OV_REMOTE_CONTEXT_STATEMENT({ + return {_so, + _impl->CreateBlob( + {ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByRank(shape.size())}, + params)}; + }); } ie::ParamMap RemoteContext::get_params() const { diff --git a/inference-engine/src/inference_engine/src/cpp/ie_variable_state.cpp b/inference-engine/src/inference_engine/src/cpp/ie_variable_state.cpp index 16d347fd490..a1f81768e32 100644 --- a/inference-engine/src/inference_engine/src/cpp/ie_variable_state.cpp +++ b/inference-engine/src/inference_engine/src/cpp/ie_variable_state.cpp @@ -72,12 +72,12 @@ std::string VariableState::get_name() const { OV_VARIABLE_CALL_STATEMENT(return _impl->GetName()); } -ie::Blob::CPtr VariableState::get_state() const { - OV_VARIABLE_CALL_STATEMENT(return _impl->GetState()); +Tensor VariableState::get_state() const { + OV_VARIABLE_CALL_STATEMENT(return {_so, std::const_pointer_cast(_impl->GetState())}); } -void VariableState::set_state(const ie::Blob::Ptr& state) { - OV_VARIABLE_CALL_STATEMENT(_impl->SetState(state)); +void VariableState::set_state(const Tensor& state) { + OV_VARIABLE_CALL_STATEMENT(_impl->SetState(state._impl)); } } // namespace runtime diff --git a/inference-engine/src/inference_engine/src/ie_blob_common.cpp b/inference-engine/src/inference_engine/src/ie_blob_common.cpp index fc5d9b9144f..9bfa02eb3aa 100644 --- 
a/inference-engine/src/inference_engine/src/ie_blob_common.cpp +++ b/inference-engine/src/inference_engine/src/ie_blob_common.cpp @@ -31,4 +31,31 @@ Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, return inputBlob->createROI(begin, end); } +// +// RTTI +// + +Blob::~Blob() {} +MemoryBlob::~MemoryBlob() {} + +template +TBlob::~TBlob() { + free(); +} + +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); +template class INFERENCE_ENGINE_API_CLASS(TBlob); + } // namespace InferenceEngine diff --git a/inference-engine/src/inference_engine/src/ie_common.cpp b/inference-engine/src/inference_engine/src/ie_common.cpp index 09d1333a444..ce194f1bc6c 100644 --- a/inference-engine/src/inference_engine/src/ie_common.cpp +++ b/inference-engine/src/inference_engine/src/ie_common.cpp @@ -30,12 +30,6 @@ const ngraph::NodeTypeInfo& ExecutionNode::get_type_info() const { } // namespace ExecGraphInfoSerialization namespace InferenceEngine { -// -// ie_blob.h -// - -Blob::~Blob() {} -MemoryBlob::~MemoryBlob() {} // // ie_iextension.h @@ -127,30 +121,6 @@ StatusCode InferenceEngineException::getStatus() const { } // namespace details IE_SUPPRESS_DEPRECATED_END -// -// ie_blob.h -// - -template -TBlob::~TBlob() { - free(); -} - -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); -template class INFERENCE_ENGINE_API_CLASS(TBlob); - } // namespace InferenceEngine namespace ov { diff --git a/inference-engine/src/inference_engine/src/ie_layouts.cpp b/inference-engine/src/inference_engine/src/ie_layouts.cpp index f9f906cc34f..b8557a9b558 100644 --- a/inference-engine/src/inference_engine/src/ie_layouts.cpp +++ b/inference-engine/src/inference_engine/src/ie_layouts.cpp @@ -500,7 +500,9 @@ TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc, const std::vector& begin, const std::vector& end, bool useOrigMemDesc) { - IE_ASSERT(begin.size() == end.size()); + if (begin.size() != end.size()) { + IE_THROW() << "`begin` vector size must match `end` vector size"; + } TensorSlice slice; for (size_t i = 0; i < begin.size(); ++i) { IE_ASSERT(end[i] >= begin[i]); diff --git a/inference-engine/src/inference_engine/src/remote_tensor.cpp b/inference-engine/src/inference_engine/src/remote_tensor.cpp new file mode 100644 index 00000000000..be57a8e8333 --- /dev/null +++ 
b/inference-engine/src/inference_engine/src/remote_tensor.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/remote_tensor.hpp" + +namespace ov { +namespace runtime { +ie::ParamMap RemoteTensor::get_params() const { + OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized."); + auto remote_impl = InferenceEngine::as(_impl); + OPENVINO_ASSERT(remote_impl != nullptr, "Remote tensor was not initialized using remote implementation"); + try { + return remote_impl->getParams(); + } catch (const std::exception& ex) { + throw ov::Exception(ex.what()); + } catch (...) { + OPENVINO_ASSERT(false, "Unexpected exception"); + } +} + +std::string RemoteTensor::get_device_name() const { + OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized."); + auto remote_impl = InferenceEngine::as(_impl); + OPENVINO_ASSERT(remote_impl != nullptr, "Remote tensor was not initialized using remote implementation"); + try { + return remote_impl->getDeviceName(); + } catch (const std::exception& ex) { + throw ov::Exception(ex.what()); + } catch (...) { + OPENVINO_ASSERT(false, "Unexpected exception"); + } +} +} // namespace runtime +} // namespace ov diff --git a/inference-engine/src/plugin_api/blob_factory.hpp b/inference-engine/src/plugin_api/blob_factory.hpp index 0c821471ee6..57d65df69c0 100644 --- a/inference-engine/src/plugin_api/blob_factory.hpp +++ b/inference-engine/src/plugin_api/blob_factory.hpp @@ -32,6 +32,9 @@ public: static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, void* ptr) { return InferenceEngine::make_shared_blob(desc, reinterpret_cast(ptr)); } + static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, void* ptr, const std::size_t size) { + return InferenceEngine::make_shared_blob(desc, reinterpret_cast(ptr), size); + } static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, const std::shared_ptr& alloc) { return InferenceEngine::make_shared_blob(desc, alloc); diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index b82ae2de2e6..b9f9813372d 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -16,6 +16,7 @@ #include "blob_factory.hpp" #include "cpp/ie_cnn_network.h" +#include "details/ie_so_pointer.hpp" #include "ie_iextension.h" #include "ie_input_info.hpp" #include "ie_parameter.hpp" diff --git a/inference-engine/tests/functional/inference_engine/caching_test.cpp b/inference-engine/tests/functional/inference_engine/caching_test.cpp index a24c6f18d96..1b5a025833f 100644 --- a/inference-engine/tests/functional/inference_engine/caching_test.cpp +++ b/inference-engine/tests/functional/inference_engine/caching_test.cpp @@ -16,6 +16,7 @@ #include "ngraph/function.hpp" #include "details/ie_so_loader.h" #include "ie_metric_helpers.hpp" +#include "openvino/op/logical_not.hpp" #include "ie_remote_context.hpp" #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" @@ -226,11 +227,11 @@ public: EXPECT_CALL(*mock, GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))).Times(AnyNumber()).WillRepeatedly(Return(Parameter{1u})); EXPECT_CALL(*mock, GetExecGraphInfo()).Times(AnyNumber()).WillRepeatedly(Return([] { ngraph::ParameterVector parameters; - 
parameters.push_back(std::make_shared( + parameters.push_back(std::make_shared( ov::element::f32, ov::Shape{1, 3, 8, 8})); - auto notOp = std::make_shared(parameters.back()); + auto notOp = std::make_shared(parameters.back()); ngraph::ResultVector results; - results.push_back(std::make_shared(notOp)); + results.push_back(std::make_shared(notOp)); return std::make_shared(results, parameters, "empty_function"); } ())); auto ptr = std::make_shared(); diff --git a/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp b/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp index bad26ae73a0..65429ddc57c 100644 --- a/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp +++ b/inference-engine/tests/functional/inference_engine/ov_infer_request_test.cpp @@ -7,21 +7,21 @@ #include #include #include +#include using namespace ::testing; using namespace std; using namespace InferenceEngine; using namespace InferenceEngine::details; - -TEST(InferRequestOVTests, throwsOnUninitializedSetBlob) { +TEST(InferRequestOVTests, throwsOnUninitializedSetTensor) { ov::runtime::InferRequest req; - ASSERT_THROW(req.set_blob({}, {}), ov::Exception); + ASSERT_THROW(req.set_tensor({}, {}), ov::Exception); } -TEST(InferRequestOVTests, throwsOnUninitializedGetBlob) { +TEST(InferRequestOVTests, throwsOnUninitializedGetTensor) { ov::runtime::InferRequest req; - ASSERT_THROW(req.get_blob({}), ov::Exception); + ASSERT_THROW(req.get_tensor({}), ov::Exception); } TEST(InferRequestOVTests, throwsOnUninitializedInfer) { @@ -34,21 +34,6 @@ TEST(InferRequestOVTests, throwsOnUninitializedGetPerformanceCounts) { ASSERT_THROW(req.get_profiling_info(), ov::Exception); } -TEST(InferRequestOVTests, throwsOnUninitializedSetInput) { - ov::runtime::InferRequest req; - ASSERT_THROW(req.set_input({{}}), ov::Exception); -} - -TEST(InferRequestOVTests, throwsOnUninitializedSetOutput) { - ov::runtime::InferRequest req; - ASSERT_THROW(req.set_output({{}}), ov::Exception); -} - -TEST(InferRequestOVTests, throwsOnUninitializedSetBatch) { - ov::runtime::InferRequest req; - ASSERT_THROW(req.set_batch({}), ov::Exception); -} - TEST(InferRequestOVTests, throwsOnUninitializedStartAsync) { ov::runtime::InferRequest req; ASSERT_THROW(req.start_async(), ov::Exception); @@ -74,3 +59,10 @@ TEST(InferRequestOVTests, throwsOnUninitializedQueryState) { ov::runtime::InferRequest req; ASSERT_THROW(req.query_state(), ov::Exception); } + + +TEST(InferRequestOVTests, throwsOnUninitializedSetRemoteTensor) { + ov::runtime::InferRequest req; + ov::runtime::RemoteTensor remote_tensor; + ASSERT_THROW(req.set_tensor({}, remote_tensor), ov::Exception); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/inference_engine/ov_remote_context_test.cpp b/inference-engine/tests/functional/inference_engine/ov_remote_context_test.cpp index 86e786ebe4d..302d68b73e8 100644 --- a/inference-engine/tests/functional/inference_engine/ov_remote_context_test.cpp +++ b/inference-engine/tests/functional/inference_engine/ov_remote_context_test.cpp @@ -17,7 +17,7 @@ TEST(RemoteContextOVTests, throwsOnUninitializedReset) { TEST(RemoteContextOVTests, throwsOnUninitializedGetname) { ov::runtime::RemoteContext ctx; - ASSERT_THROW(ctx.create_blob({}, {}), ov::Exception); + ASSERT_THROW(ctx.create_tensor({}, {}, {}), ov::Exception); } TEST(RemoteContextOVTests, throwsOnUninitializedGetParams) { diff --git a/inference-engine/tests/functional/inference_engine/ov_remote_tensor.cpp 
b/inference-engine/tests/functional/inference_engine/ov_remote_tensor.cpp new file mode 100644 index 00000000000..f2072b96a6a --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ov_remote_tensor.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +using namespace ::testing; +using namespace std; +using namespace InferenceEngine; +using namespace InferenceEngine::details; + +TEST(RemoteTensorOVTests, throwsOnGetParams) { + ov::runtime::RemoteTensor tensor; + ASSERT_THROW(tensor.get_params(), ov::Exception); +} + +TEST(RemoteTensorOVTests, throwsOnGetDeviceName) { + ov::runtime::RemoteTensor tensor; + ASSERT_THROW(tensor.get_device_name(), ov::Exception); +} diff --git a/inference-engine/tests/functional/inference_engine/ov_variable_state_test.cpp b/inference-engine/tests/functional/inference_engine/ov_variable_state_test.cpp index 088fe120f18..3dfa33ed31c 100644 --- a/inference-engine/tests/functional/inference_engine/ov_variable_state_test.cpp +++ b/inference-engine/tests/functional/inference_engine/ov_variable_state_test.cpp @@ -29,6 +29,6 @@ TEST(VariableStateOVTests, throwsOnUninitializedGetState) { TEST(VariableStateOVTests, throwsOnUninitializedSetState) { ov::runtime::VariableState state; - InferenceEngine::Blob::Ptr blob; - ASSERT_THROW(state.set_state(blob), ov::Exception); + ov::runtime::Tensor tensor; + ASSERT_THROW(state.set_state(tensor), ov::Exception); } diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index 351bfd1001b..dcaaf45f391 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -12,6 +12,26 @@ add_subdirectory(builder) add_subdirectory(reference) add_subdirectory(shape_inference) +# WA for Tensor implementation via ie::Blob::Ptr +set(IE_SRC_ROOT "${IE_MAIN_SOURCE_DIR}/src/inference_engine/src") +set(IE_SHARED_SRCS + "${IE_SRC_ROOT}/system_allocator.cpp" + "${IE_SRC_ROOT}/blob_factory.cpp" + "${IE_SRC_ROOT}/ie_blob_common.cpp" + "${IE_SRC_ROOT}/ie_layouts.cpp") +set(MIXED_SRC ${IE_SHARED_SRCS} + "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/allocator.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/ov_tensor.cpp") + +set_property(SOURCE ${MIXED_SRC} + APPEND PROPERTY INCLUDE_DIRECTORIES + ${IE_SRC_ROOT} + $ + $) + +set_source_files_properties(${MIXED_SRC} + PROPERTIES COMPILE_DEFINITIONS IMPLEMENT_INFERENCE_ENGINE_API) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj @@ -19,7 +39,7 @@ source_group("src" FILES ${LIBRARY_SRC}) source_group("include" FILES ${PUBLIC_HEADERS}) # Create static or shared library depending on BUILD_SHARED_LIBS -add_library(ngraph ${LIBRARY_SRC} ${PUBLIC_HEADERS}) +add_library(ngraph ${LIBRARY_SRC} ${PUBLIC_HEADERS} ${IE_SHARED_SRCS}) if(COMMAND ie_faster_build) diff --git a/ngraph/core/include/openvino/core/type/element_type.hpp b/ngraph/core/include/openvino/core/type/element_type.hpp index 7fc07ce766d..abb500b4179 100644 --- a/ngraph/core/include/openvino/core/type/element_type.hpp +++ b/ngraph/core/include/openvino/core/type/element_type.hpp @@ -162,6 +162,8 @@ OPENVINO_API Type from(); template <> OPENVINO_API Type from(); +OPENVINO_API Type fundamental_type_for(const Type& type); + OPENVINO_API std::ostream& operator<<(std::ostream& out, const ov::element::Type& obj); } // namespace element diff --git a/ngraph/core/include/openvino/runtime/allocator.hpp b/ngraph/core/include/openvino/runtime/allocator.hpp new file mode 100644 
index 00000000000..91ab1d901ee
--- /dev/null
+++ b/ngraph/core/include/openvino/runtime/allocator.hpp
@@ -0,0 +1,131 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief A header file that provides Allocator interface
+ *
+ * @file openvino/runtime/allocator.hpp
+ */
+#pragma once
+
+#include 
+#include 
+
+#include "openvino/core/core_visibility.hpp"
+
+namespace ov {
+namespace runtime {
+
+/**
+ * @interface AllocatorImpl
+ * @brief Tries to act like [std::pmr::memory_resource](https://en.cppreference.com/w/cpp/memory/memory_resource)
+ */
+struct AllocatorImpl : public std::enable_shared_from_this<AllocatorImpl> {
+    /**
+     * @brief A smart pointer containing AllocatorImpl object
+     */
+    using Ptr = std::shared_ptr<AllocatorImpl>;
+
+    /**
+     * @brief Allocates memory
+     *
+     * @param bytes The minimum size in bytes to allocate
+     * @param alignment The alignment of storage
+     * @return Handle to the allocated resource
+     * @throw Exception if the specified size and alignment are not supported
+     */
+    virtual void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t)) = 0;
+
+    /**
+     * @brief Releases the handle and all associated memory resources which invalidates the handle.
+     * @param handle The handle to free
+     * @param bytes The size in bytes that was passed into allocate() method
+     * @param alignment The alignment of storage that was passed into allocate() method
+     */
+    virtual void deallocate(void* handle, const size_t bytes, const size_t alignment = alignof(max_align_t)) = 0;
+
+    /**
+     * @brief Compares with other AllocatorImpl
+     * @param other Other instance of allocator
+     * @return `true` if and only if memory allocated from one AllocatorImpl can be deallocated from the other and vice
+     * versa
+     */
+    virtual bool is_equal(const AllocatorImpl& other) const = 0;
+
+protected:
+    ~AllocatorImpl() = default;
+};
+
+class Tensor;
+
+/**
+ * @brief Wraps an allocator implementation to provide a safe way to store an allocator loaded from a shared library,
+ * and constructs a default allocator (based on C++ `new`/`delete` calls) if created without parameters
+ */
+class OPENVINO_API Allocator {
+    std::shared_ptr<void> _so;
+    AllocatorImpl::Ptr _impl;
+
+    /**
+     * @brief Constructs Allocator from the initialized std::shared_ptr
+     * @param so Plugin to use. This is required to ensure that Allocator can work properly even if plugin object is
+     * destroyed.
+     * @param impl Initialized shared pointer
+     */
+    Allocator(const std::shared_ptr<void>& so, const AllocatorImpl::Ptr& impl);
+
+    friend class ov::runtime::Tensor;
+
+public:
+    /**
+     * @brief Creates the default implementation of the OpenVINO allocator.
+     */
+    Allocator();
+
+    /**
+     * @brief Constructs Allocator from the initialized std::shared_ptr
+     * @param impl Initialized shared pointer
+     */
+    Allocator(const AllocatorImpl::Ptr& impl);
+
+    /**
+     * @brief Allocates memory
+     *
+     * @param bytes The minimum size in bytes to allocate
+     * @param alignment The alignment of storage
+     * @return Handle to the allocated resource
+     * @throw Exception if the specified size and alignment are not supported
+     */
+    void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t));
+
+    /**
+     * @brief Releases the handle and all associated memory resources which invalidates the handle.
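+     *
+     * A typical round trip with the default allocator (the 64-byte size is illustrative; note that the default
+     * implementation rejects sized and aligned deallocation, so `deallocate` is called with defaults):
+     * @code
+     * ov::runtime::Allocator allocator;
+     * void* handle = allocator.allocate(64);
+     * allocator.deallocate(handle);
+     * @endcode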
+     * @param ptr The handle to free
+     * @param bytes The size in bytes that was passed into allocate() method
+     * @param alignment The alignment of storage that was passed into allocate() method
+     */
+    void deallocate(void* ptr, const size_t bytes = 0, const size_t alignment = alignof(max_align_t));
+
+    /**
+     * @brief Compares with another Allocator
+     * @param other Other instance of allocator
+     * @return `true` if and only if memory allocated from one Allocator can be deallocated from the other and vice
+     * versa
+     */
+    bool operator==(const Allocator& other) const;
+
+    /**
+     * @brief Checks if current Allocator object is not initialized
+     * @return `true` if current Allocator object is not initialized, `false` - otherwise
+     */
+    bool operator!() const noexcept;
+
+    /**
+     * @brief Checks if current Allocator object is initialized
+     * @return `true` if current Allocator object is initialized, `false` - otherwise
+     */
+    explicit operator bool() const noexcept;
+};
+}  // namespace runtime
+}  // namespace ov
diff --git a/ngraph/core/include/openvino/runtime/tensor.hpp b/ngraph/core/include/openvino/runtime/tensor.hpp
new file mode 100644
index 00000000000..b7701129dc9
--- /dev/null
+++ b/ngraph/core/include/openvino/runtime/tensor.hpp
@@ -0,0 +1,157 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief This is a header file for the OpenVINO Runtime tensor API
+ *
+ * @file openvino/runtime/tensor.hpp
+ */
+#pragma once
+
+#include "openvino/core/coordinate.hpp"
+#include "openvino/core/shape.hpp"
+#include "openvino/core/type/element_type.hpp"
+#include "openvino/runtime/allocator.hpp"
+
+namespace InferenceEngine {
+class Blob;
+}  // namespace InferenceEngine
+
+namespace ov {
+namespace runtime {
+
+class InferRequest;
+class RemoteContext;
+class VariableState;
+
+/**
+ * @brief Tensor API holding host memory
+ *
+ * It can throw exceptions safely for the application, where they can be properly handled.
+ */
+class OPENVINO_API Tensor {
+protected:
+    std::shared_ptr<void> _so;                     //!< Reference to dynamically loaded library
+    std::shared_ptr<InferenceEngine::Blob> _impl;  //!< Shared pointer to internal tensor representation
+
+    /**
+     * @brief Constructs Tensor from the initialized std::shared_ptr
+     * @param so Plugin to use. This is required to ensure that Tensor can work properly even if plugin object is
+     * destroyed.
+     * @param impl Initialized shared pointer
+     */
+    Tensor(const std::shared_ptr<void>& so, const std::shared_ptr<InferenceEngine::Blob>& impl);
+
+    friend class ov::runtime::InferRequest;
+    friend class ov::runtime::RemoteContext;
+    friend class ov::runtime::VariableState;
+
+public:
+    /**
+     * @brief Default constructor
+     */
+    Tensor() = default;
+
+    /**
+     * @brief Constructs Tensor using element type and shape. Allocates internal host storage using the default
+     * allocator
+     * @param type Tensor element type
+     * @param shape Tensor shape
+     * @param allocator allocates memory for internal tensor storage
+     */
+    Tensor(const element::Type type, const Shape& shape, const Allocator& allocator = {});
+
+    /**
+     * @brief Constructs Tensor using element type and shape. Wraps allocated host memory.
+     * @note Does not perform memory allocation internally
+     * @param type Tensor element type
+     * @param shape Tensor shape
+     * @param host_ptr Pointer to pre-allocated host memory
+     * @param size Optional size of allocated host memory in elements. If it is not set (default is `0`), the size of
+     * memory is supposed to be not less than ov::shape_size(shape) * type.size() in bytes.
+     * @param strides Optional strides parameters in elements. If the strides are not set, they are computed
+     * automatically as the row-major strides of `shape`.
+     */
+    Tensor(const element::Type type,
+           const Shape& shape,
+           void* host_ptr,
+           const size_t size = 0,
+           const Strides& strides = {});
+
+    /**
+     * @brief Constructs region of interest (ROI) tensor from another tensor.
+     * @note Does not perform memory allocation internally
+     * @param other original tensor
+     * @param begin start coordinate of ROI object inside of the original object.
+     * @param end end coordinate of ROI object inside of the original object.
+     * @note The number of dimensions in `begin` and `end` must match the number of dimensions in `other.get_shape()`
+     */
+    Tensor(const Tensor& other, const Coordinate& begin, const Coordinate& end);
+
+    /**
+     * @brief Set new shape for tensor; deallocate/allocate if the new total size is bigger than the previous one.
+     * @note Memory allocation may happen
+     * @param shape A new shape
+     */
+    void set_shape(const ov::Shape& shape);
+
+    /**
+     * @return A tensor element type
+     */
+    element::Type get_element_type() const;
+
+    /**
+     * @return A tensor shape
+     */
+    Shape get_shape() const;
+
+    /**
+     * @brief Returns the total number of elements (a product of all the dims or 1 for scalar)
+     * @return The total number of elements
+     */
+    size_t get_size() const;
+
+    /**
+     * @brief Returns the size of the current Tensor in bytes.
+     * @return Tensor's size in bytes
+     */
+    size_t get_byte_size() const;
+
+    /**
+     * @return Tensor's strides in elements
+     */
+    Strides get_strides() const;
+
+    /**
+     * @brief Provides an access to the underlying host memory
+     * @param type Optional type parameter.
+     * @note If the type parameter is specified, the method throws an exception
+     * if the specified type's fundamental type does not match the tensor element type's fundamental type
+     * @return A host pointer to tensor memory
+     */
+    void* data(const element::Type type = {}) const;
+
+    /**
+     * @brief Provides an access to the underlying host memory cast to type `T`
+     * @return A host pointer to tensor memory cast to the specified type `T`.
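+     * A short sketch (the shape is illustrative; `T` must match the tensor element type):
+     * @code
+     * ov::runtime::Tensor t{ov::element::f32, {2, 2}};
+     * float* p = t.data<float>();
+     * p[0] = 1.0f;  // write through the typed pointer
+     * @endcode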
+ * @note Throws exception if specified type does not match with tensor element type + */ + template + T* data() const { + return static_cast(data(element::from())); + } + + /** + * @brief Checks if current Tensor object is not initialized + * @return `true` if current Tensor object is not initialized, `false` - otherwise + */ + bool operator!() const noexcept; + + /** + * @brief Checks if current Tensor object is initialized + * @return `true` if current Tensor object is initialized, `false` - otherwise + */ + explicit operator bool() const noexcept; +}; +} // namespace runtime +} // namespace ov \ No newline at end of file diff --git a/ngraph/core/src/runtime/allocator.cpp b/ngraph/core/src/runtime/allocator.cpp new file mode 100644 index 00000000000..67c28917fe1 --- /dev/null +++ b/ngraph/core/src/runtime/allocator.cpp @@ -0,0 +1,61 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/allocator.hpp" + +#include "blob_allocator.hpp" +#include "ie_allocator.hpp" +#include "ie_common.h" +#include "openvino/core/except.hpp" + +namespace ov { +namespace runtime { + +Allocator::Allocator() : _impl{std::make_shared()} {} + +Allocator::Allocator(const std::shared_ptr& so, const std::shared_ptr& impl) + : _so{so}, + _impl{impl} { + OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized."); +} + +Allocator::Allocator(const std::shared_ptr& impl) : _impl{impl} { + OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized."); +} + +#define OV_ALLOCATOR_STATEMENT(...) \ + OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized."); \ + try { \ + __VA_ARGS__; \ + } catch (const std::exception& ex) { \ + throw ov::Exception(ex.what()); \ + } catch (...) { \ + OPENVINO_ASSERT(false, "Unexpected exception"); \ + } + +void* Allocator::allocate(const size_t bytes, const size_t alignment) { + OV_ALLOCATOR_STATEMENT(return _impl->allocate(bytes, alignment)); +} +void Allocator::deallocate(void* handle, const size_t bytes, const size_t alignment) { + OV_ALLOCATOR_STATEMENT(_impl->deallocate(handle, bytes, alignment)); +} +bool Allocator::operator==(const Allocator& other) const { + OV_ALLOCATOR_STATEMENT({ + if (_impl == other._impl) { + return true; + } + return _impl->is_equal(*other._impl); + }); +} + +bool Allocator::operator!() const noexcept { + return !_impl; +} + +Allocator::operator bool() const noexcept { + return (!!_impl); +} + +} // namespace runtime +} // namespace ov diff --git a/ngraph/core/src/runtime/blob_allocator.hpp b/ngraph/core/src/runtime/blob_allocator.hpp new file mode 100644 index 00000000000..33378e38890 --- /dev/null +++ b/ngraph/core/src/runtime/blob_allocator.hpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "ie_allocator.hpp" // IE public header +#include "openvino/core/except.hpp" +#include "openvino/runtime/allocator.hpp" +#include "openvino/runtime/common.hpp" +#include "system_allocator.hpp" // IE private header + +namespace InferenceEngine { +struct BlobAllocator : public IAllocator { + BlobAllocator(const std::shared_ptr& impl) : _impl{impl} {} + + void* lock(void* handle, LockOp) noexcept override { + return handle; + } + + void unlock(void*) noexcept override {} + + void* alloc(const size_t size) noexcept override { + try { + return size_map.emplace(_impl->allocate(size), size).first->first; + } catch (...) 
{ + return nullptr; + } + } + + bool free(void* handle) noexcept override { + try { + auto size = size_map.at(handle); + size_map.erase(handle); + _impl->deallocate(handle, size); + return true; + } catch (...) { + return false; + } + } + + std::shared_ptr _impl; + std::unordered_map size_map; +}; +} // namespace InferenceEngine + +namespace ov { +struct BlobAllocator : public runtime::AllocatorImpl { + BlobAllocator(const std::shared_ptr& impl = std::make_shared()) + : _impl{impl} {} + + void* allocate(const size_t bytes, const size_t alignment) override { + OPENVINO_ASSERT(alignment == alignof(max_align_t), + "Aligned deallocation is not implemented. alignment: ", + alignment); + auto handle = _impl->alloc(bytes); + OPENVINO_ASSERT(handle != nullptr, "Can not allocate storage for at least ", bytes, " bytes"); + return handle; + } + + void deallocate(void* handle, const size_t bytes, const size_t alignment) override { + OPENVINO_ASSERT(bytes == 0, "Sized deallocation is not implemented. bytes: ", bytes); + OPENVINO_ASSERT(alignment == alignof(max_align_t), + "Aligned deallocation is not implemented. alignment: ", + alignment); + auto res = _impl->free(handle); + OPENVINO_ASSERT(res != false, "Can not deallocate storage"); + } + + bool is_equal(const AllocatorImpl& other) const override { + auto other_blob_allocator = dynamic_cast(&other); + if (other_blob_allocator == nullptr) + return false; + if (other_blob_allocator->_impl == _impl) + return true; + auto other_system_memory_allocator = + dynamic_cast(other_blob_allocator->_impl.get()); + auto system_allocator = dynamic_cast(_impl.get()); + if (system_allocator != nullptr && other_system_memory_allocator != nullptr) + return true; + return false; + } + + std::shared_ptr _impl; +}; +} // namespace ov diff --git a/ngraph/core/src/runtime/ov_tensor.cpp b/ngraph/core/src/runtime/ov_tensor.cpp new file mode 100644 index 00000000000..a003633e1b7 --- /dev/null +++ b/ngraph/core/src/runtime/ov_tensor.cpp @@ -0,0 +1,137 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "blob_factory.hpp" // IE private header +#include "ie_ngraph_utils.hpp" // IE private header +#include "openvino/core/except.hpp" +#include "openvino/runtime/tensor.hpp" +#include "runtime/blob_allocator.hpp" + +namespace ov { +namespace runtime { + +#define OV_TENSOR_STATEMENT(...) \ + OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized."); \ + try { \ + __VA_ARGS__; \ + } catch (const std::exception& ex) { \ + throw ov::Exception(ex.what()); \ + } catch (...) { \ + OPENVINO_ASSERT(false, "Unexpected exception"); \ + } + +Tensor::Tensor(const std::shared_ptr& so, const std::shared_ptr& impl) : _so{so}, _impl{impl} { + OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized."); +} + +Tensor::Tensor(const element::Type element_type, const Shape& shape, const Allocator& allocator) { + OPENVINO_ASSERT(allocator, "Allocator was not initialized"); + auto allocator_impl = dynamic_cast(allocator._impl.get()); + auto blob_allocator = + (allocator_impl != nullptr) ? 
allocator_impl->_impl : std::make_shared(allocator._impl); + _impl = make_blob_with_precision( + {ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByRank(shape.size())}, + blob_allocator); + _impl->allocate(); +} + +Tensor::Tensor(const element::Type element_type, + const Shape& shape, + void* host_ptr, + const size_t size, + const Strides& strides) { + ie::SizeVector blk_order(shape.size()); + std::iota(blk_order.begin(), blk_order.end(), 0); + ie::SizeVector dim_offset(shape.size(), 0); + ie::SizeVector blk_strides; + if (strides.empty()) { + blk_strides = ov::row_major_strides(shape); + } else { + OPENVINO_ASSERT(shape.size() == strides.size(), + "shape.size() (", + shape.size(), + ") must be equal to strides.size() (", + strides.size(), + ")"); + blk_strides.assign(strides.begin(), strides.end()); + } + + try { + _impl = make_blob_with_precision(ie::details::convertPrecision(element_type), + ie::TensorDesc{ie::details::convertPrecision(element_type), + shape, + ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}, + host_ptr, + size); + } catch (const std::exception& ex) { + throw ov::Exception(ex.what()); + } catch (...) { + OPENVINO_ASSERT(false, "Unexpected exception"); + } +} + +Tensor::Tensor(const Tensor& owner, const Coordinate& begin, const Coordinate& end) : _so{owner._so} { + try { + _impl = owner._impl->createROI(begin, end); + } catch (const std::exception& ex) { + throw ov::Exception(ex.what()); + } catch (...) { + OPENVINO_ASSERT(false, "Unexpected exception"); + } +} + +element::Type Tensor::get_element_type() const { + OV_TENSOR_STATEMENT(return ie::details::convertPrecision(_impl->getTensorDesc().getPrecision())); +} + +void Tensor::set_shape(const ov::Shape& shape) { + OV_TENSOR_STATEMENT(_impl->setShape({shape.begin(), shape.end()})); +} + +Shape Tensor::get_shape() const { + OV_TENSOR_STATEMENT({ return _impl->getTensorDesc().getDims(); }); +} + +Strides Tensor::get_strides() const { + OV_TENSOR_STATEMENT(return _impl->getTensorDesc().getBlockingDesc().getStrides();); +} + +size_t Tensor::get_size() const { + OV_TENSOR_STATEMENT(return ov::shape_size(get_shape())); +} + +size_t Tensor::get_byte_size() const { + OV_TENSOR_STATEMENT(return ov::shape_size(get_shape()) * get_element_type().size()); +} + +void* Tensor::data(const element::Type element_type) const { + OV_TENSOR_STATEMENT({ + if (element_type != element::undefined) { + OPENVINO_ASSERT( + element::fundamental_type_for(element_type) == element::fundamental_type_for(get_element_type()), + get_element_type(), + " tensor fundamental element type is ", + element::fundamental_type_for(get_element_type()), + ", but it casted to ", + element_type, + " with fundamental element type", + element::fundamental_type_for(element_type)); + } + return _impl->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size() + + InferenceEngine::as(_impl)->rmap().as(); + }); +} + +bool Tensor::operator!() const noexcept { + return !_impl; +} + +Tensor::operator bool() const noexcept { + return (!!_impl); +} + +} // namespace runtime +} // namespace ov diff --git a/ngraph/core/src/type/element_type.cpp b/ngraph/core/src/type/element_type.cpp index 99168f5e7b7..fd12c5e4f24 100644 --- a/ngraph/core/src/type/element_type.cpp +++ b/ngraph/core/src/type/element_type.cpp @@ -195,6 +195,46 @@ template <> Type from() { return Type_t::bf16; } + +Type fundamental_type_for(const Type& type) { + switch (type) { + case Type_t::boolean: + return from::value_type>(); + case Type_t::bf16: 
+ return from::value_type>(); + case Type_t::f16: + return from::value_type>(); + case Type_t::f32: + return from::value_type>(); + case Type_t::f64: + return from::value_type>(); + case Type_t::i4: + return from::value_type>(); + case Type_t::i8: + return from::value_type>(); + case Type_t::i16: + return from::value_type>(); + case Type_t::i32: + return from::value_type>(); + case Type_t::i64: + return from::value_type>(); + case Type_t::u1: + return from::value_type>(); + case Type_t::u4: + return from::value_type>(); + case Type_t::u8: + return from::value_type>(); + case Type_t::u16: + return from::value_type>(); + case Type_t::u32: + return from::value_type>(); + case Type_t::u64: + return from::value_type>(); + default: + OPENVINO_UNREACHABLE("Unsupported Data type: ", type); + } +} + } // namespace element } // namespace ov diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 7d1e91ac2e8..0330baf4b44 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -77,6 +77,8 @@ set(SRC op_eval/transpose.cpp op_eval/variadic_split.cpp opset1.cpp + ov_default_allocator_test.cpp + ov_tensor_test.cpp partial_shape.cpp pass_config.cpp pass_manager.cpp @@ -371,6 +373,11 @@ set(SRC util.cpp ) +if(SUGGEST_OVERRIDE_SUPPORTED) + set_source_files_properties(ov_tensor_test.cpp + PROPERTIES COMPILE_OPTIONS -Wno-suggest-override) +endif() + set_source_files_properties(includes.cpp PROPERTIES COMPILE_DEFINITIONS NGRAPH_INCLUDES="${PROJECT_SOURCE_DIR}/src/ngraph") diff --git a/ngraph/test/ov_default_allocator_test.cpp b/ngraph/test/ov_default_allocator_test.cpp new file mode 100644 index 00000000000..22c9c6794ca --- /dev/null +++ b/ngraph/test/ov_default_allocator_test.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include + +#include "openvino/core/except.hpp" +#include "openvino/runtime/allocator.hpp" + +using OVDefaultAllocatorTest = ::testing::Test; + +TEST_F(OVDefaultAllocatorTest, notThrowOnZeroSize) { + ov::runtime::Allocator allocator; + void* ptr = nullptr; + ASSERT_NO_THROW(ptr = allocator.allocate(0)); + ASSERT_NO_THROW(allocator.deallocate(ptr)); +} + +TEST_F(OVDefaultAllocatorTest, canAllocateAndDeallocate) { + ov::runtime::Allocator allocator; + void* ptr = nullptr; + ASSERT_NO_THROW(ptr = allocator.allocate(64)); + ASSERT_NO_THROW(allocator.deallocate(ptr)); +} + +TEST_F(OVDefaultAllocatorTest, alignedAllocationIsNotImplemented) { + ov::runtime::Allocator allocator; + ASSERT_THROW(allocator.allocate(64, 64), ov::Exception); +} + +TEST_F(OVDefaultAllocatorTest, sizedAndAlignedDeallocationAreNotImplemented) { + ov::runtime::Allocator allocator; + void* ptr = nullptr; + ASSERT_NO_THROW(ptr = allocator.allocate(64)); + ASSERT_THROW(allocator.deallocate(ptr, 64), ov::Exception); + ASSERT_THROW(allocator.deallocate(ptr, 0, 64), ov::Exception); + ASSERT_NO_THROW(allocator.deallocate(ptr)); +} + +TEST_F(OVDefaultAllocatorTest, defaultAllocatorsAreEqual) { + ov::runtime::Allocator allocator0, allocator1; + ASSERT_TRUE(allocator0 == allocator1); + void* ptr = nullptr; + ASSERT_NO_THROW(ptr = allocator0.allocate(64)); + ASSERT_NO_THROW(allocator1.deallocate(ptr)); + ASSERT_NO_THROW(ptr = allocator1.allocate(64)); + ASSERT_NO_THROW(allocator0.deallocate(ptr)); +} + +TEST_F(OVDefaultAllocatorTest, canAllocate10KMemory) { + ov::runtime::Allocator allocator; + // large block such as 10k will result in sigsegv if not allocated + void* handle = allocator.allocate(10000); + char* ptr = 
+    ptr[9999] = 11;
+    EXPECT_EQ(ptr[9999], 11);
+    allocator.deallocate(handle);
+}
\ No newline at end of file
diff --git a/ngraph/test/ov_tensor_test.cpp b/ngraph/test/ov_tensor_test.cpp
new file mode 100644
index 00000000000..ee19938ada1
--- /dev/null
+++ b/ngraph/test/ov_tensor_test.cpp
@@ -0,0 +1,196 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <openvino/core/except.hpp>
+#include <openvino/core/shape.hpp>
+
+#include "openvino/runtime/allocator.hpp"
+#include "openvino/runtime/tensor.hpp"
+
+using OVTensorTest = ::testing::Test;
+
+TEST_F(OVTensorTest, canCreateTensor) {
+    ov::Shape shape = {4, 3, 2};
+    ov::runtime::Tensor t{ov::element::f32, shape};
+    const std::size_t totalSize = ov::shape_size(shape);
+    ASSERT_EQ(totalSize, t.get_size());
+    ASSERT_NE(nullptr, t.data());
+    ASSERT_EQ(ov::element::f32, t.get_element_type());
+    ASSERT_EQ(shape, t.get_shape());
+    ASSERT_NE(shape, t.get_strides());
+    ASSERT_EQ(ov::Strides({6, 2, 1}), t.get_strides());
+    ASSERT_EQ(ov::element::f32.size() * totalSize, t.get_byte_size());
+    ASSERT_THROW(t.data(ov::element::i64), ov::Exception);
+    ASSERT_THROW(t.data<int32_t>(), ov::Exception);
+}
+
+TEST_F(OVTensorTest, operators) {
+    ov::runtime::Tensor t;
+    ASSERT_FALSE(t);
+    ASSERT_TRUE(!t);
+}
+
+class OVMockAllocator : public ov::runtime::AllocatorImpl {
+public:
+    MOCK_METHOD(void*, allocate, (size_t, size_t), ());
+    MOCK_METHOD(void, deallocate, (void*, size_t, size_t), ());                           // NOLINT(readability/casting)
+    MOCK_METHOD(bool, is_equal, (const ov::runtime::AllocatorImpl&), (const, noexcept));  // NOLINT(readability/casting)
+};
+
+TEST_F(OVTensorTest, canCreateTensorUsingMockAllocator) {
+    ov::Shape shape = {1, 2, 3};
+    auto allocator = std::make_shared<OVMockAllocator>();
+
+    EXPECT_CALL(*allocator, allocate(::testing::_, ::testing::_))
+        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
+    EXPECT_CALL(*allocator, deallocate(::testing::_, ::testing::_, ::testing::_)).Times(1);
+
+    { ov::runtime::Tensor t{ov::element::f32, shape, ov::runtime::Allocator{allocator}}; }
+}
+
+TEST_F(OVTensorTest, canAccessExternalData) {
+    ov::Shape shape = {1, 1, 3};
+    float data[] = {5.f, 6.f, 7.f};
+    ov::runtime::Tensor t{ov::element::f32, shape, data, 3};
+    {
+        float* ptr = t.data<float>();
+        ASSERT_EQ(ptr[2], 7);
+        ASSERT_EQ(data, t.data(ov::element::f32));
+        ASSERT_EQ(data, ptr);
+        ASSERT_THROW(t.data<int32_t>(), ov::Exception);
+        ASSERT_EQ(ov::row_major_strides(shape), t.get_strides());
+        ASSERT_EQ(ov::shape_size(shape), t.get_size());
+        ASSERT_EQ(ov::shape_size(shape) * ov::element::f32.size(), t.get_byte_size());
+    }
+}
+
+TEST_F(OVTensorTest, canAccessExternalDataWithStrides) {
+    ov::Shape shape = {2, 3};
+    float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
+    ov::runtime::Tensor t{ov::element::f32, shape, data, 8, {4, 1}};
+    {
+        ASSERT_EQ((ov::Shape{2, 3}), t.get_shape());
+        float* ptr = t.data<float>();
+        ASSERT_EQ(ptr[5], 42);
+    }
+}
+
+TEST_F(OVTensorTest, cannotCreateTensorWithExternalNullptr) {
+    ov::Shape shape = {2, 3};
+    ASSERT_THROW(ov::runtime::Tensor(ov::element::f32, shape, nullptr), ov::Exception);
+}
+
+TEST_F(OVTensorTest, cannotCreateTensorWithWrongStrides) {
+    ov::Shape shape = {2, 3};
+    float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
+    ASSERT_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, 8, {4, 1, 2}), ov::Exception);
+}
+
+TEST_F(OVTensorTest, saveDimsAndSizeAfterMove) {
+    ov::Shape shape = {1, 2, 3};
+    ov::runtime::Tensor t{ov::element::f32, shape};
+
+    ov::runtime::Tensor new_tensor(std::move(t));
+
+    ASSERT_EQ(shape, new_tensor.get_shape());
+    ASSERT_EQ(ov::element::f32, new_tensor.get_element_type());
+    ASSERT_EQ(ov::row_major_strides(shape), new_tensor.get_strides());
+
+    ASSERT_THROW(t.get_size(), ov::Exception);
+    ASSERT_THROW(t.get_element_type(), ov::Exception);
+    ASSERT_THROW(t.get_byte_size(), ov::Exception);
+    ASSERT_THROW(t.get_strides(), ov::Exception);
+    ASSERT_THROW(t.get_shape(), ov::Exception);
+    ASSERT_THROW(t.set_shape({}), ov::Exception);
+    ASSERT_THROW(t.data(), ov::Exception);
+    ASSERT_THROW(t.data<float>(), ov::Exception);
+}
+
+// SetShape
+TEST_F(OVTensorTest, canSetShape) {
+    ov::runtime::Tensor t{ov::element::f32, {1, 2, 3}};
+    const ov::Shape newShape({4, 5, 6});
+    ASSERT_EQ(t.get_shape(), (ov::Shape{1, 2, 3}));
+    ASSERT_NO_THROW(t.set_shape({4, 5, 6}));
+    ASSERT_EQ(newShape, t.get_shape());
+    ASSERT_EQ(ov::row_major_strides(newShape), t.get_strides());
+
+    // check that set_shape on a copy also changes the original Tensor (both share one implementation)
+    {
+        ov::runtime::Tensor t2 = t;
+        t2.set_shape(newShape);
+        ASSERT_EQ(newShape, t.get_shape());
+        ASSERT_EQ(t2.get_shape(), t.get_shape());
+    }
+}
+
+TEST_F(OVTensorTest, makeRangeRoiTensor) {
+    ov::runtime::Tensor t{ov::element::i8, {1, 3, 6, 5}};  // RGBp picture of size (WxH) = 5x6
+    ov::runtime::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
+    ov::Shape ref_shape = {1, 3, 4, 2};
+    ptrdiff_t ref_offset = 7;
+    ov::Strides ref_strides = {90, 30, 5, 1};
+    ASSERT_EQ(roi_tensor.get_shape(), ref_shape);
+    ASSERT_EQ(roi_tensor.data<int8_t>() - t.data<int8_t>(), ref_offset);
+    ASSERT_EQ(reinterpret_cast<uint8_t*>(roi_tensor.data()) - reinterpret_cast<uint8_t*>(t.data()), ref_offset);
+    ASSERT_EQ(roi_tensor.get_strides(), t.get_strides());
+    ASSERT_EQ(ref_strides, roi_tensor.get_strides());
+    ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
+}
+
+TEST_F(OVTensorTest, makeRangeRoiTensorInt4) {
+    ov::runtime::Tensor t{ov::element::i4, {1, 6, 5, 3}};  // RGB picture of size (WxH) = 5x6
+    ov::runtime::Tensor roi_tensor{t, {0, 1, 2, 0}, {1, 5, 4, 3}};
+    ov::Shape ref_shape = {1, 4, 2, 3};
+    ptrdiff_t ref_offset = 21;
+    ov::Strides ref_strides = {90, 15, 3, 1};
+    ASSERT_EQ(roi_tensor.get_shape(), ref_shape);
+    ASSERT_EQ(roi_tensor.data<int8_t>() - t.data<int8_t>(), ref_offset);
+    ASSERT_EQ(roi_tensor.get_strides(), ref_strides);
+    ASSERT_EQ(roi_tensor.get_strides(), t.get_strides());
+    ASSERT_EQ(ref_strides, roi_tensor.get_strides());
+    ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
+}
+
+TEST_F(OVTensorTest, makeRangeRoiBlobWrongSize) {
+    ov::runtime::Tensor t{ov::element::f32, {1, 3, 4, 4}};
+    ASSERT_THROW((ov::runtime::Tensor{t, {0, 0, 1, 1}, {1, 3, 5, 5}}), ov::Exception);
+    ASSERT_THROW((ov::runtime::Tensor{t, {0, 0, 1, 1, 3}, {1, 3, 4, 4}}), ov::Exception);
+}
+
+TEST_F(OVTensorTest, readRangeRoiBlob) {
+    ov::runtime::Tensor t{ov::element::i32, {1, 3, 4, 8}};
+    {
+        const auto origPtr = t.data<int32_t>();
+        ASSERT_NE(nullptr, origPtr);
+        for (size_t i = 0; i < t.get_size(); ++i) {
+            origPtr[i] = i;
+        }
+    }
+    ov::runtime::Tensor roi_tensor{t, {0, 0, 2, 4}, {1, 3, 4, 8}};
+    ASSERT_NE(false, static_cast<bool>(roi_tensor));
+    {
+        auto roi = roi_tensor.data<int32_t>();
+        ASSERT_NE(nullptr, roi);
+        auto strides = roi_tensor.get_strides();
+        for (size_t n = 0; n < roi_tensor.get_shape()[0]; ++n) {
+            for (size_t c = 0; c < roi_tensor.get_shape()[1]; ++c) {
+                for (size_t h = 0; h < roi_tensor.get_shape()[2]; ++h) {
+                    for (size_t w = 0; w < roi_tensor.get_shape()[3]; ++w) {
+                        auto actual = roi[w * strides[3] + h * strides[2] + c * strides[1] + n * strides[0]];
+                        auto expected = t.data<int32_t>()[(w + 4) * strides[3] + (h + 2) * strides[2] +
+                                                          (c + 0) * strides[1] + (n + 0) * strides[0]];
+                        ASSERT_EQ(expected, actual) << ov::Shape{n, c, h, w};
+                    }
+                }
+            }
+        }
+    }
+}
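
Reviewer note, not part of the patch: a minimal end-to-end sketch of the ov::runtime::Tensor API that the tests above exercise. It assumes only the openvino/runtime/tensor.hpp header added by this patch; main and all variable names are illustrative.

    #include <iostream>

    #include "openvino/runtime/tensor.hpp"

    int main() {
        // Tensor that owns its memory, allocated through the default allocator.
        ov::runtime::Tensor t{ov::element::f32, {2, 3}};
        float* data = t.data<float>();  // typed access; throws ov::Exception on element-type mismatch
        for (size_t i = 0; i < t.get_size(); ++i) {
            data[i] = static_cast<float>(i);
        }

        // Zero-copy tensor over caller-owned memory; strides default to row-major.
        float raw[6] = {};
        ov::runtime::Tensor view{ov::element::f32, {2, 3}, raw, 6};
        (void)view;

        // ROI tensor sharing storage with its owner: rows [0, 2), columns [1, 3).
        ov::runtime::Tensor roi{t, {0, 1}, {2, 3}};
        std::cout << "roi elements: " << roi.get_size() << ", bytes: " << roi.get_byte_size() << std::endl;
        return 0;
    }

The three constructors shown correspond to the allocating, external-memory, and ROI cases covered by canCreateTensor, canAccessExternalData, and makeRangeRoiTensor respectively.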
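A second sketch, also illustrative rather than part of the patch: why fundamental_type_for() guards Tensor::data(). Low-precision element types are stored in a wider fundamental C++ type, and data<T>() is accepted only when T's fundamental type matches the tensor's. The example assumes data<T>() maps T through element::from<T>(), which the tests above suggest but the header does not show here; fundamental_type_demo is a hypothetical name.

    #include <cstdint>

    #include "openvino/runtime/tensor.hpp"

    void fundamental_type_demo() {  // hypothetical helper, for illustration only
        // i4 values are packed into int8_t storage, so fundamental_type_for(i4) == i8
        // and viewing the packed bytes through int8_t passes the check in Tensor::data().
        ov::runtime::Tensor packed{ov::element::i4, {2, 4}};
        int8_t* bytes = packed.data<int8_t>();
        (void)bytes;
        // packed.data<float>() would throw ov::Exception: f32's fundamental type (f32)
        // differs from i4's (i8).
    }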