Tensor API in ngraph (#7632)

* Added OpenVINO Tensor API

* Tensor API improvements

* Moved Tensor to ngraph

* Moved Tensor tests

* Fixed docs and code style

* Trying to fix Windows

* Fixed clang-format

* Moved Tensor to runtime namespace

* Fixed compilation

* Fixed clang-format

* Fixed tests in debug

Co-authored-by: apankratovantonp <anton.pankratov@intel.com>
Ilya Lavrenov 2021-09-27 09:57:26 +03:00 committed by GitHub
parent 95f8544aa6
commit e87cc3fa9e
33 changed files with 1184 additions and 160 deletions

View File

@ -20,7 +20,7 @@ file (GLOB LIBRARY_SRC
)
# TODO: WA for OneHot pass usage in reshape
-set(LEGACY_SRC_ROOT "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src/")
+set(LEGACY_SRC_ROOT "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src")
set(LEGACY_LIBRARY_SHARED_SRCS
    "${LEGACY_SRC_ROOT}/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp"
    "${LEGACY_SRC_ROOT}/transformations/convert_opset1_to_legacy/convert_nms_5_to_legacy.cpp"

View File

@ -18,12 +18,13 @@
#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
-#include "ie_extension.h"
#include "ie_icnn_network.hpp"
#include "ngraph/function.hpp"
namespace InferenceEngine {
+class IExtension;
/**
 * @brief This class contains all the information about the Neural Network and the related binary information
 */
@ -52,7 +53,8 @@ public:
     * @param network Pointer to the ngraph::Function object
     * @param exts Vector of pointers to IE extension objects
     */
-    explicit CNNNetwork(const std::shared_ptr<ngraph::Function>& network, const std::vector<IExtensionPtr>& exts = {});
+    explicit CNNNetwork(const std::shared_ptr<ngraph::Function>& network,
+                        const std::vector<std::shared_ptr<IExtension>>& exts = {});
    /**
     * @brief Gets the network output Data node information. The received info is stored in the given Data node.

View File

@ -11,7 +11,7 @@
#include <map>
#include <memory>
-#include <ngraph/ngraph.hpp>
+#include <ngraph/partial_shape.hpp>
#include <string>
#include <vector>

View File

@ -15,11 +15,11 @@
#include "openvino/runtime/common.hpp"
#include "openvino/runtime/profiling_info.hpp"
+#include "openvino/runtime/tensor.hpp"
#include "openvino/runtime/variable_state.hpp"
namespace InferenceEngine {
class IInferRequestInternal;
-class Blob;
} // namespace InferenceEngine
namespace ov {
@ -55,20 +55,20 @@ public:
     * @brief Sets input/output data to infer
     *
     * @note Memory allocation does not happen
-     * @param name Name of input or output blob.
-     * @param data Reference to input or output blob. The type of a blob must match the network input precision and
-     * size.
+     * @param name Name of input or output tensor.
+     * @param tensor Reference to input or output tensor. The type of a tensor must match the network input precision
+     * and size.
     */
-    void set_blob(const std::string& name, const std::shared_ptr<ie::Blob>& data);
+    void set_tensor(const std::string& name, const Tensor& tensor);
    /**
     * @brief Gets input/output data for inference
     *
     * @note Memory allocation does not happen
-     * @param name A name of Blob to get
-     * @return A shared pointer to a Blob with a name @p name. If a blob is not found, an exception is thrown.
+     * @param name A name of tensor to get
+     * @return A Tensor with a name @p name. If a tensor is not found, an exception is thrown.
     */
-    std::shared_ptr<ie::Blob> get_blob(const std::string& name);
+    Tensor get_tensor(const std::string& name);
    /**
     * @brief Infers specified input(s) in synchronous mode
@ -91,31 +91,6 @@ public:
     */
    std::vector<ProfilingInfo> get_profiling_info() const;
-    /**
-     * @brief Sets input data to infer
-     *
-     * @note Memory allocation doesn't happen
-     * @param inputs A reference to a map of input blobs accessed by input names.
-     * The type of Blob must correspond to the network input precision and size.
-     */
-    void set_input(const std::map<std::string, std::shared_ptr<ie::Blob>>& inputs);
-    /**
-     * @brief Sets data that will contain result of the inference
-     *
-     * @note Memory allocation doesn't happen
-     * @param results - a reference to a map of result blobs accessed by output names.
-     * The type of Blob must correspond to the network output precision and size.
-     */
-    void set_output(const std::map<std::string, std::shared_ptr<ie::Blob>>& results);
-    /**
-     * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
-     *
-     * @param batch new batch size to be used by all the following inference calls for this request.
-     */
-    void set_batch(const int batch);
    /**
     * @brief Start inference of specified input(s) in asynchronous mode
     *
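Example (not part of the diff): a minimal sketch of the reworked request API, assuming an already created ov::runtime::InferRequest and illustrative tensor names "input"/"output".

#include <algorithm>
#include "openvino/runtime/infer_request.hpp"
#include "openvino/runtime/tensor.hpp"

// `request` is assumed to be created elsewhere (e.g. from an executable network);
// the tensor names and the input shape below are illustrative.
void run_once(ov::runtime::InferRequest& request) {
    ov::runtime::Tensor input{ov::element::f32, ov::Shape{1, 3, 224, 224}};
    std::fill_n(input.data<float>(), input.get_size(), 0.0f);

    request.set_tensor("input", input);  // binds the tensor, no memory allocation happens
    request.infer();                     // synchronous inference

    ov::runtime::Tensor output = request.get_tensor("output");
    const float* results = output.data<float>();
    (void)results;  // post-process results here
}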

View File

@ -19,18 +19,17 @@
#include <utility>
#include <vector>
-#include "ie_api.h"
#include "openvino/core/except.hpp"
+#include "openvino/runtime/common.hpp"
namespace ov {
namespace runtime {
/**
 * @brief This class represents an object to work with different parameters
 *
 */
-class INFERENCE_ENGINE_API_CLASS(Parameter) {
+class OPENVINO_RUNTIME_API Parameter {
public:
    /**
     * @brief Default constructor
@ -342,19 +341,18 @@ private:
using ParamMap = std::map<std::string, Parameter>;
#ifdef __ANDROID__
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<int>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<bool>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<float>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<uint32_t>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<std::string>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<unsigned long>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<std::vector<int>>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<std::vector<std::string>>);
-extern template struct INFERENCE_ENGINE_API_CLASS(ov::runtime::Parameter::RealData<std::vector<unsigned long>>);
-extern template struct INFERENCE_ENGINE_API_CLASS(
-    ov::runtime::Parameter::RealData<std::tuple<unsigned int, unsigned int>>);
-extern template struct INFERENCE_ENGINE_API_CLASS(
-    ov::runtime::Parameter::RealData<std::tuple<unsigned int, unsigned int, unsigned int>>);
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<int>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<bool>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<float>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<uint32_t>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<std::string>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<unsigned long>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<std::vector<int>>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<std::vector<std::string>>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<std::vector<unsigned long>>;
+extern template struct OPENVINO_RUNTIME_API ov::runtime::Parameter::RealData<std::tuple<unsigned int, unsigned int>>;
+extern template struct OPENVINO_RUNTIME_API
+    ov::runtime::Parameter::RealData<std::tuple<unsigned int, unsigned int, unsigned int>>;
#endif
} // namespace runtime

View File

@ -18,6 +18,7 @@
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/common.hpp"
#include "openvino/runtime/parameter.hpp"
+#include "openvino/runtime/remote_tensor.hpp"
namespace InferenceEngine {
class RemoteBlob;
@ -108,7 +109,7 @@ public:
    /**
     * @brief Returns name of the device on which underlying object is allocated.
     * Abstract method.
-     * @return A device name string in the same format as that in plugin metric.
+     * @return A device name string in fully specified format `<device_name>[.<device_id>[.<tile_id>]]`.
     */
    std::string get_device_name() const;
@ -116,21 +117,19 @@
     * @brief Allocates memory tensor in device memory or wraps user-supplied memory handle
     * using the specified tensor description and low-level device-specific parameters.
     * Returns a pointer to the object which implements RemoteTensor interface.
-     * @param element_type Defines the element type of the tensor
+     * @param type Defines the element type of the tensor
     * @param shape Defines the shape of the tensor
     * @param params Map of the low-level tensor object parameters.
     * Abstract method.
     * @return A pointer to plugin object that implements RemoteTensor interface.
     */
-    std::shared_ptr<ie::RemoteBlob> create_blob(element::Type element_type,
-                                                const Shape& shape,
-                                                const ParamMap& params = {});
+    RemoteTensor create_tensor(const element::Type& type, const Shape& shape, const ParamMap& params = {});
    /**
     * @brief Returns a map of device-specific parameters required for low-level
     * operations with underlying object.
     * Parameters include device/context handles, access flags,
-     * etc. Contents of the map returned depend on remote execution context that is
+     * etc. Content of the returned map depends on remote execution context that is
     * currently set on the device (working scenario).
     * Abstract method.
     * @return A map of name/parameter elements.
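Example (not part of the diff): a sketch of allocating device memory through the new interface, assuming a remote context obtained from a plugin and the header path openvino/runtime/remote_context.hpp; the returned parameter names are plugin-specific.

#include <iostream>
#include "openvino/runtime/remote_context.hpp"

// `context` is assumed to be obtained from a plugin (e.g. a GPU context); the shape is illustrative.
void allocate_on_device(ov::runtime::RemoteContext& context) {
    ov::runtime::RemoteTensor device_tensor =
        context.create_tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});

    std::cout << "allocated on: " << device_tensor.get_device_name() << "\n";
    for (const auto& param : device_tensor.get_params()) {
        std::cout << "  " << param.first << "\n";  // low-level handles; names depend on the plugin
    }
}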

View File

@ -0,0 +1,104 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief This is a header file for the OpenVINO Runtime tensor API
*
* @file openvino/runtime/remote_tensor.hpp
*/
#pragma once
#include "ie_remote_blob.hpp"
#include "openvino/runtime/parameter.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ov {
namespace runtime {
class RemoteContext;
/**
* @brief Remote memory access and interpretation API
*
* It can throw exceptions safely for the application, where it is properly handled.
*/
class OPENVINO_RUNTIME_API RemoteTensor : public Tensor {
using Tensor::Tensor;
friend class ov::runtime::RemoteContext;
public:
void* data(const element::Type) = delete;
template <typename T>
T* data() = delete;
/**
* @brief Returns a map of device-specific parameters required for low-level
* operations with underlying object.
* Parameters include device/context/surface/buffer handles, access flags,
* etc. Content of the returned map depends on remote execution context that is
* currently set on the device (working scenario).
* Abstract method.
* @return A map of name/parameter elements.
*/
runtime::ParamMap get_params() const;
/**
* @brief Returns name of the device on which underlying object is allocated.
* Abstract method.
* @return A device name string in fully specified format `<device_name>[.<device_id>[.<tile_id>]]`.
*/
std::string get_device_name() const;
/**
* @brief Checks if the RemoteTensor object can be cast to the type T*
*
* @tparam T Type to be checked. Must represent a class derived from the RemoteTensor
* @return true if this object can be dynamically cast to the type T*. Otherwise, false
*/
template <typename T,
typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
typename std::enable_if<std::is_base_of<ie::RemoteBlob, T>::value, int>::type = 0>
bool is() noexcept {
return dynamic_cast<T*>(_impl.get()) != nullptr;
}
/**
* @brief Checks if the RemoteTensor object can be cast to the type const T*
*
* @tparam T Type to be checked. Must represent a class derived from the RemoteTensor
* @return true if this object can be dynamically cast to the type const T*. Otherwise, false
*/
template <typename T>
bool is() const noexcept {
return dynamic_cast<const T*>(_impl.get()) != nullptr;
}
/**
* @brief Casts this RemoteTensor object to the type T*.
*
* @tparam T Type to cast to. Must represent a class derived from the RemoteTensor
* @return Raw pointer to the object of the type T or nullptr on error
*/
template <typename T,
typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
typename std::enable_if<std::is_base_of<ie::RemoteBlob, T>::value, int>::type = 0>
T* as() noexcept {
return dynamic_cast<T*>(_impl.get());
}
/**
* @brief Casts this RemoteTensor object to the type const T*.
*
* @tparam T Type to cast to. Must represent a class derived from the RemoteTensor
* @return Raw pointer to the object of the type const T or nullptr on error
*/
template <typename T,
typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
typename std::enable_if<std::is_base_of<ie::RemoteBlob, T>::value, int>::type = 0>
const T* as() const noexcept {
return dynamic_cast<const T*>(_impl.get());
}
};
} // namespace runtime
} // namespace ov
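Example (not part of the diff): a sketch of the checked-cast helpers declared above. `PluginBlob` is a stand-in for a plugin-provided InferenceEngine::RemoteBlob subclass; the concrete type depends on the plugin in use.

#include "openvino/runtime/remote_tensor.hpp"

// Template keeps the sketch self-contained; instantiate it with a real plugin blob type.
template <typename PluginBlob>
void use_plugin_handle(ov::runtime::RemoteTensor& remote) {
    if (remote.is<PluginBlob>()) {                        // type check, never throws
        PluginBlob* plugin_blob = remote.as<PluginBlob>();  // nullptr on mismatch
        // ... access the plugin-specific handles of plugin_blob here ...
        (void)plugin_blob;
    }
}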

View File

@ -14,10 +14,10 @@
#include <string>
#include "openvino/runtime/common.hpp"
+#include "openvino/runtime/tensor.hpp"
namespace InferenceEngine {
class IVariableStateInternal;
-class Blob;
} // namespace InferenceEngine
namespace ov {
@ -65,13 +65,13 @@ public:
     * @brief Returns the value of the variable state.
     * @return A blob representing a state
     */
-    std::shared_ptr<const ie::Blob> get_state() const;
+    Tensor get_state() const;
    /**
     * @brief Sets the new state for the next inference.
     * @param state The current state to set
     */
-    void set_state(const std::shared_ptr<ie::Blob>& state);
+    void set_state(const Tensor& state);
};
} // namespace runtime
} // namespace ov
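Example (not part of the diff): a sketch of resetting variable states through tensors, assuming a request created from a stateful network and that query_state() returns the request's VariableState objects.

#include <cstring>
#include "openvino/runtime/infer_request.hpp"

// `request` is assumed to come from a network with ReadValue/Assign state pairs.
void zero_all_states(ov::runtime::InferRequest& request) {
    for (auto&& state : request.query_state()) {
        ov::runtime::Tensor value = state.get_state();  // current value as a host tensor
        ov::runtime::Tensor zeros{value.get_element_type(), value.get_shape()};
        std::memset(zeros.data(), 0, zeros.get_byte_size());  // assume an all-zero value is a valid reset
        state.set_state(zeros);                               // picked up by the next infer() call
    }
}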

View File

@ -8,13 +8,13 @@
#include <memory>
#include <string>
-#include "cpp/exception2status.hpp"
#include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp"
-#include "details/ie_so_loader.h"
#include "ie_infer_async_request_base.hpp"
+#include "ie_ngraph_utils.hpp"
#include "ie_remote_context.hpp"
#include "openvino/core/except.hpp"
#include "openvino/runtime/infer_request.hpp"
+#include "transformations/utils/utils.hpp"
namespace InferenceEngine {
@ -213,20 +213,23 @@ InferRequest::InferRequest(const std::shared_ptr<void>& so, const ie::IInferRequ
    OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized.");
}
-void InferRequest::set_blob(const std::string& name, const ie::Blob::Ptr& data) {
-    OV_INFER_REQ_CALL_STATEMENT(_impl->SetBlob(name, data);)
-}
+void InferRequest::set_tensor(const std::string& name, const Tensor& tensor){
+    OV_INFER_REQ_CALL_STATEMENT({ _impl->SetBlob(name, tensor._impl); })}
-ie::Blob::Ptr InferRequest::get_blob(const std::string& name) {
-    ie::Blob::Ptr blobPtr;
-    OV_INFER_REQ_CALL_STATEMENT(blobPtr = _impl->GetBlob(name);)
-    std::string error = "Internal error: blob with name `" + name + "` is not allocated!";
-    const bool remoteBlobPassed = blobPtr->is<ie::RemoteBlob>();
-    if (blobPtr == nullptr)
-        IE_THROW() << error;
-    if (!remoteBlobPassed && blobPtr->buffer() == nullptr)
-        IE_THROW() << error;
-    return blobPtr;
+Tensor InferRequest::get_tensor(const std::string& name) {
+    OV_INFER_REQ_CALL_STATEMENT({
+        auto blob = _impl->GetBlob(name);
+        const bool remoteBlobPassed = blob->is<ie::RemoteBlob>();
+        if (blob == nullptr) {
+            IE_THROW(NotAllocated) << "Internal tensor implementation with name `" << name << "` is not allocated!";
+        }
+        if (!remoteBlobPassed && blob->buffer() == nullptr) {
+            IE_THROW(NotAllocated) << "Internal tensor implementation with name `" << name << "` is not allocated!";
+        }
+        auto tensorDesc = blob->getTensorDesc();
+        auto dims = tensorDesc.getDims();
+        return {_so, blob};
+    })
}
void InferRequest::infer() {
@ -275,18 +278,6 @@ std::vector<ProfilingInfo> InferRequest::get_profiling_info() const {
    })
}
-void InferRequest::set_input(const ie::BlobMap& inputs) {
-    OV_INFER_REQ_CALL_STATEMENT(for (auto&& input : inputs) { _impl->SetBlob(input.first, input.second); })
-}
-void InferRequest::set_output(const ie::BlobMap& results) {
-    OV_INFER_REQ_CALL_STATEMENT(for (auto&& result : results) { _impl->SetBlob(result.first, result.second); })
-}
-void InferRequest::set_batch(const int batch) {
-    OV_INFER_REQ_CALL_STATEMENT(_impl->SetBatch(batch);)
-}
void InferRequest::start_async() {
    OV_INFER_REQ_CALL_STATEMENT(_impl->StartAsync();)
}

View File

@ -17,14 +17,16 @@
        __VA_ARGS__;                                    \
    } catch (const std::exception& ex) {                \
        throw ov::Exception(ex.what());                 \
+    } catch (...) {                                     \
+        OPENVINO_ASSERT(false, "Unexpected exception"); \
    }
namespace ov {
namespace runtime {
RemoteContext::RemoteContext(const std::shared_ptr<void>& so, const ie::RemoteContext::Ptr& impl)
-    : _so(so),
-      _impl(impl) {
+    : _so{so},
+      _impl{impl} {
    OPENVINO_ASSERT(_impl != nullptr, "RemoteContext was not initialized.");
}
@ -32,13 +34,15 @@ std::string RemoteContext::get_device_name() const {
    OV_REMOTE_CONTEXT_STATEMENT(return _impl->getDeviceName());
}
-std::shared_ptr<ie::RemoteBlob> RemoteContext::create_blob(element::Type type,
-                                                           const Shape& shape,
-                                                           const ie::ParamMap& params) {
-    ie::TensorDesc tensorDesc(ie::details::convertPrecision(type),
-                              shape,
-                              ie::TensorDesc::getLayoutByRank(shape.size()));
-    OV_REMOTE_CONTEXT_STATEMENT(return _impl->CreateBlob(tensorDesc, params));
+RemoteTensor RemoteContext::create_tensor(const element::Type& element_type,
+                                          const Shape& shape,
+                                          const ie::ParamMap& params) {
+    OV_REMOTE_CONTEXT_STATEMENT({
+        return {_so,
+                _impl->CreateBlob(
+                    {ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByRank(shape.size())},
+                    params)};
+    });
}
ie::ParamMap RemoteContext::get_params() const {

View File

@ -72,12 +72,12 @@ std::string VariableState::get_name() const {
    OV_VARIABLE_CALL_STATEMENT(return _impl->GetName());
}
-ie::Blob::CPtr VariableState::get_state() const {
-    OV_VARIABLE_CALL_STATEMENT(return _impl->GetState());
+Tensor VariableState::get_state() const {
+    OV_VARIABLE_CALL_STATEMENT(return {_so, std::const_pointer_cast<ie::Blob>(_impl->GetState())});
}
-void VariableState::set_state(const ie::Blob::Ptr& state) {
-    OV_VARIABLE_CALL_STATEMENT(_impl->SetState(state));
+void VariableState::set_state(const Tensor& state) {
+    OV_VARIABLE_CALL_STATEMENT(_impl->SetState(state._impl));
}
} // namespace runtime

View File

@ -31,4 +31,31 @@ Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob,
    return inputBlob->createROI(begin, end);
}
//
// RTTI
//
Blob::~Blob() {}
MemoryBlob::~MemoryBlob() {}
template <typename T, typename U>
TBlob<T, U>::~TBlob() {
free();
}
template class INFERENCE_ENGINE_API_CLASS(TBlob<float>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<double>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<int8_t>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<uint8_t>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<int16_t>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<uint16_t>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<int32_t>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<uint32_t>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<long>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<long long>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<unsigned long>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<unsigned long long>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<bool>);
template class INFERENCE_ENGINE_API_CLASS(TBlob<char>);
} // namespace InferenceEngine

View File

@ -30,12 +30,6 @@ const ngraph::NodeTypeInfo& ExecutionNode::get_type_info() const {
} // namespace ExecGraphInfoSerialization
namespace InferenceEngine {
-//
-// ie_blob.h
-//
-Blob::~Blob() {}
-MemoryBlob::~MemoryBlob() {}
//
// ie_iextension.h
@ -127,30 +121,6 @@ StatusCode InferenceEngineException::getStatus() const {
} // namespace details
IE_SUPPRESS_DEPRECATED_END
-//
-// ie_blob.h
-//
-template <typename T, typename U>
-TBlob<T, U>::~TBlob() {
-    free();
-}
-template class INFERENCE_ENGINE_API_CLASS(TBlob<float>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<double>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<int8_t>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<uint8_t>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<int16_t>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<uint16_t>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<int32_t>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<uint32_t>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<long>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<long long>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<unsigned long>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<unsigned long long>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<bool>);
-template class INFERENCE_ENGINE_API_CLASS(TBlob<char>);
} // namespace InferenceEngine
namespace ov {

View File

@ -500,7 +500,9 @@ TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc,
                                          const std::vector<size_t>& begin,
                                          const std::vector<size_t>& end,
                                          bool useOrigMemDesc) {
-    IE_ASSERT(begin.size() == end.size());
+    if (begin.size() != end.size()) {
+        IE_THROW() << "`begin` vector size must match `end` vector size";
+    }
    TensorSlice slice;
    for (size_t i = 0; i < begin.size(); ++i) {
        IE_ASSERT(end[i] >= begin[i]);

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/runtime/remote_tensor.hpp"
namespace ov {
namespace runtime {
ie::ParamMap RemoteTensor::get_params() const {
OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized.");
auto remote_impl = InferenceEngine::as<InferenceEngine::RemoteBlob>(_impl);
OPENVINO_ASSERT(remote_impl != nullptr, "Remote tensor was not initialized using remote implementation");
try {
return remote_impl->getParams();
} catch (const std::exception& ex) {
throw ov::Exception(ex.what());
} catch (...) {
OPENVINO_ASSERT(false, "Unexpected exception");
}
}
std::string RemoteTensor::get_device_name() const {
OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized.");
auto remote_impl = InferenceEngine::as<InferenceEngine::RemoteBlob>(_impl);
OPENVINO_ASSERT(remote_impl != nullptr, "Remote tensor was not initialized using remote implementation");
try {
return remote_impl->getDeviceName();
} catch (const std::exception& ex) {
throw ov::Exception(ex.what());
} catch (...) {
OPENVINO_ASSERT(false, "Unexpected exception");
}
}
} // namespace runtime
} // namespace ov

View File

@ -32,6 +32,9 @@ public:
    static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, void* ptr) {
        return InferenceEngine::make_shared_blob<BlobType>(desc, reinterpret_cast<BlobType*>(ptr));
    }
+    static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc, void* ptr, const std::size_t size) {
+        return InferenceEngine::make_shared_blob<BlobType>(desc, reinterpret_cast<BlobType*>(ptr), size);
+    }
    static InferenceEngine::Blob::Ptr make(const InferenceEngine::TensorDesc& desc,
                                           const std::shared_ptr<InferenceEngine::IAllocator>& alloc) {
        return InferenceEngine::make_shared_blob<BlobType>(desc, alloc);

View File

@ -16,6 +16,7 @@
#include "blob_factory.hpp"
#include "cpp/ie_cnn_network.h"
+#include "details/ie_so_pointer.hpp"
#include "ie_iextension.h"
#include "ie_input_info.hpp"
#include "ie_parameter.hpp"

View File

@ -16,6 +16,7 @@
#include "ngraph/function.hpp"
#include "details/ie_so_loader.h"
#include "ie_metric_helpers.hpp"
+#include "openvino/op/logical_not.hpp"
#include "ie_remote_context.hpp"
#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
@ -226,11 +227,11 @@ public:
    EXPECT_CALL(*mock, GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))).Times(AnyNumber()).WillRepeatedly(Return(Parameter{1u}));
    EXPECT_CALL(*mock, GetExecGraphInfo()).Times(AnyNumber()).WillRepeatedly(Return([] {
        ngraph::ParameterVector parameters;
-        parameters.push_back(std::make_shared<ngraph::op::v0::Parameter>(
+        parameters.push_back(std::make_shared<ov::op::v0::Parameter>(
            ov::element::f32, ov::Shape{1, 3, 8, 8}));
-        auto notOp = std::make_shared<ngraph::op::v1::LogicalNot>(parameters.back());
+        auto notOp = std::make_shared<ov::op::v1::LogicalNot>(parameters.back());
        ngraph::ResultVector results;
-        results.push_back(std::make_shared<ngraph::op::v0::Result>(notOp));
+        results.push_back(std::make_shared<ov::op::v0::Result>(notOp));
        return std::make_shared<ov::Function>(results, parameters, "empty_function");
    } ()));
    auto ptr = std::make_shared<MockIInferRequestInternal>();

View File

@ -7,21 +7,21 @@
#include <cpp/ie_infer_request.hpp>
#include <openvino/core/except.hpp>
#include <openvino/runtime/infer_request.hpp>
+#include <openvino/runtime/remote_tensor.hpp>
using namespace ::testing;
using namespace std;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
-TEST(InferRequestOVTests, throwsOnUninitializedSetBlob) {
+TEST(InferRequestOVTests, throwsOnUninitializedSetTensor) {
    ov::runtime::InferRequest req;
-    ASSERT_THROW(req.set_blob({}, {}), ov::Exception);
+    ASSERT_THROW(req.set_tensor({}, {}), ov::Exception);
}
-TEST(InferRequestOVTests, throwsOnUninitializedGetBlob) {
+TEST(InferRequestOVTests, throwsOnUninitializedGetTensor) {
    ov::runtime::InferRequest req;
-    ASSERT_THROW(req.get_blob({}), ov::Exception);
+    ASSERT_THROW(req.get_tensor({}), ov::Exception);
}
TEST(InferRequestOVTests, throwsOnUninitializedInfer) {
@ -34,21 +34,6 @@ TEST(InferRequestOVTests, throwsOnUninitializedGetPerformanceCounts) {
    ASSERT_THROW(req.get_profiling_info(), ov::Exception);
}
-TEST(InferRequestOVTests, throwsOnUninitializedSetInput) {
-    ov::runtime::InferRequest req;
-    ASSERT_THROW(req.set_input({{}}), ov::Exception);
-}
-TEST(InferRequestOVTests, throwsOnUninitializedSetOutput) {
-    ov::runtime::InferRequest req;
-    ASSERT_THROW(req.set_output({{}}), ov::Exception);
-}
-TEST(InferRequestOVTests, throwsOnUninitializedSetBatch) {
-    ov::runtime::InferRequest req;
-    ASSERT_THROW(req.set_batch({}), ov::Exception);
-}
TEST(InferRequestOVTests, throwsOnUninitializedStartAsync) {
    ov::runtime::InferRequest req;
    ASSERT_THROW(req.start_async(), ov::Exception);
@ -74,3 +59,10 @@ TEST(InferRequestOVTests, throwsOnUninitializedQueryState) {
    ov::runtime::InferRequest req;
    ASSERT_THROW(req.query_state(), ov::Exception);
}
TEST(InferRequestOVTests, throwsOnUninitializedSetRemoteTensor) {
ov::runtime::InferRequest req;
ov::runtime::RemoteTensor remote_tensor;
ASSERT_THROW(req.set_tensor({}, remote_tensor), ov::Exception);
}

View File

@ -17,7 +17,7 @@ TEST(RemoteContextOVTests, throwsOnUninitializedReset) {
TEST(RemoteContextOVTests, throwsOnUninitializedGetname) {
    ov::runtime::RemoteContext ctx;
-    ASSERT_THROW(ctx.create_blob({}, {}), ov::Exception);
+    ASSERT_THROW(ctx.create_tensor({}, {}, {}), ov::Exception);
}
TEST(RemoteContextOVTests, throwsOnUninitializedGetParams) {

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <cpp/ie_infer_request.hpp>
#include <openvino/runtime/remote_tensor.hpp>
using namespace ::testing;
using namespace std;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
TEST(RemoteTensorOVTests, throwsOnGetParams) {
ov::runtime::RemoteTensor tensor;
ASSERT_THROW(tensor.get_params(), ov::Exception);
}
TEST(RemoteTensorOVTests, throwsOnGetDeviceName) {
ov::runtime::RemoteTensor tensor;
ASSERT_THROW(tensor.get_device_name(), ov::Exception);
}

View File

@ -29,6 +29,6 @@ TEST(VariableStateOVTests, throwsOnUninitializedGetState) {
TEST(VariableStateOVTests, throwsOnUninitializedSetState) {
    ov::runtime::VariableState state;
-    InferenceEngine::Blob::Ptr blob;
-    ASSERT_THROW(state.set_state(blob), ov::Exception);
+    ov::runtime::Tensor tensor;
+    ASSERT_THROW(state.set_state(tensor), ov::Exception);
}

View File

@ -12,6 +12,26 @@ add_subdirectory(builder)
add_subdirectory(reference)
add_subdirectory(shape_inference)
# WA for Tensor implementation via ie::Blob::Ptr
set(IE_SRC_ROOT "${IE_MAIN_SOURCE_DIR}/src/inference_engine/src")
set(IE_SHARED_SRCS
"${IE_SRC_ROOT}/system_allocator.cpp"
"${IE_SRC_ROOT}/blob_factory.cpp"
"${IE_SRC_ROOT}/ie_blob_common.cpp"
"${IE_SRC_ROOT}/ie_layouts.cpp")
set(MIXED_SRC ${IE_SHARED_SRCS}
"${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/allocator.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/ov_tensor.cpp")
set_property(SOURCE ${MIXED_SRC}
APPEND PROPERTY INCLUDE_DIRECTORIES
${IE_SRC_ROOT}
$<TARGET_PROPERTY:inference_engine,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>)
set_source_files_properties(${MIXED_SRC}
PROPERTIES COMPILE_DEFINITIONS IMPLEMENT_INFERENCE_ENGINE_API)
# Create named folders for the sources within the .vcproj
# Empty name lists them directly under the .vcproj
@ -19,7 +39,7 @@ source_group("src" FILES ${LIBRARY_SRC})
source_group("include" FILES ${PUBLIC_HEADERS})
# Create static or shared library depending on BUILD_SHARED_LIBS
-add_library(ngraph ${LIBRARY_SRC} ${PUBLIC_HEADERS})
+add_library(ngraph ${LIBRARY_SRC} ${PUBLIC_HEADERS} ${IE_SHARED_SRCS})
if(COMMAND ie_faster_build)

View File

@ -162,6 +162,8 @@ OPENVINO_API Type from<ov::bfloat16>();
template <>
OPENVINO_API Type from<ov::float16>();
+OPENVINO_API Type fundamental_type_for(const Type& type);
OPENVINO_API
std::ostream& operator<<(std::ostream& out, const ov::element::Type& obj);
} // namespace element

View File

@ -0,0 +1,131 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header file that provides Allocator interface
*
* @file openvino/runtime/allocator.hpp
*/
#pragma once
#include <cstddef>
#include <memory>
#include "openvino/core/core_visibility.hpp"
namespace ov {
namespace runtime {
/**
* @interface AllocatorImpl
* @brief Tries to act like [std::pmr::memory_resource](https://en.cppreference.com/w/cpp/memory/memory_resource)
*/
struct AllocatorImpl : public std::enable_shared_from_this<AllocatorImpl> {
/**
* @brief A smart pointer containing AllocatorImpl object
*/
using Ptr = std::shared_ptr<AllocatorImpl>;
/**
* @brief Allocates memory
*
* @param bytes The size in bytes at least to allocate
* @param alignment The alignment of storage
* @return Handle to the allocated resource
* @throw Exception if specified size and alignment is not supported
*/
virtual void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t)) = 0;
/**
* @brief Releases the handle and all associated memory resources which invalidates the handle.
* @param handle The handle to free
* @param bytes The size in bytes that was passed into allocate() method
* @param alignment The alignment of storage that was passed into allocate() method
*/
virtual void deallocate(void* handle, const size_t bytes, size_t alignment = alignof(max_align_t)) = 0;
/**
* @brief Compares with other AllocatorImpl
* @param other Other instance of allocator
* @return `true` if and only if memory allocated from one AllocatorImpl can be deallocated from the other and vice
* versa
*/
virtual bool is_equal(const AllocatorImpl& other) const = 0;
protected:
~AllocatorImpl() = default;
};
class Tensor;
/**
* @brief Wraps an allocator implementation to provide a safe way to store an allocator loaded from a shared library,
* and constructs a default allocator (based on C++ `new`/`delete`) if created without parameters
*/
class OPENVINO_API Allocator {
std::shared_ptr<void> _so;
AllocatorImpl::Ptr _impl;
/**
* @brief Constructs Allocator from the initialized std::shared_ptr
* @param so Plugin to use. This is required to ensure that Allocator can work properly even if plugin object is
* destroyed.
* @param impl Initialized shared pointer
*/
Allocator(const std::shared_ptr<void>& so, const AllocatorImpl::Ptr& impl);
friend class ov::runtime::Tensor;
public:
/**
* @brief Creates the default implementation of the OpenVINO allocator.
*/
Allocator();
/**
* @brief Constructs Allocator from the initialized std::shared_ptr
* @param impl Initialized shared pointer
*/
Allocator(const AllocatorImpl::Ptr& impl);
/**
* @brief Allocates memory
*
* @param bytes The size in bytes at least to allocate
* @param alignment The alignment of storage
* @return Handle to the allocated resource
* @throw Exception if specified size and alignment is not supported
*/
void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t));
/**
* @brief Releases the handle and all associated memory resources which invalidates the handle.
* @param ptr The handle to free
* @param bytes The size in bytes that was passed into allocate() method
* @param alignment The alignment of storage that was passed into allocate() method
*/
void deallocate(void* ptr, const size_t bytes = 0, const size_t alignment = alignof(max_align_t));
/**
* @brief Compares with other AllocatorImpl
* @param other Other instance of allocator
* @return `true` if and only if memory allocated from one AllocatorImpl can be deallocated from the other and vice
* versa
*/
bool operator==(const Allocator& other) const;
/**
* @brief Checks if current Allocator object is not initialized
* @return `true` if current Allocator object is not initialized, `false` - otherwise
*/
bool operator!() const noexcept;
/**
* @brief Checks if current Allocator object is initialized
* @return `true` if current Allocator object is initialized, `false` - otherwise
*/
explicit operator bool() const noexcept;
};
} // namespace runtime
} // namespace ov
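Example (not part of the diff): a minimal sketch of plugging custom memory management into the runtime by deriving from AllocatorImpl, here backed by std::malloc/std::free; alignment support and error handling are deliberately simplified.

#include <cstddef>
#include <cstdlib>
#include <memory>
#include "openvino/core/except.hpp"
#include "openvino/runtime/allocator.hpp"

// Minimal sketch of a custom allocator implementation.
struct MallocAllocator : public ov::runtime::AllocatorImpl {
    void* allocate(const size_t bytes, const size_t alignment) override {
        OPENVINO_ASSERT(alignment == alignof(std::max_align_t), "Only default alignment is supported");
        void* handle = std::malloc(bytes == 0 ? 1 : bytes);
        OPENVINO_ASSERT(handle != nullptr, "Cannot allocate ", bytes, " bytes");
        return handle;
    }
    void deallocate(void* handle, const size_t, const size_t) override {
        std::free(handle);
    }
    bool is_equal(const AllocatorImpl& other) const noexcept override {
        // every MallocAllocator frees through the same heap, so any instance may release another's memory
        return dynamic_cast<const MallocAllocator*>(&other) != nullptr;
    }
};

// Usage sketch: tensors created with this allocator obtain their storage via std::malloc.
// ov::runtime::Allocator allocator{std::make_shared<MallocAllocator>()};
// ov::runtime::Tensor tensor{ov::element::f32, ov::Shape{1, 3, 224, 224}, allocator};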

View File

@ -0,0 +1,157 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief This is a header file for the OpenVINO Runtime tensor API
*
* @file openvino/runtime/tensor.hpp
*/
#pragma once
#include "openvino/core/coordinate.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/allocator.hpp"
namespace InferenceEngine {
class Blob;
} // namespace InferenceEngine
namespace ov {
namespace runtime {
class InferRequest;
class RemoteContext;
class VariableState;
/**
* @brief Tensor API holding host memory
*
* It can throw exceptions safely for the application, where it is properly handled.
*/
class OPENVINO_API Tensor {
protected:
std::shared_ptr<void> _so; //!< Reference to dynamically loaded library
std::shared_ptr<InferenceEngine::Blob> _impl; //!< Shared pointer to internal tensor representation
/**
* @brief Constructs Tensor from the initialized std::shared_ptr
* @param so Plugin to use. This is required to ensure that Tensor can work properly even if plugin object is
* destroyed.
* @param impl Initialized shared pointer
*/
Tensor(const std::shared_ptr<void>& so, const std::shared_ptr<InferenceEngine::Blob>& impl);
friend class ov::runtime::InferRequest;
friend class ov::runtime::RemoteContext;
friend class ov::runtime::VariableState;
public:
/**
* @brief Default constructor
*/
Tensor() = default;
/**
* @brief Constructs Tensor using element type and shape. Allocate internal host storage using default allocator
* @param type Tensor element type
* @param shape Tensor shape
* @param allocator allocates memory for internal tensor storage
*/
Tensor(const element::Type type, const Shape& shape, const Allocator& allocator = {});
/**
* @brief Constructs Tensor using element type and shape. Wraps allocated host memory.
* @note Does not perform memory allocation internally
* @param type Tensor element type
* @param shape Tensor shape
* @param host_ptr Pointer to pre-allocated host memory
* @param size Optional size of allocated host memory in elements. If it is not set (default is `0`), the size of
* the memory is supposed to be not less than ov::shape_size(shape) * type.size() in bytes.
* @param strides Optional strides in elements. Row-major strides computed from the shape are used if they are not
* set
*/
Tensor(const element::Type type,
const Shape& shape,
void* host_ptr,
const size_t size = 0,
const Strides& strides = {});
/**
* @brief Constructs region of interest (ROI) tensor from another tensor.
* @note Does not perform memory allocation internally
* @param other original tensor
* @param begin start coordinate of ROI object inside of the original object.
* @param end end coordinate of ROI object inside of the original object.
* @note The number of dimensions in `begin` and `end` must match the number of dimensions in `other.get_shape()`
*/
Tensor(const Tensor& other, const Coordinate& begin, const Coordinate& end);
/**
* @brief Set new shape for tensor, deallocate/allocate if new total size is bigger than previous one.
* @note Memory allocation may happen
* @param shape A new shape
*/
void set_shape(const ov::Shape& shape);
/**
* @return A tensor element type
*/
element::Type get_element_type() const;
/**
* @return A tensor shape
*/
Shape get_shape() const;
/**
* @brief Returns the total number of elements (a product of all the dims or 1 for scalar)
* @return The total number of elements
*/
size_t get_size() const;
/**
* @brief Returns the size of the current Tensor in bytes.
* @return Tensor's size in bytes
*/
size_t get_byte_size() const;
/**
* @return Tensor's strides in elements
*/
Strides get_strides() const;
/**
* @brief Provides access to the underlying host memory
* @param type Optional type parameter.
* @note If type parameter is specified, the method throws an exception
* if specified type's fundamental type does not match with tensor element type's fundamental type
* @return A host pointer to tensor memory
*/
void* data(const element::Type type = {}) const;
/**
* @brief Provides access to the underlying host memory cast to type `T`
* @return A host pointer to tensor memory casted to specified type `T`.
* @note Throws exception if specified type does not match with tensor element type
*/
template <typename T>
T* data() const {
return static_cast<T*>(data(element::from<T>()));
}
/**
* @brief Checks if current Tensor object is not initialized
* @return `true` if current Tensor object is not initialized, `false` - otherwise
*/
bool operator!() const noexcept;
/**
* @brief Checks if current Tensor object is initialized
* @return `true` if current Tensor object is initialized, `false` - otherwise
*/
explicit operator bool() const noexcept;
};
} // namespace runtime
} // namespace ov
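Example (not part of the diff): the three construction modes declared above — runtime-owned memory, wrapping of user memory, and a zero-copy region-of-interest view (shapes and values are illustrative).

#include <cstdint>
#include <vector>
#include "openvino/runtime/tensor.hpp"

void tensor_construction_examples() {
    // 1. The tensor allocates its own host storage through the default allocator.
    ov::runtime::Tensor owned{ov::element::i32, ov::Shape{2, 3}};
    owned.data<int32_t>()[0] = 1;

    // 2. Wrap pre-allocated user memory; no copy is made and the buffer must outlive the tensor.
    std::vector<float> buffer(2 * 3, 0.0f);
    ov::runtime::Tensor wrapped{ov::element::f32, ov::Shape{2, 3}, buffer.data()};

    // 3. A region-of-interest view over an existing tensor; no allocation either.
    ov::runtime::Tensor roi{owned, ov::Coordinate{0, 0}, ov::Coordinate{1, 3}};
    (void)wrapped;
    (void)roi;
}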

View File

@ -0,0 +1,61 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/runtime/allocator.hpp"
#include "blob_allocator.hpp"
#include "ie_allocator.hpp"
#include "ie_common.h"
#include "openvino/core/except.hpp"
namespace ov {
namespace runtime {
Allocator::Allocator() : _impl{std::make_shared<BlobAllocator>()} {}
Allocator::Allocator(const std::shared_ptr<void>& so, const std::shared_ptr<AllocatorImpl>& impl)
: _so{so},
_impl{impl} {
OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized.");
}
Allocator::Allocator(const std::shared_ptr<AllocatorImpl>& impl) : _impl{impl} {
OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized.");
}
#define OV_ALLOCATOR_STATEMENT(...) \
OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized."); \
try { \
__VA_ARGS__; \
} catch (const std::exception& ex) { \
throw ov::Exception(ex.what()); \
} catch (...) { \
OPENVINO_ASSERT(false, "Unexpected exception"); \
}
void* Allocator::allocate(const size_t bytes, const size_t alignment) {
OV_ALLOCATOR_STATEMENT(return _impl->allocate(bytes, alignment));
}
void Allocator::deallocate(void* handle, const size_t bytes, const size_t alignment) {
OV_ALLOCATOR_STATEMENT(_impl->deallocate(handle, bytes, alignment));
}
bool Allocator::operator==(const Allocator& other) const {
OV_ALLOCATOR_STATEMENT({
if (_impl == other._impl) {
return true;
}
return _impl->is_equal(*other._impl);
});
}
bool Allocator::operator!() const noexcept {
return !_impl;
}
Allocator::operator bool() const noexcept {
return (!!_impl);
}
} // namespace runtime
} // namespace ov

View File

@ -0,0 +1,86 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <unordered_map>
#include "ie_allocator.hpp" // IE public header
#include "openvino/core/except.hpp"
#include "openvino/runtime/allocator.hpp"
#include "openvino/runtime/common.hpp"
#include "system_allocator.hpp" // IE private header
namespace InferenceEngine {
struct BlobAllocator : public IAllocator {
BlobAllocator(const std::shared_ptr<ov::runtime::AllocatorImpl>& impl) : _impl{impl} {}
void* lock(void* handle, LockOp) noexcept override {
return handle;
}
void unlock(void*) noexcept override {}
void* alloc(const size_t size) noexcept override {
try {
return size_map.emplace(_impl->allocate(size), size).first->first;
} catch (...) {
return nullptr;
}
}
bool free(void* handle) noexcept override {
try {
auto size = size_map.at(handle);
size_map.erase(handle);
_impl->deallocate(handle, size);
return true;
} catch (...) {
return false;
}
}
std::shared_ptr<ov::runtime::AllocatorImpl> _impl;
std::unordered_map<void*, size_t> size_map;
};
} // namespace InferenceEngine
namespace ov {
struct BlobAllocator : public runtime::AllocatorImpl {
BlobAllocator(const std::shared_ptr<ie::IAllocator>& impl = std::make_shared<ie::SystemMemoryAllocator>())
: _impl{impl} {}
void* allocate(const size_t bytes, const size_t alignment) override {
OPENVINO_ASSERT(alignment == alignof(max_align_t),
"Aligned deallocation is not implemented. alignment: ",
alignment);
auto handle = _impl->alloc(bytes);
OPENVINO_ASSERT(handle != nullptr, "Can not allocate storage for at least ", bytes, " bytes");
return handle;
}
void deallocate(void* handle, const size_t bytes, const size_t alignment) override {
OPENVINO_ASSERT(bytes == 0, "Sized deallocation is not implemented. bytes: ", bytes);
OPENVINO_ASSERT(alignment == alignof(max_align_t),
"Aligned deallocation is not implemented. alignment: ",
alignment);
auto res = _impl->free(handle);
OPENVINO_ASSERT(res != false, "Can not deallocate storage");
}
bool is_equal(const AllocatorImpl& other) const override {
auto other_blob_allocator = dynamic_cast<const BlobAllocator*>(&other);
if (other_blob_allocator == nullptr)
return false;
if (other_blob_allocator->_impl == _impl)
return true;
auto other_system_memory_allocator =
dynamic_cast<const ie::SystemMemoryAllocator*>(other_blob_allocator->_impl.get());
auto system_allocator = dynamic_cast<const ie::SystemMemoryAllocator*>(_impl.get());
if (system_allocator != nullptr && other_system_memory_allocator != nullptr)
return true;
return false;
}
std::shared_ptr<ie::IAllocator> _impl;
};
} // namespace ov

View File

@ -0,0 +1,137 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <numeric>
#include "blob_factory.hpp" // IE private header
#include "ie_ngraph_utils.hpp" // IE private header
#include "openvino/core/except.hpp"
#include "openvino/runtime/tensor.hpp"
#include "runtime/blob_allocator.hpp"
namespace ov {
namespace runtime {
#define OV_TENSOR_STATEMENT(...) \
OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized."); \
try { \
__VA_ARGS__; \
} catch (const std::exception& ex) { \
throw ov::Exception(ex.what()); \
} catch (...) { \
OPENVINO_ASSERT(false, "Unexpected exception"); \
}
Tensor::Tensor(const std::shared_ptr<void>& so, const std::shared_ptr<ie::Blob>& impl) : _so{so}, _impl{impl} {
OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
}
Tensor::Tensor(const element::Type element_type, const Shape& shape, const Allocator& allocator) {
OPENVINO_ASSERT(allocator, "Allocator was not initialized");
auto allocator_impl = dynamic_cast<const BlobAllocator*>(allocator._impl.get());
auto blob_allocator =
(allocator_impl != nullptr) ? allocator_impl->_impl : std::make_shared<ie::BlobAllocator>(allocator._impl);
_impl = make_blob_with_precision(
{ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByRank(shape.size())},
blob_allocator);
_impl->allocate();
}
Tensor::Tensor(const element::Type element_type,
const Shape& shape,
void* host_ptr,
const size_t size,
const Strides& strides) {
ie::SizeVector blk_order(shape.size());
std::iota(blk_order.begin(), blk_order.end(), 0);
ie::SizeVector dim_offset(shape.size(), 0);
ie::SizeVector blk_strides;
if (strides.empty()) {
blk_strides = ov::row_major_strides(shape);
} else {
OPENVINO_ASSERT(shape.size() == strides.size(),
"shape.size() (",
shape.size(),
") must be equal to strides.size() (",
strides.size(),
")");
blk_strides.assign(strides.begin(), strides.end());
}
try {
_impl = make_blob_with_precision(ie::details::convertPrecision(element_type),
ie::TensorDesc{ie::details::convertPrecision(element_type),
shape,
ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}},
host_ptr,
size);
} catch (const std::exception& ex) {
throw ov::Exception(ex.what());
} catch (...) {
OPENVINO_ASSERT(false, "Unexpected exception");
}
}
Tensor::Tensor(const Tensor& owner, const Coordinate& begin, const Coordinate& end) : _so{owner._so} {
try {
_impl = owner._impl->createROI(begin, end);
} catch (const std::exception& ex) {
throw ov::Exception(ex.what());
} catch (...) {
OPENVINO_ASSERT(false, "Unexpected exception");
}
}
element::Type Tensor::get_element_type() const {
OV_TENSOR_STATEMENT(return ie::details::convertPrecision(_impl->getTensorDesc().getPrecision()));
}
void Tensor::set_shape(const ov::Shape& shape) {
OV_TENSOR_STATEMENT(_impl->setShape({shape.begin(), shape.end()}));
}
Shape Tensor::get_shape() const {
OV_TENSOR_STATEMENT({ return _impl->getTensorDesc().getDims(); });
}
Strides Tensor::get_strides() const {
OV_TENSOR_STATEMENT(return _impl->getTensorDesc().getBlockingDesc().getStrides(););
}
size_t Tensor::get_size() const {
OV_TENSOR_STATEMENT(return ov::shape_size(get_shape()));
}
size_t Tensor::get_byte_size() const {
OV_TENSOR_STATEMENT(return ov::shape_size(get_shape()) * get_element_type().size());
}
void* Tensor::data(const element::Type element_type) const {
OV_TENSOR_STATEMENT({
if (element_type != element::undefined) {
OPENVINO_ASSERT(
element::fundamental_type_for(element_type) == element::fundamental_type_for(get_element_type()),
get_element_type(),
" tensor fundamental element type is ",
element::fundamental_type_for(get_element_type()),
", but it casted to ",
element_type,
" with fundamental element type",
element::fundamental_type_for(element_type));
}
return _impl->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size() +
InferenceEngine::as<InferenceEngine::MemoryBlob>(_impl)->rmap().as<uint8_t*>();
});
}
bool Tensor::operator!() const noexcept {
return !_impl;
}
Tensor::operator bool() const noexcept {
return (!!_impl);
}
} // namespace runtime
} // namespace ov

View File

@ -195,6 +195,46 @@ template <>
Type from<ov::bfloat16>() {
    return Type_t::bf16;
}
Type fundamental_type_for(const Type& type) {
switch (type) {
case Type_t::boolean:
return from<element_type_traits<Type_t::boolean>::value_type>();
case Type_t::bf16:
return from<element_type_traits<Type_t::bf16>::value_type>();
case Type_t::f16:
return from<element_type_traits<Type_t::f16>::value_type>();
case Type_t::f32:
return from<element_type_traits<Type_t::f32>::value_type>();
case Type_t::f64:
return from<element_type_traits<Type_t::f64>::value_type>();
case Type_t::i4:
return from<element_type_traits<Type_t::i4>::value_type>();
case Type_t::i8:
return from<element_type_traits<Type_t::i8>::value_type>();
case Type_t::i16:
return from<element_type_traits<Type_t::i16>::value_type>();
case Type_t::i32:
return from<element_type_traits<Type_t::i32>::value_type>();
case Type_t::i64:
return from<element_type_traits<Type_t::i64>::value_type>();
case Type_t::u1:
return from<element_type_traits<Type_t::u1>::value_type>();
case Type_t::u4:
return from<element_type_traits<Type_t::u4>::value_type>();
case Type_t::u8:
return from<element_type_traits<Type_t::u8>::value_type>();
case Type_t::u16:
return from<element_type_traits<Type_t::u16>::value_type>();
case Type_t::u32:
return from<element_type_traits<Type_t::u32>::value_type>();
case Type_t::u64:
return from<element_type_traits<Type_t::u64>::value_type>();
default:
OPENVINO_UNREACHABLE("Unsupported Data type: ", type);
}
}
} // namespace element
} // namespace ov

View File

@ -77,6 +77,8 @@ set(SRC
op_eval/transpose.cpp
op_eval/variadic_split.cpp
opset1.cpp
ov_default_allocator_test.cpp
ov_tensor_test.cpp
partial_shape.cpp
pass_config.cpp
pass_manager.cpp
@ -371,6 +373,11 @@ set(SRC
util.cpp
)
if(SUGGEST_OVERRIDE_SUPPORTED)
set_source_files_properties(ov_tensor_test.cpp
PROPERTIES COMPILE_OPTIONS -Wno-suggest-override)
endif()
set_source_files_properties(includes.cpp PROPERTIES COMPILE_DEFINITIONS
    NGRAPH_INCLUDES="${PROJECT_SOURCE_DIR}/src/ngraph")

View File

@ -0,0 +1,60 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <memory>
#include "openvino/core/except.hpp"
#include "openvino/runtime/allocator.hpp"
using OVDefaultAllocatorTest = ::testing::Test;
TEST_F(OVDefaultAllocatorTest, notThrowOnZeroSize) {
ov::runtime::Allocator allocator;
void* ptr = nullptr;
ASSERT_NO_THROW(ptr = allocator.allocate(0));
ASSERT_NO_THROW(allocator.deallocate(ptr));
}
TEST_F(OVDefaultAllocatorTest, canAllocateAndDeallocate) {
ov::runtime::Allocator allocator;
void* ptr = nullptr;
ASSERT_NO_THROW(ptr = allocator.allocate(64));
ASSERT_NO_THROW(allocator.deallocate(ptr));
}
TEST_F(OVDefaultAllocatorTest, alignedAllocationIsNotImplemented) {
ov::runtime::Allocator allocator;
ASSERT_THROW(allocator.allocate(64, 64), ov::Exception);
}
TEST_F(OVDefaultAllocatorTest, sizedAndAlignedDeallocationAreNotImplemented) {
ov::runtime::Allocator allocator;
void* ptr = nullptr;
ASSERT_NO_THROW(ptr = allocator.allocate(64));
ASSERT_THROW(allocator.deallocate(ptr, 64), ov::Exception);
ASSERT_THROW(allocator.deallocate(ptr, 0, 64), ov::Exception);
ASSERT_NO_THROW(allocator.deallocate(ptr));
}
TEST_F(OVDefaultAllocatorTest, defaultAllocatorsAreEqual) {
ov::runtime::Allocator allocator0, allocator1;
ASSERT_TRUE(allocator0 == allocator1);
void* ptr = nullptr;
ASSERT_NO_THROW(ptr = allocator0.allocate(64));
ASSERT_NO_THROW(allocator1.deallocate(ptr));
ASSERT_NO_THROW(ptr = allocator1.allocate(64));
ASSERT_NO_THROW(allocator0.deallocate(ptr));
}
TEST_F(OVDefaultAllocatorTest, canAllocate10KMemory) {
ov::runtime::Allocator allocator;
// accessing the end of a large block (10 KB) would segfault if the allocation had not really happened
void* handle = allocator.allocate(10000);
char* ptr = reinterpret_cast<char*>(handle);
ptr[9999] = 11;
EXPECT_EQ(ptr[9999], 11);
allocator.deallocate(handle);
}
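The default allocator above rejects aligned and sized (de)allocation, so custom policies go through ov::runtime::AllocatorImpl instead. Below is a minimal sketch, assuming the three-virtual interface mocked by OVMockAllocator in ov_tensor_test.cpp further down (allocate, deallocate, is_equal); default arguments and noexcept guarantees of the real base class are not spelled out here.

#include <cstddef>
#include <cstdlib>
#include <memory>

#include "openvino/runtime/allocator.hpp"
#include "openvino/runtime/tensor.hpp"

// Routes Tensor allocations through std::malloc/std::free.
struct MallocAllocator : public ov::runtime::AllocatorImpl {
    void* allocate(size_t bytes, size_t /*alignment*/) override {
        return std::malloc(bytes);  // alignment is ignored in this sketch
    }
    void deallocate(void* handle, size_t /*bytes*/, size_t /*alignment*/) override {
        std::free(handle);
    }
    bool is_equal(const ov::runtime::AllocatorImpl& other) const noexcept override {
        // any two MallocAllocator instances are interchangeable
        return dynamic_cast<const MallocAllocator*>(&other) != nullptr;
    }
};

// Usage mirroring canCreateTensorUsingMockAllocator below:
//   ov::runtime::Tensor t{ov::element::f32, {4, 3, 2},
//                         ov::runtime::Allocator{std::make_shared<MallocAllocator>()}};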

View File

@ -0,0 +1,196 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock-spec-builders.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstdint>
#include <openvino/core/shape.hpp>
#include <openvino/core/strides.hpp>
#include <openvino/core/type/element_type.hpp>
#include "openvino/runtime/allocator.hpp"
#include "openvino/runtime/tensor.hpp"
using OVTensorTest = ::testing::Test;
TEST_F(OVTensorTest, canCreateTensor) {
ov::Shape shape = {4, 3, 2};
ov::runtime::Tensor t{ov::element::f32, shape};
const std::size_t totalSize = ov::shape_size(shape);
ASSERT_EQ(totalSize, t.get_size());
ASSERT_NE(nullptr, t.data());
ASSERT_EQ(ov::element::f32, t.get_element_type());
ASSERT_EQ(shape, t.get_shape());
ASSERT_NE(shape, t.get_strides());
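// strides here are expressed in elements: row-major strides for shape {4, 3, 2} are {3*2, 2, 1} = {6, 2, 1}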
ASSERT_EQ(ov::Strides({6, 2, 1}), t.get_strides());
ASSERT_EQ(ov::element::f32.size() * totalSize, t.get_byte_size());
ASSERT_THROW(t.data(ov::element::i64), ov::Exception);
ASSERT_THROW(t.data<std::int32_t>(), ov::Exception);
}
TEST_F(OVTensorTest, operators) {
ov::runtime::Tensor t;
ASSERT_FALSE(t);
ASSERT_TRUE(!t);
}
class OVMockAllocator : public ov::runtime::AllocatorImpl {
public:
MOCK_METHOD(void*, allocate, (size_t, size_t), ());
MOCK_METHOD(void, deallocate, (void*, size_t, size_t), ()); // NOLINT(readability/casting)
MOCK_METHOD(bool, is_equal, (const ov::runtime::AllocatorImpl&), (const, noexcept)); // NOLINT(readability/casting)
};
TEST_F(OVTensorTest, canCreateTensorUsingMockAllocator) {
ov::Shape shape = {1, 2, 3};
auto allocator = std::make_shared<OVMockAllocator>();
EXPECT_CALL(*allocator, allocate(::testing::_, ::testing::_))
.WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
EXPECT_CALL(*allocator, deallocate(::testing::_, ::testing::_, ::testing::_)).Times(1);
{ ov::runtime::Tensor t{ov::element::f32, shape, ov::runtime::Allocator{allocator}}; }
}
TEST_F(OVTensorTest, canAccessExternalData) {
ov::Shape shape = {1, 1, 3};
float data[] = {5.f, 6.f, 7.f};
ov::runtime::Tensor t{ov::element::f32, shape, data, 3};
{
float* ptr = t.data<float>();
ASSERT_EQ(ptr[2], 7);
ASSERT_EQ(data, t.data(ov::element::f32));
ASSERT_EQ(data, ptr);
ASSERT_THROW(t.data<std::int16_t>(), ov::Exception);
ASSERT_EQ(ov::row_major_strides(shape), t.get_strides());
ASSERT_EQ(ov::shape_size(shape), t.get_size());
ASSERT_EQ(ov::shape_size(shape) * ov::element::f32.size(), t.get_byte_size());
}
}
TEST_F(OVTensorTest, canAccessExternalDataWithStrides) {
ov::Shape shape = {2, 3};
float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
ov::runtime::Tensor t{ov::element::f32, shape, data, 8, {4, 1}};
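// the external buffer holds 8 floats laid out with element strides {4, 1}:
// logical element (1, 1) therefore sits at flat index 1*4 + 1 = 5, i.e. 42.f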
{
ASSERT_EQ((ov::Shape{2, 3}), t.get_shape());
float* ptr = t.data<float>();
ASSERT_EQ(ptr[5], 42);
}
}
TEST_F(OVTensorTest, cannotCreateTensorWithExternalNullptr) {
ov::Shape shape = {2, 3};
ASSERT_THROW(ov::runtime::Tensor(ov::element::f32, shape, nullptr), ov::Exception);
}
TEST_F(OVTensorTest, cannotCreateTensorWithWrongStrides) {
ov::Shape shape = {2, 3};
float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
ASSERT_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, 8, {4, 1, 2}), ov::Exception);
}
TEST_F(OVTensorTest, saveDimsAndSizeAfterMove) {
ov::Shape shape = {1, 2, 3};
ov::runtime::Tensor t{ov::element::f32, shape};
ov::runtime::Tensor new_tensor(std::move(t));
ASSERT_EQ(shape, new_tensor.get_shape());
ASSERT_EQ(ov::element::f32, new_tensor.get_element_type());
ASSERT_EQ(ov::row_major_strides(shape), new_tensor.get_strides());
ASSERT_THROW(t.get_size(), ov::Exception);
ASSERT_THROW(t.get_element_type(), ov::Exception);
ASSERT_THROW(t.get_byte_size(), ov::Exception);
ASSERT_THROW(t.get_strides(), ov::Exception);
ASSERT_THROW(t.get_shape(), ov::Exception);
ASSERT_THROW(t.set_shape({}), ov::Exception);
ASSERT_THROW(t.data(), ov::Exception);
ASSERT_THROW(t.data<float>(), ov::Exception);
}
// SetShape
TEST_F(OVTensorTest, canSetShape) {
ov::runtime::Tensor t{ov::element::f32, {1, 2, 3}};
const ov::Shape newShape({4, 5, 6});
ASSERT_EQ(t.get_shape(), (ov::Shape{1, 2, 3}));
ASSERT_NO_THROW(t.set_shape({4, 5, 6}));
ASSERT_EQ(newShape, t.get_shape());
ASSERT_EQ(ov::row_major_strides(newShape), t.get_strides());
// check that set_shape on a copy also changes the original Tensor (copies share storage)
{
ov::runtime::Tensor t2 = t;
t2.set_shape(newShape);
ASSERT_EQ(newShape, t.get_shape());
ASSERT_EQ(t2.get_shape(), t.get_shape());
}
}
TEST_F(OVTensorTest, makeRangeRoiTensor) {
ov::runtime::Tensor t{ov::element::i8, {1, 3, 6, 5}}; // RGBp picture of size (WxH) = 5x6
ov::runtime::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
ov::Shape ref_shape = {1, 3, 4, 2};
ptrdiff_t ref_offset = 7;
ov::Strides ref_strides = {90, 30, 5, 1};
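// expected offset of the ROI origin: begin · strides = 0*90 + 0*30 + 1*5 + 2*1 = 7 (i8 elements, 1 byte each)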
ASSERT_EQ(roi_tensor.get_shape(), ref_shape);
ASSERT_EQ(roi_tensor.data<int8_t>() - t.data<int8_t>(), ref_offset);
ASSERT_EQ(reinterpret_cast<uint8_t*>(roi_tensor.data()) - reinterpret_cast<uint8_t*>(t.data()), ref_offset);
ASSERT_EQ(roi_tensor.get_strides(), t.get_strides());
ASSERT_EQ(ref_strides, roi_tensor.get_strides());
ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
}
TEST_F(OVTensorTest, makeRangeRoiTensorInt4) {
ov::runtime::Tensor t{ov::element::i4, {1, 6, 5, 3}}; // RGB picture of size (WxH) = 5x6
ov::runtime::Tensor roi_tensor{t, {0, 1, 2, 0}, {1, 5, 4, 3}};
ov::Shape ref_shape = {1, 4, 2, 3};
ptrdiff_t ref_offset = 21;
ov::Strides ref_strides = {90, 15, 3, 1};
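// expected offset of the ROI origin in elements: begin · strides = 0*90 + 1*15 + 2*3 + 0*1 = 21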
ASSERT_EQ(roi_tensor.get_shape(), ref_shape);
ASSERT_EQ(roi_tensor.data<int8_t>() - t.data<int8_t>(), ref_offset);
ASSERT_EQ(roi_tensor.get_strides(), ref_strides);
ASSERT_EQ(roi_tensor.get_strides(), t.get_strides());
ASSERT_EQ(ref_strides, roi_tensor.get_strides());
ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
}
TEST_F(OVTensorTest, makeRangeRoiBlobWrongSize) {
ov::runtime::Tensor t{ov::element::f32, {1, 3, 4, 4}};
ASSERT_THROW((ov::runtime::Tensor{t, {0, 0, 1, 1}, {1, 3, 5, 5}}), ov::Exception);
ASSERT_THROW((ov::runtime::Tensor{t, {0, 0, 1, 1, 3}, {1, 3, 4, 4}}), ov::Exception);
}
TEST_F(OVTensorTest, readRangeRoiBlob) {
ov::runtime::Tensor t{ov::element::i32, {1, 3, 4, 8}};
{
const auto origPtr = t.data<int32_t>();
ASSERT_NE(nullptr, origPtr);
for (size_t i = 0; i < t.get_size(); ++i) {
origPtr[i] = i;
}
}
ov::runtime::Tensor roi_tensor{t, {0, 0, 2, 4}, {1, 3, 4, 8}};
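// the ROI keeps every batch and channel but starts at row 2, column 4 of the 4x8 plane,
// so roi[n][c][h][w] below must match t[n][c][h + 2][w + 4]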
ASSERT_NE(false, static_cast<bool>(roi_tensor));
{
auto roi = roi_tensor.data<int32_t>();
ASSERT_NE(nullptr, roi);
auto strides = roi_tensor.get_strides();
for (size_t n = 0; n < roi_tensor.get_shape()[0]; ++n) {
for (size_t c = 0; c < roi_tensor.get_shape()[1]; ++c) {
for (size_t h = 0; h < roi_tensor.get_shape()[2]; ++h) {
for (size_t w = 0; w < roi_tensor.get_shape()[3]; ++w) {
auto actual = roi[w * strides[3] + h * strides[2] + c * strides[1] + n * strides[0]];
auto expected = t.data<int32_t>()[(w + 4) * strides[3] + (h + 2) * strides[2] +
(c + 0) * strides[1] + (n + 0) * strides[0]];
ASSERT_EQ(expected, actual) << ov::Shape{n, c, h, w};
}
}
}
}
}
}
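Taken together, the tests above pin down the surface of the new Tensor class; a condensed usage sketch follows (shapes and values are illustrative, not taken from the tests).

#include "openvino/runtime/tensor.hpp"

int main() {
    // owning tensor: storage comes from the default allocator
    ov::runtime::Tensor t{ov::element::f32, {2, 3, 4}};
    t.data<float>()[0] = 1.0f;

    // ROI view over the same storage; begin/end coordinates, end exclusive
    ov::runtime::Tensor roi{t, {0, 0, 0}, {2, 3, 2}};
    return roi.get_size() == 2 * 3 * 2 ? 0 : 1;
}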