Introduce ITensor instead of Blob (#16048)

* Introduce ITensor
* Added new allocator
* Hide ITensor from dev API
* Changed some Python tests
* Remove deprecated API from sample
* Fixed warnings
* Skipped unsupported tests
* Fixed exception message
* Fixed template func tests
* Fixed incorrect tests
* Fixed comments and move ITensor to developer API
* Fixed CI issue
* Fixed allocated tensor
* Fixed docs and Windows warning
* Fixed set_shape for strided tensors
* Fixed build and some comments
* Introduce remote tensor
* Fixed code style
* Fixed build
* Remove static assert method
* Remove fail type
* Added device name API
* Try to fix GPU remote tests
* Added debug output
* Try to fix GPU tests
* Fixed comments
* Fixed build
* Added additional element type check
* Revert some comment changes
parent 596036a2db
commit 95faa573ed
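For illustration, the usage pattern the samples below migrate to — a minimal sketch using only the public ov::Tensor API, not code taken from this PR; the shape and element type are arbitrary:

```cpp
#include <vector>

#include "openvino/runtime/tensor.hpp"

int main() {
    // After this change, ov::Tensor allocates and owns its storage directly;
    // samples no longer need a custom SharedTensorAllocator subclass.
    ov::Tensor tensor(ov::element::f32, ov::Shape{1, 3, 224, 224});
    float* data = tensor.data<float>();  // typed access, checked against the element type
    data[0] = 1.0f;

    // Wrapping caller-owned memory still works via the host-pointer constructor.
    std::vector<float> buffer(1 * 3 * 224 * 224);
    ov::Tensor view(ov::element::f32, ov::Shape{1, 3, 224, 224}, buffer.data());
    return 0;
}
```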
@@ -15,7 +15,6 @@
 #include "format_reader_ptr.h"
 #include "samples/slog.hpp"
-#include "shared_tensor_allocator.hpp"
 #include "utils.hpp"
 
 template <typename T>
@@ -31,10 +30,8 @@ ov::Tensor create_tensor_from_image(const std::vector<std::string>& files,
                                     const benchmark_app::InputInfo& inputInfo,
                                     const std::string& inputName,
                                     std::string* filenames_used = nullptr) {
-    size_t tensor_size =
-        std::accumulate(inputInfo.dataShape.begin(), inputInfo.dataShape.end(), 1, std::multiplies<size_t>());
-    auto allocator = std::make_shared<SharedTensorAllocator>(tensor_size * sizeof(T));
-    auto data = reinterpret_cast<T*>(allocator->get_buffer());
+    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape);
+    auto data = tensor.data<T>();
 
     /** Collect images data ptrs **/
     std::vector<std::shared_ptr<uint8_t>> vreader;
@@ -90,7 +87,6 @@ ov::Tensor create_tensor_from_image(const std::vector<std::string>& files,
         }
     }
 
-    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape, ov::Allocator(allocator));
     return tensor;
 }
 
@@ -103,8 +99,8 @@ ov::Tensor create_tensor_from_numpy(const std::vector<std::string>& files,
                                     std::string* filenames_used = nullptr) {
-    size_t tensor_size =
-        std::accumulate(inputInfo.dataShape.begin(), inputInfo.dataShape.end(), 1, std::multiplies<size_t>());
-    auto allocator = std::make_shared<SharedTensorAllocator>(tensor_size * sizeof(T));
-    auto data = reinterpret_cast<T*>(allocator->get_buffer());
+    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape);
+    auto data = tensor.data<T>();
 
     std::vector<std::shared_ptr<unsigned char>> numpy_array_pointers;
     numpy_array_pointers.reserve(batchSize);
@@ -150,7 +146,7 @@ ov::Tensor create_tensor_from_numpy(const std::vector<std::string>& files,
         }
     }
 
-    return ov::Tensor(inputInfo.type, inputInfo.dataShape, ov::Allocator(allocator));
+    return tensor;
 }
 
 template <typename T>
@@ -160,8 +156,8 @@ ov::Tensor create_tensor_im_info(const std::pair<size_t, size_t>& image_size,
                                  const std::string& inputName) {
-    size_t tensor_size =
-        std::accumulate(inputInfo.dataShape.begin(), inputInfo.dataShape.end(), 1, std::multiplies<size_t>());
-    auto allocator = std::make_shared<SharedTensorAllocator>(tensor_size * sizeof(T));
-    auto data = reinterpret_cast<T*>(allocator->get_buffer());
+    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape);
+    char* data = static_cast<char*>(tensor.data());
 
     size_t infoBatchSize = 1;
     if (!inputInfo.layout.empty() && ov::layout::has_batch(inputInfo.layout)) {
@@ -176,15 +172,14 @@ ov::Tensor create_tensor_im_info(const std::pair<size_t, size_t>& image_size,
         for (size_t i = 0; i < iminfoSize; i++) {
             size_t index = b * iminfoSize + i;
             if (0 == i)
-                data[index] = static_cast<T>(image_size.first);
+                data[index] = static_cast<char>(image_size.first);
             else if (1 == i)
-                data[index] = static_cast<T>(image_size.second);
+                data[index] = static_cast<char>(image_size.second);
             else
-                data[index] = 1;
+                data[index] = static_cast<char>(1);
         }
     }
 
-    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape, ov::Allocator(allocator));
     return tensor;
 }
 
@@ -197,8 +192,8 @@ ov::Tensor create_tensor_from_binary(const std::vector<std::string>& files,
                                      std::string* filenames_used = nullptr) {
-    size_t tensor_size =
-        std::accumulate(inputInfo.dataShape.begin(), inputInfo.dataShape.end(), 1, std::multiplies<size_t>());
-    auto allocator = std::make_shared<SharedTensorAllocator>(tensor_size * sizeof(T));
-    char* data = allocator->get_buffer();
+    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape);
+    char* data = static_cast<char*>(tensor.data());
     size_t binaryBatchSize = 1;
     if (!inputInfo.layout.empty() && ov::layout::has_batch(inputInfo.layout)) {
         binaryBatchSize = batchSize;
@@ -245,7 +240,6 @@ ov::Tensor create_tensor_from_binary(const std::vector<std::string>& files,
         }
     }
 
-    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape, ov::Allocator(allocator));
     return tensor;
 }
 
@@ -255,8 +249,8 @@ ov::Tensor create_tensor_random(const benchmark_app::InputInfo& inputInfo,
                                 T rand_max = std::numeric_limits<uint8_t>::max()) {
-    size_t tensor_size =
-        std::accumulate(inputInfo.dataShape.begin(), inputInfo.dataShape.end(), 1, std::multiplies<size_t>());
-    auto allocator = std::make_shared<SharedTensorAllocator>(tensor_size * sizeof(T));
-    auto data = reinterpret_cast<T*>(allocator->get_buffer());
+    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape);
+    auto data = tensor.data<T>();
 
     std::mt19937 gen(0);
     uniformDistribution<T2> distribution(rand_min, rand_max);
@@ -264,7 +258,6 @@ ov::Tensor create_tensor_random(const benchmark_app::InputInfo& inputInfo,
         data[i] = static_cast<T>(distribution(gen));
    }
 
-    auto tensor = ov::Tensor(inputInfo.type, inputInfo.dataShape, ov::Allocator(allocator));
    return tensor;
 }
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "openvino/runtime/allocator.hpp"
-
-class SharedTensorAllocator : public ov::AllocatorImpl {
-public:
-    SharedTensorAllocator(size_t sizeBytes) : size(sizeBytes) {
-        data = new char[size];
-    }
-
-    ~SharedTensorAllocator() {
-        delete[] data;
-    }
-
-    virtual void* allocate(const size_t bytes, const size_t) override {
-        return bytes <= this->size ? (void*)data : nullptr;
-    }
-
-    void deallocate(void* handle, const size_t bytes, const size_t) override {
-        if (handle == data) {
-            delete[] data;
-            data = nullptr;
-        }
-    }
-
-    bool is_equal(const AllocatorImpl& other) const override {
-        auto other_tensor_allocator = dynamic_cast<const SharedTensorAllocator*>(&other);
-        return other_tensor_allocator != nullptr && other_tensor_allocator == this;
-    }
-
-    char* get_buffer() {
-        return data;
-    }
-
-private:
-    char* data;
-    size_t size;
-};
@@ -241,7 +241,7 @@ def test_cannot_set_bigger_shape_on_preallocated_memory():
     assert np.shares_memory(ones_arr, ov_tensor.data)
     with pytest.raises(RuntimeError) as e:
         ov_tensor.shape = ref_shape
-    assert "Cannot call setShape for Blobs created on top of preallocated memory" in str(e.value)
+    assert "failed" in str(e.value)
 
 
 @pytest.mark.skip(reason="no support yet")
@@ -258,11 +258,11 @@ def test_can_reset_shape_after_decreasing_on_preallocated_memory():
     assert list(ov_tensor.shape) == ref_shape_2
 
 
-def test_cannot_set_shape_incorrect_dims():
+def test_can_set_shape_other_dims():
     ov_tensor = Tensor(np.float32, [1, 3, 48, 48])
-    with pytest.raises(RuntimeError) as e:
-        ov_tensor.shape = [3, 28, 28]
-    assert "Dims and format are inconsistent" in str(e.value)
+    ref_shape_1 = [3, 28, 28]
+    ov_tensor.shape = ref_shape_1
+    assert list(ov_tensor.shape) == ref_shape_1
 
 
 @pytest.mark.parametrize("ov_type", [
@@ -23,6 +23,7 @@ add_subdirectory(shape_inference)
 
 set(MIXED_SRC
     "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/allocator.cpp"
+    "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/itensor.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/src/runtime/ov_tensor.cpp")
 
 set_property(SOURCE ${MIXED_SRC}
New file (76 lines): src/core/dev_api/openvino/runtime/itensor.hpp
@@ -0,0 +1,76 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <memory>
+
+#include "openvino/core/coordinate.hpp"
+#include "openvino/core/core_visibility.hpp"
+#include "openvino/core/shape.hpp"
+#include "openvino/core/type/element_type.hpp"
+#include "openvino/runtime/allocator.hpp"
+
+namespace ov {
+
+class OPENVINO_API ITensor : public std::enable_shared_from_this<ITensor> {
+public:
+    /**
+     * @brief Set new shape for tensor
+     * @note Memory allocation may happen
+     * @param shape A new shape
+     */
+    virtual void set_shape(ov::Shape shape) = 0;
+
+    /**
+     * @return A tensor element type
+     */
+    virtual const element::Type& get_element_type() const = 0;
+
+    /**
+     * @return A tensor shape
+     */
+    virtual const Shape& get_shape() const = 0;
+
+    /**
+     * @brief Returns the total number of elements (a product of all the dims or 1 for scalar)
+     * @return The total number of elements
+     */
+    virtual size_t get_size() const;
+
+    /**
+     * @brief Returns the size of the current Tensor in bytes.
+     * @return Tensor's size in bytes
+     */
+    virtual size_t get_byte_size() const;
+
+    /**
+     * @return Tensor's strides in bytes
+     */
+    virtual const Strides& get_strides() const = 0;
+
+    /**
+     * @brief Provides an access to the underlying host memory
+     * @param type Optional type parameter.
+     * @note If type parameter is specified, the method throws an exception
+     * if specified type's fundamental type does not match with tensor element type's fundamental type
+     * @return A host pointer to tensor memory
+     */
+    virtual void* data(const element::Type& type = {}) const = 0;
+
+    /**
+     * @brief Provides an access to the underlying host memory casted to type `T`
+     * @return A host pointer to tensor memory casted to specified type `T`.
+     * @note Throws exception if specified type does not match with tensor element type
+     */
+    template <typename T, typename datatype = typename std::decay<T>::type>
+    T* data() const {
+        return static_cast<T*>(data(element::from<datatype>()));
+    }
+
+protected:
+    virtual ~ITensor();
+};
+
+}  // namespace ov
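A minimal host-memory implementation of the interface above, as a sketch only — the class name is hypothetical, and the in-tree implementations follow later in itensor.cpp:

```cpp
#include <cstdint>
#include <vector>

#include "openvino/core/except.hpp"
#include "openvino/runtime/itensor.hpp"

// Hypothetical ITensor implementation: contiguous row-major host memory.
class SimpleHostTensor : public ov::ITensor {
public:
    SimpleHostTensor(const ov::element::Type& type, const ov::Shape& shape)
        : m_type{type},
          m_shape{shape},
          m_buffer(ov::shape_size(shape) * type.size()) {
        update_strides();
    }

    void set_shape(ov::Shape shape) override {
        OPENVINO_ASSERT(ov::shape_size(shape) * m_type.size() <= m_buffer.size(), "New shape exceeds capacity");
        m_shape = std::move(shape);
        update_strides();
    }

    const ov::element::Type& get_element_type() const override { return m_type; }
    const ov::Shape& get_shape() const override { return m_shape; }
    const ov::Strides& get_strides() const override { return m_strides; }

    void* data(const ov::element::Type& type) const override {
        if (type != ov::element::undefined)
            OPENVINO_ASSERT(type == m_type, "Requested type does not match tensor type");
        return const_cast<uint8_t*>(m_buffer.data());
    }

private:
    void update_strides() {
        // Row-major byte strides: the innermost stride equals the element size.
        m_strides.assign(m_shape.size(), m_type.size());
        for (size_t i = m_shape.size(); i-- > 1;)
            m_strides[i - 1] = m_strides[i] * m_shape[i];
    }

    ov::element::Type m_type;
    ov::Shape m_shape;
    ov::Strides m_strides;
    std::vector<uint8_t> m_buffer;
};
```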
@@ -29,6 +29,9 @@ class Plugin;
 /** @cond INTERNAL */
 class Any;
 namespace util {
+
+OPENVINO_API bool equal(std::type_index lhs, std::type_index rhs);
+
 template <typename T, typename = void>
 struct Read;
 
@@ -416,8 +419,6 @@ class OPENVINO_API Any {
         }
     };
 
-    static bool equal(std::type_index lhs, std::type_index rhs);
-
     class OPENVINO_API Base : public std::enable_shared_from_this<Base> {
     public:
         void type_check(const std::type_info&) const;
@@ -731,7 +732,7 @@ public:
             return true;
         }
         for (const auto& type_index : _impl->base_type_info()) {
-            if (equal(type_index, typeid(decay_t<T>))) {
+            if (util::equal(type_index, typeid(decay_t<T>))) {
                 return true;
             }
         }
@@ -797,7 +798,7 @@ public:
             return *static_cast<decay_t<T>*>(_temp->addressof());
         }
         for (const auto& type_index : _impl->base_type_info()) {
-            if (equal(type_index, typeid(decay_t<T>))) {
+            if (util::equal(type_index, typeid(decay_t<T>))) {
                 return *static_cast<decay_t<T>*>(_impl->addressof());
             }
         }
@@ -820,7 +821,7 @@ public:
             return *static_cast<decay_t<T>*>(_impl->addressof());
         }
         for (const auto& type_index : _impl->base_type_info()) {
-            if (equal(type_index, typeid(decay_t<T>))) {
+            if (util::equal(type_index, typeid(decay_t<T>))) {
                 return *static_cast<decay_t<T>*>(_impl->addressof());
             }
         }
@@ -12,15 +12,20 @@
 #include <cstddef>
 #include <memory>
 
+#include "openvino/core/any.hpp"
 #include "openvino/core/core_visibility.hpp"
+#include "openvino/core/deprecated.hpp"
 
 namespace ov {
 
 /**
  * @interface AllocatorImpl
+ * @deprecated This class will be removed in 2024.0 release
  * @brief Tries to act like [std::pmr::memory_resource](https://en.cppreference.com/w/cpp/memory/memory_resource)
  */
-struct AllocatorImpl : public std::enable_shared_from_this<AllocatorImpl> {
+struct OPENVINO_DEPRECATED("Do not inherit from AllocatorImpl. This class will be removed in 2024.0 release. Pass "
+                           "std::pmr::memory_resource like object directly to ov::Allocator") AllocatorImpl
+    : public std::enable_shared_from_this<AllocatorImpl> {
     /**
     * @brief A smart pointer containing AllocatorImpl object
     */
@@ -61,22 +66,63 @@ class Tensor;
 /**
  * @brief Wraps allocator implementation to provide safe way to store allocator loaded from shared library
  * And constructs default based on `new` `delete` c++ calls allocator if created without parameters
+ * Accepts any [std::pmr::memory_resource](https://en.cppreference.com/w/cpp/memory/memory_resource) like
+ * allocator
 * @ingroup ov_runtime_cpp_api
 */
class OPENVINO_API Allocator {
-    AllocatorImpl::Ptr _impl;
-    std::shared_ptr<void> _so;
-
    /**
     * @brief Constructs Tensor from the initialized std::shared_ptr
-     * @param impl Initialized shared pointer
+     * @param other Initialized allocator
     * @param so Plugin to use. This is required to ensure that Allocator can work properly even if plugin object is
     * destroyed.
     */
-    Allocator(const AllocatorImpl::Ptr& impl, const std::shared_ptr<void>& so);
+    Allocator(const Allocator& other, const std::shared_ptr<void>& so);

    friend class ov::Tensor;

+    struct Base : public std::enable_shared_from_this<Base> {
+        virtual void* addressof() = 0;
+        const void* addressof() const {
+            return const_cast<Base*>(this)->addressof();
+        }
+        virtual const std::type_info& type_info() const = 0;
+        virtual void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t)) = 0;
+        virtual void deallocate(void* handle, const size_t bytes, size_t alignment = alignof(max_align_t)) = 0;
+        virtual bool is_equal(const Base& other) const = 0;
+
+    protected:
+        ~Base() = default;
+    };
+
+    template <typename A>
+    struct Impl : public Base {
+        template <typename... Args>
+        explicit Impl(Args&&... args) : a(std::forward<Args>(args)...) {}
+        void* addressof() override {
+            return &a;
+        }
+        const std::type_info& type_info() const override {
+            return typeid(a);
+        }
+        void* allocate(const size_t bytes, const size_t alignment = alignof(max_align_t)) override {
+            return a.allocate(bytes, alignment);
+        }
+        void deallocate(void* handle, const size_t bytes, size_t alignment = alignof(max_align_t)) override {
+            a.deallocate(handle, bytes, alignment);
+        }
+        bool is_equal(const Base& other) const override {
+            if (util::equal(type_info(), other.type_info())) {
+                return a.is_equal(*static_cast<const A*>(other.addressof()));
+            }
+            return false;
+        }
+        A a;
+    };
+
+    std::shared_ptr<Base> _impl;
+    std::shared_ptr<void> _so;
+
 public:
    /**
     * @brief Destructor preserves unloading order of implementation object and reference to library
@@ -104,11 +150,26 @@ public:
    /// @return reference to the current object
    Allocator& operator=(Allocator&& other) = default;

+    OPENVINO_SUPPRESS_DEPRECATED_START
    /**
     * @brief Constructs Allocator from the initialized std::shared_ptr
     * @param impl Initialized shared pointer
     */
    Allocator(const AllocatorImpl::Ptr& impl);
+    /**
+     * @brief Initialize allocator using any allocator like object
+     * @tparam A Type of allocator
+     * @param a allocator object
+     */
+    template <
+        typename A,
+        typename std::enable_if<!std::is_convertible<A, AllocatorImpl::Ptr>::value &&
+                                    !std::is_same<typename std::decay<A>::type, Allocator>::value &&
+                                    !std::is_abstract<typename std::decay<A>::type>::value &&
+                                    !std::is_convertible<typename std::decay<A>::type, std::shared_ptr<Base>>::value,
+                                bool>::type = true>
+    Allocator(A&& a) : _impl{std::make_shared<Impl<typename std::decay<A>::type>>(std::forward<A>(a))} {}
+    OPENVINO_SUPPRESS_DEPRECATED_END

    /**
     * @brief Allocates memory
@@ -129,9 +190,9 @@ public:
    void deallocate(void* ptr, const size_t bytes = 0, const size_t alignment = alignof(max_align_t));

    /**
-     * @brief Compares with other AllocatorImpl
+     * @brief Compares with other Allocator
     * @param other Other instance of allocator
-     * @return `true` if and only if memory allocated from one AllocatorImpl can be deallocated from the other and vice
+     * @return `true` if and only if memory allocated from one Allocator can be deallocated from the other and vice
     * versa
     */
    bool operator==(const Allocator& other) const;
@@ -149,9 +210,11 @@ public:
    explicit operator bool() const noexcept;
 };

+OPENVINO_SUPPRESS_DEPRECATED_START
 namespace runtime {
 using ov::Allocator;
 using ov::AllocatorImpl;
 }  // namespace runtime
+OPENVINO_SUPPRESS_DEPRECATED_END

 }  // namespace ov
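With the templated constructor above, any memory_resource-like object can be handed straight to ov::Allocator. A hedged sketch — the CountingAllocator type is invented for the example:

```cpp
#include <cstddef>
#include <new>

#include "openvino/runtime/allocator.hpp"

// Invented example type: satisfies the allocate/deallocate/is_equal
// contract that Allocator::Impl<A> forwards to.
struct CountingAllocator {
    void* allocate(const std::size_t bytes, const std::size_t /*alignment*/) {
        ++allocations;
        return ::operator new(bytes);
    }
    void deallocate(void* handle, const std::size_t /*bytes*/, const std::size_t /*alignment*/) {
        ::operator delete(handle);
    }
    bool is_equal(const CountingAllocator& other) const {
        return &other == this;  // identity comparison; note ov::Allocator stores a copy
    }
    std::size_t allocations = 0;
};

int main() {
    ov::Allocator allocator{CountingAllocator{}};  // no AllocatorImpl subclass needed
    void* p = allocator.allocate(1024);
    allocator.deallocate(p, 1024);
    return 0;
}
```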
@@ -18,7 +18,6 @@
 #include "openvino/runtime/allocator.hpp"
 
 namespace InferenceEngine {
-class Blob;
 class IAsyncInferRequestWrapper;
 class IVariableStateWrapper;
 }  // namespace InferenceEngine
@@ -33,6 +32,8 @@ class VariableState;
 class ISyncInferRequest;
 class IInferRequestInternalWrapper;
 class IVariableStateInternalWrapper;
+class ITensor;
+class RemoteTensor;
 
 /**
  * @brief Tensor API holding host memory
@@ -41,8 +42,8 @@ class IVariableStateInternalWrapper;
 */
class OPENVINO_API Tensor {
protected:
-    std::shared_ptr<InferenceEngine::Blob> _impl;  //!< Shared pointer to internal tensor representation
-    std::vector<std::shared_ptr<void>> _so;        //!< Reference to dynamically loaded library
+    std::shared_ptr<ITensor> _impl;          //!< Shared pointer to internal tensor representation
+    std::vector<std::shared_ptr<void>> _so;  //!< Reference to dynamically loaded library

    /**
     * @brief Constructs Tensor from the initialized std::shared_ptr
@@ -50,11 +51,12 @@ protected:
     * @param so Plugin to use. This is required to ensure that Tensor can work properly even if plugin object is
     * destroyed.
     */
-    Tensor(const std::shared_ptr<InferenceEngine::Blob>& impl, const std::vector<std::shared_ptr<void>>& so);
+    Tensor(const std::shared_ptr<ITensor>& impl, const std::vector<std::shared_ptr<void>>& so);

    friend class ov::Core;
    friend class ov::CoreImpl;
    friend class ov::InferRequest;
+    friend class ov::RemoteTensor;
    friend class ov::RemoteContext;
    friend class ov::VariableState;
    friend class ov::ISyncInferRequest;
@@ -103,7 +105,7 @@ public:
     * @param shape Tensor shape
     * @param allocator allocates memory for internal tensor storage
     */
-    Tensor(const element::Type type, const Shape& shape, const Allocator& allocator = {});
+    Tensor(const element::Type& type, const Shape& shape, const Allocator& allocator = {});

    /**
     * @brief Constructs Tensor using element type and shape. Wraps allocated host memory.
@@ -114,7 +116,7 @@ public:
     * @param strides Optional strides parameters in bytes. Strides are supposed to be computed automatically based
     * on shape and element size
     */
-    Tensor(const element::Type type, const Shape& shape, void* host_ptr, const Strides& strides = {});
+    Tensor(const element::Type& type, const Shape& shape, void* host_ptr, const Strides& strides = {});

    /**
     * @brief Constructs Tensor using port from node. Allocate internal host storage using default allocator
@@ -153,12 +155,12 @@ public:
    /**
     * @return A tensor element type
     */
-    element::Type get_element_type() const;
+    const element::Type& get_element_type() const;

    /**
     * @return A tensor shape
     */
-    Shape get_shape() const;
+    const Shape& get_shape() const;

    /**
     * @brief Copy tensor, destination tensor should have the same element type and shape
@@ -198,7 +200,7 @@ public:
     * if specified type's fundamental type does not match with tensor element type's fundamental type
     * @return A host pointer to tensor memory
     */
-    void* data(const element::Type type = {}) const;
+    void* data(const element::Type& type = {}) const;

    /**
     * @brief Provides an access to the underlying host memory casted to type `T`
@@ -9,7 +9,7 @@
 
 namespace ov {
 
-bool Any::equal(std::type_index lhs, std::type_index rhs) {
+bool util::equal(std::type_index lhs, std::type_index rhs) {
     auto result = lhs == rhs;
 #if (defined(__ANDROID__) || defined(__APPLE__)) && defined(__clang__)
     if (!result) {
@@ -20,7 +20,7 @@ bool util::equal(std::type_index lhs, std::type_index rhs) {
 }
 
 bool Any::Base::is(const std::type_info& other) const {
-    return Any::equal(type_info(), other);
+    return util::equal(type_info(), other);
 }
 
 void Any::Base::type_check(const std::type_info& type_info_) const {
@@ -11,19 +11,68 @@
 
 namespace ov {
 
-Allocator::Allocator() : _impl{std::make_shared<BlobAllocator>()} {}
+struct DefaultAllocator {
+    void* allocate(const size_t bytes, const size_t alignment) {
+        if (alignment == alignof(max_align_t)) {
+            return ::operator new(bytes);
+        } else {
+            OPENVINO_ASSERT(alignment && !static_cast<bool>(alignment & (alignment - static_cast<size_t>(1))),
+                            "Alignment is not power of 2: ",
+                            alignment);
+#if defined(_WIN32)
+            return _aligned_malloc(bytes, alignment);
+#else
+            void* result = nullptr;
+            if (posix_memalign(&result, std::max(sizeof(void*), alignment), bytes) != 0) {
+                OPENVINO_THROW("posix_memalign failed");
+            }
+            return result;
+#endif
+        }
+    }
+
+    void deallocate(void* handle, const size_t bytes, const size_t alignment) {
+        if (alignment == alignof(max_align_t)) {
+            ::operator delete(handle);
+        } else {
+#if defined(_WIN32)
+            return _aligned_free(handle);
+#else
+            return free(handle);
+#endif
+        }
+    }
+
+    bool is_equal(const DefaultAllocator&) const {
+        return true;
+    }
+};
+
+Allocator::Allocator() : Allocator{DefaultAllocator{}} {}
+
+OPENVINO_SUPPRESS_DEPRECATED_START
+struct AllocatorImplWrapper {
+    AllocatorImplWrapper(const AllocatorImpl::Ptr& impl_) : impl{impl_} {}
+    void* allocate(const size_t bytes, const size_t alignment) {
+        return impl->allocate(bytes, alignment);
+    }
+    void deallocate(void* handle, const size_t bytes, const size_t alignment) {
+        impl->deallocate(handle, bytes, alignment);
+    }
+    bool is_equal(const AllocatorImplWrapper& other) const {
+        return impl->is_equal(*other.impl);
+    }
+    AllocatorImpl::Ptr impl;
+};
+
+Allocator::Allocator(const AllocatorImpl::Ptr& allocator_impl) : Allocator{AllocatorImplWrapper{allocator_impl}} {}
+OPENVINO_SUPPRESS_DEPRECATED_END
 
 Allocator::~Allocator() {
     _impl = {};
 }
 
-Allocator::Allocator(const std::shared_ptr<AllocatorImpl>& impl, const std::shared_ptr<void>& so)
-    : _impl{impl},
-      _so{so} {
-    OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized.");
-}
-
-Allocator::Allocator(const std::shared_ptr<AllocatorImpl>& impl) : _impl{impl} {
+Allocator::Allocator(const Allocator& other, const std::shared_ptr<void>& so) : _impl{other._impl}, _so{so} {
     OPENVINO_ASSERT(_impl != nullptr, "Allocator was not initialized.");
 }
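The alignment validation in DefaultAllocator::allocate relies on the standard bit trick: a non-zero value is a power of two exactly when clearing its lowest set bit yields zero. Shown standalone:

```cpp
#include <cstddef>

// x & (x - 1) clears the lowest set bit; only powers of two become zero.
constexpr bool is_pow2(std::size_t alignment) {
    return alignment != 0 && (alignment & (alignment - 1)) == 0;
}
static_assert(is_pow2(64), "64 is a power of two");
static_assert(!is_pow2(48), "48 is not a power of two");
```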
@@ -12,7 +12,7 @@
 
 namespace InferenceEngine {
 struct BlobAllocator : public IAllocator {
-    BlobAllocator(const std::shared_ptr<ov::AllocatorImpl>& impl) : _impl{impl} {}
+    BlobAllocator(const ov::Allocator& impl) : _impl{impl} {}
 
     void* lock(void* handle, LockOp) noexcept override {
         return handle;
@@ -22,7 +22,7 @@ struct BlobAllocator : public IAllocator {
 
     void* alloc(const size_t size) noexcept override {
         try {
-            return size_map.emplace(_impl->allocate(size), size).first->first;
+            return size_map.emplace(_impl.allocate(size), size).first->first;
         } catch (...) {
             return nullptr;
         }
@@ -32,24 +32,23 @@ struct BlobAllocator : public IAllocator {
         try {
             auto size = size_map.at(handle);
             size_map.erase(handle);
-            _impl->deallocate(handle, size);
+            _impl.deallocate(handle, size);
             return true;
         } catch (...) {
             return false;
         }
     }
 
-    std::shared_ptr<ov::AllocatorImpl> _impl;
+    ov::Allocator _impl;
     std::unordered_map<void*, size_t> size_map;
 };
 }  // namespace InferenceEngine
 
 namespace ov {
-struct BlobAllocator : public runtime::AllocatorImpl {
-    BlobAllocator(const std::shared_ptr<ie::IAllocator>& impl = std::make_shared<ie::SystemMemoryAllocator>())
-        : _impl{impl} {}
+struct BlobAllocator {
+    BlobAllocator() : _impl{std::make_shared<ie::SystemMemoryAllocator>()} {}
 
-    void* allocate(const size_t bytes, const size_t alignment) override {
+    void* allocate(const size_t bytes, const size_t alignment) {
         OPENVINO_ASSERT(alignment == alignof(max_align_t),
                         "Aligned deallocation is not implemented. alignment: ",
                         alignment);
@@ -58,7 +57,7 @@ struct BlobAllocator {
         return handle;
     }
 
-    void deallocate(void* handle, const size_t bytes, const size_t alignment) override {
+    void deallocate(void* handle, const size_t bytes, const size_t alignment) {
         OPENVINO_ASSERT(bytes == 0, "Sized deallocation is not implemented. bytes: ", bytes);
         OPENVINO_ASSERT(alignment == alignof(max_align_t),
                         "Aligned deallocation is not implemented. alignment: ",
@@ -67,14 +66,10 @@ struct BlobAllocator {
         OPENVINO_ASSERT(res != false, "Can not deallocate storage");
     }
 
-    bool is_equal(const AllocatorImpl& other) const override {
-        auto other_blob_allocator = dynamic_cast<const BlobAllocator*>(&other);
-        if (other_blob_allocator == nullptr)
-            return false;
-        if (other_blob_allocator->_impl == _impl)
+    bool is_equal(const BlobAllocator& other) const {
+        if (other._impl == _impl)
             return true;
-        auto other_system_memory_allocator =
-            dynamic_cast<const ie::SystemMemoryAllocator*>(other_blob_allocator->_impl.get());
+        auto other_system_memory_allocator = dynamic_cast<const ie::SystemMemoryAllocator*>(other._impl.get());
         auto system_allocator = dynamic_cast<const ie::SystemMemoryAllocator*>(_impl.get());
         if (system_allocator != nullptr && other_system_memory_allocator != nullptr)
             return true;
New file (281 lines): src/core/src/runtime/itensor.cpp
@@ -0,0 +1,281 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/runtime/itensor.hpp"
+
+#include "dev/make_tensor.hpp"
+#include "openvino/core/except.hpp"
+#include "openvino/runtime/allocator.hpp"
+#include "openvino/runtime/properties.hpp"
+
+namespace ov {
+
+ITensor::~ITensor() = default;
+
+size_t ITensor::get_size() const {
+    return shape_size(get_shape());
+}
+
+size_t ITensor::get_byte_size() const {
+    return (get_size() * get_element_type().bitwidth() + 8 - 1) / 8;
+}
+
+/**
+ * @brief View tensor to external memory
+ * The tensor doesn't own the external memory
+ */
+class ViewTensor : public ITensor {
+public:
+    ViewTensor(const element::Type element_type, const Shape& shape, void* ptr)
+        : m_element_type{element_type},
+          m_shape{shape},
+          m_capacity{shape},
+          m_ptr{ptr} {
+        OPENVINO_ASSERT(m_ptr != nullptr);
+        OPENVINO_ASSERT(m_element_type != element::undefined && m_element_type != element::dynamic);
+        update_strides();
+    }
+
+    void* data(const element::Type& element_type) const override {
+        if (element_type != element::undefined && element_type != element::dynamic) {
+            OPENVINO_ASSERT(element_type == get_element_type(),
+                            "Tensor data with element type ",
+                            get_element_type(),
+                            ", is not representable as pointer to ",
+                            element_type);
+        }
+        return m_ptr;
+    }
+
+    const element::Type& get_element_type() const override {
+        return m_element_type;
+    }
+
+    const Shape& get_shape() const override {
+        return m_shape;
+    }
+
+    void set_shape(ov::Shape new_shape) override {
+        OPENVINO_ASSERT(shape_size(new_shape) <= ov::shape_size(m_capacity), "Could not set new shape: ", new_shape);
+        m_shape = std::move(new_shape);
+        update_strides();
+    }
+
+    const Strides& get_strides() const override {
+        OPENVINO_ASSERT(m_element_type.bitwidth() >= 8,
+                        "Could not get strides for types with bitwidths less than 8 bit. Tensor type: ",
+                        m_element_type);
+        return m_strides;
+    }
+
+protected:
+    void update_strides() {
+        if (m_element_type.bitwidth() < 8)
+            return;
+        auto& shape = get_shape();
+        m_strides.clear();
+        if (!shape.empty()) {
+            m_strides.resize(shape.size());
+            m_strides.back() = m_element_type.size();
+            std::copy(shape.rbegin(), shape.rend() - 1, m_strides.rbegin() + 1);
+            std::partial_sum(m_strides.rbegin(), m_strides.rend(), m_strides.rbegin(), std::multiplies<size_t>());
+        }
+    }
+
+    element::Type m_element_type;
+    Shape m_shape;
+    Shape m_capacity;
+    Strides m_strides;
+    void* m_ptr;
+};
+
+/**
+ * @brief View tensor on external memory with strides
+ */
+class StridedViewTensor : public ViewTensor {
+public:
+    StridedViewTensor(const element::Type element_type, const Shape& shape, void* ptr, const Strides& strides)
+        : ViewTensor{element_type, shape, ptr} {
+        OPENVINO_ASSERT(
+            get_element_type().bitwidth() >= 8,
+            "Could not create strided access tensor for types with bitwidths less than 8 bit. Tensor type: ",
+            get_element_type());
+        // Save default strides
+        auto shape_strides = m_strides;
+        // Change strides
+        m_strides = strides;
+        OPENVINO_ASSERT(m_shape.size() == m_strides.size());
+
+        for (size_t i = 0; i < m_strides.size(); ++i) {
+            OPENVINO_ASSERT(shape_strides[i] <= m_strides[i],
+                            "shape stride: ",
+                            shape_strides[i],
+                            ", stride: ",
+                            m_strides[i]);
+            OPENVINO_ASSERT((m_strides[i] % get_element_type().size()) == 0,
+                            "shape stride: ",
+                            shape_strides[i],
+                            ", stride: ",
+                            m_strides[i]);
+            if (i) {
+                OPENVINO_ASSERT(m_strides[i - 1] >= m_strides[i] * shape[i],
+                                "Strides: ",
+                                m_strides,
+                                " are incompatible with shapes: ",
+                                m_shape);
+            }
+        }
+    }
+
+    void set_shape(ov::Shape new_shape) override {
+        OPENVINO_ASSERT(m_capacity.size() == new_shape.size(),
+                        "Cannot set new shape: ",
+                        new_shape,
+                        " for tensor with strides! Shapes are not compatible.");
+        for (size_t i = 0; i < new_shape.size(); i++) {
+            OPENVINO_ASSERT(m_capacity[i] >= new_shape[i],
+                            "Cannot set new shape: ",
+                            new_shape,
+                            " for tensor with strides! Dimension: ",
+                            i,
+                            " is not compatible.");
+        }
+        m_shape = std::move(new_shape);
+    }
+};
+
+/**
+ * @brief Creates view tensor on external memory
+ *
+ * @param element_type Tensor element type
+ * @param shape Tensor shape
+ * @param ptr pointer to external memory
+ * @param byte_strides Tensor strides
+ *
+ * @return Shared pointer to tensor interface
+ */
+std::shared_ptr<ITensor> make_tensor(const element::Type element_type,
+                                     const Shape& shape,
+                                     void* ptr,
+                                     const Strides& byte_strides) {
+    return byte_strides.empty() ? std::make_shared<ViewTensor>(element_type, shape, ptr)
+                                : std::make_shared<StridedViewTensor>(element_type, shape, ptr, byte_strides);
+}
+
+/**
+ * @brief Tensor with allocated memory
+ * Tensor owns the memory
+ */
+class AllocatedTensor : public ViewTensor {
+public:
+    AllocatedTensor(const element::Type element_type, const Shape& shape, const Allocator& allocator)
+        : ViewTensor{element_type,
+                     shape,
+                     [&] {
+                         OPENVINO_ASSERT(allocator, "Allocator was not initialized");
+                         return const_cast<Allocator&>(allocator).allocate(element_type.size() * shape_size(shape));
+                     }()},
+          m_allocator{allocator} {}
+
+    ~AllocatedTensor() {
+        m_allocator.deallocate(m_ptr, get_byte_size());
+    }
+
+    void set_shape(ov::Shape new_shape) override {
+        auto old_byte_size = get_byte_size();
+        m_shape = std::move(new_shape);
+        if (get_byte_size() > old_byte_size) {
+            m_allocator.deallocate(m_ptr, old_byte_size);
+            m_ptr = m_allocator.allocate(get_byte_size());
+        }
+        update_strides();
+    }
+
+private:
+    Allocator m_allocator;
+};
+
+/**
+ * @brief Creates allocated tensor
+ *
+ * @param element_type Tensor element type
+ * @param shape Tensor shape
+ * @param allocator Tensor allocator
+ *
+ * @return Shared pointer to tensor interface
+ */
+std::shared_ptr<ITensor> make_tensor(const element::Type element_type, const Shape& shape, const Allocator& allocator) {
+    return std::make_shared<AllocatedTensor>(element_type, shape, allocator);
+}
+
+/**
+ * @brief ROI tensor on other tensor
+ * ROI tensor holds the owner
+ */
+class RoiTensor : public ITensor {
+public:
+    RoiTensor(const std::shared_ptr<ITensor>& owner, const Coordinate& begin, const Coordinate& end)
+        : m_owner{owner},
+          m_offsets{begin} {
+        OPENVINO_ASSERT(owner->get_element_type().bitwidth() >= 8,
+                        "ROI Tensor for types with bitwidths less than 8 bit is not implemented. Tensor type: ",
+                        owner->get_element_type());
+        auto owner_shape = owner->get_shape();
+        OPENVINO_ASSERT(owner_shape.size() == begin.size());
+        OPENVINO_ASSERT(begin.size() == end.size());
+        m_shape.resize(begin.size());
+        for (size_t i = 0; i < begin.size(); ++i) {
+            OPENVINO_ASSERT(begin[i] <= owner_shape[i]);
+            OPENVINO_ASSERT(end[i] <= owner_shape[i]);
+            m_shape[i] = end[i] - begin[i];
+            OPENVINO_ASSERT(m_shape[i] <= owner_shape[i]);
+        }
+    }
+
+    const element::Type& get_element_type() const override {
+        return m_owner->get_element_type();
+    }
+
+    const Strides& get_strides() const override {
+        return m_owner->get_strides();
+    }
+
+    const Shape& get_shape() const override {
+        return m_shape;
+    }
+
+    void set_shape(ov::Shape new_shape) override {
+        OPENVINO_THROW("Shapes cannot be changed for ROI Tensor");
+    }
+
+    void* data(const element::Type& element_type) const override {
+        auto owner_data = m_owner->data(element_type);
+        auto& strides = get_strides();
+        size_t byte_offset =
+            std::inner_product(m_offsets.begin(), m_offsets.end(), strides.begin(), static_cast<size_t>(0));
+        return static_cast<uint8_t*>(owner_data) + byte_offset;
+    }
+
+private:
+    std::shared_ptr<ITensor> m_owner;
+    Coordinate m_offsets;
+    Shape m_shape;
+};
+
+/**
+ * @brief Creates ROI tensor
+ *
+ * @param other Tensor that owns the memory
+ * @param begin Begin coordinates
+ * @param end End coordinates
+ *
+ * @return Shared pointer to tensor interface
+ */
+std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ITensor>& other,
+                                     const Coordinate& begin,
+                                     const Coordinate& end) {
+    return std::make_shared<RoiTensor>(other, begin, end);
+}
+
+}  // namespace ov
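The ROI byte offset computed in RoiTensor::data above is just the inner product of the begin coordinate with the owner's byte strides. A standalone check with made-up numbers:

```cpp
#include <cstddef>
#include <numeric>
#include <vector>

int main() {
    // Hypothetical owner: shape {2, 3, 4}, f64 elements -> byte strides {96, 32, 8}.
    std::vector<std::size_t> strides = {96, 32, 8};
    std::vector<std::size_t> begin = {0, 1, 2};  // ROI begin coordinate
    std::size_t byte_offset =
        std::inner_product(begin.begin(), begin.end(), strides.begin(), static_cast<std::size_t>(0));
    return byte_offset == 48 ? 0 : 1;  // 0*96 + 1*32 + 2*8 == 48
}
```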
@@ -4,14 +4,14 @@
 
 #include <numeric>
 
-#include "blob_factory.hpp"     // IE private header
-#include "ie_ngraph_utils.hpp"  // IE private header
+#include "dev/make_tensor.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/core/node_output.hpp"
 #include "openvino/core/shape.hpp"
 #include "openvino/core/strides.hpp"
+#include "openvino/runtime/itensor.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
 #include "openvino/runtime/tensor.hpp"
-#include "runtime/blob_allocator.hpp"
 #include "shape_util.hpp"
 
 namespace ov {
@@ -32,70 +32,21 @@ Tensor::~Tensor() {
     _impl = {};
 }
 
-Tensor::Tensor(const std::shared_ptr<ie::Blob>& impl, const std::vector<std::shared_ptr<void>>& so)
+Tensor::Tensor(const std::shared_ptr<ITensor>& impl, const std::vector<std::shared_ptr<void>>& so)
     : _impl{impl},
       _so{so} {
     OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
 }
 
-Tensor::Tensor(const element::Type element_type, const Shape& shape, const Allocator& allocator) {
-    OPENVINO_ASSERT(allocator, "Allocator was not initialized");
-    auto allocator_impl = dynamic_cast<const BlobAllocator*>(allocator._impl.get());
-    auto blob_allocator =
-        (allocator_impl != nullptr) ? allocator_impl->_impl : std::make_shared<ie::BlobAllocator>(allocator._impl);
-    _impl = make_blob_with_precision(
-        {ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByDims(shape)},
-        blob_allocator);
-    _impl->allocate();
-}
+Tensor::Tensor(const element::Type& element_type, const Shape& shape, const Allocator& allocator)
+    : _impl{make_tensor(element_type, shape, allocator)} {}
 
-Tensor::Tensor(const element::Type element_type, const Shape& shape, void* host_ptr, const Strides& byte_strides) {
-    ie::SizeVector blk_order(shape.size());
-    std::iota(blk_order.begin(), blk_order.end(), 0);
-    ie::SizeVector dim_offset(shape.size(), 0);
-    ie::SizeVector blk_strides;
-    if (byte_strides.empty()) {
-        blk_strides = ov::row_major_strides(shape);
-    } else {
-        blk_strides.resize(byte_strides.size());
-        std::transform(byte_strides.begin(),
-                       byte_strides.end(),
-                       blk_strides.begin(),
-                       [&element_type](size_t byte_stride) {
-                           OPENVINO_ASSERT(byte_stride % element_type.size() == 0,
-                                           "Limitation: Stride in bytes ",
-                                           byte_stride,
-                                           " should be divisible by size of element ",
-                                           element_type.size());
-                           return byte_stride / element_type.size();
-                       });
-    }
-
-    try {
-        _impl = make_blob_with_precision(ie::details::convertPrecision(element_type),
-                                         ie::TensorDesc{ie::details::convertPrecision(element_type),
-                                                        shape,
-                                                        ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}},
-                                         host_ptr);
-    } catch (const std::exception& ex) {
-        OPENVINO_THROW(ex.what());
-    } catch (...) {
-        OPENVINO_ASSERT(false, "Unexpected exception");
-    }
-}
+Tensor::Tensor(const element::Type& element_type, const Shape& shape, void* host_ptr, const Strides& byte_strides)
+    : _impl{make_tensor(element_type, shape, host_ptr, byte_strides)} {}
 
-Tensor::Tensor(const Tensor& owner, const Coordinate& begin, const Coordinate& end) : _so{owner._so} {
-    OPENVINO_ASSERT(owner.get_element_type().bitwidth() >= 8,
-                    "ROI Tensor for types with bitwidths less then 8 bit is not implemented. Tensor type: ",
-                    owner.get_element_type());
-    try {
-        _impl = owner._impl->createROI(begin, end);
-    } catch (const std::exception& ex) {
-        OPENVINO_THROW(ex.what());
-    } catch (...) {
-        OPENVINO_ASSERT(false, "Unexpected exception");
-    }
-}
+Tensor::Tensor(const Tensor& owner, const Coordinate& begin, const Coordinate& end)
+    : _impl{make_tensor(owner._impl, begin, end)},
+      _so{owner._so} {}
 
 Tensor::Tensor(const ov::Output<const ov::Node>& port, const Allocator& allocator)
     : Tensor(port.get_element_type(),
@@ -108,23 +59,16 @@ Tensor::Tensor(const ov::Output<const ov::Node>& port, void* host_ptr, const Str
              host_ptr,
             byte_strides) {}
 
-element::Type Tensor::get_element_type() const {
-    OV_TENSOR_STATEMENT(return ie::details::convertPrecision(_impl->getTensorDesc().getPrecision()));
+const element::Type& Tensor::get_element_type() const {
+    OV_TENSOR_STATEMENT(return _impl->get_element_type());
 }
 
 void Tensor::set_shape(const ov::Shape& shape) {
-    // WA for tensor conversion from host tensor with dynamic shape.
-    if (util::is_dynamic_shape(get_shape())) {
-        _impl = make_blob_with_precision(
-            {_impl->getTensorDesc().getPrecision(), shape, ie::TensorDesc::getLayoutByRank(shape.size())});
-        _impl->allocate();
-    } else {
-        OV_TENSOR_STATEMENT(_impl->setShape({shape.begin(), shape.end()}));
-    }
+    OV_TENSOR_STATEMENT(_impl->set_shape(shape));
 }
 
-Shape Tensor::get_shape() const {
-    OV_TENSOR_STATEMENT({ return _impl->getTensorDesc().getBlockingDesc().getBlockDims(); });
+const Shape& Tensor::get_shape() const {
+    OV_TENSOR_STATEMENT(return _impl->get_shape());
 }
 
 void Tensor::copy_to(ov::Tensor& dst) const {
@@ -258,55 +202,19 @@ void Tensor::copy_to(ov::Tensor& dst) const {
 }
 
 Strides Tensor::get_strides() const {
-    OPENVINO_ASSERT(get_element_type().bitwidth() >= 8,
-                    "Could not get strides for types with bitwidths less then 8 bit. Tensor type: ",
-                    get_element_type());
-    OV_TENSOR_STATEMENT({
-        const auto& element_strides = _impl->getTensorDesc().getBlockingDesc().getStrides();
-        const size_t elem_size = get_element_type().size();
-        Strides byte_strides;
-        byte_strides.resize(element_strides.size());
-        std::transform(element_strides.begin(),
-                       element_strides.end(),
-                       byte_strides.begin(),
-                       [&elem_size](size_t stride) {
-                           return stride * elem_size;
-                       });
-        return byte_strides;
-    });
+    OV_TENSOR_STATEMENT(return _impl->get_strides(););
 }
 
 size_t Tensor::get_size() const {
-    OV_TENSOR_STATEMENT(return _impl->size());
+    OV_TENSOR_STATEMENT(return _impl->get_size());
 }
 
 size_t Tensor::get_byte_size() const {
-    OV_TENSOR_STATEMENT(return _impl->byteSize(););
+    OV_TENSOR_STATEMENT(return _impl->get_byte_size(););
 }
 
-void* Tensor::data(const element::Type element_type) const {
-    OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
-#define TYPE_CHECK(TYPE) (dynamic_cast<const ie::TBlob<TYPE>*>(_impl.get()) != nullptr)
-    auto host_accesable_implementation = TYPE_CHECK(bool) || TYPE_CHECK(int8_t) || TYPE_CHECK(uint8_t) ||
-                                         TYPE_CHECK(int16_t) || TYPE_CHECK(uint16_t) || TYPE_CHECK(int32_t) ||
-                                         TYPE_CHECK(uint32_t) || TYPE_CHECK(int64_t) || TYPE_CHECK(uint64_t) ||
-                                         TYPE_CHECK(float) || TYPE_CHECK(double);
-#undef TYPE_CHECK
-    OPENVINO_ASSERT(host_accesable_implementation, "Tensor implementation type dose not contains host accessable data");
-    if (element_type != element::undefined) {
-        OPENVINO_ASSERT(element_type == get_element_type(),
-                        "Tensor data with element type ",
-                        get_element_type(),
-                        ", is not representable as pointer to ",
-                        element_type);
-    }
-    // since we don't use byte offsets, we need to explicitly multiply by element_size
-    auto byte_offset = _impl->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size();
-    OPENVINO_ASSERT((get_element_type().bitwidth() >= 8) || (byte_offset == 0),
-                    "ROI access for types with bitwidths less then 8 bit is not implemented. Tensor type: ",
-                    get_element_type());
-    OV_TENSOR_STATEMENT(
-        { return byte_offset + InferenceEngine::as<InferenceEngine::MemoryBlob>(_impl)->rmap().as<uint8_t*>(); });
+void* Tensor::data(const element::Type& element_type) const {
+    OV_TENSOR_STATEMENT(return _impl->data(element_type));
 }
 
 bool Tensor::operator!() const noexcept {
@@ -25,18 +25,18 @@ TEST_F(OVDefaultAllocatorTest, canAllocateAndDeallocate) {
     ASSERT_NO_THROW(allocator.deallocate(ptr));
 }
 
-TEST_F(OVDefaultAllocatorTest, alignedAllocationIsNotImplemented) {
+TEST_F(OVDefaultAllocatorTest, alignedAllocationNotThrow) {
     ov::Allocator allocator;
-    ASSERT_THROW(allocator.allocate(64, 64), ov::Exception);
+    ASSERT_NO_THROW(allocator.allocate(64, 64));
 }
 
-TEST_F(OVDefaultAllocatorTest, sizedAndAlignedDeallocationAreNotImplemented) {
+TEST_F(OVDefaultAllocatorTest, sizedAndAlignedDeallocationNotThrow) {
     ov::Allocator allocator;
     void* ptr = nullptr;
     ASSERT_NO_THROW(ptr = allocator.allocate(64));
-    ASSERT_THROW(allocator.deallocate(ptr, 64), ov::Exception);
-    ASSERT_THROW(allocator.deallocate(ptr, 0, 64), ov::Exception);
-    ASSERT_NO_THROW(allocator.deallocate(ptr));
+    ASSERT_NO_THROW(allocator.deallocate(ptr, 64));
+    ASSERT_NO_THROW(ptr = allocator.allocate(64, 64));
+    ASSERT_NO_THROW(allocator.deallocate(ptr, 64, 64));
 }
 
 TEST_F(OVDefaultAllocatorTest, defaultAllocatorsAreEqual) {
@@ -87,16 +87,18 @@ TEST_F(OVTensorTest, operators) {
     ASSERT_TRUE(!t);
 }
 
-class OVMockAllocator : public ov::AllocatorImpl {
+OPENVINO_SUPPRESS_DEPRECATED_START
+class OVMockAllocatorImpl : public ov::AllocatorImpl {
 public:
     MOCK_METHOD(void*, allocate, (size_t, size_t), ());
     MOCK_METHOD(void, deallocate, (void*, size_t, size_t), ());  // NOLINT(readability/casting)
     MOCK_METHOD(bool, is_equal, (const ov::AllocatorImpl&), (const, noexcept));  // NOLINT(readability/casting)
 };
 
-TEST_F(OVTensorTest, canCreateTensorUsingMockAllocator) {
+OPENVINO_SUPPRESS_DEPRECATED_START
+TEST_F(OVTensorTest, canCreateTensorUsingMockAllocatorImpl) {
     ov::Shape shape = {1, 2, 3};
-    auto allocator = std::make_shared<OVMockAllocator>();
+    auto allocator = std::make_shared<OVMockAllocatorImpl>();
 
     EXPECT_CALL(*allocator, allocate(::testing::_, ::testing::_))
         .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
@@ -104,6 +106,40 @@ TEST_F(OVTensorTest, canCreateTensorUsingMockAllocatorImpl) {
 
     { ov::Tensor t{ov::element::f32, shape, ov::Allocator{allocator}}; }
 }
+OPENVINO_SUPPRESS_DEPRECATED_END
 
+struct OVMockAllocator {
+    struct Impl {
+        MOCK_METHOD(void*, allocate, (size_t, size_t), ());
+        MOCK_METHOD(void, deallocate, (void*, size_t, size_t), ());
+        MOCK_METHOD(bool, is_equal, (const Impl&), (const, noexcept));
+    };
+    OVMockAllocator() : impl{std::make_shared<Impl>()} {}
+
+    void* allocate(size_t b, size_t a) {
+        return impl->allocate(b, a);
+    }
+
+    void deallocate(void* ptr, size_t b, size_t a) {
+        impl->deallocate(ptr, b, a);
+    }
+    bool is_equal(const OVMockAllocator& other) const {
+        return impl->is_equal(*other.impl);
+    }
+
+    std::shared_ptr<Impl> impl;
+};
+
+TEST_F(OVTensorTest, canCreateTensorUsingMockAllocator) {
+    ov::Shape shape = {1, 2, 3};
+    OVMockAllocator allocator;
+
+    EXPECT_CALL(*allocator.impl, allocate(::testing::_, ::testing::_))
+        .WillRepeatedly(testing::Return(reinterpret_cast<void*>(1)));
+    EXPECT_CALL(*allocator.impl, deallocate(::testing::_, ::testing::_, ::testing::_)).Times(1);
+
+    { ov::Tensor t{ov::element::f32, shape, allocator}; }
+}
 
 TEST_F(OVTensorTest, canAccessExternalData) {
     ov::Shape shape = {1, 1, 3};
@@ -235,7 +271,7 @@ TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemory) {
     ASSERT_NO_THROW(t.set_shape(newShape));
 }
 
-TEST_F(OVTensorTest, DISABLED_canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemory) {
+TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemory) {
     float data[4 * 5 * 6 * 2];
     ov::Tensor t{ov::element::f32, {4, 5, 6}, data};
     const ov::Shape smallerShape({1, 2, 3});
@@ -245,6 +281,16 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemory) {
     ASSERT_NO_THROW(t.set_shape(originalShape));
 }
 
+TEST_F(OVTensorTest, canChangeShapeOnStridedTensor) {
+    float data[64 * 4];
+    ov::Tensor t{ov::element::f32, {4, 2, 2}, data, {64, 16, 4}};
+    const ov::Shape incorrect_shape({2, 4, 2});
+    const ov::Shape correct_shape({1, 1, 2});
+
+    ASSERT_THROW(t.set_shape(incorrect_shape), ov::Exception);
+    ASSERT_NO_THROW(t.set_shape(correct_shape));
+}
+
 TEST_F(OVTensorTest, makeRangeRoiTensor) {
     ov::Tensor t{ov::element::i32, {1, 3, 6, 5}};  // RGBp picture of size (WxH) = 5x6
     ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
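What the new canChangeShapeOnStridedTensor test exercises, written as plain usage — a sketch; the sizes mirror the test:

```cpp
#include "openvino/runtime/tensor.hpp"

int main() {
    float data[64 * 4];
    // A 4x2x2 f32 view with padded byte strides {64, 16, 4}.
    ov::Tensor t{ov::element::f32, {4, 2, 2}, data, {64, 16, 4}};
    t.set_shape({1, 1, 2});  // fine: each dimension stays within its original capacity
    // t.set_shape({2, 4, 2}) would throw: dimension 1 exceeds its capacity of 2.
    return 0;
}
```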
New file (37 lines): src/inference/dev_api/openvino/runtime/iremote_tensor.hpp
@@ -0,0 +1,37 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief OpenVINO Runtime IRemoteTensor interface
+ * @file openvino/runtime/iremote_tensor.hpp
+ */
+
+#pragma once
+
+#include "openvino/core/except.hpp"
+#include "openvino/runtime/common.hpp"
+#include "openvino/runtime/itensor.hpp"
+
+namespace ov {
+
+class OPENVINO_RUNTIME_API IRemoteTensor : public ITensor {
+public:
+    void* data(const element::Type& type = {}) const final {
+        OPENVINO_NOT_IMPLEMENTED;
+    }
+
+    ~IRemoteTensor() override;
+
+    /**
+     * @brief Returns additional information associated with tensor
+     * @return Map of property names to properties
+     */
+    virtual const AnyMap& get_properties() const = 0;
+    /**
+     * @brief Returns device name
+     * @return Device name
+     */
+    virtual const std::string& get_device_name() const = 0;
+};
+}  // namespace ov
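A hypothetical plugin-side implementation, to illustrate what the interface requires; the device name and property map contents are invented for the example:

```cpp
#include <string>

#include "openvino/runtime/iremote_tensor.hpp"

// Invented example: a remote tensor whose storage lives on "FAKE_DEVICE".
class FakeDeviceTensor : public ov::IRemoteTensor {
public:
    FakeDeviceTensor(const ov::element::Type& type, const ov::Shape& shape) : m_type{type}, m_shape{shape} {}

    void set_shape(ov::Shape shape) override {
        m_shape = std::move(shape);  // a real plugin would reallocate device memory here
    }
    const ov::element::Type& get_element_type() const override { return m_type; }
    const ov::Shape& get_shape() const override { return m_shape; }
    const ov::Strides& get_strides() const override {
        OPENVINO_NOT_IMPLEMENTED;  // device memory need not be linearly addressable
    }
    const ov::AnyMap& get_properties() const override { return m_properties; }
    const std::string& get_device_name() const override { return m_device_name; }

private:
    ov::element::Type m_type;
    ov::Shape m_shape;
    ov::AnyMap m_properties = {{"MEM_TYPE", std::string{"fake"}}};
    std::string m_device_name = "FAKE_DEVICE";
};
```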
@@ -192,7 +192,7 @@ public:
     *
     * @param dims new shape
     */
-    void setShape(const SizeVector& dims);
+    virtual void setShape(const SizeVector& dims);

    /**
     * @deprecated Cast to MemoryBlob and use new wlock/rwlock API instead.
@@ -7,9 +7,11 @@
 #include <exception>
 
 #include "any_copy.hpp"
+#include "dev/make_tensor.hpp"
 #include "ie_ngraph_utils.hpp"
 #include "ie_remote_blob.hpp"
 #include "openvino/core/except.hpp"
+#include "openvino/runtime/itensor.hpp"
 #include "openvino/runtime/remote_context.hpp"
 
 #define OV_REMOTE_CONTEXT_STATEMENT(...) \
@@ -67,7 +69,7 @@ RemoteTensor RemoteContext::create_tensor(const element::Type& type, const Shape
         {ie::details::convertPrecision(type), shape, ie::TensorDesc::getLayoutByRank(shape.size())},
         params);
     blob->allocate();
-    return {blob, {_so}};
+    return {ov::make_tensor(blob), {_so}};
     });
 }
 
@@ -76,7 +78,7 @@ Tensor RemoteContext::create_host_tensor(const element::Type element_type, const
     auto blob = _impl->CreateHostBlob(
         {ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByRank(shape.size())});
     blob->allocate();
-    return {blob, {_so}};
+    return {ov::make_tensor(blob), {_so}};
     });
 }
@ -13,6 +13,7 @@
|
||||
#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
|
||||
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
|
||||
#include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp"
|
||||
#include "dev/make_tensor.hpp"
|
||||
#include "icompiled_model_wrapper.hpp"
|
||||
#include "ie_blob.h"
|
||||
#include "ie_common.h"
|
||||
@ -30,6 +31,7 @@
|
||||
#include "openvino/runtime/icompiled_model.hpp"
|
||||
#include "openvino/runtime/iinfer_request.hpp"
|
||||
#include "openvino/runtime/iplugin.hpp"
|
||||
#include "openvino/runtime/itensor.hpp"
|
||||
#include "openvino/runtime/ivariable_state.hpp"
|
||||
#include "openvino/runtime/profiling_info.hpp"
|
||||
#include "openvino/runtime/remote_context.hpp"
|
||||
@ -206,11 +208,11 @@ public:
|
||||
}
|
||||
|
||||
void SetState(const InferenceEngine::Blob::Ptr& newState) override {
|
||||
m_state->set_state(ov::Tensor(newState, {}));
|
||||
m_state->set_state(ov::Tensor(ov::make_tensor(newState), {}));
|
||||
}
|
||||
|
||||
InferenceEngine::Blob::CPtr GetState() const override {
|
||||
return m_state->get_state()._impl;
|
||||
return tensor_to_blob(m_state->get_state()._impl);
|
||||
}
|
||||
};
|
||||
|
||||
@ -499,7 +501,7 @@ public:
|
||||
|
||||
void SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr& data) override {
|
||||
try {
|
||||
m_request->set_tensor(find_port(name), ov::Tensor{data, {}});
|
||||
m_request->set_tensor(find_port(name), ov::Tensor{ov::make_tensor(data), {}});
|
||||
} catch (const ov::Exception& ex) {
|
||||
const std::string what = ex.what();
|
||||
if (what.find("Failed to set tensor") != std::string::npos) {
|
||||
@ -513,7 +515,7 @@ public:
|
||||
try {
|
||||
std::vector<ov::Tensor> tensors;
|
||||
for (const auto& blob : blobs) {
|
||||
tensors.emplace_back(ov::Tensor{blob, {}});
|
||||
tensors.emplace_back(ov::Tensor{ov::make_tensor(blob), {}});
|
||||
}
|
||||
m_request->set_tensors(find_port(name), tensors);
|
||||
} catch (const ov::Exception& ex) {
|
||||
@ -522,14 +524,14 @@ public:
|
||||
}
|
||||
|
||||
InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override {
|
||||
return m_request->get_tensor(find_port(name))._impl;
|
||||
return tensor_to_blob(m_request->get_tensor(find_port(name))._impl);
|
||||
}
|
||||
|
||||
InferenceEngine::BatchedBlob::Ptr GetBlobs(const std::string& name) override {
|
||||
auto tensors = m_request->get_tensors(find_port(name));
|
||||
std::vector<InferenceEngine::Blob::Ptr> blobs;
|
||||
for (const auto& tensor : tensors) {
|
||||
blobs.emplace_back(tensor._impl);
|
||||
blobs.emplace_back(tensor_to_blob(tensor._impl));
|
||||
}
|
||||
return std::make_shared<InferenceEngine::BatchedBlob>(blobs);
|
||||
}
|
||||
@ -604,11 +606,12 @@ public:
|
||||
}
|
||||
|
||||
void set_state(const ov::Tensor& state) override {
|
||||
m_state->SetState(state._impl);
|
||||
m_state->SetState(ov::tensor_to_blob(state._impl));
|
||||
}
|
||||
|
||||
const ov::Tensor& get_state() const override {
|
||||
m_converted_state = ov::Tensor(std::const_pointer_cast<InferenceEngine::Blob>(m_state->GetState()), {});
|
||||
m_converted_state =
|
||||
ov::Tensor(ov::make_tensor(std::const_pointer_cast<InferenceEngine::Blob>(m_state->GetState())), {});
|
||||
return m_converted_state;
|
||||
}
|
||||
};
|
||||
@ -706,11 +709,11 @@ public:
|
||||
name,
|
||||
"'");
|
||||
auto blob = m_request->GetBlob(name);
|
||||
ov::Tensor tensor = {blob, {m_request->getPointerToSo()}};
|
||||
ov::Tensor tensor = {ov::make_tensor(blob), {m_request->getPointerToSo()}};
|
||||
return tensor;
|
||||
}
|
||||
void set_tensor(const ov::Output<const ov::Node>& port, const ov::Tensor& tensor) override {
|
||||
m_request->SetBlob(get_legacy_name_from_port(port), tensor._impl);
|
||||
m_request->SetBlob(get_legacy_name_from_port(port), ov::tensor_to_blob(tensor._impl));
|
||||
}
|
||||
|
||||
std::vector<ov::Tensor> get_tensors(const ov::Output<const ov::Node>& port) const override {
|
||||
@ -719,14 +722,14 @@ public:
|
||||
if (!blobs)
|
||||
return ret;
|
||||
for (size_t i = 0; i < blobs->size(); i++) {
|
||||
ret.emplace_back(ov::Tensor{blobs->getBlob(i), {m_request->getPointerToSo()}});
|
||||
ret.emplace_back(ov::Tensor{ov::make_tensor(blobs->getBlob(i)), {m_request->getPointerToSo()}});
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
void set_tensors(const ov::Output<const ov::Node>& port, const std::vector<ov::Tensor>& tensors) override {
|
||||
std::vector<InferenceEngine::Blob::Ptr> blobs;
|
||||
for (const auto& tensor : tensors) {
|
||||
blobs.emplace_back(tensor._impl);
|
||||
blobs.emplace_back(ov::tensor_to_blob(tensor._impl));
|
||||
}
|
||||
m_request->SetBlobs(get_legacy_name_from_port(port), blobs);
|
||||
}
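
Note: a hedged sketch of the legacy round trip this wrapper layer enables (the request object, tensor descriptor, and input name "data" are assumptions):

// A Blob passed through the old SetBlob API is wrapped once by ov::make_tensor;
// GetBlob unwraps it again via tensor_to_blob, so the same memory flows through.
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3}, InferenceEngine::Layout::NC);
InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<float>(desc);
blob->allocate();
request.SetBlob("data", blob);              // -> set_tensor(port, ov::Tensor{ov::make_tensor(blob), {}})
auto round_trip = request.GetBlob("data");  // -> tensor_to_blob(get_tensor(port)._impl)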
@ -13,6 +13,7 @@
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "dev/converter_utils.hpp"
#include "dev/make_tensor.hpp"
#include "file_utils.h"
#include "ie_itt.hpp"
#include "ie_network_reader.hpp"
@ -27,6 +28,7 @@
#include "openvino/core/version.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/itensor.hpp"
#include "openvino/runtime/remote_context.hpp"
#include "openvino/runtime/threading/executor_manager.hpp"
#include "openvino/util/common_util.hpp"
@ -1097,7 +1099,7 @@ std::shared_ptr<ov::Model> ov::CoreImpl::read_model(const std::string& model,
                                                    bool frontendMode) const {
    InferenceEngine::Blob::Ptr blob;
    if (weights) {
        blob = weights._impl;
        blob = tensor_to_blob(weights._impl);
    }
    OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::IE_RT, "CoreImpl::read_model from memory");
    return ReadNetwork(model, blob, frontendMode).getFunction();
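
Note: the public call that lands here, sketched under stated assumptions (read_file is a hypothetical helper; an empty weights tensor stands in for an IR without external weights):

ov::Core core;
std::string model_xml = read_file("model.xml");    // hypothetical helper
ov::Tensor weights;                                // empty: no .bin weights
auto model = core.read_model(model_xml, weights);  // weights._impl -> tensor_to_blob(...)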
@ -10,6 +10,7 @@
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "dev/converter_utils.hpp"
#include "dev/make_tensor.hpp"
#include "ie_itt.hpp"
#include "ie_network_reader.hpp"
#include "iplugin_wrapper.hpp"
@ -17,6 +18,7 @@
#include "ngraph/pass/constant_folding.hpp"
#include "openvino/itt.hpp"
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/itensor.hpp"
#include "openvino/util/common_util.hpp"

bool ov::CoreImpl::isNewAPI() const {
@ -113,10 +115,11 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork(
    const std::function<void(const InferenceEngine::CNNNetwork&)>& val) {
    OV_ITT_SCOPE(FIRST_INFERENCE, InferenceEngine::itt::domains::IE_LT, "Core::LoadNetwork::Memory");

    auto compiled_model = compile_model(modelStr,
                                        ov::Tensor{std::const_pointer_cast<InferenceEngine::Blob>(weights), {}},
                                        deviceName,
                                        ov::any_copy(config));
    auto compiled_model =
        compile_model(modelStr,
                      ov::Tensor{ov::make_tensor(std::const_pointer_cast<InferenceEngine::Blob>(weights)), {}},
                      deviceName,
                      ov::any_copy(config));
    return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so};
}
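
Note: the new-API overload this legacy entry point now delegates to, sketched with illustrative arguments ("CPU", the empty property map, and model_xml from the previous sketch are assumptions):

ov::Core core;
ov::Tensor weights;  // or a tensor wrapping the .bin contents
auto compiled = core.compile_model(model_xml, weights, "CPU", ov::AnyMap{});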
src/inference/src/dev/iremote_tensor.cpp (new file, 351 lines)
@ -0,0 +1,351 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/runtime/iremote_tensor.hpp"

#include <memory>

#include "dev/make_tensor.hpp"
#include "ie_blob.h"
#include "ie_ngraph_utils.hpp"
#include "ie_remote_blob.hpp"
#include "openvino/runtime/properties.hpp"

namespace ov {

IRemoteTensor::~IRemoteTensor() = default;

/**
 * @brief Tensor that contains an InferenceEngine::Blob inside
 * The Blob owns the memory
 */
class BlobTensor : public ITensor {
    mutable element::Type m_type;
    mutable Shape m_shape;
    mutable Strides m_strides;

public:
    std::shared_ptr<ie::Blob> blob;

    BlobTensor(const InferenceEngine::Blob::Ptr& blob) : blob{blob} {
        auto remote_impl = dynamic_cast<InferenceEngine::RemoteBlob*>(blob.get());
        OPENVINO_ASSERT(!remote_impl);
        OPENVINO_ASSERT(blob);
        m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims();
    }

    const element::Type& get_element_type() const override {
        m_type = InferenceEngine::details::convertPrecision(blob->getTensorDesc().getPrecision());
        return m_type;
    }

    void set_shape(ov::Shape shape) override {
        blob->setShape({shape.begin(), shape.end()});
    }

    const Shape& get_shape() const override {
        m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims();
        return m_shape;
    }

    const Strides& get_strides() const override {
        OPENVINO_ASSERT(get_element_type().bitwidth() >= 8,
                        "Could not get strides for types with bitwidths less than 8 bit. Tensor type: ",
                        get_element_type());
        const auto& element_strides = blob->getTensorDesc().getBlockingDesc().getStrides();
        const size_t elem_size = get_element_type().size();
        m_strides.clear();
        m_strides.resize(element_strides.size());
        std::transform(element_strides.begin(), element_strides.end(), m_strides.begin(), [&elem_size](size_t stride) {
            return stride * elem_size;
        });
        return m_strides;
    }

    size_t get_size() const override {
        return blob->size();
    }

    size_t get_byte_size() const override {
        return blob->byteSize();
    }

    void* data(const element::Type& element_type) const override {
        OPENVINO_ASSERT(blob != nullptr, "Tensor was not initialized.");
#define TYPE_CHECK(TYPE) (dynamic_cast<const ie::TBlob<TYPE>*>(blob.get()) != nullptr)
        auto host_accessible_implementation = TYPE_CHECK(bool) || TYPE_CHECK(int8_t) || TYPE_CHECK(uint8_t) ||
                                              TYPE_CHECK(int16_t) || TYPE_CHECK(uint16_t) || TYPE_CHECK(int32_t) ||
                                              TYPE_CHECK(uint32_t) || TYPE_CHECK(int64_t) || TYPE_CHECK(uint64_t) ||
                                              TYPE_CHECK(float) || TYPE_CHECK(double);
#undef TYPE_CHECK
        OPENVINO_ASSERT(host_accessible_implementation,
                        "Tensor implementation type does not contain host-accessible data");
        if (element_type != element::undefined) {
            OPENVINO_ASSERT(element_type == get_element_type(),
                            "Tensor data with element type ",
                            get_element_type(),
                            ", is not representable as pointer to ",
                            element_type);
        }
        // since we don't use byte offsets, we need to explicitly multiply by element_size
        auto byte_offset = blob->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size();
        OPENVINO_ASSERT((get_element_type().bitwidth() >= 8) || (byte_offset == 0),
                        "ROI access for types with bitwidths less than 8 bit is not implemented. Tensor type: ",
                        get_element_type());
        return byte_offset + InferenceEngine::as<InferenceEngine::MemoryBlob>(blob)->rmap().as<uint8_t*>();
    }
};
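
Note: a worked example of the element-to-byte stride conversion above, assuming a dense row-major f32 tensor of shape {2, 3}:

#include <algorithm>
#include <vector>

std::vector<size_t> element_strides{3, 1};         // from BlockingDesc, in elements
const size_t elem_size = ov::element::f32.size();  // 4 bytes per float
std::vector<size_t> byte_strides(element_strides.size());
std::transform(element_strides.begin(), element_strides.end(), byte_strides.begin(),
               [elem_size](size_t stride) { return stride * elem_size; });
// byte_strides == {12, 4}: ITensor reports strides in bytes, TensorDesc in elements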

/**
 * @brief Tensor that contains an InferenceEngine::RemoteBlob inside
 * The Blob owns the memory
 */
class RemoteBlobTensor : public IRemoteTensor {
    mutable element::Type m_type;
    mutable Shape m_shape;
    mutable Strides m_strides;
    mutable ov::AnyMap m_properties;
    mutable std::string m_dev_name;

public:
    std::shared_ptr<ie::RemoteBlob> blob;

    RemoteBlobTensor(const InferenceEngine::RemoteBlob::Ptr& blob) : blob{blob} {
        OPENVINO_ASSERT(blob);
        m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims();
    }

    const element::Type& get_element_type() const override {
        m_type = InferenceEngine::details::convertPrecision(blob->getTensorDesc().getPrecision());
        return m_type;
    }

    void set_shape(ov::Shape shape) override {
        blob->setShape({shape.begin(), shape.end()});
    }

    const Shape& get_shape() const override {
        m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims();
        return m_shape;
    }

    const Strides& get_strides() const override {
        OPENVINO_ASSERT(get_element_type().bitwidth() >= 8,
                        "Could not get strides for types with bitwidths less than 8 bit. Tensor type: ",
                        get_element_type());
        const auto& element_strides = blob->getTensorDesc().getBlockingDesc().getStrides();
        const size_t elem_size = get_element_type().size();
        m_strides.clear();
        m_strides.resize(element_strides.size());
        std::transform(element_strides.begin(), element_strides.end(), m_strides.begin(), [&elem_size](size_t stride) {
            return stride * elem_size;
        });
        return m_strides;
    }

    size_t get_size() const override {
        return blob->size();
    }

    size_t get_byte_size() const override {
        return blob->byteSize();
    }

    const AnyMap& get_properties() const override {
        m_properties = blob->getParams();
        return m_properties;
    }

    const std::string& get_device_name() const override {
        m_dev_name = blob->getDeviceName();
        return m_dev_name;
    }
};

/**
 * @brief Create InferenceEngine::RemoteBlob from the Tensor
 */
class TensorRemoteBlob : public ie::RemoteBlob {
public:
    TensorRemoteBlob(const std::shared_ptr<ITensor>& tensor)
        : ie::RemoteBlob{ie::TensorDesc{ie::details::convertPrecision(tensor->get_element_type()),
                                        tensor->get_shape(),
                                        ie::TensorDesc::getLayoutByRank(tensor->get_shape().size())}},
          tensor{std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor)} {
        OPENVINO_ASSERT(this->tensor);
    }
    AnyMap getParams() const override {
        return tensor->get_properties();
    }
    std::string getDeviceName() const noexcept override {
        try {
            return tensor->get_device_name();
        } catch (...) {
            return {};
        }
    }
    std::shared_ptr<ie::RemoteContext> getContext() const noexcept override {
        return {};
    }

    void allocate() noexcept override {}
    bool deallocate() noexcept override {
        return true;
    }
    ie::LockedMemory<void> buffer() noexcept override {
        return {nullptr, nullptr, 0};
    }
    ie::LockedMemory<const void> cbuffer() const noexcept override {
        return {nullptr, nullptr, 0};
    }
    ie::LockedMemory<void> rwmap() noexcept override {
        return {nullptr, nullptr, 0};
    }
    ie::LockedMemory<const void> rmap() const noexcept override {
        return {nullptr, nullptr, 0};
    }
    ie::LockedMemory<void> wmap() noexcept override {
        return {nullptr, nullptr, 0};
    }
    const std::shared_ptr<ie::IAllocator>& getAllocator() const noexcept override {
        return m_allocator;
    }
    void* getHandle() const noexcept override {
        return nullptr;
    }

    std::shared_ptr<IRemoteTensor> tensor;

private:
    std::shared_ptr<ie::IAllocator> m_allocator;
};

/**
 * @brief Create InferenceEngine::TBlob<T> from the tensor
 *
 * @tparam T Blob data type
 */
template <typename T>
class TensorMemoryBlob : public ie::TBlob<T> {
public:
    ~TensorMemoryBlob() override = default;
    explicit TensorMemoryBlob(const std::shared_ptr<ITensor>& tensor_) try
        : ie::TBlob<T>{[&] {
                           auto element_type = tensor_->get_element_type();
                           auto shape = tensor_->get_shape();
                           ie::SizeVector blk_order(shape.size());
                           std::iota(blk_order.begin(), blk_order.end(), 0);
                           ie::SizeVector dim_offset(shape.size(), 0);
                           ie::SizeVector blk_strides;
                           auto byte_strides = element_type.bitwidth() >= 8 ? tensor_->get_strides() : Strides{};
                           if (byte_strides.empty()) {
                               blk_strides = ov::row_major_strides(shape);
                           } else {
                               blk_strides.resize(byte_strides.size());
                               std::transform(byte_strides.begin(),
                                              byte_strides.end(),
                                              blk_strides.begin(),
                                              [&element_type](size_t byte_stride) {
                                                  OPENVINO_ASSERT(byte_stride % element_type.size() == 0,
                                                                  "Limitation: Stride in bytes ",
                                                                  byte_stride,
                                                                  " should be divisible by size of element ",
                                                                  element_type.size());
                                                  return byte_stride / element_type.size();
                                              });
                           }
                           return ie::TensorDesc{ie::details::convertPrecision(element_type),
                                                 shape,
                                                 ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}};
                       }(),
                       static_cast<T*>(tensor_->data()),
                       tensor_->get_byte_size()},
          tensor{tensor_} {
        OPENVINO_ASSERT(!std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor));
    } catch (const std::exception& ex) {
        throw ov::Exception(ex.what());
    }

    void setShape(const ie::SizeVector& dims) override {
        tensor->set_shape(dims);
        ie::TBlob<T>::setShape(dims);
    }

    std::shared_ptr<ITensor> tensor;
};

std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ie::Blob>& blob) {
#define ELSE_IF(type)                                                                 \
    else if (auto tblob = dynamic_cast<const TensorMemoryBlob<type>*>(blob.get())) { \
        return tblob->tensor;                                                         \
    }
    if (blob == nullptr) {
        return {};
    } else if (auto remote_blob = std::dynamic_pointer_cast<TensorRemoteBlob>(blob)) {
        return remote_blob->tensor;
    } else if (auto remote_blob = std::dynamic_pointer_cast<InferenceEngine::RemoteBlob>(blob)) {
        return std::make_shared<RemoteBlobTensor>(remote_blob);
    }
    ELSE_IF(float)
    ELSE_IF(double)
    ELSE_IF(int8_t)
    ELSE_IF(int16_t)
    ELSE_IF(int32_t)
    ELSE_IF(int64_t)
    ELSE_IF(uint8_t)
    ELSE_IF(uint16_t)
    ELSE_IF(uint32_t)
    ELSE_IF(uint64_t)
    ELSE_IF(bool) else {
        return std::make_shared<BlobTensor>(blob);
    }
#undef ELSE_IF
}

ie::Blob::Ptr tensor_to_blob(const std::shared_ptr<ITensor>& tensor) {
    if (tensor == nullptr) {
        return {};
    } else if (auto blob_tensor = std::dynamic_pointer_cast<BlobTensor>(tensor)) {
        return blob_tensor->blob;
    } else if (auto blob_tensor = std::dynamic_pointer_cast<RemoteBlobTensor>(tensor)) {
        return blob_tensor->blob;
    } else if (auto blob_tensor = dynamic_cast<const BlobTensor*>(tensor.get())) {
        return blob_tensor->blob;
    } else if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor)) {
        return std::make_shared<TensorRemoteBlob>(tensor);
    } else {
#define CASE(precision, T) \
    case element::precision: \
        return std::make_shared<TensorMemoryBlob<T>>(tensor);
        switch (tensor->get_element_type()) {
            CASE(f32, float);
            CASE(f64, double);
            CASE(i4, int8_t);
            CASE(i8, int8_t);
            CASE(i16, int16_t);
            CASE(i32, int32_t);
            CASE(i64, int64_t);
            CASE(u4, uint8_t);
            CASE(u8, uint8_t);
            CASE(u16, uint16_t);
            CASE(u32, uint32_t);
            CASE(u64, uint64_t);
            CASE(u1, int8_t);
            CASE(boolean, bool);
        case element::f16:
            return std::make_shared<TensorMemoryBlob<int16_t>>(tensor);
        case element::bf16:
            return std::make_shared<TensorMemoryBlob<int16_t>>(tensor);
        default:
            OPENVINO_THROW("Unsupported element type");
        }
#undef CASE
    }
    OPENVINO_THROW("Cannot convert tensor to blob!");
}

}  // namespace ov
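
Note: a minimal sketch of the intended round-trip behavior of the two converters above (the descriptor is an assumption; both functions are internal dev API, and <cassert> is assumed included):

InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<float>(
    {InferenceEngine::Precision::FP32, {2, 2}, InferenceEngine::Layout::NC});
blob->allocate();

std::shared_ptr<ov::ITensor> tensor = ov::make_tensor(blob);   // wraps into a BlobTensor view
InferenceEngine::Blob::Ptr back = ov::tensor_to_blob(tensor);  // unwraps the very same Blob
assert(back == blob);  // zero-copy: BlobTensor::blob is returned directly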

@ -7,7 +7,6 @@
#include <unordered_map>

#include "cpp_interfaces/plugin_itt.hpp"
#include "ie_blob.h"
#include "openvino/core/except.hpp"
#include "openvino/core/layout.hpp"
#include "openvino/core/parallel.hpp"
@ -248,16 +247,11 @@ void ov::ISyncInferRequest::check_tensor(const ov::Output<const ov::Node>& port,
                    " tensor size is not equal to the model ",
                    tensor_type,
                    " type: got ",
                    tensor.get_size(),
                    tensor.get_shape(),
                    " expecting ",
                    port.get_shape(),
                    ".");
    OPENVINO_ASSERT(tensor.data() != nullptr, "Tensor data equals nullptr!");

    // FIXME: SyncInferRequest is a friend only to check that blob is correct
    OPENVINO_ASSERT(ov::shape_size(tensor._impl->getTensorDesc().getDims()) ==
                        ov::shape_size(tensor._impl->getTensorDesc().getBlockingDesc().getBlockDims()),
                    "Tensor is corrupted!");
}

void ov::ISyncInferRequest::allocate_tensor(const ov::Output<const ov::Node>& port,

src/inference/src/dev/make_tensor.hpp (new file, 52 lines)
@ -0,0 +1,52 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "ie_blob.h"
#include "openvino/runtime/itensor.hpp"

namespace ov {

/**
 * @brief Constructs a tensor using element type and shape. Allocates internal host storage using the default allocator
 * @param type Tensor element type
 * @param shape Tensor shape
 * @param allocator allocates memory for internal tensor storage
 */
std::shared_ptr<ITensor> make_tensor(const element::Type type, const Shape& shape, const Allocator& allocator = {});

/**
 * @brief Constructs a tensor using element type and shape. Wraps allocated host memory.
 * @note Does not perform memory allocation internally
 * @param type Tensor element type
 * @param shape Tensor shape
 * @param host_ptr Pointer to pre-allocated host memory
 * @param strides Optional strides parameters in bytes. Strides are supposed to be computed automatically based
 * on shape and element size
 */
std::shared_ptr<ITensor> make_tensor(const element::Type type,
                                     const Shape& shape,
                                     void* host_ptr,
                                     const Strides& strides = {});

/**
 * @brief Constructs a region of interest (ROI) tensor from another tensor.
 * @note Does not perform memory allocation internally
 * @param other original tensor
 * @param begin start coordinate of the ROI object inside of the original object
 * @param end end coordinate of the ROI object inside of the original object
 * @note The number of dimensions in `begin` and `end` must match the number of dimensions in `other.get_shape()`
 */
std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<ITensor>& other,
                                     const Coordinate& begin,
                                     const Coordinate& end);

/** @cond INTERNAL */
std::shared_ptr<ITensor> make_tensor(const std::shared_ptr<InferenceEngine::Blob>& tensor);

std::shared_ptr<InferenceEngine::Blob> tensor_to_blob(const std::shared_ptr<ITensor>& tensor);
/** @endcond */

}  // namespace ov
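
Note: a hedged sketch of the three public overloads declared above (values illustrative; this header is developer API, not part of the public surface):

// 1) allocate: internal storage owned by the default (or given) allocator
auto t1 = ov::make_tensor(ov::element::f32, ov::Shape{2, 2});

// 2) wrap: caller-owned host buffer, no copy; strides default to dense row-major
float data[4] = {0.f, 1.f, 2.f, 3.f};
auto t2 = ov::make_tensor(ov::element::f32, ov::Shape{2, 2}, data);

// 3) ROI view over t2 covering rows [0,2) and column [1,2); no allocation
auto roi = ov::make_tensor(t2, ov::Coordinate{0, 1}, ov::Coordinate{2, 2});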

@ -4,22 +4,26 @@

#include "openvino/runtime/remote_tensor.hpp"

#include "any_copy.hpp"
#include "ie_ngraph_utils.hpp"
#include "ie_remote_blob.hpp"
#include <memory>

#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/itensor.hpp"
#include "openvino/runtime/properties.hpp"

namespace ov {

void RemoteTensor::type_check(const Tensor& tensor, const std::map<std::string, std::vector<std::string>>& type_info) {
    OPENVINO_ASSERT(tensor, "Could not check empty tensor type");
    auto remote_tensor = static_cast<const RemoteTensor*>(&tensor);
    auto remote_impl = dynamic_cast<ie::RemoteBlob*>(remote_tensor->_impl.get());
    OPENVINO_ASSERT(remote_impl != nullptr, "Tensor was not initialized using remote implementation");
    auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._impl);
    OPENVINO_ASSERT(remote_tensor, "Tensor is not remote.");
    if (!type_info.empty()) {
        auto params = remote_impl->getParams();
        auto remote_properties = remote_tensor->get_properties();
        for (auto&& type_info_value : type_info) {
            auto it_param = params.find(type_info_value.first);
            OPENVINO_ASSERT(it_param != params.end(), "Parameter with key ", type_info_value.first, " not found");
            auto it_param = remote_properties.find(type_info_value.first);
            OPENVINO_ASSERT(it_param != remote_properties.end(),
                            "Parameter with key ",
                            type_info_value.first,
                            " not found");
            if (!type_info_value.second.empty()) {
                auto param_value = it_param->second.as<std::string>();
                auto param_found = std::any_of(type_info_value.second.begin(),
@ -34,12 +38,12 @@ void RemoteTensor::type_check(const Tensor& tensor, const std::map<std::string,
}

AnyMap RemoteTensor::get_params() const {
    OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized.");
    OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
    type_check(*this);
    auto remote_impl = static_cast<ie::RemoteBlob*>(_impl.get());
    auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(_impl);
    try {
        AnyMap paramMap;
        for (auto&& param : remote_impl->getParams()) {
        for (auto&& param : remote_tensor->get_properties()) {
            paramMap.emplace(param.first, Any{param.second, _so});
        }
        return paramMap;
@ -51,11 +55,12 @@ AnyMap RemoteTensor::get_params() const {
}

std::string RemoteTensor::get_device_name() const {
    OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized.");
    auto remote_impl = static_cast<ie::RemoteBlob*>(_impl.get());
    OPENVINO_ASSERT(_impl != nullptr, "Tensor was not initialized.");
    auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(_impl);
    OPENVINO_ASSERT(remote_tensor, "Tensor is not remote.");
    type_check(*this);
    try {
        return remote_impl->getDeviceName();
        return remote_tensor->get_device_name();
    } catch (const std::exception& ex) {
        OPENVINO_THROW(ex.what());
    } catch (...) {
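
Note: roughly how these checks surface through the public RemoteTensor API (the context object comes from a plugin, as in the earlier sketch):

ov::RemoteTensor remote = context.create_tensor(ov::element::f32, ov::Shape{1, 3});
ov::RemoteTensor::type_check(remote);           // ok: _impl casts to ov::IRemoteTensor
ov::AnyMap params = remote.get_params();        // forwards to IRemoteTensor::get_properties()
std::string device = remote.get_device_name();  // forwards to IRemoteTensor::get_device_name()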

@ -126,6 +126,13 @@ std::vector<std::string> disabledTestPatterns() {
        // New plugin API doesn't support changes of pre-processing
        R"(.*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)",
        R"(.*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)",
        // The new plugin API works with tensors, so a blob obtained through the old API can have a different pointer
        R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)",
        R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)",
        R"(.*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)",
        R"(.*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)",
        // Old API cannot deallocate tensor
        R"(.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)",
    };

#ifdef _WIN32

@ -122,10 +122,14 @@ TEST_P(InferRequestIOBBlobTest, failToSetInputWithIncorrectSizes) {
    // Create InferRequest
    InferenceEngine::InferRequest req;
    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
    auto td = cnnNet.getInputsInfo().begin()->second->getTensorDesc();
    auto dims = td.getDims();
    dims[0] *= 2;
    td.reshape(dims);

    InferenceEngine::Blob::Ptr blob =
        FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc());
        FuncTestUtils::createAndFillBlob(td);
    blob->allocate();
    blob->getTensorDesc().getDims()[0] *= 2;
    ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob), InferenceEngine::Exception);
}

@ -133,10 +137,14 @@ TEST_P(InferRequestIOBBlobTest, failToSetOutputWithIncorrectSizes) {
    // Create InferRequest
    InferenceEngine::InferRequest req;
    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
    auto td = cnnNet.getOutputsInfo().begin()->second->getTensorDesc();
    auto dims = td.getDims();
    dims[0] *= 2;
    td.reshape(dims);

    InferenceEngine::Blob::Ptr blob =
        FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
        FuncTestUtils::createAndFillBlob(td);
    blob->allocate();
    blob->getTensorDesc().getDims()[0] *= 2;
    ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob), InferenceEngine::Exception);
}