Extend tensor API (#15811)
* Added some new tensor API
* Added tests on constructors
* Small changes
* Fixed tensor tests
* Fixed tests
* Added parametrized tests
* Extend tests and delete copy_to from remote tensor
This commit is contained in:
parent 27ea9eab32
commit 893f96a7da
@@ -103,6 +103,7 @@ public:
     bool operator>(const Output& other) const;
     bool operator<=(const Output& other) const;
     bool operator>=(const Output& other) const;
+    operator Output<const Node>() const;
 
 private:
     std::shared_ptr<Node> m_node;
@@ -116,6 +116,23 @@ public:
      */
     Tensor(const element::Type type, const Shape& shape, void* host_ptr, const Strides& strides = {});
 
+    /**
+     * @brief Constructs Tensor using port from node. Allocates internal host storage using a default allocator.
+     * @param port port from node
+     * @param allocator allocates memory for internal tensor storage
+     */
+    Tensor(const ov::Output<const ov::Node>& port, const Allocator& allocator = {});
+
+    /**
+     * @brief Constructs Tensor using port from node. Wraps allocated host memory.
+     * @note Does not perform memory allocation internally
+     * @param port port from node
+     * @param host_ptr Pointer to pre-allocated host memory
+     * @param strides Optional strides parameters in bytes. If omitted, strides are computed automatically based
+     * on shape and element size
+     */
+    Tensor(const ov::Output<const ov::Node>& port, void* host_ptr, const Strides& strides = {});
+
     /**
      * @brief Constructs region of interest (ROI) tensor from another tensor.
      * @note Does not perform memory allocation internally
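A minimal usage sketch of the two new port-based constructors (the parameter node and values here are illustrative, not from the patch):

    #include <memory>

    #include "openvino/op/parameter.hpp"
    #include "openvino/runtime/tensor.hpp"

    int main() {
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
        // Allocates storage matching the port's element type and shape
        ov::Tensor allocated(param->output(0));
        // Wraps caller-owned memory; no allocation happens here
        float data[3] = {5.f, 6.f, 7.f};
        ov::Tensor wrapped(param->output(0), data);
        return 0;
    }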
@@ -143,10 +160,17 @@ public:
      */
     Shape get_shape() const;
 
+    /**
+     * @brief Copies tensor data to a destination tensor; the destination should have the same element type and shape
+     *
+     * @param dst destination tensor
+     */
+    void copy_to(ov::Tensor& dst) const;
+
     /**
      * @brief Reports whether the tensor is continuous or not
      *
-     * @return true if blob is continuous
+     * @return true if tensor is continuous
      */
     bool is_continuous() const;
 
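A short sketch of the new copy_to in use (shapes and values are illustrative):

    #include <algorithm>

    #include "openvino/runtime/tensor.hpp"

    int main() {
        ov::Tensor src(ov::element::f32, ov::Shape{2, 3});
        std::fill_n(src.data<float>(), src.get_size(), 1.f);

        ov::Tensor dst(ov::element::f32, ov::Shape{2, 3});
        src.copy_to(dst);  // asserts that element types and shapes match
        return 0;
    }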
@@ -147,6 +147,10 @@ bool Output<Node>::operator>=(const Output& other) const {
     return !(*this < other);
 }
 
+Output<Node>::operator Output<const Node>() const {
+    return Output<const Node>(get_node(), get_index());
+}
+
 Output<const Node>::Output(const Node* node, size_t index) : m_index(index) {
     OPENVINO_ASSERT(node, "Cannot create ov::Output<const ov::Node> from nullptr!");
     m_node = node->shared_from_this();
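The added conversion lets a mutable Output<Node> be passed wherever Output<const Node> is expected, for example to the new Tensor constructors above. A minimal sketch:

    #include <memory>

    #include "openvino/op/parameter.hpp"

    int main() {
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
        ov::Output<ov::Node> out = param->output(0);
        ov::Output<const ov::Node> const_out = out;  // uses the new conversion operator
        return 0;
    }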
@@ -7,6 +7,9 @@
 #include "blob_factory.hpp"     // IE private header
 #include "ie_ngraph_utils.hpp"  // IE private header
+#include "openvino/core/except.hpp"
 #include "openvino/core/shape.hpp"
+#include "openvino/core/strides.hpp"
+#include "openvino/runtime/remote_tensor.hpp"
 #include "openvino/runtime/tensor.hpp"
 #include "runtime/blob_allocator.hpp"
 #include "shape_util.hpp"
@@ -94,6 +97,17 @@ Tensor::Tensor(const Tensor& owner, const Coordinate& begin, const Coordinate& e
     }
 }
 
+Tensor::Tensor(const ov::Output<const ov::Node>& port, const Allocator& allocator)
+    : Tensor(port.get_element_type(),
+             port.get_partial_shape().is_dynamic() ? ov::Shape{0} : port.get_shape(),
+             allocator) {}
+
+Tensor::Tensor(const ov::Output<const ov::Node>& port, void* host_ptr, const Strides& byte_strides)
+    : Tensor(port.get_element_type(),
+             port.get_partial_shape().is_dynamic() ? ov::Shape{0} : port.get_shape(),
+             host_ptr,
+             byte_strides) {}
+
 element::Type Tensor::get_element_type() const {
     OV_TENSOR_STATEMENT(return ie::details::convertPrecision(_impl->getTensorDesc().getPrecision()));
 }
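Note that a dynamic port shape degrades to ov::Shape{0}. A sketch of how a caller might handle that, assuming the shape is resolved later via the existing set_shape API (the flow here is illustrative):

    #include <memory>

    #include "openvino/core/partial_shape.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/runtime/tensor.hpp"

    int main() {
        auto dyn = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
        ov::Tensor t(dyn->output(0));  // dynamic shape: tensor is created with shape {0}
        t.set_shape(ov::Shape{2, 2});  // reallocate once the concrete shape is known
        return 0;
    }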
@@ -113,6 +127,128 @@ Shape Tensor::get_shape() const {
     OV_TENSOR_STATEMENT({ return _impl->getTensorDesc().getBlockingDesc().getBlockDims(); });
 }
 
+void Tensor::copy_to(ov::Tensor& dst) const {
+    OV_TENSOR_STATEMENT({
+        OPENVINO_ASSERT(dst, "Destination tensor was not initialized.");
+        OPENVINO_ASSERT(!is<ov::RemoteTensor>(), "Default copy_to doesn't support copying from a remote tensor.");
+        OPENVINO_ASSERT(!dst.is<ov::RemoteTensor>(), "Default copy_to doesn't support copying to a remote tensor.");
+        OPENVINO_ASSERT(dst.get_element_type() == get_element_type(),
+                        "Tensor element types are not equal. (src: ",
+                        get_element_type(),
+                        " != dst: ",
+                        dst.get_element_type(),
+                        ")");
+        if (dst.get_shape() == ov::Shape{0})
+            dst.set_shape(get_shape());
+        OPENVINO_ASSERT(dst.get_shape() == get_shape(),
+                        "Tensor shapes are not equal. (src: ",
+                        get_shape(),
+                        " != dst: ",
+                        dst.get_shape(),
+                        ")");
+        const auto& shape = get_shape();
+        auto* src_data = static_cast<const uint8_t*>(data());
+        auto* dst_data = static_cast<uint8_t*>(dst.data());
+        ov::Strides src_strides{get_byte_size()};
+        ov::Strides dst_strides{dst.get_byte_size()};
+        ov::Shape cur_pos{0};
+        ov::Shape max_pos{1};
+
+        if (get_element_type().bitwidth() < 8 || (get_strides() == dst.get_strides() && is_continuous())) {
+            // OpenVINO doesn't support strides for LP types
+            // or both tensors have default strides
+            // Strides and positions already initialized
+        } else {
+            // Compute default (dense) strides to find where the actual strides diverge
+            const auto& type = get_element_type();
+            std::vector<size_t> strides(shape.size());
+            if (!shape.empty()) {
+                strides[shape.size() - 1] = 1;
+            }
+            auto size = shape.size();
+            for (size_t i = 1; i < size; i++) {
+                strides[size - i - 1] = strides[size - i] * shape[size - i];
+            }
+
+            ov::Strides default_strides(strides.size());
+            for (size_t i = 0; i < strides.size(); ++i)
+                default_strides[i] = strides[i] * type.size();
+
+            src_strides = get_strides();
+            dst_strides = dst.get_strides();
+
+            ov::Strides src_str, dst_str;
+
+            // Calculate effective src and dst strides for the copy loop
+            bool found_step = false;
+            for (size_t i = 0; i < shape.size(); i++) {
+                size_t inverted_idx = shape.size() - i - 1;
+                if (!found_step) {
+                    if (default_strides[inverted_idx] == src_strides[inverted_idx] &&
+                        src_strides[inverted_idx] == dst_strides[inverted_idx]) {
+                        continue;
+                    } else {
+                        found_step = true;
+                        size_t strides_size = inverted_idx + 1;
+                        // Resize to the number of dimensions the copy loop must iterate
+                        src_str.resize(strides_size + 1);
+                        dst_str.resize(strides_size + 1);
+                        max_pos.resize(strides_size + 1);
+                        cur_pos.resize(strides_size + 1);
+                        // With default continuous strides several elements can be copied at once;
+                        // otherwise only one element at a time
+                        size_t dim = 1;
+                        size_t strides = type.size();
+
+                        if (strides_size < default_strides.size()) {
+                            strides = default_strides[strides_size];
+                            dim = get_shape()[strides_size];
+                        }
+                        src_str[strides_size] = strides;
+                        dst_str[strides_size] = strides;
+                        max_pos[strides_size] = dim;
+                        cur_pos[strides_size] = 0;
+                    }
+                }
+                src_str[inverted_idx] = src_strides[inverted_idx];
+                dst_str[inverted_idx] = dst_strides[inverted_idx];
+                max_pos[inverted_idx] = shape[inverted_idx];
+                cur_pos[inverted_idx] = 0;
+            }
+            src_strides = src_str;
+            dst_strides = dst_str;
+        }
+
+        const auto update_index = [](const ov::Shape& pos, const ov::Shape& shape, const ov::Strides& strides) {
+            size_t offset = 0;
+
+            for (size_t i = 0; i < pos.size(); i++) {
+                offset += pos[i] * strides[i];
+            }
+            return offset;
+        };
+
+        bool finish = false;
+        for (size_t dst_idx = 0, src_idx = 0; !finish;) {
+            memcpy(dst_data + dst_idx, src_data + src_idx, src_strides[src_strides.size() - 1]);
+            // Advance the multi-dimensional position in row-major order
+            for (size_t i = 0; i < cur_pos.size(); i++) {
+                size_t inverted_idx = cur_pos.size() - i - 1;
+                cur_pos[inverted_idx]++;
+                if (cur_pos[inverted_idx] != max_pos[inverted_idx]) {
+                    break;
+                }
+                if (inverted_idx)
+                    cur_pos[inverted_idx] = 0;
+                else
+                    finish = true;
+            }
+            src_idx = update_index(cur_pos, max_pos, src_strides);
+            dst_idx = update_index(cur_pos, max_pos, dst_strides);
+        }
+    });
+}
+
 Strides Tensor::get_strides() const {
     OPENVINO_ASSERT(get_element_type().bitwidth() >= 8,
                     "Could not get strides for types with bitwidths less than 8 bits. Tensor type: ",
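For readers tracing the loop above: it repeatedly memcpy's the largest contiguous tail block, and everything hinges on the dense stride computation. A standalone sketch of that computation (the helper name is illustrative, not part of the patch):

    #include <cstddef>
    #include <vector>

    // Byte strides of a dense row-major layout: the innermost stride is the
    // element size, and each outer stride spans the whole inner block.
    std::vector<size_t> default_byte_strides(const std::vector<size_t>& shape, size_t elem_size) {
        std::vector<size_t> strides(shape.size());
        if (!shape.empty()) {
            strides.back() = elem_size;
            for (size_t i = shape.size() - 1; i > 0; --i)
                strides[i - 1] = strides[i] * shape[i];
        }
        return strides;
    }

    // e.g. shape {3, 2, 2} with 4-byte elements yields {16, 8, 4}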
@@ -174,24 +310,26 @@ Tensor::operator bool() const noexcept {
 }
 
 bool Tensor::is_continuous() const {
-    if (get_element_type().bitwidth() < 8)
-        // OpenVINO doesn't support strides for lp types
-        return true;
-    const auto& shape = get_shape();
-    const auto& type = get_element_type();
-    std::vector<size_t> strides(shape.size());
-    if (!shape.empty()) {
-        strides[shape.size() - 1] = 1;
-    }
-    auto size = shape.size();
-    for (size_t i = 1; i < size; i++) {
-        strides[size - i - 1] = strides[size - i] * shape[size - i];
-    }
+    OV_TENSOR_STATEMENT({
+        if (get_element_type().bitwidth() < 8)
+            // OpenVINO doesn't support strides for lp types
+            return true;
+        const auto& shape = get_shape();
+        const auto& type = get_element_type();
+        std::vector<size_t> strides(shape.size());
+        if (!shape.empty()) {
+            strides[shape.size() - 1] = 1;
+        }
+        auto size = shape.size();
+        for (size_t i = 1; i < size; i++) {
+            strides[size - i - 1] = strides[size - i] * shape[size - i];
+        }
 
-    ov::Strides byte_strides(strides.size());
-    for (size_t i = 0; i < strides.size(); ++i)
-        byte_strides[i] = strides[i] * type.size();
-    return byte_strides == get_strides();
+        ov::Strides byte_strides(strides.size());
+        for (size_t i = 0; i < strides.size(); ++i)
+            byte_strides[i] = strides[i] * type.size();
+        return byte_strides == get_strides();
+    });
 }
 
 }  // namespace ov
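A sketch of what is_continuous distinguishes, wrapping one buffer two ways (buffer sizes and strides are illustrative):

    #include "openvino/runtime/tensor.hpp"

    int main() {
        float buffer[3][8] = {};  // rows padded to 8 floats, only 4 used per row
        // Dense 3x4 view: default byte strides {16, 4}
        ov::Tensor dense(ov::element::f32, ov::Shape{3, 4}, buffer);
        // Padded 3x4 view: 32-byte row stride over the same buffer
        ov::Tensor padded(ov::element::f32, ov::Shape{3, 4}, buffer, ov::Strides{32, 4});

        bool d = dense.is_continuous();   // true: strides match the dense layout
        bool p = padded.is_continuous();  // false: rows are padded
        return d && !p ? 0 : 1;
    }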
@@ -4,6 +4,7 @@
 
 #include <gmock/gmock-spec-builders.h>
 #include <gmock/gmock.h>
+#include <gtest/gtest-param-test.h>
 #include <gtest/gtest.h>
 
 #include <cstdint>
@@ -13,7 +14,11 @@
 
+#include "ngraph/coordinate_transform.hpp"
 #include "openvino/core/except.hpp"
+#include "openvino/core/partial_shape.hpp"
+#include "openvino/core/type/element_type_traits.hpp"
+#include "openvino/op/parameter.hpp"
 #include "openvino/runtime/allocator.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
 #include "openvino/runtime/tensor.hpp"
 
 using OVTensorTest = ::testing::Test;
@@ -40,6 +45,26 @@ TEST_F(OVTensorTest, canCreateTensor) {
     ASSERT_THROW(t.data<std::int32_t>(), ov::Exception);
 }
 
+TEST_F(OVTensorTest, createTensorFromPort) {
+    auto parameter1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f64, ov::Shape{1, 3, 2, 2});
+    auto parameter2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
+    auto parameter3 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
+    float data[] = {5.f, 6.f, 7.f};
+    ov::Tensor t1{parameter1->output(0)};
+    ov::Tensor t2{parameter2->output(0), data};
+    ov::Tensor t3{parameter3->output(0)};
+    ov::Tensor t4{parameter3->output(0), data};
+
+    EXPECT_EQ(t1.get_shape(), parameter1->get_shape());
+    EXPECT_EQ(t1.get_element_type(), parameter1->get_element_type());
+    EXPECT_EQ(t2.get_shape(), parameter2->get_shape());
+    EXPECT_EQ(t2.get_element_type(), parameter2->get_element_type());
+    EXPECT_EQ(t3.get_shape(), ov::Shape{0});
+    EXPECT_EQ(t3.get_element_type(), parameter3->get_element_type());
+    EXPECT_EQ(t4.get_shape(), ov::Shape{0});
+    EXPECT_EQ(t4.get_element_type(), parameter3->get_element_type());
+}
+
 TEST_F(OVTensorTest, canAccessF16Tensor) {
     ov::Shape shape = {4, 3, 2};
     ov::Tensor t{ov::element::f16, shape};
@@ -281,3 +306,201 @@ TEST_F(OVTensorTest, readRangeRoiBlob) {
         }
     }
 }
+
+struct TestParams {
+    ov::Shape src_shape;
+    ov::Strides src_strides;
+    ov::Shape dst_shape;
+    ov::Strides dst_strides;
+};
+
+struct OVTensorTestCopy : ::testing::TestWithParam<std::tuple<ov::element::Type, TestParams>> {};
+
+namespace {
+template <class T>
+std::vector<T> fill_data(const ov::Tensor& tensor) {
+    std::vector<T> actual;
+    const T* data = tensor.data<T>();
+    auto strides = tensor.get_strides();
+    for (auto&& c : ngraph::CoordinateTransformBasic{tensor.get_shape()}) {
+        actual.emplace_back(
+            *(data + (c[2] * strides[2] + c[1] * strides[1] + c[0] * strides[0]) / tensor.get_element_type().size()));
+    }
+    return actual;
+};
+template <class T>
+void compare_data(const ov::Tensor& src, const ov::Tensor& dst) {
+    auto source_vec = fill_data<T>(src);
+    auto dest_vec = fill_data<T>(dst);
+
+    ASSERT_EQ(source_vec.size(), dest_vec.size());
+
+    for (size_t i = 0; i < source_vec.size(); i++) {
+        EXPECT_EQ(source_vec[i], dest_vec[i]);
+    }
+};
+
+template <class T>
+void init_tensor(const ov::Tensor& tensor, bool input) {
+    const auto origPtr = tensor.data<T>();
+    ASSERT_NE(nullptr, origPtr);
+    for (size_t i = 0; i < tensor.get_size(); ++i) {
+        origPtr[i] = static_cast<T>(input ? i : -1);
+    }
+}
+
+void init_tensor(const ov::Tensor& tensor, bool input) {
+    switch (tensor.get_element_type()) {
+    case ov::element::bf16:
+        init_tensor<ov::element_type_traits<ov::element::bf16>::value_type>(tensor, input);
+        break;
+    case ov::element::f16:
+        init_tensor<ov::element_type_traits<ov::element::f16>::value_type>(tensor, input);
+        break;
+    case ov::element::f32:
+        init_tensor<ov::element_type_traits<ov::element::f32>::value_type>(tensor, input);
+        break;
+    case ov::element::f64:
+        init_tensor<ov::element_type_traits<ov::element::f64>::value_type>(tensor, input);
+        break;
+    case ov::element::i8:
+        init_tensor<ov::element_type_traits<ov::element::i8>::value_type>(tensor, input);
+        break;
+    case ov::element::i16:
+        init_tensor<ov::element_type_traits<ov::element::i16>::value_type>(tensor, input);
+        break;
+    case ov::element::i32:
+        init_tensor<ov::element_type_traits<ov::element::i32>::value_type>(tensor, input);
+        break;
+    case ov::element::i64:
+        init_tensor<ov::element_type_traits<ov::element::i64>::value_type>(tensor, input);
+        break;
+    case ov::element::u8:
+        init_tensor<ov::element_type_traits<ov::element::u8>::value_type>(tensor, input);
+        break;
+    case ov::element::u16:
+        init_tensor<ov::element_type_traits<ov::element::u16>::value_type>(tensor, input);
+        break;
+    case ov::element::u32:
+        init_tensor<ov::element_type_traits<ov::element::u32>::value_type>(tensor, input);
+        break;
+    case ov::element::u64:
+        init_tensor<ov::element_type_traits<ov::element::u64>::value_type>(tensor, input);
+        break;
+    default:
+        OPENVINO_UNREACHABLE("Unsupported data type");
+    }
+}
+
+void compare_tensors(const ov::Tensor& src, const ov::Tensor& dst) {
+    ASSERT_EQ(src.get_byte_size(), dst.get_byte_size());
+    ASSERT_EQ(src.get_shape(), dst.get_shape());
+    ASSERT_EQ(src.get_element_type(), dst.get_element_type());
+    switch (src.get_element_type()) {
+    case ov::element::bf16:
+        compare_data<ov::element_type_traits<ov::element::bf16>::value_type>(src, dst);
+        break;
+    case ov::element::f16:
+        compare_data<ov::element_type_traits<ov::element::f16>::value_type>(src, dst);
+        break;
+    case ov::element::f32:
+        compare_data<ov::element_type_traits<ov::element::f32>::value_type>(src, dst);
+        break;
+    case ov::element::f64:
+        compare_data<ov::element_type_traits<ov::element::f64>::value_type>(src, dst);
+        break;
+    case ov::element::i8:
+        compare_data<ov::element_type_traits<ov::element::i8>::value_type>(src, dst);
+        break;
+    case ov::element::i16:
+        compare_data<ov::element_type_traits<ov::element::i16>::value_type>(src, dst);
+        break;
+    case ov::element::i32:
+        compare_data<ov::element_type_traits<ov::element::i32>::value_type>(src, dst);
+        break;
+    case ov::element::i64:
+        compare_data<ov::element_type_traits<ov::element::i64>::value_type>(src, dst);
+        break;
+    case ov::element::u8:
+        compare_data<ov::element_type_traits<ov::element::u8>::value_type>(src, dst);
+        break;
+    case ov::element::u16:
+        compare_data<ov::element_type_traits<ov::element::u16>::value_type>(src, dst);
+        break;
+    case ov::element::u32:
+        compare_data<ov::element_type_traits<ov::element::u32>::value_type>(src, dst);
+        break;
+    case ov::element::u64:
+        compare_data<ov::element_type_traits<ov::element::u64>::value_type>(src, dst);
+        break;
+    default:
+        OPENVINO_UNREACHABLE("Unsupported data type");
+    }
+}
+}  // namespace
+
+TEST_P(OVTensorTestCopy, copy_to) {
+    ov::element::Type type;
+    TestParams p;
+    std::tie(type, p) = GetParam();
+    // Source tensors
+    ov::Tensor full_src_tensor;
+    ov::Tensor src_tensor;
+    if (!p.src_strides.empty()) {
+        full_src_tensor = ov::Tensor(type, ov::Shape{p.src_shape[0] * p.src_strides[0]});
+        src_tensor = ov::Tensor(type, p.src_shape, full_src_tensor.data(), p.src_strides);
+    } else {
+        src_tensor = full_src_tensor = ov::Tensor(type, p.src_shape);
+    }
+    init_tensor(full_src_tensor, true);
+
+    ov::Tensor full_dst_tensor;
+    ov::Tensor dst_tensor;
+    if (!p.dst_strides.empty()) {
+        full_dst_tensor = ov::Tensor(type, ov::Shape{p.dst_shape[0] * p.dst_strides[0]});
+        dst_tensor = ov::Tensor(type, p.dst_shape, full_dst_tensor.data(), p.dst_strides);
+    } else {
+        dst_tensor = full_dst_tensor = ov::Tensor(type, p.dst_shape);
+    }
+    init_tensor(full_dst_tensor, false);
+
+    src_tensor.copy_to(dst_tensor);
+    compare_tensors(src_tensor, dst_tensor);
+}
+
+// clang-format off
+INSTANTIATE_TEST_SUITE_P(copy_tests,
+                         OVTensorTestCopy,
+                         ::testing::Combine(::testing::Values(
+                                                ov::element::bf16,
+                                                ov::element::f16,
+                                                ov::element::f32,
+                                                ov::element::f64,
+                                                ov::element::i8,
+                                                ov::element::i16,
+                                                ov::element::i32,
+                                                ov::element::i64,
+                                                ov::element::u8,
+                                                ov::element::u16,
+                                                ov::element::u32,
+                                                ov::element::u64
+                                            ),
+                                            ::testing::Values(
+                                                TestParams {
+                                                    ov::Shape{1, 3, 4, 8}, {},
+                                                    {0}, {}
+                                                },
+                                                TestParams {
+                                                    ov::Shape{3, 2, 2}, {},
+                                                    ov::Shape{3, 2, 2}, ov::Strides{128, 24, 8}
+                                                },
+                                                TestParams {
+                                                    ov::Shape{3, 2, 2}, ov::Strides{64, 16, 8},
+                                                    ov::Shape{3, 2, 2}, ov::Strides{}
+                                                },
+                                                TestParams {
+                                                    ov::Shape{3, 2, 2}, ov::Strides{64, 16, 8},
+                                                    ov::Shape{3, 2, 2}, ov::Strides{128, 24, 8}
+                                                }
+                                            )));
+// clang-format on
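For orientation, the strides in TestParams are byte strides and are combined with every listed element type. Taking ov::element::f64 as an assumed example: dense strides for shape {3, 2, 2} would be {32, 16, 8}, so ov::Strides{64, 16, 8} pads the outermost dimension and ov::Strides{128, 24, 8} pads two dimensions; both cases force Tensor::copy_to off the single-memcpy fast path and through the strided loop.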
@@ -44,6 +44,8 @@ public:
     template <typename T>
     T* data() = delete;
 
+    void copy_to(ov::Tensor& dst) const = delete;
+
    /**
     * @brief Returns a map of device-specific parameters required for low-level
     * operations with underlying object.
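A sketch of what the deletion buys at compile time (the function is illustrative; RemoteTensor instances normally come from a device context):

    #include "openvino/runtime/remote_tensor.hpp"

    void illustrate(ov::RemoteTensor& remote, ov::Tensor& dst) {
        // remote.copy_to(dst);  // would not compile: copy_to is deleted on RemoteTensor
        // Going through a plain ov::Tensor handle compiles instead, but
        // Tensor::copy_to asserts at runtime when either side is remote.
        (void)remote;
        (void)dst;
    }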