Extend data_utils to work with ov::Tensor (#18142)

* Extend data_utils to work with ov::Tensor. Update SplitConcatMemory tests

* Fix1

* Apply comments

* Apply comments 2

* Apply comments 3

* Apply comments 4
Oleg Pipikin 2023-07-20 10:29:47 +02:00 committed by GitHub
parent 3f675ce396
commit 60a8c2bc7a
11 changed files with 215 additions and 139 deletions


@@ -6,20 +6,20 @@
#include "subgraph_tests/split_concat_memory.hpp"
using namespace SubgraphTestsDefinitions;
using namespace ov::test::subgraph;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::I32,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::I16,
InferenceEngine::Precision::U8,
InferenceEngine::Precision::I8,
const std::vector<ov::element::Type> netPrecisions = {
ov::element::f32,
ov::element::i32,
ov::element::f16,
ov::element::i16,
ov::element::u8,
ov::element::i8,
};
const std::vector<InferenceEngine::SizeVector> shapes = {
const std::vector<ov::Shape> shapes = {
{1, 8, 3, 2},
{3, 8, 3, 2},
{3, 8, 3},
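A hypothetical instantiation matching the updated parameter tuple (the suite name, axis, and device string below are illustrative examples, not part of this change):

INSTANTIATE_TEST_SUITE_P(smoke_SplitConcatMemory, SplitConcatMemory,
                         ::testing::Combine(::testing::ValuesIn(shapes),         // ov::Shape
                                            ::testing::ValuesIn(netPrecisions),  // ov::element::Type
                                            ::testing::Values(1),                // axis of split
                                            ::testing::Values("CPU")),           // device name (example)
                         SplitConcatMemory::getTestCaseName);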


@@ -5,18 +5,13 @@
#pragma once
#include "shared_test_classes/subgraph/split_concat_memory.hpp"
#include "common_test_utils/data_utils.hpp"
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
namespace subgraph {
TEST_P(SplitConcatMemory, cyclicBufferCorrectness) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
auto ie = PluginCache::get().ie();
cnnNetwork = InferenceEngine::CNNNetwork{function};
auto exe_net = ie->LoadNetwork(cnnNetwork, targetDevice);
auto inf_reg = exe_net.CreateInferRequest();
/*
* cnc1 out | mem | In|q
* |===============|
@@ -25,42 +20,50 @@ TEST_P(SplitConcatMemory, cyclicBufferCorrectness) {
* iter 3 | 0 | 1 | 2 | 3 |
*/
auto i_blob = inf_reg.GetBlob("input");
auto o_blob = inf_reg.GetBlob("plus_one");
compile_model();
inferRequest = compiledModel.create_infer_request();
auto o_blob_ref = make_blob_with_precision(o_blob->getTensorDesc());
o_blob_ref->allocate();
auto i_tensor = inferRequest.get_tensor(*function->inputs().begin());
auto fill_by_quarter = [this] (InferenceEngine::Blob::Ptr& blob, std::vector<float> vals) {
IE_ASSERT(vals.size() == 4);
auto quarter_blocked_shape = blob->getTensorDesc().getDims();
auto o_tensor = inferRequest.get_tensor(*function->outputs().begin());
auto output_tensor_ref = ov::Tensor(o_tensor.get_element_type(), o_tensor.get_shape());
auto fill_by_quarter = [this] (ov::Tensor& tensor, std::vector<float> vals) {
OPENVINO_ASSERT(vals.size() == 4);
auto quarter_blocked_shape = tensor.get_shape();
// split axis dimension into chunks
IE_ASSERT(quarter_blocked_shape[axis] % vals.size() == 0);
OPENVINO_ASSERT(quarter_blocked_shape[axis] % vals.size() == 0);
quarter_blocked_shape[axis] /= vals.size();
quarter_blocked_shape.insert(quarter_blocked_shape.begin() + axis, vals.size());
auto quarter_blocked_view = CommonTestUtils::make_reshape_view(blob, quarter_blocked_shape);
OPENVINO_ASSERT(ov::shape_size(quarter_blocked_shape) == tensor.get_size());
auto quarter_blocked_view = ov::Tensor(tensor.get_element_type(), quarter_blocked_shape, tensor.data());
CommonTestUtils::fill_data_with_broadcast(quarter_blocked_view, axis, vals);
};
// iteration 1
CommonTestUtils::fill_data_const(i_blob, 1);
fill_by_quarter(o_blob_ref, {1, 1, 1, 2});
inf_reg.Infer();
Compare(o_blob_ref, o_blob);
CommonTestUtils::fill_data_with_broadcast(i_tensor, 0, {1});
fill_by_quarter(output_tensor_ref, {1, 1, 1, 2});
inferRequest.infer();
compare({output_tensor_ref}, {o_tensor});
// iteration 2
CommonTestUtils::fill_data_const(i_blob, 2);
fill_by_quarter(o_blob_ref, {1, 1, 2, 3});
inf_reg.Infer();
Compare(o_blob_ref, o_blob);
CommonTestUtils::fill_data_with_broadcast(i_tensor, 0, {2});
fill_by_quarter(output_tensor_ref, {1, 1, 2, 3});
inferRequest.infer();
compare({output_tensor_ref}, {o_tensor});
// iteration 3
CommonTestUtils::fill_data_const(i_blob, 3);
fill_by_quarter(o_blob_ref, {1, 2, 3, 4});
inf_reg.Infer();
Compare(o_blob_ref, o_blob);
CommonTestUtils::fill_data_with_broadcast(i_tensor, 0, {3});
fill_by_quarter(output_tensor_ref, {1, 2, 3, 4});
inferRequest.infer();
compare({output_tensor_ref}, {o_tensor});
}
} // namespace SubgraphTestsDefinitions
} // namespace subgraph
} // namespace test
} // namespace ov
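The fill_by_quarter helper above builds a non-owning ov::Tensor view over the output tensor's memory. A minimal sketch of that pattern in isolation (shapes and values are illustrative; the view constructor does not copy, so the wrapped tensor must outlive the view):

ov::Tensor tensor(ov::element::f32, ov::Shape{1, 8, 3, 2});
// block axis 1 into 4 chunks of 2: {1, 8, 3, 2} -> {1, 4, 2, 3, 2}
ov::Tensor view(tensor.get_element_type(), ov::Shape{1, 4, 2, 3, 2}, tensor.data());
// one value per chunk, broadcast over the remaining axes
CommonTestUtils::fill_data_with_broadcast(view, 1, {1.f, 1.f, 1.f, 2.f});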


@@ -4,29 +4,31 @@
#pragma once
#include <string>
#include <gtest/gtest.h>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
namespace subgraph {
using SplitConcatMemoryParamsTuple = typename std::tuple<
std::vector<size_t>, // input shapes
InferenceEngine::Precision, // precision
ov::Shape, // input shapes
ov::element::Type, // precision
int, // axis of split
std::string // device name
>;
class SplitConcatMemory : public testing::WithParamInterface<SplitConcatMemoryParamsTuple>,
virtual public LayerTestsUtils::LayerTestsCommon {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<ParamType>& obj);
protected:
void SetUp() override;
int axis;
};
} // namespace SubgraphTestsDefinitions
} // namespace subgraph
} // namespace test
} // namespace ov


@@ -2,32 +2,32 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/opsets/opset5.hpp"
#include "shared_test_classes/subgraph/split_concat_memory.hpp"
namespace SubgraphTestsDefinitions {
using namespace CommonTestUtils;
using namespace InferenceEngine;
namespace ov {
namespace test {
namespace subgraph {
std::string SplitConcatMemory::getTestCaseName(const testing::TestParamInfo<ParamType>& obj) {
InferenceEngine::Precision netPrecision;
InferenceEngine::SizeVector inputShapes;
ov::element::Type netPrecision;
ov::Shape inputShapes;
int axis;
std::string targetDevice;
std::tie(inputShapes, netPrecision, axis, targetDevice) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
result << "PRC=" << netPrecision.name() << "_";
result << "PRC=" << netPrecision.get_type_name() << "_";
result << "axis=" << axis << "_";
result << "dev=" << targetDevice;
return result.str();
}
void SplitConcatMemory::SetUp() {
SizeVector shape;
std::tie(shape, inPrc, axis, targetDevice) = this->GetParam();
abs_threshold = 0.01;
ov::Shape shape;
std::tie(shape, inType, axis, targetDevice) = this->GetParam();
auto shape_14 = shape;
shape_14[axis] /= 4;
@@ -47,27 +47,33 @@ void SplitConcatMemory::SetUp() {
* __|___ __|___
* [_out1_] [_mem2_]
*/
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);
ngraph::Shape ng_share_14(shape_14);
ngraph::Shape ng_share_34(shape_34);
auto input = std::make_shared<ngraph::opset5::Parameter>(ngPrc, ng_share_14);
auto input = std::make_shared<ov::op::v0::Parameter>(inType, ng_share_14);
input->set_friendly_name("input");
auto& tensor = input->get_output_tensor(0);
tensor.set_names({"input_t"});
//input->output(0).set_names({"input"});
auto mem_c = std::make_shared<ngraph::opset5::Constant>(ngPrc, ng_share_34, 0);
auto mem_r = std::make_shared<ngraph::opset5::ReadValue>(mem_c, "id");
auto cnc = std::make_shared<ngraph::opset5::Concat>(ngraph::NodeVector{mem_r, input}, axis);
auto mem_c = std::make_shared<ov::op::v0::Constant>(inType, ng_share_34, 0);
auto mem_r = std::make_shared<ov::op::v3::ReadValue>(mem_c, "id");
auto cnc = std::make_shared<ov::op::v0::Concat>(ngraph::NodeVector{mem_r, input}, axis);
std::vector<int64_t> chunks_val {static_cast<int64_t>(ng_share_14[axis]), static_cast<int64_t>(ng_share_34[axis])};
auto chunk_c = std::make_shared<ngraph::opset5::Constant>(::ngraph::element::i64, ngraph::Shape{chunks_val.size()}, chunks_val);
auto axis_c = std::make_shared<ngraph::opset5::Constant>(::ngraph::element::i64, ngraph::Shape{}, axis);
auto spl = std::make_shared<ngraph::opset5::VariadicSplit>(cnc, axis_c, chunk_c);
auto chunk_c = std::make_shared<ov::op::v0::Constant>(::ngraph::element::i64, ngraph::Shape{chunks_val.size()}, chunks_val);
auto axis_c = std::make_shared<ov::op::v0::Constant>(::ngraph::element::i64, ngraph::Shape{}, axis);
auto spl = std::make_shared<ov::op::v1::VariadicSplit>(cnc, axis_c, chunk_c);
auto one = std::make_shared<ngraph::opset5::Constant>(ngPrc, ngraph::Shape{}, 1);
auto plus = std::make_shared<ngraph::opset5::Add>(cnc, one, ngraph::op::AutoBroadcastType::NUMPY);
auto one = std::make_shared<ov::op::v0::Constant>(inType, ngraph::Shape{}, 1);
auto plus = std::make_shared<ov::op::v1::Add>(cnc, one, ngraph::op::AutoBroadcastType::NUMPY);
plus->set_friendly_name("plus_one");
auto mem_w = std::make_shared<ngraph::opset5::Assign>(spl->output(1), "id");
auto& o_tensor = plus->get_output_tensor(0);
o_tensor.set_names({"plus_one_t"});
//input->output(0).set_names({"plus_one"});
auto mem_w = std::make_shared<ov::op::v3::Assign>(spl->output(1), "id");
// WA for nGraph limitations: Assign should have a control dependency on the read,
// and someone should hold the Assign node.
@@ -79,4 +85,8 @@ void SplitConcatMemory::SetUp() {
ngraph::ParameterVector {input},
"CyclicBuffer4");
}
} // namespace SubgraphTestsDefinitions
} // namespace subgraph
} // namespace test
} // namespace ov
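For context, the ReadValue/Assign pair is what makes the buffer persist between infer calls. A minimal sketch of the state pattern on its own (variable id and shapes are illustrative; the real test feeds Assign from the VariadicSplit output instead):

auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2});
auto init = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1, 2}, 0);
auto read = std::make_shared<ov::op::v3::ReadValue>(init, "state");  // last written value, or init
auto sum = std::make_shared<ov::op::v1::Add>(read, param);
auto write = std::make_shared<ov::op::v3::Assign>(sum, "state");     // persisted for the next infer
write->add_control_dependency(read);                                 // write strictly after read
auto res = std::make_shared<ov::op::v0::Result>(sum);
auto model = std::make_shared<ov::Model>(ov::ResultVector{res},
                                         ov::SinkVector{write},      // the sink holds the Assign node
                                         ov::ParameterVector{param});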


@@ -53,7 +53,7 @@ function(add_common_utils ADD_TARGET_NAME)
ie_faster_build(${ADD_TARGET_NAME}
UNITY
PCH PRIVATE "include/common_test_utils/precomp.hpp"
PCH PRIVATE "src/precomp.hpp"
)
# detecting regex support


@@ -6,17 +6,17 @@
#include <cmath>
#include <utility>
#include <gtest/gtest.h>
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/tensor.hpp"
#include <ngraph/type/bfloat16.hpp>
#include <ngraph/type/float16.hpp>
#include <ngraph/type/element_type_traits.hpp>
#include <ie_blob.h>
#include <random>
#include "common_test_utils/common_utils.hpp"
#include "gtest/gtest.h"
#include "ie_blob.h"
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/runtime/tensor.hpp"
#include "ngraph/type/bfloat16.hpp"
#include "ngraph/type/float16.hpp"
namespace CommonTestUtils {
OPENVINO_SUPPRESS_DEPRECATED_START
@@ -53,6 +53,7 @@ inline std::vector<float> generate_float_numbers(std::size_t vec_len, float min,
* @param values src tensor which should be broadcast
*/
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr &blob, InferenceEngine::Blob::Ptr &values);
void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values);
/**
* Wrapper on top of fill_data_with_broadcast with simplified signature
@@ -62,32 +63,20 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr &blob, InferenceEngine:
* @param values data to broadcast
*/
void fill_data_with_broadcast(InferenceEngine::Blob::Ptr &blob, size_t axis, std::vector<float> values);
void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float> values);
/**
* Make a view blob with new shape. It will reinterpret original tensor data as a tensor with new shape.
*
* NB! Limitation: the new view blob does not take ownership of the data buffer. The original blob must stay
* alive while the view is in use.
*
* @param blob original source tensor
* @param tensor original source tensor
* @param new_shape new shape for the view blob
* @return blob view with the new shape
*/
InferenceEngine::Blob::Ptr
make_reshape_view(const InferenceEngine::Blob::Ptr &blob, InferenceEngine::SizeVector new_shape);
/**
* Fill blob with single value for all elements
*
* like:
* fill_data_with_broadcast(blob, 0, {val});
*
* @param blob tensor to fill in
* @param val value to set into each element
*/
void fill_data_const(InferenceEngine::Blob::Ptr &blob, float val);
/**
* Calculate size of buffer required for provided tensor descriptor.
* @param tdesc provided tensor descriptor
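// A hedged usage sketch of the two new ov::Tensor overloads declared above
// (element type, shapes, and values are illustrative):
//
//   // per-axis values, broadcast over the remaining dimensions; non-f32
//   // tensors get the f32 values converted to their element type first
//   ov::Tensor dst(ov::element::i32, ov::Shape{2, 3, 4});
//   CommonTestUtils::fill_data_with_broadcast(dst, 1, {1.f, 2.f, 3.f});
//
//   // tensor-to-tensor variant: element types must match and shapes must be
//   // broadcast-compatible (right-aligned, source dims 1 or equal to target)
//   ov::Tensor values(ov::element::i32, ov::Shape{3, 1});  // assume already filled
//   CommonTestUtils::fill_data_with_broadcast(dst, values);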


@@ -1,6 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <bitset>


@@ -1,6 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/log.hpp"
#include "ngraph/pattern/matcher.hpp"


@@ -1,6 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>


@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/data_utils.hpp"
#include <cmath>
#include <debug.h> // to allow putting vector into exception string stream
@@ -9,6 +11,8 @@
#include <ie_blob.h>
#include <blob_factory.hpp>
#include "openvino/core/deprecated.hpp"
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/runtime/tensor.hpp"
using namespace InferenceEngine::details;
@@ -63,10 +67,10 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine:
IE_ASSERT(values_dims.size() <= n_dims);
IE_ASSERT(n_dims <= MAX_N_DIMS);
SizeVector src_dims(MAX_N_DIMS, 1);
ov::Shape src_dims(MAX_N_DIMS, 1);
std::copy(values_dims.rbegin(), values_dims.rend(), src_dims.rbegin());
SizeVector dst_dims(MAX_N_DIMS, 1);
ov::Shape dst_dims(MAX_N_DIMS, 1);
std::copy(blob_dims.rbegin(), blob_dims.rend(), dst_dims.rbegin());
bool compatible = true;
@@ -77,8 +81,8 @@ void fill_data_with_broadcast(InferenceEngine::Blob::Ptr& blob, InferenceEngine:
IE_ASSERT(compatible);
auto fill_strides_like_plain = [] (SizeVector dims) {
SizeVector str(dims.size());
auto fill_strides_like_plain = [] (ov::Shape dims) {
ov::Shape str(dims.size());
if (str.empty())
return str;
else
@@ -202,58 +206,123 @@ size_t byte_size(const InferenceEngine::TensorDesc &tdesc) {
auto dims = tdesc.getDims();
return prc.size() * std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies<size_t>());
}
OPENVINO_SUPPRESS_DEPRECATED_END
/**
* Repeatedly fill a buffer with the given values, cycling through them.
*
* @tparam PRC
* @param data
* @param size
* @param values
*/
template<InferenceEngine::Precision::ePrecision PRC = InferenceEngine::Precision::FP32>
static void fill_data_const(void *data, size_t size, const std::vector<float> &values) {
auto t_data = static_cast<typename InferenceEngine::PrecisionTrait<PRC>::value_type *>(data);
auto val_size = values.size();
for (size_t i = 0, j = 0; i < size; i++) {
t_data[i] = values[j++];
if (j == val_size) j = 0;
void fill_data_with_broadcast(ov::Tensor& tensor, ov::Tensor& values) {
constexpr size_t MAX_N_DIMS = 7; // Suppose it's enough
OPENVINO_ASSERT(tensor.get_element_type() == values.get_element_type());
auto values_dims = values.get_shape();
auto tensor_dims = tensor.get_shape();
auto n_dims = tensor_dims.size();
OPENVINO_ASSERT(values_dims.size() <= n_dims);
OPENVINO_ASSERT(n_dims <= MAX_N_DIMS);
ov::Shape src_dims(MAX_N_DIMS, 1);
std::copy(values_dims.rbegin(), values_dims.rend(), src_dims.rbegin());
ov::Shape dst_dims(MAX_N_DIMS, 1);
std::copy(tensor_dims.rbegin(), tensor_dims.rend(), dst_dims.rbegin());
bool compatible = true;
for (int i = 0; i < MAX_N_DIMS; i++) {
if (src_dims[i] != dst_dims[i] && src_dims[i] != 1)
compatible = false;
}
}
void fill_data_const(InferenceEngine::Blob::Ptr& blob, const std::vector<float> &val) {
auto prc = blob->getTensorDesc().getPrecision();
auto raw_data_ptr = blob->buffer().as<void*>();
auto raw_data_size = blob->size();
OPENVINO_ASSERT(compatible);
using InferenceEngine::Precision;
switch (prc) {
case Precision::FP32:
fill_data_const<Precision::FP32>(raw_data_ptr, raw_data_size, val);
auto fill_strides_like_plain = [] (ov::Shape dims) {
ov::Shape str(dims.size());
if (str.empty())
return str;
else
str.back() = 1;
// stride[i] = stride[i+1]*d[i+1]
std::transform(dims.rbegin(), dims.rend() - 1, str.rbegin(), str.rbegin() + 1,
[] (size_t d, size_t s) { return d * s; });
// zero the strides of broadcast dimensions (dim == 1)
std::transform(str.begin(), str.end(), dims.begin(), str.begin(),
[] (size_t s, size_t d) { return d == 1 ? 0 : s; });
return str;
};
ov::Shape src_strides = fill_strides_like_plain(src_dims);
ov::Shape dst_strides = fill_strides_like_plain(dst_dims);
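// Worked example: for dims {1, 1, 1, 1, 2, 4, 3} the plain strides are
// {24, 24, 24, 24, 12, 3, 1}; zeroing the dims equal to 1 yields
// {0, 0, 0, 0, 12, 3, 1}, so a broadcast axis never advances the pointer.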
auto dst_ptr = tensor.data();
auto src_ptr = values.data();
using namespace ov::element;
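// The copy below is bitwise, so element types are grouped by byte width:
// f32 shares the uint32_t path, f16/bf16 the uint16_t path, and so on.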
switch (tensor.get_element_type()) {
case u64:
case i64:
copy_7D<uint64_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
break;
case Precision::I32:
fill_data_const<Precision::I32>(raw_data_ptr, raw_data_size, val);
case f32:
case i32:
copy_7D<uint32_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
break;
case Precision::U8:
fill_data_const<Precision::U8>(raw_data_ptr, raw_data_size, val);
case i16:
case u16:
case f16:
case bf16:
copy_7D<uint16_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
break;
case Precision::I8:
fill_data_const<Precision::I8>(raw_data_ptr, raw_data_size, val);
break;
case Precision::U16:
fill_data_const<Precision::U16>(raw_data_ptr, raw_data_size, val);
break;
case Precision::I16:
fill_data_const<Precision::I16>(raw_data_ptr, raw_data_size, val);
case u8:
case i8:
copy_7D<uint8_t>(src_ptr, src_strides, dst_ptr, dst_strides, dst_dims);
break;
default:
IE_THROW() << "Unsupported precision by fill_data_const() function";
OPENVINO_THROW("Unsupported precision by fill_data_with_broadcast function");
}
}
void fill_data_const(InferenceEngine::Blob::Ptr& blob, float val) {
fill_data_const(blob, std::vector<float> {val});
template<ov::element::Type_t SRC_E, ov::element::Type_t DST_E>
void copy_with_convert(ov::Tensor& src_tensor, ov::Tensor& dst_tensor) {
using SRC_TYPE = typename ov::fundamental_type_for<SRC_E>;
using DST_TYPE = typename ov::fundamental_type_for<DST_E>;
auto src_ptr = src_tensor.data<SRC_TYPE>();
auto src_size = src_tensor.get_size();
auto dst_ptr = dst_tensor.data<DST_TYPE>();
std::copy(src_ptr, src_ptr + src_size, dst_ptr);
}
ov::Tensor make_with_precision_convert(ov::Tensor& tensor, ov::element::Type prc) {
ov::Tensor new_tensor(prc, tensor.get_shape());
#define CASE(_PRC) case ov::element::_PRC: \
copy_with_convert<ov::element::Type_t::f32, ov::element::Type_t::_PRC> (tensor, new_tensor); break
switch (prc) {
CASE(f32); CASE(f16); CASE(i64); CASE(u64); CASE(i32); CASE(u32); CASE(i16); CASE(u16); CASE(i8); CASE(u8);
default: OPENVINO_THROW("Unsupported precision case");
}
#undef CASE
return new_tensor;
}
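// Note: the CASE macro above always instantiates the source side as f32, so
// this helper effectively converts an f32 tensor to the requested precision.
// Hedged usage sketch (values are illustrative):
//
//   std::vector<float> vals = {1.f, 2.f, 3.f};
//   ov::Tensor f32_tensor(ov::element::f32, ov::Shape{3}, vals.data());
//   ov::Tensor i8_tensor = make_with_precision_convert(f32_tensor, ov::element::i8);  // 1, 2, 3 as int8_t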
void fill_data_with_broadcast(ov::Tensor& tensor, size_t axis, std::vector<float> values) {
ov::Shape value_dims(tensor.get_shape().size() - axis, 1);
value_dims.front() = values.size();
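// e.g. a 4-D tensor with axis == 1 and 3 values gives value_dims {3, 1, 1};
// right-aligned against the tensor shape this broadcasts along axis 1.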
auto prc = tensor.get_element_type();
ov::Tensor values_tensor;
values_tensor = ov::Tensor(ov::element::f32, value_dims, values.data());
if (prc != ov::element::f32) {
values_tensor = make_with_precision_convert(values_tensor, prc);
}
fill_data_with_broadcast(tensor, values_tensor);
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace CommonTestUtils