Api 2.0/migrate shape inference test to new api (#19665)

* Migrate static shape inference test to new API

* Use new API in CPU custom shape inference tests

* Rename range shape inference test file
Pawel Raasz 2023-09-12 13:15:04 +02:00 committed by GitHub
parent 4af1fd087c
commit f3d4665f7b
101 changed files with 956 additions and 1114 deletions
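
The pattern repeated in the hunks below is the same everywhere: the old free-function shape_inference filled a caller-provided output_shapes vector and took constant inputs as std::map<size_t, HostTensorPtr>, while the new one returns the inferred shapes and takes std::unordered_map<size_t, ov::Tensor>. A minimal before/after sketch of that call, assuming the test fixture already provides op, input_shapes and output_shapes as in the hunks below:

// Old API (removed): output shapes via out-parameter, constant inputs as HostTensor pointers.
int32_t spatial_dims[] = {10, 20};
const std::map<size_t, HostTensorPtr> old_const_data{
    {1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
shape_inference(op.get(), input_shapes, output_shapes, old_const_data);

// New API (added): output shapes are returned, constant inputs are ov::Tensor views
// that wrap the caller's buffer without copying it.
const std::unordered_map<size_t, ov::Tensor> const_data{
    {1, {element::i32, ov::Shape{2}, spatial_dims}}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);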


@ -21,12 +21,11 @@ protected:
TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, default_ctor) {
int32_t spatial_dims[] = {10, 20};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
const std::unordered_map<size_t, ov::Tensor> const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}};
op = make_op();
input_shapes = ShapeVector{{1, 3, 1, 2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 10, 20}));
@ -39,7 +38,7 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_as_constant)
op = make_op(data, out_shape);
input_shapes = ShapeVector{{1, 3, 10}, {1}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 17}));
@ -52,11 +51,10 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map)
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8, 7};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{3}, spatial_dims)}};
const std::unordered_map<size_t, ov::Tensor> const_data{{1, {element::i32, ov::Shape{3}, spatial_dims}}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 9, 8, 7}));
@ -69,11 +67,10 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map_
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
const std::unordered_map<size_t, ov::Tensor> const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
ov::NodeValidationFailure,
HasSubstr("Number of spatial dimensions is not compatible with input data rank"));
}


@ -21,12 +21,11 @@ protected:
TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, default_ctor) {
int32_t spatial_dims[] = {10, 20};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
const std::unordered_map<size_t, Tensor> const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}};
op = make_op();
input_shapes = ShapeVector{{1, 3, 1, 2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 10, 20})));
@ -39,7 +38,7 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_as_constant)
op = make_op(data, out_shape);
input_shapes = ShapeVector{{1, 3, 10}, {1}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 17})));
@ -52,11 +51,10 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map)
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8, 7};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{3}, spatial_dims)}};
const std::unordered_map<size_t, Tensor> const_data{{1, {element::i32, ov::Shape{3}, spatial_dims}}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 9, 8, 7})));
@ -69,11 +67,10 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map_
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
const std::unordered_map<size_t, Tensor> const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
ov::NodeValidationFailure,
HasSubstr("Number of spatial dimensions is not compatible with input data rank"));
}


@ -34,7 +34,7 @@ void assignTest() {
// Test StaticShape
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 2, 64, 64}}, static_output_shapes = {StaticShape{}};
shape_inference(assign.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(assign.get(), static_input_shapes);
ASSERT_EQ(static_input_shapes[0], (StaticShape{1, 2, 64, 64}));
}


@ -34,7 +34,7 @@ TEST(StaticShapeInferenceTest, AUGRUCellTest_all_inputs_static_rank) {
std::vector<StaticShape> static_output_shapes{StaticShape{}, StaticShape{}};
shape_inference(augru.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(augru.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -62,6 +62,6 @@ TEST(StaticShapeInferenceTest, AUGRUCellTest_all_inputs_dynamic_rank) {
std::vector<StaticShape> static_output_shapes{StaticShape{}, StaticShape{}};
shape_inference(augru.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(augru.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({batch_size, hidden_size}));
}


@ -38,9 +38,7 @@ TEST(StaticShapeInferenceTest, AGRUSequenceTest_FORWARD_all_static_rank) {
StaticShape{num_directions, gates_count * hidden_size}, // B
StaticShape{batch_size, seq_len, 1}}; // A
std::vector<StaticShape> static_output_shapes{StaticShape{}, StaticShape{}};
shape_inference(augru_sequence.get(), static_input_shapes, static_output_shapes);
const auto static_output_shapes = shape_inference(augru_sequence.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(static_output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -73,9 +71,7 @@ TEST(StaticShapeInferenceTest, AGRUSequenceTest_FORWARD_all_inputs_dynamic_rank)
StaticShape{num_directions, gates_count * hidden_size}, // B
StaticShape{batch_size, seq_len, 1}}; // A
std::vector<StaticShape> static_output_shapes{StaticShape{}, StaticShape{}};
shape_inference(augru_sequence.get(), static_input_shapes, static_output_shapes);
const auto static_output_shapes = shape_inference(augru_sequence.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(static_output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}


@ -35,13 +35,12 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, default_ctor) {
int32_t crops_begin_val[] = {0, 2, 0, 0, 0};
int32_t crops_end_val[] = {0, 2, 1, 0, 0};
const auto constant_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{5}, block_val)},
{2, std::make_shared<HostTensor>(element::i32, Shape{5}, crops_begin_val)},
{3, std::make_shared<HostTensor>(element::i32, Shape{5}, crops_end_val)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{5}, block_val}},
{2, {element::i32, Shape{5}, crops_begin_val}},
{3, {element::i32, Shape{5}, crops_end_val}}};
input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
}
@ -53,14 +52,13 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, blocks_crops_in_constant_map) {
int32_t crops_begin_val[] = {0, 2, 0, 0, 0};
int32_t crops_end_val[] = {0, 2, 1, 0, 0};
const auto constant_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{5}, block_val)},
{2, std::make_shared<HostTensor>(element::i32, Shape{5}, crops_begin_val)},
{3, std::make_shared<HostTensor>(element::i32, Shape{5}, crops_end_val)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{5}, block_val}},
{2, {element::i32, Shape{5}, crops_begin_val}},
{3, {element::i32, Shape{5}, crops_end_val}}};
input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], (StaticShape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
}
@ -72,7 +70,7 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, blocs_crops_as_constants) {
op = make_op(data, block_shape, crops_begin, crops_end);
input_shapes = {{100, 7, 13, 3}, {4}, {4}, {4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
}
@ -83,11 +81,10 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, missing_tensor_data) {
int32_t block_val[] = {1, 6, 5, 1, 16};
int32_t crops_end_val[] = {0, 2, 1, 0, 0};
const auto constant_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{5}, block_val)},
{3, std::make_shared<HostTensor>(element::i32, Shape{5}, crops_end_val)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{5}, block_val}},
{3, {element::i32, Shape{5}, crops_end_val}}};
input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}};
EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, constant_data), NodeValidationFailure);
EXPECT_THROW(shape_inference(op.get(), input_shapes, constant_data), NodeValidationFailure);
}


@ -2,8 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/op/parameter.hpp"
#include "utils.hpp"
@ -27,8 +28,7 @@ TYPED_TEST_P(BECStaticShapeInferenceTest, broadcast_none) {
const auto op = this->make_op(a, b, op::AutoBroadcastType::NONE);
this->input_shapes = {StaticShape{3, 4, 7, 5}, StaticShape{3, 4, 7, 5}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({3, 4, 7, 5}));
}
@ -40,7 +40,7 @@ TYPED_TEST_P(BECStaticShapeInferenceTest, broadcast_none_incompatible_shapes) {
this->input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}};
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Argument shapes are inconsistent."))
}
@ -52,7 +52,7 @@ TYPED_TEST_P(BECStaticShapeInferenceTest, broadcast_numpy_equal_rank) {
this->input_shapes = {StaticShape{3, 1, 1, 5}, StaticShape{3, 1, 6, 1}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({3, 1, 6, 5}));
}
@ -64,7 +64,7 @@ TYPED_TEST_P(BECStaticShapeInferenceTest, broadcast_numpy_a_rank_higher) {
this->input_shapes = {StaticShape{6, 5, 1, 8}, StaticShape{5, 6, 1}},
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({6, 5, 6, 8}));
}
@ -76,7 +76,7 @@ TYPED_TEST_P(BECStaticShapeInferenceTest, broadcast_numpy_b_rank_higher) {
this->input_shapes = {StaticShape{5, 6, 1}, StaticShape{6, 5, 1, 8}},
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({6, 5, 6, 8}));
}
@ -88,7 +88,7 @@ TYPED_TEST_P(BECStaticShapeInferenceTest, broadcast_numpy_incompatible_shapes) {
this->input_shapes = {StaticShape{3, 4, 6, 6}, StaticShape{2, 4, 6, 6}};
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Argument shapes are inconsistent."))
}


@ -2,8 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/op/parameter.hpp"
#include "utils.hpp"
@ -30,7 +31,7 @@ TYPED_TEST_P(BELStaticShapeInferenceTest, broadcast_none) {
this->input_shapes = {StaticShape{3, 4, 7, 5}, StaticShape{3, 4, 7, 5}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({3, 4, 7, 5}));
}
@ -42,7 +43,7 @@ TYPED_TEST_P(BELStaticShapeInferenceTest, broadcast_none_incompatible_shapes) {
this->input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}};
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Argument shapes are inconsistent."))
}
@ -53,8 +54,7 @@ TYPED_TEST_P(BELStaticShapeInferenceTest, broadcast_numpy_equal_rank) {
const auto op = this->make_op(a, b);
this->input_shapes = {StaticShape{3, 1, 1, 5}, StaticShape{3, 1, 6, 1}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({3, 1, 6, 5}));
}
@ -65,8 +65,7 @@ TYPED_TEST_P(BELStaticShapeInferenceTest, broadcast_numpy_a_rank_higher) {
const auto op = this->make_op(a, b);
this->input_shapes = {StaticShape{6, 5, 1, 8}, StaticShape{5, 6, 1}},
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({6, 5, 6, 8}));
}
@ -77,8 +76,7 @@ TYPED_TEST_P(BELStaticShapeInferenceTest, broadcast_numpy_b_rank_higher) {
const auto op = this->make_op(a, b);
this->input_shapes = {StaticShape{5, 6, 1}, StaticShape{6, 5, 1, 8}},
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({6, 5, 6, 8}));
}
@ -90,7 +88,7 @@ TYPED_TEST_P(BELStaticShapeInferenceTest, broadcast_numpy_incompatible_shapes) {
this->input_shapes = {StaticShape{3, 4, 6, 6}, StaticShape{2, 4, 6, 6}};
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Argument shapes are inconsistent."))
}


@ -74,7 +74,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, auto_pads_same_lower_inputs_
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5}));
@ -93,7 +93,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, auto_pad_same_lower_inputs_s
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5}));
@ -113,7 +113,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, data_and_filters_num_channel
input_shapes = ShapeVector{{3, 5, 5, 5}, {7, 6, 3, 3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Data batch channel count (5) does not match filter"));
}
@ -132,7 +132,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, data_rank_not_4) {
input_shapes = ShapeVector{{3, 6, 5}, {7, 6, 3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Expected 4D for the input. Got:"));
}


@ -25,9 +25,8 @@ TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_autob_numpy_equal_ran
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1, 1, 5}, StaticShape{3, 1, 6, 1}},
static_output_shapes = {StaticShape{}};
shape_inference(node.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1, 1, 5}, StaticShape{3, 1, 6, 1}};
const auto static_output_shapes = shape_inference(node.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 1, 6, 5}));
}
@ -38,9 +37,8 @@ TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_autob_numpy_a_rank_hi
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 1, 5}, StaticShape{4, 6, 1}},
static_output_shapes = {StaticShape{}};
shape_inference(node.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 1, 5}, StaticShape{4, 6, 1}};
const auto static_output_shapes = shape_inference(node.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 4, 6, 5}));
}
@ -51,9 +49,8 @@ TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_autob_numpy_b_rank_hi
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{4, 6, 1}, StaticShape{3, 4, 1, 5}},
static_output_shapes = {StaticShape{}};
shape_inference(node.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{4, 6, 1}, StaticShape{3, 4, 1, 5}};
const auto static_output_shapes = shape_inference(node.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 4, 6, 5}));
}
@ -64,10 +61,9 @@ TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_autob_numpy_incompati
auto node = std::make_shared<TypeParam>(A, B);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{2, 4, 6, 5}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{2, 4, 6, 5}};
ASSERT_THROW(shape_inference(node.get(), static_input_shapes, static_output_shapes), NodeValidationFailure);
ASSERT_THROW(shape_inference(node.get(), static_input_shapes), NodeValidationFailure);
}
TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_aubtob_none) {
@ -76,9 +72,8 @@ TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_aubtob_none) {
auto node = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 4, 6, 5}},
static_output_shapes = {StaticShape{}};
shape_inference(node.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 4, 6, 5}};
const auto static_output_shapes = shape_inference(node.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 4, 6, 5}));
}
@ -89,10 +84,9 @@ TYPED_TEST_P(StaticShapeInferenceTest_BEA, shape_inference_aubtob_none_incompati
auto node = std::make_shared<TypeParam>(A, B, op::AutoBroadcastType::NONE);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 4, 6, 5}, StaticShape{3, 1, 6, 1}};
ASSERT_THROW(shape_inference(node.get(), static_input_shapes, static_output_shapes), NodeValidationFailure);
ASSERT_THROW(shape_inference(node.get(), static_input_shapes), NodeValidationFailure);
}
REGISTER_TYPED_TEST_SUITE_P(StaticShapeInferenceTest_BEA,


@ -15,18 +15,16 @@ TEST(StaticShapeInferenceTest, BroadcastBidirectionalTest) {
auto broadcast_v3 = std::make_shared<op::v3::Broadcast>(input, target_shape, op::BroadcastType::BIDIRECTIONAL);
int32_t target_shape_val[] = {1, 16, 50, 1};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{4}, target_shape_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{4}, target_shape_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{16, 1, 8}, StaticShape{4}};
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{16, 1, 8}, StaticShape{4}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 16, 50, 8}));
static_input_shapes = {StaticShape{16, 1, 1}, StaticShape{4}};
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, BroadcastBidirectionalConstantTest) {
@ -34,9 +32,9 @@ TEST(StaticShapeInferenceTest, BroadcastBidirectionalConstantTest) {
auto target_shape = std::make_shared<ov::op::v0::Constant>(element::i32, ov::Shape{3}, std::vector<int32_t>{16, 1, 40});
auto broadcast_v3 = std::make_shared<op::v3::Broadcast>(input, target_shape, op::BroadcastType::BIDIRECTIONAL);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 16, 50, 1}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {});
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 16, 50, 1}, StaticShape{3}};
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 16, 50, 40}));
}
@ -47,18 +45,16 @@ TEST(StaticShapeInferenceTest, BroadcastPDPDTest) {
std::make_shared<op::v3::Broadcast>(input, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1));
int32_t target_shape_val[] = {2, 3, 6};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{3}, target_shape_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{3}, target_shape_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({2, 3, 6}));
static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, BroadcastPDPDConstantTest) {
@ -67,9 +63,8 @@ TEST(StaticShapeInferenceTest, BroadcastPDPDConstantTest) {
auto broadcast_v3 =
std::make_shared<op::v3::Broadcast>(input, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1));
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {});
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({2, 3, 6}));
}
@ -79,18 +74,16 @@ TEST(StaticShapeInferenceTest, BroadcastNumpyTest) {
auto broadcast_v3 = std::make_shared<op::v3::Broadcast>(input, target_shape, op::BroadcastType::NUMPY);
int32_t target_shape_val[] = {1, 16, 50, 50};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{4}, target_shape_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{4}, target_shape_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{16, 1, 1}, StaticShape{4}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{16, 1, 1}, StaticShape{4}};
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 16, 50, 50}));
static_input_shapes = {StaticShape{16, 1, 1}, StaticShape{4}};
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, BroadcastNumpyConstantTest) {
@ -99,9 +92,9 @@ TEST(StaticShapeInferenceTest, BroadcastNumpyConstantTest) {
std::make_shared<ov::op::v0::Constant>(element::i32, ov::Shape{4}, std::vector<int32_t>{1, 16, 50, 50});
auto broadcast_v3 = std::make_shared<op::v3::Broadcast>(input, target_shape, op::BroadcastType::NUMPY);
std::vector<StaticShape> static_input_shapes = {StaticShape{16, 1, 1}, StaticShape{4}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {});
std::vector<StaticShape> static_input_shapes = {StaticShape{16, 1, 1}, StaticShape{4}};
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 16, 50, 50}));
}
@ -114,21 +107,16 @@ TEST(StaticShapeInferenceTest, BroadcastExplicitTest) {
int32_t target_shape_val[] = {1, 16, 50, 50};
int32_t axes_mapping_val[] = {1};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{4}, target_shape_val);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{1}, axes_mapping_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{4}, target_shape_val}},
{2, {element::Type_t::i32, ov::Shape{1}, axes_mapping_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{16}, StaticShape{4}, StaticShape{1}};
std::vector<StaticShape> static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 16, 50, 50}));
constant_data.erase(1);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, constant_data),
NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes, constant_data), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v3.get(), static_input_shapes), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, BroadcastExplicitConstantTest) {
@ -140,8 +128,7 @@ TEST(StaticShapeInferenceTest, BroadcastExplicitConstantTest) {
std::make_shared<op::v3::Broadcast>(input, target_shape, axes_mapping, op::BroadcastType::EXPLICIT);
std::vector<StaticShape> static_input_shapes = {StaticShape{16}, StaticShape{4}, StaticShape{1}};
std::vector<StaticShape> static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v3.get(), static_input_shapes, static_output_shapes, {});
const auto static_output_shapes = shape_inference(broadcast_v3.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 16, 50, 50}));
}
@ -154,18 +141,16 @@ TEST(StaticShapeInferenceTest, BroadcastV1PDPDTest) {
std::make_shared<op::v1::Broadcast>(input, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1));
int32_t target_shape_val[] = {2, 3, 6};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{3}, target_shape_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{3}, target_shape_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v1.get(), static_input_shapes, static_output_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
const auto static_output_shapes = shape_inference(broadcast_v1.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({2, 3, 6}));
static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(broadcast_v1.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v1.get(), static_input_shapes), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, BroadcastV1NumpyTest) {
@ -174,18 +159,16 @@ TEST(StaticShapeInferenceTest, BroadcastV1NumpyTest) {
auto broadcast_v1 = std::make_shared<op::v1::Broadcast>(input, target_shape);
int32_t target_shape_val[] = {2, 3, 6};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{3}, target_shape_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{3}, target_shape_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v1.get(), static_input_shapes, static_output_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
const auto static_output_shapes = shape_inference(broadcast_v1.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({2, 3, 6}));
static_input_shapes = {StaticShape{3, 1}, StaticShape{3}};
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(broadcast_v1.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v1.get(), static_input_shapes), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, BroadcastV1ExplicitTest) {
@ -196,18 +179,15 @@ TEST(StaticShapeInferenceTest, BroadcastV1ExplicitTest) {
int32_t target_shape_val[] = {2, 3, 1};
int32_t axes_mapping_val[] = {1, 2};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{3}, target_shape_val);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{2}, axes_mapping_val);
std::unordered_map<size_t, ov::Tensor> constant_data{{1, {element::Type_t::i32, ov::Shape{3}, target_shape_val}},
{2, {element::Type_t::i32, ov::Shape{2}, axes_mapping_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
shape_inference(broadcast_v1.get(), static_input_shapes, static_output_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 1}, StaticShape{3}, StaticShape{2}};
const auto static_output_shapes = shape_inference(broadcast_v1.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({2, 3, 1}));
static_input_shapes = {StaticShape{3, 1}, StaticShape{3}, StaticShape{2}};
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(broadcast_v1.get(), static_input_shapes, static_output_shapes, {}), NodeValidationFailure);
EXPECT_THROW(shape_inference(broadcast_v1.get(), static_input_shapes), NodeValidationFailure);
}


@ -22,7 +22,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, default_ctor) {
op->set_with_right_bound(false);
input_shapes = ShapeVector{{3, 2, 7, 89}, {3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({3, 2, 7, 89}));
@ -34,7 +34,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, dynamic_rank_inputs) {
op = make_op(data, buckets, element::i32);
input_shapes = ShapeVector{{10, 12, 1}, {5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 12, 1}));
@ -46,7 +46,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, static_rank_inputs) {
op = make_op(data, buckets);
input_shapes = ShapeVector{{100, 11}, {1}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({100, 11}));
@ -58,7 +58,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, bucket_incorrect_rank) {
op = make_op(data, buckets, element::i32);
input_shapes = ShapeVector{{100, 11}, {2, 1}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Buckets input must be a 1D tensor"));
}


@ -67,7 +67,7 @@ INSTANTIATE_TEST_SUITE_P(
/** \brief Check shape_infer for concat op on static shapes. */
TEST_P(ConcatStaticShapeInferenceTest, concat_static) {
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes.front(), exp_shape);
}


@ -122,7 +122,7 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 2d_inputs_dynamic_rank
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5}, {6, 1, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 1, 7, 7}));
@ -142,7 +142,7 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 3d_auto_pad_same_lower
op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {6, 2, 3, 3, 3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 2, 2, 1, 3}));
@ -161,11 +161,10 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 3d_auto_pad_same_upper
op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);
int32_t spatial_dims[] = {2, 6, 1};
const auto const_map =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, spatial_dims)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{2, {element::i32, Shape{3}, spatial_dims}}};
input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {5, 7, 3, 3, 3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_map);
output_shapes = shape_inference(op.get(), input_shapes, const_map);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 2, 6, 1}));


@ -71,7 +71,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, 2d_auto_pads_same_lower_inputs_dyn
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5}));
@ -90,7 +90,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_inputs_stat
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5, 5}));
@ -110,7 +110,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, data_and_filters_num_channels_not_
input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {7, 6, 3, 3, 3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Data batch channel count (5) does not match filter"));
}
@ -129,7 +129,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, data_rank_not_compatible_with_filt
input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Data batch and filters rank do not match"));
}


@ -26,7 +26,7 @@ TEST_F(CTCGreedyDecoderSeqLenV6StaticShapeInferenceTest, basic) {
input_shapes = {StaticShape{4, 100, 1200}, StaticShape{4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 100}));
EXPECT_EQ(output_shapes[1], StaticShape({4}));
}
@ -36,13 +36,13 @@ TEST_F(CTCGreedyDecoderSeqLenV6StaticShapeInferenceTest, default_ctor) {
// Two inputs
input_shapes = {StaticShape{4, 100, 1200}, StaticShape{4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 100}));
EXPECT_EQ(output_shapes[1], StaticShape({4}));
// Three inputs (the last one is optional)
input_shapes = {StaticShape{4, 100, 1200}, StaticShape{4}, {}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 100}));
EXPECT_EQ(output_shapes[1], StaticShape({4}));
}
@ -54,7 +54,7 @@ TEST_F(CTCGreedyDecoderSeqLenV6StaticShapeInferenceTest, incompatible_batch) {
input_shapes = {StaticShape{4, 100, 1200}, StaticShape{6}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The first dimensions of input tensors must match"))
}
@ -66,7 +66,7 @@ TEST_F(CTCGreedyDecoderSeqLenV6StaticShapeInferenceTest, incompatible_seq_len_ra
input_shapes = {StaticShape{4, 100, 1200}, StaticShape{4, 1}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The rank of sequence len tensor must be equal to 1"))
}


@ -26,7 +26,7 @@ TEST_F(CTCGreedyDecoderV0StaticShapeInferenceTest, basic) {
input_shapes = {StaticShape{100, 3, 1200}, StaticShape{100, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({3, 100, 1, 1}));
}
@ -35,7 +35,7 @@ TEST_F(CTCGreedyDecoderV0StaticShapeInferenceTest, decoder_default_ctor) {
input_shapes = {StaticShape{100, 3, 1200}, StaticShape{100, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({3, 100, 1, 1}));
}
@ -46,7 +46,7 @@ TEST_F(CTCGreedyDecoderV0StaticShapeInferenceTest, incompatible_batch) {
input_shapes = {StaticShape{10, 3, 1200}, StaticShape{100, 3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The first dimensions of input tensors must match"))
}
@ -58,7 +58,7 @@ TEST_F(CTCGreedyDecoderV0StaticShapeInferenceTest, incompatible_t_dim) {
input_shapes = {StaticShape{100, 3, 1200}, StaticShape{100, 5}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The second dimensions of input tensors must match"))
}


@ -29,7 +29,7 @@ TEST_F(CTCLossV4StaticShapeInferenceTest, correct_input_shapes) {
auto op = make_op(logits, logit_length, labels, label_length, blank_index);
input_shapes = ShapeVector{{10, 120, 28}, {10}, {10, 120}, {10}, {}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10}));
@ -39,7 +39,7 @@ TEST_F(CTCLossV4StaticShapeInferenceTest, default_ctor) {
auto op = make_op();
input_shapes = ShapeVector{{12, 120, 28}, {12}, {12, 120}, {12}, {}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({12}));


@ -57,9 +57,8 @@ TEST_P(AdaptiveAvgPoolV8CpuShapeInferenceTest , shape_inference_with_const_map)
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i32, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
@ -74,4 +73,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov


@ -61,8 +61,8 @@ TEST_P(AdaptiveMaxPoolV8CpuShapeInferenceTest , shape_inference_with_const_map)
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i32, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
@ -77,4 +77,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov


@ -1,29 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <openvino/cc/factory.h>
#include <gtest/gtest.h>
#include "custom_shape_infer.hpp"
#include "ie_ngraph_utils.hpp"
#include "openvino/cc/factory.h"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/type.hpp"
#include "openvino/op/ops.hpp"
#include "openvino/op/parameter.hpp"
#include "shape_inference/custom/reshape.hpp"
#include "shape_inference/custom/gather.hpp"
#include "shape_inference/custom/transpose.hpp"
#include "shape_inference/custom/adaptive_pooling.hpp"
#include "shape_inference/custom/color_convert.hpp"
#include "shape_inference/custom/eltwise.hpp"
#include "shape_inference/custom/adaptive_pooling.hpp"
#include "shape_inference/custom/fullyconnected.hpp"
#include "shape_inference/custom/gather.hpp"
#include "shape_inference/custom/matmul.hpp"
#include "shape_inference/custom/ngram.hpp"
#include "shape_inference/custom/one_hot.hpp"
#include "shape_inference/custom/priorbox.hpp"
#include "shape_inference/custom/priorbox_clustered.hpp"
#include "shape_inference/custom/reshape.hpp"
#include "shape_inference/custom/shapeof.hpp"
#include "shape_inference/custom/strided_slice.hpp"
#include "ie_ngraph_utils.hpp"
#include "custom_shape_infer.hpp"
#include "shape_inference/custom/transpose.hpp"
#include "shape_inference/shape_inference_status.hpp"
#include <gtest/gtest.h>
namespace ov {
namespace intel_cpu {
namespace unit_test {
@ -84,9 +86,9 @@ void compare_result(const std::vector<StaticShape>& ref, const std::vector<Vecto
} //namespace
void cpu_test_shape_infer(ov::Node* op,
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data) {
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::unordered_map<size_t, ov::Tensor>& constant_data) {
static std::shared_ptr<CustomShapeInferFF> cusFactory = std::make_shared<CustomShapeInferFF>();
auto shapeInferFactory = cusFactory->create(op->shared_from_this());
ASSERT_TRUE(shapeInferFactory != nullptr);
@ -114,9 +116,9 @@ void cpu_test_shape_infer(ov::Node* op,
const void* data = nullptr;
ov::element::Type elementType;
if (tensorIter != constant_data.end()) {
const auto tensor = tensorIter->second;
data = tensor->get_data_ptr();
elementType = tensor->get_element_type();
const auto& tensor = tensorIter->second;
data = tensor.data();
elementType = tensor.get_element_type();
} else {
const auto input_op = op->input_value(port).get_node_shared_ptr();
const auto const_op = ov::as_type_ptr<const ov::op::v0::Constant>(input_op);


@ -2,21 +2,22 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_types.h"
#include <shape_inference/shape_inference_cpu.hpp>
#include <shape_inference/static_shape.hpp>
#include <common_test_utils/common_utils.hpp>
#include <gtest/gtest.h>
#include "common_test_utils/common_utils.hpp"
#include "cpu_types.h"
#include "shape_inference/shape_inference_cpu.hpp"
#include "shape_inference/static_shape.hpp"
#pragma once
namespace ov {
namespace intel_cpu {
namespace unit_test {
void cpu_test_shape_infer(ov::Node* op,
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {});
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::unordered_map<size_t, ov::Tensor>& constant_data = {});
using ShapeVector = std::vector<ov::intel_cpu::StaticShape>;
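
For the CPU helper declared above, only the constant-data map type changes; it keeps the output_shapes out-parameter. A short sketch of the new call pattern, mirroring the CPU test hunks below (op, input_shapes, output_shapes and the axes values are illustrative placeholders):

// Constant inputs are plain ov::Tensor objects wrapping local buffers,
// instead of HostTensor objects built from Constant nodes.
std::vector<int64_t> axes{0, 2};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);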


@ -68,7 +68,7 @@ TYPED_TEST_P(CpuShapeInferenceGatherTest, axis_in_const_map) {
std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params;
auto op = this->make_gather(this->input_shapes);
auto axis_tensor = std::make_shared<HostTensor>(element::i32, ov::Shape{1}, &this->axis_val);
auto axis_tensor = ov::Tensor(element::i32, ov::Shape{1}, &this->axis_val);
this->output_shapes = {this->exp_shape};
unit_test::cpu_test_shape_infer(op.get(), this->input_shapes, this->output_shapes, {{2, axis_tensor}});
@ -83,4 +83,3 @@ INSTANTIATE_TYPED_TEST_SUITE_P(CpuShapeInfer, CpuShapeInferenceGatherTest, Gathe
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov


@ -72,15 +72,10 @@ TEST_P(OneHotCpuShapeInferenceTest , shape_inference_with_const_map) {
int64_t axis = -1;
const auto op = make_op(arg, depth, on, off, axis);
const auto depth_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{}, std::vector<int64_t>{m_depth});
const auto on_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_on});
const auto off_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_off});
const auto depth_tensor = std::make_shared<ov::HostTensor>(depth_const);
const auto on_tensor = std::make_shared<ov::HostTensor>(on_const);
const auto off_tensor = std::make_shared<ov::HostTensor>(off_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, depth_tensor},
{2, on_tensor},
{3, off_tensor}};
const auto depth_tensor = ov::Tensor(element::i64, ov::Shape{}, &m_depth);
const auto on_tensor = ov::Tensor(element::i32, ov::Shape{}, &m_on);
const auto off_tensor = ov::Tensor(element::i32, ov::Shape{}, &m_off);
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, depth_tensor}, {2, on_tensor}, {3, off_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
@ -101,15 +96,10 @@ TEST_P(OneHotCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
int64_t axis = -1;
const auto op = make_op(arg, depth, on, off, axis);
const auto depth_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{}, std::vector<int64_t>{m_depth});
const auto on_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_on});
const auto off_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{}, std::vector<int32_t>{m_off});
const auto depth_tensor = std::make_shared<ov::HostTensor>(depth_const);
const auto on_tensor = std::make_shared<ov::HostTensor>(on_const);
const auto off_tensor = std::make_shared<ov::HostTensor>(off_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, depth_tensor},
{2, on_tensor},
{3, off_tensor}};
const auto depth_tensor = ov::Tensor(element::i64, ov::Shape{}, &m_depth);
const auto on_tensor = ov::Tensor(element::i32, ov::Shape{}, &m_on);
const auto off_tensor = ov::Tensor(element::i32, ov::Shape{}, &m_off);
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, depth_tensor}, {2, on_tensor}, {3, off_tensor}};
// TODO , implementation should throw exception
ASSERT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data),
@ -126,4 +116,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov


@ -4,10 +4,11 @@
#include <gtest/gtest.h>
#include <vector>
#include "common_test_utils/test_assertions.hpp"
#include "custom_shape_infer.hpp"
#include <ngraph/opsets/opset8.hpp>
#include <vector>
#include "openvino/opsets/opset8.hpp"
namespace ov {
namespace intel_cpu {
namespace unit_test {
@ -180,12 +181,8 @@ TEST_P(PriorBoxV0CpuShapeInferenceTest , shape_inference_with_const_map) {
const auto image_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto op = make_op(layer_shape, image_shape, attrs);
const auto layer_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[0]);
const auto image_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[1]);
const std::map<size_t, HostTensorPtr> const_data {
{0, std::make_shared<HostTensor>(layer_const)},
{1, std::make_shared<HostTensor>(image_const)},
};
const std::unordered_map<size_t, ov::Tensor> const_data{{0, {element::i32, ov::Shape{2}, data[0].data()}},
{1, {element::i32, ov::Shape{2}, data[1].data()}}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_data);
}


@ -144,12 +144,10 @@ TEST_P(PriorBoxClusteredV0CpuShapeInferenceTest , shape_inference_with_const_map
const auto layer_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
const auto image_shape = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto op = make_op(layer_shape, image_shape, attrs);
const auto layer_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[0]);
std::map<size_t, HostTensorPtr> const_data{{0, std::make_shared<HostTensor>(layer_const)}};
std::unordered_map<size_t, ov::Tensor> const_data{{0, {element::i32, ov::Shape{2}, data[0].data()}}};
if (input_shapes.size() == 2) {
const auto image_const = std::make_shared<op::v0::Constant>(element::i32, ov::Shape{2}, data[1]);
const_data.insert({1, std::make_shared<HostTensor>(image_const)});
const_data.insert({1, {element::i32, ov::Shape{2}, data[1].data()}});
}
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_data);
}
@ -171,4 +169,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov


@ -63,9 +63,8 @@ TEST_P(ReshapeCpuShapeInferenceTest , shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node, specalZero);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
output_shapes.push_back(exp_shape);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
@ -92,9 +91,8 @@ TEST_P(ReshapeCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node, specalZero);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
std::ostringstream os;
os << "[cpu]reshape: the shape of input data ";
os << "(";
@ -134,4 +132,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov


@ -57,9 +57,9 @@ TEST_P(SqueezeCpuShapeInferenceTest , shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = axes.empty() ? ov::Tensor(element::i64, ov::Shape{axes.size()})
: ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
@ -92,9 +92,8 @@ TEST_P(SqueezeCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
std::ostringstream os;
os << "[cpu]squeeze: the shape of input data ";
os << "(";
@ -135,4 +134,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov
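
The empty-axes branch added above deserves a note: when the axes vector is empty, an owning zero-element tensor is created instead of wrapping a possibly null data pointer. A minimal sketch under that assumption:

    std::vector<int64_t> axes{};  // e.g. the "no axes" test parameter
    const auto axes_tensor = axes.empty()
        ? ov::Tensor(ov::element::i64, ov::Shape{axes.size()})                // allocates 0 elements
        : ov::Tensor(ov::element::i64, ov::Shape{axes.size()}, axes.data());  // wraps the vector
    const std::unordered_map<size_t, ov::Tensor> constant_data{{1, axes_tensor}};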

View File

@ -78,15 +78,10 @@ TEST_P(StridedSliceCpuShapeInferenceTest , shape_inference_in_const_map) {
const auto stride = std::make_shared<op::v0::Parameter>(element::i32, input_shapes[3].get_shape());
const auto op = make_op(arg, begin, end, stride, begin_mask, end_mask);
const auto begin_const = std::make_shared<op::v0::Constant>(element::i32, input_shapes[1].get_shape(), data[BEGIN]);
const auto end_const = std::make_shared<op::v0::Constant>(element::i32, input_shapes[2].get_shape(), data[END]);
const auto stride_const = std::make_shared<op::v0::Constant>(element::i32, input_shapes[3].get_shape(), data[STRIDE]);
const auto begin_tensor = std::make_shared<ov::HostTensor>(begin_const);
const auto end_tensor = std::make_shared<ov::HostTensor>(end_const);
const auto stride_tensor = std::make_shared<ov::HostTensor>(stride_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, begin_tensor},
{2, end_tensor},
{3, stride_tensor}};
const auto begin_tensor = ov::Tensor(element::i32, input_shapes[1].get_shape(), data[BEGIN].data());
const auto end_tensor = ov::Tensor(element::i32, input_shapes[2].get_shape(), data[END].data());
const auto stride_tensor = ov::Tensor(element::i32, input_shapes[3].get_shape(), data[STRIDE].data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, begin_tensor}, {2, end_tensor}, {3, stride_tensor}};
// implementation depends on some output information of the op
op->set_output_type(0, element::i32, {-1, -1, -1});
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
@ -95,7 +90,7 @@ TEST_P(StridedSliceCpuShapeInferenceTest , shape_inference_in_const_map) {
INSTANTIATE_TEST_SUITE_P(
CpuShapeInfer,
StridedSliceCpuShapeInferenceTest,
Values(make_tuple(unit_test::ShapeVector{{3, 4, 5}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{100}, {-100}, {-1}},
Values(make_tuple(unit_test::ShapeVector{{3, 4, 5}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{100, 100, 100}, {-100, -100, -100}, {-1, -1, -1}},
std::vector<int64_t>(4, 0), std::vector<int64_t>(4, 0), StaticShape({3, 4, 5})),
make_tuple(unit_test::ShapeVector{{3, 2, 3}, {3}, {3}, {3}}, std::vector<std::vector<int32_t>>{{1, 0, 0}, {2, 1, 3}, {1, 1, 1}},
std::vector<int64_t>(4, 0), std::vector<int64_t>(4, 0), StaticShape({1, 1, 3})),
@ -133,4 +128,3 @@ TEST(CpuShapeInfer, StridedSliceDefault_stride) {
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov
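
A condensed sketch of the StridedSlice const map in the new style, with illustrative begin/end/stride buffers. Note that the test data above was also padded to full-length vectors ({100, 100, 100} instead of {100}), presumably because the non-owning tensors expose exactly Shape{3} elements, so a one-element vector would be read past its end:

    int32_t begin[] = {0, 0, 0};
    int32_t end[] = {3, 4, 5};
    int32_t stride[] = {1, 1, 1};
    const std::unordered_map<size_t, ov::Tensor> constant_data{
        {1, {ov::element::i32, ov::Shape{3}, begin}},
        {2, {ov::element::i32, ov::Shape{3}, end}},
        {3, {ov::element::i32, ov::Shape{3}, stride}}};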

View File

@ -79,9 +79,9 @@ TEST_P(TransposeCpuShapeInferenceThrowExceptionTest, shape_inference_in_const_ma
const auto order = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
auto op = make_op(arg, order);
const auto axes = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{transpose_order.size()}, transpose_order);
const auto const_tensor = std::make_shared<ov::HostTensor>(axes);
const std::map<size_t, ov::HostTensorPtr> const_map = {{1, const_tensor}};
const auto const_tensor = transpose_order.empty() ? ov::Tensor(element::i64, ov::Shape{transpose_order.size()})
: ov::Tensor(element::i64, ov::Shape{transpose_order.size()}, transpose_order.data());
const std::unordered_map<size_t, ov::Tensor> const_map = {{1, const_tensor}};
OV_EXPECT_THROW(unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, const_map),
ov::Exception,
@ -99,4 +99,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -57,9 +57,8 @@ TEST_P(UnsqueezeCpuShapeInferenceTest , shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
op = std::make_shared<op::v0::Unsqueeze>(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
output_shapes.push_back(exp_shape);
unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes, constant_data);
}
@ -95,9 +94,8 @@ TEST_P(UnsqueezeCpuShapeInferenceThrowExceptionTest, wrong_pattern) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ov::HostTensor>(axes_const);
const std::map<size_t, ov::HostTensorPtr>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const std::unordered_map<size_t, ov::Tensor> constant_data = {{1, axes_tensor}};
std::ostringstream os;
os << "[cpu]unsqueeze: the shape of input data ";
os << "(";
@ -135,4 +133,3 @@ INSTANTIATE_TEST_SUITE_P(
} // namespace unit_test
} // namespace intel_cpu
} // namespace ov

View File

@ -55,7 +55,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_lower_inputs_d
op = make_op(data, offsets, filters, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2);
input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
@ -76,7 +76,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_lower_inputs_d
op = make_op(data, offsets, filters, masks, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2);
input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
@ -96,7 +96,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_uper_inputs_st
op = make_op(data, offsets, filters, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2);
input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
@ -117,7 +117,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_upper_inputs_s
op = make_op(data, offsets, filters, masks, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2);
input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
@ -140,7 +140,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, mask_channel_dimension_n
input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 17, 5, 5}};
OV_EXPECT_THROW(
shape_inference(op.get(), input_shapes, output_shapes),
shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr(
"The channels dimension of mask input is not compatible with filters and 'deformable group' attribute"));

View File

@ -1,10 +1,11 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <array>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
@ -36,13 +37,13 @@ TEST_F(DeformablePSROIPoolingV1StaticShapeInferenceTest, default_ctor) {
// 2 inputs
{
input_shapes = {StaticShape{2, 4, 8, 6}, StaticShape{rois_dim, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
// 3 inputs
{
input_shapes = {StaticShape{2, 4, 8, 6}, StaticShape{rois_dim, 5}, StaticShape{rois_dim, 20, group_size, group_size}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
}
@ -62,7 +63,7 @@ TEST_F(DeformablePSROIPoolingV1StaticShapeInferenceTest, no_offsets_input) {
StaticShape expected_output{rois_dim, output_dim, group_size, group_size};
input_shapes = {StaticShape{2, 4, 8, 6}, StaticShape{rois_dim, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
@ -82,6 +83,6 @@ TEST_F(DeformablePSROIPoolingV1StaticShapeInferenceTest, offsets_input) {
StaticShape expected_output{rois_dim, output_dim, group_size, group_size};
input_shapes = {StaticShape{2, 4, 8, 6}, StaticShape{rois_dim, 5}, StaticShape{rois_dim, 20, group_size, group_size}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}

View File

@ -24,7 +24,7 @@ TEST_F(DepthToSpaceV0StaticShapeInferenceTest, default_ctor) {
const auto op = make_op();
op->set_block_size(2);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 2, 2 * 3, 2 * 1080, 2 * 1616}));
@ -34,7 +34,7 @@ TEST_F(DepthToSpaceV0StaticShapeInferenceTest, block_first) {
const auto data = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(4));
const auto op = make_op(data, op_type::DepthToSpaceMode::BLOCKS_FIRST, 2);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 2, 2 * 3, 2 * 1080, 2 * 1616}));

View File

@ -48,7 +48,7 @@ TEST(StaticShapeInferenceTest, detection_output_v0_top_k) {
StaticShape{4, 10},
StaticShape{4, 20}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes[0], StaticShape({1, 1, 56, 7}));
}
@ -74,7 +74,7 @@ TEST(StaticShapeInferenceTest, detection_output_v0_no_share_location) {
StaticShape{4, 10},
StaticShape{4, 40}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes[0], StaticShape({1, 1, 40, 7}));
}
@ -98,7 +98,7 @@ TEST(StaticShapeInferenceTest, detection_output_v0_basic) {
StaticShape{4, 10},
StaticShape{4, 20}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes[0], (StaticShape{1, 1, 800, 7}));
}
@ -118,7 +118,7 @@ TEST(StaticShapeInferenceTest, detection_output_v0_default_ctor) {
StaticShape{4, 20}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 1, 800, 7}));
}
@ -142,7 +142,7 @@ TEST(StaticShapeInferenceTest, detection_output_v8_top_k) {
StaticShape{4, 10},
StaticShape{4, 20}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes[0], StaticShape({1, 1, 56, 7}));
}
@ -167,7 +167,7 @@ TEST(StaticShapeInferenceTest, detection_output_v8_no_share_location) {
StaticShape{4, 10},
StaticShape{4, 40}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes[0], StaticShape({1, 1, 40, 7}));
}
@ -190,7 +190,7 @@ TEST(StaticShapeInferenceTest, detection_output_v8_basic) {
StaticShape{4, 10},
StaticShape{4, 20}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes[0], (StaticShape{1, 1, 800, 7}));
}
@ -209,6 +209,6 @@ TEST(StaticShapeInferenceTest, detection_output_v8_default_ctor) {
StaticShape{4, 20}};
std::vector<StaticShape> output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 1, 800, 7}));
}
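
One leftover in these DetectionOutput tests is the "std::vector<StaticShape> output_shapes = {StaticShape{}};" placeholder, which the new API makes redundant since the result is returned by value. A slimmer form would be (a sketch, not part of this commit):

    const auto output_shapes = shape_inference(op.get(), input_shapes);
    ASSERT_EQ(output_shapes[0], (StaticShape{1, 1, 800, 7}));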

View File

@ -1,12 +1,14 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "openvino/op/einsum.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using testing::ElementsAre;
class EinsumStaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v7::Einsum> {};
@ -14,33 +16,38 @@ TEST_F(EinsumStaticShapeInferenceTest, dot_product) {
auto inputs = OutputVector(2, std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic()));
auto op = make_op(inputs, "i,i->");
check_static_shape(op.get(), {StaticShape{3}, StaticShape{3}}, {StaticShape{}});
output_shapes = shape_inference(op.get(), ShapeVector{{3}, {3}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{}));
}
TEST_F(EinsumStaticShapeInferenceTest, matmul) {
auto inputs = OutputVector(2, std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic()));
auto op = make_op(inputs, "ab,bc->ac");
check_static_shape(op.get(), {StaticShape{2, 3}, StaticShape{3, 4}}, {StaticShape{2, 4}});
output_shapes = shape_inference(op.get(), ShapeVector{{2, 3}, {3, 4}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2, 4}));
}
TEST_F(EinsumStaticShapeInferenceTest, trace) {
auto I1 = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
auto op = make_op(OutputVector{I1}, "kii->k");
check_static_shape(op.get(), {StaticShape{2, 3, 3}}, {StaticShape{2}});
output_shapes = shape_inference(op.get(), ShapeVector{{2, 3, 3}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2}));
}
TEST_F(EinsumStaticShapeInferenceTest, transpose) {
auto I1 = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
auto op = make_op(OutputVector{I1}, "ijk->kij");
check_static_shape(op.get(), {StaticShape{1, 2, 3}}, {StaticShape{3, 1, 2}});
output_shapes = shape_inference(op.get(), ShapeVector{{1, 2, 3}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 1, 2}));
}
TEST_F(EinsumStaticShapeInferenceTest, multi_matmul) {
auto inputs = OutputVector(3, std::make_shared<op::v0::Parameter>(element::i32, ov::PartialShape::dynamic()));
auto op = make_op(inputs, "ab,bcd,bc->ca");
check_static_shape(op.get(), {StaticShape{2, 5}, StaticShape{5, 3, 6}, StaticShape{5, 3}}, {StaticShape{3, 2}});
output_shapes = shape_inference(op.get(), ShapeVector{{2, 5}, {5, 3, 6}, {5, 3}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 2}));
}
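
Adding further cases in the migrated style is mechanical; a sketch for a hypothetical batched-matmul equation (not part of this commit):

    TEST_F(EinsumStaticShapeInferenceTest, batch_matmul_sketch) {
        auto inputs = OutputVector(2, std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic()));
        auto op = make_op(inputs, "bij,bjk->bik");

        output_shapes = shape_inference(op.get(), ShapeVector{{7, 2, 3}, {7, 3, 4}});
        EXPECT_THAT(output_shapes, ElementsAre(StaticShape{7, 2, 4}));
    }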

View File

@ -13,9 +13,8 @@ TEST(StaticShapeInferenceTest, UnaryEltwiseTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto node = std::make_shared<op::v0::Relu>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}},
static_output_shapes = {StaticShape{}};
shape_inference(node.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}};
const auto static_output_shapes = shape_inference(node.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 6, 5, 5}));
}
@ -29,15 +28,12 @@ TEST(StaticShapeInferenceTest, FakeQuantizeTest) {
auto node = std::make_shared<op::v0::FakeQuantize>(data, il, ih, ol, oh, 256);
std::vector<StaticShape> static_input_shapes = {
StaticShape{3, 6, 3, 5},
StaticShape{1, 3, 1},
StaticShape{1},
StaticShape{5},
StaticShape{1, 1, 1, 1}
},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 3, 5},
StaticShape{1, 3, 1},
StaticShape{1},
StaticShape{5},
StaticShape{1, 1, 1, 1}};
shape_inference(node.get(), static_input_shapes, static_output_shapes);
const auto static_output_shapes = shape_inference(node.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 6, 3, 5}));
}

View File

@ -26,10 +26,9 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, default_ctor) {
input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{4}, StaticShape{}, StaticShape{}, StaticShape{4}};
int64_t num_segments = 4;
const auto const_map =
std::map<size_t, HostTensorPtr>{{3, std::make_shared<HostTensor>(element::i64, Shape{}, &num_segments)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{3, {element::i64, Shape{}, &num_segments}}};
shape_inference(op.get(), input_shapes, output_shapes, const_map);
output_shapes = shape_inference(op.get(), input_shapes, const_map);
EXPECT_EQ(output_shapes[0], (StaticShape{4, 2, 6}));
}
@ -43,7 +42,7 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, constant_input) {
auto op = make_op(emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights);
input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{4}, StaticShape{}, StaticShape{}, StaticShape{4}},
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{3, 2, 6}));
}
@ -59,10 +58,9 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, constant_map) {
input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{4}, StaticShape{}, StaticShape{}, StaticShape{4}};
int64_t num_segm_val = 3;
const auto const_map =
std::map<size_t, HostTensorPtr>{{3, std::make_shared<HostTensor>(element::i64, Shape{}, &num_segm_val)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{3, {element::i64, Shape{}, &num_segm_val}}};
shape_inference(op.get(), input_shapes, output_shapes, const_map);
output_shapes = shape_inference(op.get(), input_shapes, const_map);
EXPECT_EQ(output_shapes[0], (StaticShape{3, 2, 6}));
}
@ -76,12 +74,11 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, basic) {
auto op = make_op(emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights);
check_static_shape(
op.get(),
{StaticShape{5, 2}, StaticShape{4}, StaticShape{4}, StaticShape{}, StaticShape{}, StaticShape{4}},
{StaticShape{3, 2}});
output_shapes = shape_inference(op.get(), ShapeVector{{5, 2}, {4}, {4}, {}, {}, {4}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 2}));
check_static_shape(op.get(),
{StaticShape{5, 2}, StaticShape{4}, StaticShape{4}, 8, StaticShape{}, StaticShape{4}},
{StaticShape{8, 2}});
int64_t num_segm_val = 8;
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{3, {element::i64, Shape{}, &num_segm_val}}};
output_shapes = shape_inference(op.get(), ShapeVector{{5, 2}, {4}, {4}, {}, {}, {4}}, const_map);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{8, 2}));
}
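
The scalar num_segments entry above is worth calling out: a rank-0 ov::Tensor wraps the address of a single value, and the declared element type has to match the C++ type of that value because the data is reinterpreted, not converted. Minimal sketch:

    int64_t num_segments = 8;  // must really be int64_t for an element::i64 view
    const std::unordered_map<size_t, ov::Tensor> const_map{
        {3, {ov::element::i64, ov::Shape{}, &num_segments}}};  // rank-0 (scalar) entry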

View File

@ -1,12 +1,12 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <array>
#include "common_test_utils/test_assertions.hpp"
#include "embeddingbag_offsets_shape_inference.hpp"
#include "gmock/gmock.h"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
@ -31,19 +31,19 @@ TEST_F(EmbeddingBagOffsetsSumV3StaticShapeInferenceTest, default_ctor) {
// 3 inputs
{
input_shapes = {StaticShape{3, 4, 5, 6}, StaticShape{2}, StaticShape{batch}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
// 4 inputs
{
input_shapes = {StaticShape{3, 4, 5, 6}, StaticShape{2}, StaticShape{batch}, StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
// 5 inputs
{
input_shapes = {StaticShape{3, 4, 5, 6}, StaticShape{2}, StaticShape{batch}, StaticShape{}, StaticShape{2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
}
@ -58,7 +58,7 @@ TEST_F(EmbeddingBagOffsetsSumV3StaticShapeInferenceTest, basic_3in) {
auto expected_output = StaticShape{3, 2, 6};
input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
@ -73,7 +73,7 @@ TEST_F(EmbeddingBagOffsetsSumV3StaticShapeInferenceTest, basic_4in) {
auto expected_output = StaticShape{3, 2, 6};
input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{3}, StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
@ -89,6 +89,6 @@ TEST_F(EmbeddingBagOffsetsSumV3StaticShapeInferenceTest, basic_5in) {
auto expected_output = StaticShape{3, 2, 6};
input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{3}, StaticShape{}, StaticShape{4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}

View File

@ -1,11 +1,12 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <array>
#include "common_test_utils/test_assertions.hpp"
#include "embeddingbag_packed_shape_inference.hpp"
#include "gmock/gmock.h"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
@ -30,13 +31,13 @@ TEST_F(EmbeddingBagPackedSumV3StaticShapeInferenceTest, default_ctor) {
// 2 inputs
{
input_shapes = {StaticShape{3, 4, 5, 6}, StaticShape{batch, 2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
// 3 inputs
{
input_shapes = {StaticShape{3, 4, 5, 6}, StaticShape{batch, 2}, StaticShape{batch, 2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], expected_output);
}
}
@ -49,7 +50,7 @@ TEST_F(EmbeddingBagPackedSumV3StaticShapeInferenceTest, basic_2in) {
auto op = make_op(emb_table, indices);
input_shapes = {StaticShape{5, 2, 6}, StaticShape{3, 4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{3, 2, 6}));
}
@ -61,6 +62,6 @@ TEST_F(EmbeddingBagPackedSumV3StaticShapeInferenceTest, basic_3in) {
auto op = make_op(emb_table, indices, per_sample_weights);
input_shapes = {StaticShape{5, 2, 6}, StaticShape{3, 4}, StaticShape{3, 4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{3, 2, 6}));
}

View File

@ -30,7 +30,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, default_c
op->set_attrs({.05f, .5f, 4.1352f, 12, 20, 7, false, {10.0f, 10.0f, 5.0f, 5.0f}});
input_shapes = ShapeVector{{10, 4}, {10, 48}, {10, 12}, {1, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{7, 4}, {7}, {7}}));
}
@ -43,7 +43,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, inputs_dy
op = make_op(rois, deltas, scores, im_info, make_attrs());
input_shapes = ShapeVector{{10, 4}, {10, 40}, {10, 10}, {1, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{5, 4}, {5}, {5}}));
}
@ -56,7 +56,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, inputs_st
op = make_op(rois, deltas, scores, im_info, make_attrs());
input_shapes = ShapeVector{{10, 4}, {10, 40}, {10, 10}, {1, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{5, 4}, {5}, {5}}));
}
@ -69,7 +69,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, im_info_b
op = make_op(rois, deltas, scores, im_info, make_attrs());
input_shapes = ShapeVector{{10, 4}, {10, 40}, {10, 10}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Input image info shape must be compatible with [1,3]"));
}
@ -82,7 +82,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, deltas_no
op = make_op(rois, deltas, scores, im_info, make_attrs());
input_shapes = ShapeVector{{10, 4}, {10, 40, 1}, {10, 10}, {1, 3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Input deltas rank must be equal to 2"));
}
@ -96,7 +96,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, rois_1st_
input_shapes = ShapeVector{{9, 4}, {10, 40}, {10, 10}, {1, 3}};
OV_EXPECT_THROW(
shape_inference(op.get(), input_shapes, output_shapes),
shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The first dimension of inputs 'input_rois', 'input_deltas', 'input_scores' must be the compatible"));
}

View File

@ -29,7 +29,7 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe
op->set_attrs({0.0f, 0.0f, 100, 0});
input_shapes = ShapeVector{{3}, {12, 4}, {3, 12, 15}, {5, 12, 15}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{100, 4}, {100}}));
}
@ -42,7 +42,7 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe
op = make_op(im_info, anchors, deltas, scores, make_attrs(100));
input_shapes = ShapeVector{{3}, {12, 4}, {3, 12, 15}, {5, 12, 15}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{100, 4}, {100}}));
}
@ -55,7 +55,7 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe
op = make_op(im_info, anchors, deltas, scores, make_attrs(1000));
input_shapes = ShapeVector{{3}, {12, 4}, {3, 120, 15}, {5, 120, 15}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{1000, 4}, {1000}}));
}
@ -68,7 +68,7 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe
op = make_op(im_info, anchors, deltas, scores, make_attrs(40));
input_shapes = ShapeVector{{4}, {12, 4}, {3, 120, 15}, {5, 120, 15}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The 'input_im_info' shape is expected to be a compatible with [3]"));
}
@ -81,7 +81,7 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe
op = make_op(im_info, anchors, deltas, scores, make_attrs(40));
input_shapes = ShapeVector{{3}, {12, 4}, {3, 120, 15, 1}, {5, 120, 15}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The 'input_deltas' input is expected to be a 3D"));
}

View File

@ -30,7 +30,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, defaul
op->set_attrs({true, 0, 0, 5.0f, 5.0f});
input_shapes = ShapeVector{{3, 4}, {1, 5, 7, 2}, {1, 5, 50, 50}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes, ShapeVector({{42, 4}}));
}
@ -42,7 +42,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, inputs
op = make_op(priors, feat_map, im_data, make_attrs(false));
input_shapes = ShapeVector{{10, 4}, {1, 2, 4, 5}, {1, 2, 100, 100}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({4, 5, 10, 4}));
@ -55,7 +55,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, inputs
op = make_op(priors, feat_map, im_data, make_attrs(true));
input_shapes = ShapeVector{{10, 4}, {1, 2, 4, 5}, {1, 2, 100, 100}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({200, 4}));
@ -68,7 +68,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, feat_m
op = make_op(priors, feat_map, im_data, make_attrs(true));
input_shapes = ShapeVector{{10, 4}, {1, 2, 4, 5, 1}, {1, 2, 100, 100}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Feature_map rank must be equal to 4"));
}
@ -80,7 +80,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, priors
op = make_op(priors, feat_map, im_data, make_attrs(true));
input_shapes = ShapeVector{{10, 5}, {1, 2, 4, 5}, {1, 2, 100, 100}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The last dimension of the 'priors' input must be equal to 4"));
}

View File

@ -28,7 +28,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, defau
op->set_attrs(make_attrs(16));
input_shapes = ShapeVector{{1000, 4}, {1, 5, 8, 8}, {1, 5, 16, 16}, {1, 5, 64, 64}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1000, 5, 16, 16}, StaticShape{1000, 4}));
}
@ -40,7 +40,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, input
op = make_op(OutputVector{rois, layer_0, layer_1}, make_attrs(100));
input_shapes = ShapeVector{{25, 4}, {1, 2, 100, 100}, {1, 2, 20, 300}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{25, 2, 100, 100}, StaticShape{25, 4}));
}
@ -54,7 +54,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, input
op = make_op(OutputVector{rois, layer_0, layer_1, layer_2, layer_3}, make_attrs(15));
input_shapes = ShapeVector{{25, 4}, {1, 2, 100, 100}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 2, 200, 50}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{25, 2, 15, 15}, StaticShape{25, 4}));
}
@ -67,7 +67,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, rois_
op = make_op(OutputVector{rois, layer_0, layer_1, layer_2}, make_attrs(15));
input_shapes = ShapeVector{{25, 4, 1}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 2, 200, 50}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Input rois rank must be equal to 2"));
}
@ -80,7 +80,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, layer
op = make_op(OutputVector{rois, layer_0, layer_1, layer_2}, make_attrs(15));
input_shapes = ShapeVector{{25, 4}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 3, 200, 50}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The number of channels must be the same for all layers of the pyramid"));
}

View File

@ -25,7 +25,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, default_ctor) {
op->set_max_rois(100);
input_shapes = ShapeVector{{12, 4}, {12}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({100, 4}));
@ -37,7 +37,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, inputs_dynamic_r
op = make_op(input_rois, rois_probs, 5);
input_shapes = ShapeVector{{10, 4}, {10}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 4}));
@ -49,7 +49,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, inputs_static_ra
op = make_op(input_rois, rois_probs, 15);
input_shapes = ShapeVector{{100, 4}, {100}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({15, 4}));
@ -62,7 +62,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, input_rois_not_2
input_shapes = ShapeVector{{10, 4, 10}, {10}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The 'input_rois' input is expected to be a 2D."));
}
@ -74,7 +74,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, rois_prob_not_1d
input_shapes = ShapeVector{{10, 4}, {10, 2}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The 'rois_probs' input is expected to be a 1D."));
}
@ -86,7 +86,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, input_rois_secon
input_shapes = ShapeVector{{10, 5}, {10}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The second dimension of 'input_rois' should be 4."));
}

View File

@ -25,7 +25,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, default_ctor_no_args) {
op->set_auto_pad(pad_type);
input_shapes = ShapeVector{{10, 8, 12, 6}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 72, 2, 1}));
@ -36,7 +36,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, data_input_is_dynamic_rank) {
op = make_op(data, ov::Shape{3, 3}, ov::Strides{5, 5}, ov::Shape{2, 2}, op::PadType::VALID);
input_shapes = ShapeVector{{2, 2, 23, 24}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 18, 4, 4}));
@ -47,7 +47,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, data_input_is_static_rank) {
op = make_op(data, ov::Shape{3, 3}, ov::Strides{5, 5}, ov::Shape{1, 1}, op::PadType::SAME_UPPER);
input_shapes = ShapeVector{{2, 2, 43, 34}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 18, 9, 7}));
@ -57,7 +57,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, data_shape_not_compatible_rank_4) {
const auto data = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic(4));
op = make_op(data, ov::Shape{3, 3}, ov::Strides{5, 5}, ov::Shape{1, 1}, op::PadType::SAME_UPPER);
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 12, 24, 1}}, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 12, 24, 1}}),
NodeValidationFailure,
HasSubstr("input tensor must be 4D tensor"));
}

View File

@ -2,10 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <array>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
@ -30,7 +31,7 @@ TEST_F(EyeV9StaticShapeInferenceTest, parameters_as_constant) {
const auto op = make_op(rows, cols, diag, batch, element::f64);
input_shapes = ShapeVector{rows->get_shape(), cols->get_shape(), diag->get_shape(), batch->get_shape()};
shape_inference(op.get(), input_shapes, output_shapes, {});
const auto output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 5, 4}));
@ -46,13 +47,12 @@ TEST_F(EyeV9StaticShapeInferenceTest, parameters_in_const_data_map) {
int32_t rows = 3, cols = 8;
auto batch = std::array<int32_t, 3>{2, 4, 1};
const auto const_data =
std::map<size_t, HostTensorPtr>{{0, std::make_shared<HostTensor>(element::i32, Shape{}, &rows)},
{1, std::make_shared<HostTensor>(element::i32, Shape{1}, &cols)},
{3, std::make_shared<HostTensor>(element::i32, Shape{3}, batch.data())}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{}, &rows}},
{1, {element::i32, Shape{1}, &cols}},
{3, {element::i32, Shape{3}, batch.data()}}};
input_shapes = ShapeVector{{}, {1}, {1}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 4, 1, 3, 8}));
@ -69,13 +69,13 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_negative_rows) {
int64_t rows = -3, cols = 8;
auto batch = std::array<int32_t, 3>{2, 4, 1};
const auto const_data =
std::map<size_t, HostTensorPtr>{{0, std::make_shared<HostTensor>(element::i64, Shape{}, &rows)},
{1, std::make_shared<HostTensor>(element::i64, Shape{1}, &cols)},
{3, std::make_shared<HostTensor>(element::i32, Shape{3}, batch.data())}};
std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{}, &rows}},
{1, {element::i32, Shape{1}, &cols}},
{3, {element::i32, Shape{batch.size()}, batch.data()}}};
input_shapes = ShapeVector{{}, {1}, {1}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
AssertFailure,
HasSubstr("Value -3 not in range [0:"));
}
@ -91,13 +91,13 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_negative_columns) {
int64_t rows = 3, cols = -8;
auto batch = std::array<int32_t, 3>{2, 4, 1};
const auto const_data =
std::map<size_t, HostTensorPtr>{{0, std::make_shared<HostTensor>(element::i64, Shape{}, &rows)},
{1, std::make_shared<HostTensor>(element::i64, Shape{1}, &cols)},
{3, std::make_shared<HostTensor>(element::i32, Shape{3}, batch.data())}};
std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{}, &rows}},
{1, {element::i32, Shape{1}, &cols}},
{3, {element::i32, Shape{batch.size()}, batch.data()}}};
input_shapes = ShapeVector{{}, {1}, {1}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
AssertFailure,
HasSubstr("Value -8 not in range [0:"));
}
@ -113,14 +113,14 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_rows_not_1D) {
int64_t cols = 8;
auto rows = std::array<int64_t, 2>{2, 1};
auto batch = std::array<int32_t, 3>{2, 4, 1};
const auto const_data = std::map<size_t, HostTensorPtr>{
{0, std::make_shared<HostTensor>(element::i64, Shape{rows.size()}, rows.data())},
{1, std::make_shared<HostTensor>(element::i64, Shape{1}, &cols)},
{3, std::make_shared<HostTensor>(element::i32, Shape{batch.size()}, batch.data())}};
const auto const_data =
std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{rows.size()}, &rows}},
{1, {element::i32, Shape{1}, &cols}},
{3, {element::i32, Shape{batch.size()}, batch.data()}}};
input_shapes = ShapeVector{{}, {1}, {1}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
NodeValidationFailure,
HasSubstr("'num_rows' value must be a scalar or 1D tensor. Got:"));
}
@ -136,14 +136,14 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_columns_not_1D) {
int64_t rows = 8;
auto cols = std::array<int64_t, 2>{2, 1};
auto batch = std::array<int32_t, 3>{2, 4, 1};
const auto const_data = std::map<size_t, HostTensorPtr>{
{0, std::make_shared<HostTensor>(element::i64, Shape{}, &rows)},
{1, std::make_shared<HostTensor>(element::i64, Shape{cols.size()}, cols.data())},
{3, std::make_shared<HostTensor>(element::i32, Shape{batch.size()}, batch.data())}};
const auto const_data =
std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{}, &rows}},
{1, {element::i32, Shape{cols.size()}, &cols}},
{3, {element::i32, Shape{batch.size()}, batch.data()}}};
input_shapes = ShapeVector{{1}, {}, {1}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
NodeValidationFailure,
HasSubstr("'num_columns' value must be a scalar or 1D tensor. Got:"));
}
@ -158,14 +158,14 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_batch_shape_not_match_shape_in_c
int64_t rows = 8, cols = 5;
auto batch = std::array<int32_t, 3>{2, 4, 1};
const auto const_data = std::map<size_t, HostTensorPtr>{
{0, std::make_shared<HostTensor>(element::i64, Shape{}, &rows)},
{1, std::make_shared<HostTensor>(element::i64, Shape{}, &cols)},
{3, std::make_shared<HostTensor>(element::i32, Shape{batch.size()}, batch.data())}};
const auto const_data =
std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{}, &rows}},
{1, {element::i32, Shape{}, &cols}},
{3, {element::i32, Shape{batch.size()}, batch.data()}}};
input_shapes = ShapeVector{{}, {}, {}, {2}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
NodeValidationFailure,
HasSubstr("Check 'static_cast<int64_t>(batch_shape[0].get_length()) == "
"static_cast<int64_t>(batch_as_shape->rank().get_length())'"));

View File

@ -49,108 +49,96 @@ static std::shared_ptr<op::v7::IDFT> build_idft_signal() {
TEST(StaticShapeInferenceTest, DFTTest) {
auto DFT = build_dft();
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {1, 2};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}};
shape_inference(DFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(DFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 320, 320, 2}));
}
TEST(StaticShapeInferenceTest, DFTSignalTest) {
auto DFT = build_dft_signal();
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {1, 2};
int32_t signal_val[] = {512, 100};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, signal_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}},
{2, {element::i32, ov::Shape{2}, signal_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}};
shape_inference(DFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(DFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 512, 100, 2}));
}
TEST(StaticShapeInferenceTest, DFTConstantTest) {
auto DFT = build_dft_constant();
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}};
shape_inference(DFT.get(), static_input_shapes, static_output_shapes);
const auto static_output_shapes = shape_inference(DFT.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 512, 100, 2}));
}
TEST(StaticShapeInferenceTest, DFTSignalMissingConstDataTest) {
auto DFT = build_dft_signal();
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {1, 2};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(DFT.get(), static_input_shapes, static_output_shapes, constant_data),
NodeValidationFailure);
int32_t axes_val[] = {1, 2};
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}};
EXPECT_THROW(shape_inference(DFT.get(), static_input_shapes, constant_data), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, IDFTTest) {
auto IDFT = build_idft();
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {1, 2};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}};
shape_inference(IDFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(IDFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 320, 320, 2}));
}
TEST(StaticShapeInferenceTest, IDFTSignalTest) {
auto IDFT = build_idft_signal();
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {1, 2};
int32_t signal_val[] = {512, 100};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, signal_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}},
{2, {element::i32, ov::Shape{2}, signal_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}};
shape_inference(IDFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(IDFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 512, 100, 2}));
}
TEST(StaticShapeInferenceTest, IDFTSignalMissingConstDataTest) {
auto IDFT = build_idft_signal();
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {1, 2};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 320, 320, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(IDFT.get(), static_input_shapes, static_output_shapes, constant_data),
NodeValidationFailure);
EXPECT_THROW(shape_inference(IDFT.get(), static_input_shapes, constant_data), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, RDFT) {
auto input_shape = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto axes = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto RDFT = std::make_shared<ov::op::v9::RDFT>(input_shape, axes);
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {2, 3};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}};
shape_inference(RDFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(RDFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 120, 64, 33, 2}));
}
@ -159,17 +147,15 @@ TEST(StaticShapeInferenceTest, RDFTWithSignalSizes) {
auto axes = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto signal = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto RDFT = std::make_shared<ov::op::v9::RDFT>(input_shape, axes, signal);
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {2, 3};
int32_t signal_val[] = {40, 30};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, signal_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}},
{2, {element::i32, ov::Shape{2}, signal_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}};
shape_inference(RDFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(RDFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 120, 40, 16, 2}));
}
@ -179,10 +165,9 @@ TEST(StaticShapeInferenceTest, RDFTWithConstAxesAndSignalSizes) {
auto signal = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{64, 64});
auto RDFT = std::make_shared<ov::op::v9::RDFT>(input_shape, axes, signal);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}};
shape_inference(RDFT.get(), static_input_shapes, static_output_shapes);
const auto static_output_shapes = shape_inference(RDFT.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 120, 64, 33, 2}));
}
@ -191,28 +176,25 @@ TEST(StaticShapeInferenceTest, RDFTMissingSignalTensor) {
auto axes = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto signal = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto RDFT = std::make_shared<ov::op::v9::RDFT>(input_shape, axes, signal);
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {2, 3};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(RDFT.get(), static_input_shapes, static_output_shapes, constant_data),
NodeValidationFailure);
int32_t axes_val[] = {2, 3};
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}};
EXPECT_THROW(shape_inference(RDFT.get(), static_input_shapes, constant_data), NodeValidationFailure);
}
TEST(StaticShapeInferenceTest, IRDFT) {
auto input_shape = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
auto axes = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto IRDFT = std::make_shared<ov::op::v9::IRDFT>(input_shape, axes);
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {2, 3};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 33, 2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 33, 2}, StaticShape{2}};
shape_inference(IRDFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(IRDFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 120, 64, 64}));
}
@ -221,17 +203,15 @@ TEST(StaticShapeInferenceTest, IRDFTWithSignalSizes) {
auto axes = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto signal = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto IRDFT = std::make_shared<ov::op::v9::IRDFT>(input_shape, axes, signal);
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {2, 3};
int32_t signal_val[] = {64, 64};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, signal_val);
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}},
{2, {element::i32, ov::Shape{2}, signal_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 33, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 33, 2}, StaticShape{2}, StaticShape{2}};
shape_inference(IRDFT.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(IRDFT.get(), static_input_shapes, constant_data);
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 120, 64, 64}));
}
@ -240,12 +220,10 @@ TEST(StaticShapeInferenceTest, IRDFTMissingSignalSizesTensor) {
auto axes = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto signal = std::make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
auto IRDFT = std::make_shared<ov::op::v9::IRDFT>(input_shape, axes, signal);
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
int32_t axes_val[] = {2, 3};
constant_data[1] = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, Shape{2}, axes_val);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 33, 2}, StaticShape{2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
EXPECT_THROW(shape_inference(IRDFT.get(), static_input_shapes, static_output_shapes, constant_data),
NodeValidationFailure);
int32_t axes_val[] = {2, 3};
auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 120, 64, 33, 2}, StaticShape{2}, StaticShape{2}};
EXPECT_THROW(shape_inference(IRDFT.get(), static_input_shapes, constant_data), NodeValidationFailure);
}
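For reference, a minimal self-contained sketch of the migrated calling convention used throughout these tests; it assumes the same headers as the files above, and the test name and local variable names are illustrative only, not part of the change:

#include <gtest/gtest.h>

#include <memory>
#include <unordered_map>
#include <vector>

#include "openvino/op/parameter.hpp"
#include "openvino/op/rdft.hpp"
#include "shape_inference/shape_inference.hpp"
#include "utils.hpp"

using namespace ov;
using namespace ov::intel_cpu;

TEST(StaticShapeInferenceExample, rdft_axes_via_const_data_map) {
    // Dynamic Parameters; the axes values come from the constant-data map instead of Constant nodes.
    auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    auto axes = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
    auto rdft = std::make_shared<op::v9::RDFT>(data, axes);

    int32_t axes_val[] = {2, 3};
    // Port 1 (axes) is backed by a non-owning ov::Tensor that wraps the stack buffer.
    const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};

    std::vector<StaticShape> input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}};
    // The new overload returns the inferred shapes instead of filling an out-parameter.
    const auto output_shapes = shape_inference(rdft.get(), input_shapes, const_data);

    ASSERT_EQ(output_shapes[0], StaticShape({1, 120, 64, 33, 2}));
}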

View File

@ -23,7 +23,7 @@ TEST_F(GatherElementsStaticShapeInferenceTest, GatherElements_basic) {
input_shapes = {StaticShape{300, 3, 10, 2}, StaticShape{300, 3, 10, 33333}};
output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{300, 3, 10, 33333}));
}
@ -35,7 +35,7 @@ TEST_F(GatherElementsStaticShapeInferenceTest, GatherElements_incompatible_rank)
op = make_op(data, indices, axis);
input_shapes = {StaticShape{1, 2, 3, 4, 5}, StaticShape{1, 2, 3, 4}};
output_shapes = {StaticShape{}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
ov::NodeValidationFailure,
HasSubstr("rank must be equal"));
}
@ -48,7 +48,7 @@ TEST_F(GatherElementsStaticShapeInferenceTest, GatherElements_incompatible_dims)
op = make_op(data, indices, axis);
input_shapes = {StaticShape{300, 4, 10, 2}, StaticShape{300, 5, 10, 33333}};
output_shapes = {StaticShape{}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
ov::NodeValidationFailure,
HasSubstr("are not consistent"));
}
@ -60,6 +60,6 @@ TEST_F(GatherElementsStaticShapeInferenceTest, GatherElements_default_constructo
input_shapes = {StaticShape{300, 3, 10, 2}, StaticShape{300, 3, 10, 33333}};
output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{300, 3, 10, 33333}));
}

View File

@ -34,8 +34,7 @@ template <typename TGatherND>
void run_gather_nd_test(const GatherNDTestParams& test_params) {
auto op = make_gather_nd<TGatherND>(test_params.batch_dims);
ShapeVector output_shapes(1);
shape_inference(op.get(), test_params.input_shapes, output_shapes);
auto output_shapes = shape_inference(op.get(), test_params.input_shapes);
EXPECT_EQ(output_shapes[0], test_params.exp_shape)
<< "Failed for input shapes: " << ov::util::vector_to_string(test_params.input_shapes)
@ -105,7 +104,7 @@ TYPED_TEST_P(StaticShapeInferenceGatherNDTest, gather_nd_common_default_ctor) {
ShapeVector input_shapes{{8, 3, 11, 12}, {8, 5, 2}};
ShapeVector output_shapes(1);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{8, 5, 12}));
}

View File

@ -4,12 +4,11 @@
#include <gtest/gtest.h>
#include <openvino/op/constant.hpp>
#include <openvino/op/gather.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/util/common_util.hpp>
#include <shape_inference/shape_inference.hpp>
#include "openvino/op/constant.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/util/common_util.hpp"
#include "shape_inference/shape_inference.hpp"
#include "utils.hpp"
using namespace ov;
@ -61,7 +60,7 @@ TYPED_TEST_P(StaticShapeInferenceGatherTest, axis_const) {
auto op = this->make_gather(this->input_shapes, &this->axis_val);
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), this->exp_shape)
<< "Failed for axis: " << this->axis_val
@ -74,9 +73,9 @@ TYPED_TEST_P(StaticShapeInferenceGatherTest, axis_in_const_map) {
std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params;
auto op = this->make_gather(this->input_shapes);
auto axis_tensor = std::make_shared<HostTensor>(element::i32, Shape{1}, &this->axis_val);
auto axis_tensor = ov::Tensor(element::i32, Shape{1}, &this->axis_val);
shape_inference(op.get(), this->input_shapes, this->output_shapes, {{2, axis_tensor}});
this->output_shapes = shape_inference(op.get(), this->input_shapes, {{2, axis_tensor}});
ASSERT_EQ(this->output_shapes.front(), this->exp_shape)
<< "Failed for axis: " << this->axis_val

View File

@ -23,7 +23,7 @@ TEST_F(GatherTreeStaticShapeInferenceTest, gather_tree) {
input_shapes = {StaticShape{1, 2, 3}, StaticShape{1, 2, 3}, StaticShape{2}, StaticShape{}};
output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 2, 3}));
}
@ -31,6 +31,6 @@ TEST_F(GatherTreeStaticShapeInferenceTest, gather_tree_default_ctor) {
op = make_op();
input_shapes = {StaticShape{2, 4, 3}, StaticShape{2, 4, 3}, StaticShape{4}, StaticShape{}};
output_shapes = {StaticShape{}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 4, 3}));
}

View File

@ -23,7 +23,7 @@ TEST_F(GridSampleStaticShapeInferenceTest, GridSample) {
output_shapes = {StaticShape{}};
exp_shape = StaticShape{2, 3, 6, 7};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], exp_shape);
}
@ -34,6 +34,6 @@ TEST_F(GridSampleStaticShapeInferenceTest, GridSample_default_constructor) {
output_shapes = {StaticShape{}};
exp_shape = StaticShape{2, 3, 6, 7};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], exp_shape);
}

View File

@ -53,8 +53,7 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor) {
op->set_auto_pad(op::PadType::EXPLICIT);
int32_t spatial_shape[] = {5, 10, 15};
const auto const_data =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, spatial_shape)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::i32, Shape{3}, spatial_shape}}};
input_shapes = ShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}};
auto shape_infer = make_shape_inference(op);
@ -77,8 +76,7 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor_more_i
op->set_auto_pad(op::PadType::EXPLICIT);
int32_t spatial_shape[] = {5, 10, 15};
const auto const_data =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, spatial_shape)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::i32, Shape{3}, spatial_shape}}};
// More than three inputs can be provided, but not used
input_shapes = ShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}, {0}};
@ -105,7 +103,7 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 2d_inputs_dynamic_r
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{1, 2, 5, 5}, {2, 1, 2, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 7, 7}));
@ -125,7 +123,7 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 3d_auto_pad_same_lo
op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 6, 2, 1, 3}));
@ -144,11 +142,10 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 3d_auto_pad_same_up
op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);
int32_t spatial_dims[] = {2, 6, 1};
const auto const_data =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, spatial_dims)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::i32, Shape{3}, spatial_dims}}};
input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {1, 5, 1, 3, 3, 3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 1, 2, 6, 1}));

View File

@ -90,7 +90,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 1d_explicit_pads_inputs_stati
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{1, 12, 20}, {12, 1, 1, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 12, 18}));
@ -109,7 +109,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 2d_auto_pads_same_lower_input
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1, 2, 5, 5}));
@ -128,7 +128,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_inputs
op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);
input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({3, 6, 5, 5, 5}));

View File

@ -30,7 +30,7 @@ TEST_F(GRUCellV3StaticShapeInferenceTest, default_ctor) {
StaticShape{gates_count * hidden_size, hidden_size}, // R
StaticShape{gates_count * hidden_size}}; // B
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -54,7 +54,7 @@ TEST_F(GRUCellV3StaticShapeInferenceTest, default_bias) {
StaticShape{gates_count * hidden_size, hidden_size}, // R
StaticShape{gates_count * hidden_size}}; // B
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -79,7 +79,7 @@ TEST_F(GRUCellV3StaticShapeInferenceTest, with_bias) {
output_shapes = {StaticShape{}, StaticShape{}};
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -115,7 +115,7 @@ TEST_F(GRUCellV3StaticShapeInferenceTest, linear_before) {
output_shapes = {StaticShape{}, StaticShape{}};
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -139,6 +139,6 @@ TEST_F(GRUCellV3StaticShapeInferenceTest, dynamic_rank_inputs) {
StaticShape{gates_count * hidden_size, hidden_size}, // R
StaticShape{gates_count * hidden_size}}; // B
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}

View File

@ -33,7 +33,7 @@ TEST_F(GRUSequenceV5StaticShapeInferenceTest, default_ctor) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(gru_sequence.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru_sequence.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -65,7 +65,7 @@ TEST_F(GRUSequenceV5StaticShapeInferenceTest, FORWARD) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(gru_sequence.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru_sequence.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -108,7 +108,7 @@ TEST_F(GRUSequenceV5StaticShapeInferenceTest, FORWARD_linear_before) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, (gates_count + 1) * hidden_size}}; // B
shape_inference(gru_sequence.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru_sequence.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -140,7 +140,7 @@ TEST_F(GRUSequenceV5StaticShapeInferenceTest, REVERSE) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(gru_sequence.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru_sequence.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -172,7 +172,7 @@ TEST_F(GRUSequenceV5StaticShapeInferenceTest, BIDIRECTIONAL) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(gru_sequence.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru_sequence.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}

View File

@ -30,11 +30,10 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, default_ctor_no_attributes) {
op->set_attrs(attrs);
int32_t out_shape_v[] = {10, 20, 30};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{3}, out_shape_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, out_shape_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 2, 20, 128, 128, 30}));
@ -48,7 +47,7 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, out_shape_as_constant) {
op = make_op(img, out_shape, attrs);
input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 100, 128, 100, 128}));
@ -62,11 +61,10 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, all_inputs_dynamic_rank_use_scales
op = make_op(img, out_shape, attrs);
int32_t out_shape_v[] = {10, 20, 30};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{3}, out_shape_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, out_shape_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 2, 10, 128, 20, 30}));
@ -80,11 +78,10 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, all_inputs_static_rank_use_sizes)
op = make_op(img, out_shape, attrs);
int32_t out_shape_v[] = {10, 20, 30};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{3}, out_shape_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, out_shape_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 20, 30, 128, 128, 64}));
@ -111,12 +108,11 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, default_ctor_no_attributes) {
float scales_v[] = {1.5f, 3.0f, 0.2f};
int32_t axes_v[] = {2, 0, 5};
const auto const_data =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::f32, Shape{3}, scales_v)},
{3, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::f32, Shape{3}, scales_v}},
{3, {element::i32, Shape{3}, axes_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({15, 2, 192, 128, 128, 12}));
@ -132,7 +128,7 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, scales_as_constant) {
op = make_op(img, sizes, scales, axes, attrs);
input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {1}, {2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 4, 128, 89, 128}));
@ -146,7 +142,7 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, sizes_as_constant) {
op = make_op(img, sizes, scales, axes, attrs);
input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}, {1}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 5, 128, 10, 128}));
@ -164,12 +160,11 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, all_inputs_dynamic_rank_use_scales
float scales_v[] = {1.5f, 3.0f, 0.2f};
int32_t axes_v[] = {2, 0, 5};
const auto const_data =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::f32, Shape{3}, scales_v)},
{3, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::f32, Shape{3}, scales_v}},
{3, {element::i32, Shape{3}, axes_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({18, 3, 193, 129, 129, 12}));
@ -186,12 +181,11 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, all_inputs_static_rank_use_sizes)
int32_t sizes_v[] = {10, 50, 60};
int32_t axes_v[] = {1, 0, 3};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{3}, sizes_v)},
{3, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, sizes_v}},
{3, {element::i32, Shape{3}, axes_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({50, 10, 128, 60, 128, 64}));
@ -218,12 +212,11 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, default_ctor_no_attributes) {
float scales_v[] = {1.5f, 3.0f, 0.2f};
int32_t axes_v[] = {2, 0, 5};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::f32, Shape{3}, scales_v)},
{2, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::f32, Shape{3}, scales_v}},
{2, {element::i32, Shape{3}, axes_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({15, 2, 192, 128, 128, 12}));
@ -238,7 +231,7 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, scales_as_constant) {
op = make_op(img, scales, axes, attrs);
input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 4, 128, 89, 128}));
@ -251,7 +244,7 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, sizes_as_constant) {
op = make_op(img, sizes, axes, attrs);
input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({5, 5, 128, 10, 128}));
@ -267,12 +260,11 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, all_inputs_dynamic_rank_use_scale
float scales_v[] = {1.5f, 3.0f, 0.2f};
int32_t axes_v[] = {2, 0, 5};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::f32, Shape{3}, scales_v)},
{2, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::f32, Shape{3}, scales_v}},
{2, {element::i32, Shape{3}, axes_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({15, 2, 192, 128, 128, 12}));
@ -288,12 +280,11 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, all_inputs_static_rank_use_sizes)
int32_t sizes_v[] = {10, 50, 60};
int32_t axes_v[] = {1, 0, 3};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{3}, sizes_v)},
{2, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_v)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, sizes_v}},
{2, {element::i32, Shape{3}, axes_v}}};
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({50, 10, 128, 60, 128, 64}));

View File

@ -2,8 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/op/logical_not.hpp"
#include "openvino/op/parameter.hpp"
#include "utils.hpp"
@ -25,7 +26,7 @@ TEST_F(LogicalNotStaticShapeInferenceTest, static_rank) {
this->input_shapes = {StaticShape{3, 4, 7, 5}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({3, 4, 7, 5}));
}
@ -36,7 +37,7 @@ TEST_F(LogicalNotStaticShapeInferenceTest, dynamic_rank) {
this->input_shapes = {StaticShape{3, 1, 5, 2}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
ASSERT_EQ(this->output_shapes.front(), StaticShape({3, 1, 5, 2}));
}

View File

@ -30,7 +30,7 @@ TEST_F(LSTMCellV4StaticShapeInferenceTest, default_ctor) {
StaticShape{gates_count * hidden_size, input_size},
StaticShape{gates_count * hidden_size, hidden_size},
StaticShape{gates_count * hidden_size}},
shape_inference(lstm_cell.get(), input_shapes, output_shapes);
output_shapes = shape_inference(lstm_cell.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, hidden_size}));
}
@ -55,7 +55,7 @@ TEST_F(LSTMCellV4StaticShapeInferenceTest, basic_shape_infer) {
StaticShape{gates_count * hidden_size, input_size},
StaticShape{gates_count * hidden_size, hidden_size},
StaticShape{gates_count * hidden_size}},
shape_inference(lstm_cell.get(), input_shapes, output_shapes);
output_shapes = shape_inference(lstm_cell.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, hidden_size}));
}
@ -81,9 +81,8 @@ TEST(StaticShapeInferenceTest, LSTMCellV0Test) {
StaticShape{gates_count * hidden_size, input_size},
StaticShape{gates_count * hidden_size, hidden_size},
StaticShape{gates_count * hidden_size},
StaticShape{3 * hidden_size}},
static_output_shapes = {StaticShape{}, StaticShape{}};
shape_inference(lstm_cell.get(), static_input_shapes, static_output_shapes);
StaticShape{3 * hidden_size}};
const auto static_output_shapes = shape_inference(lstm_cell.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({batch_size, hidden_size}));
ASSERT_EQ(static_output_shapes[1], StaticShape({batch_size, hidden_size}));
}

View File

@ -35,7 +35,7 @@ TEST_F(LSTMSequenceV0StaticShapeInferenceTest, default_ctor) {
StaticShape{num_directions, gates_count * hidden_size}, // B
StaticShape{num_directions, (gates_count - 1) * hidden_size}}; // P
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
@ -71,7 +71,7 @@ TEST_F(LSTMSequenceV0StaticShapeInferenceTest, FORWARD_without_P) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
@ -109,7 +109,7 @@ TEST_F(LSTMSequenceV0StaticShapeInferenceTest, FORWARD_with_P) {
StaticShape{num_directions, gates_count * hidden_size}, // B
StaticShape{num_directions, (gates_count - 1) * hidden_size}}; // P
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
@ -145,7 +145,7 @@ TEST_F(LSTMSequenceV0StaticShapeInferenceTest, REVERSE) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
@ -180,7 +180,7 @@ TEST_F(LSTMSequenceV0StaticShapeInferenceTest, BIDIRECTIONAL) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
@ -212,7 +212,7 @@ TEST_F(LSTMSequenceV5StaticShapeInferenceTest, default_ctor) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
@ -248,7 +248,7 @@ TEST_F(LSTMSequenceV5StaticShapeInferenceTest, FORWARD) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
@ -284,7 +284,7 @@ TEST_F(LSTMSequenceV5StaticShapeInferenceTest, REVERSE) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
@ -319,7 +319,7 @@ TEST_F(LSTMSequenceV5StaticShapeInferenceTest, BIDIRECTIONAL) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 3);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));

View File

@ -3,36 +3,38 @@
//
#include <gtest/gtest.h>
#include <openvino/core/coordinate_diff.hpp>
#include <openvino/op/ops.hpp>
#include <openvino/op/parameter.hpp>
#include <shape_inference/shape_inference.hpp>
#include <shape_inference/static_shape.hpp>
#include "ngraph_functions/builders.hpp"
#include <thread>
#include <atomic>
#include <ov_ops/type_relaxed.hpp>
#include <thread>
#include "openvino/core/coordinate_diff.hpp"
#include "openvino/op/ops.hpp"
#include "openvino/op/parameter.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "shape_inference/shape_inference.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using ov::op::v0::MatMul;
using ov::op::v0::Parameter;
using ov::op::v0::Result;
TEST(StaticShapeInferenceTest, MakeShapeInference) {
auto inp1_f32 = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto inp2_f32 = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto inp1_f32 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(4));
auto inp2_f32 = std::make_shared<Parameter>(element::f32, PartialShape::dynamic(4));
auto inp1 = std::make_shared<op::v0::Parameter>(element::i8, PartialShape{-1, -1, -1, -1});
auto inp2 = std::make_shared<op::v0::Parameter>(element::i8, PartialShape{-1, -1, -1, -1});
auto inp1 = std::make_shared<Parameter>(element::i8, PartialShape::dynamic(4));
auto inp2 = std::make_shared<Parameter>(element::i8, PartialShape::dynamic(4));
auto matMulRelaxed = std::make_shared<ov::op::TypeRelaxed<ngraph::opset3::MatMul>>(
*as_type_ptr<ngraph::opset3::MatMul>(ngraph::builder::makeMatMul(inp1_f32, inp2_f32, false, false)),
element::f32);
auto matMulRelaxed = std::make_shared<ov::op::TypeRelaxed<MatMul>>(
*as_type_ptr<MatMul>(std::make_shared<MatMul>(inp1_f32, inp2_f32, false, false)),
element::f32);
auto matMul = matMulRelaxed->clone_with_new_inputs({inp1, inp2});
ngraph::ResultVector results;
results.push_back(std::make_shared<ngraph::opset1::Result>(matMul->output(0)));
ov::ResultVector results;
results.push_back(std::make_shared<Result>(matMul->output(0)));
auto function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{inp1, inp2}, "testFunction");
auto model = std::make_shared<ov::Model>(results, ov::ParameterVector{inp1, inp2}, "testFunction");
std::atomic_flag wrongPrcFlag;
wrongPrcFlag.clear();

View File

@ -81,7 +81,7 @@ TEST_P(MatMulTest, no_input_transpose) {
std::vector<StaticShape> static_input_shapes = {a_shape, b_shape}, static_output_shapes = {StaticShape{}};
shape_inference(matmul.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(matmul.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes.front(), exp_shape);
}
@ -91,7 +91,7 @@ TEST_P(MatMulTest, transpose_input_a) {
const auto a_transpose = make_transpose_input(a_shape);
std::vector<StaticShape> static_input_shapes = {a_transpose, b_shape}, static_output_shapes = {StaticShape{}};
shape_inference(matmul.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(matmul.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes.front(), exp_shape);
}
@ -101,7 +101,7 @@ TEST_P(MatMulTest, transpose_input_b) {
const auto b_transpose = make_transpose_input(b_shape);
std::vector<StaticShape> static_input_shapes = {a_shape, b_transpose}, static_output_shapes = {StaticShape{}};
shape_inference(matmul.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(matmul.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes.front(), exp_shape);
}
@ -113,6 +113,6 @@ TEST_P(MatMulTest, transpose_inputs_a_b) {
std::vector<StaticShape> static_input_shapes = {a_transpose, b_transpose}, static_output_shapes = {StaticShape{}};
shape_inference(matmul.get(), static_input_shapes, static_output_shapes);
static_output_shapes = shape_inference(matmul.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes.front(), exp_shape);
}
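The parameterised expectations here follow the usual MatMul rule: the output is the broadcast batch dimensions followed by {rows of A, columns of B}, e.g. A = {2, 3, 4} and B = {2, 4, 5} give {2, 3, 5}; transpose_a / transpose_b swap the last two dimensions of the respective input before that rule is applied.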

View File

@ -20,9 +20,8 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantInput) {
int64_t axis = -1;
auto ont_hot = std::make_shared<op::v1::OneHot>(indices, depth, on_value, off_value, axis);
// Test StaticShape
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}},
static_output_shapes = {StaticShape{}};
shape_inference(ont_hot.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}};
const auto static_output_shapes = shape_inference(ont_hot.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], (StaticShape{3, 2}));
}
@ -38,17 +37,12 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMap) {
int32_t on_value[] = {1};
int32_t off_value[] = {0};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i64, Shape{}, depth_value);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{}, on_value);
constant_data[3] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{}, off_value);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{}, depth_value}},
{2, {element::i32, ov::Shape{}, on_value}},
{3, {element::i32, ov::Shape{}, off_value}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}},
static_output_shapes = {StaticShape{}};
shape_inference(ont_hot.get(), static_input_shapes, static_output_shapes, constant_data);
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}};
const auto static_output_shapes = shape_inference(ont_hot.get(), static_input_shapes, constant_data);
EXPECT_EQ(static_output_shapes[0], (StaticShape{3, 2}));
}
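Note that the keys in these constant-data maps are the OneHot input ports, i.e. 0 = indices, 1 = depth, 2 = on_value, 3 = off_value, which is why the off_value entry goes under port 3 here and in the two tests below. With axis = -1 the depth is appended as the last dimension, so the {3} indices shape with a depth of 2 yields the {3, 2} result checked above.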
@ -60,18 +54,13 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMapDefaultCtor) {
int32_t on_value[] = {1};
int32_t off_value[] = {0};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i64, Shape{}, depth_value);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{}, on_value);
constant_data[3] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{}, off_value);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{}, depth_value}},
{2, {element::i32, ov::Shape{}, on_value}},
{3, {element::i32, ov::Shape{}, off_value}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}};
shape_inference(ont_hot.get(), static_input_shapes, static_output_shapes, constant_data);
const auto static_output_shapes = shape_inference(ont_hot.get(), static_input_shapes, constant_data);
EXPECT_EQ(static_output_shapes[0], (StaticShape{3, 2}));
}
@ -88,18 +77,13 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMapNegativeDepth) {
int32_t on_value[] = {1};
int32_t off_value[] = {0};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[1] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i64, Shape{}, depth_value);
constant_data[2] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{}, on_value);
constant_data[3] =
std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{}, off_value);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{}, depth_value}},
{2, {element::i32, ov::Shape{}, on_value}},
{3, {element::i32, ov::Shape{}, off_value}}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}},
static_output_shapes = {StaticShape{}};
std::vector<StaticShape> static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}};
OV_EXPECT_THROW(shape_inference(ont_hot.get(), static_input_shapes, static_output_shapes, constant_data),
OV_EXPECT_THROW(shape_inference(ont_hot.get(), static_input_shapes, constant_data),
ov::NodeValidationFailure,
HasSubstr("can't be negative"));
}

View File

@ -30,12 +30,11 @@ TYPED_TEST_P(PadStaticShapeInference, default_ctor) {
int64_t pads_begin[] = {3, 2, 1, 1};
int32_t pads_end[] = {0, 1, 2, 3};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i64, Shape{4}, pads_begin)},
{2, std::make_shared<HostTensor>(element::i32, Shape{4}, pads_end)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, Shape{4}, pads_begin}},
{2, {element::i32, Shape{4}, pads_end}}};
this->input_shapes = ShapeVector{{3, 6, 5, 5}, {4}, {4}};
shape_inference(op.get(), this->input_shapes, this->output_shapes, const_data);
this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({6, 9, 8, 9}));
@ -51,7 +50,7 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_end_value_as_constants) {
const auto op = this->make_op(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT);
this->input_shapes = ShapeVector{{3, 6, 5, 5}, {4}, {4}, {}};
shape_inference(op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({6, 9, 8, 8}));
@ -65,14 +64,13 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_end_in_constant_map) {
uint64_t pads_begin_data[] = {0, 2, 2, 0};
uint32_t pads_end_data[] = {0, 1, 2, 0};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::u64, Shape{4}, pads_begin_data)},
{2, std::make_shared<HostTensor>(element::u32, Shape{4}, pads_end_data)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::u64, Shape{4}, pads_begin_data}},
{2, {element::u32, Shape{4}, pads_end_data}}};
const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT);
this->input_shapes = ShapeVector{{3, 6, 5, 1}, {4}, {4}};
shape_inference(op.get(), this->input_shapes, this->output_shapes, const_data);
this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data);
EXPECT_EQ(this->output_shapes.front(), StaticShape({3, 9, 9, 1}));
}
@ -84,13 +82,12 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_got_negative_value) {
int8_t pads_begin_data[] = {0, -2, -2, 0};
const auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i8, Shape{4}, pads_begin_data)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i8, Shape{4}, pads_begin_data}}};
const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT);
this->input_shapes = ShapeVector{{3, SIZE_MAX, 5, 2}, {4}, {4}};
shape_inference(op.get(), this->input_shapes, this->output_shapes, const_data);
this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data);
EXPECT_EQ(this->output_shapes.front(), StaticShape({3, SIZE_MAX, 3, 2}));
}
@ -103,12 +100,11 @@ TYPED_TEST_P(PadStaticShapeInference, pads_end_got_negative_value) {
int8_t pads_end_data[] = {0, -3, -2, 0};
const auto const_data =
std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i8, Shape{4}, pads_end_data)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::i8, Shape{4}, pads_end_data}}};
this->input_shapes = ShapeVector{{3, 6, 5, SIZE_MAX}, {4}, {4}};
shape_inference(op.get(), this->input_shapes, this->output_shapes, const_data);
this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data);
EXPECT_EQ(this->output_shapes.front(), StaticShape({4, 4, 5, SIZE_MAX}));
}
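As a sanity check on these expectations: for every Pad mode each output dimension equals input_dim + pads_begin + pads_end, so negative entries shrink the corresponding axis; here the drop from 6 to 4 in the second dimension reflects the -3 in pads_end, partially offset by the pads_begin value created earlier in the test (not shown in this hunk).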
@ -119,11 +115,11 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_is_empty) {
const auto pads_end = Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT);
const auto const_data = std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::u64, Shape{0})}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::u64, Shape{0}}}};
this->input_shapes = ShapeVector{{3, 6, 5, 2}, {0}, {4}};
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, this->output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, const_data),
NodeValidationFailure,
HasSubstr("length of pads_begin mismatches with rank of input"));
}
@ -134,11 +130,11 @@ TYPED_TEST_P(PadStaticShapeInference, pads_end_is_empty) {
const auto pads_end = std::make_shared<Parameter>(element::i8, PartialShape::dynamic());
const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT);
const auto const_data = std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i8, Shape{0})}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{2, {element::i8, Shape{0}}}};
this->input_shapes = ShapeVector{{3, 6, 5, 2}, {4}, {0}};
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, this->output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, const_data),
NodeValidationFailure,
HasSubstr("length of pads_end mismatches with rank of input"));
}

View File

@ -31,10 +31,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, default_ctor_no_args) {
int32_t out_size[] = {2, 5};
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, out_size)}});
output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, out_size}}});
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 80}));
@ -49,10 +46,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_dynamic_rank) {
int32_t output_size[] = {2, 5};
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, output_size)}});
output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}});
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 4 * 2 * 5 * 2}));
@ -67,10 +61,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_static_rank) {
int32_t output_size[] = {5, 2};
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, output_size)}});
output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}});
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 4 * 5 * 2 * 2}));
@ -83,7 +74,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, out_size_constant) {
op = make_op(out_size, img_size, attrs);
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 4 * 4 * 6 * 2}));
@ -96,7 +87,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_constants) {
op = make_op(out_size, img_size, attrs);
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 4 * 12 * 16 * 2}));
@ -111,10 +102,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, invalid_number_of_elements_i
int64_t output_size[] = {5, 2, 1};
input_shapes = ShapeVector{{2}, {2}};
OV_EXPECT_THROW(shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i64, ov::Shape{3}, output_size)}}),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}),
NodeValidationFailure,
HasSubstr("Output size must have two elements"));
}
@ -128,10 +116,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, invalid_input_ranks) {
int64_t output_size[] = {5, 2, 1};
input_shapes = ShapeVector{{2, 1}, {2}};
OV_EXPECT_THROW(shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i64, ov::Shape{3}, output_size)}}),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}),
NodeValidationFailure,
HasSubstr("output size input rank 2 must match image shape input rank 1"));
}
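For reference, the second output dimension in these expectations is 4 * H * W * num_priors, where H and W come from the output-size input and num_priors is the number of width/height pairs in the clustered attributes (set outside the visible hunks); e.g. the {2, 5} output size with two priors gives 4 * 2 * 5 * 2 = 80, matching the first test above.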

View File

@ -32,10 +32,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, default_ctor_no_args) {
int32_t out_size[] = {2, 5};
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, out_size)}});
output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, out_size}}});
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 200}));
@ -50,10 +47,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_dynamic_rank) {
int32_t output_size[] = {2, 5};
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, output_size)}});
output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}});
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 200}));
@ -68,10 +62,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_static_rank) {
int32_t output_size[] = {5, 2};
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, output_size)}});
output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}});
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 200}));
@ -84,7 +75,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, out_size_constant) {
op = make_op(out_size, img_size, attrs);
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 480}));
@ -97,7 +88,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_constants) {
op = make_op(out_size, img_size, attrs);
input_shapes = ShapeVector{{2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2, 3840}));
@ -112,10 +103,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, invalid_number_of_elements_in_out_siz
int64_t output_size[] = {5, 2, 1};
input_shapes = ShapeVector{{2}, {2}};
OV_EXPECT_THROW(shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i64, ov::Shape{3}, output_size)}}),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}),
NodeValidationFailure,
HasSubstr("Output size must have two elements"));
}
@ -129,10 +117,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, invalid_input_ranks) {
int64_t output_size[] = {5, 2, 1};
input_shapes = ShapeVector{{2, 1}, {2}};
OV_EXPECT_THROW(shape_inference(op.get(),
input_shapes,
output_shapes,
{{0, std::make_shared<HostTensor>(element::i64, ov::Shape{3}, output_size)}}),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}),
NodeValidationFailure,
HasSubstr("output size input rank 2 must match image shape input rank 1"));
}

View File

@ -40,7 +40,7 @@ TYPED_TEST_P(ProposalTest, default_ctor) {
this->op->set_attrs(this->make_attrs(10));
this->input_shapes = ShapeVector{{2, 3, 10, 10}, {2, 6, 10, 10}, {3}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), this->exp_out_size());
EXPECT_EQ(this->output_shapes.front(), StaticShape({20, 5}));
@ -54,7 +54,7 @@ TYPED_TEST_P(ProposalTest, all_inputs_dynamic_rank) {
this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(4));
this->input_shapes = ShapeVector{{2, 3, 10, 10}, {2, 6, 10, 10}, {3}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), this->exp_out_size());
EXPECT_EQ(this->output_shapes[0], StaticShape({8, 5}));
@ -68,7 +68,7 @@ TYPED_TEST_P(ProposalTest, all_inputs_static_rank) {
this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(5));
this->input_shapes = ShapeVector{{3, 4, 10, 10}, {3, 8, 10, 10}, {4}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), this->exp_out_size());
EXPECT_EQ(this->output_shapes[0], StaticShape({15, 5}));
@ -82,7 +82,7 @@ TYPED_TEST_P(ProposalTest, batch_size_not_compatible) {
this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(5));
this->input_shapes = ShapeVector{{3, 4, 10, 10}, {4, 8, 10, 10}, {3}};
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Batch size inconsistent between class_probs"));
}
@ -95,7 +95,7 @@ TYPED_TEST_P(ProposalTest, image_shape_input_not_compatible_shape) {
this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(5));
this->input_shapes = ShapeVector{{3, 4, 10, 10}, {3, 8, 10, 10}, {5}};
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Image_shape must be 1-D tensor and has got 3 or 4 elements"));
}

View File

@ -33,7 +33,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, default_ctor_avg_mode) {
input_shapes = ShapeVector{{1, 45, 10, 10}, {3, 5}};
auto shape_infer = make_shape_inference(op);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({3, 5, 3, 3}));
@ -50,7 +50,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, default_ctor_bilinear_mode) {
input_shapes = ShapeVector{{1, 75, 10, 10}, {2, 5}};
auto shape_infer = make_shape_inference(op);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 5, 8, 8}));
@ -63,7 +63,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, inputs_dynamic_rank) {
op = make_op(feat, rois, 4, group, scale, 0, 0, "average");
input_shapes = ShapeVector{{2, 36, 100, 100}, {10, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 4, 3, 3}));
@ -76,7 +76,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, inputs_static_rank) {
op = make_op(feat, rois, 2, 1, scale, bins_x, bins_y, "bilinear");
input_shapes = ShapeVector{{2, 24, 20, 100}, {1, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 2, 1, 1}));
@ -90,7 +90,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, invalid_rois_batch_size) {
input_shapes = ShapeVector{{2, 24, 20, 100}, {1, 6}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The second dimension of ROIs input should contain batch id and box coordinates. This "
"dimension is expected to be equal to 5"));

View File

@ -0,0 +1,67 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "range_shape_inference.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using std::make_shared;
using testing::ElementsAre;
TEST(StaticShapeInferenceTest, Rangev4_i32) {
auto start = make_shared<op::v0::Parameter>(element::i32, ov::PartialShape{});
auto stop = make_shared<op::v0::Parameter>(element::i32, ov::PartialShape{});
auto step = make_shared<op::v0::Parameter>(element::i32, ov::PartialShape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
int32_t start_v = 2, stop_v = 0, step_v = -2;
auto const_data = std::unordered_map<size_t, ov::Tensor>{{0, {element::i32, Shape{}, &start_v}},
{1, {element::i32, Shape{}, &stop_v}},
{2, {element::i32, Shape{}, &step_v}}};
auto output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1}));
step_v = -1;
output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2}));
start_v = -19, stop_v = 19, step_v = 1;
output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{38}));
step_v = 3;
output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{13}));
start_v = 20, stop_v = -19, step_v = 1;
output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{0}));
}
TEST(StaticShapeInferenceTest, Rangev4_f32) {
auto start = make_shared<op::v0::Parameter>(element::f32, ov::PartialShape{});
auto stop = make_shared<op::v0::Parameter>(element::f32, ov::PartialShape{});
auto step = make_shared<op::v0::Parameter>(element::f32, ov::PartialShape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
float start_v = 0.f, stop_v = 1.f, step_v = .25f;
auto const_data = std::unordered_map<size_t, ov::Tensor>{{0, {element::f32, Shape{}, &start_v}},
{1, {element::f32, Shape{}, &stop_v}},
{2, {element::f32, Shape{}, &step_v}}};
auto output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{4}));
start_v = -1.f;
output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{8}));
stop_v = .875f;
output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{8}));
}
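
For reference, the new test above also shows the call pattern that the rest of this commit migrates to: constant inputs are supplied as ov::Tensor objects wrapping caller-owned buffers, keyed by input port index, and shape_inference returns the inferred shapes instead of filling an output vector passed by reference. A minimal sketch of that pattern, not part of the diff itself (op stands for any shape-inferable node; the port index and values are illustrative assumptions):

// Hypothetical values for a constant input on port 1 (assumption for illustration only):
int32_t axes_val[] = {0, 2};
// ov::Tensor wraps the caller-owned buffer; the map key is the input port index.
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, ov::Shape{2}, axes_val}}};
// The new overload returns the inferred StaticShapes rather than taking an out-parameter.
const auto output_shapes = shape_inference(op.get(), ShapeVector{{2, 3, 4, 5}, {2}}, const_data);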


@ -1,37 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <range_shape_inference.hpp>
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace std;
TEST(StaticShapeInferenceTest, Rangev4_i32) {
auto start = make_shared<op::v0::Parameter>(element::i32, ov::PartialShape{});
auto stop = make_shared<op::v0::Parameter>(element::i32, ov::PartialShape{});
auto step = make_shared<op::v0::Parameter>(element::i32, ov::PartialShape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
check_static_shape(range.get(), {2, 0, -2}, {StaticShape{1}});
check_static_shape(range.get(), {2, 0, -1}, {StaticShape{2}});
check_static_shape(range.get(), {-19, 19, 1}, {StaticShape{38}});
check_static_shape(range.get(), {-19, 19, 3}, {StaticShape{13}});
check_static_shape(range.get(), {20, -19, 1}, {StaticShape{0}});
}
TEST(StaticShapeInferenceTest, Rangev4_f32) {
auto start = make_shared<op::v0::Parameter>(element::f32, ov::PartialShape{});
auto stop = make_shared<op::v0::Parameter>(element::f32, ov::PartialShape{});
auto step = make_shared<op::v0::Parameter>(element::f32, ov::PartialShape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
check_static_shape(range.get(), {0., 1., 0.25}, {StaticShape{4}});
check_static_shape(range.get(), {-1., 1., 0.25}, {StaticShape{8}});
check_static_shape(range.get(), {-1., 0.875, 0.25}, {StaticShape{8}});
}


@ -30,8 +30,8 @@ void readValueTest() {
auto readValue = constructGraph<T>();
// Test StaticShape
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 2, 64, 64}}, static_output_shapes = {StaticShape{}};
shape_inference(readValue.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 2, 64, 64}};
const auto static_output_shapes = shape_inference(readValue.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], (StaticShape{1, 2, 64, 64}));
}


@ -28,9 +28,8 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, default_ctor) {
this->input_shapes = ShapeVector{{1, 6, 7, 8, 4}, {3}};
int32_t axes_val[] = {0, 1, 3};
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {
{1, std::make_shared<HostTensor>(element::i32, Shape{3}, axes_val)}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes, constant_data);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, axes_val}}};
this->output_shapes = shape_inference(this->op.get(), this->input_shapes, constant_data);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({1, 1, 7, 1, 4}));
@ -43,7 +42,7 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_constant) {
this->op = this->make_op(data, axes, false);
this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({3, 5}));
@ -57,9 +56,8 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_param) {
this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}};
int32_t axes_val[] = {1, 3};
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {
{1, std::make_shared<HostTensor>(element::i32, Shape{2}, axes_val)}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes, constant_data);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{2}, axes_val}}};
this->output_shapes = shape_inference(this->op.get(), this->input_shapes, constant_data);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({3, 5}));
@ -72,7 +70,7 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_constant_keep_dims) {
this->op = this->make_op(data, axes, true);
this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({3, 1, 5, 1}));
@ -86,9 +84,8 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_param_keep_dims) {
this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}};
int32_t axes_val[] = {1, 3};
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {
{1, std::make_shared<HostTensor>(element::i32, Shape{2}, axes_val)}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes, constant_data);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{2}, axes_val}}};
this->output_shapes = shape_inference(this->op.get(), this->input_shapes, constant_data);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes.front(), StaticShape({3, 1, 5, 1}));


@ -22,7 +22,7 @@ TEST_F(StaticShapeRegionYoloTest, default_ctor_do_soft_max_no_args) {
op->set_end_axis(3);
input_shapes = ShapeVector{{10, 8, 12, 6}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 8, 72}));
@ -33,7 +33,7 @@ TEST_F(StaticShapeRegionYoloTest, data_input_is_dynamic_rank) {
op = make_op(data, 0, 0, 0, true, std::vector<int64_t>(), 1, 3);
input_shapes = ShapeVector{{2, 2, 3, 4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 24}));
@ -44,7 +44,7 @@ TEST_F(StaticShapeRegionYoloTest, data_input_is_static_rank) {
op = make_op(data, 5, 4, 20, false, std::vector<int64_t>{0, 1}, 1, 3);
input_shapes = ShapeVector{{2, 5, 6, 7}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 20, 6, 7}));
@ -54,7 +54,7 @@ TEST_F(StaticShapeRegionYoloTest, data_shape_not_compatible_rank_4) {
const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
op = make_op(data, 5, 4, 20, false, std::vector<int64_t>{0, 1}, 1, 3);
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 12, 24, 1}}, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector({{2, 20, 12, 24, 1}})),
NodeValidationFailure,
HasSubstr("Input must be a tensor of rank 4, but got"));
}


@ -20,7 +20,7 @@ TEST_F(StaticShapeReorgYoloTest, default_ctor_no_args) {
op->set_strides(3);
input_shapes = ShapeVector{{2, 9, 12, 6}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 81, 4, 2}));
@ -31,7 +31,7 @@ TEST_F(StaticShapeReorgYoloTest, data_input_is_dynamic_rank) {
op = make_op(data, 2);
input_shapes = ShapeVector{{2, 12, 12, 24}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 48, 6, 12}));
@ -42,7 +42,7 @@ TEST_F(StaticShapeReorgYoloTest, data_input_is_static_rank) {
op = make_op(data, 2);
input_shapes = ShapeVector{{2, 20, 12, 24}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 80, 6, 12}));
@ -52,7 +52,7 @@ TEST_F(StaticShapeReorgYoloTest, data_shape_not_compatible_rank_4) {
const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
op = make_op(data, 2);
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 12, 24, 1}}, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector({{2, 20, 12, 24, 1}})),
NodeValidationFailure,
HasSubstr("[N, C, H, W] input shape is required"));
}
@ -61,7 +61,7 @@ TEST_F(StaticShapeReorgYoloTest, h_dim_not_div_by_stride) {
const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
op = make_op(data, 2);
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 11, 24}}, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 11, 24}}),
NodeValidationFailure,
HasSubstr("H and W should be divisible by stride"));
}


@ -27,7 +27,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, default_batch_seq_axes) {
auto op = make_op(data, seq_lengths);
input_shapes = ShapeVector{{4, 3, 2}, {4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2}));
}
@ -36,7 +36,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, set_batch_seq_axes) {
auto op = make_op(data, seq_lengths, -1, 1);
input_shapes = ShapeVector{{4, 3, 2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2}));
}
@ -45,14 +45,14 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_input_shapes_count) {
auto op = make_op(data, seq_lengths);
input_shapes = ShapeVector{{1, 2, 4}};
EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes), NodeValidationFailure);
EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure);
}
TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_data_shape_rank) {
auto op = make_op(data, seq_lengths);
input_shapes = ShapeVector{{4}, {4}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Data input rank should be equal or greater than 2. Got: "));
}
@ -61,7 +61,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_sequence_shape_rank) {
auto op = make_op(data, seq_lengths);
input_shapes = ShapeVector{{4, 5, 6}, {2, 2}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Sequence lengths rank must be equal to 1. Got: "));
}
@ -70,7 +70,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, default_ctor) {
auto op = make_op();
input_shapes = ShapeVector{{11, 2, 3}, {11}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({11, 2, 3}));
}


@ -28,7 +28,7 @@ TEST_F(ReverseV1StaticShapeInferenceTest, axes_index_as_constant) {
auto op = make_op(data, Constant::create(element::i16, Shape{4}, {-1000, 1, 2, 2}), Reverse::Mode::INDEX);
input_shapes = ShapeVector{{4, 3, 2, 4}, {4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2, 4}));
}
@ -38,9 +38,8 @@ TEST_F(ReverseV1StaticShapeInferenceTest, axes_index_in_constant_data) {
input_shapes = ShapeVector{{4, 3, 2, 4}, {4}};
int8_t axes_val[] = {-1, 2, 1};
auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i8, Shape{3}, axes_val)}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i8, Shape{3}, axes_val}}};
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2, 4}));
}
@ -51,7 +50,7 @@ TEST_F(ReverseV1StaticShapeInferenceTest, axes_mask_as_constant) {
input_shapes = ShapeVector{{4, 3, 2, 4}, {4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2, 4}));
}
@ -62,9 +61,8 @@ TEST_F(ReverseV1StaticShapeInferenceTest, axes_mask_in_constant_data) {
input_shapes = ShapeVector{{4, 3, 2, 4}, {4}};
bool axes_val[] = {true, true, false, false};
auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::boolean, Shape{4}, axes_val)}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::boolean, Shape{4}, axes_val}}};
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2, 4}));
}
@ -73,7 +71,7 @@ TEST_F(ReverseV1StaticShapeInferenceTest, invalid_axes_mask_length) {
auto op = make_op(data, Constant::create(element::boolean, Shape{3}, {false, false, true}), Reverse::Mode::MASK);
input_shapes = ShapeVector{{1, 2, 4, 3}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The number of elements in the reversed_axes tensor (3) must match the input data tensor "
"rank (4) in 'mask' mode"));
@ -83,7 +81,7 @@ TEST_F(ReverseV1StaticShapeInferenceTest, axes_index_out_of_data_rank) {
auto op = make_op(data, Constant::create(element::u8, Shape{3}, {0, 20, 3}), Reverse::Mode::INDEX);
input_shapes = ShapeVector{{1, 2, 4, 3}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Some of the provided axes (AxisSet{0, 3, 20}) are out of bounds (input rank: 4)"));
}
@ -95,9 +93,8 @@ TEST_F(ReverseV1StaticShapeInferenceTest, default_ctor) {
input_shapes = ShapeVector{{11, 2, 3}, {3}};
int64_t axes_val[] = {-1, 2, 0};
auto const_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i64, Shape{3}, axes_val)}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, Shape{3}, axes_val}}};
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes[0], StaticShape({11, 2, 3}));
}


@ -32,7 +32,7 @@ TEST_F(RNNCellV0StaticShapeInferenceTest, default_ctor) {
StaticShape{gates_count * hidden_size}}; // B
std::vector<StaticShape> output_shapes;
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -57,7 +57,7 @@ TEST_F(RNNCellV0StaticShapeInferenceTest, default_bias) {
StaticShape{gates_count * hidden_size}}; // B
std::vector<StaticShape> output_shapes;
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -80,7 +80,7 @@ TEST_F(RNNCellV0StaticShapeInferenceTest, with_bias) {
StaticShape{gates_count * hidden_size, hidden_size}, // R
StaticShape{gates_count * hidden_size}}; // B
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}
@ -105,6 +105,6 @@ TEST_F(RNNCellV0StaticShapeInferenceTest, dynamic_rank_inputs) {
StaticShape{gates_count * hidden_size}}; // B
std::vector<StaticShape> output_shapes;
shape_inference(gru.get(), input_shapes, output_shapes);
output_shapes = shape_inference(gru.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, hidden_size}));
}


@ -33,7 +33,7 @@ TEST_F(RNNSequenceV5StaticShapeInferenceTest, default_ctor) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -64,7 +64,7 @@ TEST_F(RNNSequenceV5StaticShapeInferenceTest, FORWARD) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -95,7 +95,7 @@ TEST_F(RNNSequenceV5StaticShapeInferenceTest, REVERSE) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}
@ -126,7 +126,7 @@ TEST_F(RNNSequenceV5StaticShapeInferenceTest, BIDIRECTIONAL) {
StaticShape{num_directions, gates_count * hidden_size, hidden_size}, // R
StaticShape{num_directions, gates_count * hidden_size}}; // B
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({batch_size, num_directions, seq_len, hidden_size}));
EXPECT_EQ(output_shapes[1], StaticShape({batch_size, num_directions, hidden_size}));
}


@ -27,7 +27,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, default_ctor_no_args) {
this->op->set_pooled_w(2);
this->input_shapes = ShapeVector{{2, 3, 5, 5}, {7, 4}, {7}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes[0], (StaticShape{7, 3, 2, 2}));
@ -41,7 +41,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, all_inputs_dynamic_rank) {
this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG);
this->input_shapes = ShapeVector{{2, 3, 5, 5}, {10, 4}, {10}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes[0], (StaticShape{10, 3, 2, 2}));
@ -55,7 +55,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, all_inputs_static_rank) {
this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG);
this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 4}, {10}};
shape_inference(this->op.get(), this->input_shapes, this->output_shapes);
this->output_shapes = shape_inference(this->op.get(), this->input_shapes);
EXPECT_EQ(this->output_shapes.size(), 1);
EXPECT_EQ(this->output_shapes[0], (StaticShape{10, 8, 2, 2}));
@ -69,7 +69,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, incompatible_input_rank) {
this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG);
this->input_shapes = ShapeVector{{2, 8, 5}, {10, 3}, {10}};
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Expected a 4D tensor for the input data"));
}
@ -82,7 +82,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, incompatible_rois_rank) {
this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG);
this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 3, 1}, {10}};
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Expected a 2D tensor for the ROIs input"));
}
@ -94,7 +94,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, incompatible_batch_indicies_rank) {
this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG);
this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 3}, {}};
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("Expected a 1D tensor for the batch indices input."));
}
@ -107,7 +107,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, invalid_rois_2nd_dim) {
this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG);
this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 3}, {10}};
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes, this->output_shapes),
OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes),
NodeValidationFailure,
HasSubstr("op dimension is expected to be equal to 4"));
}


@ -27,7 +27,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, default_ctor) {
input_shapes = ShapeVector{{1, 5, 10, 10}, {2, 5}};
auto shape_infer = make_shape_inference(op);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({2, 5, 3, 3}));
@ -40,7 +40,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, inputs_dynamic_rank) {
op = make_op(feat, rois, ov::Shape{5, 5}, 0.9f);
input_shapes = ShapeVector{{2, 3, 100, 100}, {10, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 3, 5, 5}));
@ -53,7 +53,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, inputs_static_rank) {
op = make_op(feat, rois, ov::Shape{7, 5}, 1.9f, "max");
input_shapes = ShapeVector{{2, 3, 20, 100}, {10, 5}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 3, 7, 5}));
@ -67,7 +67,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, invalid_rois_batch_size) {
input_shapes = ShapeVector{{2, 3, 20, 100}, {10, 6}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("The second dimension of ROIs input should contain batch id and box coordinates. This "
"dimension is expected to be equal to 5"));


@ -1,10 +1,11 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <array>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
@ -29,7 +30,7 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_as_constant) {
input_shapes = {StaticShape{3, 5}, StaticShape{2}, StaticShape{2}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], input_shapes[0]);
}
@ -41,12 +42,12 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_in_const_map) {
const auto op = make_op(arg, shift, axes);
auto axes_val = std::array<int32_t, 3>{0, 1, -1};
const auto constant_data = std::map<size_t, HostTensorPtr>{
{2, std::make_shared<HostTensor>(element::i32, Shape{axes_val.size()}, axes_val.data())}};
const auto constant_data =
std::unordered_map<size_t, ov::Tensor>{{2, {element::i32, Shape{axes_val.size()}, axes_val.data()}}};
input_shapes = {StaticShape{3, 3, 3}, StaticShape{3}, StaticShape{3}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], input_shapes[0]);
}
@ -58,12 +59,12 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_over_arg_rank) {
const auto op = make_op(arg, shift, axes);
auto axes_val = std::array<int32_t, 3>{0, 3, -1};
const auto constant_data = std::map<size_t, HostTensorPtr>{
{2, std::make_shared<HostTensor>(element::i32, Shape{axes_val.size()}, axes_val.data())}};
const auto constant_data =
std::unordered_map<size_t, ov::Tensor>{{2, {element::i32, Shape{axes_val.size()}, axes_val.data()}}};
input_shapes = {StaticShape{3, 3, 3}, StaticShape{3}, StaticShape{3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, constant_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, constant_data),
NodeValidationFailure,
HasSubstr("Parameter axis 3 out of the tensor rank range"));
}
@ -76,12 +77,12 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_has_negative_after_normalization) {
const auto op = make_op(arg, shift, axes);
auto axes_val = std::array<int64_t, 3>{-4, 2, -1};
const auto constant_data = std::map<size_t, HostTensorPtr>{
{2, std::make_shared<HostTensor>(element::i64, Shape{axes_val.size()}, axes_val.data())}};
const auto constant_data =
std::unordered_map<size_t, ov::Tensor>{{2, {element::i64, Shape{axes_val.size()}, axes_val.data()}}};
input_shapes = {StaticShape{3, 3, 3}, StaticShape{3}, StaticShape{3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, constant_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, constant_data),
NodeValidationFailure,
HasSubstr(" Parameter axis -4 out of the tensor rank range"));
}
@ -90,11 +91,11 @@ TEST_F(RollV7StaticShapeInferenceTest, default_ctor) {
const auto op = make_op();
auto axes_val = std::array<int64_t, 4>{-4, 2, -1, 1};
const auto constant_data = std::map<size_t, HostTensorPtr>{
{2, std::make_shared<HostTensor>(element::i64, Shape{axes_val.size()}, axes_val.data())}};
const auto constant_data =
std::unordered_map<size_t, ov::Tensor>{{2, {element::i64, Shape{axes_val.size()}, axes_val.data()}}};
input_shapes = {StaticShape{3, 2, 5, 1}, StaticShape{}, StaticShape{4}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], input_shapes[0]);
}


@ -25,11 +25,10 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, default_ctor) {
const auto op = make_op();
int32_t axis = 1;
const auto const_data =
std::map<size_t, HostTensorPtr>{{3, std::make_shared<HostTensor>(element::i32, Shape{1}, &axis)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{3, {element::i32, Shape{1}, &axis}}};
input_shapes = ShapeVector{{1000, 256, 10, 13}, {25, 125, 3, 1}, {25, 125, 3, 1}, {1}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 13}));
@ -44,7 +43,7 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, correct_inputs_axis_as_c
const auto op = make_op(d, i, u, a);
input_shapes = ShapeVector{{2, 5, 10, 15}, {2, 1, 10, 15}, {2, 1, 10, 15}, {}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({2, 5, 10, 15}));
@ -59,11 +58,10 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, params_are_dynamic_rank_
const auto op = make_op(d, i, u, a);
uint32_t axis = 2;
const auto const_data =
std::map<size_t, HostTensorPtr>{{3, std::make_shared<HostTensor>(element::u32, Shape{}, &axis)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{3, {element::u32, Shape{}, &axis}}};
input_shapes = ShapeVector{{5000, 256, 10, 15}, {30, 25, 3, 3}, {30, 25, 3, 3}, {}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({5000, 256, 10, 15}));
@ -78,11 +76,10 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, incorrect_axis_value) {
const auto op = make_op(d, i, u, a);
uint32_t axis = 4;
const auto const_data =
std::map<size_t, HostTensorPtr>{{3, std::make_shared<HostTensor>(element::u32, Shape{}, &axis)}};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{3, {element::u32, Shape{}, &axis}}};
input_shapes = ShapeVector{{5000, 256, 10, 15}, {30, 25, 3, 3}, {30, 25, 3, 3}, {}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
AssertFailure,
HasSubstr("Parameter axis 4 out of the tensor rank range [-4, 3]"));
}


@ -21,7 +21,7 @@ TEST_F(ScatterNDUpdateV3StaticShapeInferenceTest, default_ctor) {
const auto op = make_op();
input_shapes = ShapeVector{{1000, 256, 10, 13}, {25, 125, 3}, {25, 125, 13}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 13}));
@ -35,7 +35,7 @@ TEST_F(ScatterNDUpdateV3StaticShapeInferenceTest, correct_inputs) {
const auto op = make_op(d, i, u);
input_shapes = ShapeVector{{1000, 256, 10, 15}, {25, 125, 3}, {25, 125, 15}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 15}));
@ -49,7 +49,7 @@ TEST_F(ScatterNDUpdateV3StaticShapeInferenceTest, params_are_dynamic_rank) {
const auto op = make_op(d, i, u);
input_shapes = ShapeVector{{5000, 256, 10, 15}, {30, 25, 3}, {30, 25, 15}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], StaticShape({5000, 256, 10, 15}));


@ -18,14 +18,14 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_3D_axis_1) {
auto scatter_update = std::make_shared<op::v3::ScatterUpdate>(data_param, indices_param, updates_param, axis_param);
int32_t axis_val[] = {1};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[3] = std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{1}, axis_val);
std::unordered_map<size_t, ov::Tensor> constant_data;
constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val);
std::vector<StaticShape> input_shapes = {StaticShape{2, 3, 4},
StaticShape{2, 1},
StaticShape{2, 2, 1, 4},
StaticShape{1}},
output_shapes = {StaticShape{}};
shape_inference(scatter_update.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(scatter_update.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], StaticShape({2, 3, 4}));
}
@ -38,14 +38,14 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_4D_axis_2) {
auto scatter_update = std::make_shared<op::v3::ScatterUpdate>(data_param, indices_param, updates_param, axis_param);
int32_t axis_val[] = {2};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[3] = std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{1}, axis_val);
std::unordered_map<size_t, ov::Tensor> constant_data;
constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val);
std::vector<StaticShape> input_shapes = {StaticShape{1000, 256, 10, 15},
StaticShape{125, 20},
StaticShape{1000, 125, 20, 10, 15},
StaticShape{1}},
output_shapes = {StaticShape{}};
shape_inference(scatter_update.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(scatter_update.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 15}));
}
@ -58,14 +58,14 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_4D_incompatible_axis) {
auto scatter_update = std::make_shared<op::v3::ScatterUpdate>(data_param, indices_param, updates_param, axis_param);
int32_t axis_val[] = {1};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[3] = std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{1}, axis_val);
std::unordered_map<size_t, ov::Tensor> constant_data;
constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val);
std::vector<StaticShape> input_shapes = {StaticShape{1000, 256, 10, 15},
StaticShape{125, 20},
StaticShape{1000, 125, 20, 10, 15},
StaticShape{1}},
output_shapes = {StaticShape{}};
shape_inference(scatter_update.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(scatter_update.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 15}));
}
@ -80,9 +80,8 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_axis_as_const) {
std::vector<StaticShape> input_shapes = {StaticShape{1000, 256, 10, 15},
StaticShape{125, 20},
StaticShape{1000, 125, 20, 10, 15},
StaticShape{1}},
output_shapes = {StaticShape{}};
shape_inference(scatter_update.get(), input_shapes, output_shapes);
StaticShape{1}};
const auto output_shapes = shape_inference(scatter_update.get(), input_shapes);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 15}));
}
@ -95,14 +94,14 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_dynamic_rank) {
auto scatter_update = std::make_shared<op::v3::ScatterUpdate>(data_param, indices_param, updates_param, axis_param);
int32_t axis_val[] = {1};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[3] = std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{1}, axis_val);
std::unordered_map<size_t, ov::Tensor> constant_data;
constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val);
std::vector<StaticShape> input_shapes = {StaticShape{1000, 256, 10, 15},
StaticShape{125, 20},
StaticShape{1000, 125, 20, 10, 15},
StaticShape{1}},
output_shapes = {StaticShape{}};
shape_inference(scatter_update.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(scatter_update.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 15}));
}
@ -115,8 +114,8 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_params_dynamic_rank_incorrect_updat
auto scatter_update = std::make_shared<op::v3::ScatterUpdate>(data_param, indices_param, updates_param, axis_param);
int32_t axis_val[] = {1};
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
constant_data[3] = std::make_shared<ngraph::runtime::HostTensor>(element::Type_t::i32, Shape{1}, axis_val);
std::unordered_map<size_t, ov::Tensor> constant_data;
constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val);
// Incorrect rank of the third input shape
std::vector<StaticShape> input_shapes = {StaticShape{1000, 256, 10, 15},
@ -126,6 +125,6 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_params_dynamic_rank_incorrect_updat
output_shapes = {StaticShape{}};
// ScatterUpdate shape_inference is implemented by usage of entryFirstPassthrough, no additional checks
shape_inference(scatter_update.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(scatter_update.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes[0], StaticShape({1000, 256, 10, 15}));
}


@ -15,35 +15,32 @@ TEST(StaticShapeInferenceTest, SelectTestBCastModeNUMPY) {
auto pfalse = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
auto select = std::make_shared<op::v1::Select>(cond, ptrue, pfalse, op::AutoBroadcastType::NUMPY);
{
std::vector<StaticShape> static_input_shapes = {StaticShape{}, StaticShape{4}, StaticShape{2, 4}},
static_output_shapes = {StaticShape{}};
shape_inference(select.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{}, StaticShape{4}, StaticShape{2, 4}};
const auto static_output_shapes = shape_inference(select.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({2, 4}));
}
{
std::vector<StaticShape> static_input_shapes = {StaticShape{}, StaticShape{2, 4}, StaticShape{2, 4}},
static_output_shapes = {StaticShape{}};
shape_inference(select.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{}, StaticShape{2, 4}, StaticShape{2, 4}};
const auto static_output_shapes = shape_inference(select.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({2, 4}));
}
{
std::vector<StaticShape> static_input_shapes = {StaticShape{4}, StaticShape{2, 4}, StaticShape{4}},
static_output_shapes = {StaticShape{}};
shape_inference(select.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{4}, StaticShape{2, 4}, StaticShape{4}};
const auto static_output_shapes = shape_inference(select.get(), static_input_shapes);
EXPECT_EQ(static_output_shapes[0], StaticShape({2, 4}));
}
}
TEST(StaticShapeInferenceTest, SelectTestBCastModePDPD) {
auto cond = std::make_shared<op::v0::Parameter>(element::boolean, PartialShape::dynamic());
auto ptrue = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
auto pfalse = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
auto select =
std::make_shared<op::v1::Select>(cond, ptrue, pfalse, op::AutoBroadcastSpec{op::AutoBroadcastType::PDPD, 1});
std::vector<StaticShape> static_input_shapes = {StaticShape{4}, StaticShape{2, 4}, StaticShape{4}},
static_output_shapes = {StaticShape{}};
shape_inference(select.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{4}, StaticShape{2, 4}, StaticShape{4}};
const auto static_output_shapes = shape_inference(select.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({2, 4}));
}
@ -53,8 +50,7 @@ TEST(StaticShapeInferenceTest, SelectTestBCastModeNone) {
auto pfalse = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
auto select = std::make_shared<op::v1::Select>(cond, ptrue, pfalse, op::AutoBroadcastType::NONE);
std::vector<StaticShape> static_input_shapes = {StaticShape{6, 4}, StaticShape{6, 4}, StaticShape{6, 4}},
static_output_shapes = {StaticShape{}};
shape_inference(select.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{6, 4}, StaticShape{6, 4}, StaticShape{6, 4}};
const auto static_output_shapes = shape_inference(select.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({6, 4}));
}


@ -13,12 +13,10 @@ TEST(StaticShapeInferenceTest, ReshapeTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto pattern = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{0, -1});
auto reduce =
std::make_shared<op::v1::Reshape>(data, pattern, true);
auto reduce = std::make_shared<op::v1::Reshape>(data, pattern, true);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
shape_inference(reduce.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}, StaticShape{2}};
const auto static_output_shapes = shape_inference(reduce.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 150}));
}
@ -27,12 +25,10 @@ TEST(StaticShapeInferenceTest, ReshapeEmptyTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, 2, 2});
auto pattern = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{0, 4});
auto reduce =
std::make_shared<op::v1::Reshape>(data, pattern, false);
auto reduce = std::make_shared<op::v1::Reshape>(data, pattern, false);
std::vector<StaticShape> static_input_shapes = {StaticShape{0, 2, 2}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
shape_inference(reduce.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{0, 2, 2}, StaticShape{2}};
const auto static_output_shapes = shape_inference(reduce.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({0, 4}));
}
@ -40,12 +36,10 @@ TEST(StaticShapeInferenceTest, ReshapeEmptyTest) {
TEST(StaticShapeInferenceTest, ShapeOf5DTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto shapeof =
std::make_shared<op::v0::ShapeOf>(data);
auto shapeof = std::make_shared<op::v0::ShapeOf>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{2, 3, 4, 5, 6}},
static_output_shapes = {StaticShape{}};
shape_inference(shapeof.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{2, 3, 4, 5, 6}};
const auto static_output_shapes = shape_inference(shapeof.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({5}));
}
@ -53,12 +47,10 @@ TEST(StaticShapeInferenceTest, ShapeOf5DTest) {
TEST(StaticShapeInferenceTest, ShapeOf0DTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{});
auto shapeof =
std::make_shared<op::v3::ShapeOf>(data);
auto shapeof = std::make_shared<op::v3::ShapeOf>(data);
std::vector<StaticShape> static_input_shapes = {StaticShape{}},
static_output_shapes = {StaticShape{}};
shape_inference(shapeof.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{}};
const auto static_output_shapes = shape_inference(shapeof.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({}));
}


@ -25,7 +25,7 @@ TEST_F(ShuffleChannelsV0StaticShapeInferenceTest, default_ctor) {
op->set_group(2);
input_shapes = {StaticShape{5, 4, 9}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], input_shapes[0]);
@ -36,7 +36,7 @@ TEST_F(ShuffleChannelsV0StaticShapeInferenceTest, correct_shape_infer) {
op = make_op(data, -1, 3);
input_shapes = {StaticShape{5, 4, 9}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0], input_shapes[0]);
}


@ -34,7 +34,7 @@ TEST_F(SliceStaticShapeInferenceTest, reverse_steps_start_stop_outside_dimension
input_shapes.push_back({3, 4, 5, max_d, max_d});
input_shapes.resize(4, start->get_shape());
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), num_of_outputs);
EXPECT_EQ(output_shapes.front(), StaticShape({3, 2, 5, max_d, 3}));
@ -53,19 +53,19 @@ TEST_F(SliceStaticShapeInferenceTest, reverse_step_on_signle_axis_but_start_stop
auto stop_buff = std::vector<int64_t>{2};
auto steps_buff = std::vector<int64_t>{-2};
const auto start_tensor = std::make_shared<HostTensor>(et, Shape{1}, static_cast<void*>(start_buff.data()));
const auto stop_tensor = std::make_shared<HostTensor>(et, Shape{1}, static_cast<void*>(stop_buff.data()));
const auto steps_tensor = std::make_shared<HostTensor>(et, Shape{1}, static_cast<void*>(steps_buff.data()));
const auto start_tensor = ov::Tensor(element::i64, Shape{1}, static_cast<void*>(start_buff.data()));
const auto stop_tensor = ov::Tensor(element::i64, Shape{1}, static_cast<void*>(stop_buff.data()));
const auto steps_tensor = ov::Tensor(element::i64, Shape{1}, static_cast<void*>(steps_buff.data()));
const auto op = make_op(data, start, stop, steps, axes);
input_shapes = ShapeVector{{3, 4, 10}, {1}, {1}, {1}, axes->get_shape()};
const std::map<size_t, std::shared_ptr<HostTensor>>& constant_data = {{1, start_tensor},
{2, stop_tensor},
{3, steps_tensor}};
const std::unordered_map<size_t, ov::Tensor>& constant_data = {{1, start_tensor},
{2, stop_tensor},
{3, steps_tensor}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes.size(), num_of_outputs);
EXPECT_EQ(output_shapes.front(), StaticShape({3, 4, 4}));
@ -86,22 +86,22 @@ TEST_F(SliceStaticShapeInferenceTest, forward_step_all_data_in_const_map) {
const auto common_shape = Shape{start_buff.size()};
const auto start_tensor = std::make_shared<HostTensor>(et, common_shape, static_cast<void*>(start_buff.data()));
const auto stop_tensor = std::make_shared<HostTensor>(et, common_shape, static_cast<void*>(stop_buff.data()));
const auto steps_tensor = std::make_shared<HostTensor>(et, common_shape, static_cast<void*>(steps_buff.data()));
const auto axes_tensor = std::make_shared<HostTensor>(et, common_shape, static_cast<void*>(axes_buff.data()));
const auto start_tensor = ov::Tensor(element::i64, common_shape, static_cast<void*>(start_buff.data()));
const auto stop_tensor = ov::Tensor(element::i64, common_shape, static_cast<void*>(stop_buff.data()));
const auto steps_tensor = ov::Tensor(element::i64, common_shape, static_cast<void*>(steps_buff.data()));
const auto axes_tensor = ov::Tensor(element::i64, common_shape, static_cast<void*>(axes_buff.data()));
const auto op = make_op(data, start, stop, steps);
input_shapes.push_back({10, 10, 8, max_d, max_d, max_d, 10});
input_shapes.resize(5, common_shape);
const std::map<size_t, std::shared_ptr<HostTensor>>& constant_data = {{1, start_tensor},
{2, stop_tensor},
{3, steps_tensor},
{4, axes_tensor}};
const std::unordered_map<size_t, ov::Tensor>& constant_data = {{1, start_tensor},
{2, stop_tensor},
{3, steps_tensor},
{4, axes_tensor}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes.size(), num_of_outputs);
EXPECT_EQ(output_shapes.front(), StaticShape({10, 3, 0, 4, max_d, max_d, 3}));


@ -35,13 +35,12 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, default_ctor) {
int32_t pads_begin_val[] = {0, 2, 0, 0, 0};
int32_t pads_end_val[] = {0, 2, 1, 0, 0};
const auto constant_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{5}, block_val)},
{2, std::make_shared<HostTensor>(element::i32, Shape{5}, pads_begin_val)},
{3, std::make_shared<HostTensor>(element::i32, Shape{5}, pads_end_val)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{5}, block_val}},
{2, {element::i32, Shape{5}, pads_begin_val}},
{3, {element::i32, Shape{5}, pads_end_val}}};
input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16}));
}
@ -55,7 +54,7 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, blocks_pads_as_constants) {
const auto op = make_op(data, block_shape, pads_begin, pads_end);
input_shapes = {{2, 100, 1024, 3}, {4}, {4}, {4}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes[0],
(StaticShape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));
@ -68,13 +67,12 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, blocks_pads_in_constant_map) {
int32_t pads_begin_val[] = {0, 2, 0, 0, 0};
int32_t pads_end_val[] = {0, 2, 1, 0, 0};
const auto constant_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{5}, block_val)},
{2, std::make_shared<HostTensor>(element::i32, Shape{5}, pads_begin_val)},
{3, std::make_shared<HostTensor>(element::i32, Shape{5}, pads_end_val)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{5}, block_val}},
{2, {element::i32, Shape{5}, pads_begin_val}},
{3, {element::i32, Shape{5}, pads_end_val}}};
input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16}));
}
@ -83,17 +81,16 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, throw_no_data_const_map) {
const auto op = make_space_to_batch_dynamic();
input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes), NodeValidationFailure);
EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure);
}
TEST_F(SpaceToBatchV1StaticShapeInferenceTest, exception_missing_pads_data_in_const_map) {
const auto op = make_space_to_batch_dynamic();
int32_t block_val[] = {1, 6, 5, 1, 16};
const auto constant_data =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i32, Shape{5}, block_val)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{5}, block_val}}};
input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes), NodeValidationFailure);
EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure);
}


@ -24,7 +24,7 @@ TEST_F(SpaceToDepthV0StaticShapeInferenceTest, default_ctor) {
op->set_block_size(2);
input_shapes = {StaticShape{1, 12, 4, 1080, 1616}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2}));
@ -35,7 +35,7 @@ TEST_F(SpaceToDepthV0StaticShapeInferenceTest, depth_first_block_2) {
const auto op = make_op(data, op_type::SpaceToDepthMode::DEPTH_FIRST, 2);
input_shapes = {StaticShape{1, 12, 4, 1080, 1616}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes[0], (StaticShape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2}));


@ -2,7 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "gmock/gmock.h"
#include <gmock/gmock.h>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/split.hpp"
@ -58,7 +59,7 @@ TEST_P(SplitStaticShapeInferenceTest, shape_inference_empty_const_map) {
const auto axis_node = std::make_shared<op::v0::Constant>(element::i64, Shape{}, axis);
op = make_op(arg, axis_node, num_of_splits);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), num_of_splits);
EXPECT_THAT(output_shapes, Each(exp_shape));
@ -68,11 +69,10 @@ TEST_P(SplitStaticShapeInferenceTest, shape_inference_with_const_map) {
const auto axis_node = std::make_shared<op::v0::Parameter>(element::i64, Shape{});
op = make_op(arg, axis_node, num_of_splits);
const auto axis_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{}, axis);
const auto axis_tensor = std::make_shared<ngraph::runtime::HostTensor>(axis_const);
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {{1, axis_tensor}};
const auto axis_tensor = ov::Tensor(element::i64, ov::Shape{}, &axis);
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, axis_tensor}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
ASSERT_EQ(output_shapes.front(), exp_shape);
}


@ -2,8 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/squeeze.hpp"
@ -28,7 +29,7 @@ TEST_F(SqueezeStaticShapeInferenceAssertTest, no_axes) {
input_shapes = ShapeVector{{5, 6}, axes->get_shape()};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Check 'constant != nullptr'"));
}
@ -40,7 +41,7 @@ TEST_F(SqueezeStaticShapeInferenceAssertTest, parameter_static_shape_axes_no_dat
input_shapes = ShapeVector{arg->get_shape(), axes->get_shape()};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Check 'constant != nullptr'"));
}
@ -100,7 +101,7 @@ TEST_P(SqueezeStaticShapeInferenceTest, shape_inference_empty_const_map) {
const auto axes_node = std::make_shared<op::v0::Constant>(element::i64, Shape{axes.size()}, axes);
const auto op = make_op(arg, axes_node);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes.front(), exp_shape);
}
@ -109,11 +110,11 @@ TEST_P(SqueezeStaticShapeInferenceTest, shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, Shape{1});
const auto op = make_op(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ngraph::runtime::HostTensor>(axes_const);
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = axes.empty() ? ov::Tensor(element::i64, ov::Shape{axes.size()})
: ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, axes_tensor}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
ASSERT_EQ(output_shapes.front(), exp_shape);
}


@ -1,6 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
@ -28,12 +29,12 @@ TEST_F(StridedSliceStaticShapeInferenceTest, reverse_stride_begin_end_clip_to_di
const auto op = make_op(data, begin, end, stride, mask, mask);
check_static_shape(op.get(),
{StaticShape{3, 4, 5}, StaticShape{3}, StaticShape{3}, StaticShape{3}},
{StaticShape{3, 4, 5}});
input_shapes = ShapeVector{{3, 4, 5}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 4, 5}));
}
TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end) {
TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_1) {
const auto mask = std::vector<int64_t>(4, 0);
const auto data = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
@ -43,11 +44,60 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end) {
const auto op = make_op(data, begin, end, stride, mask, mask);
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {1, 0, 0}, {2, 1, 3}, {1, 1, 1}}, {StaticShape{1, 1, 3}});
int64_t begin_v[] = {1, 0, 0};
int64_t end_v[] = {2, 1, 3};
int64_t stride_v[] = {1, 1, 1};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{3}, begin_v}},
{2, {element::i64, ov::Shape{3}, end_v}},
{3, {element::i64, ov::Shape{3}, stride_v}}};
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {1, 0, 0}, {2, 2, 3}, {1, 1, 1}}, {StaticShape{1, 2, 3}});
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 1, 3}));
}
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {2, 0, 0}, {3, 2, 3}, {1, 1, 2}}, {StaticShape{1, 2, 2}});
TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_2) {
const auto mask = std::vector<int64_t>(4, 0);
const auto data = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
const auto begin = std::make_shared<op::v0::Parameter>(element::i64, Shape{3});
const auto end = std::make_shared<op::v0::Parameter>(element::i64, Shape{3});
const auto stride = std::make_shared<op::v0::Parameter>(element::i64, Shape{3});
const auto op = make_op(data, begin, end, stride, mask, mask);
int64_t begin_v[] = {1, 0, 0};
int64_t end_v[] = {2, 2, 3};
int64_t stride_v[] = {1, 1, 1};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{3}, begin_v}},
{2, {element::i64, ov::Shape{3}, end_v}},
{3, {element::i64, ov::Shape{3}, stride_v}}};
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 2, 3}));
}
TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_3) {
const auto mask = std::vector<int64_t>(4, 0);
const auto data = std::make_shared<op::v0::Parameter>(element::f32, ov::PartialShape::dynamic());
const auto begin = std::make_shared<op::v0::Parameter>(element::i64, Shape{3});
const auto end = std::make_shared<op::v0::Parameter>(element::i64, Shape{3});
const auto stride = std::make_shared<op::v0::Parameter>(element::i64, Shape{3});
const auto op = make_op(data, begin, end, stride, mask, mask);
int64_t begin_v[] = {2, 0, 0};
int64_t end_v[] = {3, 2, 3};
int64_t stride_v[] = {1, 1, 2};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{3}, begin_v}},
{2, {element::i64, ov::Shape{3}, end_v}},
{3, {element::i64, ov::Shape{3}, stride_v}}};
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 2, 2}));
}
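As a cross-check on the expected shapes in the three variants above: with all masks zero, non-negative begin/end already inside the data bounds, and positive strides, each output dimension is ceil((end - begin) / stride). A small stand-alone sketch of that arithmetic (plain C++, independent of the OpenVINO implementation):
#include <cstdint>

// Output extent of one axis for a forward slice with positive stride.
inline int64_t slice_dim(int64_t begin, int64_t end, int64_t stride) {
    return (end - begin + stride - 1) / stride;  // ceil((end - begin) / stride)
}

// variant_3: data {3, 2, 3}, begin {2, 0, 0}, end {3, 2, 3}, stride {1, 1, 2}
// -> {slice_dim(2, 3, 1), slice_dim(0, 2, 1), slice_dim(0, 3, 2)} = {1, 2, 2}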
TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end) {
@ -61,7 +111,16 @@ TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end) {
const auto op = make_op(data, begin, end, stride, begin_mask, end_mask);
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {1, 0, 0}, {0, 0, 0}, {1, 1, 1}}, {StaticShape{2, 2, 3}});
int64_t begin_v[] = {1, 0, 0};
int64_t end_v[] = {0, 0, 0};
int64_t stride_v[] = {1, 1, 1};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{3}, begin_v}},
{2, {element::i64, ov::Shape{3}, end_v}},
{3, {element::i64, ov::Shape{3}, stride_v}}};
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2, 2, 3}));
}
TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end_stride_by_two_last_dim) {
@ -75,7 +134,16 @@ TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end_stride_by_two_last
auto op = make_op(data, begin, end, stride, begin_mask, end_mask);
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {0, 1, 0}, {2, 0, 0}, {1, 1, 2}}, {StaticShape{2, 1, 2}});
int64_t begin_v[] = {0, 1, 0};
int64_t end_v[] = {2, 0, 0};
int64_t stride_v[] = {1, 1, 2};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{3}, begin_v}},
{2, {element::i64, ov::Shape{3}, end_v}},
{3, {element::i64, ov::Shape{3}, stride_v}}};
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2, 1, 2}));
}
TEST_F(StridedSliceStaticShapeInferenceTest, use_reverse_stride_on_last_dimension) {
@ -88,7 +156,16 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_reverse_stride_on_last_dimensio
const auto op = make_op(data, begin, end, stride, mask, mask);
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {0, 0, 0}, {1, 0, 0}, {1, 1, -1}}, {StaticShape{1, 2, 3}});
int64_t begin_v[] = {0, 0, 0};
int64_t end_v[] = {1, 0, 0};
int64_t stride_v[] = {1, 1, -1};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, ov::Shape{3}, begin_v}},
{2, {element::i64, ov::Shape{3}, end_v}},
{3, {element::i64, ov::Shape{3}, stride_v}}};
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}};
output_shapes = shape_inference(op.get(), input_shapes, const_data);
EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 2, 3}));
}
TEST_F(StridedSliceStaticShapeInferenceTest, default_stride) {
@ -101,7 +178,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, default_stride) {
input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}};
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes.front(), StaticShape({1, 2, 2}));
}

View File

@ -14,15 +14,13 @@ TEST(StaticShapeInferenceTest, TileTest) {
auto param1 = std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{3}, std::vector<int>{3, 4, 1});
auto tile = std::make_shared<op::v0::Tile>(param0, param1);
// Test Static Shape
std::vector<StaticShape> static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(tile.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{3}};
const auto static_output_shapes = shape_inference(tile.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({18, 32, 10}));
// Test Wrong Static Shape
std::vector<StaticShape> wrong_static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{}},
wrong_static_output_shapes = {StaticShape{}};
std::vector<StaticShape> wrong_static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{}};
ASSERT_THROW(shape_inference(tile.get(), wrong_static_input_shapes, wrong_static_output_shapes), ov::AssertFailure);
ASSERT_THROW(shape_inference(tile.get(), wrong_static_input_shapes), ov::AssertFailure);
}
TEST(StaticShapeInferenceTest, TileFewRepeatsTest) {
@ -30,9 +28,8 @@ TEST(StaticShapeInferenceTest, TileFewRepeatsTest) {
auto param1 = ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 1});
auto tile = std::make_shared<op::v0::Tile>(param0, param1);
// Test Static Shape
std::vector<StaticShape> static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
shape_inference(tile.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{2}};
const auto static_output_shapes = shape_inference(tile.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({6, 32, 10}));
}
@ -41,9 +38,8 @@ TEST(StaticShapeInferenceTest, TileSmallDataRankTest) {
auto param1 = ov::op::v0::Constant::create(element::i64, Shape{3}, {3, 4, 1});
auto tile = std::make_shared<op::v0::Tile>(param0, param1);
// Test Static Shape
std::vector<StaticShape> static_input_shapes = {StaticShape{8, 10}, StaticShape{3}},
static_output_shapes = {StaticShape{}};
shape_inference(tile.get(), static_input_shapes, static_output_shapes);
std::vector<StaticShape> static_input_shapes = {StaticShape{8, 10}, StaticShape{3}};
const auto static_output_shapes = shape_inference(tile.get(), static_input_shapes);
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 32, 10}));
}
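The expected shape above follows the Tile rule for a repeats vector longer than the data rank. A minimal sketch of that rule as understood here (an illustration, not the operator implementation), assuming repeats is at least as long as the data shape: the data shape is left-padded with 1s up to the repeats rank and then multiplied element-wise:
#include <cstddef>
#include <vector>

inline std::vector<size_t> tiled_shape(std::vector<size_t> data, const std::vector<size_t>& repeats) {
    data.insert(data.begin(), repeats.size() - data.size(), 1);  // left-pad data shape with 1s
    for (size_t i = 0; i < repeats.size(); ++i)
        data[i] *= repeats[i];
    return data;
}

// tiled_shape({8, 10}, {3, 4, 1}) == {3, 32, 10}, matching TileSmallDataRankTest.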
@ -53,12 +49,11 @@ TEST(StaticShapeInferenceTest, TileSmallDataRankTestRepeatsInConstMap) {
auto tile = std::make_shared<op::v0::Tile>(param0, param1);
int32_t repeats[] = {3, 4, 1};
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {
{1, std::make_shared<HostTensor>(element::i32, Shape{3}, repeats)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{3}, repeats}}};
// Test Static Shape
ShapeVector input_shapes = {StaticShape{8, 10}, StaticShape{3}}, output_shapes = {StaticShape{}};
shape_inference(tile.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(tile.get(), input_shapes, constant_data);
ASSERT_EQ(output_shapes.front(), StaticShape({3, 32, 10}));
}
@ -86,8 +81,7 @@ TEST(StaticShapeInferenceTest, TileNewApiInputsStaticRank) {
auto tile = std::make_shared<op::v0::Tile>(param0, param1);
int32_t repeats[] = {3, 4, 1, 2};
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {
{1, std::make_shared<HostTensor>(element::i32, Shape{4}, repeats)}};
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, {element::i32, Shape{4}, repeats}}};
auto dims = std::vector<VectorDims>{{8, 10}, {4}};
auto in_shapes = std::vector<StaticShapeRef>(dims.begin(), dims.end());

View File

@ -2,8 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/opsets/opset10.hpp"
#include "topk_shape_inference.hpp"
#include "utils.hpp"
@ -46,10 +47,9 @@ TEST_F(TopKV1AssertStaticShapeInferenceTest, k_is_negative) {
output_shapes = ShapeVector(2);
int64_t k = -2;
const auto const_map =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i64, Shape{}, &k)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, Shape{}, &k}}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_map),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_map),
ov::AssertFailure,
HasSubstr("The value of 'K' must be greater or equal to zero. (got " + std::to_string(k) + ")"));
}
@ -63,7 +63,7 @@ TEST_P(TopKV1Test, no_constant_map) {
const auto op = make_op(data, k_node, axis, "max", "value");
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(exp_shape));
@ -75,7 +75,7 @@ TEST_P(TopKV1Test, k_as_param_no_const_map) {
const auto op = make_op(data, k_node, axis, "min", "value");
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Static shape inference lacks constant data on port 1"));
}
@ -84,12 +84,11 @@ TEST_P(TopKV1Test, k_as_param_in_const_map) {
const auto data = std::make_shared<Parameter>(element::f32, PartialShape::dynamic());
const auto k_node = std::make_shared<Parameter>(element::i64, PartialShape::dynamic());
const auto const_map =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i64, Shape{}, &k)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, Shape{}, &k}}};
const auto op = make_op(data, k_node, axis, "min", "value");
shape_inference(op.get(), input_shapes, output_shapes, const_map);
output_shapes = shape_inference(op.get(), input_shapes, const_map);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(exp_shape));
@ -109,10 +108,9 @@ TEST_F(TopKV3AssertStaticShapeInferenceTest, k_is_negative) {
output_shapes = ShapeVector(2);
int64_t k = -2;
const auto const_map =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i64, Shape{}, &k)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, Shape{}, &k}}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_map),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_map),
ov::AssertFailure,
HasSubstr("The value of 'K' must be greater or equal to zero. (got " + std::to_string(k) + ")"));
}
@ -126,7 +124,7 @@ TEST_P(TopKV3Test, k_as_constant) {
const auto op = make_op(data, k_node, axis, "min", "value");
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(exp_shape));
@ -138,7 +136,7 @@ TEST_P(TopKV3Test, k_as_param_no_const_map) {
const auto op = make_op(data, k_node, axis, "min", "value");
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
NodeValidationFailure,
HasSubstr("Static shape inference lacks constant data on port 1"));
}
@ -147,12 +145,11 @@ TEST_P(TopKV3Test, k_as_param_in_const_map) {
const auto data = std::make_shared<Parameter>(element::f32, PartialShape::dynamic());
const auto k_node = std::make_shared<Parameter>(element::i64, PartialShape::dynamic());
const auto const_map =
std::map<size_t, HostTensorPtr>{{1, std::make_shared<HostTensor>(element::i64, Shape{}, &k)}};
const auto const_map = std::unordered_map<size_t, ov::Tensor>{{1, {element::i64, Shape{}, &k}}};
const auto op = make_op(data, k_node, axis, "max", "value");
shape_inference(op.get(), input_shapes, output_shapes, const_map);
output_shapes = shape_inference(op.get(), input_shapes, const_map);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(exp_shape));

View File

@ -68,9 +68,7 @@ INSTANTIATE_TEST_SUITE_P(
/** \brief Check shape_infer for transpose on static shapes. */
TEST_P(StaticShapeInferenceTest, transpose_static) {
auto output_shapes = std::vector<StaticShape>{StaticShape{}};
shape_inference(transpose.get(), {input_shape, transpose_order}, output_shapes);
auto output_shapes = shape_inference(transpose.get(), {input_shape, transpose_order});
ASSERT_EQ(output_shapes[op::v1::Transpose::ARG_T], exp_shape);
}
@ -81,9 +79,7 @@ TEST(StaticShapeInferenceTest, transpose_input_shape_dim_dynamic) {
const auto order = std::vector<size_t>{1, 2, 0};
const auto transpose = make_transpose(input_shape, order);
auto output_shapes = std::vector<StaticShape>{StaticShape{}};
shape_inference(transpose.get(), {StaticShape{2, 6, 3}, order}, output_shapes);
auto output_shapes = shape_inference(transpose.get(), {StaticShape{2, 6, 3}, order});
ASSERT_EQ(output_shapes[op::v1::Transpose::ARG_T], StaticShape({6, 3, 2}));
}
@ -95,13 +91,12 @@ TEST(StaticShapeInferenceTest, transpose_order_in_constant_map) {
const auto transpose = std::make_shared<op::v1::Transpose>(input, order);
const auto axes_order = std::vector<size_t>{1, 2, 0, 3};
const auto axes = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes_order.size()}, axes_order);
const auto const_tensor = std::make_shared<ngraph::runtime::HostTensor>(axes);
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> const_map = {{1, const_tensor}};
auto axes_order = std::vector<int64_t>{1, 2, 0, 3};
const auto const_tensor = ov::Tensor(element::i64, ov::Shape{axes_order.size()}, axes_order.data());
const std::unordered_map<size_t, ov::Tensor> const_map = {{1, const_tensor}};
auto output_shapes = std::vector<StaticShape>{StaticShape{}};
shape_inference(transpose.get(), {StaticShape({2, 4, 6, 8}), StaticShape()}, output_shapes, const_map);
output_shapes = shape_inference(transpose.get(), {StaticShape({2, 4, 6, 8}), StaticShape()}, const_map);
ASSERT_EQ(output_shapes[op::v1::Transpose::ARG_T], StaticShape({4, 6, 2, 8}));
}
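One detail worth calling out in the rewrite above (an interpretation, not stated in the commit): the backing vector changes from std::vector<size_t> to std::vector<int64_t> because the ov::Tensor is declared as element::i64 and simply reinterprets the wrapped buffer, so the C++ element type of that buffer has to match the declared element type exactly, which size_t does not guarantee on every platform.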

View File

@ -2,7 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "gmock/gmock.h"
#include <gmock/gmock.h>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/unsqueeze.hpp"
@ -28,7 +29,7 @@ TEST_F(UnsqueezeStaticShapeInferenceAssertTest, no_axes) {
input_shapes = ShapeVector{{5, 6}, axes->get_shape()};
try {
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
FAIL() << "Axes nullptr not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_THAT(error.what(), HasSubstr("Check 'constant != nullptr'"));
@ -111,7 +112,7 @@ TEST_P(UnsqueezeStaticShapeInferenceTest, shape_inference_empty_const_map) {
const auto axes_node = std::make_shared<op::v0::Constant>(element::i64, Shape{axes.size()}, axes);
op = std::make_shared<op::v0::Unsqueeze>(arg, axes_node);
shape_inference(op.get(), input_shapes, output_shapes);
output_shapes = shape_inference(op.get(), input_shapes);
ASSERT_EQ(output_shapes.front(), exp_shape);
}
@ -120,11 +121,10 @@ TEST_P(UnsqueezeStaticShapeInferenceTest, shape_inference_with_const_map) {
const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, Shape{1});
op = std::make_shared<op::v0::Unsqueeze>(arg, axes_node);
const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
const auto axes_tensor = std::make_shared<ngraph::runtime::HostTensor>(axes_const);
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {{1, axes_tensor}};
const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data());
const auto constant_data = std::unordered_map<size_t, ov::Tensor>{{1, axes_tensor}};
shape_inference(op.get(), input_shapes, output_shapes, constant_data);
output_shapes = shape_inference(op.get(), input_shapes, constant_data);
ASSERT_EQ(output_shapes.front(), exp_shape);
}

View File

@ -0,0 +1,28 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "utils.hpp"
namespace ov {
namespace intel_cpu {
std::vector<StaticShapeRef> make_static_shape_refs(const ShapeVector& shapes) {
std::vector<StaticShapeRef> out;
out.reserve(shapes.size());
for (auto& s : shapes) {
out.emplace_back(s);
}
return out;
}
ShapeVector shape_inference(ov::Node* op,
const ShapeVector& input_shapes,
const std::unordered_map<size_t, Tensor>& constant_data) {
const auto in_shapes = intel_cpu::make_static_shape_refs(input_shapes);
const auto shape_infer = intel_cpu::make_shape_inference(op->shared_from_this());
auto result = shape_infer->infer(in_shapes, make_tensor_accessor(constant_data));
OPENVINO_ASSERT(result, "There are no output shapes in shape inference result");
return *result;
}
} // namespace intel_cpu
} // namespace ov
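How this helper fits together (a reading of the code above rather than separate documentation): make_static_shape_refs builds lightweight views over the caller's StaticShape vector, make_shape_inference selects the per-operator IStaticShapeInfer implementation for the node, and make_tensor_accessor(constant_data) gives that implementation on-demand access to constant input values by port index; a port absent from the map is reported as unavailable, which is why the tests that omit required constant data see the "Static shape inference lacks constant data on port 1" failure.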

View File

@ -10,114 +10,16 @@
#include "shape_inference/shape_inference.hpp"
#include "shape_inference/static_shape.hpp"
using ShapeVector = std::vector<ov::intel_cpu::StaticShape>;
namespace ov {
namespace intel_cpu {
namespace {
std::vector<StaticShapeRef> make_static_shape_refs(const ShapeVector& shapes) {
std::vector<StaticShapeRef> out;
out.reserve(shapes.size());
for (auto& s : shapes) {
out.emplace_back(s);
}
return out;
}
} // namespace
template <class TIface = IStaticShapeInfer, class TTensorPtr = HostTensorPtr>
void shape_inference(ov::Node* op,
const std::vector<StaticShape>& input_shapes,
std::vector<StaticShape>& output_shapes,
const std::map<size_t, TTensorPtr>& constant_data = {}) {
const auto in_shapes = make_static_shape_refs(input_shapes);
const auto shape_infer = make_shape_inference(op->shared_from_this());
auto result = shape_infer->infer(in_shapes, ov::make_tensor_accessor(constant_data));
OPENVINO_ASSERT(result, "There are no output shapes in shape inference result");
output_shapes = std::move(*result);
}
using ShapeVector = std::vector<ov::intel_cpu::StaticShape>;
template <class T = std::unordered_map<size_t, Tensor>>
ShapeVector shape_inference(ov::Node* op, const ShapeVector& input_shapes, const T& constant_data = T{}) {
const auto in_shapes = intel_cpu::make_static_shape_refs(input_shapes);
const auto shape_infer = intel_cpu::make_shape_inference(op->shared_from_this());
auto result = shape_infer->infer(in_shapes, make_tensor_accessor(constant_data));
OPENVINO_ASSERT(result, "There are no output shapes in shape inference result");
return *result;
}
} // namespace intel_cpu
} // namespace ov
std::vector<StaticShapeRef> make_static_shape_refs(const ShapeVector& shapes);
struct TestTensor {
std::shared_ptr<ngraph::runtime::HostTensor> tensor;
ov::intel_cpu::StaticShape static_shape;
template <typename T>
TestTensor(std::initializer_list<T> values) : TestTensor(ov::intel_cpu::StaticShape({values.size()}), values) {}
template <typename T>
TestTensor(T scalar) : TestTensor(ov::intel_cpu::StaticShape({}), {scalar}) {}
TestTensor(ov::intel_cpu::StaticShape shape) : static_shape(shape) {}
template <typename T>
TestTensor(ov::intel_cpu::StaticShape shape, std::initializer_list<T> values) {
static_shape = shape;
ov::Shape s;
for (auto dim : shape)
s.push_back(dim);
if (values.size() > 0) {
tensor = std::make_shared<ngraph::runtime::HostTensor>(ov::element::from<T>(), s);
T* ptr = tensor->get_data_ptr<T>();
int i = 0;
for (auto& v : values)
ptr[i++] = v;
}
}
};
// TestTensor can be constructed from initializer_list<T>/int64_t/Shape/Shape+initializer_list
// so each element of inputs can be:
// {1,2,3,4} tensor of shape [4] and values (1,2,3,4)
// 2 tensor of scalar with value 2
// Shape{2,2} tensor of shape [2,2] and value unknown
// {Shape{2,2}, {1,2,3,4}} tensor of shape [2,2] and values (1,2,3,4)
inline void check_static_shape(ov::Node* op,
std::initializer_list<TestTensor> inputs,
std::initializer_list<ov::intel_cpu::StaticShape> expect_shapes) {
std::vector<ov::intel_cpu::StaticShape> output_shapes;
std::vector<ov::intel_cpu::StaticShape> input_shapes;
std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constData;
int index = 0;
std::for_each(inputs.begin(), inputs.end(), [&](TestTensor t) {
input_shapes.push_back(t.static_shape);
if (t.tensor)
constData[index] = t.tensor;
index++;
});
output_shapes.resize(expect_shapes.size(), ov::intel_cpu::StaticShape{});
shape_inference(op, input_shapes, output_shapes, constData);
EXPECT_EQ(output_shapes.size(), expect_shapes.size());
int id = 0;
for (auto& shape : expect_shapes) {
EXPECT_EQ(output_shapes[id], shape);
id++;
}
}
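For illustration, one of the strided-slice calls this commit replaces shows the construction forms listed in the comment above in use; the first argument is a shape-only input and the remaining initializer lists carry values:
check_static_shape(op.get(), {StaticShape{3, 2, 3}, {1, 0, 0}, {2, 1, 3}, {1, 1, 1}}, {StaticShape{1, 1, 3}});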
inline void check_output_shape(ov::Node* op, std::initializer_list<ov::PartialShape> expect_shapes) {
int id = 0;
EXPECT_EQ(op->outputs().size(), expect_shapes.size());
for (auto& shape : expect_shapes) {
EXPECT_EQ(op->get_output_partial_shape(id), shape);
id++;
}
}
ShapeVector shape_inference(ov::Node* op,
const ShapeVector& input_shapes,
const std::unordered_map<size_t, Tensor>& constant_data = {});
template <class TOp>
class OpStaticShapeInferenceTest : public testing::Test {
@ -133,3 +35,6 @@ protected:
return std::make_shared<TOp>(std::forward<Args>(args)...);
}
};
} // namespace intel_cpu
} // namespace ov

Some files were not shown because too many files have changed in this diff.