fixed tensor shapes to work correctly if the shape of the first arg is smaller than the shape of the second arg; (#9368)

added corresponding unit test
This commit is contained in:
Svetlana Dolinina 2022-01-10 17:15:21 +03:00 committed by GitHub
parent 8d8ceeb5d7
commit 04386bb667
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 23 additions and 4 deletions

View File

@ -66,6 +66,11 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
const auto& input1 = node->input_value(0);
const auto& input2 = node->input_value(1);
// broadcast shapes to allocate tensors of correct size for operations with both inputs
PartialShape input_shape = input1.get_partial_shape();
NGRAPH_CHECK(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()),
"Argument shapes in divide operation are inconsistent.");
const auto& input2_low = input2.get_tensor().get_lower_value();
if (input2_low == nullptr)
return false;
@ -103,7 +108,7 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
return status;
if (!is_upper) {
auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input1.get_shape());
auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_low, input1_up});
if (!status)
return status;
@ -130,7 +135,7 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
if (!status)
return status;
} else {
auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input1.get_shape());
auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_up, input1_low});
if (!status)
return status;

View File

@ -166,7 +166,7 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_scalar) {
ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
}
TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
TEST(type_prop, interval_value_propagation_mul_lhs_1D_div) {
auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
auto shape_of = make_shared<op::v3::ShapeOf>(param);
auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
@ -180,7 +180,7 @@ TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
}
TEST(type_prop, interval_value_propagation_mul_div_rhs_1D) {
TEST(type_prop, interval_value_propagation_mul_rhs_1D_div) {
auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
auto shape_of = make_shared<op::v3::ShapeOf>(param);
auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
@ -194,6 +194,20 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_1D) {
ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
}
TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
    // Divide with a scalar lhs and a 1-D rhs: verifies interval (bound) value
    // propagation through Multiply/Divide when the numerator's shape has a
    // lower rank than the denominator's and must be broadcast.
    auto data = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
    auto dims = make_shared<op::v3::ShapeOf>(data);
    auto dims_fp = make_shared<op::Convert>(dims, element::f32);
    auto doubled = make_shared<op::v1::Multiply>(dims_fp, op::Constant::create(element::f32, {1}, {2}));
    // Scalar {} numerator divided by the 1-D doubled dims — the lhs shape is
    // "smaller" than the rhs shape, the case this test was added to cover.
    auto quotient = make_shared<op::v1::Divide>(op::Constant::create(element::f32, {}, {192}), doubled);
    auto target_shape = make_shared<op::Convert>(quotient, element::i32);
    auto reshaped = make_shared<op::v1::Reshape>(data, target_shape, false);
    ASSERT_EQ(reshaped->get_element_type(), element::f32);
    // 192 / (2 * {[2,8], [4,16], 6}) == {[12,48], [6,24], 16}
    ASSERT_EQ(reshaped->get_output_partial_shape(0), PartialShape({Dimension(12, 48), Dimension(6, 24), 16}));
}
TEST(type_prop, interval_value_propagation_reduce) {
auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
auto shape_of = make_shared<op::v3::ShapeOf>(param);