diff --git a/src/core/src/op/divide.cpp b/src/core/src/op/divide.cpp
index e6ae6a0fc0e..d49cce71378 100644
--- a/src/core/src/op/divide.cpp
+++ b/src/core/src/op/divide.cpp
@@ -66,6 +66,11 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
     const auto& input1 = node->input_value(0);
     const auto& input2 = node->input_value(1);
 
+    // broadcast shapes to allocate tensors of the correct size for operations on both inputs
+    PartialShape input_shape = input1.get_partial_shape();
+    NGRAPH_CHECK(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()),
+                 "Argument shapes in divide operation are inconsistent.");
+
     const auto& input2_low = input2.get_tensor().get_lower_value();
     if (input2_low == nullptr)
         return false;
@@ -103,7 +108,7 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
         return status;
 
     if (!is_upper) {
-        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input1.get_shape());
+        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
         status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_low, input1_up});
         if (!status)
             return status;
@@ -130,7 +135,7 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
         if (!status)
             return status;
     } else {
-        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input1.get_shape());
+        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
         status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_up, input1_low});
         if (!status)
             return status;
diff --git a/src/core/tests/type_prop/reshape.cpp b/src/core/tests/type_prop/reshape.cpp
index 6eab452ef6c..80d847fa3de 100644
--- a/src/core/tests/type_prop/reshape.cpp
+++ b/src/core/tests/type_prop/reshape.cpp
@@ -166,7 +166,7 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_scalar) {
     ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
 }
 
-TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
+TEST(type_prop, interval_value_propagation_mul_lhs_1D_div) {
     auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
     auto shape_of = make_shared<op::v3::ShapeOf>(param);
     auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
@@ -180,7 +180,7 @@ TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
     ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
 }
 
-TEST(type_prop, interval_value_propagation_mul_div_rhs_1D) {
+TEST(type_prop, interval_value_propagation_mul_rhs_1D_div) {
     auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
     auto shape_of = make_shared<op::v3::ShapeOf>(param);
     auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
@@ -194,6 +194,20 @@ TEST(type_prop, interval_value_propagation_mul_rhs_1D_div) {
     ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
 }
 
+TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
+    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
+    auto shape_of = make_shared<op::v3::ShapeOf>(param);
+    auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
+    auto mul = make_shared<op::v1::Multiply>(cast_fp, op::Constant::create(element::f32, {1}, {2}));
+    auto div = make_shared<op::v1::Divide>(op::Constant::create(element::f32, {}, {192}), mul);
+    auto cast_int = make_shared<op::Convert>(div, element::i32);
+
+    auto r = make_shared<op::v1::Reshape>(param, cast_int, false);
+
+    ASSERT_EQ(r->get_element_type(), element::f32);
+    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(12, 48), Dimension(6, 24), 16}));
+}
+
 TEST(type_prop, interval_value_propagation_reduce) {
     auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
     auto shape_of = make_shared<op::v3::ShapeOf>(param);
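
Note on the fix: `evaluate_bound` previously sized the scratch `value1` tensor from `input1.get_shape()` alone, which under-allocates when the numerator has to be broadcast against the divisor (e.g. the scalar `192` in the new test). The sketch below illustrates the semantics of the `PartialShape::broadcast_merge_into` call the patch adds; it is a standalone example, not part of the patch, and it substitutes an explicit `AutoBroadcastType::NUMPY` spec for the `node->get_autob()` used in `divide.cpp`.

```cpp
// Standalone sketch (assumes the ngraph public headers are on the include path):
// shows why the merged, broadcast shape, rather than input1's own shape, must
// size the temporary HostTensors in evaluate_bound.
#include <iostream>

#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/partial_shape.hpp"

int main() {
    using namespace ngraph;

    // Mirrors the new test case: a scalar numerator (the constant 192)
    // divided by a tensor of interval shape values.
    PartialShape merged{};  // input1: scalar, rank 0
    const PartialShape divisor{Dimension(2, 8), Dimension(4, 16), 6};

    // broadcast_merge_into mutates its first argument into the merged shape
    // and returns false when the shapes cannot be broadcast together.
    if (!PartialShape::broadcast_merge_into(merged,
                                            divisor,
                                            op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY))) {
        std::cerr << "Argument shapes are inconsistent\n";
        return 1;
    }

    // Prints {2..8,4..16,6}: allocating value1 with input1's scalar shape
    // would under-size the buffer that Select writes the element-wise bounds into.
    std::cout << merged << std::endl;
    return 0;
}
```

For the new `interval_value_propagation_mul_div_lhs_1D` test, the expected intervals follow from evaluating 192 / (d * 2) per dimension: d0 in [2, 8] gives [12, 48], d1 in [4, 16] gives [6, 24], and d2 = 6 gives 16, matching the asserted `PartialShape({Dimension(12, 48), Dimension(6, 24), 16})`.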