Fixed tensor shapes to work correctly when the shape of the first arg is smaller than the shape of the second arg (#9368)
Added a corresponding unit test.
This commit is contained in:
parent
8d8ceeb5d7
commit
04386bb667
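Before the diff, a hedged repro sketch of the scenario the summary describes, modeled directly on the new unit test added at the bottom of this change (the include and namespaces are assumptions based on the 2021-era nGraph layout, not part of the commit): with NumPy auto-broadcast, Divide(scalar, 1-D tensor) yields a 1-D result, so intermediate tensors sized only from the first argument are too small.

    #include "ngraph/ngraph.hpp"  // assumed umbrella header
    using namespace ngraph;
    using namespace std;

    int main() {
        // Scalar numerator (shape {}) divided by a 1-D denominator (shape {3}),
        // as in the new interval_value_propagation_mul_div_lhs_1D test below.
        auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
        auto cast_fp = make_shared<op::Convert>(make_shared<op::v3::ShapeOf>(param), element::f32);
        auto mul = make_shared<op::v1::Multiply>(cast_fp, op::Constant::create(element::f32, {1}, {2}));
        auto div = make_shared<op::v1::Divide>(op::Constant::create(element::f32, {}, {192}), mul);
        // Bound evaluation for `div` must allocate its temporaries with the
        // broadcast shape {3}, not the numerator's shape {}; before this fix
        // they were sized from input1 alone.
        return 0;
    }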
@@ -66,6 +66,11 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
     const auto& input1 = node->input_value(0);
     const auto& input2 = node->input_value(1);
 
+    // broadcast shapes to allocate tensors of correct size for operations with both inputs
+    PartialShape input_shape = input1.get_partial_shape();
+    NGRAPH_CHECK(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()),
+                 "Argument shapes in divide operation are inconsistent.");
+
     const auto& input2_low = input2.get_tensor().get_lower_value();
     if (input2_low == nullptr)
         return false;
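The added NGRAPH_CHECK leans on PartialShape::broadcast_merge_into, which folds the second shape into the first under the node's auto-broadcast spec and returns false on incompatible shapes. A small standalone illustration (include paths and the assert are assumptions for demonstration):

    #include "ngraph/partial_shape.hpp"
    #include "ngraph/op/util/attr_types.hpp"
    #include <cassert>
    using namespace ngraph;

    int main() {
        // Merge a scalar lhs shape with a 1-D rhs shape under NumPy rules.
        PartialShape merged{};  // shape of the scalar numerator
        bool ok = PartialShape::broadcast_merge_into(
            merged, PartialShape{3}, op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY));
        assert(ok && merged.same_scheme(PartialShape{3}));
        // `merged` is now the shape the temporaries must be allocated with.
        return 0;
    }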
@@ -103,7 +108,7 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
         return status;
 
     if (!is_upper) {
-        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input1.get_shape());
+        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
         status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_low, input1_up});
         if (!status)
             return status;
@@ -130,7 +135,7 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
         if (!status)
             return status;
     } else {
-        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input1.get_shape());
+        auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
         status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_up, input1_low});
         if (!status)
             return status;
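Why both branches need the resized value1 (roughly, assuming the surrounding code follows standard interval arithmetic for division): for a numerator n in [n_lo, n_hi] and a positive denominator, the lower bound of the quotient uses n_lo and the upper bound uses n_hi, with the choice flipped where the denominator is negative. The Select calls pick the right endpoint per element via input2_positive_up_mask, and since the mask and the endpoints are broadcast together, value1 must already have the merged (broadcast) shape rather than input1's own shape.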
@@ -166,7 +166,7 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_scalar) {
     ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
 }
 
-TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
+TEST(type_prop, interval_value_propagation_mul_lhs_1D_div) {
     auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
     auto shape_of = make_shared<op::v3::ShapeOf>(param);
     auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
@@ -180,7 +180,7 @@ TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
     ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
 }
 
-TEST(type_prop, interval_value_propagation_mul_div_rhs_1D) {
+TEST(type_prop, interval_value_propagation_mul_rhs_1D_div) {
     auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
     auto shape_of = make_shared<op::v3::ShapeOf>(param);
     auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
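The two renames above (mul_div_lhs_1D to mul_lhs_1D_div, mul_div_rhs_1D to mul_rhs_1D_div) free the original names for the new coverage added below, where a scalar numerator is divided by a 1-D denominator: exactly the case the fix targets, with the first argument's shape smaller than the second's.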
@@ -194,6 +194,20 @@ TEST(type_prop, interval_value_propagation_mul_div_rhs_1D) {
     ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(8, 32), 4}));
 }
 
+TEST(type_prop, interval_value_propagation_mul_div_lhs_1D) {
+    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 6});
+    auto shape_of = make_shared<op::v3::ShapeOf>(param);
+    auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
+    auto mul = make_shared<op::v1::Multiply>(cast_fp, op::Constant::create(element::f32, {1}, {2}));
+    auto div = make_shared<op::v1::Divide>(op::Constant::create(element::f32, {}, {192}), mul);
+    auto cast_int = make_shared<op::Convert>(div, element::i32);
+
+    auto r = make_shared<op::v1::Reshape>(param, cast_int, false);
+
+    ASSERT_EQ(r->get_element_type(), element::f32);
+    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(12, 48), Dimension(6, 24), 16}));
+}
+
 TEST(type_prop, interval_value_propagation_reduce) {
     auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
     auto shape_of = make_shared<op::v3::ShapeOf>(param);
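Worked check of the new test's expected intervals: ShapeOf(param) yields [2..8, 4..16, 6]; multiplying by 2 gives [4..16, 8..32, 12]; dividing 192 by those intervals gives [192/16..192/4, 192/32..192/8, 192/12] = [12..48, 6..24, 16], matching the asserted PartialShape({Dimension(12, 48), Dimension(6, 24), 16}).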