Review RandomUniform for shape inference (#18800)

* Review RandomUniform for shape inference
- Check dynamic and static shape propagation
- Check label propagation
- Check preserve input values and labels
- Add template implementation of shape_infer
- Add unit tests

* Update RandomUniform copy test

* Use correct type for calling the reference implementation
This commit is contained in:
Pawel Raasz 2023-07-26 19:54:15 +02:00 committed by GitHub
parent 32f2868a5b
commit 2655512b2f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 364 additions and 257 deletions

View File

@ -73,10 +73,7 @@ public:
return m_state;
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
protected:

View File

@ -0,0 +1,61 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "dimension_util.hpp"
#include "openvino/op/random_uniform.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v8 {
/**
 * \brief Shape inference for RandomUniform (opset8).
 *
 * \tparam TShape   Type of the input shapes.
 * \tparam TRShape  Type of the result shape (deduced from TShape by default).
 *
 * \param op            Pointer to the RandomUniform operator.
 * \param input_shapes  Shapes of the three inputs: output-shape tensor, min value, max value.
 * \param ta            Tensor accessor used to read constant input data (empty accessor by default).
 *
 * \return Vector with a single inferred output shape; may be empty when the output-shape
 *         value cannot be read and TRShape is not PartialShape (see note below).
 */
template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TRShape> shape_infer(const RandomUniform* op,
                                 const std::vector<TShape>& input_shapes,
                                 const ITensorAccessor& ta = make_tensor_accessor()) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);

    // Input 0 carries the target output shape as a 1-D tensor.
    const auto& shape = input_shapes[0];
    NODE_SHAPE_INFER_CHECK(op,
                           input_shapes,
                           shape.rank().compatible(1),
                           "The rank of the tensor defining output shape must be equal to 1.");

    // Min (input 1) and max (input 2) must each be a scalar or a one-element 1-D tensor.
    const auto& min_shape = input_shapes[1];
    NODE_SHAPE_INFER_CHECK(op,
                           input_shapes,
                           min_shape.compatible(TRShape{}) || min_shape.compatible(TRShape{1}),
                           "Min value must be a scalar or one element 1D tensor.");

    const auto& max_shape = input_shapes[2];
    NODE_SHAPE_INFER_CHECK(op,
                           input_shapes,
                           max_shape.compatible(TRShape{}) || max_shape.compatible(TRShape{1}),
                           "Max value must be a scalar or one element 1D tensor.");

    // Validate min < max, but only when both values are available as constants via the accessor.
    if (const auto& const_min = get_input_const_data_as<TRShape, double>(op, 1, ta)) {
        if (const auto& const_max = get_input_const_data_as<TRShape, double>(op, 2, ta)) {
            NODE_VALIDATION_CHECK(op,
                                  const_min->front() < const_max->front(),
                                  "Min value must be less than max value. Got min value: ",
                                  const_min->front(),
                                  ", max value: ",
                                  const_max->front());
        }
    }

    auto output_shapes = std::vector<TRShape>();
    if (auto out_shape = get_input_const_data_as_shape<TRShape>(op, 0, ta)) {
        // The output shape value is known (constant data or usable bounds of input 0).
        output_shapes.push_back(std::move(*out_shape));
    } else if (std::is_same<TRShape, PartialShape>::value) {
        // Value unknown: in dynamic inference emit a dynamic shape whose rank is bounded by
        // the element count of input 0 when its rank is static, fully dynamic otherwise.
        // NOTE(review): for non-PartialShape result types this branch is not taken at runtime,
        // so the returned vector stays empty — callers are expected to handle a missing output.
        output_shapes.push_back(
            PartialShape::dynamic(shape.rank().is_static() ? shape[0].get_max_length() : Rank::dynamic()));
    }
    return output_shapes;
}
} // namespace v8
} // namespace op
} // namespace ov

View File

@ -2,22 +2,31 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/random_uniform.hpp"
#include <ngraph/validation_util.hpp>
#include "itt.hpp"
#include "ngraph/runtime/reference/random_uniform.hpp"
using namespace std;
using namespace ngraph;
#include "itt.hpp"
#include "openvino/op/random_uniform.hpp"
#include "random_uniform_shape_inference.hpp"
op::v8::RandomUniform::RandomUniform(const Output<Node>& out_shape,
const Output<Node>& min_val,
const Output<Node>& max_val,
const ngraph::element::Type& out_type,
uint64_t global_seed,
uint64_t op_seed)
namespace ov {
namespace op {
namespace v8 {
namespace validate {
inline bool shape_et(const element::Type& et) {
return (et == element::i32) || (et == element::i64);
}
inline bool out_et(const element::Type& et) {
return et.is_real() || shape_et(et);
}
} // namespace validate
RandomUniform::RandomUniform(const Output<Node>& out_shape,
const Output<Node>& min_val,
const Output<Node>& max_val,
const ngraph::element::Type& out_type,
uint64_t global_seed,
uint64_t op_seed)
: Op({out_shape, min_val, max_val}),
m_output_type(out_type),
m_global_seed(global_seed),
@ -25,93 +34,30 @@ op::v8::RandomUniform::RandomUniform(const Output<Node>& out_shape,
constructor_validate_and_infer_types();
}
void op::v8::RandomUniform::validate_and_infer_types() {
void RandomUniform::validate_and_infer_types() {
OV_OP_SCOPE(v8_RandomUniform_validate_and_infer_types);
const auto& shape_et = get_input_element_type(0);
NODE_VALIDATION_CHECK(this,
shape_et.is_dynamic() || shape_et == element::i32 || shape_et == element::i64,
shape_et.is_dynamic() || validate::shape_et(shape_et),
"Type of the input should be int32 or int64.");
ov::PartialShape output_shape = ov::PartialShape::dynamic();
const auto& input_shape = get_input_partial_shape(0);
if (input_shape.rank().is_static()) {
NODE_VALIDATION_CHECK(this,
input_shape.rank() == 1,
"The rank of the tensor defining output shape must be equal to 1.");
OPENVINO_SUPPRESS_DEPRECATED_START
if (!evaluate_as_partial_shape(input_value(0), output_shape)) {
OPENVINO_SUPPRESS_DEPRECATED_END
output_shape = ov::PartialShape::dynamic(input_shape[0]);
}
}
const auto& min_pshape = get_input_partial_shape(1);
const auto& max_pshape = get_input_partial_shape(2);
if (min_pshape.is_static()) {
const auto& min_rank = min_pshape.rank().get_length();
NODE_VALIDATION_CHECK(this, min_rank <= 1, "Min value must be a scalar or 1D tensor.");
if (min_rank == 1) {
NODE_VALIDATION_CHECK(this, min_pshape.compatible(ov::Shape{1}), "'min_val' should have 1 element.");
}
}
if (max_pshape.is_static()) {
const auto& max_rank = max_pshape.rank().get_length();
NODE_VALIDATION_CHECK(this, max_rank <= 1, "Max value must be a scalar or 1D tensor.");
if (max_rank == 1) {
NODE_VALIDATION_CHECK(this, max_pshape.compatible(ov::Shape{1}), "'max_val' should have 1 element.");
}
}
const element::Type& min_element_type = get_input_element_type(1);
element::Type max_element_type = get_input_element_type(2);
const auto& min_et = get_input_element_type(1);
const auto& max_et = get_input_element_type(2);
const auto& out_et = get_out_type();
NODE_VALIDATION_CHECK(this, min_et == max_et, "'min_val' should have the same type as 'max_val'.");
NODE_VALIDATION_CHECK(this,
min_element_type == max_element_type,
"'min_val' should have the same type as 'max_val'.");
NODE_VALIDATION_CHECK(this,
min_element_type == get_out_type(),
validate::out_et(out_et) && (out_et == min_et),
"'min_val' and 'max_val' should have the same type as 'out_type' attribute.");
OPENVINO_SUPPRESS_DEPRECATED_START
if (const auto& const_min = get_constant_from_source(input_value(1))) {
if (const auto& const_max = get_constant_from_source(input_value(2))) {
OPENVINO_SUPPRESS_DEPRECATED_END
if (get_out_type() == ngraph::element::Type_t::i64 || get_out_type() == ngraph::element::Type_t::i32) {
int64_t min_val = const_min->cast_vector<int64_t>()[0];
int64_t max_val = const_max->cast_vector<int64_t>()[0];
const auto input_shapes = get_node_input_partial_shapes(*this);
OPENVINO_SUPPRESS_DEPRECATED_END
const auto output_shapes = shape_infer(this, input_shapes);
NODE_VALIDATION_CHECK(this,
min_val < max_val,
"Min value must be less than max value. Got "
"min value: ",
min_val,
", max value: ",
max_val);
} else if (get_out_type().is_real()) {
double min_val = const_min->cast_vector<double>()[0];
double max_val = const_max->cast_vector<double>()[0];
NODE_VALIDATION_CHECK(this,
min_val < max_val,
"Min value must be less than max value. Got "
"min value: ",
min_val,
", max value: ",
max_val);
} else {
OPENVINO_THROW("Unsupported output type of RandomUniform: " + get_out_type().to_string());
}
}
}
set_output_type(0, get_out_type(), output_shape);
set_output_type(0, out_et, output_shapes.front());
}
bool op::v8::RandomUniform::visit_attributes(AttributeVisitor& visitor) {
bool RandomUniform::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v8_RandomUniform_visit_attributes);
visitor.on_attribute("output_type", m_output_type);
visitor.on_attribute("op_seed", m_op_seed);
@ -119,75 +65,38 @@ bool op::v8::RandomUniform::visit_attributes(AttributeVisitor& visitor) {
return true;
}
shared_ptr<Node> op::v8::RandomUniform::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> RandomUniform::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v8_RandomUniform_clone_with_new_inputs);
check_new_args_count(this, new_args);
auto ru_copy =
make_shared<v8::RandomUniform>(new_args[0], new_args[1], new_args[2], m_output_type, m_global_seed, m_op_seed);
auto ru_copy = std::make_shared<v8::RandomUniform>(new_args.at(0),
new_args.at(1),
new_args.at(2),
m_output_type,
m_global_seed,
m_op_seed);
ru_copy->m_state = this->m_state;
return ru_copy;
}
bool op::v8::RandomUniform::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool RandomUniform::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v8_RandomUniform_evaluate);
const uint64_t* out_shape;
std::vector<uint64_t> out_shape_uint64(shape_size(inputs[0]->get_shape()));
if (inputs[0]->get_element_type() == element::Type_t::u64) {
out_shape = inputs[0]->get_data_ptr<const uint64_t>();
} else if (inputs[0]->get_element_type() == element::Type_t::i32) {
auto out_shape_i32 = inputs[0]->get_data_ptr<const int32_t>();
std::transform(out_shape_i32,
out_shape_i32 + shape_size(inputs[0]->get_shape()),
out_shape_uint64.begin(),
[](const int32_t& elem) {
return static_cast<uint64_t>(elem);
});
out_shape = out_shape_uint64.data();
} else if (inputs[0]->get_element_type() == element::Type_t::i64) {
auto out_shape_i64 = inputs[0]->get_data_ptr<const int64_t>();
std::transform(out_shape_i64,
out_shape_i64 + shape_size(inputs[0]->get_shape()),
out_shape_uint64.begin(),
[](const int64_t& elem) {
return static_cast<uint64_t>(elem);
});
out_shape = out_shape_uint64.data();
} else {
OPENVINO_THROW("Unsupported type of out shape in RandomUniform operation: " +
inputs[0]->get_element_type().to_string());
auto input_shapes = std::vector<PartialShape>();
input_shapes.reserve(inputs.size());
for (auto& t : inputs) {
input_shapes.emplace_back(t.get_shape());
}
const auto out_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape();
const auto out_dims = std::vector<uint64_t>(out_shape.begin(), out_shape.end());
element::Type_t t_out = get_out_type();
char* out;
switch (t_out) {
case element::Type_t::i32:
out = (char*)outputs[0]->get_data_ptr<const int32_t>();
break;
case element::Type_t::i64:
out = (char*)outputs[0]->get_data_ptr<const int64_t>();
break;
case element::Type_t::f16:
out = (char*)outputs[0]->get_data_ptr<const float16>();
break;
case element::Type_t::bf16:
out = (char*)outputs[0]->get_data_ptr<const bfloat16>();
break;
case element::Type_t::f32:
out = (char*)outputs[0]->get_data_ptr<const float>();
break;
case element::Type_t::f64:
out = (char*)outputs[0]->get_data_ptr<const double>();
break;
default:
OPENVINO_THROW("Unsupported type of RandomUniform: " + get_out_type().to_string());
}
const auto& t_out = get_out_type();
OPENVINO_ASSERT(validate::out_et(t_out), "Unsupported type of RandomUniform: " + t_out.get_type_name());
auto state = ngraph::runtime::reference::random_uniform(out_shape,
inputs[1]->get_data_ptr<const char>(),
inputs[2]->get_data_ptr<const char>(),
out,
inputs[0]->get_shape(),
auto state = ngraph::runtime::reference::random_uniform(out_dims.data(),
static_cast<const char*>(inputs[1].data()),
static_cast<const char*>(inputs[2].data()),
static_cast<char*>(outputs[0].data()),
inputs[0].get_shape(),
get_out_type(),
get_global_seed(),
get_op_seed(),
@ -199,22 +108,10 @@ bool op::v8::RandomUniform::evaluate(const HostTensorVector& outputs, const Host
return true;
}
bool op::v8::RandomUniform::has_evaluate() const {
bool RandomUniform::has_evaluate() const {
OV_OP_SCOPE(v8_RandomUniform_has_evaluate);
if (get_input_element_type(0) != ngraph::element::i32 && get_input_element_type(0) != ngraph::element::i64) {
return false;
}
switch (get_out_type()) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::f16:
case ngraph::element::bf16:
case ngraph::element::f32:
case ngraph::element::f64:
return true;
default:
break;
}
return false;
return validate::shape_et(get_input_element_type(0)) && validate::out_et(get_out_type());
}
} // namespace v8
} // namespace op
} // namespace ov

View File

@ -406,6 +406,9 @@ TEST(copy, loop) {
}
TEST(copy, random_uniform) {
auto shape = std::vector<int64_t>{1, 2, 3};
float min = 0., max = 1.;
const auto min_val_param = make_shared<op::Parameter>(element::f32, Shape{1});
const auto max_val_param = make_shared<op::Parameter>(element::f32, Shape{1});
auto out_shape = make_shared<op::Constant>(element::i64, Shape{3}, std::vector<int64_t>{1, 2, 3});
@ -413,12 +416,11 @@ TEST(copy, random_uniform) {
std::make_shared<ov::opset8::RandomUniform>(out_shape, min_val_param, max_val_param, element::f32, 150, 10);
// Call `evaluate` to update m_state
ru->evaluate({make_host_tensor<element::i64>(out_shape->get_shape(), out_shape->get_vector<int64_t>()),
make_host_tensor<element::f32>(min_val_param->get_shape(), {0}),
make_host_tensor<element::f32>(max_val_param->get_shape(), {1})},
{make_host_tensor<element::i64>(out_shape->get_shape(), out_shape->get_vector<int64_t>()),
make_host_tensor<element::f32>(min_val_param->get_shape(), {0}),
make_host_tensor<element::f32>(max_val_param->get_shape(), {1})});
auto outputs = ov::TensorVector{{element::i64, out_shape->get_shape(), shape.data()}};
ru->evaluate(outputs,
ov::TensorVector{{element::i64, out_shape->get_shape(), shape.data()},
{element::f32, min_val_param->get_shape(), &min},
{element::f32, max_val_param->get_shape(), &max}});
auto out_shape_c = make_shared<op::Constant>(element::i64, Shape{4}, std::vector<int64_t>{4, 3, 2, 1});
const auto min_val_param_c = make_shared<op::Parameter>(element::f32, Shape{1});

View File

@ -2,13 +2,31 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/type_prop.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ngraph;
using namespace testing;
using namespace ov;
TEST(type_prop, random_uniform_default_ctor) {
    const auto shape_const = opset8::Constant::create(element::i64, Shape{4}, {2, 3, 4, 5});
    const auto lower = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
    const auto upper = make_shared<opset8::Constant>(element::f32, Shape{}, 1.f);

    // Build the op via the default constructor and configure it entirely through setters.
    auto op = make_shared<opset8::RandomUniform>();
    op->set_arguments(OutputVector{shape_const, lower, upper});
    op->set_global_seed(121);
    op->set_op_seed(100);
    op->set_out_type(element::f32);
    op->validate_and_infer_types();

    EXPECT_EQ(op->get_output_element_type(0), element::f32);
    EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 4, 5}));
}
TEST(type_prop, random_uniform_type_shape) {
auto out_shape = opset8::Constant::create(element::i64, Shape{4}, {2, 3, 4, 5});
@ -18,7 +36,7 @@ TEST(type_prop, random_uniform_type_shape) {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape{2, 3, 4, 5}));
EXPECT_EQ(r->get_output_partial_shape(0), (PartialShape{2, 3, 4, 5}));
}
TEST(type_prop, random_uniform_param_input) {
@ -40,12 +58,14 @@ TEST(type_prop, random_uniform_dynamic_shape) {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::i64, 100, 200);
EXPECT_EQ(r->get_output_element_type(0), element::i64);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
EXPECT_EQ(r->get_output_partial_shape(0), (PartialShape::dynamic()));
}
TEST(type_prop, random_uniform_dynamic_shape_1) {
auto shape = make_shared<opset8::Parameter>(element::i32, PartialShape{{0, 10}, 4, {3, 7}, -1});
auto out_shape = make_shared<opset8::ShapeOf>(shape);
TEST(type_prop, random_uniform_dynamic_shape_with_labels) {
auto shape = PartialShape{{0, 10}, 4, {3, 7}, -1};
set_shape_labels(shape, 10);
auto param = make_shared<opset8::Parameter>(element::i32, shape);
auto out_shape = make_shared<opset8::ShapeOf>(param);
auto min_val = make_shared<opset8::Constant>(element::i64, Shape{}, 5);
auto max_val = make_shared<opset8::Constant>(element::i64, Shape{}, 10);
@ -54,6 +74,7 @@ TEST(type_prop, random_uniform_dynamic_shape_1) {
EXPECT_EQ(r->get_output_element_type(0), element::i64);
EXPECT_EQ(r->get_output_partial_shape(0), PartialShape({{0, 10}, 4, {3, 7}, -1}));
EXPECT_THAT(get_shape_labels(r->get_output_partial_shape(0)), ElementsAre(10, 11, 12, 13));
}
TEST(type_prop, random_uniform_dynamic_rank) {
@ -72,31 +93,19 @@ TEST(type_prop, random_uniform_invalid_out_shape_type) {
auto min_val = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
auto max_val = make_shared<opset8::Constant>(element::f32, Shape{}, 1.f);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid output shape.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Type of the input should be int32 or int64."));
} catch (...) {
FAIL() << "Check failed for unexpected reason.";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100),
NodeValidationFailure,
HasSubstr("Type of the input should be int32 or int64."));
}
TEST(type_prop, random_uniform_invalid_out_shape_rank) {
auto out_shape = make_shared<opset8::Parameter>(element::i32, Shape{3, 2});
auto min_val = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
auto max_val = make_shared<opset8::Constant>(element::f32, Shape{}, 1.f);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid output shape.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
std::string("The rank of the tensor defining output shape must be equal to 1."));
} catch (...) {
FAIL() << "Check failed for unexpected reason.";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100),
NodeValidationFailure,
HasSubstr("The rank of the tensor defining output shape must be equal to 1."));
}
TEST(type_prop, random_uniform_invalid_min_val) {
@ -104,15 +113,9 @@ TEST(type_prop, random_uniform_invalid_min_val) {
auto min_val = opset8::Constant::create(element::f32, Shape{2}, {2, 3});
auto max_val = make_shared<opset8::Constant>(element::f32, Shape{}, 1.f);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid min value.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("'min_val' should have 1 element."));
} catch (...) {
FAIL() << "Check failed for unexpected reason.";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100),
NodeValidationFailure,
HasSubstr("Min value must be a scalar or one element 1D tensor."));
}
TEST(type_prop, random_uniform_invalid_max_val) {
@ -120,15 +123,9 @@ TEST(type_prop, random_uniform_invalid_max_val) {
auto min_val = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
auto max_val = opset8::Constant::create(element::f32, Shape{3}, {2, 3, 5});
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid max value.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("'max_val' should have 1 element."));
} catch (...) {
FAIL() << "Check failed for unexpected reason.";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100),
NodeValidationFailure,
HasSubstr("Max value must be a scalar or one element 1D tensor."));
}
TEST(type_prop, random_uniform_invalid_min_max_val_type_case1) {
@ -136,15 +133,9 @@ TEST(type_prop, random_uniform_invalid_min_max_val_type_case1) {
auto min_val = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
auto max_val = make_shared<opset8::Constant>(element::i32, Shape{}, 100);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid min value type.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("'min_val' should have the same type as 'max_val'."));
} catch (...) {
FAIL() << "Check failed for unexpected reason";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100),
NodeValidationFailure,
HasSubstr("'min_val' should have the same type as 'max_val'."));
}
TEST(type_prop, random_uniform_invalid_min_max_val_type_case2) {
@ -152,16 +143,9 @@ TEST(type_prop, random_uniform_invalid_min_max_val_type_case2) {
auto min_val = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
auto max_val = make_shared<opset8::Constant>(element::f32, Shape{}, 1.f);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::i32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid min and max value type.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
std::string("'min_val' and 'max_val' should have the same type as 'out_type' attribute."));
} catch (...) {
FAIL() << "Check failed for unexpected reason";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::i32, 120, 100),
NodeValidationFailure,
HasSubstr("'min_val' and 'max_val' should have the same type as 'out_type' attribute."));
}
TEST(type_prop, random_uniform_invalid_min_max_values_case1) {
@ -169,15 +153,9 @@ TEST(type_prop, random_uniform_invalid_min_max_values_case1) {
auto min_val = make_shared<opset8::Constant>(element::f32, Shape{}, 1.f);
auto max_val = make_shared<opset8::Constant>(element::f32, Shape{}, 0.f);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid min and max values.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Min value must be less than max value."));
} catch (...) {
FAIL() << "Check failed for unexpected reason";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100),
NodeValidationFailure,
HasSubstr("Min value must be less than max value."));
}
TEST(type_prop, random_uniform_invalid_min_max_values_case2) {
@ -185,15 +163,9 @@ TEST(type_prop, random_uniform_invalid_min_max_values_case2) {
auto min_val = make_shared<opset8::Constant>(element::i32, Shape{}, 100);
auto max_val = make_shared<opset8::Constant>(element::i32, Shape{}, 100);
try {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::i32, 120, 100);
// Should have thrown, so fail if it didn't
FAIL() << "Unexpected pass with invalid min and max values.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Min value must be less than max value."));
} catch (...) {
FAIL() << "Check failed for unexpected reason";
}
OV_EXPECT_THROW(ignore = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::i32, 120, 100),
NodeValidationFailure,
HasSubstr("Min value must be less than max value."));
}
TEST(type_prop, random_uniform_min_max_1d_tensors) {
@ -204,5 +176,5 @@ TEST(type_prop, random_uniform_min_max_1d_tensors) {
auto r = make_shared<opset8::RandomUniform>(out_shape, min_val, max_val, element::f32, 120, 100);
EXPECT_EQ(r->get_output_element_type(0), element::f32);
EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape{2, 3, 4, 5}));
EXPECT_EQ(r->get_output_partial_shape(0), (PartialShape{2, 3, 4, 5}));
}

View File

@ -72,6 +72,7 @@
#include "prior_box_shape_inference.hpp"
#include "proposal_shape_inference.hpp"
#include "psroi_pooling_shape_inference.hpp"
#include "random_uniform_shape_inference.hpp"
#include "range_shape_inference.hpp"
#include "rdft_shape_inference.hpp"
#include "reduce_shape_inference.hpp"
@ -409,6 +410,7 @@ const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{
_OV_OP_SHAPE_INFER_MASK_REG(opset8::GatherND, ShapeInferTA, util::bit::mask()),
_OV_OP_SHAPE_INFER_MASK_REG(opset8::MaxPool, ShapeInferPaddingTA, util::bit::mask()),
_OV_OP_SHAPE_INFER_MASK_REG(opset8::PriorBox, ShapeInferTA, util::bit::mask(0)),
_OV_OP_SHAPE_INFER_MASK_REG(opset8::RandomUniform, ShapeInferTA, util::bit::mask(0, 1, 2)),
_OV_OP_SHAPE_INFER_MASK_REG(opset8::Slice, ShapeInferTA, util::bit::mask(1, 2, 3, 4)),
_OV_OP_SHAPE_INFER_NON_TEMPLATE_REG(opset8::Softmax, ShapeInferCopy),
// opset7

View File

@ -0,0 +1,167 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset12.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for RandomUniform v8 static shape inference tests.
class RandomUniformV8StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v8::RandomUniform> {
protected:
    // Base-class SetUp is intentionally not invoked; each test constructs its own operator.
    void SetUp() override {}

    uint64_t global_seed = 120;
    uint64_t op_seed = 100;
};
TEST_F(RandomUniformV8StaticShapeInferenceTest, default_ctor_no_args) {
    // Operator created with the default constructor and configured through setters only.
    op = make_op();
    op->set_out_type(element::i32);
    op->set_global_seed(global_seed);
    op->set_op_seed(op_seed);

    int32_t lower = 10, upper = 15;
    int64_t dims[] = {2, 4, 12, 13};
    const auto const_data = std::unordered_map<size_t, Tensor>{{0, {element::i64, ov::Shape{4}, dims}},
                                                               {1, {element::i32, ov::Shape{1}, &lower}},
                                                               {2, {element::i32, ov::Shape{}, &upper}}};

    input_shapes = ShapeVector{{4}, {1}, {}};
    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({2, 4, 12, 13}));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_dynamic_rank) {
    // All three inputs have fully dynamic rank; data comes only from the accessor map.
    const auto shape_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    const auto min_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    const auto max_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    op = make_op(shape_in, min_in, max_in, element::i64, global_seed, op_seed);

    int64_t lower = 1, upper = 15;
    int64_t dims[] = {2, 4, 12, 13, 2};
    const auto const_data = std::unordered_map<size_t, Tensor>{{0, {element::i64, ov::Shape{5}, dims}},
                                                               {1, {element::i64, ov::Shape{}, &lower}},
                                                               {2, {element::i64, ov::Shape{}, &upper}}};

    input_shapes = ShapeVector{{5}, {}, {}};
    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({2, 4, 12, 13, 2}));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_static_rank) {
    // Inputs have static rank 1 but dynamic dimensions; values are supplied via the accessor.
    const auto shape_in = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic(1));
    const auto min_in = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(1));
    const auto max_in = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(1));
    op = make_op(shape_in, min_in, max_in, element::f32, global_seed, op_seed);

    float lower = 1., upper = 15.;
    int32_t dims[] = {12, 13, 2};
    const auto const_data = std::unordered_map<size_t, Tensor>{{0, {element::i32, ov::Shape{3}, dims}},
                                                               {1, {element::f32, ov::Shape{1}, &lower}},
                                                               {2, {element::f32, ov::Shape{1}, &upper}}};

    input_shapes = ShapeVector{{3}, {}, {}};
    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({12, 13, 2}));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_as_const) {
    const auto shape_in = op::v0::Constant::create(element::i32, Shape{6}, {2, 1, 3, 5, 1, 7});
    const auto min_in = op::v0::Constant::create(element::f16, Shape{}, {2});
    const auto max_in = op::v0::Constant::create(element::f16, Shape{1}, {16});
    op = make_op(shape_in, min_in, max_in, element::f16, global_seed, op_seed);

    // Everything is read from the op's constant inputs, no accessor data required.
    input_shapes = ShapeVector{{6}, {}, {1}};
    output_shapes = shape_inference(op.get(), input_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({2, 1, 3, 5, 1, 7}));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, some_inputs_are_const_some_dynamic) {
    // Mix of sources: min is a node constant, shape and max come via the accessor map.
    const auto shape_in = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
    const auto min_in = op::v0::Constant::create(element::f32, Shape{}, {2});
    const auto max_in = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    op = make_op(shape_in, min_in, max_in, element::f32, global_seed, op_seed);

    float upper = 15.;
    int32_t dims[] = {12, 13, 2};
    const auto const_data = std::unordered_map<size_t, Tensor>{{0, {element::i32, ov::Shape{3}, dims}},
                                                               {2, {element::f32, ov::Shape{1}, &upper}}};

    input_shapes = ShapeVector{{3}, {}, {}};
    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({12, 13, 2}));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, min_not_lt_max) {
    const auto shape_in = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
    const auto min_in = op::v0::Constant::create(element::i64, Shape{}, {2});
    const auto max_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    op = make_op(shape_in, min_in, max_in, element::i64, global_seed, op_seed);

    // Max equals min (both 2), which violates the strict min < max requirement.
    int64_t upper = 2;
    int32_t dims[] = {12, 13, 2};
    const auto const_data = std::unordered_map<size_t, Tensor>{{0, {element::i32, ov::Shape{3}, dims}},
                                                               {2, {element::i64, ov::Shape{1}, &upper}}};

    input_shapes = ShapeVector{{3}, {}, {}};
    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
                    NodeValidationFailure,
                    HasSubstr("Min value must be less than max value. Got min value:"));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, out_shape_input_not_rank_1) {
    const auto shape_in = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
    const auto min_in = op::v0::Constant::create(element::i64, Shape{}, {2});
    const auto max_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    op = make_op(shape_in, min_in, max_in, element::i64, global_seed, op_seed);

    int64_t upper = 20;
    int32_t dims[] = {12, 13, 2};
    const auto const_data = std::unordered_map<size_t, Tensor>{{0, {element::i32, ov::Shape{3}, dims}},
                                                               {2, {element::i64, ov::Shape{1}, &upper}}};

    // Output-shape input is declared rank-2 ({3, 1}), which must be rejected.
    input_shapes = ShapeVector{{3, 1}, {}, {}};
    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data),
                    NodeValidationFailure,
                    HasSubstr("The rank of the tensor defining output shape must be equal to 1"));
}
TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_dynamic_no_const_data) {
    const auto shape_in = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
    const auto min_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    const auto max_in = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    op = make_op(shape_in, min_in, max_in, element::i64, global_seed, op_seed);

    // Without constant data for port 0 the static output shape cannot be determined.
    input_shapes = ShapeVector{{3}, {}, {}};
    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes),
                    NodeValidationFailure,
                    HasSubstr("Static shape inference lacks constant data on port"));
}

View File

@ -36,6 +36,15 @@ void shape_inference(ov::Node* op,
OPENVINO_ASSERT(result, "There are no output shapes in shape inference result");
output_shapes = std::move(*result);
}
/**
 * \brief Run shape inference for an operator and return the inferred output shapes.
 *
 * \tparam T  Map type holding constant input data (port index -> Tensor).
 *
 * \param op             Operator whose output shapes are inferred.
 * \param input_shapes   Static shapes of the operator's inputs.
 * \param constant_data  Optional constant data for inputs required by shape inference.
 *
 * \return Inferred output shapes.
 */
template <class T = std::unordered_map<size_t, Tensor>>
ShapeVector shape_inference(ov::Node* op, const ShapeVector& input_shapes, const T& constant_data = T{}) {
    const auto in_shapes = intel_cpu::make_static_shape_refs(input_shapes);
    const auto shape_infer = intel_cpu::make_shape_inference(op->shared_from_this());
    auto result = shape_infer->infer(in_shapes, make_tensor_accessor(constant_data));

    OPENVINO_ASSERT(result, "There are no output shapes in shape inference result");
    // Move the payload out of the local result instead of deep-copying the whole ShapeVector.
    return std::move(*result);
}
} // namespace intel_cpu
} // namespace ov