Removes legacy transformations from CNNNetworkNGraphImpl::reshape (#15853)
* Removes legacy transformations from CNNNetworkNGraphImpl::reshape
* Removes legacy transformations from CNNNetworkNGraphImpl::reshape
* 6 more models propagate shapes more precisely
* Removes legacy includes
* Fix invalidation
* Test change
* win fix
* Ilyas' suggestion
* Unary ops -- removed shape relying on the output of the op, used shapes from the input tensor instead
* Code clean up
* Equal: bounds evaluation
* Equal: bounds evaluation
* Restrict TypeRelaxed from partial_value propagation
* TypeRelaxed: propagate lower/upper bounds
* Remove debug prints
* fix build
* GPU shape inference problem fixed
* Generate Proposals: better dynamic shape propagation
* Style
Parent: b921bf2e29
Commit: bc7a121a20
@@ -8,6 +8,7 @@
 #include <memory>
 #include <mutex>
+#include <openvino/op/convert.hpp>
 #include <openvino/op/parameter.hpp>
 #include <string>
 #include <transformations_visibility.hpp>
 #include <vector>
@@ -223,6 +224,9 @@ public:
     bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
     OPENVINO_SUPPRESS_DEPRECATED_END
 
+    bool evaluate_lower(TensorVector& outputs) const override;
+    bool evaluate_upper(TensorVector& outputs) const override;
+
     std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
 
     bool visit_attributes(AttributeVisitor& visitor) override;
@@ -233,6 +237,7 @@ private:
         validate_and_infer_types();
     }
 
+    bool evaluate_bound(TensorVector& outputs, bool is_upper) const;
     init_rt_result init_rt = init_rt_info(*this);
 };
 
@@ -296,6 +301,41 @@ bool TypeRelaxed<BaseOp>::evaluate(const HostTensorVector& outputs, const HostTe
 }
 OPENVINO_SUPPRESS_DEPRECATED_END
 
+std::unordered_map<size_t, std::pair<ov::Tensor, ov::Tensor>> OPENVINO_API
+convert_input_types(OutputVector& inputs, const element::TypeVector& types);
+ov::TensorVector OPENVINO_API get_output_tensors_of_original_type(const ov::TensorVector& fake_output_tensors,
+                                                                  const element::TypeVector& types);
+void OPENVINO_API
+reset_input_types(const std::unordered_map<size_t, std::pair<ov::Tensor, ov::Tensor>>& original_input_vals,
+                  OutputVector& inputs);
+bool OPENVINO_API convert_outputs_to_fake_type(ov::TensorVector& outputs,
+                                               ov::TensorVector& original_outputs,
+                                               bool is_upper);
+
+template <typename BaseOp>
+bool TypeRelaxed<BaseOp>::evaluate_bound(TensorVector& outputs, bool is_upper) const {
+    auto inputs = Op::input_values();
+    const auto& original_inputs = convert_input_types(inputs, m_input_data_types);
+    auto original_outputs = get_output_tensors_of_original_type(outputs, m_original_output_data_types);
+    if ((is_upper && !BaseOp::evaluate_upper(original_outputs)) ||
+        (!is_upper && !BaseOp::evaluate_lower(original_outputs))) {
+        reset_input_types(original_inputs, inputs);
+        return false;
+    }
+    reset_input_types(original_inputs, inputs);
+    return convert_outputs_to_fake_type(outputs, original_outputs, is_upper);
+}
+
+template <typename BaseOp>
+bool TypeRelaxed<BaseOp>::evaluate_lower(TensorVector& outputs) const {
+    return evaluate_bound(outputs, false);
+}
+
+template <typename BaseOp>
+bool TypeRelaxed<BaseOp>::evaluate_upper(TensorVector& outputs) const {
+    return evaluate_bound(outputs, true);
+}
+
 template <typename BaseOp>
 void TypeRelaxed<BaseOp>::validate_and_infer_types() {
     element::TypeVector old_input_types;
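The new `evaluate_bound` member above follows a wrap/evaluate/unwrap pattern: inputs are temporarily converted back to the original element types, the base op's bound evaluators run on them, and the results are converted back to the relaxed ("fake") output types; `evaluate_lower`/`evaluate_upper` are thin dispatchers over it. A minimal standalone sketch of that lower/upper dispatch idea — plain C++ with a hypothetical `Interval` type and `relu` op, not the OpenVINO API:

```cpp
// Standalone sketch of the lower/upper dispatch pattern used above.
// `Interval` is hypothetical; only the control flow mirrors
// TypeRelaxed<BaseOp>::evaluate_bound / evaluate_lower / evaluate_upper.
#include <algorithm>
#include <iostream>

struct Interval {
    double lo, hi;  // bounds of a value that is only partially known
};

// Monotonic op: bounds of relu(x) are relu applied to the bounds of x.
static double relu(double v) { return std::max(0.0, v); }

static bool evaluate_bound(const Interval& in, double& out, bool is_upper) {
    out = relu(is_upper ? in.hi : in.lo);
    return true;  // report success, as the real evaluators do
}

static bool evaluate_lower(const Interval& in, double& out) { return evaluate_bound(in, out, false); }
static bool evaluate_upper(const Interval& in, double& out) { return evaluate_bound(in, out, true); }

int main() {
    Interval x{-3.0, 5.0};
    double lo = 0, hi = 0;
    if (evaluate_lower(x, lo) && evaluate_upper(x, hi))
        std::cout << "relu(x) is in [" << lo << ", " << hi << "]\n";  // [0, 5]
}
```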
@@ -4,223 +4,222 @@
 
 #include <gtest/gtest.h>
 
-#include <ngraph/opsets/opset1.hpp>
-#include <ngraph/pass/constant_folding.hpp>
-#include <ngraph/pass/manager.hpp>
+#include <openvino/opsets/opset1.hpp>
+#include <openvino/pass/constant_folding.hpp>
+#include <openvino/pass/manager.hpp>
 #include <ov_ops/type_relaxed.hpp>
 
 #include "common_test_utils/test_common.hpp"
 #include "openvino/core/rt_info.hpp"
 #include "transformations/convert_precision.hpp"
 
-namespace element = ngraph::element;
+namespace element = ov::element;
 using std::make_shared;
 using TypeVector = element::TypeVector;
 
 using TypeRelaxedTests = CommonTestUtils::TestsCommon;
 
 TEST_F(TypeRelaxedTests, noOverrideCopyCtor) {
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
+        ov::PartialShape shape({1, 3, 22, 22});
         element::Type type(element::Type_t::f32);
-        auto param = make_shared<ngraph::opset1::Parameter>(type, shape);
-        auto op = ngraph::opset1::Relu(param);
-        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ngraph::opset1::Relu>>(op);
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+        auto param = make_shared<ov::opset1::Parameter>(type, shape);
+        auto op = ov::opset1::Relu(param);
+        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ov::opset1::Relu>>(op);
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph::ParameterVector params = {param};
-        ngraph::ResultVector results = {result};
+        ov::ParameterVector params = {param};
+        ov::ResultVector results = {result};
 
-        ngraph = make_shared<ngraph::Function>(results, params);
+        model = make_shared<ov::Model>(results, params);
 
         ASSERT_EQ(element::f32, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(element::f32, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(3, ngraph->get_ops().size());
+    ASSERT_EQ(3, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, overrideOutputCopyCtor) {
     auto input_type = element::f32;
     auto overriden_type = element::i32;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param = make_shared<ngraph::opset1::Parameter>(input_type, shape);
-        auto op = ngraph::opset1::Relu(param);
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param = make_shared<ov::opset1::Parameter>(input_type, shape);
+        auto op = ov::opset1::Relu(param);
         auto relaxed_op =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Relu>>(op, TypeVector{}, TypeVector{overriden_type});
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Relu>>(op, TypeVector{}, TypeVector{overriden_type});
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
 
         ASSERT_EQ(input_type, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(overriden_type, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(3, ngraph->get_ops().size());
+    ASSERT_EQ(3, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, overrideInputCopyCtor) {
     auto input_type = element::f32;
     auto overriden_type = element::i32;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param = make_shared<ngraph::opset1::Parameter>(input_type, shape);
-        auto op = ngraph::opset1::Relu(param);
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param = make_shared<ov::opset1::Parameter>(input_type, shape);
+        auto op = ov::opset1::Relu(param);
         auto relaxed_op =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Relu>>(op, TypeVector{overriden_type}, TypeVector{});
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Relu>>(op, TypeVector{overriden_type}, TypeVector{});
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
 
         ASSERT_EQ(input_type, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(overriden_type, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(3, ngraph->get_ops().size());
+    ASSERT_EQ(3, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, mixedInputsAutoOutput) {
     auto input_type1 = element::u8;
     auto input_type2 = element::i8;
     auto overriden_type = element::i16;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param1 = make_shared<ngraph::opset1::Parameter>(input_type1, shape);
-        auto param2 = make_shared<ngraph::opset1::Parameter>(input_type2, shape);
-        auto op = ngraph::opset1::Add(ov::op::TemporaryReplaceOutputType(param1->output(0), overriden_type).get(),
-                                      ov::op::TemporaryReplaceOutputType(param2->output(0), overriden_type).get());
-        auto relaxed_op =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Add>>(op,
-                                                                  TypeVector{overriden_type, overriden_type},
-                                                                  TypeVector{});
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param1 = make_shared<ov::opset1::Parameter>(input_type1, shape);
+        auto param2 = make_shared<ov::opset1::Parameter>(input_type2, shape);
+        auto op = ov::opset1::Add(ov::op::TemporaryReplaceOutputType(param1->output(0), overriden_type).get(),
                                  ov::op::TemporaryReplaceOutputType(param2->output(0), overriden_type).get());
+        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ov::opset1::Add>>(op,
                                                                            TypeVector{overriden_type, overriden_type},
                                                                            TypeVector{});
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1, param2});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1, param2});
 
         ASSERT_EQ(input_type1, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(input_type2, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(overriden_type, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(4, ngraph->get_ops().size());
+    ASSERT_EQ(4, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, mixedInputsAutoOutputForwardCtor) {
     auto input_type1 = element::u8;
     auto input_type2 = element::i8;
     auto overriden_type = element::i16;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param1 = make_shared<ngraph::opset1::Parameter>(input_type1, shape);
-        auto param2 = make_shared<ngraph::opset1::Parameter>(input_type2, shape);
-        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ngraph::opset1::Add>>(
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param1 = make_shared<ov::opset1::Parameter>(input_type1, shape);
+        auto param2 = make_shared<ov::opset1::Parameter>(input_type2, shape);
+        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ov::opset1::Add>>(
             TypeVector{overriden_type, overriden_type},
             TypeVector{},
             ov::op::TemporaryReplaceOutputType(param1, overriden_type).get(),
             ov::op::TemporaryReplaceOutputType(param2, overriden_type).get());
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1, param2});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1, param2});
 
         ASSERT_EQ(input_type1, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(input_type2, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(overriden_type, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(4, ngraph->get_ops().size());
+    ASSERT_EQ(4, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, notSupportedTypeOverride) {
     auto overriden_type = element::u8;
     auto orig_type = element::boolean;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param1 = make_shared<ngraph::opset1::Parameter>(overriden_type, shape);
-        auto param2 = make_shared<ngraph::opset1::Parameter>(overriden_type, shape);
-        auto op = ngraph::opset1::LogicalAnd(ov::op::TemporaryReplaceOutputType(param1, orig_type).get(),
-                                             ov::op::TemporaryReplaceOutputType(param2, orig_type).get());
-        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ngraph::opset1::LogicalAnd>>(op,
-                                                                                       TypeVector{orig_type, orig_type},
-                                                                                       TypeVector{overriden_type});
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param1 = make_shared<ov::opset1::Parameter>(overriden_type, shape);
+        auto param2 = make_shared<ov::opset1::Parameter>(overriden_type, shape);
+        auto op = ov::opset1::LogicalAnd(ov::op::TemporaryReplaceOutputType(param1, orig_type).get(),
                                          ov::op::TemporaryReplaceOutputType(param2, orig_type).get());
+        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ov::opset1::LogicalAnd>>(op,
                                                                                    TypeVector{orig_type, orig_type},
                                                                                    TypeVector{overriden_type});
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1, param2});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1, param2});
 
         ASSERT_EQ(overriden_type, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(overriden_type, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(overriden_type, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(4, ngraph->get_ops().size());
+    ASSERT_EQ(4, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, notSupportedTypeOverridePartially) {
     auto some_type = element::u8;
     auto overriden_type = element::f32;
     auto orig_type = element::i64;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param1 = make_shared<ngraph::opset1::Parameter>(some_type, shape);
-        auto param2 = make_shared<ngraph::opset1::Parameter>(overriden_type, ngraph::PartialShape{1});
-        auto op = ngraph::opset1::Reshape(param1, ov::op::TemporaryReplaceOutputType(param2, orig_type).get(), false);
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param1 = make_shared<ov::opset1::Parameter>(some_type, shape);
+        auto param2 = make_shared<ov::opset1::Parameter>(overriden_type, ov::PartialShape{1});
+        auto op = ov::opset1::Reshape(param1, ov::op::TemporaryReplaceOutputType(param2, orig_type).get(), false);
         auto relaxed_op =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Reshape>>(op,
-                                                                      TypeVector{element::undefined, orig_type},
-                                                                      TypeVector{});
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Reshape>>(op,
                                                                  TypeVector{element::undefined, orig_type},
                                                                  TypeVector{});
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1, param2});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1, param2});
 
         ASSERT_EQ(some_type, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(overriden_type, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(some_type, relaxed_op->get_output_element_type(0));
     }
 
-    ASSERT_EQ(4, ngraph->get_ops().size());
+    ASSERT_EQ(4, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, multiOutputTypeOverride) {
     auto overriden_type = element::f16;
     auto orig_type = element::f32;
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param1 = make_shared<ngraph::opset1::Parameter>(orig_type, shape);
-        auto op = ngraph::opset1::Split(param1,
-                                        ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}),
-                                        3);
-        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ngraph::opset1::Split>>(
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param1 = make_shared<ov::opset1::Parameter>(orig_type, shape);
+        auto op = ov::opset1::Split(param1, ov::opset1::Constant::create(ov::element::i64, ov::Shape{}, {1}), 3);
+        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ov::opset1::Split>>(
             op,
             TypeVector{},
             TypeVector{overriden_type, overriden_type, overriden_type});
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1});
 
         for (size_t i = 0; i < 3; ++i) {
             ASSERT_EQ(overriden_type, relaxed_op->get_output_element_type(i));
-            ASSERT_EQ(ngraph::Shape({1, 1, 22, 22}), relaxed_op->get_output_shape(i));
+            ASSERT_EQ(ov::Shape({1, 1, 22, 22}), relaxed_op->get_output_shape(i));
         }
     }
 }
 
 TEST_F(TypeRelaxedTests, setGetTypes) {
-    std::shared_ptr<ngraph::Function> ngraph;
+    std::shared_ptr<ov::Model> model;
     {
-        ngraph::PartialShape shape({1, 3, 22, 22});
-        auto param1 = make_shared<ngraph::opset1::Parameter>(element::u8, shape);
-        auto param2 = make_shared<ngraph::opset1::Parameter>(element::u8, shape);
+        ov::PartialShape shape({1, 3, 22, 22});
+        auto param1 = make_shared<ov::opset1::Parameter>(element::u8, shape);
+        auto param2 = make_shared<ov::opset1::Parameter>(element::u8, shape);
         // create TypeRelaxed without any type adjustment, the same behaviour as for opset1::Add
-        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ngraph::opset1::Add>>(param1, param2);
-        auto result = make_shared<ngraph::opset1::Result>(relaxed_op);
+        auto relaxed_op = make_shared<ov::op::TypeRelaxed<ov::opset1::Add>>(param1, param2);
+        auto result = make_shared<ov::opset1::Result>(relaxed_op);
 
-        ngraph = make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1, param2});
+        model = make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1, param2});
 
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(1));
@@ -242,7 +241,7 @@ TEST_F(TypeRelaxedTests, setGetTypes) {
         // previous checks for input/output indices that are out of number of real inputs/outputs
         // should resize internal vectors that hold orig/overridden types, it may affect
         // inference for the op, so here we check if the inference is still OK:
-        ngraph->validate_nodes_and_infer_types();
+        model->validate_nodes_and_infer_types();
 
         // recheck basic statements about input/output types; they should be the same as we haven't changed anything
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0));
@@ -252,14 +251,14 @@ TEST_F(TypeRelaxedTests, setGetTypes) {
         // now we are modifying input types and see if the output type reflects this change
         relaxed_op->set_origin_input_type(element::i8, 0);
         relaxed_op->set_origin_input_type(element::i8, 1);
-        ngraph->validate_nodes_and_infer_types();
+        model->validate_nodes_and_infer_types();
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(element::i8, relaxed_op->get_output_element_type(0));
 
         // override output type
         relaxed_op->set_overridden_output_type(element::f32, 0);
-        ngraph->validate_nodes_and_infer_types();
+        model->validate_nodes_and_infer_types();
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(element::f32, relaxed_op->get_output_element_type(0));
@@ -277,7 +276,7 @@ TEST_F(TypeRelaxedTests, setGetTypes) {
         // (a bit hypothetical though).
         relaxed_op->set_origin_input_type(element::i32, 2);
         relaxed_op->set_overridden_output_type(element::i32, 1);
-        ngraph->validate_nodes_and_infer_types();
+        model->validate_nodes_and_infer_types();
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(element::f32, relaxed_op->get_output_element_type(0));
@@ -288,7 +287,7 @@ TEST_F(TypeRelaxedTests, setGetTypes) {
         relaxed_op->set_origin_input_type(element::undefined, 0);
         relaxed_op->set_origin_input_type(element::undefined, 1);
         relaxed_op->set_overridden_output_type(element::undefined, 0);
-        ngraph->validate_nodes_and_infer_types();
+        model->validate_nodes_and_infer_types();
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0));
         ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(1));
         ASSERT_EQ(element::u8, relaxed_op->get_output_element_type(0));
@@ -298,18 +297,18 @@ TEST_F(TypeRelaxedTests, setGetTypes) {
         ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(0));
     }
 
-    ASSERT_EQ(4, ngraph->get_ops().size());
+    ASSERT_EQ(4, model->get_ops().size());
 }
 
 TEST_F(TypeRelaxedTests, OneOutputMultipleInputPorts) {
-    std::shared_ptr<ngraph::Function> f;
+    std::shared_ptr<ov::Model> f;
     {
-        auto param1 = make_shared<ngraph::opset1::Parameter>(element::boolean, ngraph::Shape{1, 3, 22, 22});
-        auto op = ngraph::opset1::Select(param1, param1, param1);
+        auto param1 = make_shared<ov::opset1::Parameter>(element::boolean, ov::Shape{1, 3, 22, 22});
+        auto op = ov::opset1::Select(param1, param1, param1);
         auto relaxed_op =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Select>>(op, TypeVector{}, TypeVector{element::i64});
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Select>>(op, TypeVector{}, TypeVector{element::i64});
 
-        f = make_shared<ngraph::Function>(ngraph::OutputVector{relaxed_op}, ngraph::ParameterVector{param1});
+        f = make_shared<ov::Model>(ov::OutputVector{relaxed_op}, ov::ParameterVector{param1});
 
         // Prepare relaxed op for input change
         relaxed_op->set_origin_input_type(element::boolean, 0);
@@ -327,80 +326,182 @@ TEST_F(TypeRelaxedTests, OneOutputMultipleInputPorts) {
 }
 
 TEST_F(TypeRelaxedTests, ConstantFoldingCheck) {
-    std::shared_ptr<ngraph::Function> f;
+    std::shared_ptr<ov::Model> f;
     {
-        auto const1 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, {2});
-        auto const2 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, {2});
-        auto equal = ngraph::opset1::Equal(const1, const2);
+        auto const1 = ov::opset1::Constant::create(element::i32, ov::Shape{}, {2});
+        auto const2 = ov::opset1::Constant::create(element::i32, ov::Shape{}, {2});
+        auto equal = ov::opset1::Equal(const1, const2);
         auto relaxed_equal =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Equal>>(equal, TypeVector{}, TypeVector{element::u8});
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Equal>>(equal, TypeVector{}, TypeVector{element::u8});
 
-        f = make_shared<ngraph::Function>(ngraph::OutputVector{relaxed_equal}, ngraph::ParameterVector{});
-        ngraph::pass::Manager manager;
-        manager.register_pass<ngraph::pass::ConstantFolding>();
+        f = make_shared<ov::Model>(ov::OutputVector{relaxed_equal}, ov::ParameterVector{});
+        ov::pass::Manager manager;
+        manager.register_pass<ov::pass::ConstantFolding>();
         ASSERT_NO_THROW(manager.run_passes(f));
         auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0);
-        ASSERT_TRUE(ngraph::is_type<ngraph::opset1::Constant>(layer_before_result));
+        ASSERT_TRUE(ov::is_type<ov::opset1::Constant>(layer_before_result));
     }
 }
 
 TEST_F(TypeRelaxedTests, ConstantFoldingCheck1) {
-    std::shared_ptr<ngraph::Function> f;
+    std::shared_ptr<ov::Model> f;
     {
-        auto const1 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, {2});
-        auto const2 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, {2});
-        auto equal = ngraph::opset1::Equal(const1, const2);
+        auto const1 = ov::opset1::Constant::create(element::i32, ov::Shape{}, {2});
+        auto const2 = ov::opset1::Constant::create(element::i32, ov::Shape{}, {2});
+        auto equal = ov::opset1::Equal(const1, const2);
         auto relaxed_equal =
-            make_shared<ov::op::TypeRelaxed<ngraph::opset1::Equal>>(equal, TypeVector{}, TypeVector{element::boolean});
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Equal>>(equal, TypeVector{}, TypeVector{element::boolean});
 
-        f = make_shared<ngraph::Function>(ngraph::OutputVector{relaxed_equal}, ngraph::ParameterVector{});
-        ngraph::pass::Manager manager;
-        manager.register_pass<ngraph::pass::ConstantFolding>();
+        f = make_shared<ov::Model>(ov::OutputVector{relaxed_equal}, ov::ParameterVector{});
+        ov::pass::Manager manager;
+        manager.register_pass<ov::pass::ConstantFolding>();
         ASSERT_NO_THROW(manager.run_passes(f));
         auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0);
-        ASSERT_TRUE(ngraph::is_type<ngraph::opset1::Constant>(layer_before_result));
+        ASSERT_TRUE(ov::is_type<ov::opset1::Constant>(layer_before_result));
    }
 }
 
 TEST_F(TypeRelaxedTests, ConstantFoldingCheck2) {
-    std::shared_ptr<ngraph::Function> f;
+    std::shared_ptr<ov::Model> f;
     {
-        auto const1 = ngraph::opset1::Constant::create(element::u8, ngraph::Shape{}, {2});
-        auto const2 = ngraph::opset1::Constant::create(element::i8, ngraph::Shape{}, {2});
+        auto const1 = ov::opset1::Constant::create(element::u8, ov::Shape{}, {2});
+        auto const2 = ov::opset1::Constant::create(element::i8, ov::Shape{}, {2});
 
         auto original_input_types = TypeVector{element::i32, element::i32};
-        auto relaxed_equal = std::make_shared<ov::op::TypeRelaxed<ngraph::opset1::Equal>>(
-            ngraph::element::TypeVector{element::i32, element::i32},
-            ngraph::element::TypeVector{element::u8},
+        auto relaxed_equal = std::make_shared<ov::op::TypeRelaxed<ov::opset1::Equal>>(
+            ov::element::TypeVector{element::i32, element::i32},
+            ov::element::TypeVector{element::u8},
             ov::op::TemporaryReplaceOutputType(const1, element::i32).get(),
             ov::op::TemporaryReplaceOutputType(const2, element::i32).get());
 
-        f = make_shared<ngraph::Function>(ngraph::OutputVector{relaxed_equal}, ngraph::ParameterVector{});
-        ngraph::pass::Manager manager;
-        manager.register_pass<ngraph::pass::ConstantFolding>();
+        f = make_shared<ov::Model>(ov::OutputVector{relaxed_equal}, ov::ParameterVector{});
+        ov::pass::Manager manager;
+        manager.register_pass<ov::pass::ConstantFolding>();
         ASSERT_NO_THROW(manager.run_passes(f));
         auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0);
-        ASSERT_TRUE(ngraph::is_type<ngraph::opset1::Constant>(layer_before_result));
+        ASSERT_TRUE(ov::is_type<ov::opset1::Constant>(layer_before_result));
     }
 }
 
 TEST_F(TypeRelaxedTests, ConstantFoldingCheck3) {
-    std::shared_ptr<ngraph::Function> f;
+    std::shared_ptr<ov::Model> f;
     {
-        auto const1 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, {2});
-        auto const2 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, {2});
-        auto equal = ngraph::opset1::Equal(const1, const2);
+        auto const1 = ov::opset1::Constant::create(element::i32, ov::Shape{}, {2});
+        auto const2 = ov::opset1::Constant::create(element::i32, ov::Shape{}, {2});
+        auto equal = ov::opset1::Equal(const1, const2);
 
         auto original_input_types = TypeVector{element::f32, element::f32};
-        auto relaxed_equal = make_shared<ov::op::TypeRelaxed<ngraph::opset1::Equal>>(equal,
                                                                                     original_input_types,
                                                                                     TypeVector{element::u8});
+        auto relaxed_equal =
+            make_shared<ov::op::TypeRelaxed<ov::opset1::Equal>>(equal, original_input_types, TypeVector{element::u8});
 
-        f = make_shared<ngraph::Function>(ngraph::OutputVector{relaxed_equal}, ngraph::ParameterVector{});
-        ngraph::pass::Manager manager;
-        manager.register_pass<ngraph::pass::ConstantFolding>();
+        f = make_shared<ov::Model>(ov::OutputVector{relaxed_equal}, ov::ParameterVector{});
+        ov::pass::Manager manager;
+        manager.register_pass<ov::pass::ConstantFolding>();
         ASSERT_NO_THROW(manager.run_passes(f));
         auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0);
-        ASSERT_TRUE(ngraph::is_type<ngraph::opset1::Constant>(layer_before_result));
+        ASSERT_TRUE(ov::is_type<ov::opset1::Constant>(layer_before_result));
     }
 }
 
+/* copied from CPU plugin to provide the same experience here */
+bool fuse_type_to_convert_cpu(const std::shared_ptr<ov::Node>& node, const precisions_map& precisions) {
+    const auto& from = node->get_output_element_type(0);
+    auto it = precisions.find(from);
+    if (it == precisions.end())
+        return false;
+    const auto& to = it->second;
+    if (auto convert = ov::as_type_ptr<ov::opset1::Convert>(node)) {
+        // For Convert node, converting precision from floating point to boolean will lead to mathematical
+        // error, because here the output precision boolean is replaced by u8. E.g. floating point value 0.01
+        // is converted to be 1 for boolean, but 0 for u8. Thus an Abs and Ceil node should be added before the
+        // Convert node for this scenario.
+        if (convert->input(0).get_element_type().is_real() &&
+            convert->get_convert_element_type() == ov::element::boolean && to.is_integral_number()) {
+            auto abs = std::make_shared<ov::opset1::Abs>(convert->input_value(0).get_node_shared_ptr());
+            auto ceil = std::make_shared<ov::opset1::Ceiling>(abs);
+            auto new_convert = std::make_shared<ov::opset1::Convert>(ceil, to);
+            new_convert->set_friendly_name(convert->get_friendly_name());
+            ov::copy_runtime_info(convert, {abs, ceil, new_convert});
+            ov::replace_node(convert, new_convert);
+            return true;
+        } else {
+            convert->set_convert_element_type(to);
+            return true;
+        }
+    }
+    return false;
+}
+
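The comment in `fuse_type_to_convert_cpu` can be checked numerically: converting a float to boolean saturates any non-zero value to 1, while a plain u8 cast truncates toward zero, and the inserted Abs+Ceiling pair restores the boolean behaviour. A small standalone sketch (plain C++, not the plugin code):

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
    float v = 0.01f;
    // boolean semantics: any non-zero value becomes 1
    uint8_t as_bool = (v != 0.0f) ? 1 : 0;                          // 1
    // plain u8 conversion truncates toward zero
    uint8_t as_u8 = static_cast<uint8_t>(v);                        // 0
    // the Abs + Ceiling rewrite recovers boolean behaviour for u8
    uint8_t fixed = static_cast<uint8_t>(std::ceil(std::fabs(v)));  // 1
    std::cout << int(as_bool) << " " << int(as_u8) << " " << int(fixed) << "\n";
}
```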
+TEST_F(TypeRelaxedTests, PartialValuePropagation) {
+    std::shared_ptr<ov::Model> model;
+    {
+        auto parameter = std::make_shared<ov::opset1::Parameter>(element::f32, ov::PartialShape{1, 768, -1, -1});
+        auto shape = std::make_shared<ov::opset1::ShapeOf>(parameter);
+        auto strided_slice =
+            std::make_shared<ov::opset1::StridedSlice>(shape,
+                                                       ov::opset1::Constant::create(element::i64, {1}, {0}),
+                                                       ov::opset1::Constant::create(element::i64, {1}, {2}),
+                                                       ov::opset1::Constant::create(element::i64, {1}, {1}),
+                                                       std::vector<int64_t>{0},
+                                                       std::vector<int64_t>{0});
+        auto concat = std::make_shared<ov::opset1::Concat>(
+            ov::OutputVector{strided_slice, ov::opset1::Constant::create(element::i64, {1}, {-1})},
+            0);
+        auto reshape = std::make_shared<ov::opset1::Reshape>(parameter, concat, false);
+
+        model = make_shared<ov::Model>(ov::OutputVector{reshape}, ov::ParameterVector{parameter});
+
+        precisions_map map = {
+            {ov::element::i64, ov::element::i32},
+            {ov::element::boolean, ov::element::u8},
+        };
+        ov::pass::Manager manager;
+        manager.register_pass<ov::pass::ConvertPrecision>(
+            map,
+            type_to_fuse_map{{ov::opset1::Convert::get_type_info_static(), fuse_type_to_convert_cpu}});
+        ASSERT_NO_THROW(manager.run_passes(model));
+        EXPECT_EQ(model->get_result()->get_output_partial_shape(0), ov::PartialShape({1, 768, -1}));
+    }
+}
+
+TEST_F(TypeRelaxedTests, PartialValuePropagation2) {
+    std::shared_ptr<ov::Model> model;
+    {
+        auto parameter = std::make_shared<ov::opset1::Parameter>(element::f32, ov::PartialShape{-1, -1});
+        auto axis = ov::opset1::Constant::create(element::i64, {1}, {1});
+        auto broadcast_input =
+            std::make_shared<ov::opset1::Unsqueeze>(std::make_shared<ov::opset1::Unsqueeze>(parameter, axis), axis);
+
+        auto shape = std::make_shared<ov::opset1::ShapeOf>(parameter);
+        auto gather_batch = std::make_shared<ov::opset1::Gather>(shape,
+                                                                 ov::opset1::Constant::create(element::i64, {1}, {0}),
+                                                                 ov::opset1::Constant::create(element::i64, {}, {0}));
+        auto gather_sequence_twice =
+            std::make_shared<ov::opset1::Gather>(shape,
+                                                 ov::opset1::Constant::create(element::i64, {2}, {1, 1}),
+                                                 ov::opset1::Constant::create(element::i64, {}, {0}));
+        auto concat = std::make_shared<ov::opset1::Concat>(
+            ov::OutputVector{gather_batch, ov::opset1::Constant::create(element::i64, {1}, {1}), gather_sequence_twice},
+            0);
+        auto reshape =
+            std::make_shared<ov::opset1::Reshape>(concat, ov::opset1::Constant::create(element::i64, {1}, {-1}), false);
+        auto equal = std::make_shared<ov::opset1::Equal>(reshape, ov::opset1::Constant::create(element::i64, {}, {-1}));
+
+        auto select =
+            std::make_shared<ov::opset1::Select>(equal, ov::opset1::Constant::create(element::i64, {1}, {1}), reshape);
+
+        auto broadcast = std::make_shared<ov::opset1::Broadcast>(broadcast_input, select);
+        model = make_shared<ov::Model>(ov::OutputVector{broadcast}, ov::ParameterVector{parameter});
+
+        precisions_map map = {
+            {ov::element::i64, ov::element::i32},
+            {ov::element::boolean, ov::element::u8},
+        };
+        ov::pass::Manager manager;
+        manager.register_pass<ov::pass::ConvertPrecision>(
+            map,
+            type_to_fuse_map{{ov::opset1::Convert::get_type_info_static(), fuse_type_to_convert_cpu}});
+        ASSERT_NO_THROW(manager.run_passes(model));
+        EXPECT_EQ(model->get_result()->get_output_partial_shape(0), ov::PartialShape({-1, 1, -1, -1}));
+    }
+}
@@ -47,6 +47,8 @@ public:
     OPENVINO_SUPPRESS_DEPRECATED_START
     bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
     OPENVINO_SUPPRESS_DEPRECATED_END
+    bool evaluate_upper(TensorVector& outputs) const override;
+    bool evaluate_lower(TensorVector& outputs) const override;
     bool has_evaluate() const override;
 };
 }  // namespace v1
@@ -63,6 +63,8 @@ public:
     OPENVINO_SUPPRESS_DEPRECATED_START
     bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
     OPENVINO_SUPPRESS_DEPRECATED_END
+    bool evaluate_upper(TensorVector& outputs) const override;
+    bool evaluate_lower(TensorVector& outputs) const override;
     bool has_evaluate() const override;
 
 private:
@@ -19,6 +19,7 @@ void shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes
     const auto& deltas_shape = input_shapes[2];
     const auto& scores_shape = input_shapes[3];
     const auto im_info_shape_rank = im_info_shape.rank();
+    auto num_batches = Dimension::dynamic();
     NODE_VALIDATION_CHECK(op,
                           im_info_shape_rank.compatible(2),
                           "The 'input_im_info' input is expected to be a 2D. Got: ",
@@ -29,6 +30,7 @@ void shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes
                               (im_info_shape[1].compatible(3) || im_info_shape[1].compatible(4)),
                               "The 'input_im_info' shape[1] is expected to be a compatible with [3] or [4]. Got: ",
                               im_info_shape);
+        Dimension::merge(num_batches, im_info_shape[0], num_batches);
     }
 
     const auto anchors_shape_rank = anchors_shape.rank();
@@ -53,6 +55,11 @@ void shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes
                           scores_shape_rank.compatible(4),
                           "The 'input_scores' input is expected to be a 4D. Got: ",
                           scores_shape);
+    if (deltas_shape_rank.is_static())
+        Dimension::merge(num_batches, deltas_shape[0], num_batches);
+    if (scores_shape_rank.is_static())
+        Dimension::merge(num_batches, scores_shape[0], num_batches);
+
     if (deltas_shape_rank.is_static() && scores_shape_rank.is_static()) {
         NODE_VALIDATION_CHECK(op,
                               deltas_shape[0].compatible(scores_shape[0]),
@@ -115,13 +122,10 @@ void shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes
                               scores_shape[1]);
     }
 
-    output_shapes[0] = ov::PartialShape({Dimension::dynamic(), 4});
-    output_shapes[1] = ov::PartialShape::dynamic(1);
-    if (im_info_shape_rank.is_static()) {
-        output_shapes[2] = ov::PartialShape({im_info_shape[0]});
-    } else {
-        output_shapes[2] = ov::PartialShape::dynamic(1);
-    }
+    auto num_rois = Dimension(0, (num_batches * op->get_attrs().post_nms_count).get_max_length());
+    output_shapes[0] = ov::PartialShape({num_rois, 4});
+    output_shapes[1] = ov::PartialShape({num_rois});
+    output_shapes[2] = ov::PartialShape({num_batches});
 }
 
 }  // namespace v9
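Instead of fully dynamic ROI outputs, GenerateProposals now carries an interval: at most `num_batches * post_nms_count` ROIs can be produced, and possibly none. A standalone sketch of that bound arithmetic (a hypothetical `Dim` struct standing in for `ov::Dimension`):

```cpp
#include <cstdint>
#include <iostream>

// Schematic of the bound computed above, not the ov::Dimension class:
// an interval [lo, hi] per dimension.
struct Dim { int64_t lo, hi; };

int main() {
    Dim num_batches{2, 2};          // a static batch of 2, for illustration
    int64_t post_nms_count = 1000;  // attribute of the op
    // num_rois = Dimension(0, (num_batches * post_nms_count).get_max_length())
    Dim num_rois{0, num_batches.hi * post_nms_count};
    std::cout << "rois: [" << num_rois.lo << ", " << num_rois.hi << "] x 4\n";  // [0, 2000] x 4
}
```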
@@ -22,6 +22,7 @@ void shape_infer(const NonMaxSuppression* op,
                  std::vector<T>& output_shapes,
                  bool static_output = false,
                  const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
+    // this shape_infer differs from all the other - it is used in GPU during compile-time and infer-time in custom code
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 3);
 
     const auto& boxes_ps = input_shapes[0];
@@ -68,18 +69,21 @@ void shape_infer(const NonMaxSuppression* op,
     ov::PartialShape out_shape = {Dimension::dynamic(), 3};
     if (boxes_ps.rank().is_static() && scores_ps.rank().is_static()) {
         const auto num_boxes_boxes = boxes_ps[1];
-        if (num_boxes_boxes.is_static() && scores_ps[0].is_static() && scores_ps[1].is_static()) {
-            const auto num_boxes = num_boxes_boxes.get_length();
-            const auto num_classes = scores_ps[1].get_length();
+        bool gpu_wa =
+            static_output && (!num_boxes_boxes.is_static() || !scores_ps[0].is_static() || !scores_ps[1].is_static());
+        if (!gpu_wa && num_boxes_boxes.get_max_length() != -1 && scores_ps[0].get_max_length() != -1 &&
+            scores_ps[1].get_max_length() != -1) {
+            const auto num_boxes = num_boxes_boxes.get_max_length();
+            const auto num_classes = scores_ps[1].get_max_length();
             std::vector<int64_t> max_output_boxes_per_class_as_vals;
             if ((op->get_input_size() > 2 || constant_data.count(2)) &&
                 get_data_as_int64<T>(2, op, max_output_boxes_per_class_as_vals, constant_data)) {
                 int64_t max_output_boxes_per_class = max_output_boxes_per_class_as_vals[0];
                 out_shape[0] = static_output ? std::min(num_boxes, max_output_boxes_per_class) * num_classes *
-                                                   scores_ps[0].get_length()
+                                                   scores_ps[0].get_max_length()
                                              : Dimension(0,
                                                          std::min(num_boxes, max_output_boxes_per_class) * num_classes *
-                                                             scores_ps[0].get_length());
+                                                             scores_ps[0].get_max_length());
             }
         }
     }
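The selected-indices count is bounded by `min(num_boxes, max_output_boxes_per_class) * num_classes * num_batches`; with `static_output` the upper bound itself is used as a static size, otherwise the output gets the interval `[0, bound]`. A worked standalone example of the bound (plain C++):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
    // Example values; in the shape_infer these come from boxes/scores shapes.
    int64_t num_boxes = 100, num_classes = 5, num_batches = 2;
    int64_t max_output_boxes_per_class = 10;
    // Upper bound of the selected-indices output, as computed above:
    int64_t upper = std::min(num_boxes, max_output_boxes_per_class) * num_classes * num_batches;
    std::cout << "out_shape[0] in [0, " << upper << "]\n";  // [0, 100]
}
```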
@@ -92,8 +92,8 @@ std::vector<TShape> shape_infer(const util::TopKBase* op,
             const auto k_max = k.get_max_length();
 
             const auto lower = std::min<TDimValue>(in_min, k_min);
-            const auto upper =
-                in_max < 0 ? Dimension::dynamic().get_max_length() : std::max<TDimValue>(in_max, k_max);
+            const auto upper = in_max < 0 ? Dimension::dynamic().get_max_length()
+                                          : std::min<TDimValue>(in_max, (k_max < 0 ? Interval::s_max : k_max));
             dim_axis = TDim(lower, upper);
         }
     } else {
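The fix replaces `std::max` with `std::min`: TopK returns at most `k` elements along the axis, but never more than the axis holds, so the upper bound is the smaller of the two maxima (a dynamic `k` falls back to `Interval::s_max`). A standalone sketch of the corrected bound, with -1 standing in for a dynamic value:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

int main() {
    // Input axis dimension in [2, 6], k in [3, 5]; -1 would mean dynamic.
    int64_t in_min = 2, in_max = 6, k_min = 3, k_max = 5;
    const int64_t s_max = std::numeric_limits<int64_t>::max();
    int64_t lower = std::min(in_min, k_min);
    int64_t upper = in_max < 0 ? -1 : std::min(in_max, (k_max < 0 ? s_max : k_max));
    std::cout << "output axis dim: [" << lower << ", " << upper << "]\n";  // [2, 5]
}
```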
@@ -78,7 +78,6 @@ ov::Tensor evaluate_bound(const Output<Node>& output, bool is_upper, bool invali
 
     std::vector<Node*> order;
     if (could_propagate(output, order)) {
-        reverse(order.begin(), order.end());
         for (const auto& node : order) {
             ov::TensorVector outputs;
             for (const auto& out : node->outputs()) {
@@ -235,25 +234,50 @@ ov::Tensor make_tensor_max_of_type(ov::element::Type_t t) {
 
 }  // namespace
 
-bool ov::could_propagate(const Output<Node>& output, std::vector<Node*>& order) {
+bool ov::could_propagate(const Output<Node>& output, std::vector<Node*>& result) {
     auto status = true;
 
-    std::deque<Node*> nodes_to_calculate = {output.get_node()};
-    order.push_back(output.get_node());
+    std::stack<Node*, std::vector<Node*>> nodes_to_do;
+    nodes_to_do.push(output.get_node());
+    std::unordered_set<Node*> nodes_done;
 
-    while (status && !nodes_to_calculate.empty()) {
-        auto current_node = nodes_to_calculate.front();
-        nodes_to_calculate.pop_front();
+    while (status && nodes_to_do.size() > 0) {
+        Node* node = nodes_to_do.top();
+        if (nodes_done.count(node) == 0) {
+            bool can_add = true;
+            size_t arg_count = node->get_input_size();
 
-        if (current_node->inputs().empty() && !is_type<op::v0::Constant>(current_node)) {
-            status = false;
-        } else if (!is_type<op::v0::ShapeOf>(current_node) && !is_type<op::v3::ShapeOf>(current_node)) {
-            // not a leaf, not a shape_of -- continue to search
-            for (const auto& input_value : current_node->input_values()) {
-                const auto& input_node = input_value.get_node();
-                order.push_back(input_node);
-                nodes_to_calculate.push_front(input_node);
+            if (arg_count == 0 && !is_type<op::v0::Constant>(node)) {
+                status = false;
+                continue;
+            } else if (is_type<op::v0::ShapeOf>(node) || is_type<op::v3::ShapeOf>(node)) {
+                result.push_back(node);
+                nodes_to_do.pop();
+                nodes_done.insert(node);
+                continue;
+            }
+
+            for (size_t i = 0; i < arg_count; ++i) {
+                Node* dep = node->get_input_node_ptr(arg_count - i - 1);
+                if (nodes_done.count(dep) == 0) {
+                    can_add = false;
+                    nodes_to_do.push(dep);
+                }
+            }
+            for (auto& depptr : node->get_control_dependencies()) {
+                Node* dep = depptr.get();
+                if (nodes_done.count(dep) == 0) {
+                    can_add = false;
+                    nodes_to_do.push(dep);
+                }
+            }
+            if (can_add) {
+                result.push_back(node);
+                nodes_to_do.pop();
+                nodes_done.insert(node);
+            }
+        } else {
+            nodes_to_do.pop();
         }
     }
     return status;
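The rewrite replaces the old breadth-first walk plus `reverse` with an explicit-stack depth-first search: a node is emitted only after all of its inputs (and control dependencies) are already emitted, so `result` comes out directly in execution order and each node appears exactly once. A generic standalone sketch of this post-order pattern (hypothetical `Node` struct, not the OpenVINO one):

```cpp
#include <iostream>
#include <stack>
#include <unordered_set>
#include <vector>

// Post-order DFS with an explicit stack, as in could_propagate above:
// a node is appended to the result only once all dependencies are done.
struct Node {
    const char* name;
    std::vector<Node*> inputs;
};

std::vector<Node*> topo_order(Node* output) {
    std::vector<Node*> result;
    std::stack<Node*, std::vector<Node*>> todo;
    std::unordered_set<Node*> done;
    todo.push(output);
    while (!todo.empty()) {
        Node* n = todo.top();
        if (done.count(n)) { todo.pop(); continue; }
        bool ready = true;
        for (Node* dep : n->inputs)
            if (!done.count(dep)) { ready = false; todo.push(dep); }
        if (ready) { result.push_back(n); todo.pop(); done.insert(n); }
    }
    return result;
}

int main() {
    Node a{"a", {}}, b{"b", {&a}}, c{"c", {&a, &b}};
    for (Node* n : topo_order(&c)) std::cout << n->name << ' ';  // a b c
}
```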
@@ -268,9 +292,51 @@ ov::Tensor ov::evaluate_upper_bound(const Output<Node>& output) {
 }
 
 std::pair<ov::Tensor, ov::Tensor> ov::evaluate_both_bounds(const Output<Node>& output) {
-    evaluate_bound(output, false, false);
-    evaluate_bound(output, true);
-    return {output.get_tensor_ptr()->get_lower_value(), output.get_tensor_ptr()->get_upper_value()};
+    const auto& output_tensor = output.get_tensor();
+    if (output_tensor.get_lower_value() && output_tensor.get_upper_value())
+        return {output_tensor.get_lower_value(), output_tensor.get_upper_value()};
+    std::vector<Node*> order;
+    if (could_propagate(output, order)) {
+        for (const auto& node : order) {
+            ov::TensorVector outputs_lower, outputs_upper;
+            for (const auto& out : node->outputs()) {
+                OPENVINO_SUPPRESS_DEPRECATED_START
+                outputs_lower.push_back(util::wrap_tensor(out));
+                outputs_upper.push_back(util::wrap_tensor(out));
+                OPENVINO_SUPPRESS_DEPRECATED_END
+            }
+            if (!node->evaluate_lower(outputs_lower) || !node->evaluate_upper(outputs_upper)) {
+                break;
+            }
+            auto input_values = node->input_values();
+            bool same_inputs = std::all_of(input_values.begin(), input_values.end(), [](const Output<Node>& input) {
+                auto& t = input.get_tensor();
+                return t.has_and_set_bound() || are_equal(t.get_lower_value(), t.get_upper_value());
+            });
+            TensorLabelVector output_labels(node->get_output_size());
+            bool labels_evaluated = node->evaluate_label(output_labels);
+            for (size_t i = 0; i < node->get_output_size(); ++i) {
+                auto& out_tensor = node->get_output_tensor(i);
+                out_tensor.set_lower_value(outputs_lower[i]);
+                out_tensor.set_upper_value(outputs_upper[i]);
+                if (same_inputs || are_equal(outputs_lower[i], outputs_upper[i]))
+                    out_tensor.set_upper_value(outputs_lower[i]);
+                if (labels_evaluated)
+                    node->get_output_tensor(i).set_value_label(output_labels[i]);
+            }
+            for (const auto& input : node->input_values()) {
+                auto& tensor = input.get_tensor();
+                const auto& lower = tensor.get_lower_value();
+                const auto& upper = tensor.get_upper_value();
+                const auto should_invalidate =
+                    (lower && shape_size(lower.get_shape()) > 10) || (upper && shape_size(upper.get_shape()) > 10);
+                if (should_invalidate && input.get_target_inputs().size() == 1)
+                    tensor.invalidate_values();
+            }
+            propagate_rt_info(node, output);
+        }
+    }
+    return {output_tensor.get_lower_value(), output_tensor.get_upper_value()};
 }
 
 bool ov::default_lower_bound_evaluator(const Node* node, TensorVector& output_values) {
@@ -52,7 +52,7 @@ bool evaluate_abs(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr
 
 bool ov::op::v0::Abs::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Abs_evaluate);
-    return absop::evaluate_abs(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    return absop::evaluate_abs(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool ov::op::v0::Abs::has_evaluate() const {
@@ -59,7 +59,7 @@ bool evaluate_acos(const ov::HostTensorPtr& arg0, const ov::HostTensorPtr& out,
 
 bool ov::op::v0::Acos::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Acos_evaluate);
-    return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    return acosop::evaluate_acos(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool ov::op::v0::Acos::has_evaluate() const {
@@ -63,7 +63,7 @@ bool evaluate_asin(const HostTensorPtr& arg0, const HostTensorPtr& out, const si
 
 bool op::Asin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Asin_evaluate);
-    return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    return asinop::evaluate_asin(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool op::Asin::has_evaluate() const {
@@ -61,7 +61,7 @@ bool evaluate_atan(const HostTensorPtr& arg0, const HostTensorPtr& out, const si
 
 bool op::Atan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Atan_evaluate);
-    return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    return atanop::evaluate_atan(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool op::Atan::has_evaluate() const {
@@ -66,7 +66,7 @@ bool evaluate_ceiling(const HostTensorPtr& arg0, const HostTensorPtr& out, const
 
 bool op::Ceiling::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Ceiling_evaluate);
-    return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    return ceiling::evaluate_ceiling(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool op::Ceiling::has_evaluate() const {
@@ -558,7 +558,7 @@ bool ov::op::v0::Constant::visit_attributes(AttributeVisitor& visitor) {
 bool ov::op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v0_Constant_evaluate);
     auto output = outputs[0];
-    output->set_shape(get_shape());
+    output->set_shape(m_shape);
     output->write(get_data_ptr(), output->get_size_in_bytes());
     return true;
 }
@@ -60,8 +60,8 @@ bool op::Cos::evaluate(const HostTensorVector& outputs, const HostTensorVector&
     OV_OP_SCOPE(v0_Cos_evaluate);
     OPENVINO_SUPPRESS_DEPRECATED_START
     NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
-    return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(get_output_shape(0)));
     OPENVINO_SUPPRESS_DEPRECATED_END
+    return cosop::evaluate_cos(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool op::Cos::has_evaluate() const {
@@ -61,7 +61,7 @@ bool op::Cosh::evaluate(const HostTensorVector& outputs, const HostTensorVector&
     OPENVINO_SUPPRESS_DEPRECATED_START
     NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
     OPENVINO_SUPPRESS_DEPRECATED_END
-    return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+    return coshop::evaluate_cosh(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
 }
 
 bool op::Cosh::has_evaluate() const {
@@ -4,9 +4,13 @@
 
 #include "ngraph/op/equal.hpp"
 
+#include "bound_evaluate.hpp"
 #include "itt.hpp"
 #include "ngraph/op/constant.hpp"
+#include "ngraph/op/less_eq.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/equal.hpp"
+#include "openvino/op/ops.hpp"
 
 using namespace std;
 using namespace ngraph;
@@ -55,6 +59,60 @@ bool evaluate_equal(const HostTensorPtr& arg0,
     }
     return rc;
 }
+
+ov::Tensor equal_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
+    auto equal = op::v1::Equal(std::make_shared<op::v0::Parameter>(lhs.get_element_type(), lhs.get_shape()),
+                               std::make_shared<op::v0::Parameter>(rhs.get_element_type(), rhs.get_shape()),
+                               op::AutoBroadcastType::NUMPY);
+    auto outs = ov::TensorVector{{equal.get_output_element_type(0), equal.get_output_shape(0)}};
+    equal.evaluate(outs, ov::TensorVector{lhs, rhs});
+    return outs.front();
+}
+
+ov::Tensor less_equal_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
+    auto equal = op::v1::LessEqual(std::make_shared<op::v0::Parameter>(lhs.get_element_type(), lhs.get_shape()),
+                                   std::make_shared<op::v0::Parameter>(rhs.get_element_type(), rhs.get_shape()),
+                                   op::AutoBroadcastType::NUMPY);
+    auto outs = ov::TensorVector{{equal.get_output_element_type(0), equal.get_output_shape(0)}};
+    equal.evaluate(outs, ov::TensorVector{lhs, rhs});
+    return outs.front();
+}
+
+ov::Tensor and_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
+    auto logical_and =
+        ov::op::v1::LogicalAnd(std::make_shared<op::v0::Parameter>(lhs.get_element_type(), lhs.get_shape()),
+                               std::make_shared<op::v0::Parameter>(rhs.get_element_type(), rhs.get_shape()),
+                               op::AutoBroadcastType::NUMPY);
+    auto outs = ov::TensorVector{{logical_and.get_output_element_type(0), logical_and.get_output_shape(0)}};
+    logical_and.evaluate(outs, ov::TensorVector{lhs, rhs});
+    return outs.front();
+}
+
+ov::Tensor or_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
+    auto logical_or =
+        ov::op::v1::LogicalOr(std::make_shared<op::v0::Parameter>(lhs.get_element_type(), lhs.get_shape()),
+                              std::make_shared<op::v0::Parameter>(rhs.get_element_type(), rhs.get_shape()),
+                              op::AutoBroadcastType::NUMPY);
+    auto outs = ov::TensorVector{{logical_or.get_output_element_type(0), logical_or.get_output_shape(0)}};
+    logical_or.evaluate(outs, ov::TensorVector{lhs, rhs});
+    return outs.front();
+}
+
+void all_equal(const ov::TensorVector tensors, ov::Tensor& output_value) {
+    OPENVINO_ASSERT(tensors.size() >= 2, "Unexpected number of tensors in all_equal helper");
+    auto& tensor = tensors[0];
+    output_value = equal_tensor(tensor, tensors[1]);
+    for (size_t i = 2; i < tensors.size(); ++i) {
+        output_value = and_tensor(output_value, equal_tensor(tensor, tensors[i]));
+    }
+}
+
+ov::Tensor within_interval(const ov::Tensor& lower, const ov::Tensor& upper, const ov::Tensor& subject_to_check) {
+    auto lower_check = less_equal_tensor(lower, subject_to_check);
+    auto upper_check = less_equal_tensor(subject_to_check, upper);
+    return and_tensor(lower_check, upper_check);
+}
+
 }  // namespace
 }  // namespace equal
 
@@ -75,6 +133,30 @@ bool op::v1::Equal::evaluate(const HostTensorVector& outputs, const HostTensorVe
     return equal::evaluate_equal(inputs[0], inputs[1], outputs[0], get_autob());
 }
 
+bool op::v1::Equal::evaluate_lower(ov::TensorVector& output_values) const {
+    if (get_input_tensor(0).has_and_set_bound() && get_input_tensor(1).has_and_set_bound())
+        return default_upper_bound_evaluator(this, output_values);
+    // ll == lu == rl == ru -> {true}
+    // else -> {false}
+    const auto &lhs = get_input_tensor(0), &rhs = get_input_tensor(1);
+    auto lhs_lower = lhs.get_lower_value(), lhs_upper = lhs.get_upper_value();
+    auto rhs_lower = rhs.get_lower_value(), rhs_upper = rhs.get_upper_value();
+    equal::all_equal({lhs_lower, lhs_upper, rhs_lower, rhs_upper}, output_values[0]);
+    return true;
+}
+
+bool op::v1::Equal::evaluate_upper(ov::TensorVector& output_values) const {
+    const auto &lhs = get_input_tensor(0), &rhs = get_input_tensor(1);
+    auto lhs_lower = lhs.get_lower_value(), lhs_upper = lhs.get_upper_value();
+    auto rhs_lower = rhs.get_lower_value(), rhs_upper = rhs.get_upper_value();
+    // check for intersection:
+    // ll <= rl <= lu or ll <= ru <= lu
+    auto rl_check = equal::within_interval(lhs_lower, lhs_upper, rhs_lower);
+    auto ru_check = equal::within_interval(lhs_lower, lhs_upper, rhs_upper);
+    output_values[0] = equal::or_tensor(rl_check, ru_check);
+    return true;
+}
+
 bool op::v1::Equal::has_evaluate() const {
     OV_OP_SCOPE(v1_Equal_has_evaluate);
     switch (get_input_element_type(0)) {
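The bound rules above read as interval logic: `evaluate_lower` may only produce `true` when both operands are fully determined and identical (ll == lu == rl == ru), while `evaluate_upper` produces `true` whenever an endpoint of the right interval falls inside the left one, per the comment in the code. A standalone sketch for scalar intervals (plain C++, not the tensor-based helpers):

```cpp
#include <iostream>

// Schematic of the Equal bound rules above, for scalar intervals.
struct Interval { int lo, hi; };

// Lower bound: true only if both sides are single, identical values.
bool equal_lower(Interval l, Interval r) {
    return l.lo == l.hi && l.lo == r.lo && r.lo == r.hi;
}

// Upper bound: true if an endpoint of rhs falls inside lhs
// (ll <= rl <= lu or ll <= ru <= lu), as in evaluate_upper above.
bool equal_upper(Interval l, Interval r) {
    return (l.lo <= r.lo && r.lo <= l.hi) || (l.lo <= r.hi && r.hi <= l.hi);
}

int main() {
    Interval lhs{2, 5}, rhs{3, 7};
    std::cout << equal_lower(lhs, rhs) << equal_upper(lhs, rhs) << "\n";  // 01: may be equal, not surely
    Interval a{4, 4}, b{4, 4};
    std::cout << equal_lower(a, b) << equal_upper(a, b) << "\n";          // 11: surely equal
}
```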
@ -59,7 +59,7 @@ bool evaluate_erf(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz

bool op::Erf::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Erf_evaluate);
    return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return erfop::evaluate_erf(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Erf::has_evaluate() const {
@ -71,7 +71,7 @@ bool evaluate_floor(const HostTensorPtr& arg0, const HostTensorPtr& out, const s

bool op::Floor::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Floor_evaluate);
    return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return floorop::evaluate_floor(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Floor::has_evaluate() const {
@ -58,7 +58,7 @@ bool evaluate_log(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz

bool op::Log::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Log_evaluate);
    return logop::evaluate_log(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return logop::evaluate_log(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Log::has_evaluate() const {
@ -61,7 +61,7 @@ bool op::Negative::evaluate(const HostTensorVector& outputs, const HostTensorVec
    NGRAPH_CHECK(validate_host_tensor_vector(inputs, 1));
    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
    OPENVINO_SUPPRESS_DEPRECATED_END
    return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(outputs[0]->get_shape()));
    return negativeop::evaluate_negative(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Negative::has_evaluate() const {
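These unary-op hunks (and the Sign/Sin/Sinh/Sqrt/Tan/Tanh ones further down) all make the same change: the element count for evaluation is taken from the input tensor's runtime shape instead of the op's inferred output shape, which may still be dynamic when evaluate() runs. A minimal sketch of why that is always safe for element-wise ops (plain C++, assumed dims):

#include <cassert>
#include <cstddef>
#include <vector>

// For an element-wise unary op the output holds exactly as many elements as
// the input, so the loop count can come from the input tensor even when the
// statically inferred output shape is still unknown.
static size_t shape_size(const std::vector<size_t>& dims) {
    size_t n = 1;
    for (size_t d : dims)
        n *= d;
    return n;
}

int main() {
    std::vector<size_t> input_dims{2, 3, 4};  // runtime shape of inputs[0]
    assert(shape_size(input_dims) == 24);     // count used by the evaluator
    return 0;
}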
@ -344,37 +344,30 @@ Dimension resolve_minus_one(const Node* reshape_node,
                    input_dim.get_length() % output_dim.get_length() == 0,
                    "Non-'-1' output dimensions do not evenly divide the input dimensions");
    }
    if (output_dim.get_min_length() == 0 || output_dim == Dimension() || input_dim == Dimension()) {
    if (output_dim == Dimension() || input_dim == Dimension()) {
        return Dimension::dynamic();
    } else {
        auto in_min = input_dim.get_min_length(), in_max = input_dim.get_max_length();
        auto out_min = output_dim.get_min_length(), out_max = output_dim.get_max_length();

        Dimension::value_type lower;
        if (input_dim.get_min_length() == 0)
            lower = 0;
        else if (input_dim.get_min_length() == -1 || output_dim.get_max_length() == 0 ||
                 output_dim.get_max_length() == -1)
        if (in_min == -1 || out_max == -1)
            lower = -1;  // dynamic
        else
            lower = static_cast<Dimension::value_type>(
                ceil(static_cast<double>(input_dim.get_min_length()) / output_dim.get_max_length()));
            lower = static_cast<Dimension::value_type>(ceil(static_cast<double>(in_min) / (out_max ? out_max : 1)));

        Dimension::value_type upper;
        if (input_dim.get_max_length() == 0)
            upper = 0;
        else if (input_dim.get_max_length() == -1 || output_dim.get_min_length() == 0 ||
                 output_dim.get_min_length() == -1)
        if (in_max == -1 || out_min == -1)
            upper = -1;  // dynamic
        else
            upper = static_cast<Dimension::value_type>(
                floor(static_cast<double>(input_dim.get_max_length()) / output_dim.get_min_length()));
            upper =
                static_cast<Dimension::value_type>(floor(static_cast<double>(in_max) / (out_min ? out_min : 1)));

        if (lower == -1)
            return Dimension::dynamic();
        else if (upper == -1)
            return Dimension(lower, upper);
        else if (lower > upper)  // empty intersection
        if (lower == -1 || (lower > upper && upper > -1))
            return Dimension::dynamic();
        else
            return Dimension(lower, upper);
            return {lower, upper};
        }
    }
}
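As a concrete check of the interval arithmetic above, here is a small self-contained sketch (plain C++; the interval values are assumptions picked for illustration). With the input element count known to lie in [12, 24] and the product of the non-'-1' output dimensions in [3, 4], the '-1' dimension must lie in [ceil(12/4), floor(24/3)] = [3, 8]:

#include <cmath>
#include <cstdio>

int main() {
    long in_min = 12, in_max = 24;  // bounds on the input element count
    long out_min = 3, out_max = 4;  // bounds on the product of known output dims
    // Same formulas as resolve_minus_one above, including the divide-by-zero guard.
    long lower = static_cast<long>(std::ceil(static_cast<double>(in_min) / (out_max ? out_max : 1)));
    long upper = static_cast<long>(std::floor(static_cast<double>(in_max) / (out_min ? out_min : 1)));
    std::printf("-1 resolves to [%ld, %ld]\n", lower, upper);  // prints "[3, 8]"
}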
@ -6,6 +6,7 @@

#include <memory>

#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/runtime/reference/select.hpp"
@ -125,6 +126,14 @@ bool op::v1::Select::evaluate(const HostTensorVector& output_values, const HostT
    return detail::evaluate_select(output_values, input_values, autob, output_values[0]->get_element_type());
}

bool op::v1::Select::evaluate_lower(ov::TensorVector& output_values) const {
    return get_input_tensor(0).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}

bool op::v1::Select::evaluate_upper(ov::TensorVector& output_values) const {
    return get_input_tensor(0).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}

bool op::v1::Select::has_evaluate() const {
    OV_OP_SCOPE(v1_Select_has_evaluate);
    switch (get_output_element_type(0)) {
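Select only forwards bounds when its condition input is fully known (has_and_set_bound): once the boolean mask is exact, each output bound is simply the corresponding bound of the branch the condition picks. A scalar sketch of that idea (plain C++, assumed values, not the OpenVINO API):

#include <cstdio>

int main() {
    bool cond = true;               // condition bound is set and exact
    long then_lo = 1, then_hi = 4;  // bounds of the "then" input
    long else_lo = 7, else_hi = 9;  // bounds of the "else" input
    long lo = cond ? then_lo : else_lo;
    long hi = cond ? then_hi : else_hi;
    std::printf("select bounds: [%ld, %ld]\n", lo, hi);  // prints "[1, 4]"
}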
@ -124,20 +124,16 @@ bool evaluate_bound_shape(const Node* shape_of_node, ov::TensorVector& output_va
                              : interval.get_max_val();
    }
    NGRAPH_CHECK(pshape_up.is_static() && pshape_low.is_static());
    const auto output_et = shape_of_node->get_output_element_type(0);
    const auto output_et = output_values[0].get_element_type();

    if (pshape_low.to_shape() == pshape_up.to_shape()) {
        shape_of::evaluate_shape_of(output_values[0], pshape_low.to_shape());
        shape_of_node->get_output_tensor(0).set_lower_value(output_values[0]);
        shape_of_node->get_output_tensor(0).set_upper_value(output_values[0]);
    } else {
        auto&& upper = is_upper ? output_values : ov::TensorVector{{output_et, Shape{pshape_up.to_shape().size()}}};
        shape_of::evaluate_shape_of(upper[0], pshape_up.to_shape());
        shape_of_node->get_output_tensor(0).set_upper_value(upper[0]);

        auto&& lower = is_upper ? ov::TensorVector{{output_et, Shape{pshape_low.to_shape().size()}}} : output_values;
        shape_of::evaluate_shape_of(lower[0], pshape_low.to_shape());
        shape_of_node->get_output_tensor(0).set_lower_value(lower[0]);

        vector<char> dynamic_mask;  // true if dimension is dynamic
        for (const auto& i : input_partial_shape)
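For ShapeOf, bound evaluation reduces to materializing two integer tensors: the lower bound holds each dimension's minimum and the upper bound its maximum, with an unbounded dimension saturating to the interval's maximum value (as interval.get_max_val() above). A self-contained sketch (plain C++; the partial shape {[2,5], 3, [0,?]} is an assumption for illustration):

#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
    // Partial shape {[2,5], 3, [0,?]} split into per-dimension bounds.
    std::vector<int64_t> lower{2, 3, 0};
    std::vector<int64_t> upper{5, 3, std::numeric_limits<int64_t>::max()};  // "?" saturates
    for (size_t i = 0; i < lower.size(); ++i)
        std::printf("dim %zu in [%lld, %lld]\n", i, (long long)lower[i], (long long)upper[i]);
}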
@ -61,7 +61,7 @@ bool op::Sign::evaluate(const HostTensorVector& outputs, const HostTensorVector&
    OPENVINO_SUPPRESS_DEPRECATED_START
    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
    OPENVINO_SUPPRESS_DEPRECATED_END
    return signop::evaluate_sign(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return signop::evaluate_sign(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Sign::has_evaluate() const {

@ -59,7 +59,7 @@ bool evaluate_sin(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz

bool op::Sin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Sin_evaluate);
    return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return sinop::evaluate_sin(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Sin::has_evaluate() const {

@ -61,7 +61,7 @@ bool op::Sinh::evaluate(const HostTensorVector& outputs, const HostTensorVector&
    OPENVINO_SUPPRESS_DEPRECATED_START
    NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 1));
    OPENVINO_SUPPRESS_DEPRECATED_END
    return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return sinhop::evaluate_sinh(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Sinh::has_evaluate() const {

@ -59,7 +59,7 @@ bool evaluate_sqrt(const HostTensorPtr& arg0, const HostTensorPtr& out, const si

bool op::Sqrt::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Sqrt_evaluate);
    return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return sqrtop::evaluate_sqrt(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Sqrt::has_evaluate() const {

@ -60,7 +60,7 @@ bool evaluate_tan(const HostTensorPtr& arg0, const HostTensorPtr& out, const siz

bool op::Tan::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Tan_evaluate);
    return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return tanop::evaluate_tan(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Tan::has_evaluate() const {

@ -59,7 +59,7 @@ bool evaluate_tanh(const HostTensorPtr& arg0, const HostTensorPtr& out, const si

bool op::Tanh::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    OV_OP_SCOPE(v0_Tanh_evaluate);
    return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(get_output_shape(0)));
    return tanhop::evaluate_tanh(inputs[0], outputs[0], shape_size(inputs[0]->get_shape()));
}

bool op::Tanh::has_evaluate() const {
@ -11,5 +11,142 @@
namespace ov {
namespace op {
TypeRelaxedBase::~TypeRelaxedBase() = default;

namespace {
void convert_types(std::shared_ptr<v0::Parameter>& parameter,
                   std::shared_ptr<v0::Convert>& convert,
                   Output<Node>& output,
                   const element::Type& new_type) {
    parameter->set_element_type(output.get_element_type());
    parameter->set_partial_shape(output.get_shape());
    parameter->validate_and_infer_types();
    if (auto& bound = output.get_tensor().get_lower_value())
        parameter->get_output_tensor(0).set_lower_value(bound);
    if (auto& bound = output.get_tensor().get_upper_value())
        parameter->get_output_tensor(0).set_upper_value(bound);

    convert->set_destination_type(new_type);
    convert->validate_and_infer_types();

    ov::TensorVector lower_tensor = {ov::Tensor(new_type, output.get_shape())};
    ov::TensorVector upper_tensor = {ov::Tensor(new_type, output.get_shape())};
    bool lower_success = convert->evaluate_lower(lower_tensor);
    bool upper_success = convert->evaluate_upper(upper_tensor);
    auto& tensor = output.get_tensor();

    if (lower_success || upper_success) {
        OPENVINO_SUPPRESS_DEPRECATED_START
        tensor.set_element_type(new_type);  // deprecated piece
        OPENVINO_SUPPRESS_DEPRECATED_END
    }
    if (lower_success)
        tensor.set_lower_value(lower_tensor[0]);
    if (upper_success)
        tensor.set_upper_value(upper_tensor[0]);
    if (lower_success && upper_success) {
        if (memcmp(lower_tensor[0].data(), upper_tensor[0].data(), lower_tensor[0].get_byte_size()) == 0)
            tensor.set_upper_value(lower_tensor[0]);
    }
}
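One detail worth calling out in convert_types: when both converted bounds come back byte-identical, the value is fully determined, so the lower-bound tensor is reused as the upper bound and the two bounds share one buffer. A self-contained sketch of that shortcut (plain C++, assumed data):

#include <cstdio>
#include <cstring>

int main() {
    int lower[3] = {1, 2, 3};
    int upper[3] = {1, 2, 3};
    // Byte-wise comparison, as memcmp on the bound tensors above.
    bool fully_known = std::memcmp(lower, upper, sizeof(lower)) == 0;
    std::printf("value fully known: %s\n", fully_known ? "yes" : "no");  // prints "yes"
}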
void reset_convert(std::shared_ptr<v0::Parameter> parameter,
                   std::shared_ptr<v0::Convert> convert,
                   const ov::Tensor& tensor,
                   const element::Type& new_type,
                   bool is_upper) {
    parameter->set_element_type(tensor.get_element_type());
    parameter->set_partial_shape(tensor.get_shape());
    parameter->validate_and_infer_types();
    convert->set_destination_type(new_type);
    convert->validate_and_infer_types();
    if (is_upper) {
        convert->get_input_tensor(0).set_upper_value(tensor);
    } else {
        convert->get_input_tensor(0).set_lower_value(tensor);
    }
}
}  // namespace

std::unordered_map<size_t, std::pair<ov::Tensor, ov::Tensor>> convert_input_types(OutputVector& inputs,
                                                                                  const element::TypeVector& types) {
    OPENVINO_ASSERT(inputs.size() >= types.size());
    std::shared_ptr<v0::Parameter> parameter = nullptr;
    std::shared_ptr<v0::Convert> convert = nullptr;
    std::unordered_map<size_t, std::pair<ov::Tensor, ov::Tensor>> original_inputs;  // input_idx -> {lower, upper}
    for (size_t i = 0; i < inputs.size(); ++i) {
        if (i >= types.size())
            break;  // inputs with this idx and higher don't change type
        auto& input = inputs[i];
        const auto& fake_type = input.get_element_type();
        const auto& original_type = types[i];
        if (original_type == fake_type || original_type == element::undefined)
            continue;  // this input type wasn't changed
        if (parameter == nullptr || convert == nullptr) {
            parameter = std::make_shared<op::v0::Parameter>(element::undefined, PartialShape());
            convert = std::make_shared<ov::op::v0::Convert>(parameter, element::undefined);
        }
        ov::op::convert_types(parameter, convert, input, original_type);
        original_inputs[i] = {parameter->get_output_tensor(0).get_lower_value(),
                              parameter->get_output_tensor(0).get_upper_value()};
    }
    return original_inputs;
}

ov::TensorVector get_output_tensors_of_original_type(const ov::TensorVector& fake_output_tensors,
                                                     const element::TypeVector& types) {
    TensorVector original_outputs(fake_output_tensors.size());
    for (size_t i = 0; i < original_outputs.size(); ++i) {
        const auto fake_type = fake_output_tensors[i].get_element_type();
        const auto original_type = types[i];
        if (fake_type == original_type) {
            original_outputs[i] = fake_output_tensors[i];
        } else {
            original_outputs[i] = ov::Tensor(original_type, fake_output_tensors[i].get_shape());
        }
    }
    return original_outputs;
}

void reset_input_types(const std::unordered_map<size_t, std::pair<ov::Tensor, ov::Tensor>>& original_input_vals,
                       OutputVector& inputs) {
    for (auto& item : original_input_vals) {
        if (!item.second.first && !item.second.second)
            continue;
        const auto& etype =
            item.second.first ? item.second.first.get_element_type() : item.second.second.get_element_type();
        auto& tensor = inputs[item.first].get_tensor();
        OPENVINO_SUPPRESS_DEPRECATED_START
        tensor.set_element_type(etype);
        OPENVINO_SUPPRESS_DEPRECATED_END
        if (item.second.first)
            tensor.set_lower_value(item.second.first);
        if (item.second.second)
            tensor.set_upper_value(item.second.second);
    }
}

bool convert_outputs_to_fake_type(ov::TensorVector& outputs, ov::TensorVector& original_outputs, bool is_upper) {
    OPENVINO_ASSERT(outputs.size() == original_outputs.size());
    std::shared_ptr<v0::Parameter> parameter = nullptr;
    std::shared_ptr<v0::Convert> convert = nullptr;
    for (size_t i = 0; i < outputs.size(); ++i) {
        const auto& fake_type = outputs[i].get_element_type();
        const auto& original_type = original_outputs[i].get_element_type();
        if (fake_type == original_type)
            continue;
        if (parameter == nullptr || convert == nullptr) {
            parameter = std::make_shared<op::v0::Parameter>(element::undefined, PartialShape());
            convert = std::make_shared<ov::op::v0::Convert>(parameter, element::undefined);
        }
        reset_convert(parameter, convert, original_outputs[i], fake_type, is_upper);
        TensorVector local_outputs = {outputs[i]};
        if (is_upper && !convert->evaluate_upper(local_outputs))
            return false;
        if (!is_upper && !convert->evaluate_lower(local_outputs))
            return false;
    }
    return true;
}

}  // namespace op
}  // namespace ov
@ -1240,3 +1240,41 @@ TEST(type_prop, broadcast_v3_eval_labels_static_dims_bidirectional) {
    EXPECT_EQ(out_shape, expected_shape);
    EXPECT_EQ(get_shape_labels(out_shape), expected_labels);
}

TEST(type_prop, broadcast_v3_bidirectional_tricky_partial_value_case_and_equal_partial_value_propagation) {
    PartialShape pshape_a{{0, 10}, 1, 4};
    PartialShape pshape_b{{0, 10}, 1};

    PartialShape expected_shape = PartialShape{{0, 10}, 1, 4};

    auto a = std::make_shared<op::Parameter>(element::f32, pshape_a);
    auto b = std::make_shared<op::Parameter>(element::f32, pshape_b);
    auto shape_of_b = make_shared<op::v3::ShapeOf>(b);
    auto concat =
        make_shared<op::v0::Concat>(ov::OutputVector{shape_of_b, op::v0::Constant::create(element::i64, {1}, {4})}, 0);
    auto equal = make_shared<op::v1::Equal>(concat, op::v0::Constant::create(element::i64, {3}, {-1, -1, -1}));
    auto select = make_shared<op::v1::Select>(equal, op::Constant::create(element::i64, {3}, {1, 1, 1}), concat);

    PartialShape shape;
    auto broadcast_a = make_shared<op::v3::Broadcast>(a, select, "BIDIRECTIONAL");
    const auto out_shape = broadcast_a->get_output_partial_shape(0);

    EXPECT_EQ(out_shape, expected_shape);
    {
        auto constant = ov::get_constant_from_source(equal->output(0));
        EXPECT_TRUE(constant != nullptr);
        std::vector<bool> expected{false, false, false}, calculated = constant->get_vector<bool>();
        EXPECT_EQ(calculated, expected);
    }
    {
        equal = make_shared<op::v1::Equal>(concat, op::v0::Constant::create(element::i64, {3}, {5, 1, 4}));
        EXPECT_TRUE(ov::get_constant_from_source(equal->output(0)) == nullptr);
    }
    {
        equal = make_shared<op::v1::Equal>(concat, op::v0::Constant::create(element::i64, {3}, {11, 1, 4}));
        auto constant = ov::get_constant_from_source(equal->output(0));
        EXPECT_TRUE(constant != nullptr);
        std::vector<bool> expected{false, true, true}, calculated = constant->get_vector<bool>();
        EXPECT_EQ(calculated, expected);
    }
}
@ -32,8 +32,8 @@ TEST(type_prop, generate_proposals) {
    ASSERT_EQ(proposals->get_output_element_type(0), element::f32);
    ASSERT_EQ(proposals->get_output_element_type(1), element::f32);
    ASSERT_EQ(proposals->get_output_element_type(2), element::i64);
    EXPECT_EQ(proposals->get_output_partial_shape(0), (PartialShape{dyn_dim, 4}));
    EXPECT_EQ(proposals->get_output_partial_shape(1), (PartialShape{dyn_dim}));
    EXPECT_EQ(proposals->get_output_partial_shape(0), (PartialShape{{0, 1000}, 4}));
    EXPECT_EQ(proposals->get_output_partial_shape(1), (PartialShape{{0, 1000}}));
    EXPECT_EQ(proposals->get_output_partial_shape(2), (PartialShape{1}));

    im_info = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic(2));
@ -104,38 +104,97 @@ TEST(type_prop, generate_proposals_dynamic) {
        PartialShape deltas_shape;
        PartialShape scores_shape;
        size_t post_nms_count;
        PartialShape expected_shape_0;
        PartialShape expected_shape_1;
        PartialShape expected_shape_2;
    };

    const auto dyn_dim = Dimension::dynamic();
    const auto dyn_shape = PartialShape::dynamic();

    std::vector<ShapesAndAttrs> shapes = {
        {{1, 3}, {200, 336, 3, 4}, {1, 12, 200, 336}, {1, 3, 200, 336}, 1000},
        {{2, 3}, {200, 336, 3, 4}, {2, 12, 200, 336}, dyn_shape, 500},
        {{1, 3}, {200, 336, 3, 4}, dyn_shape, {1, 3, 200, 336}, 700},
        {{2, 3}, {200, 336, 3, 4}, dyn_shape, dyn_shape, 300},
        {{1, 3}, dyn_shape, {1, 12, 200, 336}, {1, 3, 200, 336}, 200},
        {{2, 3}, dyn_shape, {2, 12, 200, 336}, dyn_shape, 40},
        {{1, 3}, dyn_shape, dyn_shape, {1, 3, 200, 336}, 70},
        {{2, 3}, dyn_shape, dyn_shape, dyn_shape, 60},
        {dyn_shape, {200, 336, 3, 4}, {1, 12, 200, 336}, {1, 3, 200, 336}, 500},
        {dyn_shape, {200, 336, 3, 4}, {2, 12, 200, 336}, dyn_shape, 400},
        {dyn_shape, {200, 336, 3, 4}, dyn_shape, {1, 3, 200, 336}, 350},
        {dyn_shape, {200, 336, 3, 4}, dyn_shape, dyn_shape, 440},
        {dyn_shape, dyn_shape, {1, 12, 200, 336}, {1, 3, 200, 336}, 315},
        {dyn_shape, dyn_shape, {2, 12, 200, 336}, dyn_shape, 130},
        {dyn_shape, dyn_shape, dyn_shape, {1, 3, 200, 336}, 1000},
        {dyn_shape, dyn_shape, dyn_shape, dyn_shape, 700},
        {{1, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {1, 12, 200, 336}, {1, 3, 200, 336}, 540},
        {{1, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {1, 12, 200, 336}, {dyn_dim, dyn_dim, 200, 336}, 600},
        {{2, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {dyn_dim, dyn_dim, 200, 336}, {2, 3, 200, 336}, 75},
        {{1, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {dyn_dim, dyn_dim, 200, 336}, {dyn_dim, dyn_dim, 200, 336}, 80},
        {{1, 3}, {200, 336, 3, 4}, {1, 12, 200, dyn_dim}, {1, 3, 200, dyn_dim}, 430},
        {{2, 3}, {200, 336, 3, 4}, {2, 12, dyn_dim, 336}, {2, 3, dyn_dim, 336}, 180},
        {{1, 3}, {200, 336, 3, 4}, {1, 12, dyn_dim, dyn_dim}, {1, 3, dyn_dim, dyn_dim}, 170},
        {{1, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {1, 12, 200, dyn_dim}, {1, 3, 200, dyn_dim}, 200},
        {{2, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {2, 12, dyn_dim, 336}, {2, 3, dyn_dim, 336}, 800},
        {{1, 3}, {dyn_dim, dyn_dim, dyn_dim, 4}, {1, 12, dyn_dim, dyn_dim}, {1, 3, dyn_dim, dyn_dim}, 560},
        {{1, 3}, {200, 336, 3, 4}, {1, 12, 200, 336}, {1, 3, 200, 336}, 1000, {{0, 1000}, 4}, {{0, 1000}}, {1}},
        {{2, 3}, {200, 336, 3, 4}, {2, 12, 200, 336}, dyn_shape, 500, {{0, 1000}, 4}, {{0, 1000}}, {2}},
        {{1, 3}, {200, 336, 3, 4}, dyn_shape, {1, 3, 200, 336}, 700, {{0, 700}, 4}, {{0, 700}}, {1}},
        {{2, 3}, {200, 336, 3, 4}, dyn_shape, dyn_shape, 300, {{0, 600}, 4}, {{0, 600}}, {2}},
        {{1, 3}, dyn_shape, {1, 12, 200, 336}, {1, 3, 200, 336}, 200, {{0, 200}, 4}, {{0, 200}}, {1}},
        {{2, 3}, dyn_shape, {2, 12, 200, 336}, dyn_shape, 40, {{0, 80}, 4}, {{0, 80}}, {2}},
        {{1, 3}, dyn_shape, dyn_shape, {1, 3, 200, 336}, 70, {{0, 70}, 4}, {{0, 70}}, {1}},
        {{2, 3}, dyn_shape, dyn_shape, dyn_shape, 60, {{0, 120}, 4}, {{0, 120}}, {2}},
        {dyn_shape, {200, 336, 3, 4}, {1, 12, 200, 336}, {1, 3, 200, 336}, 500, {{0, 500}, 4}, {{0, 500}}, {1}},
        {dyn_shape, {200, 336, 3, 4}, {2, 12, 200, 336}, dyn_shape, 400, {{0, 800}, 4}, {{0, 800}}, {2}},
        {dyn_shape, {200, 336, 3, 4}, dyn_shape, {1, 3, 200, 336}, 350, {{0, 350}, 4}, {{0, 350}}, {1}},
        {dyn_shape, {200, 336, 3, 4}, dyn_shape, dyn_shape, 440, {dyn_dim, 4}, {dyn_dim}, {dyn_dim}},
        {dyn_shape, dyn_shape, {1, 12, 200, 336}, {1, 3, 200, 336}, 315, {{0, 315}, 4}, {{0, 315}}, {1}},
        {dyn_shape, dyn_shape, {2, 12, 200, 336}, dyn_shape, 130, {{0, 260}, 4}, {{0, 260}}, {2}},
        {dyn_shape, dyn_shape, dyn_shape, {1, 3, 200, 336}, 1000, {{0, 1000}, 4}, {{0, 1000}}, {1}},
        {dyn_shape, dyn_shape, dyn_shape, dyn_shape, 700, {dyn_dim, 4}, {dyn_dim}, {dyn_dim}},
        {{1, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {1, 12, 200, 336},
         {1, 3, 200, 336},
         540,
         {{0, 540}, 4},
         {{0, 540}},
         {{1}}},
        {{1, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {1, 12, 200, 336},
         {dyn_dim, dyn_dim, 200, 336},
         600,
         {{0, 600}, 4},
         {{0, 600}},
         {{1}}},
        {{2, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {dyn_dim, dyn_dim, 200, 336},
         {2, 3, 200, 336},
         75,
         {{0, 150}, 4},
         {{0, 150}},
         {{2}}},
        {{1, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {dyn_dim, dyn_dim, 200, 336},
         {dyn_dim, dyn_dim, 200, 336},
         80,
         {{0, 80}, 4},
         {{0, 80}},
         {{1}}},
        {{1, 3}, {200, 336, 3, 4}, {1, 12, 200, dyn_dim}, {1, 3, 200, dyn_dim}, 430, {{0, 430}, 4}, {{0, 430}}, {{1}}},
        {{2, 3}, {200, 336, 3, 4}, {2, 12, dyn_dim, 336}, {2, 3, dyn_dim, 336}, 180, {{0, 360}, 4}, {{0, 360}}, {{2}}},
        {{1, 3},
         {200, 336, 3, 4},
         {1, 12, dyn_dim, dyn_dim},
         {1, 3, dyn_dim, dyn_dim},
         170,
         {{0, 170}, 4},
         {{0, 170}},
         {{1}}},
        {{1, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {1, 12, 200, dyn_dim},
         {1, 3, 200, dyn_dim},
         200,
         {{0, 200}, 4},
         {{0, 200}},
         {{1}}},
        {{2, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {2, 12, dyn_dim, 336},
         {2, 3, dyn_dim, 336},
         800,
         {{0, 1600}, 4},
         {{0, 1600}},
         {{2}}},
        {{1, 3},
         {dyn_dim, dyn_dim, dyn_dim, 4},
         {1, 12, dyn_dim, dyn_dim},
         {1, 3, dyn_dim, dyn_dim},
         560,
         {{0, 560}, 4},
         {{0, 560}},
         {{1}}},
    };

    for (const auto& s : shapes) {
@ -155,12 +214,8 @@ TEST(type_prop, generate_proposals_dynamic) {
        ASSERT_EQ(proposals->get_output_element_type(0), element::f32);
        ASSERT_EQ(proposals->get_output_element_type(1), element::f32);
        ASSERT_EQ(proposals->get_output_element_type(2), element::i64);
        EXPECT_EQ(proposals->get_output_partial_shape(0), (PartialShape{dyn_dim, 4}));
        EXPECT_EQ(proposals->get_output_partial_shape(1), (PartialShape{dyn_dim}));
        if (s.im_info_shape.rank().is_static()) {
            EXPECT_EQ(proposals->get_output_partial_shape(2), PartialShape{s.im_info_shape[0]});
        } else {
            EXPECT_EQ(proposals->get_output_partial_shape(2), PartialShape::dynamic(1));
        }
        EXPECT_EQ(proposals->get_output_partial_shape(0), s.expected_shape_0);
        EXPECT_EQ(proposals->get_output_partial_shape(1), s.expected_shape_1);
        EXPECT_EQ(proposals->get_output_partial_shape(2), s.expected_shape_2);
    }
}
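The updated expectations follow one rule: once the batch size N can be inferred from any input (im_info, deltas, or scores) and post_nms_count is C, at most C proposals are emitted per image, so the ROIs output is bounded as {[0, N*C], 4} and the scores output as {[0, N*C]}; when no batch size is inferable, the bound degrades to fully dynamic. A self-contained sketch of the arithmetic (plain C++, values taken from one of the cases above):

#include <cstdio>

int main() {
    long batch = 2;             // inferred batch size N
    long post_nms_count = 500;  // at most this many proposals per image
    std::printf("rois: {[0, %ld], 4}, scores: {[0, %ld]}\n",
                batch * post_nms_count, batch * post_nms_count);  // upper bound 1000
}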
@ -260,38 +260,38 @@ TYPED_TEST_P(topk_type_prop, preserve_partial_values_and_labels_k_is_interval) {
    const auto data = std::make_shared<Parameter>(element::f32, data_shape);

    {
        // dim{2,5} k{10,20} -> {2,20}
        // dim{2,5} k{10,20} -> {2,5}
        const auto op = this->make_op(data, k, 0, "max", "value");
        EXPECT_THAT(op->get_output_partial_shape(0),
                    AllOf(PartialShape({{2, 20}, {12, 18}, {2, 30}, {30, 40}, {-1, 15}, {15, -1}}),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 30}, {30, 40}, {-1, 15}, {15, -1}}),
                          ResultOf(get_shape_labels, ElementsAre(no_label, 2, 3, 4, 5, 6))));
    }
    {
        // dim{12,18} k{10,20} -> {10,20}
        // dim{12,18} k{10,20} -> {10,18}
        const auto op = this->make_op(data, k, 1, "max", "value");
        EXPECT_THAT(op->get_output_partial_shape(0),
                    AllOf(PartialShape({{2, 5}, {10, 20}, {2, 30}, {30, 40}, {-1, 15}, {15, -1}}),
                    AllOf(PartialShape({{2, 5}, {10, 18}, {2, 30}, {30, 40}, {-1, 15}, {15, -1}}),
                          ResultOf(get_shape_labels, ElementsAre(1, no_label, 3, 4, 5, 6))));
    }
    {
        // dim{2, 30} k{10,20} -> {2,30}
        // dim{2, 30} k{10,20} -> {2,20}
        const auto op = this->make_op(data, k, 2, "max", "value");
        EXPECT_THAT(op->get_output_partial_shape(0),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 30}, {30, 40}, {-1, 15}, {15, -1}}),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 20}, {30, 40}, {-1, 15}, {15, -1}}),
                          ResultOf(get_shape_labels, ElementsAre(1, 2, no_label, 4, 5, 6))));
    }
    {
        // dim{30,40} k{10,20} -> {10,40} (should use k upper bounds??)
        // dim{30,40} k{10,20} -> {10,20} (should use k upper bounds??)
        const auto op = this->make_op(data, k, 3, "max", "value");
        EXPECT_THAT(op->get_output_partial_shape(0),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 30}, {10, 40}, {-1, 15}, {15, -1}}),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 30}, {10, 20}, {-1, 15}, {15, -1}}),
                          ResultOf(get_shape_labels, ElementsAre(1, 2, 3, no_label, 5, 6))));
    }
    {
        // dim{-inf,15} k{10,20} -> {0,20}
        // dim{-inf,15} k{10,20} -> {0,15}
        const auto op = this->make_op(data, k, 4, "max", "value");
        EXPECT_THAT(op->get_output_partial_shape(0),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 30}, {30, 40}, {0, 20}, {15, -1}}),
                    AllOf(PartialShape({{2, 5}, {12, 18}, {2, 30}, {30, 40}, {0, 15}, {15, -1}}),
                          ResultOf(get_shape_labels, ElementsAre(1, 2, 3, 4, no_label, 6))));
    }
    {
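The corrected expectations all follow from one observation: along the sorted axis TopK returns at most min(dim, k) elements, so each output bound is the element-wise minimum of the data-dimension interval and the k interval. A sketch reproducing the second case above (plain C++):

#include <algorithm>
#include <cstdio>

int main() {
    long d_lo = 12, d_hi = 18;  // data dimension interval {12,18}
    long k_lo = 10, k_hi = 20;  // k interval {10,20}
    std::printf("output dim: {%ld,%ld}\n",
                std::min(d_lo, k_lo), std::min(d_hi, k_hi));  // prints "{10,18}"
}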
@ -410,6 +410,16 @@ TEST_F(TypePropTopKV3Test, k_is_u32) {
                Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({1, {-1, 2}}))));
}

TEST(type_prop, top_k_partial_value) {
    const auto data = std::make_shared<opset11::Parameter>(element::f32, PartialShape{{0, 16000}});
    const auto shape = std::make_shared<opset11::ShapeOf>(data);
    const auto concat =
        std::make_shared<Concat>(ov::OutputVector{shape, Constant::create(element::i64, {1}, {200})}, 0);
    const auto reduce_min = std::make_shared<opset11::ReduceMin>(concat, Constant::create(element::i64, {1}, {0}));
    const auto op = std::make_shared<op::v3::TopK>(data, reduce_min, 0, "max", "value");
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{0, 200}}));
}

TEST(type_prop, topk_v11_stable_sort_by_indices) {
    const auto data = std::make_shared<Parameter>(element::f32, Shape{2, 3, 4});
    const auto k = Constant::create(element::u32, Shape{}, {1});
@ -9,18 +9,24 @@
#include <cassert>
#include <map>
#include <memory>
#include <openvino/cc/pass/itt.hpp>
#include <openvino/op/util/op_types.hpp>
#include <set>
#include <string>
#include <transformations/common_optimizations/fold_subgraph_empty_inputs.hpp>
#include <transformations/common_optimizations/nop_elimination.hpp>
#include <transformations/common_optimizations/remove_concat_zero_dim_input.hpp>
#include <transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp>
#include <unordered_set>
#include <vector>

#include "blob_factory.hpp"
#include "cpp/ie_cnn_network.h"
#include "ie_common.h"
#include "ie_itt.hpp"
#include "ie_memcpy.h"
#include "ie_ngraph_utils.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/manager.hpp"
#include "openvino/core/except.hpp"
#include "openvino/pass/serialize.hpp"
@ -29,26 +35,6 @@
#include "transformations/smart_reshape/smart_reshape.hpp"
#include "transformations/utils/utils.hpp"

// TODO: remove this pass usage
#include <legacy/transformations/convert_opset1_to_legacy/convert_nms_5_to_legacy.hpp>
#include <legacy/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
#include <openvino/cc/pass/itt.hpp>
#include <transformations/common_optimizations/dimension_tracking.hpp>
#include <transformations/common_optimizations/fold_subgraph_empty_inputs.hpp>
#include <transformations/common_optimizations/nop_elimination.hpp>
#include <transformations/common_optimizations/remove_concat_zero_dim_input.hpp>
#include <transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp>
#include <transformations/disable_decompression_convert_constant_folding.hpp>
#include <transformations/low_precision/mark_dequantization_subgraph.hpp>
#include <transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp>
#include <transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp>
#include <transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp>
#include <transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp>

#include "exec_graph_info.hpp"
#include "ie_itt.hpp"
#include "ie_ngraph_utils.hpp"

using namespace std;
using namespace InferenceEngine;
using details::CNNNetworkNGraphImpl;
@ -79,12 +65,13 @@ void CNNNetworkNGraphImpl::createDataForResult(const ::ngraph::Output<::ngraph::
        }
    };
    auto shape = output.get_partial_shape();
    auto rank = shape.rank().is_static() ? shape.rank().get_length() : -1;
    SizeVector dims(1, 0);
    if (shape.is_static()) {
        dims = output.get_shape();
    } else if (rank >= 0) {
        dims = SizeVector(rank, 0);
    if (shape.rank().is_static()) {
        dims.resize(shape.size(), 0);
        for (size_t i = 0; i < shape.size(); ++i) {
            if (shape[i].get_max_length() != -1)  // dimension has an estimation
                dims[i] = shape[i].get_max_length();
        }
    }
    // query shape from ngraph::Parameter output shape and check there are no zeros in it
    for (const auto& dim : shape) {
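The new dims logic keeps a 0 placeholder only for dimensions with no upper estimate and otherwise uses the maximum length as the estimate. A small sketch of that fallback (plain C++; the partial shape {?, 3, [1,22]} is an assumed example):

#include <cstdio>
#include <vector>

int main() {
    // -1 stands in for "no upper estimate", mirroring Dimension::get_max_length().
    std::vector<long> max_len{-1, 3, 22};
    std::vector<size_t> dims(max_len.size(), 0);
    for (size_t i = 0; i < max_len.size(); ++i)
        if (max_len[i] != -1)  // dimension has an estimation
            dims[i] = static_cast<size_t>(max_len[i]);
    for (size_t d : dims)
        std::printf("%zu ", d);  // prints "0 3 22"
}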
@ -92,6 +79,7 @@ void CNNNetworkNGraphImpl::createDataForResult(const ::ngraph::Output<::ngraph::
            IE_THROW() << outName << " has zero dimension which is not allowed";
    }

    auto rank = shape.rank().is_static() ? shape.rank().get_length() : -1;
    const Layout rankLayout = rank < 0 ? Layout::BLOCKED : TensorDesc::getLayoutByRank(rank);
    if (ptr) {
        const auto origLayout = ptr->getTensorDesc().getLayout();
@ -118,7 +106,7 @@ void CNNNetworkNGraphImpl::validateFunctionNames() const {
        if (parent->get_output_size() > 1) {
            name += "." + std::to_string(result->get_input_source_output(0).get_index());
        }
        if (unique_names.count(name) && !ngraph::op::is_parameter(parent) && parent != unique_names.at(name)) {
        if (unique_names.count(name) && !ov::op::util::is_parameter(parent) && parent != unique_names.at(name)) {
            IE_THROW() << "Function contains several inputs and outputs with one friendly name: " << name;
        }
        unique_names.insert({name, parent});
@ -489,8 +477,7 @@ void CNNNetworkNGraphImpl::reshape(const std::map<std::string, ngraph::PartialSh
    auto params = _ngraph_function->get_parameters();

    bool parameter_replaced = false;
    for (size_t i = 0; i < params.size(); i++) {
        auto& param = params[i];
    for (auto& param : params) {
        if (inputShapes.find(param->get_friendly_name()) == inputShapes.end())
            continue;
        param->set_partial_shape(inputShapes.at(param->get_friendly_name()));
@ -499,43 +486,6 @@ void CNNNetworkNGraphImpl::reshape(const std::map<std::string, ngraph::PartialSh
    if (parameter_replaced)
        _ngraph_function->validate_nodes_and_infer_types();

    const auto& results = _ngraph_function->get_results();
    bool outputs_are_static = all_of(begin(results), end(results), [](const std::shared_ptr<ngraph::Node>& n) {
        return n->get_output_partial_shape(0).is_static();
    });

    {
        shared_ptr<Function> specialized_ngraph_function = nullptr;
        if (outputs_are_static) {
            specialized_ngraph_function = _ngraph_function;
        } else {
            specialized_ngraph_function = ngraph::clone_function(*_ngraph_function);
            {
                OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "CNNNetworkNGraphImpl::ConvertToLegacy");
                ::ngraph::pass::Manager manager;
                // resolves dynamism by replacing dynamic operation with static version
                using namespace ngraph::pass;
                using namespace ov::pass;
                REGISTER_PASS(manager, ConvertNMS5ToLegacyMatcher, false)
                REGISTER_PASS(manager, ConvertMulticlassNmsToMulticlassNmsIE, false)
                REGISTER_PASS(manager, ConvertMatrixNmsToMatrixNmsIE, false)
                REGISTER_PASS(manager, ConvertNMS9ToNMSIEInternal)
                REGISTER_PASS(manager, ConvertGP9ToGPIEInternal)
                REGISTER_PASS(
                    manager,
                    MarkDequantizationSubgraph,
                    ov::element::TypeVector{ov::element::i8, ov::element::u8, ov::element::i4, ov::element::u4})
                REGISTER_PASS(manager, DisableDecompressionConvertConstantFolding)
                REGISTER_PASS(manager, ConstantFolding)

                // OneHotToLegacy changes output precision
                manager.register_pass<::ngraph::pass::ConvertOneHotToOneHotIEMatcher>()->detect_output_type(
                    specialized_ngraph_function);
                manager.run_passes(specialized_ngraph_function);
            }
            specialized_ngraph_function->validate_nodes_and_infer_types();
        }

#if 0
    bool obfuscate = true;  // set to false to get exact dimensions
    std::map<std::string, std::map<std::string, size_t>> signatures;
@ -547,19 +497,18 @@ void CNNNetworkNGraphImpl::reshape(const std::map<std::string, ngraph::PartialSh
            std::cout << item.first << " " << shape_to_count.second << "x " << shape_to_count.first << std::endl;
#endif

        std::unordered_set<std::string> opName;
        for (const auto& result : specialized_ngraph_function->get_results()) {
            addOutput(result->input_value(0));
        }
    std::unordered_set<std::string> opName;
    for (const auto& result : _ngraph_function->get_results()) {
        addOutput(result->input_value(0));
    }

        for (const auto& parameter : specialized_ngraph_function->get_parameters()) {
            const auto& outName = parameter->get_friendly_name();
            if (opName.find(outName) != opName.end()) {
                IE_THROW() << "All operations in nGraph function should have unique friendly names!";
            }
            opName.insert(outName);
            createDataForResult(parameter, outName, _data[outName]);
    for (const auto& parameter : _ngraph_function->get_parameters()) {
        const auto& outName = parameter->get_friendly_name();
        if (opName.find(outName) != opName.end()) {
            IE_THROW() << "All operations in nGraph function should have unique friendly names!";
        }
        opName.insert(outName);
        createDataForResult(parameter, outName, _data[outName]);
    }
}
@ -72,7 +72,7 @@ TEST_F(NGraphReshapeTests, ReshapedDynamicShapeLayout) {

    CNNNetwork cnnNetwork(ngraph);
    ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout());
    ASSERT_EQ(cnnNetwork.getInputsInfo()["A"]->getInputData()->getDims(), (SizeVector{0, 0, 0, 0}));
    ASSERT_EQ(cnnNetwork.getInputsInfo()["A"]->getInputData()->getDims(), (SizeVector{0, 3, 22, 22}));

    ICNNNetwork::InputShapes new_shape;
    new_shape["A"] = {1, 3, 22, 22};