[CPU] Issue in opset name determining (#10479)

Vladislav Volkov 2022-02-21 18:47:24 +03:00 committed by GitHub
parent b7fede89c8
commit 1d33c37970
7 changed files with 97 additions and 171 deletions

View File

@ -18,7 +18,7 @@ namespace internal {
class TRANSFORMATIONS_API NonMaxSuppressionIEInternal : public Op {
public:
OPENVINO_OP("NonMaxSuppressionIEInternal", "util");
OPENVINO_OP("NonMaxSuppressionIEInternal", "ie_internal_opset");
BWDCMP_RTTI_DECLARATION;
NonMaxSuppressionIEInternal() = default;

View File

@ -109,7 +109,7 @@ const ::ngraph::Node::type_info_t& NmsStaticShapeIE<BaseNmsOp>::get_type_info_st
static const std::string name = BaseNmsOpTypeInfoPtr->name;
static const ::ngraph::Node::type_info_t type_info_static{
- name.c_str(), BaseNmsOpTypeInfoPtr->version, BaseNmsOpTypeInfoPtr};
+ name.c_str(), BaseNmsOpTypeInfoPtr->version, "ie_internal_opset", BaseNmsOpTypeInfoPtr};
return type_info_static;
}
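This hunk and the previous one move the internal NMS operations from the generic "util" RTTI group into a dedicated "ie_internal_opset" name, which is what the fixed get_opset_name in the serializer (next file) will report for them. A hypothetical custom op, shown only to illustrate where that second OPENVINO_OP argument ends up; the class is a skeleton with the required virtual overrides omitted:

```cpp
#include <openvino/op/op.hpp>

// Hypothetical op, not part of this commit: the second OPENVINO_OP argument
// becomes DiscreteTypeInfo::version_id, i.e. the opset name the serializer reports.
class MyInternalOp : public ov::op::Op {
public:
    OPENVINO_OP("MyInternalOp", "my_internal_opset");
    MyInternalOp() = default;
    // clone_with_new_inputs / validate_and_infer_types omitted in this sketch.
};

// MyInternalOp::get_type_info_static().version_id == "my_internal_opset"
```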

View File

@ -568,9 +568,7 @@ const std::vector<Edge> create_edge_mapping(const std::unordered_map<ngraph::Nod
std::string get_opset_name(const ngraph::Node* n, const std::map<std::string, ngraph::OpSet>& custom_opsets) {
OPENVINO_ASSERT(n != nullptr);
- if (n->get_type_info().version_id != nullptr) {
- return n->get_type_info().version_id;
- }
// Try to find opset name from RT info
auto opset_it = n->get_rt_info().find("opset");
if (opset_it != n->get_rt_info().end()) {
@ -582,6 +580,10 @@ std::string get_opset_name(const ngraph::Node* n, const std::map<std::string, ng
}
}
+ if (n->get_type_info().version_id != nullptr) {
+ return n->get_type_info().version_id;
+ }
for (const auto& custom_opset : custom_opsets) {
std::string name = custom_opset.first;
ngraph::OpSet opset = custom_opset.second;
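Taken together, the two serializer hunks reorder get_opset_name: the "opset" hint stored in a node's runtime info is now consulted before the type's version_id, so a custom opset name recorded on the node is no longer shadowed by the built-in one. A minimal sketch of the resulting lookup order; the body of the RT-info branch is not shown in the diff, so its membership check here is an assumption:

```cpp
// Sketch only: simplified from the hunks above, error handling omitted.
std::string get_opset_name_sketch(const ngraph::Node* n,
                                  const std::map<std::string, ngraph::OpSet>& custom_opsets) {
    // 1. Prefer an explicit "opset" entry in the node's runtime info
    //    (assumed here to be honoured only if it names a known custom opset).
    const auto& rt_info = n->get_rt_info();
    const auto opset_it = rt_info.find("opset");
    if (opset_it != rt_info.end() && opset_it->second.is<std::string>()) {
        const auto& opset_name = opset_it->second.as<std::string>();
        if (custom_opsets.count(opset_name))
            return opset_name;
    }
    // 2. Only then fall back to the opset recorded in the node's type info
    //    (before this commit this check came first and always won).
    if (n->get_type_info().version_id != nullptr)
        return n->get_type_info().version_id;
    // 3. Finally, search the custom opsets for the node's type.
    for (const auto& custom_opset : custom_opsets) {
        if (custom_opset.second.contains_op_type(n))
            return custom_opset.first;
    }
    return {};
}
```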

View File

@ -608,6 +608,20 @@ GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& nod
return params;
}
+ // Symmetric function to translate type name.
+ // See translate_type_name in src/core/src/pass/serialize.cpp.
+ static const std::string& translate_type_name(const std::string& name) {
+ static const std::unordered_map<std::string, std::string> translate_type_name_translator = {{"Const", "Constant"},
+ {"PReLU", "PRelu"},
+ {"ReLU", "Relu"},
+ {"SoftMax", "Softmax"}};
+ auto found = translate_type_name_translator.find(name);
+ if (found != end(translate_type_name_translator)) {
+ return found->second;
+ }
+ return name;
+ }
std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
const std::vector<ngraph::Output<ngraph::Node>>& inputs,
const pugi::xml_node& node,
@ -623,8 +637,10 @@ std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
<< " has undefined element type for input with index " << i << "!";
}
const std::string& type_name = translate_type_name(params.type);
std::shared_ptr<ngraph::Node> ngraphNode;
ov::DiscreteTypeInfo type(params.type.c_str(), 0, params.version.c_str());
ov::DiscreteTypeInfo type(type_name.c_str(), 0, params.version.c_str());
auto extensionIt = m_extensions.find(type);
if (extensionIt != m_extensions.end()) {
@ -646,17 +662,15 @@ std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
"RNNCell",
"Proposal"};
- if (experimental_ops_added_to_opset.count(params.type) &&
+ if (experimental_ops_added_to_opset.count(type_name) &&
(params.version == "experimental" || params.version == "extension")) {
opsetIt = m_opsets.find("opset6");
}
if (!ngraphNode && opsetIt != m_opsets.end()) {
auto const& type = params.type == "Const" ? "Constant" : params.type;
if (params.version == "opset1") {
// MVN, ROIPooling and ReorgYolo were missing in opset1
if (type == "MVN" || type == "ROIPooling" || type == "ReorgYolo") {
if (type_name == "MVN" || type_name == "ROIPooling" || type_name == "ReorgYolo") {
opsetIt = m_opsets.find("opset2");
if (opsetIt == m_opsets.end()) {
IE_THROW() << "Cannot create " << params.type << " layer " << params.name
@ -667,9 +681,9 @@ std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
auto const& opset = opsetIt->second;
- ngraphNode = std::shared_ptr<ngraph::Node>(opset.create_insensitive(type));
+ ngraphNode = std::shared_ptr<ngraph::Node>(opset.create_insensitive(type_name));
if (!ngraphNode) {
IE_THROW() << "Opset " << params.version << " doesn't contain the operation with type: " << type;
IE_THROW() << "Opset " << params.version << " doesn't contain the operation with type: " << type_name;
}
// Share Weights form constant blob
if (auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(ngraphNode)) {
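With these hunks the IR deserializer normalizes the layer's type name through the same table the serializer uses, and feeds the normalized name into the extension lookup (DiscreteTypeInfo), the experimental-ops check, and the opset lookup; previously only the opset path special-cased "Const". A hypothetical gtest-style check of the mapping, assuming the file-local helper above is visible to the test:

```cpp
// Hypothetical test, not part of this commit.
TEST(TranslateTypeName, NormalizesLegacyIRSpellings) {
    EXPECT_EQ(translate_type_name("Const"), "Constant");
    EXPECT_EQ(translate_type_name("PReLU"), "PRelu");
    EXPECT_EQ(translate_type_name("ReLU"), "Relu");
    EXPECT_EQ(translate_type_name("SoftMax"), "Softmax");
    EXPECT_EQ(translate_type_name("MVN"), "MVN");  // unknown names pass through unchanged
}
```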

View File

@ -96,7 +96,6 @@ ov::intel_cpu::MoveEltwiseUpThroughDataMov::MoveEltwiseUpThroughDataMov() {
eltwiseInputs[0] = child->input_value(0);
auto newEltwise = eltwise->clone_with_new_inputs(eltwiseInputs);
ngraph::copy_runtime_info(eltwise, newEltwise);
- newEltwise->set_friendly_name(eltwise->get_friendly_name());
ngraph::OutputVector childInputs = child->input_values();
childInputs[0] = newEltwise;

View File

@ -655,6 +655,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
}
auto config = orig_config;
CNNNetwork clonedNetwork = InferenceEngine::details::cloneNetwork(network);
const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE);
const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/
@ -822,7 +823,8 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
RO_property(ov::range_for_async_infer_requests.name()),
RO_property(ov::range_for_streams.name()),
RO_property(ov::device::full_name.name()),
- RO_property(ov::device::capabilities.name())
+ RO_property(ov::device::capabilities.name()),
+ RO_property(ov::cache_dir.name()) // WA Can be removed after implementing snippet serialization.
};
// the whole config is RW before network is loaded.
std::vector<ov::PropertyName> rwProperties {RW_property(ov::num_streams.name()),
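The ov::cache_dir property is temporarily advertised as a read-only supported property of the CPU plugin; the in-line comment marks it as a workaround until snippets can be serialized, after which the entry can be dropped. A rough usage sketch of querying it through the 2.0 API, assuming a standard OpenVINO runtime with the CPU plugin available; the reported values depend on the installation and configuration:

```cpp
#include <openvino/runtime/core.hpp>
#include <iostream>

int main() {
    ov::Core core;
    // Enable model caching globally; the directory is an arbitrary example.
    core.set_property(ov::cache_dir("/tmp/ov_cache"));
    // With this change the CPU plugin lists ov::cache_dir among its read-only
    // supported properties, so caching-related code can read it back.
    for (const auto& prop : core.get_property("CPU", ov::supported_properties))
        std::cout << prop << std::endl;
    std::cout << core.get_property("CPU", ov::cache_dir) << std::endl;
    return 0;
}
```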

View File

@ -14,11 +14,12 @@
using namespace testing;
- TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwise) {
+ class MoveEltwiseUpThroughDataMovTest: public TransformationTestsF{};
+ TEST_F(MoveEltwiseUpThroughDataMovTest, SingleUnaryEltwise) {
const ngraph::Shape shape{1, 3, 224, 224};
const std::vector<int64_t> input_order = {3, 2, 1, 0};
const int64_t unsqueeze_axis = 2;
- std::shared_ptr<ngraph::Function> f(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -30,15 +31,9 @@ TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwise) {
auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
- f = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input});
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
}
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -50,19 +45,14 @@ TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwise) {
auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
- f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
+ function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
}
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
}
- TEST(MoveEltwiseUpThroughDataMov, EltwiseSequence) {
+ TEST_F(MoveEltwiseUpThroughDataMovTest, EltwiseSequence) {
const ngraph::Shape shape{1, 3, 224, 224};
const std::vector<int64_t> input_order = {1, 2, 0, 3};
const int64_t unsqueeze_axis = 1;
- std::shared_ptr<ngraph::Function> f(nullptr);
{
auto input_left = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
auto input_right = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -79,16 +69,9 @@ TEST(MoveEltwiseUpThroughDataMov, EltwiseSequence) {
auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
- f = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input_left, input_right});
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input_left, input_right});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
}
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref(nullptr);
{
auto input_left = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
auto input_right = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -105,60 +88,40 @@ TEST(MoveEltwiseUpThroughDataMov, EltwiseSequence) {
auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
- f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input_left, input_right});
+ function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input_left, input_right});
}
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
}
- TEST(MoveEltwiseUpThroughDataMov, DataMovementTwoConsumers) {
+ TEST_F(MoveEltwiseUpThroughDataMovTest, DataMovementTwoConsumers) {
/* In this case transformation shouldn't apply */
- auto create_graph = [] () -> std::shared_ptr<ngraph::Function> {
- const ngraph::Shape shape{1, 3, 224, 224};
- const std::vector<int64_t> input_order = {1, 2, 0, 3};
- const int64_t unsqueeze_axis = 1;
+ const ngraph::Shape shape{1, 3, 224, 224};
+ const std::vector<int64_t> input_order = {1, 2, 0, 3};
+ const int64_t unsqueeze_axis = 1;
- auto input_left = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
- auto input_right = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
+ auto input_left = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
+ auto input_right = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
- auto matmul = std::make_shared<ngraph::opset8::MatMul>(input_left, input_right);
+ auto matmul = std::make_shared<ngraph::opset8::MatMul>(input_left, input_right);
- auto transpose_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{input_order.size()}, input_order);
- auto transpose = std::make_shared<ngraph::opset8::Transpose>(matmul, transpose_const);
+ auto transpose_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{input_order.size()}, input_order);
+ auto transpose = std::make_shared<ngraph::opset8::Transpose>(matmul, transpose_const);
- auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
- auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
+ auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
+ auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
- auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
+ auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
- auto relu = std::make_shared<ngraph::opset8::Relu>(transpose);
+ auto relu = std::make_shared<ngraph::opset8::Relu>(transpose);
- return std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid, relu}, ngraph::ParameterVector{input_left, input_right});
- };
- std::shared_ptr<ngraph::Function> f = create_graph();
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref = create_graph();
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid, relu}, ngraph::ParameterVector{input_left, input_right});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
}
- TEST(MoveEltwiseUpThroughDataMov, SingleBinaryEltwiseWithScalarOnSecondBranch) {
+ TEST_F(MoveEltwiseUpThroughDataMovTest, SingleBinaryEltwiseWithScalarOnSecondBranch) {
const ngraph::Shape shape{1, 3, 224, 224};
const std::vector<int64_t> input_order = {3, 2, 1, 0};
const int64_t unsqueeze_axis = 2;
const float scalar_value = 0.5f;
- std::shared_ptr<ngraph::Function> f(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -170,14 +133,9 @@ TEST(MoveEltwiseUpThroughDataMov, SingleBinaryEltwiseWithScalarOnSecondBranch) {
auto add = std::make_shared<ngraph::opset8::Add>(unsqueeze, ngraph::opset8::Constant::create(ngraph::element::f32, {}, {scalar_value}));
- f = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
}
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -189,20 +147,15 @@ TEST(MoveEltwiseUpThroughDataMov, SingleBinaryEltwiseWithScalarOnSecondBranch) {
auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
- f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
+ function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
}
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
}
- TEST(MoveEltwiseUpThroughDataMov, SingleEltwiseWith5ScalarOnSecondBranch) {
+ TEST_F(MoveEltwiseUpThroughDataMovTest, SingleEltwiseWith5ScalarOnSecondBranch) {
const ngraph::Shape shape{1, 3, 224, 224};
const std::vector<int64_t> input_order = {3, 2, 1, 0};
const int64_t unsqueeze_axis = 2;
const float scalar_value = 0.5f;
- std::shared_ptr<ngraph::Function> f(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -211,14 +164,9 @@ TEST(MoveEltwiseUpThroughDataMov, SingleEltwiseWith5ScalarOnSecondBranch) {
auto add = std::make_shared<ngraph::opset8::Add>(unsqueeze, ngraph::opset8::Constant::create(ngraph::element::f32, {1, 1, 1, 1, 1}, {scalar_value}));
- f = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
}
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
@ -227,50 +175,33 @@ TEST(MoveEltwiseUpThroughDataMov, SingleEltwiseWith5ScalarOnSecondBranch) {
auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(add, unsqueeze_const);
- f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
+ function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
}
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
}
- TEST(MoveEltwiseUpThroughDataMov, SingleBinaryEltwiseWithNotScalarOnSecondBranch) {
- auto create_graph = [] () -> std::shared_ptr<ngraph::Function> {
- const ngraph::Shape shape{1, 3, 224, 224};
- const std::vector<int64_t> input_order = {3, 2, 1, 0};
- const int64_t unsqueeze_axis = 2;
- std::shared_ptr<ngraph::Function> f(nullptr);
- auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
- auto transpose_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{input_order.size()}, input_order);
- auto transpose = std::make_shared<ngraph::opset8::Transpose>(input, transpose_const);
- auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
- auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
- auto add_scalar = ngraph::opset8::Constant::create(ngraph::element::f32, {1, 1, 1, 3}, {0.5, 0.2, 0.3});
- auto add = std::make_shared<ngraph::opset8::Add>(unsqueeze, add_scalar);
- return std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
- };
- std::shared_ptr<ngraph::Function> f = create_graph();
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref = create_graph();
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
- }
- TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwiseDynamicShape) {
+ TEST_F(MoveEltwiseUpThroughDataMovTest, SingleBinaryEltwiseWithNotScalarOnSecondBranch) {
+ const ngraph::Shape shape{1, 3, 224, 224};
+ const std::vector<int64_t> input_order = {3, 2, 1, 0};
+ const int64_t unsqueeze_axis = 2;
+ auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, shape);
+ auto transpose_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{input_order.size()}, input_order);
+ auto transpose = std::make_shared<ngraph::opset8::Transpose>(input, transpose_const);
+ auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
+ auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(transpose, unsqueeze_const);
+ auto add_scalar = ngraph::opset8::Constant::create(ngraph::element::f32, {1, 1, 1, 3}, {0.5, 0.2, 0.3});
+ auto add = std::make_shared<ngraph::opset8::Add>(unsqueeze, add_scalar);
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
+ }
+ TEST_F(MoveEltwiseUpThroughDataMovTest, SingleUnaryEltwiseDynamicShape) {
const std::vector<int64_t> input_order = {3, 2, 1, 0};
const int64_t unsqueeze_axis = 2;
- std::shared_ptr<ngraph::Function> f(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(3));
@ -279,15 +210,10 @@ TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwiseDynamicShape) {
auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
- f = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input});
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
}
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref(nullptr);
{
auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(3));
@ -296,36 +222,19 @@ TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwiseDynamicShape) {
auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(sigmoid, unsqueeze_const);
- f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
+ function_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{unsqueeze}, ngraph::ParameterVector{input});
}
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
}
- TEST(MoveEltwiseUpThroughDataMov, SingleUnaryEltwiseDynamicRank) {
- auto create_graph = [] () -> std::shared_ptr<ngraph::Function> {
- const std::vector<int64_t> input_order = {3, 2, 1, 0};
- const int64_t unsqueeze_axis = 2;
- std::shared_ptr<ngraph::Function> f(nullptr);
- auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(ngraph::Rank::dynamic()));
+ TEST_F(MoveEltwiseUpThroughDataMovTest, SingleUnaryEltwiseDynamicRank) {
+ const std::vector<int64_t> input_order = {3, 2, 1, 0};
+ const int64_t unsqueeze_axis = 2;
- auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
- auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(input, unsqueeze_const);
- auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
- return std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input});
- };
- std::shared_ptr<ngraph::Function> f = create_graph();
- ngraph::pass::Manager m;
- m.register_pass<ngraph::pass::InitNodeInfo>();
- m.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
+ auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(ngraph::Rank::dynamic()));
- m.run_passes(f);
- ASSERT_NO_THROW(check_rt_info(f));
- std::shared_ptr<ngraph::Function> f_ref = create_graph();
- auto res = compare_functions(f, f_ref);
- ASSERT_TRUE(res.first) << res.second;
+ auto unsqueeze_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {unsqueeze_axis});
+ auto unsqueeze = std::make_shared<ngraph::opset8::Unsqueeze>(input, unsqueeze_const);
+ auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(unsqueeze);
+ function = std::make_shared<ngraph::Function>(ngraph::NodeVector{sigmoid}, ngraph::ParameterVector{input});
+ manager.register_pass<ov::intel_cpu::MoveEltwiseUpThroughDataMov>();
}
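All of the tests above are now written against a shared TransformationTestsF fixture: each test only builds function (and, where the transformation is expected to fire, function_ref) and registers the pass under test on manager, which removes the per-test pass-manager and compare_functions boilerplate. For the negative cases (DataMovementTwoConsumers, SingleBinaryEltwiseWithNotScalarOnSecondBranch) no function_ref is set, and the fixture falls back to comparing against the untouched original graph. A rough sketch of that behaviour, assuming the check_rt_info and compare_functions helpers already used in this file; the real fixture lives in the common test utilities and does more (e.g. configurable comparators):

```cpp
// Sketch only: an approximation of what TransformationTestsF does for these tests.
class TransformationTestsFSketch : public ::testing::Test {
public:
    std::shared_ptr<ngraph::Function> function, function_ref;
    ngraph::pass::Manager manager;

    void SetUp() override {
        manager.register_pass<ngraph::pass::InitNodeInfo>();
    }

    void TearDown() override {
        // If a test left function_ref unset, the transformation is expected to be
        // a no-op, so compare against a clone taken before the passes run.
        if (!function_ref)
            function_ref = ngraph::clone_function(*function);
        manager.run_passes(function);
        ASSERT_NO_THROW(check_rt_info(function));
        auto res = compare_functions(function, function_ref);
        ASSERT_TRUE(res.first) << res.second;
    }
};
```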