[Snippets] [CPU] Serialization/Deserialization enabled (#14064)

This commit is contained in:
Vladislav Golubev 2022-11-30 11:37:19 +01:00 committed by GitHub
parent a296a509e8
commit 9eb43bb8b4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 426 additions and 145 deletions

View File

@ -24,22 +24,9 @@ public:
BroadcastLoad(const Output<Node>& x, Shape output_shape);
BroadcastLoad() = default;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
void set_broadcast_info(const Shape& bct) {
broadcast_info = bct;
}
bool is_broadcast(size_t idx) {
return broadcast_info[idx] == 1;
}
private:
Shape broadcast_info;
};
} // namespace op

View File

@ -20,6 +20,7 @@ namespace op {
class PowerStatic : public ov::op::util::UnaryElementwiseArithmetic {
public:
OPENVINO_OP("PowerStatic", "SnippetsOpset", ov::op::util::UnaryElementwiseArithmetic);
BWDCMP_RTTI_DECLARATION;
PowerStatic() = default;
PowerStatic(const Output <Node> &arg, float power) : UnaryElementwiseArithmetic(arg), power(power) {

View File

@ -19,6 +19,9 @@ namespace op {
class Scalar : public ov::op::v0::Constant {
public:
OPENVINO_OP("Scalar", "SnippetsOpset", ov::op::v0::Constant);
BWDCMP_RTTI_DECLARATION;
Scalar() = default;
template <class T, class = typename std::enable_if<std::is_fundamental<T>::value>::type>
Scalar(const element::Type& type, Shape shape, T value) : Constant(type, shape, value) {

View File

@ -7,6 +7,7 @@
#include <memory>
#include <openvino/core/model.hpp>
#include <openvino/op/util/sub_graph_base.hpp>
#include <ngraph/op/op.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pass/manager.hpp>
@ -22,9 +23,10 @@ namespace op {
* @brief An operation that is implemented by a model
* @ingroup snippets
*/
class Subgraph : public ngraph::op::Op {
class Subgraph : public ov::op::util::SubGraphOp {
public:
OPENVINO_OP("Subgraph", "SnippetsOpset");
OPENVINO_OP("Subgraph", "SnippetsOpset", ov::op::util::SubGraphOp);
BWDCMP_RTTI_DECLARATION;
// < 1, 42, 17, 15, 16> < 0, 1, 2, 3, 1>
// should be:
@ -70,6 +72,8 @@ public:
using BlockedShape = std::tuple<ngraph::Shape, ngraph::AxisVector, ngraph::element::Type>;
using BlockedShapeVector = std::vector<BlockedShape>;
Subgraph() = default;
Subgraph(const OutputVector& args, std::shared_ptr<ov::Model> body);
Subgraph(const NodeVector& args, std::shared_ptr<ov::Model> body);
@ -80,11 +84,29 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const override;
std::shared_ptr<ov::Model> get_body() const {
return m_body;
// we introduce this method instead of using SubGraphOp::get_function()
// to align naming with other methods
const std::shared_ptr<ov::Model> & body_ptr() const {
return m_bodies[0];
}
std::shared_ptr<ngraph::snippets::Generator> get_generator() const {
std::shared_ptr<ov::Model> & body_ptr() {
return m_bodies[0];
}
const ov::Model & body() const {
return *m_bodies[0];
}
ov::Model & body() {
return *m_bodies[0];
}
const std::shared_ptr<ngraph::snippets::Generator> & get_generator() const {
return m_generator;
}
std::shared_ptr<ngraph::snippets::Generator> & get_generator() {
return m_generator;
}
@ -123,13 +145,13 @@ public:
private:
void align_element_types(const BlockedShapeVector& outputShapes, const BlockedShapeVector& inputShapes);
void convert_to_snippet_dialect();
// Count of potential non-scalar Constants that will be created after some transformations
// At the moment it's relevant only for FakeQuantize decomposition
// NOTE: To avoid overheads in each calculation of this count (for example, in validate_and_type_infer()),
// we should MANUALLY calculate it where it is needed.
size_t m_non_scalar_constants_count = 0;
Shape exec_domain = {};
std::shared_ptr<ov::Model> m_body = nullptr;
std::shared_ptr<ngraph::snippets::Generator> m_generator = nullptr;
// TODO: Change logic of insert Converts. This exec element type can be different for plugins

View File

@ -12,20 +12,14 @@ using namespace std;
using namespace ngraph;
snippets::op::BroadcastLoad::BroadcastLoad(const Output<Node>& x, Shape shape)
: BroadcastMove(x, shape), broadcast_info(x.get_shape().size(), 0) {
: BroadcastMove(x, shape) {
constructor_validate_and_infer_types();
}
bool snippets::op::BroadcastLoad::visit_attributes(AttributeVisitor& visitor) {
return true;
}
std::shared_ptr<Node> snippets::op::BroadcastLoad::clone_with_new_inputs(const OutputVector& new_args) const {
INTERNAL_OP_SCOPE(BroadcastLoad);
check_new_args_count(this, new_args);
auto other = std::make_shared<BroadcastLoad>(new_args.at(0), output_shape);
other->set_broadcast_info(this->broadcast_info);
return other;
return std::make_shared<BroadcastLoad>(new_args.at(0), output_shape);
}
void snippets::op::BroadcastLoad::validate_and_infer_types() {

View File

@ -17,6 +17,7 @@ snippets::op::BroadcastMove::BroadcastMove(const Output<Node>& x, Shape shape) :
}
bool snippets::op::BroadcastMove::visit_attributes(AttributeVisitor& visitor) {
visitor.on_attribute("output_shape", output_shape);
return true;
}

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "snippets/op/powerstatic.hpp"
namespace ngraph {
namespace snippets {
namespace op {
BWDCMP_RTTI_DEFINITION(PowerStatic);
} // namespace op
} // namespace snippets
} // namespace ngraph

View File

@ -6,6 +6,8 @@
using namespace ngraph;
BWDCMP_RTTI_DEFINITION(snippets::op::Scalar);
std::shared_ptr<Node> snippets::op::Scalar::clone_with_new_inputs(const OutputVector& new_args) const {
check_new_args_count(this, new_args);
return std::make_shared<Scalar>(*this);

View File

@ -32,6 +32,9 @@
using namespace std;
using namespace ngraph;
using namespace ov::op::util;
BWDCMP_RTTI_DEFINITION(snippets::op::Subgraph);
void snippets::op::Subgraph::set_generator(std::shared_ptr<ngraph::snippets::Generator> generator) {
m_generator = generator;
@ -42,8 +45,9 @@ void snippets::op::Subgraph::set_non_scalar_constants_count(const size_t count)
}
snippets::op::Subgraph::Subgraph(const OutputVector& args, std::shared_ptr<ov::Model> body)
: Op(args), m_body(body), m_generator(nullptr) {
const auto ops = m_body->get_ops();
: SubGraphOp(args) {
set_function(body);
const auto ops = body_ptr()->get_ops();
for (const auto& op : ops) {
config.m_is_quantized = config.m_is_quantized || ov::is_type<ov::op::v0::FakeQuantize>(op);
config.m_has_type_relaxed_ops = config.m_has_type_relaxed_ops || std::dynamic_pointer_cast<ngraph::op::TypeRelaxedBase>(op);
@ -52,6 +56,11 @@ snippets::op::Subgraph::Subgraph(const OutputVector& args, std::shared_ptr<ov::M
}
constructor_validate_and_infer_types();
for (size_t i = 0; i < body->get_parameters().size(); ++i)
m_input_descriptions[0].push_back(std::make_shared<InvariantInputDescription>(i, i));
for (size_t i = 0; i < body->get_output_size(); ++i)
m_output_descriptions[0].push_back(std::make_shared<BodyOutputDescription>(i, i));
m_transformations_allowed = false;
}
snippets::op::Subgraph::Subgraph(const NodeVector& args, std::shared_ptr<ov::Model> body)
@ -59,34 +68,37 @@ snippets::op::Subgraph::Subgraph(const NodeVector& args, std::shared_ptr<ov::Mod
std::shared_ptr<Node> snippets::op::Subgraph::clone_with_new_inputs(const OutputVector& inputs) const {
INTERNAL_OP_SCOPE(Subgraph);
return make_shared<Subgraph>(inputs, ov::clone_model(*m_body.get()));
return make_shared<Subgraph>(inputs, ov::clone_model(body()));
}
void snippets::op::Subgraph::validate_and_infer_types() {
INTERNAL_OP_SCOPE(Subgraph);
OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::validate_and_infer_types")
ngraph::ParameterVector old_parameters;
for (auto op : m_body->get_parameters()) {
for (auto op : body_ptr()->get_parameters()) {
old_parameters.push_back(op);
}
for (size_t i = 0; i < get_input_size(); ++i) {
m_body->replace_parameter(i, std::make_shared<opset1::Parameter>(get_input_element_type(i), get_input_partial_shape(i)));
body_ptr()->replace_parameter(i, std::make_shared<opset1::Parameter>(get_input_element_type(i), get_input_partial_shape(i)));
}
m_body->validate_nodes_and_infer_types();
body_ptr()->validate_nodes_and_infer_types();
for (size_t i = 0; i < m_body->get_parameters().size(); i++) {
m_body->get_parameters()[i]->set_friendly_name(old_parameters[i]->get_friendly_name());
for (size_t i = 0; i < body_ptr()->get_parameters().size(); i++) {
body_ptr()->get_parameters()[i]->set_friendly_name(old_parameters[i]->get_friendly_name());
}
set_output_size(m_body->get_output_size());
set_output_size(body_ptr()->get_output_size());
for (size_t i = 0; i < get_output_size(); ++i) {
set_output_type(i, m_body->get_output_element_type(i), m_body->get_output_partial_shape(i));
set_output_type(i, body_ptr()->get_output_element_type(i), body_ptr()->get_output_partial_shape(i));
}
}
bool snippets::op::Subgraph::visit_attributes(AttributeVisitor& visitor) {
visitor.on_attribute("body", body_ptr());
visitor.on_attribute("input_descriptions", m_input_descriptions[0]);
visitor.on_attribute("output_descriptions", m_output_descriptions[0]);
return true;
}
@ -172,11 +184,11 @@ void snippets::op::Subgraph::fill_empty_output_names(const Output<Node>& target_
Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShapes, const BlockedShapeVector& inputShapes) {
INTERNAL_OP_SCOPE(Subgraph);
OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::canonicalize")
NODE_VALIDATION_CHECK(this, inputShapes.size() == m_body->get_parameters().size(),
"Number of parameters for snippet doesn't match passed to generate method: ", inputShapes.size(), " vs ", m_body->get_parameters().size(), ".");
NODE_VALIDATION_CHECK(this, inputShapes.size() == body_ptr()->get_parameters().size(),
"Number of parameters for snippet doesn't match passed to generate method: ", inputShapes.size(), " vs ", body_ptr()->get_parameters().size(), ".");
NODE_VALIDATION_CHECK(this, outputShapes.size() == m_body->get_results().size(),
"number of results for snippet doesn't match passed to generate method: ", outputShapes.size(), " vs ", m_body->get_results().size(), ".");
NODE_VALIDATION_CHECK(this, outputShapes.size() == body_ptr()->get_results().size(),
"number of results for snippet doesn't match passed to generate method: ", outputShapes.size(), " vs ", body_ptr()->get_results().size(), ".");
auto getMaxRankBlockedShape = [](const BlockedShapeVector& blockedShapes) -> const BlockedShape& {
return *std::max_element(blockedShapes.begin(), blockedShapes.end(),
@ -219,13 +231,13 @@ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShape
NODE_VALIDATION_CHECK(this,
PartialShape::broadcast_merge_into(tmpPShape, inShape, ::ngraph::op::AutoBroadcastType::NUMPY),
"Failed to create broadcastable shapes in snippets canonicalization");
const auto paramShape = m_body->get_parameters()[i]->get_shape();
const auto paramType = m_body->get_parameters()[i]->get_element_type();
const auto paramShape = body_ptr()->get_parameters()[i]->get_shape();
const auto paramType = body_ptr()->get_parameters()[i]->get_element_type();
if (paramShape.size() != inShape.size() || !equal(paramShape.begin(), paramShape.end(), inShape.begin()))
m_body->replace_parameter(i, std::make_shared<opset1::Parameter>(paramType, inShape));
body_ptr()->replace_parameter(i, std::make_shared<opset1::Parameter>(paramType, inShape));
}
m_body->validate_nodes_and_infer_types();
body_ptr()->validate_nodes_and_infer_types();
auto skipStartEndOnes = [](const Shape& shape) {
auto begin = shape.begin();
auto end = shape.end();
@ -239,7 +251,7 @@ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShape
};
// Check that output shapes are broadcastable => can be scheduled
const auto& body_results = m_body->get_results();
const auto& body_results = body_ptr()->get_results();
PartialShape outPShape = body_results[0]->get_shape();
for (size_t i = 0; i < body_results.size(); i++) {
auto shape_i = body_results[i]->get_shape();
@ -270,7 +282,7 @@ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShape
void snippets::op::Subgraph::align_element_types(const BlockedShapeVector& outputShapes,
const BlockedShapeVector& inputShapes) {
// We should insert Convert before Results to set original output element type if needed
const auto& body_results = m_body->get_results();
const auto& body_results = body_ptr()->get_results();
for (size_t i = 0; i < outputShapes.size(); i++) {
const auto needed_out_type = std::get<2>(outputShapes[i]);
if (body_results[i]->get_input_element_type(0) != needed_out_type) {
@ -281,7 +293,7 @@ void snippets::op::Subgraph::align_element_types(const BlockedShapeVector& outpu
}
// We should change existing element type to original for Parameters if needed
const auto& body_parameters = m_body->get_parameters();
const auto& body_parameters = body_ptr()->get_parameters();
for (size_t i = 0; i < inputShapes.size(); ++i) {
const auto needed_in_type = std::get<2>(inputShapes[i]);
if (body_parameters[i]->get_element_type() != needed_in_type) {
@ -300,7 +312,7 @@ void snippets::op::Subgraph::align_element_types(const BlockedShapeVector& outpu
manager.register_pass<snippets::pass::AlignElementType>(execution_element_type);
manager.register_pass<ngraph::pass::ConstantFolding>();
}
manager.run_passes(m_body);
manager.run_passes(body_ptr());
}
void snippets::op::Subgraph::convert_to_snippet_dialect() {
@ -344,7 +356,7 @@ void snippets::op::Subgraph::convert_to_snippet_dialect() {
manager.get_pass_config()->
set_callback<ngraph::snippets::pass::SetScalarCountForStore>(skip_matching_domain);
}
manager.run_passes(m_body);
manager.run_passes(body_ptr());
}
snippets::Schedule snippets::op::Subgraph::generate(const BlockedShapeVector& output_shapes,
@ -372,19 +384,19 @@ snippets::Schedule snippets::op::Subgraph::generate(ngraph::pass::Manager& opt,
OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::op::generate")
NGRAPH_CHECK(m_generator != nullptr, "generate is called while generator is not set");
convert_to_snippet_dialect();
opt.run_passes(m_body);
opt.run_passes(body_ptr());
// generation flow
snippets::pass::AssignRegisters().run_on_model(m_body);
snippets::pass::AssignRegisters().run_on_model(body_ptr());
// schedule generation should go here and be target agnostic
// actual code emission
ngraph::snippets::code ptr = m_generator->generate(m_body, compile_params);
ngraph::snippets::code ptr = m_generator->generate(body_ptr(), compile_params);
// check that body doesn't have constants for scheduling
std::vector<std::shared_ptr<opset1::Constant>> constants;
for (auto op : m_body->get_ordered_ops()) {
for (auto op : body_ptr()->get_ordered_ops()) {
if (auto constant = ov::as_type_ptr<opset1::Constant>(op)) {
if (ngraph::shape_size(constant->get_shape()) != 1 && constant->get_shape() != Shape()) {
constants.push_back(constant);
@ -400,10 +412,10 @@ void snippets::op::Subgraph::print() const {
INTERNAL_OP_SCOPE(Subgraph);
remark(13) << "subgraph " << this->get_friendly_name() << " "
<< this->get_type_name()
<< " which contains " << this->get_body()->get_ops().size() << " nodes" << std::endl;
<< " which contains " << body_ptr()->get_ops().size() << " nodes" << std::endl;
int qqq = 0;
for (auto op : this->get_body()->get_ordered_ops()) {
for (auto op : body_ptr()->get_ordered_ops()) {
remark(13) << "op " << qqq++ << " " << op->get_friendly_name() << " (" << op->get_type_name() << ") " << op << std::endl;
}
@ -434,7 +446,7 @@ void snippets::op::Subgraph::print_statistics(bool verbose) {
}
if (auto subgraph = ngraph::as_type_ptr<op::Subgraph>(n)) {
for (auto op : subgraph->get_body()->get_ordered_ops()) {
for (auto op : subgraph->body_ptr()->get_ordered_ops()) {
if (ngraph::as_type_ptr<ngraph::opset1::Constant>(op)) {
total += op->output(0).get_tensor().size();
}
@ -444,9 +456,9 @@ void snippets::op::Subgraph::print_statistics(bool verbose) {
return total;
};
auto getModelInventory = [getNodeInventory](std::shared_ptr<ov::Model> f) -> size_t {
auto getModelInventory = [getNodeInventory](const ov::Model & f) -> size_t {
size_t total = 0;
for (auto op : f->get_ordered_ops()) {
for (auto op : f.get_ordered_ops()) {
// Results and parameters are artificially introduced,
// while Constants are already considered if they are inputs of other operation
// this should lead to 1:1 inventory for single node operations
@ -459,24 +471,22 @@ void snippets::op::Subgraph::print_statistics(bool verbose) {
return total;
};
auto countConstants = [](std::shared_ptr<ov::Model> f) -> size_t {
auto countConstants = [](const ov::Model & f) -> size_t {
size_t count = 0;
for (auto op : f->get_ordered_ops()) {
for (auto op : f.get_ordered_ops()) {
count += !!ngraph::as_type_ptr<ngraph::opset1::Constant>(op) ? 1 : 0;
}
return count;
};
auto body = this->get_body();
std::cout << this->get_friendly_name()
std::cout << get_friendly_name()
<< ";" << this
<< ";" << body->get_ops().size()
<< ";" << body->get_parameters().size()
<< ";" << body->get_results().size()
<< ";" << countConstants(body)
<< ";" << getModelInventory(body)
<< ";" << getNodeInventory(this->shared_from_this()) << std::endl;
<< ";" << body_ptr()->get_ops().size()
<< ";" << body_ptr()->get_parameters().size()
<< ";" << body_ptr()->get_results().size()
<< ";" << countConstants(body())
<< ";" << getModelInventory(body())
<< ";" << getNodeInventory(shared_from_this()) << std::endl;
if (verbose) {
this->print();
@ -486,7 +496,7 @@ void snippets::op::Subgraph::print_statistics(bool verbose) {
void snippets::op::Subgraph::serialize() const {
std::stringstream xmlFile, binFile;
ov::pass::Serialize serializer(xmlFile, xmlFile, ov::pass::Serialize::Version::IR_V10);
serializer.run_on_model(get_body());
serializer.run_on_model(body_ptr());
auto m_constants = binFile.str();
auto m_model = xmlFile.str();
std::cout << m_model << std::endl;

View File

@ -162,7 +162,7 @@ auto update_out_tensor_name(std::shared_ptr<ngraph::snippets::op::Subgraph> &sub
for (unsigned int i = 0; i < subgraph->get_output_size() && not_set; i++) {
for (const auto &in : subgraph->get_output_target_inputs(i)) {
if (ov::is_type<opset1::Result>(in.get_node())) {
const auto& body_result = subgraph->get_body()->get_output_op(i);
const auto& body_result = subgraph->body_ptr()->get_output_op(i);
const auto& body_result_input = body_result->get_input_source_output(0);
op::Subgraph::fill_empty_output_names(subgraph->output(i), body_result_input);
not_set = false;
@ -318,8 +318,8 @@ TokenizeSnippets::TokenizeSnippets() {
for (const auto &input_node : ngraph::as_node_vector(input_values)) {
if (auto subgraph = ov::as_type_ptr<op::Subgraph>(input_node)) {
if (!clones.count(input_node)) {
auto f = ov::clone_model(*subgraph->get_body().get());
f->set_friendly_name(subgraph->get_body()->get_friendly_name());
auto f = ov::clone_model(subgraph->body());
f->set_friendly_name(subgraph->body_ptr()->get_friendly_name());
clones[input_node] = f;
}
}
@ -332,6 +332,7 @@ TokenizeSnippets::TokenizeSnippets() {
<< " outputs" << std::endl;
return true;
}
std::string subgraph_name = node->get_friendly_name();
std::string fusedNames{};
size_t num_result_children = 0;
std::pair<int64_t, int64_t> currentTopoBounds {-1, LONG_MAX};
@ -347,7 +348,12 @@ TokenizeSnippets::TokenizeSnippets() {
fusedNames += getFusedNames(subgraph);
num_result_children += has_result_child(subgraph);
if (has_result_child(subgraph)) {
// we set input subgraph name to the current subgraph
// in order to save node friendly name before result
subgraph_name = subgraph->get_friendly_name();
num_result_children += 1;
}
auto f = clones[input_node];
const auto& input_body_parameters = f->get_parameters();
// Todo:
@ -546,10 +552,10 @@ TokenizeSnippets::TokenizeSnippets() {
for (size_t i = 0; i < body->get_parameters().size(); i++) {
body->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
}
auto subgraph = op::build_subgraph(node, external_inputs, body);
auto act_body = subgraph->get_body();
for (size_t i = 0; i < act_body->get_parameters().size(); i++) {
act_body->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
auto subgraph = op::build_subgraph(node, external_inputs, body, subgraph_name);
const auto & act_body = subgraph->body();
for (size_t i = 0; i < act_body.get_parameters().size(); i++) {
act_body.get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
}
if (subgraph->get_output_size() != subgraph_result_inputs.size()) {
@ -568,9 +574,9 @@ TokenizeSnippets::TokenizeSnippets() {
subgraph->validate_and_infer_types();
auto act_body1 = subgraph->get_body();
for (size_t i = 0; i < act_body1->get_parameters().size(); i++) {
act_body1->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
const auto & act_body1 = subgraph->body();
for (size_t i = 0; i < act_body1.get_parameters().size(); i++) {
act_body1.get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
}
subgraph->get_rt_info()["originalLayersNames"] = fusedNames;
subgraph->set_non_scalar_constants_count(hidden_non_scalar_constant_count);
@ -579,7 +585,7 @@ TokenizeSnippets::TokenizeSnippets() {
<< subgraph->get_friendly_name()
<< " with " << subgraph->inputs().size()
<< " inputs and " << subgraph->outputs().size()
<< " outputs and " << subgraph->get_body()->get_ops().size() << " ops total\n";
<< " outputs and " << subgraph->body_ptr()->get_ops().size() << " ops total\n";
return true;
};

View File

@ -23,7 +23,7 @@ namespace pass {
// Move up Constants which aren't scalars from body to Subgraph and replace them with Parameters inside body
void ConvertConstantsToParameters(const std::shared_ptr<ngraph::snippets::op::Subgraph>& subgraph) {
OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::ConvertConstantsToParameters");
auto body = subgraph->get_body();
auto body = subgraph->body_ptr();
ParameterVector new_parameters;
OutputVector new_external_inputs = subgraph->input_values();
@ -59,7 +59,7 @@ CommonOptimizations::CommonOptimizations() {
return false;
}
auto body = subgraph->get_body();
auto body = subgraph->body_ptr();
const auto is_quantized = subgraph->is_quantized();
// Firstly we should transform all original Converts inside body to ConvertTruncation to save original behavior.

View File

@ -38,6 +38,8 @@ public:
};
class LoweringTests : public TransformationTestsF {
public:
LoweringTests();
protected:
static std::shared_ptr<ngraph::snippets::op::Subgraph> getSubgraph(const std::shared_ptr<Model>& f);
static std::shared_ptr<ngraph::snippets::op::Subgraph> getLoweredSubgraph(const std::shared_ptr<Model>& f);

View File

@ -34,6 +34,13 @@ DummyTargetMachine::DummyTargetMachine() {
jitters[ngraph::snippets::op::TileScheduler::get_type_info_static()] = dummy_functor;
}
LoweringTests::LoweringTests() : TransformationTestsF() {
// external subgraph input shape and internal parameters shapes
// might differ due to the blocked layout
// so input & output descriptors shouldn't be checked
comparator.disable(FunctionsComparator::CmpValues::SUBGRAPH_DESCRIPTORS);
}
std::shared_ptr<ngraph::snippets::op::Subgraph> LoweringTests::getSubgraph(const std::shared_ptr<Model>& f) {
std::shared_ptr<ngraph::snippets::op::Subgraph> subgraph;
for (const auto &op : f->get_ops()) {

View File

@ -77,6 +77,14 @@ TEST_F(CollapseSubgraphTests, smoke_Snippets_ConvertPartialInputsAndResults) {
run();
}
TEST_F(CollapseSubgraphTests, smoke_Snippets_EltwiseTwoResultsFunction) {
const auto &f = EltwiseTwoResultsFunction(std::vector<Shape>{{2, 5}, {2, 1}});
function = f.getOriginal();
function_ref = f.getReference();
comparator.enable(FunctionsComparator::CmpValues::NAMES);
run();
}
} // namespace snippets
} // namespace test
} // namespace ov

View File

@ -24,10 +24,10 @@ public:
TransformationTestsF::TearDown();
auto subgraph = FunctionHelper::getSubgraph(function);
auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph)->get_body();
auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph)->body_ptr();
auto subgraph_ref = FunctionHelper::getSubgraph(function_ref);
auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph_ref)->get_body();
auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph_ref)->body_ptr();
auto res = comparator.compare(body, body_ref);
ASSERT_TRUE(res.valid) << res.message;

View File

@ -35,7 +35,7 @@ void InsertLoadStoreTests::SetUp() {
TEST_P(InsertLoadStoreTests, ThreeInputsEltwise) {
auto subgraph = getLoweredSubgraph(snippets_function->getOriginal());
function = subgraph->get_body();
function = subgraph->body_ptr();
function_ref = snippets_function->getLowered();
}

View File

@ -33,7 +33,7 @@ void InsertMoveBroadcastTests::SetUp() {
TEST_P(InsertMoveBroadcastTests, AddBroadcast) {
auto subgraph = getLoweredSubgraph(snippets_function->getOriginal());
function = subgraph->get_body();
function = subgraph->body_ptr();
function_ref = snippets_function->getLowered();
}

View File

@ -295,6 +295,10 @@ public:
return m_output_descriptions.size();
}
bool get_transformations_allowed() const {
return m_transformations_allowed;
}
MultiSubGraphOp(const MultiSubGraphOp&) = delete;
MultiSubGraphOp(MultiSubGraphOp&&) = default;
@ -313,6 +317,7 @@ protected:
std::vector<std::shared_ptr<Model>> m_bodies;
std::vector<MultiSubgraphInputDescriptionVector> m_input_descriptions;
std::vector<MultiSubgraphOutputDescriptionVector> m_output_descriptions;
bool m_transformations_allowed = true;
};
} // namespace util
} // namespace op

View File

@ -10,6 +10,7 @@
#include <ngraph/pattern/op/wrap_type.hpp>
#include <openvino/cc/pass/itt.hpp>
#include <regex>
#include <string>
#include <unordered_set>
#include <vector>
@ -175,10 +176,12 @@ bool ov::pass::GraphRewrite::apply_matcher_passes(std::shared_ptr<Model> f,
// Recursive apply Matchers for sub-graph based nodes
if (auto sub_graph_node = std::dynamic_pointer_cast<ngraph::op::util::MultiSubGraphOp>(node)) {
size_t sub_graphs_num = sub_graph_node->get_internal_subgraphs_size();
for (size_t sub_graph_ind = 0; sub_graph_ind < sub_graphs_num; ++sub_graph_ind) {
auto sub_graph = sub_graph_node->get_function(static_cast<int>(sub_graph_ind));
run_on_model(sub_graph);
if (sub_graph_node->get_transformations_allowed()) {
size_t sub_graphs_num = sub_graph_node->get_internal_subgraphs_size();
for (size_t sub_graph_ind = 0; sub_graph_ind < sub_graphs_num; ++sub_graph_ind) {
auto sub_graph = sub_graph_node->get_function(sub_graph_ind);
run_on_model(sub_graph);
}
}
}
// Temporary keep this GraphRewrite property for backward compatibility

View File

@ -107,6 +107,18 @@ public:
return _inputData->getName();
}
/**
* @brief Changes the name of the input data provided by the user.
*
* @param newName A new name of the input data to set
*/
void setName(const std::string& newName) {
if (!_inputData) {
IE_THROW() << "Data is empty!";
}
_inputData->setName(newName);
}
/**
* @brief Gets the input data
*

View File

@ -20,6 +20,8 @@
#include <ov_ops/nms_static_shape_ie.hpp>
#include <ov_ops/multiclass_nms_ie_internal.hpp>
#include <snippets/op/subgraph.hpp>
#include <mutex>
namespace ov {
@ -125,10 +127,33 @@ std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
return opset;
};
auto snippets_opset = []() {
ngraph::OpSet opset;
#define NGRAPH_OP(NAME, NAMESPACE) opset.insert<NAMESPACE::NAME>();
NGRAPH_OP(BroadcastLoad, ngraph::snippets::op)
NGRAPH_OP(BroadcastMove, ngraph::snippets::op)
NGRAPH_OP(ConvertSaturation, ngraph::snippets::op)
NGRAPH_OP(ConvertTruncation, ngraph::snippets::op)
NGRAPH_OP(Kernel, ngraph::snippets::op)
NGRAPH_OP(Load, ngraph::snippets::op)
NGRAPH_OP(Nop, ngraph::snippets::op)
NGRAPH_OP(PowerStatic, ngraph::snippets::op)
NGRAPH_OP(Scalar, ngraph::snippets::op)
NGRAPH_OP(Store, ngraph::snippets::op)
NGRAPH_OP(Subgraph, ngraph::snippets::op)
NGRAPH_OP(Tile, ngraph::snippets::op)
NGRAPH_OP(TileScheduler, ngraph::snippets::op)
#undef NGRAPH_OP
return opset;
};
static std::map<std::string, ngraph::OpSet> opsets = {
{ "cpu_plugin_opset", cpu_plugin_opset() },
{ "type_relaxed_opset", type_relaxed_opset() },
{ "ie_internal_opset", ie_internal_opset() },
{ "SnippetsOpset", snippets_opset() },
};
return opsets;

View File

@ -58,10 +58,10 @@ void Snippet::copy_snippet() {
if (!sharedMutex) {
IE_THROW() << "Subgraph doesn't have shared mutex";
}
std::lock_guard<std::mutex> lock(*sharedMutex.get());
new_body = ov::clone_model(*original_snippet->get_body().get());
std::lock_guard<std::mutex> lock(*sharedMutex);
new_body = ov::clone_model(*original_snippet->body_ptr());
} else {
new_body = ov::clone_model(*original_snippet->get_body().get());
new_body = ov::clone_model(*original_snippet->body_ptr());
}
snippet = std::make_shared<ngraph::snippets::op::Subgraph>(subgraph_node_inputs, new_body);
ngraph::copy_runtime_info(original_snippet, snippet);
@ -320,13 +320,13 @@ void Snippet::define_schedule() {
// Canonicalization broadcasts inputs and outputs to max input rank, which can be smaller than tensorRank
// prepend to enable 6D scheduler
exec_domain = prependWithOnes(exec_domain);
const auto &body = snippet->get_body();
for (const auto& p : body->get_parameters()) {
const auto &body = snippet->body();
for (const auto& p : body.get_parameters()) {
dims_in.emplace_back(prependWithOnes(p->get_shape()));
}
for (size_t i = 0; i < body->get_output_size(); i++) {
dims_out.push_back(prependWithOnes(body->get_output_shape(i)));
for (size_t i = 0; i < body.get_output_size(); i++) {
dims_out.push_back(prependWithOnes(body.get_output_shape(i)));
}
const auto config = getSelectedPrimitiveDescriptor()->getConfig();

View File

@ -867,13 +867,10 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
} else {
enableBF16 = engConfig.enforceBF16 && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core);
}
const auto& modelCacheProp = config.find(InferenceEngine::PluginConfigParams::KEY_CACHE_DIR);
const bool enableModelCache = (modelCacheProp != config.end() && !modelCacheProp->second.empty())
|| !engConfig.cache_dir.empty();
const auto& dynamicBatchProp = config.find(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED);
const bool enableDynamicBatch = (dynamicBatchProp != config.end() && dynamicBatchProp->second == PluginConfigParams::YES)
|| engConfig.enableDynamicBatch;
const bool enableSnippets = !(enableModelCache || enableDynamicBatch);
const bool enableSnippets = !enableDynamicBatch;
auto nGraphFunc = clonedNetwork.getFunction();
DEBUG_LOG(PrintableModel(*nGraphFunc, "org_"));
@ -1122,7 +1119,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE);
const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/
|| Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled */;
const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch);
const bool enableSnippets = !conf.enableDynamicBatch;
auto model = network.getFunction();
if (model == nullptr) {

View File

@ -41,28 +41,22 @@ namespace {
IE_THROW(NetworkNotRead) << "Unknown layout with name '" << name << "'";
}
template<typename T>
void setPrecisionsAndLayouts(
pugi::xml_object_range<pugi::xml_named_node_iterator> && nodes,
T && info) {
for (auto n : nodes) {
auto name_attr = n.attribute("name");
auto precision_attr = n.attribute("precision");
auto layout_attr = n.attribute("layout");
template <typename T>
void setInfo(pugi::xml_object_range<pugi::xml_named_node_iterator>&& nodes, T&& info) {
auto nodes_it = nodes.begin();
auto info_iter = info.begin();
for (; nodes_it != nodes.end(); ++nodes_it, ++info_iter) {
auto name_attr = nodes_it->attribute("name");
auto precision_attr = nodes_it->attribute("precision");
auto layout_attr = nodes_it->attribute("layout");
if (!name_attr
|| !precision_attr
|| !layout_attr) {
if (!name_attr || !precision_attr || !layout_attr || info_iter == info.end()) {
IE_THROW(NetworkNotRead) << "The inputs/outputs information is invalid.";
}
auto it = info.find(name_attr.value());
if (it == info.end()) {
IE_THROW(NetworkNotRead) << "The input/output with name '" << name_attr.value() << "' not found";
}
it->second->setPrecision(Precision::FromStr(precision_attr.value()));
it->second->setLayout(layout_from_string(layout_attr.value()));
info_iter->second->setName(name_attr.value());
info_iter->second->setPrecision(Precision::FromStr(precision_attr.value()));
info_iter->second->setLayout(layout_from_string(layout_attr.value()));
}
}
}; // namespace
@ -170,8 +164,8 @@ void CNNNetworkDeserializer::operator >> (InferenceEngine::CNNNetwork & network)
pugi::xml_node inputs = root.child("inputs");
pugi::xml_node outputs = root.child("outputs");
setPrecisionsAndLayouts(inputs.children("in"), network.getInputsInfo());
setPrecisionsAndLayouts(outputs.children("out"), network.getOutputsInfo());
setInfo(inputs.children("in"), network.getInputsInfo());
setInfo(outputs.children("out"), network.getOutputsInfo());
}
} // namespace intel_cpu

View File

@ -16,7 +16,7 @@ target_link_libraries(cpuSpecificRtInfo PRIVATE openvino::runtime)
set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $<TARGET_PROPERTY:openvino_intel_cpu_plugin,SOURCE_DIR>/src)
set(DEPENDENCIES openvino_intel_cpu_plugin)
set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo)
set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo inference_engine_snippets)
if (ENABLE_OV_ONNX_FRONTEND)
list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}")
else()

View File

@ -0,0 +1,149 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/openvino.hpp"
#include "openvino/opsets/opset9.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "test_utils/convolution_params.hpp"
#include "snippets/op/subgraph.hpp"
using namespace CPUTestUtils;
using namespace ov::opset9;
namespace SubgraphTestsDefinitions {
class SubgraphSnippetSerializationTest : public ::testing::Test, public CPUTestsBase {};
TEST_F(SubgraphSnippetSerializationTest, SerializeSubgraph) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    // Build a model that contains a snippets Subgraph op whose body is a single Add.
    auto make_model = [] () -> std::shared_ptr<ov::Model> {
        const auto shape = ov::Shape({2, 2});
        auto outer_in0 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto outer_in1 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto body_in0 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto body_in1 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto body_add = std::make_shared<Add>(body_in0, body_in1);
        auto body = std::make_shared<ov::Model>(ov::NodeVector{body_add}, ov::ParameterVector{body_in0, body_in1});
        auto snippet = std::make_shared<ngraph::snippets::op::Subgraph>(ov::NodeVector{outer_in0, outer_in1},
                                                                        ov::clone_model(*body.get()));
        return std::make_shared<ov::Model>(ov::NodeVector{snippet}, ov::ParameterVector{outer_in0, outer_in1});
    };
    auto model = make_model();

    ov::Core core;
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");

    // Round-trip the compiled model through the export/import serialization path.
    std::stringstream stream;
    compiled_model.export_model(stream);
    ov::CompiledModel imported_compiled_model = core.import_model(stream, "CPU");

    float data[] = {1.f, 1.f, 1.f, 1.f};
    ov::Tensor input_data1{ov::element::f32, ov::Shape({2, 2}), data};
    ov::Tensor input_data2{ov::element::f32, ov::Shape({2, 2}), data};

    // Run inference on a compiled model and collect its first output as floats.
    auto run_and_collect = [&] (ov::CompiledModel& cm) -> std::vector<float> {
        ov::InferRequest request = cm.create_infer_request();
        request.set_input_tensor(0, input_data1);
        request.set_input_tensor(1, input_data2);
        request.infer();
        auto out = request.get_output_tensor(0);
        float* out_ptr = static_cast<float*>(out.data(ov::element::Type_t::f32));
        return std::vector<float>(out_ptr, out_ptr + out.get_size());
    };
    auto out_val = run_and_collect(compiled_model);
    auto imported_out_val = run_and_collect(imported_compiled_model);
    ASSERT_EQ(out_val, imported_out_val);

    // The runtime graphs of original and imported compiled models must also match.
    auto compiled_model_runtime = ov::clone_model(*compiled_model.get_runtime_model());
    auto imported_compiled_model_runtime = ov::clone_model(*imported_compiled_model.get_runtime_model());
    const auto fc = FunctionsComparator::with_default()
            .enable(FunctionsComparator::CONST_VALUES)
            .enable(FunctionsComparator::ATTRIBUTES);
    const auto results = fc.compare(compiled_model_runtime, imported_compiled_model_runtime);
    ASSERT_TRUE(results.valid) << results.message;
}
TEST_F(SubgraphSnippetSerializationTest, SerializeSubgraphWithScalarConst) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    // Model: (input + const) feeding a Subgraph whose body adds the same scalar const again.
    auto make_model = [] () -> std::shared_ptr<ov::Model> {
        const auto shape = ov::Shape({1});
        auto outer_input = std::make_shared<Parameter>(ov::element::f32, shape);
        auto body_input = std::make_shared<Parameter>(ov::element::f32, shape);
        auto outer_const = std::make_shared<Constant>(ov::element::f32, shape, 2);
        auto body_const = std::make_shared<Constant>(ov::element::f32, shape, 2);
        auto outer_add = std::make_shared<Add>(outer_input, outer_const);
        auto body_add = std::make_shared<Add>(body_input, body_const);
        auto body = std::make_shared<ov::Model>(ov::NodeVector{body_add}, ov::ParameterVector{body_input});
        auto snippet = std::make_shared<ngraph::snippets::op::Subgraph>(ov::NodeVector{outer_add},
                                                                        ov::clone_model(*body.get()));
        return std::make_shared<ov::Model>(ov::NodeVector{snippet}, ov::ParameterVector{outer_input});
    };
    auto model = make_model();

    ov::Core core;
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");

    // Serialize and re-import the compiled model.
    std::stringstream stream;
    compiled_model.export_model(stream);
    ov::CompiledModel imported_compiled_model = core.import_model(stream, "CPU");

    float data[] = {1.f};
    ov::Tensor input_data1{ov::element::f32, ov::Shape({1}), data};

    // Run inference on a compiled model and collect its first output as floats.
    auto run_and_collect = [&] (ov::CompiledModel& cm) -> std::vector<float> {
        ov::InferRequest request = cm.create_infer_request();
        request.set_input_tensor(0, input_data1);
        request.infer();
        auto out = request.get_output_tensor(0);
        float* out_ptr = static_cast<float*>(out.data(ov::element::Type_t::f32));
        return std::vector<float>(out_ptr, out_ptr + out.get_size());
    };
    auto out_val = run_and_collect(compiled_model);
    auto imported_out_val = run_and_collect(imported_compiled_model);
    ASSERT_EQ(out_val, imported_out_val);

    // Runtime graphs must be structurally identical after the round trip.
    auto compiled_model_runtime = ov::clone_model(*compiled_model.get_runtime_model());
    auto imported_compiled_model_runtime = ov::clone_model(*imported_compiled_model.get_runtime_model());
    const auto fc = FunctionsComparator::with_default()
            .enable(FunctionsComparator::CONST_VALUES)
            .enable(FunctionsComparator::ATTRIBUTES);
    const auto results = fc.compare(compiled_model_runtime, imported_compiled_model_runtime);
    ASSERT_TRUE(results.valid) << results.message;
}
TEST_F(SubgraphSnippetSerializationTest, SerializeSubgraphWithResultAs1stOutput) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()
    const auto precision = ov::element::f32;
    const auto shape = ov::Shape{1, 3, 16, 16};

    // Graph whose first Result consumes the snippet-fusible chain directly,
    // while the second Result consumes an Add over the same Relu output.
    auto input1 = std::make_shared<Parameter>(precision, shape);
    auto input2 = std::make_shared<Parameter>(precision, shape);
    auto sinh1 = std::make_shared<Sinh>(input1);
    auto sinh2 = std::make_shared<Sinh>(input2);
    auto relu = std::make_shared<Relu>(sinh2);
    auto sinh_out = std::make_shared<Sinh>(relu);
    auto result1 = std::make_shared<Result>(sinh_out);
    auto add = std::make_shared<Add>(sinh1, relu);
    auto result2 = std::make_shared<Result>(add);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result1, result2},
                                             ov::ParameterVector{input1, input2});

    ov::Core core;
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");

    // Serialize and re-import, then compare the runtime graphs of both compiled models.
    std::stringstream stream;
    compiled_model.export_model(stream);
    ov::CompiledModel imported_compiled_model = core.import_model(stream, "CPU");

    auto compiled_model_runtime = ov::clone_model(*compiled_model.get_runtime_model());
    auto imported_compiled_model_runtime = ov::clone_model(*imported_compiled_model.get_runtime_model());
    const auto fc = FunctionsComparator::with_default()
            .enable(FunctionsComparator::CONST_VALUES)
            .enable(FunctionsComparator::ATTRIBUTES);
    const auto results = fc.compare(compiled_model_runtime, imported_compiled_model_runtime);
    ASSERT_TRUE(results.valid) << results.message;
}
} // namespace SubgraphTestsDefinitions

View File

@ -31,10 +31,10 @@ public:
TransformationTestsF::TearDown();
auto subgraph = FunctionHelper::getSubgraph(function);
auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph)->get_body();
auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph)->body_ptr();
auto subgraph_ref = FunctionHelper::getSubgraph(function_ref);
auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph_ref)->get_body();
auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast<ngraph::snippets::op::Subgraph>(subgraph_ref)->body_ptr();
if ((body != nullptr) && (body_ref != nullptr)) {
auto res = comparator.compare(body, body_ref);

View File

@ -451,7 +451,7 @@ public:
using Result = Comparator::Result;
using SubGraphOp = ov::op::util::SubGraphOp;
Result compare(SubGraphOp* sub_lhs, SubGraphOp* sub_rhs) {
Result compare(SubGraphOp* sub_lhs, SubGraphOp* sub_rhs, bool compare_in_outs) {
const auto lhs_it_no = get_num_iterations(sub_lhs);
const auto rhs_it_no = get_num_iterations(sub_rhs);
if (lhs_it_no != rhs_it_no) {
@ -460,14 +460,16 @@ public:
not_valid_input_output = lhs_it_no;
const auto result_for_inputs = compare_inputs(sub_lhs, sub_rhs);
if (!result_for_inputs.valid) {
return result_for_inputs;
}
if (compare_in_outs) {
const auto& result_for_inputs = compare_inputs(sub_lhs, sub_rhs);
if (!result_for_inputs.valid) {
return result_for_inputs;
}
const auto result_for_outputs = compare_outputs(sub_lhs, sub_rhs);
if (!result_for_outputs.valid) {
return result_for_outputs;
const auto& result_for_outputs = compare_outputs(sub_lhs, sub_rhs);
if (!result_for_outputs.valid) {
return result_for_outputs;
}
}
return compare_backedges(sub_lhs, sub_rhs);
@ -559,8 +561,10 @@ private:
} // namespace detail
Comparator::Result compare_io(ov::op::util::SubGraphOp* sub_lhs, ov::op::util::SubGraphOp* sub_rhs) {
return detail::CompareSubGraphs{}.compare(sub_lhs, sub_rhs);
Comparator::Result compare_io(ov::op::util::SubGraphOp* sub_lhs,
ov::op::util::SubGraphOp* sub_rhs,
bool compare_in_outs) {
return detail::CompareSubGraphs{}.compare(sub_lhs, sub_rhs, compare_in_outs);
}
} // namespace subgraph
} // namespace
@ -698,7 +702,7 @@ Comparator::Result Comparator::compare(ngraph::Node* node1, ngraph::Node* node2,
const bool subgraph_nodes = subgraph1 && subgraph2;
if (subgraph_nodes) {
const auto result = subgraph::compare_io(subgraph1, subgraph2);
const auto result = subgraph::compare_io(subgraph1, subgraph2, should_compare(CmpValues::SUBGRAPH_DESCRIPTORS));
if (!result.valid) {
return result;
}

View File

@ -27,7 +27,8 @@ public:
PRECISIONS = 1 << 4,
ATTRIBUTES = 1 << 5,
TENSOR_NAMES = 1 << 6,
ACCURACY = 1 << 7
ACCURACY = 1 << 7,
SUBGRAPH_DESCRIPTORS = 1 << 8
};
struct Result {
@ -50,6 +51,7 @@ public:
fc.enable(NODES);
fc.enable(PRECISIONS);
fc.enable(TENSOR_NAMES);
fc.enable(SUBGRAPH_DESCRIPTORS);
return fc;
}
@ -155,11 +157,16 @@ public:
// initialize function with unique friendly and tensor names
for (auto node : f->get_ordered_ops()) {
const auto& node_name = generate_friendly_name();
node->set_friendly_name(node_name);
// this expression means that user didn't set friendly name and it was generated automatically
if (node->get_friendly_name() == node->get_name()) {
node->set_friendly_name(node_name);
}
for (auto output : node->outputs()) {
const auto& tensor_name = generate_tensor_name();
output.set_names({tensor_name});
if (output.get_names().empty()) {
output.set_names({tensor_name});
}
}
}

View File

@ -12,9 +12,11 @@ TransformationTestsF::TransformationTestsF()
comparator.enable(FunctionsComparator::CmpValues::NODES);
comparator.enable(FunctionsComparator::CmpValues::PRECISIONS);
comparator.enable(FunctionsComparator::CmpValues::RUNTIME_KEYS);
// TODO: enable attributes and constant values comparison by default XXX-68694
comparator.enable(FunctionsComparator::CmpValues::SUBGRAPH_DESCRIPTORS);
// TODO: enable attributes and constant values comparison by default XXX-98039
// comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
// comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
// comparator.enable(FunctionsComparator::CmpValues::NAMES);
}
void TransformationTestsF::SetUp() {

View File

@ -232,12 +232,19 @@ std::shared_ptr<ov::Model> EltwiseLogLoopFunction::initReference() const {
std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initOriginal() const {
auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
data0->set_friendly_name("data0");
auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
data1->set_friendly_name("data1");
auto sinh0 = std::make_shared<op::v0::Sinh>(data0);
sinh0->set_friendly_name("sinh0");
auto sinh1 = std::make_shared<op::v0::Sinh>(data1);
sinh1->set_friendly_name("sinh1");
auto add = std::make_shared<op::v1::Add>(sinh0, sinh1);
add->set_friendly_name("add");
auto hswish = std::make_shared<op::v4::HSwish>(add);
hswish->set_friendly_name("hswish");
auto relu = std::make_shared<op::v0::Relu>(hswish);
relu->set_friendly_name("relu");
NGRAPH_SUPPRESS_DEPRECATED_START
auto& out_tensor0 = add->get_output_tensor(0);
@ -249,25 +256,38 @@ std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initOriginal() const {
out_tensor1.set_names({"relu_out", "y1"});
NGRAPH_SUPPRESS_DEPRECATED_END
return std::make_shared<Model>(NodeVector{add, relu}, ParameterVector{data0, data1});
auto res0 = std::make_shared<op::v0::Result>(add);
res0->set_friendly_name("res0");
auto res1 = std::make_shared<op::v0::Result>(relu);
res1->set_friendly_name("res1");
return std::make_shared<Model>(ResultVector{res0, res1}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initReference() const {
auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
data0->set_friendly_name("data0");
auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
data1->set_friendly_name("data1");
auto sinh0 = std::make_shared<op::v0::Sinh>(data0);
sinh0->set_friendly_name("sinh0");
auto sinh1 = std::make_shared<op::v0::Sinh>(data1);
sinh1->set_friendly_name("sinh1");
auto indata0 = std::make_shared<op::v0::Parameter>(precision, sinh0->get_shape());
auto indata1 = std::make_shared<op::v0::Parameter>(precision, sinh1->get_shape());
auto add = std::make_shared<op::v1::Add>(indata0, indata1);
add->set_friendly_name("add");
auto hswish = std::make_shared<op::v4::HSwish>(add);
hswish->set_friendly_name("hswish");
auto subgraph0 = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{sinh0, sinh1},
std::make_shared<ov::Model>(NodeVector{add, hswish},
ParameterVector{indata0, indata1}));
subgraph0->set_friendly_name("add");
auto indata2 = std::make_shared<op::v0::Parameter>(precision, subgraph0->get_output_shape(1));
auto relu = std::make_shared<op::v0::Relu>(indata2);
relu->set_friendly_name("relu");
auto subgraph1 = std::make_shared<ngraph::snippets::op::Subgraph>(OutputVector{subgraph0->output(1)},
std::make_shared<ov::Model>(NodeVector{relu},
ParameterVector{indata2}));
subgraph1->set_friendly_name("relu");
NGRAPH_SUPPRESS_DEPRECATED_START
auto& out_tensor0 = subgraph0->get_output_tensor(0);
out_tensor0.set_name("add_out");
@ -277,7 +297,12 @@ std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initReference() const {
out_tensor1.set_name("relu_out");
out_tensor1.set_names({"relu_out", "y1"});
NGRAPH_SUPPRESS_DEPRECATED_END
return std::make_shared<Model>(OutputVector{subgraph0->output(0), subgraph1->output(0)}, ParameterVector{data0, data1});
auto res0 = std::make_shared<op::v0::Result>(subgraph0->output(0));
res0->set_friendly_name("res0");
auto res1 = std::make_shared<op::v0::Result>(subgraph1->output(0));
res1->set_friendly_name("res1");
return std::make_shared<Model>(ResultVector{res0, res1}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> TwoInputsAndOutputsFunction::initOriginal() const {