[Snippets] Removed limitation on Subgraph creation after Parameters (#13893)
parent 0ce82204bb
commit ba4edc08d9
@@ -134,9 +134,6 @@ auto snippets::op::Subgraph::wrap_node_as_subgraph(const std::shared_ptr<ov::Nod
throw ngraph::ngraph_error("original node outputs size and extracted subgraph node outputs size doesn't match");
}

// Clear the node dependencies so graph::topological_sort will not find any extra ops in get_ordered_ops()
// This is needed so the model body will be created correctly
body_node->clear_control_dependencies();
ngraph::ResultVector body_results;
for (auto output : node->outputs()) {
body_results.push_back(std::make_shared<ngraph::opset1::Result>(body_node->output(output.get_index())));
@@ -174,7 +174,7 @@ auto update_out_tensor_name(std::shared_ptr<ngraph::snippets::op::Subgraph> &sub
} // namespace

bool AppropriateForSubgraph(const std::shared_ptr<const Node> &node) {
return is_supported_op(node) && has_supported_in_out(node);
return is_supported_op(node) && has_supported_in_out(node) && node->get_control_dependencies().empty();
}

void SetSnippetsNodeType(const std::shared_ptr<Node> &node, SnippetsNodeType nodeType) {
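Taken together, these two hunks move control-dependency handling from subgraph wrapping into the eligibility check: instead of silently clearing dependencies while building the body, tokenization now simply skips nodes that carry them. A hedged sketch of the effect (illustrative standalone snippet; opset1 names as used elsewhere in this diff):

    // A control dependency added by an earlier transformation now excludes the
    // node from snippet tokenization instead of being dropped.
    auto a   = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
    auto b   = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
    auto add = std::make_shared<ngraph::opset1::Add>(a, b);
    add->add_control_dependency(b);   // forces "b before add" ordering
    // AppropriateForSubgraph(add) is now false; before this commit the
    // dependency would have been cleared inside wrap_node_as_subgraph().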
@@ -273,6 +273,8 @@ TokenizeSnippets::TokenizeSnippets() {
OutputVector external_inputs;
// inputs to the node before merge to subgraph
OutputVector internal_inputs;
// nodes whose rt_info should be copied into result subgraph
NodeVector replaced_nodes{node};

auto input_values = node->input_values();
/*

@@ -347,6 +349,7 @@ TokenizeSnippets::TokenizeSnippets() {
input_subgraphs.insert(input_node);

fusedNames += getFusedNames(subgraph);
replaced_nodes.push_back(subgraph);

if (has_result_child(subgraph)) {
// we set input subgraph name to the current subgraph

@@ -553,6 +556,7 @@ TokenizeSnippets::TokenizeSnippets() {
body->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
}
auto subgraph = op::build_subgraph(node, external_inputs, body, subgraph_name);
copy_runtime_info(replaced_nodes, subgraph);
const auto & act_body = subgraph->body();
for (size_t i = 0; i < act_body.get_parameters().size(); i++) {
act_body.get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name());
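The three hunks above add bookkeeping so runtime info survives fusion: every op merged into the subgraph is recorded in replaced_nodes, and copy_runtime_info() transfers the accumulated rt_info (fused layer names and the like) onto the resulting Subgraph op. A standalone sketch of the primitive being used (assumes the classic ngraph headers of this era):

    #include <ngraph/ngraph.hpp>

    int main() {
        // Two ops about to be replaced by one fused node:
        auto p     = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
        auto relu  = std::make_shared<ngraph::opset1::Relu>(p);
        auto neg   = std::make_shared<ngraph::opset1::Negative>(relu);
        auto fused = std::make_shared<ngraph::opset1::Relu>(p);  // stand-in for the Subgraph op
        // Merges the rt_info of both originals onto `fused` (e.g. originalLayersNames):
        ngraph::copy_runtime_info(ngraph::NodeVector{relu, neg}, fused);
        return 0;
    }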
@@ -19,7 +19,7 @@ namespace {
bool isValidRangesInputs(const std::shared_ptr<ngraph::opset1::FakeQuantize>& fq) {
auto il = fq->input_value(1);
auto ih = fq->input_value(2);
auto greater_equal = std::make_shared<ngraph::opset1::GreaterEqual>(il, ih);
auto greater_equal = std::make_shared<ngraph::opset1::Greater>(il, ih);

ngraph::OutputVector result(1);
if (!greater_equal->constant_fold(result, greater_equal->input_values()))
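Switching from GreaterEqual to Greater relaxes the FakeQuantize range validation: equal input_low/input_high constants are no longer rejected, which the new LPT corner-case test further down relies on. A sketch of the folding logic, mirroring the code above (illustrative only):

    auto il  = ngraph::opset1::Constant::create(ngraph::element::f32, {}, {0.f});
    auto ih  = ngraph::opset1::Constant::create(ngraph::element::f32, {}, {0.f});
    auto cmp = std::make_shared<ngraph::opset1::Greater>(il, ih);
    ngraph::OutputVector folded(1);
    if (cmp->constant_fold(folded, cmp->input_values())) {
        // For il == ih, Greater folds to `false`, so the ranges pass;
        // GreaterEqual would have folded to `true` and rejected them.
    }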
@@ -1603,7 +1603,7 @@ void Graph::EnforceBF16() {
// Concatenation node is an exception because it doesn't change accuracy for BF16 activation
node->getType() != Type::Concatenation) &&
// exclude Eltwise after Input since it supports conversion to BF16
!(parent->getType() == Type::Input && node->getType() == Type::Eltwise) &&
!(parent->getType() == Type::Input && (node->getType() == Type::Eltwise || node->getType() == Type::Subgraph)) &&
node->getOriginalInputPrecisionAtPort(i) == Precision::FP32)
node->setOriginalInputPrecisionAtPort(i, Precision::BF16);
}
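Unpacked, the guard now leaves an FP32 edge alone whenever the consumer can perform the BF16 conversion itself, which as of this commit includes Subgraph nodes as well as Eltwise. A sketch of the condition, with the surrounding loop and other exclusions elided:

    const bool consumerConvertsItself =
        parent->getType() == Type::Input &&
        (node->getType() == Type::Eltwise || node->getType() == Type::Subgraph);
    if (/* ...other exclusions... */ !consumerConvertsItself &&
        node->getOriginalInputPrecisionAtPort(i) == Precision::FP32)
        node->setOriginalInputPrecisionAtPort(i, Precision::BF16);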
@@ -433,10 +433,7 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr<ov::Model> &m) {
for (auto &node : m->get_ordered_ops()) {
if (ngraph::op::is_constant(node))
continue;

if (ngraph::op::is_parameter(node)) {
SetNodeFusingType(node, NodeFusingType::IgnoredAfterInputs);
} else if (isSuitableConvolutionParent(node)) {
if (isSuitableConvolutionParent(node)) {
// Initiate fusing chain
SetNodeFusingType(node, NodeFusingType::FusedWithConvolution);
channelAxis = DEFAULT_AXIS;

@@ -490,12 +487,6 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr<ov::Model> &m) {
NodeFusingType updatedChainType = fusingChainType;
if (isSuitableChildForFusingMatMul(node, isExecutedInINT8, updatedChainType, channelAxis))
PropagateIfHasOnlyChild(node, updatedChainType);
} else if (fusingChainType == NodeFusingType::IgnoredAfterInputs && (snippets::pass::AppropriateForSubgraph(node) ||
ov::is_type<ngraph::op::v0::Convert>(node) || ov::is_type<ngraph::op::v1::Transpose>(node))) {
// In OV API 2.0, a Convert node is inserted after an Input node with I8/U8 precisions; moreover, on TF models a
// Transpose layer is inserted. This breaks the idea to leave an Eltwise node with I8/U8 inputs and FP32 outputs instead of a Subgraph node
// TODO Remove the additional check on Convert/Transpose here after enabling Subgraphs with I8/U8 inputs and FP32 outputs
SetNodeFusingType(node, NodeFusingType::IgnoredAfterInputs);
}
}
}
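These two hunks are the heart of the commit: the IgnoredAfterInputs marking (and its special-casing of Convert/Transpose) is removed, so eltwise chains that start right after Parameters are no longer excluded from tokenization. A hedged sketch of the smallest model this unlocks (shapes chosen to match the Add snippet test below, which now expects exactly one Subgraph node):

    auto a   = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 42, 16, 64});
    auto b   = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 42, 16, 1});
    auto add = std::make_shared<ngraph::opset1::Add>(a, b);
    auto model = std::make_shared<ov::Model>(ngraph::ResultVector{std::make_shared<ngraph::opset1::Result>(add)},
                                             ngraph::ParameterVector{a, b});
    // Before: SnippetsMarkSkipped marked `add` IgnoredAfterInputs => no Subgraph created.
    // After: the Add alone is tokenized into a Subgraph.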
@@ -37,7 +37,7 @@ enum class NodeFusingType : int64_t {
NotSet,
FusedTerminator,
FusedWithConvolution, FusedWithBinaryConvolution, FusedWithConvolutionSumActivation,
FusedWithMatMul, FusedWithMatMulI8, FusedWithReduce, FusedWithMisc, IgnoredAfterInputs};
FusedWithMatMul, FusedWithMatMulI8, FusedWithReduce, FusedWithMisc};

} // namespace intel_cpu
} // namespace ov
@@ -19,7 +19,7 @@ std::string getInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
auto it_info = node->get_rt_info().find(InputMemoryFormats::get_type_info_static());
if (it_info != node->get_rt_info().end()) {
if (it_info->second.is<InputMemoryFormats>()) {
return it_info->second.as<InputMemoryFormats>().getMemoryFormats();
return it_info->second.as<InputMemoryFormats>().to_string();
}
}
return {};

@@ -31,7 +31,7 @@ std::string getOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
auto it_info = node->get_rt_info().find(OutputMemoryFormats::get_type_info_static());
if (it_info != node->get_rt_info().end()) {
if (it_info->second.is<OutputMemoryFormats>()) {
return it_info->second.as<OutputMemoryFormats>().getMemoryFormats();
return it_info->second.as<OutputMemoryFormats>().to_string();
}
}
return {};
@@ -25,7 +25,7 @@ protected:
public:
MemoryFormats() = default;
explicit MemoryFormats(const std::string &_memory_format) : memory_format(_memory_format) {}
std::string getMemoryFormats() const { return memory_format; }
std::string to_string() const override { return memory_format; };
bool is_copyable(const std::shared_ptr<ov::Node>& to) const override {
return (!ov::op::util::is_constant(to));
}

@@ -36,7 +36,7 @@ public:
for (auto &node : nodes) {
auto it_info = node->get_rt_info().find(MemoryFormat::get_type_info_static());
if (it_info != node->get_rt_info().end()) {
std::string mem_format = it_info->second.template as<MemoryFormat>().getMemoryFormats();
std::string mem_format = it_info->second.template as<MemoryFormat>().to_string();
if (!mem_format.empty()) {
unique_mem_format.insert(mem_format);
}
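Replacing the bespoke getMemoryFormats() accessor with an override of the virtual to_string() presumably lets generic RuntimeAttribute handling (printing, serialization) reach the payload without knowing the concrete type. A sketch of the pattern with a hypothetical attribute (macro and base class as commonly used for runtime attributes in this codebase; details may differ):

    // Hypothetical runtime attribute exposing its payload via to_string():
    class MyFormats : public ov::RuntimeAttribute {
    public:
        OPENVINO_RTTI("my_formats", "0");
        MyFormats() = default;
        explicit MyFormats(std::string fmt) : m_fmt(std::move(fmt)) {}
        std::string to_string() const override { return m_fmt; }
    private:
        std::string m_fmt;
    };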
@@ -128,7 +128,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
}

@@ -98,7 +98,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
}

@@ -117,7 +117,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["RELU"] = "ndef";
}

@@ -105,7 +105,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Add_4"] = "ndef";
expectedPrecisions["Add_4"] = netPrecision.name();
expectedPrecisions["Convolution_6"] = "BF16";
}
};

@@ -123,7 +123,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";

@@ -127,7 +127,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["RELU_1"] = "ndef";

@@ -121,7 +121,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";
}

@@ -109,7 +109,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
}

@@ -97,7 +97,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["RELU_1"] = "ndef";
}

@@ -114,7 +114,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
}

@@ -106,7 +106,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";

@@ -106,7 +106,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "ndef";
expectedPrecisions["ADD_1"] = netPrecision.name();
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";

@@ -110,7 +110,7 @@ protected:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["ADD_2"] = "ndef";
expectedPrecisions["ADD_2"] = netPrecision.name();
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";
}

@@ -141,7 +141,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Add_1"] = "ndef";
expectedPrecisions["Add_1"] = netPrecision.name();
expectedPrecisions["Convolution_1"] = "BF16";
expectedPrecisions["Convolution_2"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";

@@ -128,10 +128,9 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Add_1"] = "ndef";
expectedPrecisions["Add_2"] = "ndef";
expectedPrecisions["Add_2"] = netPrecision.name();
expectedPrecisions["Convolution_1"] = "BF16";
expectedPrecisions["ELT_1"] = "ndef";
expectedPrecisions["ELT_1"] = netPrecision.name();
}
};

@@ -147,9 +147,9 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Add_1"] = "ndef";
expectedPrecisions["Add_1"] = netPrecision.name();
expectedPrecisions["Convolution_1"] = "BF16";
expectedPrecisions["Add_2"] = "ndef";
expectedPrecisions["Add_2"] = netPrecision.name();
expectedPrecisions["ELT_1"] = "ndef";
expectedPrecisions["RELU_1"] = "ndef";
}

@@ -112,7 +112,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Add_4"] = "ndef";
expectedPrecisions["Add_4"] = netPrecision.name();
expectedPrecisions["Convolution_6"] = "BF16";
}
};

@@ -132,7 +132,7 @@ protected:
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Add_4"] = "ndef";
expectedPrecisions["Add_4"] = netPrecision.name();
expectedPrecisions["Convolution_1"] = "BF16";
expectedPrecisions["Convolution_2"] = "BF16";
expectedPrecisions["TopK_1"] = netPrecision.name(); // tail kept in FP32 precision
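The same one-line edit repeats across all of these BF16 graph tests; condensed, with a hedged reading of the intent (the Add layers are no longer expected to be fused away with an undefined runtime precision, since they may now execute as standalone Snippets subgraphs in the network precision):

    // before: Add fused into a neighbouring primitive, precision reported as "not defined"
    expectedPrecisions["ADD_1"] = "ndef";
    // after: Add may run on its own (e.g. as a Subgraph) in the network precision, e.g. "BF16"
    expectedPrecisions["ADD_1"] = netPrecision.name();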
@@ -19,8 +19,8 @@ const std::vector<RuntimePrecisionSpecificParams> params = {
{makeEltwiseFunction, {Precision::BF16, Precision::BF16}, {{"Eltwise", Precision::BF16}}},
{makeEltwiseFunction, {Precision::U8, Precision::U8}, {{"Eltwise", Precision::U8}}},
{makeEltwiseFunction, {Precision::I8, Precision::I8}, {{"Eltwise", Precision::I8}}},
{makeFakeQuantizeReluFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"Relu_original", Precision::U8}}},
{makeFakeQuantizeReluFunction, {Precision::U8}, {{"FakeQuantize", Precision::U8}, {"Relu", Precision::U8}}},
{makeFakeQuantizeReluFunction, {Precision::FP32}, {{"Relu", Precision::FP32}}},
{makeFakeQuantizeReluFunction, {Precision::U8}, {{"Relu", Precision::U8}}},
{makeFakeQuantizeBinaryConvolutionFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"BinaryConvolution", Precision::BIN}}},
};
@@ -51,6 +51,11 @@ const std::vector<FakeQuantizeTransformationParam> fakeQuantizeOnDataValues = {
{ 256ul, {}, { -127.5f }, { 0.f }, { -127.5f }, { 0.f } },
"Pooling", "U8"
},
// corner case: FQ with equal constant values
{
{ 256ul, {}, { 0.f }, { 0.f }, { 0.f }, { 0.f } },
"Pooling", "U8"
},
{
{ 16ul, {}, { 0.f }, { 1.5f }, { 0.f }, { 1.5f } },
"Pooling", "U8"
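The new corner case exercises the relaxed range validation from the FakeQuantize hunk earlier (Greater instead of GreaterEqual): all four range constants are equal. Built directly, such a degenerate FQ looks like this (standalone sketch using opset1, as elsewhere in the diff):

    auto data = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 16, 16});
    auto zero = ngraph::opset1::Constant::create(ngraph::element::f32, {}, {0.f});
    // input_low == input_high == output_low == output_high == 0.f, 256 levels:
    auto fq = std::make_shared<ngraph::opset1::FakeQuantize>(data, zero, zero, zero, zero, 256);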
@@ -15,6 +15,10 @@ const std::vector<ngraph::element::Type> netPrecisions = {
//ngraph::element::f16
};

// If snippets fuse all operations into one subgraph node,
// it's impossible to extract exec precision for the specific layer
const auto precision_for_fused_cases = ov::element::undefined;

const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
{
false,

@@ -22,7 +26,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
false,
{ 256ul, ngraph::Shape {}, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
ngraph::element::f32,
precision_for_fused_cases,
true
},
{

@@ -31,7 +35,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
false,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
ngraph::element::i8,
precision_for_fused_cases,
false
},
{

@@ -40,7 +44,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
false,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
ngraph::element::u8,
precision_for_fused_cases,
false
},
{

@@ -49,7 +53,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
false,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
ngraph::element::u8,
precision_for_fused_cases,
false
},
{

@@ -58,7 +62,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
false,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
ngraph::element::i8,
precision_for_fused_cases,
false
},
{

@@ -67,7 +71,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
true,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } },
ngraph::element::i8,
precision_for_fused_cases,
false
},
{

@@ -76,7 +80,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
false,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
ngraph::element::u8,
precision_for_fused_cases,
false
},
{

@@ -85,7 +89,7 @@ const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
true,
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } },
ngraph::element::u8,
precision_for_fused_cases,
false
},
{ false, {}, false, {}, {}, ngraph::element::f32, false },
@@ -17,30 +17,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, Add,
::testing::Values(ov::Shape {1, 42, 16, 64}),
::testing::Values(ov::Shape {1, 42, 16, 1}),
::testing::Values(ov::element::f32),
::testing::Values(1),
::testing::Values(1), // one node - Add
::testing::Values(0), // SnippetsMarkSkipped disables tokenization for eltwise chains after inputs
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Add::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddSinh,
::testing::Combine(
::testing::Values(ov::Shape {1, 42, 16, 64}),
::testing::Values(ov::Shape {1, 42, 16, 1}),
::testing::Values(ov::element::f32),
::testing::Values(3), // Add + 2 sinh after inputs
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(CommonTestUtils::DEVICE_CPU)),
AddSinh::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddSinhConst,
::testing::Combine(
::testing::Values(ov::Shape {1, 42, 16, 64}),
::testing::Values(ov::element::f32),
::testing::Values(2), // Add + sinh after inputs
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(CommonTestUtils::DEVICE_CPU)),
AddSinhConst::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, AddRollConst,
::testing::Combine(
::testing::Values(ov::Shape {1, 42, 16, 64}),

@@ -58,8 +39,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise_BF16, AddRollConst,
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(CommonTestUtils::DEVICE_CPU)),
AddRollConst::getTestCaseName);

} // namespace
} // namespace snippets
} // namespace test
@@ -40,7 +40,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Convert, Convert,
::testing::Combine(
::testing::ValuesIn(inputShapes_Convert),
::testing::ValuesIn(types_Convert),
::testing::Values(2),
::testing::Values(1),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -67,7 +67,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertInput, ConvertInput,
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertInput),
::testing::ValuesIn(types_ConvertInput),
::testing::Values(3),
::testing::Values(1),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -76,7 +76,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertOutput, ConvertOutput,
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertInput),
::testing::ValuesIn(types_ConvertInput),
::testing::Values(3),
::testing::Values(1),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -85,7 +85,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertStub, ConvertStub,
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertInput),
::testing::ValuesIn(types_ConvertInput),
::testing::Values(4),
::testing::Values(2),
::testing::Values(2),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -104,7 +104,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertPartialInputsAndResults, ConvertP
::testing::Combine(
::testing::ValuesIn(inputShapes_ConvertPartialInputsAndResults),
::testing::ValuesIn(types_ConvertPartialInputsAndResults),
::testing::Values(6),
::testing::Values(2), // subgraph & roll after subgraph
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -119,7 +119,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputs, ConvertManyOnInputs
::testing::Combine(
::testing::Values(std::vector<ov::Shape>{ov::Shape{5, 5, 5, 5}}),
::testing::ValuesIn(types_ConvertMany),
::testing::Values(2),
::testing::Values(1),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -128,7 +128,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnOutputs, ConvertManyOnOutpu
::testing::Combine(
::testing::Values(std::vector<ov::Shape>{ov::Shape{5, 5, 5, 5}}),
::testing::ValuesIn(types_ConvertMany),
::testing::Values(2), // sinh + subgraph
::testing::Values(1),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);

@@ -142,7 +142,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_ConvertManyOnInputOutput, ConvertManyOnI
::testing::Combine(
::testing::Values(std::vector<ov::Shape>{ov::Shape{5, 5, 5, 5}}),
::testing::ValuesIn(types_ConvertManyIO),
::testing::Values(2), // sinh + subgraph
::testing::Values(1),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
Convert::getTestCaseName);
@@ -14,7 +14,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, EltwiseTwoResults,
::testing::Combine(
::testing::Values(ov::Shape {1, 64, 10, 10}),
::testing::Values(ov::Shape {1, 64, 10, 1}),
::testing::Values(4),
::testing::Values(2),
::testing::Values(2),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
EltwiseTwoResults::getTestCaseName);
@@ -12,13 +12,13 @@ namespace {
// Note that we need these shapes to cover all cases of code emission (none/one/multiple of scalar/vector tiles)
std::vector<ov::Shape> input_shapes {{1, 64, 10, 10}, {1, 1, 17, 37}, {1, 1, 1, 1}, {1, 1, 1, 7},
{1, 1, 1, 128}, {1, 1, 1, 14}, {1, 1, 1, 16}, {1, 1, 1, 30}};
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, MaxNumParamsEltwiseSinh,
::testing::Combine(
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, MaxNumParamsEltwise,
::testing::Combine(
::testing::ValuesIn(input_shapes),
::testing::Values(12), // 10 Sinh after inputs + Subgraph + Concat
::testing::Values(2), // Subgraph + Concat
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
MaxNumParamsEltwiseSinh::getTestCaseName);
MaxNumParamsEltwise::getTestCaseName);

} // namespace
} // namespace snippets
@@ -15,21 +15,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, ThreeInputsEltwise,
::testing::Values(ov::Shape {1, 64, 10, 10}),
::testing::Values(ov::Shape {1, 64, 10, 1}),
::testing::Values(ov::Shape {1, 1, 1, 10}),
::testing::Values(2), // eltwises fuse only for non-broadcasted shapes
::testing::Values(0), // SnippetsMarkSkipped disables tokenization for eltwise chains after inputs
::testing::Values(1), // eltwises fuse only for non-broadcasted shapes
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ThreeInputsEltwise::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, ThreeInputsEltwiseSinh,
::testing::Combine(
::testing::Values(ov::Shape {1, 64, 10, 10}),
::testing::Values(ov::Shape {1, 64, 10, 1}),
::testing::Values(ov::Shape {1, 1, 1, 10}),
::testing::Values(4), // Subgraph + 3 converts after inputs
::testing::Values(1), // Subgraph is created, since the inputs are followed by converts
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ThreeInputsEltwiseSinh::getTestCaseName);

} // namespace
} // namespace snippets
} // namespace test
@@ -34,7 +34,7 @@ const std::vector<std::vector<ov::Shape>> input_shapes = {
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_Eltwise, TwoInputsAndOutputs,
::testing::Combine(
::testing::ValuesIn(input_shapes),
::testing::Values(4),
::testing::Values(2),
::testing::Values(1),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
TwoInputsAndOutputs::getTestCaseName);
@@ -156,7 +156,6 @@ std::vector<Precision> netPrc = {

/* ============= Activation (1D) ============= */
std::vector<CPUSpecificParams> cpuParams_3D = {
CPUSpecificParams({nCw16c}, {nCw16c}, {}, {}),
CPUSpecificParams({nwc}, {nwc}, {}, {}),
CPUSpecificParams({ncw}, {ncw}, {}, {})
};

@@ -178,6 +177,27 @@ const auto basicCases3D = ::testing::Combine(

INSTANTIATE_TEST_SUITE_P(smoke_Activation3D_Eltwise_CPU_BF16, ActivationLayerCPUTest, basicCases3D, ActivationLayerCPUTest::getTestCaseName);

const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes_blocked = {
{Mish, {{}}},
{SoftSign, {{}}}
};

std::vector<CPUSpecificParams> cpuParams_3D_blocked = {
CPUSpecificParams({nCw16c}, {nCw16c}, {}, {}),
};

const auto blockedCases3D = ::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(basic3D)),
::testing::Values(activationShapes),
::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes_blocked)),
::testing::ValuesIn(netPrc),
::testing::Values(Precision::FP32),
::testing::Values(Precision::FP32),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_3D_blocked))
);

INSTANTIATE_TEST_SUITE_P(smoke_Activation3D_Eltwise_CPU_BF16_Blocked, ActivationLayerCPUTest, blockedCases3D, ActivationLayerCPUTest::getTestCaseName);

/* ============= Activation (2D) ============= */
std::vector<CPUSpecificParams> cpuParams_4D = {
CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}),
@@ -52,8 +52,18 @@ protected:
std::tie(shapes, inPrc, outPrc, cpuParams) = GetParam();

std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
auto primitive = selectedType;
if (primitive.empty())
primitive = getPrimitiveType();
// WA: I32 precision support disabled in snippets => primitive has to be changed
// TODO: remove the WA after I32 is supported in snippets (ticket: 99803)
if (inPrc == InferenceEngine::Precision::I32 || outPrc == InferenceEngine::Precision::I32)
primitive = "unknown";

selectedType = std::string("unknown_") + (inPrc == InferenceEngine::Precision::U8 ? "I8" : inPrc.name());
auto exec_type_precision = inPrc != InferenceEngine::Precision::U8
? inPrc
: InferenceEngine::Precision(InferenceEngine::Precision::I8);
selectedType = makeSelectedTypeStr(primitive, InferenceEngine::details::convertPrecision(exec_type_precision));

for (size_t i = 0; i < shapes.second.size(); i++) {
targetStaticShapes.push_back(std::vector<ngraph::Shape>{shapes.second[i]});
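Condensed, the new selectedType logic above does three things: fall back to the primitive type when none is given, force the non-snippets implementation for I32 (not yet supported by snippets, ticket 99803), and report U8 inputs with I8 execution precision (sketch):

    auto primitive = selectedType.empty() ? getPrimitiveType() : selectedType;
    // Snippets path is disabled for I32 => expect the fallback impl, reported as "unknown":
    if (inPrc == InferenceEngine::Precision::I32 || outPrc == InferenceEngine::Precision::I32)
        primitive = "unknown";
    // U8 inputs execute (and are reported) with I8 precision:
    const auto execPrc = inPrc == InferenceEngine::Precision::U8
        ? InferenceEngine::Precision(InferenceEngine::Precision::I8) : inPrc;
    selectedType = makeSelectedTypeStr(primitive, InferenceEngine::details::convertPrecision(execPrc));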
@@ -112,12 +122,10 @@ private:
TEST_P(ConvertCPULayerTest, CompareWithRefs) {
run();

CheckPluginRelatedResults(compiledModel, "Convert");
CheckPluginRelatedResults(compiledModel, std::set<std::string>{"Convert", "Subgraph"});
}

std::vector<InputShape> inShapes_4D = {
{{1, 2, 3, 4}, {{1, 2, 3, 4}}},
{{1, 1, 1080, 1920}, {{1, 1, 1080, 1920}}},
std::vector<InputShape> inShapes_4D_dynamic = {
{
// dynamic
{{-1, -1, -1, -1}},

@@ -154,27 +162,69 @@ const std::vector<Precision> precisions_floating_point = {
Precision::BF16
};

std::vector<CPUSpecificParams> memForm4D = {
CPUSpecificParams({nchw}, {nchw}, {}, {}),
CPUSpecificParams({nhwc}, {nhwc}, {}, {}),
CPUSpecificParams({nChw8c}, {nChw8c}, {}, {}),
CPUSpecificParams({nChw16c}, {nChw16c}, {}, {})
std::vector<CPUSpecificParams> memForm4D_dynamic = {
CPUSpecificParams({nchw}, {nchw}, {}, "unknown"),
CPUSpecificParams({nhwc}, {nhwc}, {}, "unknown"),
CPUSpecificParams({nChw8c}, {nChw8c}, {}, "unknown"),
CPUSpecificParams({nChw16c}, {nChw16c}, {}, "unknown")
};

INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_Dynamic, ConvertCPULayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes_4D_dynamic),
::testing::ValuesIn(precisions),
::testing::ValuesIn(precisions),
::testing::ValuesIn(memForm4D_dynamic)),
ConvertCPULayerTest::getTestCaseName);

std::vector<InputShape> inShapes_4D_static = {
{{1, 2, 3, 4}, {{1, 2, 3, 4}}},
{{1, 1, 1080, 1920}, {{1, 1, 1080, 1920}}},
};

std::vector<CPUSpecificParams> memForm4D_static_common = {
CPUSpecificParams({nchw}, {nchw}, {}, {}),
CPUSpecificParams({nhwc}, {nhwc}, {}, {}),
};

INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest, ConvertCPULayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes_4D),
::testing::ValuesIn(inShapes_4D_static),
::testing::ValuesIn(precisions),
::testing::ValuesIn(precisions),
::testing::ValuesIn(memForm4D)),
::testing::ValuesIn(memForm4D_static_common)),
ConvertCPULayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL, ConvertCPULayerTest,
std::vector<InputShape> inShapes_4D_blocked = {
{{1, 16, 5, 5}, {{1, 16, 5, 5}}},
};

std::vector<CPUSpecificParams> memForm4D_static_blocked = {
CPUSpecificParams({nChw16c}, {nChw16c}, {}, {})
};

INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_Blocked, ConvertCPULayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes_4D),
::testing::ValuesIn(inShapes_4D_blocked),
::testing::ValuesIn(precisions),
::testing::ValuesIn(precisions),
::testing::ValuesIn(filterCPUSpecificParams(memForm4D_static_blocked))),
ConvertCPULayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL_Static, ConvertCPULayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes_4D_static),
::testing::ValuesIn(precisions_floating_point),
::testing::Values(Precision::BOOL),
::testing::Values(CPUSpecificParams({nchw}, {nchw}, {}, {}))),
ConvertCPULayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL_Dynamic, ConvertCPULayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes_4D_dynamic),
::testing::ValuesIn(precisions_floating_point),
::testing::Values(Precision::BOOL),
::testing::Values(CPUSpecificParams({nchw}, {nchw}, {}, "unknown"))),
ConvertCPULayerTest::getTestCaseName);

} // namespace CPULayerTestsDefinitions
@@ -169,7 +169,7 @@ private:

TEST_P(EltwiseLayerCPUTest, CompareWithRefs) {
run();
CheckPluginRelatedResults(compiledModel, "Eltwise");
CheckPluginRelatedResults(compiledModel, std::set<std::string>{"Eltwise", "Subgraph"});
}

namespace {

@@ -223,7 +223,7 @@ const std::vector<fusingSpecificParams> fusingParamsSet{
// fake quantize
fusingFakeQuantizePerTensorRelu,
fusingFakeQuantizePerChannelRelu,
fusingFQPerChannelSigmoidFQPerChannel
fusingFQPerChannelSigmoidFQPerTensor
};

std::vector<std::vector<ov::Shape>> inShapes_4D = {

@@ -240,8 +240,8 @@ const auto params_4D = ::testing::Combine(
::testing::ValuesIn(secondaryInputTypes),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)),

@@ -262,8 +262,8 @@ const auto params_4D_fusing = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(opTypes),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(cpuParams_4D),

@@ -278,8 +278,8 @@ const auto params_4D_emptyCPUSpec = ::testing::Combine(
::testing::ValuesIn(secondaryInputTypes),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::Values(emptyCPUSpec),

@@ -301,8 +301,8 @@ const auto params_5D = ::testing::Combine(
::testing::ValuesIn(secondaryInputTypes),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)),

@@ -317,8 +317,8 @@ const auto params_5D_emptyCPUSpec = ::testing::Combine(
::testing::ValuesIn(secondaryInputTypes),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::Values(emptyCPUSpec),

@@ -346,8 +346,8 @@ const auto params_5D_emptyCPUSpec_I32 = ::testing::Combine(
::testing::ValuesIn(secondaryInputTypes),
::testing::ValuesIn(opTypes),
::testing::Values(ElementType::i32),
::testing::Values(ElementType::i32),
::testing::Values(ElementType::i32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::Values(emptyCPUSpec),

@@ -372,8 +372,8 @@ const auto params_4D_Blocked_Planar = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Planar)),

@@ -398,8 +398,8 @@ const auto params_4D_Planar_Blocked = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar_Blocked)),

@@ -424,8 +424,8 @@ const auto params_5D_Blocked_Planar = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Planar)),

@@ -450,8 +450,8 @@ const auto params_5D_Planar_Blocked = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Planar_Blocked)),

@@ -478,8 +478,8 @@ const auto params_4D_1D_constant_mode = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode)),

@@ -488,8 +488,6 @@ const auto params_4D_1D_constant_mode = ::testing::Combine(
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant, EltwiseLayerCPUTest, params_4D_1D_constant_mode, EltwiseLayerCPUTest::getTestCaseName);

std::vector<CPUSpecificParams> cpuParams_4D_1D_Parameter_mode = {
CPUSpecificParams({nChw16c, x}, {nChw16c}, {}, {}),
CPUSpecificParams({nhwc, x}, {nhwc}, {}, {}),
CPUSpecificParams({nchw, x}, {nchw}, {}, {})
};

@@ -500,8 +498,8 @@ const auto params_4D_1D_parameter_mode = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode)),

@@ -527,8 +525,8 @@ const auto params_5D_1D_constant = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant)),

@@ -537,8 +535,6 @@ const auto params_5D_1D_constant = ::testing::Combine(
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName);

std::vector<CPUSpecificParams> cpuParams_5D_1D_parameter = {
CPUSpecificParams({nCdhw16c, x}, {nCdhw16c}, {}, {}),
CPUSpecificParams({ndhwc, x}, {ndhwc}, {}, {}),
CPUSpecificParams({ncdhw, x}, {ncdhw}, {}, {})
};

@@ -549,8 +545,8 @@ const auto params_5D_1D_parameter = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter)),

@@ -602,8 +598,8 @@ const auto params_4D_dyn_const = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)),

@@ -641,8 +637,8 @@ const auto params_4D_dyn_param = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)),

@@ -682,8 +678,8 @@ const auto params_4D_dyn_param_fusing = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(opTypes),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(cpuParams_4D),

@@ -713,8 +709,8 @@ const auto params_5D_dyn_const = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)),

@@ -752,8 +748,8 @@ const auto params_5D_dyn_param = ::testing::Combine(
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(opTypes),
::testing::ValuesIn(netType),
::testing::Values(ElementType::f32),
::testing::Values(ElementType::f32),
::testing::Values(ov::element::undefined),
::testing::Values(ov::element::undefined),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config)),
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)),
@@ -115,9 +115,8 @@ std::string CPUTestsBase::impls2str(const std::vector<std::string> &priority) {
return str;
}

void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType) const {
if (!execNet) return;
if (nodeType.empty()) return;
void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::set<std::string>& nodeType) const {
if (!execNet || nodeType.empty()) return;

ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined.";
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();

@@ -125,16 +124,23 @@ void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork
CheckPluginRelatedResultsImpl(function, nodeType);
}

void CPUTestsBase::CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::string& nodeType) const {
if (!execNet) return;
if (nodeType.empty()) return;
void CPUTestsBase::CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::set<std::string>& nodeType) const {
if (!execNet || nodeType.empty()) return;

ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined.";
auto function = execNet.get_runtime_model();
CheckPluginRelatedResultsImpl(function, nodeType);
}

void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const {
void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType) const {
CheckPluginRelatedResults(execNet, std::set<std::string>{nodeType});
}

void CPUTestsBase::CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::string& nodeType) const {
CheckPluginRelatedResults(execNet, std::set<std::string>{nodeType});
}

void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const {
ASSERT_NE(nullptr, function);
for (const auto &node : function->get_ops()) {
const auto & rtInfo = node->get_rt_info();

@@ -161,7 +167,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:
return skip_unsquized_1D || permule_of_1;
};

if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == nodeType) {
if (nodeType.count(getExecValue(ExecGraphInfoSerialization::LAYER_TYPE))) {
ASSERT_LE(inFmts.size(), node->get_input_size());
ASSERT_LE(outFmts.size(), node->get_output_size());
for (int i = 0; i < inFmts.size(); i++) {

@@ -212,7 +218,6 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:

if (should_be_skipped(shape, outFmts[i]))
continue;

ASSERT_EQ(outFmts[i], cpu_str2fmt(actualOutputMemoryFormats[i].c_str()));
}
@@ -136,13 +136,15 @@ public:
const std::shared_ptr<ngraph::Node> &lastNode,
std::string name);

void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::set<std::string>& nodeType) const;
void CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::set<std::string>& nodeType) const;
void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType) const;
void CheckPluginRelatedResults(const ov::CompiledModel &execNet, const std::string& nodeType) const;

static const char* any_type;

protected:
virtual void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const;
virtual void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const;
/**
* @brief This function modifies the initial single layer test graph to add any necessary modifications that are specific to the cpu test scope.
* @param ngPrc Graph precision.
@@ -36,7 +36,7 @@ CpuTestWithFusing::modifyGraph(const ngraph::element::Type &ngPrc, ngraph::Param
return retNode;
}

void CpuTestWithFusing::CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const {
void CpuTestWithFusing::CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const {
ASSERT_NE(nullptr, function);
bool isNodeFound = false;
for (const auto & op : function->get_ops()) {

@@ -49,22 +49,29 @@ void CpuTestWithFusing::CheckFusingResults(const std::shared_ptr<const ov::Model
};

auto layerType = getExecValue("layerType", rtInfo);
if (layerType == nodeType) {
if (nodeType.count(layerType)) {
isNodeFound = true;
auto originalLayersNames = getExecValue("originalLayersNames", rtInfo);
std::string opFriendlyName = op->get_friendly_name();
auto pos = originalLayersNames.find(opFriendlyName);
ASSERT_TRUE(pos != std::string::npos) << "Operation name " << op->get_friendly_name() << " has not been found in originalLayersNames!";
ASSERT_TRUE(originalLayersNames.find(opFriendlyName) != std::string::npos)
<< "Operation name " << opFriendlyName << " has not been found in originalLayersNames!";

size_t pos = 0;
for (const auto& fusedOp : fusedOps) {
pos = originalLayersNames.find(fusedOp, checkFusingPosition ? pos : 0);
ASSERT_TRUE(pos != std::string::npos) << "Fused op " << fusedOp << " has not been found!";
}
}
}
ASSERT_TRUE(isNodeFound) << "Node type name: \"" << nodeType << "\" has not been found.";
std::stringstream error_message;
error_message << "Node with types \"";
for (const auto& elem : nodeType)
error_message << elem << ", ";
error_message << "\" wasn't found";
ASSERT_TRUE(isNodeFound) << error_message.str();
}

void CpuTestWithFusing::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const {
void CpuTestWithFusing::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const {
CPUTestsBase::CheckPluginRelatedResultsImpl(function, nodeType);
CheckFusingResults(function, nodeType);
}
@ -72,10 +72,10 @@ protected:
|
||||
ngraph::ParameterVector ¶ms,
|
||||
const std::shared_ptr<ngraph::Node> &lastNode) override;
|
||||
|
||||
void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const override;
|
||||
void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const override;
|
||||
|
||||
private:
|
||||
void CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::string& nodeType) const;
|
||||
void CheckFusingResults(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const;
|
||||
|
||||
protected:
|
||||
std::shared_ptr<postOpMgr> postOpMgrPtr;
|
||||
@ -325,6 +325,28 @@ const auto fusingFQPerChannelSigmoidFQPerChannel = fusingSpecificParams{std::mak
            return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
        }, "FakeQuantize(PerChannel)"}}), {"FakeQuantize", "Sigmoid", "FakeQuantize"}};

const auto fusingFQPerChannelSigmoidFQPerTensor = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
        {[](postNodeConfig& cfg){
            auto localPrc = cfg.input->get_element_type();
            auto shape = cfg.input->get_output_partial_shape(0);
            if (shape.size() == 1)
                IE_THROW() << "If shape.size() == 1 then Granularity can be PerTensor only";
            ngraph::Shape newShape(shape.size(), 1);
            newShape[1] = shape[1].get_length();
            return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
        }, "FakeQuantize(PerChannel)"},
        {[](postNodeConfig& cfg){
            return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sigmoid);
        }, "Sigmoid"},
        {[](postNodeConfig& cfg){
            auto localPrc = cfg.input->get_element_type();
            auto shape = cfg.input->get_output_partial_shape(0);
            if (shape.size() == 1)
                IE_THROW() << "If shape.size() == 1 then Granularity can be PerTensor only";
            ngraph::Shape newShape(shape.size(), 1);
            return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
        }, "FakeQuantize(PerTensor)"}}), {"FakeQuantize", "Sigmoid", "FakeQuantize"}};

const auto fusingFakeQuantizePerTensorRelu = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
        {[](postNodeConfig& cfg) {
            auto localPrc = cfg.input->get_element_type();
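
Both FakeQuantize builders above derive the quantization-constant shape from the input rank: per-channel keeps only the channel dimension, per-tensor collapses every dimension to 1. A hedged sketch of just that shape logic (`fqConstShape` is my own helper name, not from the test utilities):

#include <cassert>
#include <cstddef>
#include <vector>

// Builds the shape of the FakeQuantize range constants: all ones, except the
// channel axis (dim 1 in NCHW) when per-channel granularity is requested.
std::vector<std::size_t> fqConstShape(const std::vector<std::size_t>& dataShape, bool perChannel) {
    std::vector<std::size_t> newShape(dataShape.size(), 1);
    if (perChannel)
        newShape[1] = dataShape[1];
    return newShape;
}

int main() {
    assert((fqConstShape({1, 3, 4, 4}, true) == std::vector<std::size_t>{1, 3, 1, 1}));
    assert((fqConstShape({1, 3, 4, 4}, false) == std::vector<std::size_t>{1, 1, 1, 1}));
    return 0;
}
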
@ -22,14 +22,6 @@ public:
    }
};

TEST_F(SnippetsMarkSkippedTests, smoke_Snippets_SkipAfterInputsEltwise) {
    const auto &f = EltwiseFunction({{2, 3}, {1, 3}});
    function = f.getOriginal();
    // None subgraphs are expected, since the whole graph is an eltwise chain after input
    function_ref = f.getOriginal();
    run();
}

TEST_F(SnippetsMarkSkippedTests, smoke_Snippets_SkipAfterInputsMatMulEltwise) {
    const auto &f = MatMulEltwiseBranchesFunction(std::vector<Shape> {{1, 3, 4, 4}, {1, 3, 4, 4}});
    function = f.getOriginal();
@ -36,20 +36,15 @@ protected:
    void SetUp() override;
};

class AddSinh : public Add {
protected:
    void SetUp() override;
};

class AddSinhConst : public testing::WithParamInterface<ov::test::snippets::AddConstParams>,
                     virtual public ov::test::SnippetsTestsCommon {
class AddConst : public testing::WithParamInterface<ov::test::snippets::AddConstParams>,
                 virtual public ov::test::SnippetsTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddConstParams> obj);
protected:
    void SetUp() override;
};

class AddRollConst : public AddSinhConst {
class AddRollConst : public AddConst {
protected:
    void SetUp() override;
};

@ -17,8 +17,8 @@ typedef std::tuple<
        std::string                  // Target Device
> MaxNumParamsEltwiseParams;

class MaxNumParamsEltwiseSinh : public testing::WithParamInterface<ov::test::snippets::MaxNumParamsEltwiseParams>,
                                virtual public ov::test::SnippetsTestsCommon {
class MaxNumParamsEltwise : public testing::WithParamInterface<ov::test::snippets::MaxNumParamsEltwiseParams>,
                            virtual public ov::test::SnippetsTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<ov::test::snippets::MaxNumParamsEltwiseParams> obj);

@ -28,11 +28,6 @@ protected:
    void SetUp() override;
};

class ThreeInputsEltwiseSinh : public ThreeInputsEltwise {
protected:
    void SetUp() override;
};

} // namespace snippets
} // namespace test
@ -241,6 +241,76 @@ const char expected_serialized_model[] = R"V0G0N(
</net>
)V0G0N";

const char expected_serialized_model_cpu[] = R"V0G0N(
<?xml version="1.0"?>
<net name="addmul_abc" version="10">
	<layers>
		<layer id="0" name="C" type="Input">
			<data shape="1" element_type="f32" execOrder="2" execTimeMcs="not_executed" originalLayersNames="C" outputLayouts="a" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32" />
			<output>
				<port id="0" precision="FP32">
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="1" name="B" type="Input">
			<data shape="1" element_type="f32" execOrder="1" execTimeMcs="not_executed" originalLayersNames="B" outputLayouts="a" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32" />
			<output>
				<port id="0" precision="FP32">
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="2" name="A" type="Input">
			<data shape="1" element_type="f32" execOrder="0" execTimeMcs="not_executed" originalLayersNames="A" outputLayouts="a" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32" />
			<output>
				<port id="0" precision="FP32">
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="3" name="Y" type="Subgraph">
			<data execOrder="3" execTimeMcs="not_executed" originalLayersNames="add_node1,add_node2,add_node3,add_node4,Y" outputLayouts="a" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" runtimePrecision="FP32" />
			<input>
				<port id="0" precision="FP32">
					<dim>1</dim>
				</port>
				<port id="1" precision="FP32">
					<dim>1</dim>
				</port>
				<port id="2" precision="FP32">
					<dim>1</dim>
				</port>
				<port id="3" precision="FP32">
					<dim>1</dim>
				</port>
			</input>
			<output>
				<port id="4" precision="FP32">
					<dim>1</dim>
				</port>
			</output>
		</layer>
		<layer id="4" name="Y/sink_port_0" type="Output">
			<data execOrder="4" execTimeMcs="not_executed" originalLayersNames="Y/sink_port_0" outputLayouts="undef" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32" />
			<input>
				<port id="0" precision="FP32">
					<dim>1</dim>
				</port>
			</input>
		</layer>
	</layers>
	<edges>
		<edge from-layer="0" from-port="0" to-layer="3" to-port="2" />
		<edge from-layer="0" from-port="0" to-layer="3" to-port="3" />
		<edge from-layer="1" from-port="0" to-layer="3" to-port="1" />
		<edge from-layer="2" from-port="0" to-layer="3" to-port="0" />
		<edge from-layer="3" from-port="4" to-layer="4" to-port="0" />
	</edges>
	<rt_info />
</net>
)V0G0N";

std::string ExecGraphSerializationTest::getTestCaseName(testing::TestParamInfo<std::string> obj) {
    std::ostringstream result;
@ -354,7 +424,7 @@ TEST_P(ExecGraphSerializationTest, ExecutionGraph) {

    pugi::xml_document expected;
    pugi::xml_document result;
    ASSERT_TRUE(expected.load_string(expected_serialized_model));
    ASSERT_TRUE(expected.load_string(target_device == "CPU" ? expected_serialized_model_cpu : expected_serialized_model));
    ASSERT_TRUE(result.load_file(m_out_xml_path.c_str()));

    bool status;
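
The test now selects the CPU-specific reference (where the four Add nodes collapse into a single `Subgraph` layer named `Y`) by device. A minimal pugixml sketch of the load pattern used above, with a hypothetical file name standing in for `m_out_xml_path`:

#include <pugixml.hpp>
#include <iostream>

int main() {
    pugi::xml_document expected, result;
    // Expected model comes from an in-memory string; the actual one from the
    // file the serialization pass wrote.
    if (!expected.load_string("<net name=\"addmul_abc\" version=\"10\"/>"))
        return 1;  // malformed expected model
    if (!result.load_file("exec_graph.xml"))  // hypothetical output path
        return 1;  // serialized model missing or unreadable
    std::cout << expected.child("net").attribute("name").value() << "\n";
    return 0;
}
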
@ -38,18 +38,7 @@ void Add::SetUp() {
    setInferenceType(type);
}

void AddSinh::SetUp() {
    ov::Shape inputShape0, inputShape1;
    ov::element::Type type;
    std::tie(inputShape0, inputShape1, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
    init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}});

    auto f = ov::test::snippets::AddSinhFunction({inputShape0, inputShape1});
    function = f.getOriginal();
    setInferenceType(type);
}

std::string AddSinhConst::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddConstParams> obj) {
std::string AddConst::getTestCaseName(testing::TestParamInfo<ov::test::snippets::AddConstParams> obj) {
    ov::Shape inputShapes, newInputShapes;
    ov::element::Type type;
    std::string targetDevice;
@ -65,13 +54,13 @@ std::string AddSinhConst::getTestCaseName(testing::TestParamInfo<ov::test::snipp
    return result.str();
}

void AddSinhConst::SetUp() {
void AddConst::SetUp() {
    ov::Shape inputShape;
    ov::element::Type type;
    std::tie(inputShape, type, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
    init_input_shapes({{{}, {inputShape, }}});

    auto f = ov::test::snippets::AddSinhConstFunction({inputShape});
    auto f = ov::test::snippets::AddConstFunction({inputShape});
    function = f.getOriginal();
    setInferenceType(type);
}
@ -92,12 +81,7 @@ TEST_P(Add, CompareWithRefImpl) {
    validateNumSubgraphs();
}

TEST_P(AddSinh, CompareWithRefImpl) {
    run();
    validateNumSubgraphs();
}

TEST_P(AddSinhConst, CompareWithRefImpl) {
TEST_P(AddConst, CompareWithRefImpl) {
    run();
    validateNumSubgraphs();
}
@ -10,7 +10,7 @@ namespace ov {
namespace test {
namespace snippets {

std::string MaxNumParamsEltwiseSinh::getTestCaseName(testing::TestParamInfo<ov::test::snippets::MaxNumParamsEltwiseParams> obj) {
std::string MaxNumParamsEltwise::getTestCaseName(testing::TestParamInfo<ov::test::snippets::MaxNumParamsEltwiseParams> obj) {
    ov::Shape inputShapes;
    std::string targetDevice;
    size_t num_nodes, num_subgraphs;
@ -24,7 +24,7 @@ std::string MaxNumParamsEltwiseSinh::getTestCaseName(testing::TestParamInfo<ov::
    return result.str();
}

void MaxNumParamsEltwiseSinh::SetUp() {
void MaxNumParamsEltwise::SetUp() {
    ov::Shape inputShape;
    std::tie(inputShape, ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
    std::vector<ov::Shape> expandedShapes(10, inputShape);
@ -35,11 +35,11 @@ void MaxNumParamsEltwiseSinh::SetUp() {

    init_input_shapes(input_shapes);

    auto f = ov::test::snippets::EltwiseMaxNumParamsSinhFunction(expandedShapes);
    auto f = ov::test::snippets::EltwiseMaxNumParamsFunction(expandedShapes);
    function = f.getOriginal();
}

TEST_P(MaxNumParamsEltwiseSinh, CompareWithRefImpl) {
TEST_P(MaxNumParamsEltwise, CompareWithRefImpl) {
    run();
    validateNumSubgraphs();
}
@ -37,26 +37,11 @@ void ThreeInputsEltwise::SetUp() {
    function = f.getOriginal();
}

void ThreeInputsEltwiseSinh::SetUp() {
    ov::Shape inputShape0, inputShape1, inputShape2;
    std::tie(inputShape0, inputShape1, inputShape2,
             ref_num_nodes, ref_num_subgraphs, targetDevice) = this->GetParam();
    init_input_shapes({{{}, {inputShape0, }}, {{}, {inputShape1, }}, {{}, {inputShape2, }}});

    auto f = ov::test::snippets::EltwiseThreeInputsSinhFunction({inputShape0, inputShape1, inputShape2});
    function = f.getOriginal();
}

TEST_P(ThreeInputsEltwise, CompareWithRefImpl) {
    run();
    validateNumSubgraphs();
}

TEST_P(ThreeInputsEltwiseSinh, CompareWithRefImpl) {
    run();
    validateNumSubgraphs();
}

} // namespace snippets
} // namespace test
} // namespace ov
@ -29,32 +29,14 @@ protected:
    std::shared_ptr<ov::Model> initOriginal() const override;
    std::shared_ptr<ov::Model> initReference() const override;
};
/// Add separated from inputs by Sinh to WA CPU-specific disabling after inputs.
/// Works because Sinh is not supported by tokenization yet.
/// Tokenized simply by starting subgraph.
//   in1   in2
//   Sinh  Sinh
//       Add
//      Result
// todo: remove Sinh once "no subgraph after input" limitation is relaxed
class AddSinhFunction : public SnippetsFunctionBase {
public:
    explicit AddSinhFunction(const std::vector<Shape>& inputShapes) : SnippetsFunctionBase(inputShapes) {
        NGRAPH_CHECK(input_shapes.size() == 2, "Got invalid number of input shapes");
    }
protected:
    std::shared_ptr<ov::Model> initOriginal() const override;
    std::shared_ptr<ov::Model> initReference() const override;
};
/// Like AddSinh but with a constant second input (and no sinh on in)
//   in1   in2
//   Sin   Sinh
//       Add
//      Result
// todo: remove Sinh once "no subgraph after input" limitation is relaxed
class AddSinhConstFunction : public SnippetsFunctionBase {
class AddConstFunction : public SnippetsFunctionBase {
public:
    explicit AddSinhConstFunction(const std::vector<Shape>& inputShapes) : SnippetsFunctionBase(inputShapes) {
    explicit AddConstFunction(const std::vector<Shape>& inputShapes) : SnippetsFunctionBase(inputShapes) {
        NGRAPH_CHECK(input_shapes.size() == 1, "Got invalid number of input shapes");
    }
protected:
@ -108,30 +90,16 @@ public:
protected:
    std::shared_ptr<ov::Model> initOriginal() const override;
};
/// EltwiseFunctionThreeInputs with Sinh after inputs to WA CPU-specific disabling after inputs
/// See AddSinh for details.
// todo: remove Sinh once "no subgraph after input" limitation is relaxed
class EltwiseThreeInputsSinhFunction : public SnippetsFunctionBase {
public:
    explicit EltwiseThreeInputsSinhFunction(const std::vector<Shape>& inputShapes) :
        SnippetsFunctionBase(inputShapes) {
        NGRAPH_CHECK(input_shapes.size() == 3, "Got invalid number of input shapes");
    }
protected:
    std::shared_ptr<ov::Model> initOriginal() const override;
};
/// Eltwise graph with 10 inputs and 2 outputs.
/// Needed to test for a max number of inputs+outputs allowed.
//   in1   in2   in3  ... in10
//   Sinh  Sinh  Sinh ...Sinh
//   ........................
//      Subtract  Power
//            \   Sinh
//            Result
// todo: remove Sinh once "no subgraph after input" limitation is relaxed
class EltwiseMaxNumParamsSinhFunction : public SnippetsFunctionBase {
class EltwiseMaxNumParamsFunction : public SnippetsFunctionBase {
public:
    explicit EltwiseMaxNumParamsSinhFunction(const std::vector<Shape>& inputShapes) :
    explicit EltwiseMaxNumParamsFunction(const std::vector<Shape>& inputShapes) :
        SnippetsFunctionBase(inputShapes) {
        NGRAPH_CHECK(input_shapes.size() == 10, "Got invalid number of input shapes");
    }
@ -181,7 +149,6 @@ protected:
/// So we have 2 subgraphs - Snippets don't support subgraphs with many results
/// Also Output tensors have names to check correct copying output names
//   in1    in2
//   Sinh   Sinh
//       Add
//  HSwish    Result
//   Relu
@ -198,7 +165,6 @@ protected:
/// Two different Input and Outputs.
/// This function is to check correct Broadcasting
//   in1    in2
//   Sin    Sin
//  HSwish   /
//  Result  Add
//          Relu
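
The header changes above all follow one theme: the Sinh "stub" that used to sit between each Parameter and the first eltwise op is gone, because tokenization may now start a Subgraph directly after a model input. A hedged before/after sketch (my own helper, assuming the OpenVINO 2.0 C++ API):

#include <memory>
#include <openvino/openvino.hpp>

// withSinhStub=true reproduces the old test pattern: Sinh is not tokenizable,
// so it kept the Subgraph away from the Parameters. withSinhStub=false is the
// pattern these headers switch to.
std::shared_ptr<ov::Model> makeAddModel(bool withSinhStub) {
    auto p0 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    auto p1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    ov::Output<ov::Node> in0 = p0, in1 = p1;
    if (withSinhStub) {
        in0 = std::make_shared<ov::op::v0::Sinh>(in0);
        in1 = std::make_shared<ov::op::v0::Sinh>(in1);
    }
    auto add = std::make_shared<ov::op::v1::Add>(in0, in1);
    return std::make_shared<ov::Model>(ov::NodeVector{add}, ov::ParameterVector{p0, p1});
}
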
@ -19,15 +19,13 @@ std::shared_ptr<ov::Node> createRollAsStub(const std::shared_ptr<ov::Node>& pare

std::shared_ptr<ov::Model> ConvertFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto stub = createRollAsStub(data0);
    auto convert = std::make_shared<op::v0::Convert>(stub, outType);
    auto convert = std::make_shared<op::v0::Convert>(data0, outType);
    return std::make_shared<ov::Model>(NodeVector{convert}, ParameterVector{data0});
}
std::shared_ptr<ov::Model> ConvertFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto stub = createRollAsStub(data0);
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, stub->get_shape());
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{stub},
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, data0->get_shape());
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0},
        std::make_shared<ov::Model>(NodeVector{std::make_shared<ngraph::snippets::op::ConvertTruncation>(indata0, outType)},
                                    ParameterVector{indata0}));
    return std::make_shared<ov::Model>(NodeVector{subgraph}, ParameterVector{data0});
@ -36,21 +34,17 @@ std::shared_ptr<ov::Model> ConvertFunction::initReference() const {
std::shared_ptr<ov::Model> ConvertInputFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(outType, input_shapes[1]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto convert = std::make_shared<op::v0::Convert>(stub0, outType);
    auto add = std::make_shared<op::v1::Add>(convert, stub1);
    auto convert = std::make_shared<op::v0::Convert>(data0, outType);
    auto add = std::make_shared<op::v1::Add>(convert, data1);
    return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> ConvertInputFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(outType, input_shapes[1]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, stub0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(outType, stub1->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, data0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(outType, data1->get_shape());
    auto convert = std::make_shared<ngraph::snippets::op::ConvertTruncation>(indata0, outType);
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{stub0, stub1},
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0, data1},
        std::make_shared<ov::Model>(
            NodeVector{std::make_shared<op::v1::Add>(convert, indata1)},
            ParameterVector{indata0, indata1}));
@ -60,22 +54,18 @@ std::shared_ptr<ov::Model> ConvertInputFunction::initReference() const {
std::shared_ptr<ov::Model> ConvertOutputFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(inType, input_shapes[1]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto add = std::make_shared<op::v1::Add>(stub0, stub1);
    auto add = std::make_shared<op::v1::Add>(data0, data1);
    auto convert = std::make_shared<op::v0::Convert>(add, outType);
    return std::make_shared<ov::Model>(NodeVector{convert}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> ConvertOutputFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(inType, input_shapes[1]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, stub0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(inType, stub1->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, data0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(inType, data1->get_shape());
    auto add = std::make_shared<op::v1::Add>(indata0, indata1);
    auto convert = std::make_shared<ngraph::snippets::op::ConvertTruncation>(add, outType);
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{stub0, stub1},
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0, data1},
        std::make_shared<ov::Model>(
            NodeVector{convert},
            ParameterVector{indata0, indata1}));
@ -85,9 +75,7 @@ std::shared_ptr<ov::Model> ConvertOutputFunction::initReference() const {
std::shared_ptr<ov::Model> ConvertStubFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(inType, input_shapes[1]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto add = std::make_shared<op::v1::Add>(stub0, stub1);
    auto add = std::make_shared<op::v1::Add>(data0, data1);
    auto convert = std::make_shared<op::v0::Convert>(add, outType);
    auto relu = std::make_shared<op::v0::Relu>(convert);
    return std::make_shared<ov::Model>(NodeVector{relu}, ParameterVector{data0, data1});
@ -95,14 +83,12 @@ std::shared_ptr<ov::Model> ConvertStubFunction::initOriginal() const {
std::shared_ptr<ov::Model> ConvertStubFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inType, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(inType, input_shapes[1]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, stub0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(inType, stub1->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(inType, data0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(inType, data1->get_shape());
    auto add = std::make_shared<op::v1::Add>(indata0, indata1);
    auto convert = std::make_shared<ngraph::snippets::op::ConvertTruncation>(add, outType);
    auto subgraph0 = std::make_shared<ngraph::snippets::op::Subgraph>(
        NodeVector{stub0, stub1}, std::make_shared<ov::Model>(NodeVector{convert}, ParameterVector{indata0, indata1}));
        NodeVector{data0, data1}, std::make_shared<ov::Model>(NodeVector{convert}, ParameterVector{indata0, indata1}));
    auto indata2 = std::make_shared<op::v0::Parameter>(convert->get_destination_type(), convert->get_shape());
    auto relu = std::make_shared<op::v0::Relu>(indata2);
    auto subgraph1 = std::make_shared<ngraph::snippets::op::Subgraph>(
@ -114,14 +100,11 @@ std::shared_ptr<ov::Model> ConvertPartialInputsAndResultsFunction::initOriginal(
    auto data0 = std::make_shared<op::v0::Parameter>(inTypes[0], input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(inTypes[1], input_shapes[1]);
    auto data2 = std::make_shared<op::v0::Parameter>(inTypes[2], input_shapes[2]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto stub2 = createRollAsStub(data2);
    auto convert0 = std::make_shared<op::v0::Convert>(stub0, outTypes[0]);
    auto convert1 = std::make_shared<op::v0::Convert>(stub1, outTypes[0]);
    auto convert0 = std::make_shared<op::v0::Convert>(data0, outTypes[0]);
    auto convert1 = std::make_shared<op::v0::Convert>(data1, outTypes[0]);
    auto add = std::make_shared<op::v1::Add>(convert0, convert1);
    auto relu = std::make_shared<op::v0::Relu>(add);
    auto sub = std::make_shared<op::v1::Subtract>(relu, stub2);
    auto sub = std::make_shared<op::v1::Subtract>(relu, data2);
    auto stub3 = createRollAsStub(sub);
    auto convert2 = std::make_shared<op::v0::Convert>(relu, outTypes[1]);
    return std::make_shared<ov::Model>(NodeVector{convert2, stub3}, ParameterVector{data0, data1, data2});
@ -130,12 +113,9 @@ std::shared_ptr<ov::Model> ConvertPartialInputsAndResultsFunction::initReference
    auto data0 = std::make_shared<op::v0::Parameter>(inTypes[0], input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(inTypes[1], input_shapes[1]);
    auto data2 = std::make_shared<op::v0::Parameter>(inTypes[2], input_shapes[2]);
    auto stub0 = createRollAsStub(data0);
    auto stub1 = createRollAsStub(data1);
    auto stub2 = createRollAsStub(data2);
    auto indata0 = std::make_shared<op::v0::Parameter>(inTypes[0], stub0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(inTypes[1], stub1->get_shape());
    auto indata2 = std::make_shared<op::v0::Parameter>(inTypes[2], stub2->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(inTypes[0], data0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(inTypes[1], data1->get_shape());
    auto indata2 = std::make_shared<op::v0::Parameter>(inTypes[2], data2->get_shape());
    auto convert0 = std::make_shared<ngraph::snippets::op::ConvertTruncation>(indata0, outTypes[0]);
    auto convert1 = std::make_shared<ngraph::snippets::op::ConvertTruncation>(indata1, outTypes[0]);
    auto add = std::make_shared<op::v1::Add>(convert0, convert1);
@ -143,7 +123,7 @@ std::shared_ptr<ov::Model> ConvertPartialInputsAndResultsFunction::initReference
    auto sub = std::make_shared<op::v1::Subtract>(relu, indata2);
    auto convert2 = std::make_shared<ngraph::snippets::op::ConvertTruncation>(relu, outTypes[1]);
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(
        NodeVector{stub0, stub1, stub2}, std::make_shared<ov::Model>(NodeVector{sub, convert2}, ParameterVector{indata0, indata1, indata2}));
        NodeVector{data0, data1, data2}, std::make_shared<ov::Model>(NodeVector{sub, convert2}, ParameterVector{indata0, indata1, indata2}));
    auto stub3 = createRollAsStub(subgraph);
    return std::make_shared<ov::Model>(OutputVector{subgraph->output(1), stub3->output(0)},
                                       ParameterVector{data0, data1, data2});
@ -151,8 +131,7 @@ std::shared_ptr<ov::Model> ConvertPartialInputsAndResultsFunction::initReference

std::shared_ptr<ov::Model> ConvertManyOnInputsFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(types[0], input_shapes[0]);
    auto stub0 = createRollAsStub(data0);
    std::shared_ptr<ov::Node> out = stub0;
    std::shared_ptr<ov::Node> out = data0;
    for (auto i = 1; i < types.size(); i++) {
        auto convert = std::make_shared<op::v0::Convert>(out, types[i]);
        out = convert;
@ -162,23 +141,21 @@ std::shared_ptr<ov::Model> ConvertManyOnInputsFunction::initOriginal() const {
}
std::shared_ptr<ov::Model> ConvertManyOnInputsFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(types[0], input_shapes[0]);
    auto stub0 = createRollAsStub(data0);
    auto indata0 = std::make_shared<op::v0::Parameter>(types[0], stub0->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(types[0], data0->get_shape());
    std::shared_ptr<ov::Node> out = indata0;
    for (auto i = 1; i < types.size(); i++) {
        auto convert = std::make_shared<ngraph::snippets::op::ConvertTruncation>(out, types[i]);
        out = convert;
    }
    auto relu = std::make_shared<op::v0::Relu>(out);
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{stub0},
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0},
        std::make_shared<ov::Model>(NodeVector{relu}, ParameterVector{indata0}));
    return std::make_shared<ov::Model>(NodeVector{subgraph}, ParameterVector{data0});
}

std::shared_ptr<ov::Model> ConvertManyOnOutputsFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(types[0], input_shapes[0]);
    auto stub0 = std::make_shared<ov::op::v0::Sinh>(data0);
    auto relu = std::make_shared<op::v0::Relu>(stub0);
    auto relu = std::make_shared<op::v0::Relu>(data0);
    std::shared_ptr<ov::Node> out = relu;
    for (auto i = 1; i < types.size(); i++) {
        auto convert = std::make_shared<op::v0::Convert>(out, types[i]);
@ -188,28 +165,26 @@ std::shared_ptr<ov::Model> ConvertManyOnOutputsFunction::initOriginal() const {
}
std::shared_ptr<ov::Model> ConvertManyOnOutputsFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(types[0], input_shapes[0]);
    auto stub0 = std::make_shared<ov::op::v0::Sinh>(data0);
    auto indata0 = std::make_shared<op::v0::Parameter>(types[0], stub0->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(types[0], data0->get_shape());
    auto relu = std::make_shared<op::v0::Relu>(indata0);
    std::shared_ptr<ov::Node> out = relu;
    for (auto i = 1; i < types.size(); i++) {
        auto convert = std::make_shared<ngraph::snippets::op::ConvertTruncation>(out, types[i]);
        out = convert;
    }
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{stub0},
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0},
        std::make_shared<ov::Model>(NodeVector{out}, ParameterVector{indata0}));
    return std::make_shared<ov::Model>(NodeVector{subgraph}, ParameterVector{data0});
}

std::shared_ptr<ov::Model> ConvertManyOnInputOutputFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inTypes[0], input_shapes[0]);
    auto stub0 = std::make_shared<ov::op::v0::Sinh>(data0);
    std::shared_ptr<ov::Node> out = stub0;
    std::shared_ptr<ov::Node> out = data0;
    for (auto i = 1; i < inTypes.size(); i++) {
        auto convert = std::make_shared<op::v0::Convert>(out, inTypes[i]);
        out = convert;
    }
    auto relu = std::make_shared<op::v0::Relu>(stub0);
    auto relu = std::make_shared<op::v0::Relu>(data0);
    out = relu;
    for (auto i = 0; i < outTypes.size(); i++) {
        auto convert = std::make_shared<op::v0::Convert>(out, outTypes[i]);
@ -219,20 +194,19 @@ std::shared_ptr<ov::Model> ConvertManyOnInputOutputFunction::initOriginal() cons
}
std::shared_ptr<ov::Model> ConvertManyOnInputOutputFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(inTypes[0], input_shapes[0]);
    auto stub0 = std::make_shared<ov::op::v0::Sinh>(data0);
    auto indata0 = std::make_shared<op::v0::Parameter>(inTypes[0], stub0->get_shape());
    auto indata0 = std::make_shared<op::v0::Parameter>(inTypes[0], data0->get_shape());
    std::shared_ptr<ov::Node> out = indata0;
    for (auto i = 1; i < inTypes.size(); i++) {
        auto convert = std::make_shared<op::v0::Convert>(out, inTypes[i]);
        out = convert;
    }
    auto relu = std::make_shared<op::v0::Relu>(stub0);
    auto relu = std::make_shared<op::v0::Relu>(data0);
    out = relu;
    for (auto i = 0; i < outTypes.size(); i++) {
        auto convert = std::make_shared<op::v0::Convert>(out, outTypes[i]);
        out = convert;
    }
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{stub0},
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0},
        std::make_shared<ov::Model>(NodeVector{out}, ParameterVector{indata0}));
    return std::make_shared<ov::Model>(NodeVector{subgraph}, ParameterVector{data0});
}
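
Every `initReference()` above repeats the same wrapping recipe: the outer Parameters feed a `ngraph::snippets::op::Subgraph` whose body is a separate `ov::Model` with its own Parameters mirroring the outer types and shapes. A condensed sketch of that recipe under the same assumptions (the snippets header is assumed available in-tree; `wrapAddInSubgraph` is my own name):

#include <memory>
#include <openvino/openvino.hpp>
#include "snippets/op/subgraph.hpp"  // assumed in-tree include path

std::shared_ptr<ov::Model> wrapAddInSubgraph(const ov::element::Type& prc, const ov::Shape& shape) {
    auto data0 = std::make_shared<ov::op::v0::Parameter>(prc, shape);
    auto data1 = std::make_shared<ov::op::v0::Parameter>(prc, shape);
    // Body parameters mirror the outer producers; the body is a self-contained model.
    auto indata0 = std::make_shared<ov::op::v0::Parameter>(prc, data0->get_shape());
    auto indata1 = std::make_shared<ov::op::v0::Parameter>(prc, data1->get_shape());
    auto body = std::make_shared<ov::Model>(
        ov::NodeVector{std::make_shared<ov::op::v1::Add>(indata0, indata1)},
        ov::ParameterVector{indata0, indata1});
    auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(ov::NodeVector{data0, data1}, body);
    return std::make_shared<ov::Model>(ov::NodeVector{subgraph}, ov::ParameterVector{data0, data1});
}
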
@ -26,32 +26,11 @@ std::shared_ptr<ov::Model> AddFunction::initReference() const {
                                                                ParameterVector{indata0, indata1}));
    return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> AddSinhFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
    auto sin0 = std::make_shared<ov::op::v0::Sinh>(data0);
    auto sin1 = std::make_shared<ov::op::v0::Sinh>(data1);
    auto add = std::make_shared<op::v1::Add>(sin0, sin1);
    return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> AddSinhFunction::initReference() const {
    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
    auto sin0 = std::make_shared<ov::op::v0::Sinh>(data0);
    auto sin1 = std::make_shared<ov::op::v0::Sinh>(data1);
    auto indata0 = std::make_shared<op::v0::Parameter>(precision, sin0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(precision, sin1->get_shape());
    auto add = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0, data1},
        std::make_shared<ov::Model>(NodeVector{std::make_shared<op::v1::Add>(sin0, sin1)},
                                    ParameterVector{indata0, indata1}));
    return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0, data1});
}
std::shared_ptr<ov::Model> AddSinhConstFunction::initOriginal() const {
std::shared_ptr<ov::Model> AddConstFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
    const std::vector<float> const_values = CommonTestUtils::generate_float_numbers(shape_size(input_shapes[0]), -10., 10.);
    auto const_data1 = std::make_shared<op::v0::Constant>(precision, input_shapes[0], const_values);
    auto sin0 = std::make_shared<ov::op::v0::Sinh>(data0);
    auto add = std::make_shared<op::v1::Add>(sin0, const_data1);
    auto add = std::make_shared<op::v1::Add>(data0, const_data1);
    return std::make_shared<ov::Model>(NodeVector{add}, ParameterVector{data0});
}
std::shared_ptr<ov::Model> AddRollConstFunction::initOriginal() const {
@ -105,31 +84,15 @@ std::shared_ptr<ov::Model> EltwiseThreeInputsFunction::initOriginal() const {
    return std::make_shared<ov::Model>(NodeVector{mul}, ParameterVector{data0, data1, data2});
}

std::shared_ptr<ov::Model> EltwiseThreeInputsSinhFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
    auto data2 = std::make_shared<op::v0::Parameter>(precision, input_shapes[2]);
    auto sinh0 = std::make_shared<op::v0::Sinh>(data0);
    auto sinh1 = std::make_shared<op::v0::Sinh>(data1);
    auto sinh2 = std::make_shared<op::v0::Sinh>(data2);
    const std::vector<float> const_values = CommonTestUtils::generate_float_numbers(1, -10., 10.);
    auto const_data = std::make_shared<op::v0::Constant>(ov::element::f32, Shape{1}, const_values);
    auto add = std::make_shared<op::v1::Add>(sinh0, sinh1);
    auto sub = std::make_shared<op::v1::Subtract>(sinh2, const_data);
    auto mul = std::make_shared<op::v1::Multiply>(add, sub);
    return std::make_shared<ov::Model>(NodeVector{mul}, ParameterVector{data0, data1, data2});
}
std::shared_ptr<ov::Model> EltwiseMaxNumParamsSinhFunction::initOriginal() const {
std::shared_ptr<ov::Model> EltwiseMaxNumParamsFunction::initOriginal() const {
    ParameterVector params;
    std::vector<std::shared_ptr<Node>> sinh; // 10
    for (const auto& shape : input_shapes) {
        auto param = std::make_shared<op::v0::Parameter>(precision, shape);
        params.push_back(param);
        sinh.push_back(std::make_shared<op::v0::Sinh>(param));
    }
    std::vector<std::shared_ptr<Node>> add; // 5
    for (size_t i = 0; i < input_shapes.size() / 2; i++) {
        add.push_back(std::make_shared<op::v1::Add>(sinh[i * 2], sinh[i * 2 + 1]));
        add.push_back(std::make_shared<op::v1::Add>(params[i * 2], params[i * 2 + 1]));
    }
    std::vector<std::shared_ptr<Node>> mul; // 2
    for (size_t i = 0; i < add.size() / 2; i++) {
@ -235,11 +198,7 @@ std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initOriginal() const {
    data0->set_friendly_name("data0");
    auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
    data1->set_friendly_name("data1");
    auto sinh0 = std::make_shared<op::v0::Sinh>(data0);
    sinh0->set_friendly_name("sinh0");
    auto sinh1 = std::make_shared<op::v0::Sinh>(data1);
    sinh1->set_friendly_name("sinh1");
    auto add = std::make_shared<op::v1::Add>(sinh0, sinh1);
    auto add = std::make_shared<op::v1::Add>(data0, data1);
    add->set_friendly_name("add");
    auto hswish = std::make_shared<op::v4::HSwish>(add);
    hswish->set_friendly_name("hswish");
@ -267,17 +226,14 @@ std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initReference() const {
    data0->set_friendly_name("data0");
    auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
    data1->set_friendly_name("data1");
    auto sinh0 = std::make_shared<op::v0::Sinh>(data0);
    sinh0->set_friendly_name("sinh0");
    auto sinh1 = std::make_shared<op::v0::Sinh>(data1);
    sinh1->set_friendly_name("sinh1");
    auto indata0 = std::make_shared<op::v0::Parameter>(precision, sinh0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(precision, sinh1->get_shape());

    auto indata0 = std::make_shared<op::v0::Parameter>(precision, data0->get_shape());
    auto indata1 = std::make_shared<op::v0::Parameter>(precision, data1->get_shape());
    auto add = std::make_shared<op::v1::Add>(indata0, indata1);
    add->set_friendly_name("add");
    auto hswish = std::make_shared<op::v4::HSwish>(add);
    hswish->set_friendly_name("hswish");
    auto subgraph0 = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{sinh0, sinh1},
    auto subgraph0 = std::make_shared<ngraph::snippets::op::Subgraph>(NodeVector{data0, data1},
        std::make_shared<ov::Model>(NodeVector{add, hswish},
                                    ParameterVector{indata0, indata1}));
    subgraph0->set_friendly_name("add");
@ -308,10 +264,8 @@ std::shared_ptr<ov::Model> EltwiseTwoResultsFunction::initReference() const {
std::shared_ptr<ov::Model> TwoInputsAndOutputsFunction::initOriginal() const {
    auto data0 = std::make_shared<op::v0::Parameter>(precision, input_shapes[0]);
    auto data1 = std::make_shared<op::v0::Parameter>(precision, input_shapes[1]);
    auto sin0 = std::make_shared<op::v0::Sin>(data0);
    auto sin1 = std::make_shared<op::v0::Sin>(data1);
    auto hswish = std::make_shared<op::v4::HSwish>(sin0);
    auto add = std::make_shared<op::v1::Add>(hswish, sin1);
    auto hswish = std::make_shared<op::v4::HSwish>(data0);
    auto add = std::make_shared<op::v1::Add>(hswish, data1);
    auto relu = std::make_shared<op::v0::Relu>(add);
    auto sin3 = std::make_shared<op::v0::Sin>(relu);