[CPU tests] migrate sub_graph test cases to be 2.0 - part 3 (#21386)

River Li 2023-12-05 14:55:55 +08:00 committed by GitHub
parent 9c94873842
commit 2c6a2a1102
11 changed files with 229 additions and 246 deletions
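
Every file below follows the same migration pattern: InferenceEngine 1.0 entry points (CNNNetwork, ExecutableNetwork, Blob, ngraph::Function) are replaced by their OpenVINO 2.0 counterparts (ov::Model, ov::CompiledModel, ov::Tensor). A self-contained sketch of the 2.0 flow the migrated tests now share, mirroring the first test below (shapes and values are illustrative, not from this commit):

    #include <memory>
    #include <vector>

    #include "openvino/op/add.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"
    #include "openvino/openvino.hpp"

    int main() {
        // Build a trivial Parameter -> Add(+1) -> Result model with 2.0 types.
        ov::Shape shape{2, 3};
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
        float shift = 1.0f;
        auto shift_node = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1}, &shift);
        auto add = std::make_shared<ov::op::v1::Add>(param, shift_node);
        auto result = std::make_shared<ov::op::v0::Result>(add);
        auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});

        // 2.0: Model/CompiledModel/Tensor instead of CNNNetwork/ExecutableNetwork/Blob.
        ov::Core core;
        ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
        ov::InferRequest request = compiled_model.create_infer_request();

        std::vector<float> input_data(ov::shape_size(shape), 2.0f);
        // ov::Tensor can wrap caller-owned memory, so no Blob plumbing is needed.
        ov::Tensor input(ov::element::f32, shape, input_data.data());
        request.set_input_tensor(input);
        request.infer();
        ov::Tensor output = request.get_output_tensor();  // every element == 3.0f
        return 0;
    }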

View File

@@ -4,9 +4,8 @@
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
class AnyLayoutOnInputsAndOutputs : public ::testing::TestWithParam<ov::Shape> {
public:
@@ -17,18 +16,18 @@ public:
}
protected:
std::shared_ptr<ngraph::Function>
std::shared_ptr<ov::Model>
create_test_function(const ov::Shape & shape) {
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
float shift = 1.0f;
auto shift_node = std::make_shared<ngraph::op::Constant>(ov::element::f32, ov::Shape{1}, &shift);
auto shift_node = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1}, &shift);
auto add = std::make_shared<ngraph::op::v1::Add>(param, shift_node);
auto add = std::make_shared<ov::op::v1::Add>(param, shift_node);
auto result = std::make_shared<ngraph::op::Result>(add);
auto result = std::make_shared<ov::op::v0::Result>(add);
return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
void Run() {
@@ -39,38 +38,21 @@ protected:
std::vector<float> output_data(shape_size);
std::vector<float> expected_output(shape_size, 3);
// Create CNNNetwork
auto ngraph_function = create_test_function(shape);
auto cnn = InferenceEngine::CNNNetwork(ngraph_function);
// Create model
auto function = create_test_function(shape);
// Fill inputs and outputs
std::vector<std::string> input_names;
std::vector<std::string> out_names;
for (const auto& it : cnn.getInputsInfo()) {
input_names.push_back(it.first);
}
for (const auto& it : cnn.getOutputsInfo()) {
out_names.push_back(it.first);
}
auto input = ov::Tensor(ov::element::f32, shape, input_data.data());
auto output = ov::Tensor(ov::element::f32, shape, output_data.data());
BlobMap inputBlobs;
BlobMap outputBlobs;
TensorDesc tensorDescInp1(Precision::FP32, shape, Layout::ANY);
TensorDesc tensorDescOut(Precision::FP32, shape, Layout::ANY);
inputBlobs[input_names[0]] = make_shared_blob<float>(tensorDescInp1, input_data.data());
outputBlobs[out_names[0]] = make_shared_blob<float>(tensorDescOut, output_data.data());
// Load network
Core ie;
ExecutableNetwork executable_network = ie.LoadNetwork(cnn, "CPU");
// Load model
Core core;
auto compiled_model = core.compile_model(function, "CPU");
// Infer
InferRequest infer_request = executable_network.CreateInferRequest();
infer_request.SetInput(inputBlobs);
infer_request.SetOutput(outputBlobs);
infer_request.Infer();
auto infer_req = compiled_model.create_infer_request();
infer_req.set_input_tensor(input);
infer_req.set_output_tensor(output);
infer_req.infer();
ASSERT_EQ(output_data, expected_output);
}
@@ -91,4 +73,5 @@ INSTANTIATE_TEST_SUITE_P(AnyLayoutOnInputsAndOutputs,
::testing::ValuesIn(AnyLayoutOnInputsAndOutputsParams),
AnyLayoutOnInputsAndOutputs::getTestCaseName);
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
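
The deleted 1.0 code above had to collect blob names from getInputsInfo()/getOutputsInfo(); 2.0 binds tensors positionally (set_input_tensor) or by port, so the name bookkeeping disappears. A sketch of the equivalent port-based binding, reusing the compiled_model, input, and output variables from this test:

    // Port-based equivalent of set_input_tensor()/set_output_tensor().
    ov::InferRequest infer_req = compiled_model.create_infer_request();
    infer_req.set_tensor(compiled_model.input(), input);    // sole input port
    infer_req.set_tensor(compiled_model.output(), output);  // sole output port
    infer_req.infer();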

View File

@@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "common_test_utils/node_builders/eltwise.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
/*This test runs the following subgraph:
@@ -29,10 +30,8 @@
The main purpose of the test is to check the memory sharing between result and in_place edges.
*/
using namespace InferenceEngine;
using namespace ov::test;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
class SplitConcatAddInPlace : virtual public ov::test::SubgraphBaseTest {
protected:
@@ -50,17 +49,17 @@ protected:
auto split = std::make_shared<ov::op::v1::Split>(params.front(), split_axis_op, 3);
auto add_const = ngraph::builder::makeConstant(precision, {1}, std::vector<float>({1.0f}));
auto add_1 = ngraph::builder::makeEltwise(split->output(0), add_const, ngraph::helpers::EltwiseTypes::ADD);
auto result_add_1 = std::make_shared<ngraph::opset3::Result>(add_1);
auto add_2 = ngraph::builder::makeEltwise(split->output(1), add_const, ngraph::helpers::EltwiseTypes::ADD);
auto add_3 = ngraph::builder::makeEltwise(split->output(2), add_const, ngraph::helpers::EltwiseTypes::ADD);
auto add_1 = utils::makeEltwise(split->output(0), add_const, utils::EltwiseTypes::ADD);
auto result_add_1 = std::make_shared<ov::op::v0::Result>(add_1);
auto add_2 = utils::makeEltwise(split->output(1), add_const, utils::EltwiseTypes::ADD);
auto add_3 = utils::makeEltwise(split->output(2), add_const, utils::EltwiseTypes::ADD);
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{add_1, add_2, add_3}, 1);
auto result_concat = std::make_shared<ngraph::opset3::Result>(concat);
auto add_4 = ngraph::builder::makeEltwise(concat, add_const, ngraph::helpers::EltwiseTypes::ADD);
auto add_5 = ngraph::builder::makeEltwise(concat, add_const, ngraph::helpers::EltwiseTypes::ADD);
auto result_1 = std::make_shared<ngraph::opset3::Result>(add_4);
auto result_2 = std::make_shared<ngraph::opset3::Result>(add_5);
ngraph::ResultVector results = {result_1, result_2, result_add_1, result_concat};
auto result_concat = std::make_shared<ov::op::v0::Result>(concat);
auto add_4 = utils::makeEltwise(concat, add_const, utils::EltwiseTypes::ADD);
auto add_5 = utils::makeEltwise(concat, add_const, utils::EltwiseTypes::ADD);
auto result_1 = std::make_shared<ov::op::v0::Result>(add_4);
auto result_2 = std::make_shared<ov::op::v0::Result>(add_5);
ov::ResultVector results = {result_1, result_2, result_add_1, result_concat};
function = std::make_shared<ov::Model>(results, params, "Subgraph");
}
};
@@ -69,4 +68,5 @@ TEST_F(SplitConcatAddInPlace, smoke_CompareWithRefs) {
run();
}
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
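
The builder helpers here only change namespaces (ngraph::builder / ngraph::helpers to ov::test::utils), but each stands in for a couple of lines of plain 2.0 op construction. A reduced sketch of the tested topology without the helpers (one Add per Split output, then Concat; shapes illustrative):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/op/add.hpp"
    #include "openvino/op/concat.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"
    #include "openvino/op/split.hpp"

    // Split [1,3,8] into three [1,1,8] slices on axis 1, add 1 to each branch,
    // then concatenate the branches back together.
    std::shared_ptr<ov::Model> make_split_concat_add() {
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8});
        auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
        auto split = std::make_shared<ov::op::v1::Split>(param, axis, 3);
        auto one = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {1.0f});
        auto add_1 = std::make_shared<ov::op::v1::Add>(split->output(0), one);
        auto add_2 = std::make_shared<ov::op::v1::Add>(split->output(1), one);
        auto add_3 = std::make_shared<ov::op::v1::Add>(split->output(2), one);
        auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{add_1, add_2, add_3}, 1);
        auto result = std::make_shared<ov::op::v0::Result>(concat);
        return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
    }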

View File

@@ -6,12 +6,10 @@
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
/*
---------------
@@ -107,9 +105,9 @@ protected:
const auto& inShapeB = inputDynamicShapes[1];
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ElementType::f32, inShapeA)};
std::shared_ptr<Node> inputB = builder::makeConstant<float>(ElementType::f32, inShapeB.get_shape(), {}, true);
std::shared_ptr<Node> inputB = ngraph::builder::makeConstant<float>(ElementType::f32, inShapeB.get_shape(), {}, true);
auto split = builder::makeVariadicSplit(params[0], {1, 1}, 0);
auto split = ngraph::builder::makeVariadicSplit(params[0], {1, 1}, 0);
auto matMul = std::make_shared<ov::op::v0::MatMul>(split->output(0), inputB, transpA, transpB);
@@ -143,4 +141,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP32, SplitMatMulConcatTest, testParams2D_F
} // namespace
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
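
ngraph::builder::makeVariadicSplit(params[0], {1, 1}, 0) is thin sugar over ov::op::v1::VariadicSplit. Roughly (a sketch of what the helper builds, not its exact implementation), it amounts to:

    #include <memory>

    #include "openvino/op/constant.hpp"
    #include "openvino/op/variadic_split.hpp"

    // Split a {2, N} input into two {1, N} pieces along axis 0.
    std::shared_ptr<ov::Node> make_variadic_split(const ov::Output<ov::Node>& data) {
        auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
        auto lengths = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 1});
        return std::make_shared<ov::op::v1::VariadicSplit>(data, axis, lengths);
    }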

View File

@@ -6,11 +6,8 @@
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
using namespace InferenceEngine;
using namespace ov::test;
using namespace ngraph;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
/*
param1 [56] param2 [-1, -1, 768] (dynamic shape)
@@ -40,12 +37,12 @@ public:
for (auto&& shape : inputDynamicShapes) {
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape));
}
auto end = builder::makeConstant(element::i64, {1}, std::vector<int64_t>{2147483647});
auto stride = builder::makeConstant(element::i64, {1}, std::vector<int64_t>{1});
auto indices = builder::makeConstant(element::i64, {1}, std::vector<int64_t>{1});
auto axes = builder::makeConstant(element::i64, {1}, std::vector<int64_t>{0});
auto shapeOf = std::make_shared<opset9::ShapeOf>(inputParams[1]);
auto gather = std::make_shared<opset9::Gather>(shapeOf, indices, axes);
auto end = ngraph::builder::makeConstant(element::i64, {1}, std::vector<int64_t>{2147483647});
auto stride = ngraph::builder::makeConstant(element::i64, {1}, std::vector<int64_t>{1});
auto indices = ngraph::builder::makeConstant(element::i64, {1}, std::vector<int64_t>{1});
auto axes = ngraph::builder::makeConstant(element::i64, {1}, std::vector<int64_t>{0});
auto shapeOf = std::make_shared<ov::op::v3::ShapeOf>(inputParams[1]);
auto gather = std::make_shared<ov::op::v8::Gather>(shapeOf, indices, axes);
auto strided_slice = std::make_shared<ov::op::v1::StridedSlice>(inputParams.front(),
gather,
end,
@@ -56,7 +53,7 @@ public:
std::vector<int64_t>{},
std::vector<int64_t>{});
NodeVector results{strided_slice};
function = std::make_shared<Function>(results, inputParams, "StridedSliceStaticShape");
function = std::make_shared<ov::Model>(results, inputParams, "StridedSliceStaticShape");
}
};
@@ -64,4 +61,5 @@ TEST_F(StridedSliceZeroDimsTest, smoke_CompareWithRefs) {
run();
}
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
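
ov::op::v1::StridedSlice takes data, begin, end, and strides inputs followed by up to five mask vectors; since the hunk above truncates the call, here is the argument layout for orientation (the mask contents are placeholders, not necessarily the test's actual values):

    auto strided_slice = std::make_shared<ov::op::v1::StridedSlice>(
        inputParams.front(),      // data: the [56] parameter
        gather,                   // begin: dim 1 of the dynamic input's shape
        end,                      // end: the INT64-max sentinel constant
        stride,                   // strides: 1
        std::vector<int64_t>{0},  // begin_mask
        std::vector<int64_t>{0},  // end_mask
        std::vector<int64_t>{},   // new_axis_mask
        std::vector<int64_t>{},   // shrink_axis_mask
        std::vector<int64_t>{});  // ellipsis_mask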

View File

@@ -12,7 +12,8 @@
using namespace CPUTestUtils;
using namespace ov::opset9;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
class SubgraphSnippetSerializationTest : public ::testing::Test, public CPUTestsBase {};
@@ -146,4 +147,5 @@ TEST_F(SubgraphSnippetSerializationTest, smoke_SerializeSubgraphWithResultAs1stO
ASSERT_TRUE(results.valid) << results.message;
}
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov

View File

@@ -4,11 +4,9 @@
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/filter_cpu_info.hpp"
#include "ie_ngraph_utils.hpp"
#include "openvino/core/type/element_type.hpp"
#include "utils/rt_info/memory_formats_attribute.hpp"
#include "utils/general_utils.h"
#include <cstdint>
namespace CPUTestUtils {

View File

@@ -3,12 +3,11 @@
//
#include "cpu_test_utils.hpp"
#include "ie_ngraph_utils.hpp"
#include "openvino/core/type/element_type.hpp"
#include "utils/rt_info/memory_formats_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
#include "utils/general_utils.h"
#include <cstdint>
#include "utils/rt_info/memory_formats_attribute.hpp"
namespace CPUTestUtils {
const char* CPUTestsBase::any_type = "any_type";
@@ -147,14 +146,14 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:
OPENVINO_ASSERT(rtInfo.end() != it);
return it->second.as<std::string>();
};
auto getExecValueOutputsLayout = [] (const std::shared_ptr<ngraph::Node>& node) -> std::string {
auto getExecValueOutputsLayout = [] (const std::shared_ptr<ov::Node>& node) -> std::string {
auto rtInfo = node->get_rt_info();
auto it = rtInfo.find(ExecGraphInfoSerialization::OUTPUT_LAYOUTS);
auto it = rtInfo.find(ov::exec_model_info::OUTPUT_LAYOUTS);
OPENVINO_ASSERT(rtInfo.end() != it);
return it->second.as<std::string>();
};
// skip policy
auto should_be_skipped = [] (const ngraph::PartialShape &partialShape, cpu_memory_format_t fmt) {
auto should_be_skipped = [] (const ov::PartialShape &partialShape, cpu_memory_format_t fmt) {
if (partialShape.is_dynamic()) {
return false;
}
@@ -165,7 +164,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:
return skip_unsquized_1D || permule_of_1;
};
if (nodeType.count(getExecValue(ExecGraphInfoSerialization::LAYER_TYPE))) {
if (nodeType.count(getExecValue(ov::exec_model_info::LAYER_TYPE))) {
ASSERT_LE(inFmts.size(), node->get_input_size());
ASSERT_LE(outFmts.size(), node->get_output_size());
for (size_t i = 0; i < inFmts.size(); i++) {
@@ -211,7 +210,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:
}
for (size_t i = 0; i < fmtsNum; i++) {
const auto actualOutputMemoryFormat = getExecValue(ExecGraphInfoSerialization::OUTPUT_LAYOUTS);
const auto actualOutputMemoryFormat = getExecValue(ov::exec_model_info::OUTPUT_LAYOUTS);
const auto shape = node->get_output_partial_shape(i);
if (should_be_skipped(shape, outFmts[i]))
@@ -219,7 +218,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:
ASSERT_EQ(outFmts[i], cpu_str2fmt(actualOutputMemoryFormats[i].c_str()));
}
auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE);
auto primType = getExecValue(ov::exec_model_info::IMPL_TYPE);
ASSERT_TRUE(primTypeCheck(primType)) << "primType is unexpected : " << primType << " Expected : " << selectedType;
}
@@ -270,11 +269,11 @@ std::string CPUTestsBase::getPrimitiveType() const {
#else
std::string CPUTestsBase::getPrimitiveType() const {
std::string isaType;
if (InferenceEngine::with_cpu_x86_avx512f()) {
if (ov::with_cpu_x86_avx512f()) {
isaType = "jit_avx512";
} else if (InferenceEngine::with_cpu_x86_avx2()) {
} else if (ov::with_cpu_x86_avx2()) {
isaType = "jit_avx2";
} else if (InferenceEngine::with_cpu_x86_sse42()) {
} else if (ov::with_cpu_x86_sse42()) {
isaType = "jit_sse42";
} else {
isaType = "ref";
@@ -285,13 +284,13 @@ std::string CPUTestsBase::getPrimitiveType() const {
std::string CPUTestsBase::getISA(bool skip_amx) const {
std::string isaType;
if (!skip_amx && InferenceEngine::with_cpu_x86_avx512_core_amx()) {
if (!skip_amx && ov::with_cpu_x86_avx512_core_amx()) {
isaType = "avx512_amx";
} else if (InferenceEngine::with_cpu_x86_avx512f()) {
} else if (ov::with_cpu_x86_avx512f()) {
isaType = "avx512";
} else if (InferenceEngine::with_cpu_x86_avx2()) {
} else if (ov::with_cpu_x86_avx2()) {
isaType = "avx2";
} else if (InferenceEngine::with_cpu_x86_sse42()) {
} else if (ov::with_cpu_x86_sse42()) {
isaType = "sse42";
} else {
isaType = "";
@@ -337,25 +336,25 @@ CPUTestsBase::makeCPUInfo(const std::vector<cpu_memory_format_t>& inFmts,
return cpuInfo;
}
std::shared_ptr<ngraph::Function>
CPUTestsBase::makeNgraphFunction(const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode, std::string name) {
std::shared_ptr<ov::Model>
CPUTestsBase::makeNgraphFunction(const ov::element::Type &ngPrc, ov::ParameterVector &params,
const std::shared_ptr<ov::Node> &lastNode, std::string name) {
auto newLastNode = modifyGraph(ngPrc, params, lastNode);
ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < newLastNode->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(newLastNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(newLastNode->output(i)));
return std::make_shared<ngraph::Function>(results, params, name);
return std::make_shared<ov::Model>(results, params, name);
}
std::shared_ptr<ngraph::Node>
CPUTestsBase::modifyGraph(const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) {
std::shared_ptr<ov::Node>
CPUTestsBase::modifyGraph(const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
lastNode->get_rt_info() = getCPUInfo();
return lastNode;
}
std::string CPUTestsBase::makeSelectedTypeStr(std::string implString, ngraph::element::Type_t elType) {
std::string CPUTestsBase::makeSelectedTypeStr(std::string implString, ov::element::Type_t elType) {
implString.push_back('_');
implString += ov::element::Type(elType).get_type_name();
return implString;
@@ -418,7 +417,7 @@ std::vector<CPUSpecificParams> filterCPUSpecificParams(const std::vector<CPUSpec
std::vector<CPUSpecificParams> filteredParamsVector = paramsVector;
if (!InferenceEngine::with_cpu_x86_avx512f()) {
if (!ov::with_cpu_x86_avx512f()) {
for (auto& param : filteredParamsVector) {
adjustBlockedFormatByIsa(std::get<0>(param));
adjustBlockedFormatByIsa(std::get<1>(param));
@@ -441,7 +440,7 @@ inline void CheckNumberOfNodesWithTypeImpl(std::shared_ptr<const ov::Model> func
return it->second.as<std::string>();
};
if (nodeTypes.count(getExecValue(ExecGraphInfoSerialization::LAYER_TYPE))) {
if (nodeTypes.count(getExecValue(ov::exec_model_info::LAYER_TYPE))) {
actualNodeCount++;
}
}
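
The InferenceEngine::with_cpu_x86_* predicates map one-to-one onto ov:: functions declared in openvino/runtime/system_conf.hpp (the header this commit adds to cpu_test_utils.hpp). A standalone sketch of the probe order used by getPrimitiveType():

    #include <string>

    #include "openvino/runtime/system_conf.hpp"

    // Highest available x86 ISA wins; fall back to the reference implementation.
    std::string expected_primitive_type() {
        if (ov::with_cpu_x86_avx512f()) return "jit_avx512";
        if (ov::with_cpu_x86_avx2())    return "jit_avx2";
        if (ov::with_cpu_x86_sse42())   return "jit_sse42";
        return "ref";
    }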

View File

@@ -4,13 +4,15 @@
#pragma once
#include <string>
#include "ie_system_conf.h"
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/exec_model_info.hpp"
#include "openvino/runtime/system_conf.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
#include <exec_graph_info.hpp>
#include <openvino/runtime/compiled_model.hpp>
// To be removed
#include "ie_system_conf.h"
#include "exec_graph_info.hpp"
namespace CPUTestUtils {
typedef enum {
@@ -128,13 +130,13 @@ public:
const std::vector<cpu_memory_format_t>& outFmts,
const std::vector<std::string>& priority);
//TODO: change to setter method
static std::string makeSelectedTypeStr(std::string implString, ngraph::element::Type_t elType);
static std::string makeSelectedTypeStr(std::string implString, ov::element::Type_t elType);
void updateSelectedType(const std::string& primitiveType, const ov::element::Type netType, const ov::AnyMap& config);
CPUInfo getCPUInfo() const;
std::shared_ptr<ngraph::Function> makeNgraphFunction(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode,
std::shared_ptr<ov::Model> makeNgraphFunction(const ov::element::Type &ngPrc,
ov::ParameterVector &params,
const std::shared_ptr<ov::Node> &lastNode,
std::string name);
void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, const std::set<std::string>& nodeType) const;
@@ -153,9 +155,9 @@ protected:
* @param lastNode The last node of the initial graph.
* @return The last node of the modified graph.
*/
virtual std::shared_ptr<ngraph::Node> modifyGraph(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode);
virtual std::shared_ptr<ov::Node> modifyGraph(const ov::element::Type &ngPrc,
ov::ParameterVector &params,
const std::shared_ptr<ov::Node> &lastNode);
virtual bool primTypeCheck(std::string primType) const;
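
modifyGraph is the hook derived fixtures override to append nodes after the tested op (the fusing utilities in the next files build on it). A hypothetical subclass (class name and the extra Add are invented for illustration; assumes the Add and Constant op headers):

    class MyCpuSubgraphTest : public CPUTestsBase {
    protected:
        std::shared_ptr<ov::Node> modifyGraph(const ov::element::Type& ngPrc,
                                              ov::ParameterVector& params,
                                              const std::shared_ptr<ov::Node>& lastNode) override {
            // The base version attaches the CPU-specific rt_info to lastNode.
            auto node = CPUTestsBase::modifyGraph(ngPrc, params, lastNode);
            auto one = ov::op::v0::Constant::create(ngPrc, ov::Shape{1}, {1.0f});
            return std::make_shared<ov::op::v1::Add>(node, one);  // extra post-op
        }
    };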

View File

@@ -8,7 +8,6 @@ using namespace LayerTestsDefinitions;
namespace CPUTestUtils {
std::string CpuTestWithFusing::getTestCaseName(fusingSpecificParams params) {
std::ostringstream result;
std::vector<std::string> fusedOps;
@@ -25,10 +24,10 @@ std::string CpuTestWithFusing::getTestCaseName(fusingSpecificParams params) {
return result.str();
}
std::shared_ptr<ngraph::Node>
CpuTestWithFusing::modifyGraph(const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) {
std::shared_ptr<ov::Node>
CpuTestWithFusing::modifyGraph(const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
CPUTestsBase::modifyGraph(ngPrc, params, lastNode);
std::shared_ptr<ngraph::Node> retNode = lastNode;
std::shared_ptr<ov::Node> retNode = lastNode;
if (postOpMgrPtr) {
retNode = postOpMgrPtr->addPostOps(ngPrc, params, lastNode);
}
@@ -42,7 +41,7 @@ void CpuTestWithFusing::CheckFusingResults(const std::shared_ptr<const ov::Model
for (const auto & op : function->get_ops()) {
const auto &rtInfo = op->get_rt_info();
auto getExecValue = [](const std::string &paramName, const ngraph::Node::RTMap& rtInfo) -> std::string {
auto getExecValue = [](const std::string &paramName, const ov::Node::RTMap& rtInfo) -> std::string {
auto it = rtInfo.find(paramName);
OPENVINO_ASSERT(rtInfo.end() != it);
return it->second.as<std::string>();
@@ -76,9 +75,9 @@ void CpuTestWithFusing::CheckPluginRelatedResultsImpl(const std::shared_ptr<cons
CheckFusingResults(function, nodeType);
}
std::shared_ptr<ngraph::Node>
postFunctionMgr::addPostOps(const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) const {
auto clonedPostFunction = ngraph::clone_function(*_pFunction);
std::shared_ptr<ov::Node>
postFunctionMgr::addPostOps(const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) const {
auto clonedPostFunction = ov::clone_model(*_pFunction);
clonedPostFunction->set_friendly_name(_pFunction->get_friendly_name());
clonedPostFunction->replace_node(clonedPostFunction->get_parameters()[0], lastNode);
return clonedPostFunction->get_result()->get_input_node_shared_ptr(0);
@@ -90,9 +89,9 @@ std::string postFunctionMgr::getFusedOpsNames() const {
postNodesMgr::postNodesMgr(std::vector<postNodeBuilder> postNodes) : _postNodes(std::move(postNodes)) {}
std::shared_ptr<ngraph::Node>
postNodesMgr::addPostOps(const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) const {
std::shared_ptr<ngraph::Node> tmpNode = lastNode;
std::shared_ptr<ov::Node>
postNodesMgr::addPostOps(const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) const {
std::shared_ptr<ov::Node> tmpNode = lastNode;
postNodeConfig cfg{lastNode, tmpNode, ngPrc, params};

View File

@@ -4,51 +4,55 @@
#pragma once
#include "common_test_utils/node_builders/activation.hpp"
#include "cpu_test_utils.hpp"
#include <memory>
#include <shared_test_classes/single_layer/activation.hpp>
#include "openvino/runtime/system_conf.hpp"
#include "ov_models/utils/data_utils.hpp"
#include "shared_test_classes/single_layer/activation.hpp"
using namespace ov::test;
namespace CPUTestUtils {
struct postNodeConfig {
const std::shared_ptr<ngraph::Node> target;
std::shared_ptr<ngraph::Node> input;
const ngraph::element::Type& type;
ngraph::ParameterVector& params;
const std::shared_ptr<ov::Node> target;
std::shared_ptr<ov::Node> input;
const ov::element::Type& type;
ov::ParameterVector& params;
};
struct postNodeBuilder {
std::function<std::shared_ptr<ngraph::Node>(postNodeConfig& cfg)> makeNode;
std::function<std::shared_ptr<ov::Node>(postNodeConfig& cfg)> makeNode;
std::string name;
};
class postOpMgr {
public:
virtual std::shared_ptr<ngraph::Node> addPostOps(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode) const = 0;
virtual std::shared_ptr<ov::Node> addPostOps(const ov::element::Type& ngPrc,
ov::ParameterVector& params,
const std::shared_ptr<ov::Node>& lastNode) const = 0;
virtual std::string getFusedOpsNames() const = 0;
virtual ~postOpMgr() = default;
};
class postFunctionMgr : public postOpMgr {
public:
postFunctionMgr(std::shared_ptr<ngraph::Function> function) : _pFunction(std::move(function)) {}
std::shared_ptr<ngraph::Node> addPostOps(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode) const override;
postFunctionMgr(std::shared_ptr<ov::Model> function) : _pFunction(std::move(function)) {}
std::shared_ptr<ov::Node> addPostOps(const ov::element::Type& ngPrc,
ov::ParameterVector& params,
const std::shared_ptr<ov::Node>& lastNode) const override;
std::string getFusedOpsNames() const override;
private:
std::shared_ptr<ngraph::Function> _pFunction;
std::shared_ptr<ov::Model> _pFunction;
};
class postNodesMgr : public postOpMgr {
public:
postNodesMgr(std::vector<postNodeBuilder> postNodes);
std::shared_ptr<ngraph::Node> addPostOps(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode) const override;
std::shared_ptr<ov::Node> addPostOps(const ov::element::Type& ngPrc,
ov::ParameterVector& params,
const std::shared_ptr<ov::Node>& lastNode) const override;
std::string getFusedOpsNames() const override;
private:
@@ -68,9 +72,9 @@ protected:
/**
* @brief This function adds post operations.
*/
std::shared_ptr<ngraph::Node> modifyGraph(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode) override;
std::shared_ptr<ov::Node> modifyGraph(const ov::element::Type& ngPrc,
ov::ParameterVector& params,
const std::shared_ptr<ov::Node>& lastNode) override;
void CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov::Model>& function, const std::set<std::string>& nodeType) const override;
@@ -99,25 +103,25 @@ static int getChannelAxis(const ov::AxisSet &axes, bool keep_dims) {
return channelAxis;
}
static int getFusingAxis(const std::shared_ptr<ngraph::Node>& node) {
if (std::dynamic_pointer_cast<const ngraph::opset1::MatMul>(node)) {
static int getFusingAxis(const std::shared_ptr<ov::Node>& node) {
if (std::dynamic_pointer_cast<const ov::op::v0::MatMul>(node)) {
return node->get_output_partial_shape(0).size() - 1; // last dimension
} else if (const auto reduce = std::dynamic_pointer_cast<const ngraph::op::util::ArithmeticReductionKeepDims>(node)) {
} else if (const auto reduce = std::dynamic_pointer_cast<const ov::op::util::ArithmeticReductionKeepDims>(node)) {
return getChannelAxis(reduce->get_reduction_axes(), reduce->get_keep_dims());
} else if (const auto reduce = std::dynamic_pointer_cast<const ngraph::op::util::LogicalReductionKeepDims>(node)) {
} else if (const auto reduce = std::dynamic_pointer_cast<const ov::op::util::LogicalReductionKeepDims>(node)) {
return getChannelAxis(reduce->get_reduction_axes(), reduce->get_keep_dims());
} else {
return 1; // second dimension
}
}
static ngraph::Shape generatePerChannelShape(const std::shared_ptr<ngraph::Node>& node) {
static ov::Shape generatePerChannelShape(const std::shared_ptr<ov::Node>& node) {
const auto shape = node->get_output_partial_shape(0);
if (shape.size() == 0)
OPENVINO_THROW("If shape.size() == 0 then PerTensor fusing tests are N/A");
if (shape.size() == 1)
OPENVINO_THROW("If shape.size() == 1 then Granularity can be PerTensor only");
ngraph::Shape perChannelShape(shape.size(), 1);
ov::Shape perChannelShape(shape.size(), 1);
const auto channelAxis = getFusingAxis(node);
if (channelAxis >= 0)
perChannelShape[channelAxis] = shape[channelAxis].get_length();
@@ -130,176 +134,176 @@ const auto emptyFusingSpec = fusingSpecificParams{nullptr, {}};
const auto fusingRelu = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Relu);
return utils::make_activation(cfg.input, cfg.type, utils::Relu);
}, "Relu"}}), {"Relu"}};
const auto fusingElu = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Elu, {}, {2.0f});
return utils::make_activation(cfg.input, cfg.type, utils::Elu, {}, {2.0f});
}, "Elu"}}), {"Elu"}};
const auto fusingGelu = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Gelu);
return utils::make_activation(cfg.input, cfg.type, utils::Gelu);
}, "Gelu"}}), {"Gelu"}};
const auto fusingSigmoid = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sigmoid);
return utils::make_activation(cfg.input, cfg.type, utils::Sigmoid);
}, "Sigmoid"}}), {"Sigmoid"}};
const auto fusingClamp = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Clamp, {}, {3.0f, 6.0f});
return utils::make_activation(cfg.input, cfg.type, utils::Clamp, {}, {3.0f, 6.0f});
}, "Clamp"}}), {"Clamp"}};
const auto fusingTanh = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Tanh);
return utils::make_activation(cfg.input, cfg.type, utils::Tanh);
}, "Tanh"}}), {"Tanh"}};
const auto fusingAbs = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Abs);
return utils::make_activation(cfg.input, cfg.type, utils::Abs);
}, "Abs"}}), {"Abs"}};
const auto fusingSqrt = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sqrt);
return utils::make_activation(cfg.input, cfg.type, utils::Sqrt);
}, "Sqrt"}}), {"Sqrt"}};
const auto fusingPReluPerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
auto data = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(ngraph::shape_size(newShape));
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::LeakyRelu, newShape, data);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto data = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(ov::shape_size(newShape));
return utils::make_activation(cfg.input, cfg.type, utils::LeakyRelu, newShape, data);
}, "PRelu(PerChannel)"}}), {"PRelu"}};
const auto fusingPReluPerTensor = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape shape(1, 1);
auto data = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(ngraph::shape_size(shape));
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::LeakyRelu, shape, data);
ov::Shape shape(1, 1);
auto data = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(ov::shape_size(shape));
return utils::make_activation(cfg.input, cfg.type, utils::LeakyRelu, shape, data);
}, "PRelu(PerTensor)"}}), {"PRelu"}};
const auto fusingSwish = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Swish, {}, {1.0f});
return utils::make_activation(cfg.input, cfg.type, utils::Swish, {}, {1.0f});
}, "Swish"}}), {"Swish"}};
const auto fusingSoftPlus = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::SoftPlus, {}, {});
return utils::make_activation(cfg.input, cfg.type, utils::SoftPlus, {}, {});
}, "SoftPlus"}}), {"SoftPlus"}};
const auto fusingHSwish = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::HSwish, {}, {});
return utils::make_activation(cfg.input, cfg.type, utils::HSwish, {}, {});
}, "HSwish"}}), {"HSwish"}};
const auto fusingMish = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Mish, {}, {});
return utils::make_activation(cfg.input, cfg.type, utils::Mish, {}, {});
}, "Mish"}}), {"Mish"}};
const auto fusingHSigmoid = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::HSigmoid);
return utils::make_activation(cfg.input, cfg.type, utils::HSigmoid);
}, "HSigmoid"}}), {"HSigmoid"}};
const auto fusingReluAdd = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Relu);
return utils::make_activation(cfg.input, cfg.type, utils::Relu);
}, "Relu"},
{[](postNodeConfig& cfg){
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
}, "Add(PerChannel)"}}), {"Relu", "Add"}};
const auto fusingReluScaleShift = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Relu);
return utils::make_activation(cfg.input, cfg.type, utils::Relu);
}, "Relu"},
{[](postNodeConfig& cfg){
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
}, "Multiply(PerChannel)"},
{[](postNodeConfig& cfg){
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
}, "Add(PerChannel)"}}), {"Relu", "Add"}};
const auto fusingScaleShift = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
}, "Multiply(PerChannel)"},
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
}, "Add(PerChannel)"}}), {"Add"} };
const auto fusingClampRoundAddRelu = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Clamp, {}, {3.0f, 6.0f});
return utils::make_activation(cfg.input, cfg.type, utils::Clamp, {}, {3.0f, 6.0f});
}, "Clamp"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::RoundHalfToEven);
return utils::make_activation(cfg.input, cfg.type, utils::RoundHalfToEven);
}, "RoundHalfToEven"},
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape(1, 1);
ov::Shape secondMultInShape(1, 1);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Add>(cfg.input, secondMultInput);
}, "AddPerTensor"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Relu);
return utils::make_activation(cfg.input, cfg.type, utils::Relu);
}, "Relu"}}), {"Clamp", "Round", "Add", "Relu"}};
const auto fusingScaleShiftAndFakeQuantizePerChannel = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
}, "Multiply(PerChannel)"},
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
}, "Add(PerChannel)"},
{[](postNodeConfig& cfg){
auto localPrc = cfg.input->get_element_type();
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
// auto newShape = ngraph::Shape(cfg.inputNode->get_output_partial_shape(0).size(), 1);
ov::Shape newShape = generatePerChannelShape(cfg.target);
// auto newShape = ov::Shape(cfg.inputNode->get_output_partial_shape(0).size(), 1);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerChannel)"}}), {"FakeQuantize"}};
const auto fusingFakeQuantizePerTensor = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
auto localPrc = cfg.input->get_element_type();
ngraph::Shape newShape(cfg.input->get_output_partial_shape(0).size(), 1);
ov::Shape newShape(cfg.input->get_output_partial_shape(0).size(), 1);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerTensor)"}}), {"FakeQuantize"} };
const auto fusingFakeQuantizePerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
auto localPrc = cfg.input->get_element_type();
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerChannel)"}}), {"FakeQuantize"}};
const auto fusingFakeQuantizePerChannelRelu = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
auto localPrc = cfg.input->get_element_type();
ngraph::Shape newShape = generatePerChannelShape(cfg.target);
ov::Shape newShape = generatePerChannelShape(cfg.target);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerChannel)"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Relu);
return utils::make_activation(cfg.input, cfg.type, utils::Relu);
}, "Relu"}}), {"FakeQuantize", "Relu"}};
const auto fusingFQPerChannelSigmoidFQPerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
@@ -308,19 +312,19 @@ const auto fusingFQPerChannelSigmoidFQPerChannel = fusingSpecificParams{std::mak
auto shape = cfg.input->get_output_partial_shape(0);
if (shape.size() == 1)
OPENVINO_THROW("If shape.size() == 1 then Granularity can be PerTensor only");
ngraph::Shape newShape(shape.size(), 1);
ov::Shape newShape(shape.size(), 1);
newShape[1] = shape[1].get_length();
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerChannel)"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sigmoid);
return utils::make_activation(cfg.input, cfg.type, utils::Sigmoid);
}, "Sigmoid"},
{[](postNodeConfig& cfg){
auto localPrc = cfg.input->get_element_type();
auto shape = cfg.input->get_output_partial_shape(0);
if (shape.size() == 1)
OPENVINO_THROW("If shape.size() == 1 then Granularity can be PerTensor only");
ngraph::Shape newShape(shape.size(), 1);
ov::Shape newShape(shape.size(), 1);
newShape[1] = shape[1].get_length();
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerChannel)"}}), {"FakeQuantize", "Sigmoid", "FakeQuantize"}};
@@ -331,30 +335,30 @@ const auto fusingFQPerChannelSigmoidFQPerTensor = fusingSpecificParams{std::make
auto shape = cfg.input->get_output_partial_shape(0);
if (shape.size() == 1)
OPENVINO_THROW("If shape.size() == 1 then Granularity can be PerTensor only");
ngraph::Shape newShape(shape.size(), 1);
ov::Shape newShape(shape.size(), 1);
newShape[1] = shape[1].get_length();
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerChannel)"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sigmoid);
return utils::make_activation(cfg.input, cfg.type, utils::Sigmoid);
}, "Sigmoid"},
{[](postNodeConfig& cfg){
auto localPrc = cfg.input->get_element_type();
auto shape = cfg.input->get_output_partial_shape(0);
if (shape.size() == 1)
OPENVINO_THROW("If shape.size() == 1 then Granularity can be PerTensor only");
ngraph::Shape newShape(shape.size(), 1);
ov::Shape newShape(shape.size(), 1);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerTensor)"}}), {"FakeQuantize", "Sigmoid", "FakeQuantize"}};
const auto fusingFakeQuantizePerTensorRelu = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg) {
auto localPrc = cfg.input->get_element_type();
auto newShape = ngraph::Shape(cfg.input->get_output_partial_shape(0).size(), 1);
auto newShape = ov::Shape(cfg.input->get_output_partial_shape(0).size(), 1);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerTensor)"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Relu);
return utils::make_activation(cfg.input, cfg.type, utils::Relu);
}, "Relu"}}), {"FakeQuantize", "Relu"}};
const auto fusingSum = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
@@ -362,7 +366,7 @@ const auto fusingSum = fusingSpecificParams{std::make_shared<postNodesMgr>(std::
auto shape = cfg.input->get_output_partial_shape(0);
ov::ParameterVector newParams{std::make_shared<ov::op::v0::Parameter>(cfg.type, shape)};
cfg.params.insert(cfg.params.end(), newParams.begin(), newParams.end());
return std::make_shared<ngraph::opset1::Add>(cfg.input, newParams[0]);
return std::make_shared<ov::op::v1::Add>(cfg.input, newParams[0]);
}, "Add(Parameters)"}}), {"Add"}};
const auto fusingSumEluFQ = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
@@ -370,116 +374,116 @@ const auto fusingSumEluFQ = fusingSpecificParams{std::make_shared<postNodesMgr>(
auto shape = cfg.input->get_output_partial_shape(0);
ov::ParameterVector newParams{std::make_shared<ov::op::v0::Parameter>(cfg.type, shape)};
cfg.params.insert(cfg.params.end(), newParams.begin(), newParams.end());
return std::make_shared<ngraph::opset1::Add>(cfg.input, newParams[0]);
return std::make_shared<ov::op::v1::Add>(cfg.input, newParams[0]);
}, "Add(Parameters)"},
{[](postNodeConfig& cfg){
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Elu, {}, {2.0f});
return utils::make_activation(cfg.input, cfg.type, utils::Elu, {}, {2.0f});
}, "Elu"},
{[](postNodeConfig& cfg) {
auto localPrc = cfg.input->get_element_type();
auto newShape = ngraph::Shape(cfg.input->get_output_partial_shape(0).size(), 1);
auto newShape = ov::Shape(cfg.input->get_output_partial_shape(0).size(), 1);
return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
}, "FakeQuantize(PerTensor)"}}), {"Add", "Elu", "FakeQuantize"}};
const auto fusingMultiplyPerTensor = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape(1, 1);
ov::Shape secondMultInShape(1, 1);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::op::v1::Multiply>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, secondMultInput);
}, "Multiply(PerTensor)"}}), {"Multiply"}};
const auto fusingMultiplyPerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape = generatePerChannelShape(cfg.target);
ov::Shape secondMultInShape = generatePerChannelShape(cfg.target);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Multiply>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, secondMultInput);
}, "Multiply(PerChannel)"}}), {"Multiply"}};
const auto fusingMultiplyAddPerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.input);
ov::Shape newShape = generatePerChannelShape(cfg.input);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
}, "Multiply(PerChannel)"},
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.input);
ov::Shape newShape = generatePerChannelShape(cfg.input);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
}, "Add(PerChannel)"}}), {"Add"} };
const auto fusingAddPerTensor = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape(1, 1);
ov::Shape secondMultInShape(1, 1);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Add>(cfg.input, secondMultInput);
}, "Add(PerTensor)"}}), {"Add"}};
const auto fusingAddPerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape = generatePerChannelShape(cfg.target);
ov::Shape secondMultInShape = generatePerChannelShape(cfg.target);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Add>(cfg.input, secondMultInput);
}, "Add(PerChannel)"}}), {"Add"}};
const auto fusingSubtractPerTensor = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape(1, 1);
ov::Shape secondMultInShape(1, 1);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Subtract>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Subtract>(cfg.input, secondMultInput);
}, "Subtract(PerTensor)"}}), {"Subtract"}};
const auto fusingSubtractPerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape = generatePerChannelShape(cfg.target);
ov::Shape secondMultInShape = generatePerChannelShape(cfg.target);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Subtract>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Subtract>(cfg.input, secondMultInput);
}, "Subtract(PerChannel)"}}), {"Subtract"}};
const auto fusingDividePerTensor = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape(1, 1);
ov::Shape secondMultInShape(1, 1);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Divide>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Divide>(cfg.input, secondMultInput);
}, "Divide(PerTensor)"}}), {"Divide"}};
const auto fusingDividePerChannel = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
ngraph::Shape secondMultInShape = generatePerChannelShape(cfg.target);
ov::Shape secondMultInShape = generatePerChannelShape(cfg.target);
auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Divide>(cfg.input, secondMultInput);
return std::make_shared<ov::op::v1::Divide>(cfg.input, secondMultInput);
}, "Divide(PerChannel)"}}), {"Divide"}};
const auto fusingPRelu1D = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
auto shape = cfg.input->get_output_partial_shape(0);
ngraph::Shape newShape({static_cast<size_t>(shape[1].get_length())});
auto data = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(ngraph::shape_size(newShape));
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::LeakyRelu, newShape, data);
ov::Shape newShape({static_cast<size_t>(shape[1].get_length())});
auto data = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(ov::shape_size(newShape));
return utils::make_activation(cfg.input, cfg.type, utils::LeakyRelu, newShape, data);
}, "PRelu1D"}}), {"PRelu"}};
const auto fusingPRelu1DScaleShift = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg){
auto shape = cfg.input->get_output_partial_shape(0);
ngraph::Shape newShape({static_cast<size_t>(shape[1].get_length())});
auto data = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(ngraph::shape_size(newShape));
return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::LeakyRelu, newShape, data);
ov::Shape newShape({static_cast<size_t>(shape[1].get_length())});
auto data = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(ov::shape_size(newShape));
return utils::make_activation(cfg.input, cfg.type, utils::LeakyRelu, newShape, data);
}, "PRelu1D"},
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.input);
ov::Shape newShape = generatePerChannelShape(cfg.input);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
}, "Multiply(PerChannel)"},
{[](postNodeConfig& cfg) {
ngraph::Shape newShape = generatePerChannelShape(cfg.input);
ov::Shape newShape = generatePerChannelShape(cfg.input);
auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
}, "Add(PerChannel)"}}), {"Add"} };
const auto fusingBias = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
{[](postNodeConfig& cfg) {
size_t last_dim = cfg.input->get_output_partial_shape(0).rbegin()->get_length();
auto bias = ngraph::builder::makeConstant(cfg.type, ngraph::Shape{last_dim}, std::vector<float>{}, true);
return std::make_shared<ngraph::opset1::Add>(cfg.input, bias);
auto bias = ngraph::builder::makeConstant(cfg.type, ov::Shape{last_dim}, std::vector<float>{}, true);
return std::make_shared<ov::op::v1::Add>(cfg.input, bias);
}, "fusingBias"}}), {"Add"}};
} // namespace CPUTestUtils
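
Each fusing pattern above pairs a builder lambda (which appends the post-op to cfg.input) with the node names expected to appear fused in the execution graph. For illustration only, a hypothetical new pattern written in the migrated 2.0 style (fusingPow is invented, not part of this commit; assumes "openvino/op/power.hpp" is included):

    const auto fusingPow = fusingSpecificParams{std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
            {[](postNodeConfig& cfg){
                // Per-tensor Power post-op: square the producer's output.
                auto exponent = ov::op::v0::Constant::create(cfg.type, ov::Shape{1}, {2.0f});
                return std::make_shared<ov::op::v1::Power>(cfg.input, exponent);
            }, "Power"}}), {"Power"}};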

View File

@@ -4,7 +4,6 @@
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/filter_cpu_info.hpp"
#include "ie_ngraph_utils.hpp"
#include "openvino/core/type/element_type.hpp"
#include "utils/rt_info/memory_formats_attribute.hpp"
#include "utils/general_utils.h"