[CPU Tests] migrate FuseTransposeAndReorderTest to API 2.0 (#21297)

River Li 2023-11-27 18:18:23 +08:00 committed by GitHub
parent 598da6e5c0
commit 45d6aa2171
3 changed files with 139 additions and 127 deletions
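The heart of the migration is visible in the diff below: the fixture now derives from ov::test::SubgraphBaseStaticTest instead of LayerTestsUtils::LayerTestsCommon, the test parameters switch from InferenceEngine::SizeVector / InferenceEngine::Precision to ov::Shape / ov::element::Type, and the execution-graph check reads compiledModel.get_runtime_model() instead of executableNetwork.GetExecGraphInfo(). A minimal standalone sketch of that runtime-model check, using a hypothetical free helper (count_runtime_nodes is illustrative only, not part of the commit):

#include <openvino/openvino.hpp>
#include <openvino/runtime/exec_model_info.hpp>

// Count execution-graph nodes of a given runtime layer type, e.g. "Transpose".
static size_t count_runtime_nodes(const ov::CompiledModel& compiled_model, const std::string& layer_type) {
    // API 2.0 replacement for executableNetwork.GetExecGraphInfo().getFunction()
    auto runtime_model = compiled_model.get_runtime_model();
    size_t count = 0;
    for (const auto& node : runtime_model->get_ops()) {
        const auto& rt_info = node->get_rt_info();
        auto it = rt_info.find(ov::exec_model_info::LAYER_TYPE);
        if (it != rt_info.end() && it->second.as<std::string>() == layer_type)
            ++count;
    }
    return count;
}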


@@ -178,8 +178,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)",
// Plugin version was changed to ov::Version
R"(.*VersionTest.*pluginCurrentVersionIsCorrect.*)",
// Issue: 120286
R"(.*smoke_Basic/FuseTransposeAndReorderTest.CompareWithRefs.*)",
// Issue: 113703, 114763
R"(.*smoke_If/SimpleIfTest.*Cond=0.*)",
// Issue: 114765


@@ -9,51 +9,51 @@
#include <string>
#include "test_utils/cpu_test_utils.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
using FuseTransposeAndReorderParams = std::tuple<
InferenceEngine::SizeVector, // Input shape
InferenceEngine::Precision // Input precision
>;
using FuseTransposeAndReorderParams = std::tuple<ov::Shape, // Input shape
ov::element::Type // Input precision
>;
class FuseTransposeAndReorderTest : public testing::WithParamInterface<FuseTransposeAndReorderParams>, public CPUTestsBase,
virtual public LayerTestsUtils::LayerTestsCommon {
virtual public ov::test::SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<FuseTransposeAndReorderParams> obj);
protected:
void SetUp() override;
virtual void CreateGraph();
void CheckTransposeCount(size_t expectedTransposeCount);
virtual void create_model();
void check_transpose_count(size_t expectedTransposeCount);
InferenceEngine::SizeVector inputShape;
InferenceEngine::Precision inPrec;
ov::Shape input_shape;
ov::element::Type in_prec;
};
class FuseTransposeAndReorderTest1 : public FuseTransposeAndReorderTest {
protected:
void CreateGraph() override;
void create_model() override;
};
class FuseTransposeAndReorderTest2 : public FuseTransposeAndReorderTest {
protected:
void CreateGraph() override;
void create_model() override;
};
class FuseTransposeAndReorderTest3 : public FuseTransposeAndReorderTest {
protected:
void CreateGraph() override;
void create_model() override;
};
class FuseTransposeAndReorderTest4 : public FuseTransposeAndReorderTest {
protected:
void CreateGraph() override;
void create_model() override;
};
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov


@@ -3,59 +3,60 @@
//
#include "subgraph_tests/include/fuse_transpose_reorder.hpp"
#include <ov_models/preprocess/preprocess_builders.hpp>
#include <openvino/openvino.hpp>
#include "common_test_utils/node_builders/convolution.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/preprocess/preprocess_builders.hpp"
#include "openvino/openvino.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
std::string FuseTransposeAndReorderTest::getTestCaseName(testing::TestParamInfo<FuseTransposeAndReorderParams> obj) {
std::ostringstream result;
SizeVector inputShape;
Precision inPrec;
std::tie(inputShape, inPrec) = obj.param;
ov::Shape input_shape;
ov::element::Type in_prec;
std::tie(input_shape, in_prec) = obj.param;
result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
result << "Precision=" << inPrec.name();
result << "IS=" << ov::test::utils::vec2str(input_shape) << "_";
result << "Precision=" << in_prec.to_string();
return result.str();
}
void FuseTransposeAndReorderTest::CheckTransposeCount(size_t expectedTransposeCount) {
InferenceEngine::CNNNetwork execGraphInfo = executableNetwork.GetExecGraphInfo();
auto function = execGraphInfo.getFunction();
ASSERT_NE(nullptr, function);
size_t actualTransposeCount = 0;
for (const auto &node : function->get_ops()) {
void FuseTransposeAndReorderTest::check_transpose_count(size_t expectedTransposeCount) {
auto runtime_model = compiledModel.get_runtime_model();
ASSERT_NE(nullptr, runtime_model);
size_t actual_transpose_count = 0;
for (const auto &node : runtime_model->get_ops()) {
const auto & rtInfo = node->get_rt_info();
auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
auto it = rtInfo.find(paramName);
OPENVINO_ASSERT(rtInfo.end() != it);
return it->second.as<std::string>();
};
if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Transpose") {
actualTransposeCount++;
if (getExecValue(ov::exec_model_info::LAYER_TYPE) == "Transpose") {
actual_transpose_count++;
}
}
ASSERT_EQ(expectedTransposeCount, actualTransposeCount);
ASSERT_EQ(expectedTransposeCount, actual_transpose_count);
}
void FuseTransposeAndReorderTest::SetUp() {
targetDevice = ov::test::utils::DEVICE_CPU;
SKIP_IF_CURRENT_TEST_IS_DISABLED();
std::tie(inputShape, inPrec) = this->GetParam();
CreateGraph();
std::tie(input_shape, in_prec) = this->GetParam();
create_model();
}
const auto fuseTransposeAndReorderCommonParams = ::testing::Combine(
::testing::Values(SizeVector{1, 2, 3, 4}, SizeVector{1, 2, 3, 4, 5}),
::testing::Values(Precision::I8, Precision::U8)
::testing::Values(ov::Shape{1, 2, 3, 4}, ov::Shape{1, 2, 3, 4, 5}),
::testing::Values(ov::element::i8, ov::element::u8)
);
/* FuseTransposeAndReorderTest graph
/* FuseTransposeAndReorderTest model
---------
|Input |
---------
@@ -75,30 +76,29 @@ const auto fuseTransposeAndReorderCommonParams = ::testing::Combine(
---------
*/
void FuseTransposeAndReorderTest::CreateGraph() {
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrec);
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
void FuseTransposeAndReorderTest::create_model() {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(in_prec, ov::Shape(input_shape))};
auto order = inputShape.size() == 5 ? std::vector<int64_t>{0, 2, 3, 4, 1} : std::vector<int64_t>{0, 2, 3, 1};
auto memFmt = inputShape.size() == 5 ? ndhwc : nhwc;
auto order = input_shape.size() == 5 ? std::vector<int64_t>{0, 2, 3, 4, 1} : std::vector<int64_t>{0, 2, 3, 1};
auto memFmt = input_shape.size() == 5 ? ndhwc : nhwc;
auto constOrder = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order);
auto transpose = std::make_shared<ngraph::opset5::Transpose>(params[0], constOrder);
auto constOrder = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, order);
auto transpose = std::make_shared<ov::op::v1::Transpose>(params[0], constOrder);
transpose->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {});
ngraph::ResultVector results{std::make_shared<ngraph::opset5::Result>(transpose)};
function = std::make_shared<ngraph::Function>(results, params, "TransposeReorder");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(transpose)};
function = std::make_shared<ov::Model>(results, params, "TransposeReorder");
}
TEST_P(FuseTransposeAndReorderTest, CompareWithRefs) {
Run();
CheckTransposeCount(0);
run();
check_transpose_count(0);
}
INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest, fuseTransposeAndReorderCommonParams, FuseTransposeAndReorderTest::getTestCaseName);
/* FuseTransposeAndReorderTest1 graph
/* FuseTransposeAndReorderTest1 model
---------
|Input |
---------
@@ -134,40 +134,38 @@ INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest, fuseTranspose
---------
*/
void FuseTransposeAndReorderTest1::CreateGraph() {
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrec);
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
void FuseTransposeAndReorderTest1::create_model() {
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(in_prec, ov::Shape(input_shape))};
auto order = input_shape.size() == 5 ? std::vector<int64_t>{0, 2, 3, 4, 1} : std::vector<int64_t>{0, 2, 3, 1};
auto order = inputShape.size() == 5 ? std::vector<int64_t>{0, 2, 3, 4, 1} : std::vector<int64_t>{0, 2, 3, 1};
auto constOrder1 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order);
auto transpose1 = std::make_shared<ngraph::opset5::Transpose>(params[0], constOrder1);
auto memFmt1 = inputShape.size() == 5 ? ndhwc : nhwc;
auto constOrder1 = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, order);
auto transpose1 = std::make_shared<ov::op::v1::Transpose>(params[0], constOrder1);
auto memFmt1 = input_shape.size() == 5 ? ndhwc : nhwc;
transpose1->get_rt_info() = makeCPUInfo({memFmt1}, {memFmt1}, {});
auto constOrder2 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order);
auto transpose2 = std::make_shared<ngraph::opset5::Transpose>(transpose1, constOrder2);
auto memFmt2 = inputShape.size() == 5 ? ndhwc : nhwc;
auto constOrder2 = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, order);
auto transpose2 = std::make_shared<ov::op::v1::Transpose>(transpose1, constOrder2);
auto memFmt2 = input_shape.size() == 5 ? ndhwc : nhwc;
transpose2->get_rt_info() = makeCPUInfo({memFmt2}, {memFmt2}, {});
auto constOrder3 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order);
auto transpose3 = std::make_shared<ngraph::opset5::Transpose>(transpose2, constOrder3);
auto memFmt3 = inputShape.size() == 5 ? ncdhw : nchw;
auto constOrder3 = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, order);
auto transpose3 = std::make_shared<ov::op::v1::Transpose>(transpose2, constOrder3);
auto memFmt3 = input_shape.size() == 5 ? ncdhw : nchw;
transpose3->get_rt_info() = makeCPUInfo({memFmt3}, {memFmt3}, {});
auto shape = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, transpose3->get_output_shape(0));
auto reshape = std::make_shared<ngraph::opset5::Reshape>(transpose1, shape, false);
auto shape = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, transpose3->get_output_shape(0));
auto reshape = std::make_shared<ov::op::v1::Reshape>(transpose1, shape, false);
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{transpose3, reshape}, 1);
ngraph::ResultVector results{std::make_shared<ngraph::opset5::Result>(concat)};
function = std::make_shared<ngraph::Function>(results, params, "Transpose_TransposeReorderTranspose_Reshape_Concat");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
function = std::make_shared<ov::Model>(results, params, "Transpose_TransposeReorderTranspose_Reshape_Concat");
}
// Test disabled temporarily, it conflicts with TransposeFuse transformation in common optimizations step
TEST_P(FuseTransposeAndReorderTest1, DISABLED_CompareWithRefs) {
Run();
CheckTransposeCount(2);
run();
check_transpose_count(2);
}
INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest1, fuseTransposeAndReorderCommonParams, FuseTransposeAndReorderTest::getTestCaseName);
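The ngraph::builder::makeConstant calls for the permutation orders are kept in this change (only the element-type namespace moves to ov::element); the equivalent plain API 2.0 construction is ov::op::v0::Constant::create, which the Test4 hunk further below already uses for its transpose order. A small illustrative sketch for the 4D case (the parameter shape here is arbitrary):

#include <openvino/openvino.hpp>

// 'input' stands in for any 4D activation, here a plain parameter.
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2, 3, 4});
auto order = std::vector<int64_t>{0, 2, 3, 1};
auto const_order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{order.size()}, order);
auto transpose = std::make_shared<ov::op::v1::Transpose>(input, const_order);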
@@ -199,35 +197,33 @@ INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest1, fuseTranspos
---------
*/
void FuseTransposeAndReorderTest2::CreateGraph() {
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrec);
void FuseTransposeAndReorderTest2::create_model() {
auto input_shape2(input_shape);
input_shape2[input_shape2.size() - 1] *= 2;
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(in_prec, ov::Shape(input_shape)),
std::make_shared<ov::op::v0::Parameter>(in_prec, ov::Shape(input_shape2))};
auto order = input_shape.size() == 5 ? std::vector<int64_t>{0, 4, 1, 2, 3} : std::vector<int64_t>{0, 3, 1, 2};
auto inputShape2(inputShape);
inputShape2[inputShape2.size() - 1] *= 2;
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape2))};
auto order = inputShape.size() == 5 ? std::vector<int64_t>{0, 4, 1, 2, 3} : std::vector<int64_t>{0, 3, 1, 2};
auto constOrder1 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order);
auto transpose1 = std::make_shared<ngraph::opset5::Transpose>(params[0], constOrder1);
auto memFmt1 = inputShape.size() == 5 ? ndhwc : nhwc;
auto constOrder1 = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, order);
auto transpose1 = std::make_shared<ov::op::v1::Transpose>(params[0], constOrder1);
auto memFmt1 = input_shape.size() == 5 ? ndhwc : nhwc;
transpose1->get_rt_info() = makeCPUInfo({memFmt1}, {memFmt1}, {});
auto constOrder2 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order);
auto transpose2 = std::make_shared<ngraph::opset5::Transpose>(params[1], constOrder2);
auto memFmt2 = inputShape.size() == 5 ? ncdhw : nchw;
auto constOrder2 = ngraph::builder::makeConstant(ov::element::i64, {input_shape.size()}, order);
auto transpose2 = std::make_shared<ov::op::v1::Transpose>(params[1], constOrder2);
auto memFmt2 = input_shape.size() == 5 ? ncdhw : nchw;
transpose2->get_rt_info() = makeCPUInfo({memFmt2}, {memFmt2}, {});
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{transpose1, transpose2}, 1);
concat->get_rt_info() = makeCPUInfo({memFmt1, memFmt1}, {memFmt1}, {});
ngraph::ResultVector results{std::make_shared<ngraph::opset5::Result>(concat)};
function = std::make_shared<ngraph::Function>(results, params, "Transpose_Transpose_Concat");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
function = std::make_shared<ov::Model>(results, params, "Transpose_Transpose_Concat");
}
TEST_P(FuseTransposeAndReorderTest2, CompareWithRefs) {
Run();
CheckTransposeCount(1);
run();
check_transpose_count(1);
}
INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest2, fuseTransposeAndReorderCommonParams, FuseTransposeAndReorderTest::getTestCaseName);
@@ -247,44 +243,48 @@ INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest2, fuseTranspos
Result
*/
void FuseTransposeAndReorderTest3::CreateGraph() {
OPENVINO_ASSERT(inputShape.size() == 4);
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrec);
void FuseTransposeAndReorderTest3::create_model() {
OPENVINO_ASSERT(input_shape.size() == 4);
auto memFmt = nhwc;
ngraph::op::PadType padType = ngraph::op::PadType::SAME_UPPER;
InferenceEngine::SizeVector kernel{3, 3}, stride{1, 1}, dilation{1, 1};
ov::op::PadType padType = ov::op::PadType::SAME_UPPER;
ov::Shape kernel{3, 3}, stride{1, 1}, dilation{1, 1};
std::vector<ptrdiff_t> padBegin{0, 0}, padEnd{0, 0};
size_t convOutChannels = 32;
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
OPENVINO_ASSERT(inputShape[1] >= 8 && (inputShape[1] % 8 == 0));
auto convolutionNode = ngraph::builder::makeConvolution(params.front(), ngPrc, kernel, stride, padBegin,
padEnd, dilation, padType, convOutChannels);
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(in_prec, ov::Shape(input_shape))};
OPENVINO_ASSERT(input_shape[1] >= 8 && (input_shape[1] % 8 == 0));
auto convolutionNode = ov::test::utils::make_convolution(params.front(),
in_prec,
kernel,
stride,
padBegin,
padEnd,
dilation,
padType,
convOutChannels);
convolutionNode->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {});
auto sndAddIn = std::make_shared<ngraph::opset1::Parameter>(ngPrc, convolutionNode->get_output_shape(0));
auto sndAddIn = std::make_shared<ov::op::v0::Parameter>(in_prec, convolutionNode->get_output_shape(0));
params.push_back(sndAddIn);
auto add = std::make_shared<ngraph::opset1::Add>(convolutionNode->output(0), sndAddIn);
auto add = std::make_shared<ov::op::v1::Add>(convolutionNode->output(0), sndAddIn);
auto order = std::vector<int64_t>{0, 2, 3, 1};
auto constOrder = ngraph::builder::makeConstant(ngraph::element::i64, {order.size()}, order);
auto transpose = std::make_shared<ngraph::opset5::Transpose>(add, constOrder);
auto constOrder = ngraph::builder::makeConstant(ov::element::i64, {order.size()}, order);
auto transpose = std::make_shared<ov::op::v1::Transpose>(add, constOrder);
transpose->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {});
ngraph::ResultVector results{std::make_shared<ngraph::opset5::Result>(transpose)};
function = std::make_shared<ngraph::Function>(results, params, "TransposeReorder");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(transpose)};
function = std::make_shared<ov::Model>(results, params, "TransposeReorder");
}
TEST_P(FuseTransposeAndReorderTest3, CompareWithRefs) {
Run();
CheckTransposeCount(1);
run();
check_transpose_count(1);
}
const auto convSumTranposeParams = ::testing::Combine(::testing::Values(SizeVector{1, 16, 32, 35}),
::testing::Values(Precision::FP32)
const auto convSumTranposeParams = ::testing::Combine(::testing::Values(ov::Shape{1, 16, 32, 35}),
::testing::Values(ov::element::f32)
);
INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest3, convSumTranposeParams, FuseTransposeAndReorderTest::getTestCaseName);
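ngraph::builder::makeConvolution is replaced here by ov::test::utils::make_convolution from common_test_utils/node_builders/convolution.hpp, keeping the same argument order. A minimal usage sketch with the shapes this test uses (illustrative only, not part of the commit):

#include "common_test_utils/node_builders/convolution.hpp"
#include <openvino/openvino.hpp>

auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16, 32, 35});
auto conv = ov::test::utils::make_convolution(param,
                                              ov::element::f32,              // element type
                                              ov::Shape{3, 3},               // kernel
                                              ov::Shape{1, 1},               // strides
                                              std::vector<ptrdiff_t>{0, 0},  // pads_begin
                                              std::vector<ptrdiff_t>{0, 0},  // pads_end
                                              ov::Shape{1, 1},               // dilations
                                              ov::op::PadType::SAME_UPPER,
                                              32);                           // output channels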
@@ -306,27 +306,40 @@ INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest3, convSumTranp
|
result
*/
void FuseTransposeAndReorderTest4::CreateGraph() {
OPENVINO_ASSERT(inputShape.size() == 4);
const InferenceEngine::SizeVector kernel = {1, 1};
const InferenceEngine::SizeVector stride = {1, 1};
const InferenceEngine::SizeVector dilation = {1, 1};
void FuseTransposeAndReorderTest4::create_model() {
OPENVINO_ASSERT(input_shape.size() == 4);
const ov::Shape kernel = {1, 1};
const ov::Shape stride = {1, 1};
const ov::Shape dilation = {1, 1};
const std::vector<ptrdiff_t> padBegin = {0, 0};
const std::vector<ptrdiff_t> padEnd = {0, 0};
const size_t convOutChannels = 4;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrec);
auto memFmt = nhwc;
ov::ParameterVector inputParams {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(in_prec, ov::Shape(input_shape))};
const auto relu = std::make_shared<ov::op::v0::Relu>(inputParams[0]);
const auto transposeOrder = ov::op::v0::Constant::create(ov::element::i32, {4}, {0, 3, 1, 2});
const auto transpose1 = std::make_shared<ov::op::v1::Transpose>(relu, transposeOrder);
const auto conv1 = ngraph::builder::makeConvolution(transpose1, ngPrc, kernel, stride, padBegin,
padEnd, dilation, ngraph::op::PadType::AUTO, convOutChannels);
const auto conv1 = ov::test::utils::make_convolution(transpose1,
in_prec,
kernel,
stride,
padBegin,
padEnd,
dilation,
ov::op::PadType::AUTO,
convOutChannels);
conv1->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {});
const auto transpose2 = std::make_shared<ov::op::v1::Transpose>(relu, transposeOrder);
const auto conv2 = ngraph::builder::makeConvolution(transpose2, ngPrc, kernel, stride, padBegin,
padEnd, dilation, ngraph::op::PadType::AUTO, convOutChannels);
const auto conv2 = ov::test::utils::make_convolution(transpose2,
in_prec,
kernel,
stride,
padBegin,
padEnd,
dilation,
ov::op::PadType::AUTO,
convOutChannels);
conv2->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {});
const auto add = std::make_shared<ov::op::v1::Add>(conv1, conv2);
@@ -335,8 +348,8 @@ void FuseTransposeAndReorderTest4::CreateGraph() {
}
TEST_P(FuseTransposeAndReorderTest4, CompareWithRefs) {
Run();
CheckTransposeCount(0);
run();
check_transpose_count(0);
}
INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest4, convSumTranposeParams, FuseTransposeAndReorderTest::getTestCaseName);
@@ -353,4 +366,5 @@ TEST(smoke_Basic, FuseDynamicTransposeAndReorderTest) {
ASSERT_NO_THROW(core.compile_model(model, "CPU"));
}
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
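The final hunk shows only the tail of the dynamic-shape smoke check; the overall pattern is to build a model with a dynamic input shape and assert that compiling it for CPU does not throw. A self-contained sketch of that pattern with an arbitrary small transpose model (not the actual body of FuseDynamicTransposeAndReorderTest):

#include <gtest/gtest.h>
#include <openvino/openvino.hpp>

TEST(smoke_Basic_Sketch, CompileDynamicTransposeModelOnCPU) {
    // Dynamic batch and spatial dimensions (-1 marks a dynamic dimension).
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, 3, -1, -1});
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
    auto transpose = std::make_shared<ov::op::v1::Transpose>(param, order);
    auto result = std::make_shared<ov::op::v0::Result>(transpose);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});

    ov::Core core;
    ASSERT_NO_THROW(core.compile_model(model, "CPU"));
}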