[CPU Plugin][Func Test] Upgrade ConvConcatSubgraphTest to API 2.0 (#21301)

* [CPU Plugin][Func Test] Upgrade ConvConcatSubgraphTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade ConvConcatSubgraphTest, remove ngraph namespace

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

---------

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>
Xuejun Zhai 2023-11-28 14:39:29 +08:00 committed by GitHub
parent 53baeacc2e
commit 9320fa7c86
2 changed files with 97 additions and 59 deletions
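
The change in both files follows one mapping from the legacy Inference Engine/nGraph types and builders to their API 2.0 equivalents. Below is a condensed, illustrative sketch of that mapping (not part of the change itself); the shapes and parameters mirror the 2D constants used in the test, and the legacy calls are shown in comments:

    #include "common_test_utils/node_builders/convolution.hpp"
    #include "openvino/openvino.hpp"

    // API 2.0 types (were InferenceEngine::SizeVector / ngraph::op::PadType):
    std::vector<size_t> kernelSize{3, 3}, strides{2, 2}, dilation{1, 1};
    std::vector<ptrdiff_t> padBegin{1, 1}, padEnd{1, 1};
    size_t numOutChannels = 32;
    ov::op::PadType paddingType{ov::op::PadType::EXPLICIT};
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 64, 16, 16});

    // was: ngraph::builder::makeConvolution(param, ngraph::element::f32, kernelSize, strides,
    //                                       padBegin, padEnd, dilation, paddingType, numOutChannels);
    auto conv = ov::test::utils::make_convolution(param, ov::element::f32, kernelSize, strides,
                                                  padBegin, padEnd, dilation, paddingType, numOutChannels);

    // was: std::make_shared<ngraph::Function>(results, inputParams, "convolutionConcat");
    auto result = std::make_shared<ov::op::v0::Result>(conv);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param}, "convolutionConcat");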


@@ -9,34 +9,35 @@
#include <string>
#include "test_utils/cpu_test_utils.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "ov_models/builders.hpp"
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
using commonConvParams = std::tuple<
InferenceEngine::SizeVector, // Kernel size
InferenceEngine::SizeVector, // Strides
using commonConvParams = std::tuple<std::vector<size_t>, // Kernel size
std::vector<size_t>, // Strides
std::vector<ptrdiff_t>, // Pad begin
std::vector<ptrdiff_t>, // Pad end
InferenceEngine::SizeVector, // Dilation
std::vector<size_t>, // Dilation
size_t, // Num out channels
ngraph::op::PadType, // Padding type
ov::op::PadType, // Padding type
size_t // Number of groups
>;
>;
using convConcatCPUParams = std::tuple<
nodeType, // Ngraph convolution type
using convConcatCPUParams = std::tuple<nodeType, // Node convolution type
commonConvParams, // Convolution params
CPUTestUtils::CPUSpecificParams, // CPU runtime params
InferenceEngine::SizeVector, // Input shapes
ov::Shape, // Input shapes
int // Axis for concat
>;
>;
class ConvConcatSubgraphTest : public testing::WithParamInterface<convConcatCPUParams>, public CPUTestsBase, virtual public LayerTestsUtils::LayerTestsCommon {
class ConvConcatSubgraphTest : public testing::WithParamInterface<convConcatCPUParams>,
public CPUTestsBase,
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<convConcatCPUParams> obj);
@@ -45,4 +46,5 @@ protected:
std::string pluginTypeNode;
};
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov
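
On the .cpp side (next diff), the fixture switch from LayerTestsUtils::LayerTestsCommon to ov::test::SubgraphBaseStaticTest changes how the test body runs; condensed from the diff that follows:

    TEST_P(ConvConcatSubgraphTest, CompareWithRefs) {
        run();                                                     // API 2.0 entry point, was Run()
        CheckPluginRelatedResults(compiledModel, pluginTypeNode);  // was executableNetwork
    }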


@@ -2,30 +2,36 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "subgraph_tests/include/conv_concat.hpp"
#include "common_test_utils/node_builders/convolution.hpp"
#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
#include "common_test_utils/node_builders/group_convolution.hpp"
#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp"
#include "test_utils/convolution_params.hpp"
#include "test_utils/filter_cpu_info.hpp"
#include "subgraph_tests/include/conv_concat.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
std::string ConvConcatSubgraphTest::getTestCaseName(testing::TestParamInfo<convConcatCPUParams> obj) {
std::ostringstream result;
nodeType type;
commonConvParams convParams;
CPUSpecificParams cpuParams;
SizeVector inputShapes;
ov::Shape inputShapes;
int axis;
std::tie(type, convParams, cpuParams, inputShapes, axis) = obj.param;
result << "Type=" << nodeType2str(type) << "_";
SizeVector kernelSize, strides, dilation;
std::vector<size_t> kernelSize, strides, dilation;
std::vector<ptrdiff_t> padBegin, padEnd;
size_t numOutChannels, numOfGroups;
ngraph::op::PadType paddingType;
ov::op::PadType paddingType;
std::tie(kernelSize, strides, padBegin, padEnd, dilation, numOutChannels, paddingType, numOfGroups) = convParams;
result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
@@ -50,52 +56,81 @@ void ConvConcatSubgraphTest::SetUp() {
nodeType type;
commonConvParams convParams;
CPUSpecificParams cpuParams;
SizeVector inputShapes;
ov::Shape inputShapes;
int axis;
std::tie(type, convParams, cpuParams, inputShapes, axis) = this->GetParam();
pluginTypeNode = nodeType2PluginType(type);
SizeVector kernelSize, strides, dilation;
std::vector<size_t> kernelSize, strides, dilation;
std::vector<ptrdiff_t> padBegin, padEnd;
size_t numOutChannels, numOfGroups;
ngraph::op::PadType paddingType;
ov::op::PadType paddingType;
std::tie(kernelSize, strides, padBegin, padEnd, dilation, numOutChannels, paddingType, numOfGroups) = convParams;
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
selectedType += "_FP32";
selectedType += "_f32";
ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape(inputShapes)),
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape(inputShapes))};
ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, inputShapes),
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, inputShapes)};
std::vector<std::shared_ptr<ngraph::Node>> convolutionNodes(2);
std::vector<std::shared_ptr<ov::Node>> convolutionNodes(2);
switch (type) {
case nodeType::convolution : {
for (size_t conv = 0; conv < convolutionNodes.size(); conv++) {
convolutionNodes[conv] = ngraph::builder::makeConvolution(inputParams[conv], ngraph::element::f32, kernelSize, strides, padBegin,
padEnd, dilation, paddingType, numOutChannels);
convolutionNodes[conv] = ov::test::utils::make_convolution(inputParams[conv],
ov::element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels);
}
break;
}
case nodeType::convolutionBackpropData : {
for (size_t conv = 0; conv < convolutionNodes.size(); conv++) {
convolutionNodes[conv] = ngraph::builder::makeConvolutionBackpropData(inputParams[conv], ngraph::element::f32, kernelSize, strides, padBegin,
padEnd, dilation, paddingType, numOutChannels);
convolutionNodes[conv] = ov::test::utils::make_convolution_backprop_data(inputParams[conv],
ov::element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels);
}
break;
}
case nodeType::groupConvolution : {
for (size_t conv = 0; conv < convolutionNodes.size(); conv++) {
convolutionNodes[conv] = ngraph::builder::makeGroupConvolution(inputParams[conv], ngraph::element::f32, kernelSize, strides, padBegin,
padEnd, dilation, paddingType, numOutChannels, numOfGroups);
convolutionNodes[conv] = ov::test::utils::make_group_convolution(inputParams[conv],
ov::element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels,
numOfGroups);
}
break;
}
case nodeType::groupConvolutionBackpropData : {
for (size_t conv = 0; conv < convolutionNodes.size(); conv++) {
convolutionNodes[conv] = ngraph::builder::makeGroupConvolutionBackpropData(inputParams[conv], ngraph::element::f32, kernelSize,
strides, padBegin, padEnd, dilation, paddingType,
numOutChannels, numOfGroups);
convolutionNodes[conv] = ov::test::utils::make_group_convolution_backprop_data(inputParams[conv],
ov::element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels,
numOfGroups);
}
break;
}
@@ -109,36 +144,36 @@ void ConvConcatSubgraphTest::SetUp() {
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{convolutionNodes[0], convolutionNodes[1]}, axis);
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(concat)};
function = std::make_shared<ngraph::Function>(results, inputParams, "convolutionConcat");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
function = std::make_shared<ov::Model>(results, inputParams, "convolutionConcat");
}
TEST_P(ConvConcatSubgraphTest, CompareWithRefs) {
Run();
run();
CheckPluginRelatedResults(executableNetwork, pluginTypeNode);
CheckPluginRelatedResults(compiledModel, pluginTypeNode);
};
/* ============= Common Convolution Params ============= */
const ngraph::op::PadType paddingType{ngraph::op::PadType::EXPLICIT};
const ov::op::PadType paddingType{ov::op::PadType::EXPLICIT};
const size_t numOutChannels{32};
const int axis{1};
const SizeVector inputShapes2D{1, 64, 16, 16};
const SizeVector kernelSize2D{3, 3};
const SizeVector strides2D{2, 2};
const ov::Shape inputShapes2D{1, 64, 16, 16};
const std::vector<size_t> kernelSize2D{3, 3};
const std::vector<size_t> strides2D{2, 2};
const std::vector<ptrdiff_t> padBegin2D{1, 1};
const std::vector<ptrdiff_t> padEnd2D{1, 1};
const SizeVector dilation2D{1, 1};
const std::vector<size_t> dilation2D{1, 1};
commonConvParams convParams2D = commonConvParams{kernelSize2D, strides2D, padBegin2D, padEnd2D, dilation2D, numOutChannels, paddingType, 1};
commonConvParams groupConvParams2D = commonConvParams{kernelSize2D, strides2D, padBegin2D, padEnd2D, dilation2D, numOutChannels, paddingType, 2};
const SizeVector inputShapes3D{1, 64, 8, 16, 16};
const SizeVector kernelSize3D{3, 3, 3};
const SizeVector strides3D{2, 2, 2};
const ov::Shape inputShapes3D{1, 64, 8, 16, 16};
const std::vector<size_t> kernelSize3D{3, 3, 3};
const std::vector<size_t> strides3D{2, 2, 2};
const std::vector<ptrdiff_t> padBegin3D{1, 1, 1};
const std::vector<ptrdiff_t> padEnd3D{1, 1, 1};
const SizeVector dilation3D{1, 1, 1};
const std::vector<size_t> dilation3D{1, 1, 1};
commonConvParams convParams3D = commonConvParams{kernelSize3D, strides3D, padBegin3D, padEnd3D, dilation3D, numOutChannels, paddingType, 1};
commonConvParams groupConvParams3D = commonConvParams{kernelSize3D, strides3D, padBegin3D, padEnd3D, dilation3D, numOutChannels, paddingType, 2};
@@ -184,7 +219,7 @@ namespace GroupConvolutionBackpropDataDWConcat {
/* ============= GroupConvolutionBackpropData (DW 2D) ============= */
commonConvParams dwDeconvParams2D = commonConvParams{kernelSize2D, strides2D, padBegin2D, padEnd2D, dilation2D, numOutChannels, paddingType, numOutChannels};
const SizeVector inputShapesDW2D{1, 32, 16, 16};
const ov::Shape inputShapesDW2D{1, 32, 16, 16};
const std::vector<CPUSpecificParams> CPUParams2D = {
conv_sse42_dw_2D,
conv_avx2_dw_2D,
@@ -207,7 +242,7 @@ namespace GroupConvolutionDWConcat {
/* ============= GroupConvolution (DW 2D) ============= */
commonConvParams dwConvParams2D = commonConvParams{kernelSize2D, strides2D, padBegin2D, padEnd2D, dilation2D, numOutChannels, paddingType, numOutChannels};
const SizeVector inputShapesDW2D{1, 32, 16, 16};
const ov::Shape inputShapesDW2D{1, 32, 16, 16};
const std::vector<CPUSpecificParams> CPUParams2D = {
conv_sse42_dw_2D,
conv_avx2_dw_2D,
@@ -226,7 +261,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_DWGroupConvolution2D, ConvConcatSubgraphTest, par
/* ============= GroupConvolution (DW 3D) ============= */
commonConvParams dwConvParams3D = commonConvParams{kernelSize3D, strides3D, padBegin3D, padEnd3D, dilation3D, numOutChannels, paddingType, numOutChannels};
const SizeVector inputShapesDW3D{1, 32, 8, 16, 16};
const ov::Shape inputShapesDW3D{1, 32, 8, 16, 16};
const std::vector<CPUSpecificParams> CPUParams3D = {
conv_sse42_dw_3D,
conv_avx2_dw_3D,
@@ -404,4 +439,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData3D, ConvConcatSubgrap
} // namespace GroupConvolutionBackpropDataConcat
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov