[CPU Plugin][Func Test] Upgrade ConvWithZeroPointFuseSubgraphTest to API 2.0 (#21330)

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>
Authored by Xuejun Zhai on 2023-11-28 14:52:09 +08:00; committed by GitHub.
parent 164d101295
commit 441427abc8
2 changed files with 94 additions and 95 deletions

File: subgraph_tests/include/conv_with_zero_point_fuse.hpp (test class declaration)

@@ -4,23 +4,23 @@
 #pragma once

+#include <string>
 #include <tuple>
 #include <vector>
-#include <string>

-#include "test_utils/cpu_test_utils.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
 #include "ov_models/builders.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/cpu_test_utils.hpp"

 using namespace CPUTestUtils;

-namespace SubgraphTestsDefinitions {
+namespace ov {
+namespace test {

-using convConcatCPUParams = std::tuple<
-    nodeType,                     // Ngraph convolution type
-    InferenceEngine::SizeVector   // Input shapes
->;
+using convConcatCPUParams = std::tuple<nodeType,   // Node convolution type
+                                       ov::Shape   // Input shapes
+                                       >;

 // Subgraph:
 /*
@@ -44,7 +44,7 @@ using convConcatCPUParams = std::tuple<
 class ConvWithZeroPointFuseSubgraphTest : public testing::WithParamInterface<convConcatCPUParams>,
                                           public CPUTestsBase,
-                                          virtual public LayerTestsUtils::LayerTestsCommon {
+                                          virtual public SubgraphBaseStaticTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<convConcatCPUParams> obj);
@@ -53,4 +53,5 @@ protected:
     std::string pluginTypeNode;
 };

-}  // namespace SubgraphTestsDefinitions
+}  // namespace test
+}  // namespace ov
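
Note: the members the updated test relies on (targetDevice, function, compiledModel, run()) are inherited from the API 2.0 base class pulled in above via shared_test_classes/base/ov_subgraph.hpp. A minimal fixture on the same base class looks roughly like the sketch below; the fixture name, the trivial Relu model, and the include path for DEVICE_CPU are illustrative assumptions, not part of this commit:

#include <gtest/gtest.h>

#include "shared_test_classes/base/ov_subgraph.hpp"   // brings in ov::test::SubgraphBaseStaticTest
#include "common_test_utils/test_constants.hpp"       // assumed location of ov::test::utils::DEVICE_CPU
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"

// Illustrative sketch only: fixture name and model are hypothetical.
class MinimalCpuSubgraphTest : public ov::test::SubgraphBaseStaticTest {
protected:
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;  // inherited member, as in the updated SetUp() below
        // 'function' is the inherited slot for the ov::Model under test
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
        auto relu = std::make_shared<ov::op::v0::Relu>(param);
        function = std::make_shared<ov::Model>(ov::ResultVector{std::make_shared<ov::op::v0::Result>(relu)},
                                               ov::ParameterVector{param},
                                               "MinimalSketch");
    }
};

TEST_F(MinimalCpuSubgraphTest, CompareWithRefs) {
    run();  // API 2.0 entry point; replaces the old LayerTestsCommon::Run()
}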

File: the corresponding test source file (.cpp) implementing ConvWithZeroPointFuseSubgraphTest

@@ -2,19 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "ngraph/opsets/opset1.hpp"
-#include "test_utils/convolution_params.hpp"
 #include "subgraph_tests/include/conv_with_zero_point_fuse.hpp"

-using namespace InferenceEngine;
+#include "common_test_utils/node_builders/convolution.hpp"
+#include "common_test_utils/node_builders/group_convolution.hpp"
+#include "test_utils/convolution_params.hpp"
+
 using namespace CPUTestUtils;

-namespace SubgraphTestsDefinitions {
+namespace ov {
+namespace test {

 std::string ConvWithZeroPointFuseSubgraphTest::getTestCaseName(testing::TestParamInfo<convConcatCPUParams> obj) {
     std::ostringstream result;
     nodeType type;
-    SizeVector inputShapes;
+    ov::Shape inputShapes;
     std::tie(type, inputShapes) = obj.param;

     result << "Type=" << nodeType2str(type) << "_";
@@ -26,115 +28,110 @@ std::string ConvWithZeroPointFuseSubgraphTest::getTestCaseName(testing::TestPara
 void ConvWithZeroPointFuseSubgraphTest::SetUp() {
     targetDevice = ov::test::utils::DEVICE_CPU;
     nodeType type;
-    SizeVector inputShapes;
+    ov::Shape inputShapes;
     std::tie(type, inputShapes) = this->GetParam();

     pluginTypeNode = nodeType2PluginType(type);
-    const ngraph::op::PadType paddingType { ngraph::op::PadType::EXPLICIT };
+    const ov::op::PadType paddingType{ov::op::PadType::EXPLICIT};
     const size_t numOutChannels = 256;
-    const SizeVector dilation { 1, 1 };
-    const SizeVector kernelSize { 1, 1 };
-    const SizeVector strides { 1, 1 };
-    const std::vector<ptrdiff_t> padBegin { 0, 0 };
-    const std::vector<ptrdiff_t> padEnd { 0, 0 };
+    const std::vector<size_t> dilation{1, 1};
+    const std::vector<size_t> kernelSize{1, 1};
+    const std::vector<size_t> strides{1, 1};
+    const std::vector<ptrdiff_t> padBegin{0, 0};
+    const std::vector<ptrdiff_t> padEnd{0, 0};

-    selectedType = ".*_I8";
+    selectedType = ".*_i8";

-    ov::ParameterVector inputParams {std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ov::Shape(inputShapes))};
-    const auto fq = ngraph::builder::makeFakeQuantize(
-        inputParams[0],
-        ov::element::f32,
-        256,
-        {1, 1, 1, 1},
-        {-12.8f},
-        {12.7f},
-        {-12.8f},
-        {12.7f});
+    ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, inputShapes)};
+    const auto fq = ngraph::builder::makeFakeQuantize(inputParams[0],
+                                                      ov::element::f32,
+                                                      256,
+                                                      {1, 1, 1, 1},
+                                                      {-12.8f},
+                                                      {12.7f},
+                                                      {-12.8f},
+                                                      {12.7f});

     std::vector<std::shared_ptr<ngraph::Node>> branches(2);
     {
-        ngraph::Strides strides{1, 1};
-        ngraph::Shape pads_begin{0, 0}, pads_end{0, 0}, kernel{1, 1};
-        branches[0] = std::make_shared<ngraph::opset1::MaxPool>(fq,
-                                                                strides,
-                                                                pads_begin,
-                                                                pads_end,
-                                                                kernel);
+        ov::Strides strides{1, 1};
+        ov::Shape pads_begin{0, 0}, pads_end{0, 0}, kernel{1, 1};
+        branches[0] = std::make_shared<ov::op::v1::MaxPool>(fq, strides, pads_begin, pads_end, kernel);
     }
     {
-        const auto fq_conv_data = ngraph::builder::makeFakeQuantize(
-            fq,
-            ov::element::f32,
-            256,
-            {1, 1, 1, 1},
-            {-12.8f},
-            {12.7f},
-            {-12.8f},
-            {12.7f});
+        const auto fq_conv_data = ngraph::builder::makeFakeQuantize(fq,
+                                                                    ov::element::f32,
+                                                                    256,
+                                                                    {1, 1, 1, 1},
+                                                                    {-12.8f},
+                                                                    {12.7f},
+                                                                    {-12.8f},
+                                                                    {12.7f});

-        const InferenceEngine::SizeVector weights_const_shape = {numOutChannels, inputShapes[1], kernelSize[0], kernelSize[1]};
-        const auto weights_const_values = std::vector<int>(ngraph::shape_size(weights_const_shape), 1);
-        const auto weights_const = ngraph::builder::makeConstant(ov::element::i8, weights_const_shape, weights_const_values);
+        const ov::Shape weights_const_shape = {numOutChannels, inputShapes[1], kernelSize[0], kernelSize[1]};
+        const auto weights_const_values = std::vector<int>(ov::shape_size(weights_const_shape), 1);
+        const auto weights_const =
+            ngraph::builder::makeConstant(ov::element::i8, weights_const_shape, weights_const_values);

         const auto weights_convert = std::make_shared<ov::op::v0::Convert>(weights_const, ov::element::f32);

         const auto weights_multiply = std::make_shared<ov::opset10::Multiply>(
             weights_convert,
             ngraph::builder::makeConstant(ov::element::f32,
                                           {numOutChannels, 1, 1, 1},
                                           std::vector<float>(numOutChannels, 1.0)));

         switch (type) {
         case nodeType::convolution: {
-            branches[1] = ngraph::builder::makeConvolution(fq_conv_data,
-                                                           weights_multiply,
-                                                           ngraph::element::f32,
-                                                           kernelSize,
-                                                           strides,
-                                                           padBegin,
-                                                           padEnd,
-                                                           dilation,
-                                                           paddingType,
-                                                           numOutChannels);
+            branches[1] = ov::test::utils::make_convolution(fq_conv_data,
+                                                            weights_multiply,
+                                                            ov::element::f32,
+                                                            kernelSize,
+                                                            strides,
+                                                            padBegin,
+                                                            padEnd,
+                                                            dilation,
+                                                            paddingType,
+                                                            numOutChannels);
             break;
         }
         case nodeType::groupConvolution: {
-            branches[1] = ngraph::builder::makeGroupConvolution(
+            branches[1] = ov::test::utils::make_group_convolution(
                 fq_conv_data,
                 std::make_shared<ov::opset10::Reshape>(
                     weights_multiply,
                     ngraph::builder::makeConstant(
                         ov::element::i32,
                         {5},
                         std::vector<size_t>{1, numOutChannels, inputShapes[1], kernelSize[0], kernelSize[1]}),
                     true),
-                ngraph::element::f32,
+                ov::element::f32,
                 strides,
                 padBegin,
                 padEnd,
                 dilation,
                 paddingType);
             break;
         }
         default: {
             throw std::runtime_error("Subgraph concat test doesn't support this type of operation");
         }
         }
     }

     auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{branches[0], branches[1]}, 1);

-    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(concat)};
-    function = std::make_shared<ngraph::Function>(results, inputParams, "ConvWithZeroPointFuseSubgraphTest");
+    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
+    function = std::make_shared<ov::Model>(results, inputParams, "ConvWithZeroPointFuseSubgraphTest");
 }

 TEST_P(ConvWithZeroPointFuseSubgraphTest, CompareWithRefs) {
-    Run();
+    run();

-    CheckPluginRelatedResults(executableNetwork, pluginTypeNode);
+    CheckPluginRelatedResults(compiledModel, pluginTypeNode);
 };

-const SizeVector inputShapes2D = {1, 32, 136, 136};
+const ov::Shape inputShapes2D = {1, 32, 136, 136};

 const auto params2DConv = ::testing::Combine(::testing::ValuesIn({nodeType::convolution, nodeType::groupConvolution}),
                                              ::testing::Values(inputShapes2D));
@@ -144,4 +141,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvWithZeroPointFuse,
                          params2DConv,
                          ConvWithZeroPointFuseSubgraphTest::getTestCaseName);

-}  // namespace SubgraphTestsDefinitions
+}  // namespace test
+}  // namespace ov
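
Note: the run() / compiledModel pair used above wraps the plain OpenVINO API 2.0 runtime flow that replaces the old InferenceEngine names (executableNetwork, Run()). As a rough, hand-written sketch of that flow applied to the ov::Model built in SetUp(); compile_and_infer() and its arguments are hypothetical names, not part of the test code:

#include "openvino/openvino.hpp"

// Minimal sketch of the API 2.0 compile-and-infer flow, assuming a single-input model.
static ov::Tensor compile_and_infer(const std::shared_ptr<ov::Model>& model, const ov::Tensor& input) {
    ov::Core core;
    ov::CompiledModel compiled = core.compile_model(model, "CPU");  // API 2.0 counterpart of ExecutableNetwork
    ov::InferRequest request = compiled.create_infer_request();
    request.set_input_tensor(input);   // single-input subgraph, as built in SetUp()
    request.infer();                   // what run() drives, plus a comparison against reference results
    return request.get_output_tensor();
}

In the test itself, run() additionally compares the CPU output against a reference execution, and CheckPluginRelatedResults(compiledModel, pluginTypeNode) then inspects the compiled model's runtime graph for the expected fused convolution primitive (selectedType ".*_i8").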