Upgrade CPU func tests to 2.0 (#21344)

* [CPU Plugin][Func Test] Upgrade ConvPoolActivTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade Conv3dReshapeTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade ConvsAndSums to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade FQScaleshiftWithConstantShiftTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade FullyConnectedStridedInputsOutputsTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade FuseScaleShiftAndFakeQuantizeTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] Upgrade FuseSplitConcatPairToInterpolateTest to API 2.0

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] remove ngraph namespace

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

* [CPU Plugin][Func Test] fix error

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>

---------

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>
This commit is contained in:
Xuejun Zhai 2023-11-29 17:32:05 +08:00 committed by GitHub
parent 1b43b3c566
commit c1a28d0942
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 354 additions and 280 deletions

View File

@ -2,21 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "common_test_utils/node_builders/convolution.hpp"
#include "common_test_utils/node_builders/group_convolution.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
using Conv3dReshapeTestParams = std::tuple<nodeType,
size_t>;
using Conv3dReshapeTestParams = std::tuple<nodeType, size_t>;
class Conv3dReshapeTest : public testing::WithParamInterface<Conv3dReshapeTestParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<Conv3dReshapeTestParams> obj) {
nodeType conv;
@ -31,7 +31,7 @@ public:
}
protected:
std::string cpuNodeType;
std::string cpuNodeType;
void SetUp() override {
targetDevice = ov::test::utils::DEVICE_CPU;
@ -41,7 +41,8 @@ protected:
cpuNodeType = nodeType2PluginType(convType);
ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1024, 64})};
ov::ParameterVector inputParams{
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1024, 64})};
std::shared_ptr<Node> conv;
const std::vector<size_t> kernelSize = {1};
@ -53,43 +54,64 @@ protected:
const size_t numOfGroups = 2;
const op::PadType paddingType = op::PadType::EXPLICIT;
switch (convType) {
case nodeType::convolution : {
conv = builder::makeConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels);
break;
}
case nodeType::groupConvolution : {
conv = builder::makeGroupConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels,
numOfGroups);
break;
}
default: {
throw std::runtime_error("Conv3dReshapeTest doesn't support this type of operation");
}
case nodeType::convolution: {
conv = ov::test::utils::make_convolution(inputParams[0],
element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels);
break;
}
case nodeType::groupConvolution: {
conv = ov::test::utils::make_group_convolution(inputParams[0],
element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels,
numOfGroups);
break;
}
default: {
throw std::runtime_error("Conv3dReshapeTest doesn't support this type of operation");
}
}
ResultVector results;
for (size_t i = 0; i < numOut; i++) {
auto mockNode = std::make_shared<opset5::Multiply>(conv->output(0), opset5::Constant::create(element::f32, Shape{1}, {1}));
results.push_back(std::make_shared<opset5::Result>(mockNode));
auto mockNode =
std::make_shared<ov::op::v1::Multiply>(conv->output(0),
ov::op::v0::Constant::create(element::f32, Shape{1}, {1}));
results.push_back(std::make_shared<ov::op::v0::Result>(mockNode));
}
function = std::make_shared<ngraph::Function>(results, inputParams, "Conv3dReshape");
function = std::make_shared<ov::Model>(results, inputParams, "Conv3dReshape");
}
};
TEST_P(Conv3dReshapeTest, CompareWithRefs) {
Run();
run();
}
namespace {
const std::vector<nodeType> convType = { nodeType::convolution, nodeType::groupConvolution };
const std::vector<size_t> numOut = { 1, 2, 5 };
const auto conv3dReshapeParams = ::testing::Combine(::testing::ValuesIn(convType),
::testing::ValuesIn(numOut));
const std::vector<nodeType> convType = {nodeType::convolution, nodeType::groupConvolution};
const std::vector<size_t> numOut = {1, 2, 5};
const auto conv3dReshapeParams = ::testing::Combine(::testing::ValuesIn(convType), ::testing::ValuesIn(numOut));
INSTANTIATE_TEST_SUITE_P(smoke_Conv3dReshapeTest, Conv3dReshapeTest, conv3dReshapeParams, Conv3dReshapeTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Conv3dReshapeTest,
Conv3dReshapeTest,
conv3dReshapeParams,
Conv3dReshapeTest::getTestCaseName);
} // namespace
} // namespace
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov

View File

@ -2,21 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils/fusing_test_utils.hpp"
#include "common_test_utils/node_builders/convolution.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/fusing_test_utils.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
using ConvPoolActivTestParams = fusingSpecificParams;
class ConvPoolActivTest : public testing::WithParamInterface<ConvPoolActivTestParams>, public CpuTestWithFusing,
virtual public LayerTestsUtils::LayerTestsCommon {
class ConvPoolActivTest : public testing::WithParamInterface<fusingSpecificParams>,
public CpuTestWithFusing,
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConvPoolActivTestParams> obj) {
static std::string getTestCaseName(testing::TestParamInfo<fusingSpecificParams> obj) {
fusingSpecificParams fusingParams = obj.param;
std::ostringstream result;
@ -32,7 +32,8 @@ protected:
fusingSpecificParams fusingParams = this->GetParam();
std::tie(postOpMgrPtr, fusedOps) = fusingParams;
ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 40, 40})};
ov::ParameterVector inputParams{
std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 40, 40})};
std::shared_ptr<Node> conv;
{
@ -43,7 +44,15 @@ protected:
const std::vector<size_t> dilation = {1, 1};
const size_t numOutChannels = 16;
const op::PadType paddingType = op::PadType::EXPLICIT;
conv = builder::makeConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels);
conv = ov::test::utils::make_convolution(inputParams[0],
element::f32,
kernelSize,
strides,
padBegin,
padEnd,
dilation,
paddingType,
numOutChannels);
}
std::shared_ptr<Node> pooling;
{
@ -52,8 +61,14 @@ protected:
const std::vector<size_t> padBegin = {0, 0};
const std::vector<size_t> padEnd = {0, 0};
const op::PadType paddingType = op::PadType::EXPLICIT;
ngraph::op::RoundingType roundingType = ngraph::op::RoundingType::CEIL;
pooling = std::make_shared<ov::op::v1::MaxPool>(conv, strides, padBegin, padEnd, kernelSize, roundingType, paddingType);
ov::op::RoundingType roundingType = ov::op::RoundingType::CEIL;
pooling = std::make_shared<ov::op::v1::MaxPool>(conv,
strides,
padBegin,
padEnd,
kernelSize,
roundingType,
paddingType);
}
selectedType = makeSelectedTypeStr(getPrimitiveType(), element::f32);
@ -66,27 +81,26 @@ protected:
if (isaType == "")
return primType == "ref";
else
return primType == makeSelectedTypeStr(std::string("jit_") + isaType, element::f32)
|| primType == makeSelectedTypeStr(std::string("brgconv_") + isaType, element::f32);
return primType == makeSelectedTypeStr(std::string("jit_") + isaType, element::f32) ||
primType == makeSelectedTypeStr(std::string("brgconv_") + isaType, element::f32);
}
};
TEST_P(ConvPoolActivTest, CompareWithRefs) {
Run();
CheckPluginRelatedResults(executableNetwork, "Convolution");
run();
CheckPluginRelatedResults(compiledModel, "Convolution");
}
namespace {
const std::vector<fusingSpecificParams> fusingParamsSet {
emptyFusingSpec,
fusingRelu,
fusingSwish,
fusingSigmoid
};
const std::vector<fusingSpecificParams> fusingParamsSet{emptyFusingSpec, fusingRelu, fusingSwish, fusingSigmoid};
INSTANTIATE_TEST_SUITE_P(smoke_Check, ConvPoolActivTest, ::testing::ValuesIn(fusingParamsSet), ConvPoolActivTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Check,
ConvPoolActivTest,
::testing::ValuesIn(fusingParamsSet),
ConvPoolActivTest::getTestCaseName);
} // namespace
} // namespace
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov

View File

@ -2,14 +2,15 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/node_builders/activation.hpp"
#include "common_test_utils/node_builders/eltwise.hpp"
#include "ov_models/builders.hpp"
#include "ov_models/utils/ov_helpers.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace ngraph;
using ngraph::helpers::EltwiseTypes;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
/* We can't fuse EltwiseAdd several times into one convolution
@ -28,53 +29,103 @@ namespace SubgraphTestsDefinitions {
RESULT
*/
class ConvsAndSums : virtual public LayerTestsUtils::LayerTestsCommon {
class ConvsAndSums : virtual public SubgraphBaseStaticTest {
protected:
void SetUp() override {
InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32;
ov::element::Type netPrecision = ov::element::f32;
targetDevice = ov::test::utils::DEVICE_CPU;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, 512, 32}),
std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, 128, 32})};
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape{1, 512, 32}),
std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape{1, 128, 32})};
auto FQ = ngraph::builder::makeFakeQuantize(params[1], ngPrc, 256, {}, {-2.8215785026550293}, {2.799535036087036},
{-2.8215785026550293}, {2.799535036087036});
auto FQ_0 = ngraph::builder::makeFakeQuantize(params[1], ngPrc, 256, {}, {-5.031249523162842}, {4.991942882537842},
{-5.031249523162842}, {4.991942882537842});
auto FQ = ngraph::builder::makeFakeQuantize(params[1],
netPrecision,
256,
{},
{-2.8215785026550293},
{2.799535036087036},
{-2.8215785026550293},
{2.799535036087036});
auto FQ_0 = ngraph::builder::makeFakeQuantize(params[1],
netPrecision,
256,
{},
{-5.031249523162842},
{4.991942882537842},
{-5.031249523162842},
{4.991942882537842});
auto Add_0 = ngraph::builder::makeEltwise(FQ_0, FQ, EltwiseTypes::ADD);
auto Add_0 = ov::test::utils::makeEltwise(FQ_0, FQ, ov::test::utils::EltwiseTypes::ADD);
auto FQ_1 = ngraph::builder::makeFakeQuantize(params[0], ngPrc, 256, {}, {-2.122633457183838}, {2.106050491333008},
{-2.122633457183838}, {2.106050491333008});
auto FQ_1 = ngraph::builder::makeFakeQuantize(params[0],
netPrecision,
256,
{},
{-2.122633457183838},
{2.106050491333008},
{-2.122633457183838},
{2.106050491333008});
auto Const = ngraph::builder::makeConstant(ngPrc, {128, 512, 1}, std::vector<float>{-0.0512377955019474}, false);
auto FQ_2 = ngraph::builder::makeFakeQuantize(Const, ngPrc, 255, {128, 1, 1}, {-0.56387859582901}, {0.56387859582901},
{-0.56387859582901}, {0.56387859582901});
auto Const =
ngraph::builder::makeConstant(netPrecision, {128, 512, 1}, std::vector<float>{-0.0512377955019474}, false);
auto FQ_2 = ngraph::builder::makeFakeQuantize(Const,
netPrecision,
255,
{128, 1, 1},
{-0.56387859582901},
{0.56387859582901},
{-0.56387859582901},
{0.56387859582901});
auto Conv = std::make_shared<ngraph::opset1::Convolution>(FQ_1, FQ_2, Strides{1}, CoordinateDiff{0}, CoordinateDiff{0}, Strides{1});
auto Conv = std::make_shared<ov::op::v1::Convolution>(FQ_1,
FQ_2,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
auto Add = ngraph::builder::makeEltwise(Add_0, Conv, EltwiseTypes::ADD);
auto Add = ov::test::utils::makeEltwise(Add_0, Conv, ov::test::utils::EltwiseTypes::ADD);
auto FQ_11 = ngraph::builder::makeFakeQuantize(params[0], ngPrc, 256, {}, {-3.2050728797912598}, {3.1800332069396973},
{-3.2050728797912598}, {3.1800332069396973});
auto FQ_11 = ngraph::builder::makeFakeQuantize(params[0],
netPrecision,
256,
{},
{-3.2050728797912598},
{3.1800332069396973},
{-3.2050728797912598},
{3.1800332069396973});
auto Const_ = ngraph::builder::makeConstant(ngPrc, {128, 512, 1}, std::vector<float>{-0.001183388871140778}, false);
auto FQ_22 = ngraph::builder::makeFakeQuantize(Const_, ngPrc, 255, {128, 1, 1}, {-0.325547456741333}, {0.325547456741333},
{-0.325547456741333}, {0.325547456741333});
auto Const_ = ngraph::builder::makeConstant(netPrecision,
{128, 512, 1},
std::vector<float>{-0.001183388871140778},
false);
auto FQ_22 = ngraph::builder::makeFakeQuantize(Const_,
netPrecision,
255,
{128, 1, 1},
{-0.325547456741333},
{0.325547456741333},
{-0.325547456741333},
{0.325547456741333});
auto Conv2 = std::make_shared<ngraph::opset1::Convolution>(FQ_11, FQ_22, Strides{1}, CoordinateDiff{0}, CoordinateDiff{0}, Strides{1});
auto Add2 = ngraph::builder::makeEltwise(Add, Conv2, EltwiseTypes::ADD);
auto relu3 = ngraph::builder::makeActivation(Add2, ngPrc, ngraph::helpers::ActivationTypes::Relu);
auto Conv2 = std::make_shared<ov::op::v1::Convolution>(FQ_11,
FQ_22,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
auto Add2 = ov::test::utils::makeEltwise(Add, Conv2, ov::test::utils::EltwiseTypes::ADD);
auto relu3 = ov::test::utils::make_activation(Add2, netPrecision, ov::test::utils::ActivationTypes::Relu);
auto result = std::make_shared<ngraph::opset1::Result>(relu3);
function = std::make_shared<ngraph::Function>(result, params, "SimpleNet");
auto result = std::make_shared<ov::op::v0::Result>(relu3);
function = std::make_shared<ov::Model>(result, params, "SimpleNet");
}
};
TEST_F(ConvsAndSums, smoke_CompareWithRefs) {
Run();
run();
}
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov

View File

@ -2,40 +2,38 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/node_builders/eltwise.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
using FQScaleshiftWithConstantShiftTestParams = Precision;
class FQScaleshiftWithConstantShiftTest : public testing::WithParamInterface<FQScaleshiftWithConstantShiftTestParams>,
public CPUTestsBase,
virtual public LayerTestsUtils::LayerTestsCommon {
class FQScaleshiftWithConstantShiftTest : public testing::WithParamInterface<ov::element::Type>,
public CPUTestsBase,
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<FQScaleshiftWithConstantShiftTestParams> obj) {
Precision netPrecision;
static std::string getTestCaseName(testing::TestParamInfo<ov::element::Type> obj) {
ov::element::Type netPrecision;
netPrecision = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "netPRC=" << netPrecision.get_type_name() << "_";
return result.str();
}
protected:
void SetUp() override {
targetDevice = ov::test::utils::DEVICE_CPU;
Precision netPrecision;
ov::element::Type netPrecision;
netPrecision = this->GetParam();
const auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
ov::Shape mmShape{25, 14, 14, 768};
SizeVector mmShape2{768, 2304};
SizeVector sumShape{1, 1, 1, 2304};
ov::Shape mmShape2{768, 2304};
ov::Shape sumShape{1, 1, 1, 2304};
// avoid eliminations
std::vector<int> mmInData(768 * 2304);
@ -44,16 +42,16 @@ protected:
std::vector<int> sumConstData(1 * 1 * 1 * 2304);
std::iota(sumConstData.begin(), sumConstData.end(), 0);
auto constShift = ngraph::opset5::Constant::create(ngraph::element::f32, sumShape, sumConstData);
auto mmConst = ngraph::opset5::Constant::create(ngraph::element::f32, mmShape2, mmInData);
ov::ParameterVector mmParams {std::make_shared<ov::op::v0::Parameter>(ngPrec, mmShape)};
auto constShift = ov::op::v0::Constant::create(ov::element::f32, sumShape, sumConstData);
auto mmConst = ov::op::v0::Constant::create(ov::element::f32, mmShape2, mmInData);
ov::ParameterVector mmParams{std::make_shared<ov::op::v0::Parameter>(netPrecision, mmShape)};
const auto mm = std::make_shared<ov::op::v0::MatMul>(mmParams[0], mmConst, false, false);
auto sum = ngraph::builder::makeEltwise(constShift, mm, ngraph::helpers::EltwiseTypes::ADD);
auto fq = ngraph::builder::makeFakeQuantize(sum, ngraph::element::f32, 256, {}, {-8.0f}, {7.0f}, {-8.0f}, {7.0f});
auto sum = ov::test::utils::makeEltwise(constShift, mm, ov::test::utils::EltwiseTypes::ADD);
auto fq = ngraph::builder::makeFakeQuantize(sum, ov::element::f32, 256, {}, {-8.0f}, {7.0f}, {-8.0f}, {7.0f});
ngraph::ParameterVector inputParams = {mmParams[0]};
function = makeNgraphFunction(ngPrec, inputParams, fq, "FQScaleshiftWithConstantShift");
ov::ParameterVector inputParams = {mmParams[0]};
function = makeNgraphFunction(netPrecision, inputParams, fq, "FQScaleshiftWithConstantShift");
}
};
@ -77,12 +75,13 @@ protected:
*/
TEST_P(FQScaleshiftWithConstantShiftTest, CompareWithRefs) {
Run();
run();
}
namespace {
INSTANTIATE_TEST_SUITE_P(smoke_Check, FQScaleshiftWithConstantShiftTest,
::testing::Values(Precision::FP32),
::testing::Values(ov::element::f32),
FQScaleshiftWithConstantShiftTest::getTestCaseName);
} // namespace
} // namespace SubgraphTestsDefinitions
} // namespace
} // namespace test
} // namespace ov

View File

@ -2,31 +2,32 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/partial_shape.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "openvino/core/partial_shape.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace CPUTestUtils;
namespace SubgraphTestsDefinitions {
namespace ov {
namespace test {
using FullyConnectedStridedInputsOutputsTestParams = std::tuple<Precision,
size_t>; // rank (2D or 3D)
using FullyConnectedStridedInputsOutputsTestParams = std::tuple<ov::element::Type,
size_t>; // rank (2D or 3D)
class FullyConnectedStridedInputsOutputsTest : public testing::WithParamInterface<FullyConnectedStridedInputsOutputsTestParams>,
public CPUTestsBase,
virtual public LayerTestsUtils::LayerTestsCommon {
class FullyConnectedStridedInputsOutputsTest
: public testing::WithParamInterface<FullyConnectedStridedInputsOutputsTestParams>,
public CPUTestsBase,
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<FullyConnectedStridedInputsOutputsTestParams> obj) {
Precision netPrecision;
ov::element::Type netPrecision;
size_t rank;
std::tie(netPrecision, rank) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "netPRC=" << netPrecision.get_type_name() << "_";
result << "rank=" << rank;
return result.str();
@ -35,39 +36,42 @@ public:
protected:
void SetUp() override {
targetDevice = ov::test::utils::DEVICE_CPU;
Precision netPrecision;
ov::element::Type netPrecision;
size_t rank;
std::tie(netPrecision, rank) = this->GetParam();
const auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto bcastTo3D = [](SizeVector& shape) {
auto bcastTo3D = [](ov::Shape& shape) {
shape.insert(shape.begin(), 1);
};
SizeVector splitShape{2, 16};
if (rank == 3) bcastTo3D(splitShape);
ov::Shape splitShape{2, 16};
if (rank == 3)
bcastTo3D(splitShape);
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrec, ov::Shape(splitShape))};
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape(splitShape))};
const auto splitAxis = rank == 3 ? 1 : 0;
auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{splitAxis});
auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64,
ov::Shape{},
std::vector<int64_t>{splitAxis});
auto split = std::make_shared<ov::op::v1::Split>(params[0], split_axis_op, 2);
SizeVector fcWeightsShape{16, 8};
if (rank == 3) bcastTo3D(fcWeightsShape);
ov::Shape fcWeightsShape{16, 8};
if (rank == 3)
bcastTo3D(fcWeightsShape);
auto tensor = ov::test::utils::create_and_fill_tensor(ngPrec, fcWeightsShape);
auto tensor = ov::test::utils::create_and_fill_tensor(netPrecision, fcWeightsShape);
auto fc1secondInput = std::make_shared<ov::op::v0::Constant>(tensor);
const auto fc1 = std::make_shared<ov::op::v0::MatMul>(split->output(0), fc1secondInput, false, false);
auto tensorB = ov::test::utils::create_and_fill_tensor(ngPrec, fcWeightsShape);
auto tensorB = ov::test::utils::create_and_fill_tensor(netPrecision, fcWeightsShape);
auto fc2secondInputB = std::make_shared<ov::op::v0::Constant>(tensorB);
const auto fc2 = std::make_shared<ov::op::v0::MatMul>(split->output(1), fc2secondInputB, false, false);
const auto fcConcatAxis = rank == 3 ? 1 : 0;
const auto concatMatMuls = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{fc1, fc2}, fcConcatAxis);
function = makeNgraphFunction(ngPrec, params, concatMatMuls, "FullyConnectedStridedInputsOutputs");
function = makeNgraphFunction(netPrecision, params, concatMatMuls, "FullyConnectedStridedInputsOutputs");
}
};
@ -96,16 +100,18 @@ protected:
*/
TEST_P(FullyConnectedStridedInputsOutputsTest, CompareWithRefs) {
Run();
run();
}
namespace {
INSTANTIATE_TEST_SUITE_P(smoke_Check, FullyConnectedStridedInputsOutputsTest,
::testing::Combine(::testing::Values(Precision::FP32, Precision::BF16),
INSTANTIATE_TEST_SUITE_P(smoke_Check,
FullyConnectedStridedInputsOutputsTest,
::testing::Combine(::testing::Values(ov::element::f32, ov::element::bf16),
::testing::Values(2, 3)),
FullyConnectedStridedInputsOutputsTest::getTestCaseName);
} // namespace
} // namespace
} // namespace SubgraphTestsDefinitions
} // namespace test
} // namespace ov

View File

@ -2,26 +2,24 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace ngraph;
using FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc;
namespace CPUSubgraphTestsDefinitions {
typedef std::tuple<
Shape, // Input shape
element::Type, // Input precision
std::pair<std::vector<float>, std::vector<float>>, // ScaleShift scales and shifts
std::vector<std::vector<float>>, // Quantize intervals
std::string // Device name
> FuseScaleShiftAndQuantizeTuple;
namespace ov {
namespace test {
typedef std::tuple<Shape, // Input shape
element::Type, // Input precision
std::pair<std::vector<float>, std::vector<float>>, // ScaleShift scales and shifts
std::vector<std::vector<float>>, // Quantize intervals
std::string // Device name
>
FuseScaleShiftAndQuantizeTuple;
class FuseScaleShiftAndFakeQuantizeTest : public testing::WithParamInterface<FuseScaleShiftAndQuantizeTuple>,
virtual public LayerTestsUtils::LayerTestsCommon {
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<FuseScaleShiftAndQuantizeTuple> &obj) {
static std::string getTestCaseName(const testing::TestParamInfo<FuseScaleShiftAndQuantizeTuple>& obj) {
Shape inputShape;
element::Type inputPrecision;
std::pair<std::vector<float>, std::vector<float>> scaleShift;
@ -30,13 +28,11 @@ public:
std::tie(inputShape, inputPrecision, scaleShift, quantizeIntervals, targetName) = obj.param;
std::ostringstream results;
results << "IS=" << inputShape
<< "_InPRC=" << inputPrecision
<< "_Scale=" << vector_to_string(scaleShift.first)
<< "_Shift=" << vector_to_string(scaleShift.second)
<< "_Intervals=";
results << "IS=" << inputShape << "_InPRC=" << inputPrecision
<< "_Scale=" << ngraph::vector_to_string(scaleShift.first)
<< "_Shift=" << ngraph::vector_to_string(scaleShift.second) << "_Intervals=";
for (const auto& vecInt : quantizeIntervals) {
results << vector_to_string(vecInt) << ",";
results << ngraph::vector_to_string(vecInt) << ",";
}
results << "targetDevice=" << targetName;
@ -46,73 +42,72 @@ public:
protected:
void SetUp() override {
threshold = 0.1f;
Shape inputShape;
element::Type inputPrecision;
std::pair<std::vector<float>, std::vector<float>> scaleShift;
std::vector<std::vector<float>> quantizeIntervals;
std::tie(inputShape, inputPrecision, scaleShift, quantizeIntervals, targetDevice) = this->GetParam();
const auto param = std::make_shared<opset6::Parameter>(inputPrecision, inputShape);
const auto param = std::make_shared<ov::op::v0::Parameter>(inputPrecision, inputShape);
Shape constShape = Shape(inputShape.size(), 1);
constShape[1] = scaleShift.second.size();
const auto subtract = std::make_shared<opset1::Subtract>(
param,
std::make_shared<opset6::Constant>(inputPrecision, constShape, scaleShift.second));
const auto multiply = std::make_shared<opset1::Multiply>(
param,
std::make_shared<opset6::Constant>(inputPrecision, constShape, scaleShift.first));
const auto subtract = std::make_shared<ov::op::v1::Subtract>(
param,
std::make_shared<ov::op::v0::Constant>(inputPrecision, constShape, scaleShift.second));
const auto multiply = std::make_shared<ov::op::v1::Multiply>(
param,
std::make_shared<ov::op::v0::Constant>(inputPrecision, constShape, scaleShift.first));
Shape inConstShape = Shape(inputShape.size(), 1);
inConstShape[1] = quantizeIntervals[0].size();
const auto quantize = builder::makeFakeQuantize(
multiply,
inputPrecision,
256,
inConstShape,
quantizeIntervals[0],
quantizeIntervals[1],
quantizeIntervals[2],
quantizeIntervals[3]);
ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(quantize)};
function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{param}, "FuseScaleShiftAndQuantize");
const auto quantize = ngraph::builder::makeFakeQuantize(multiply,
inputPrecision,
256,
inConstShape,
quantizeIntervals[0],
quantizeIntervals[1],
quantizeIntervals[2],
quantizeIntervals[3]);
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(quantize)};
function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "FuseScaleShiftAndQuantize");
}
};
TEST_P(FuseScaleShiftAndFakeQuantizeTest, CompareWithRefs) {
Run();
run();
}
namespace {
std::vector<Shape> inputShapes {
{1, 4, 16, 16}, {8, 4, 16, 16},
{1, 4, 16, 16, 16}, {8, 4, 16, 16, 16},
{1, 4, 16, 16, 16, 16}, {8, 4, 16, 16, 16, 16}
std::vector<Shape> inputShapes{{1, 4, 16, 16},
{8, 4, 16, 16},
{1, 4, 16, 16, 16},
{8, 4, 16, 16, 16},
{1, 4, 16, 16, 16, 16},
{8, 4, 16, 16, 16, 16}};
std::vector<std::pair<std::vector<float>, std::vector<float>>> scaleShifts{
{{30.f}, {17.f}}, // actually fused in LPT
{{-30.f}, {0.f}}, // fused with crop bound invert
{{-17.f}, {12.f}}, // fused with crop bound invert
{{-1.23e-44f}, {0.f}}, // fused with denormal handling
{{0.f}, {0.f}}, // not fused
{{0.f}, {18.f}}, // not fused
};
std::vector<std::pair<std::vector<float>, std::vector<float>>> scaleShifts {
{ {30.f}, {17.f} }, // actually fused in LPT
{ {-30.f}, {0.f} }, // fused with crop bound invert
{ {-17.f}, {12.f} }, // fused with crop bound invert
{ {-1.23e-44f}, {0.f} }, // fused with denormal handling
{ {0.f}, {0.f} }, // not fused
{ {0.f}, {18.f} }, // not fused
std::vector<std::vector<std::vector<float>>> quantizes{
{{-1.f}, {5.f}, {-5.f}, {1.f}},
{{2.f}, {4.f}, {-4.f}, {-2.f}},
{{-1.28f}, {1.27f}, {-1.28f}, {1.27f}},
{{0.f}, {2.55f}, {0.f}, {2.55f}},
};
std::vector<std::vector<std::vector<float>>> quantizes {
{ {-1.f}, {5.f}, {-5.f}, {1.f} },
{ {2.f}, {4.f}, {-4.f}, {-2.f} },
{ {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
{ {0.f}, {2.55f}, {0.f}, {2.55f} },
};
INSTANTIATE_TEST_SUITE_P(smoke_FuseScaleShiftAndFakeQuantize, FuseScaleShiftAndFakeQuantizeTest,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::Values(element::f32),
::testing::ValuesIn(scaleShifts),
::testing::ValuesIn(quantizes),
::testing::Values(ov::test::utils::DEVICE_CPU)),
FuseScaleShiftAndFakeQuantizeTest::getTestCaseName);
} // namespace
} // namespace CPUSubgraphTestsDefinitions
INSTANTIATE_TEST_SUITE_P(smoke_FuseScaleShiftAndFakeQuantize,
FuseScaleShiftAndFakeQuantizeTest,
::testing::Combine(::testing::ValuesIn(inputShapes),
::testing::Values(element::f32),
::testing::ValuesIn(scaleShifts),
::testing::ValuesIn(quantizes),
::testing::Values(ov::test::utils::DEVICE_CPU)),
FuseScaleShiftAndFakeQuantizeTest::getTestCaseName);
} // namespace
} // namespace test
} // namespace ov

View File

@ -2,27 +2,27 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ov_models/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace ngraph;
using FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc;
namespace CPUSubgraphTestsDefinitions {
typedef std::tuple<
Shape, // Input shape
element::Type, // Input precision
int, // Axis
size_t, // num_splits
size_t, // scale_factor
std::string // Device name
> FuseSplitConcatPairToInterpolateTuple;
namespace ov {
namespace test {
typedef std::tuple<Shape, // Input shape
element::Type, // Input precision
int, // Axis
size_t, // num_splits
size_t, // scale_factor
std::string // Device name
>
FuseSplitConcatPairToInterpolateTuple;
class FuseSplitConcatPairToInterpolateTest : public testing::WithParamInterface<FuseSplitConcatPairToInterpolateTuple>,
virtual public LayerTestsUtils::LayerTestsCommon {
virtual public SubgraphBaseStaticTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<FuseSplitConcatPairToInterpolateTuple> &obj) {
static std::string getTestCaseName(const testing::TestParamInfo<FuseSplitConcatPairToInterpolateTuple>& obj) {
Shape inputShape;
element::Type inputPrecision;
int axis;
@ -32,11 +32,8 @@ public:
std::tie(inputShape, inputPrecision, axis, num_splits, scale_factor, targetName) = obj.param;
std::ostringstream results;
results << "IS=" << inputShape
<< "_InPRC=" << inputPrecision
<< "_Axis=" << axis
<< "_Num_splits=" << num_splits
<< "_Scale_factor=" << scale_factor;
results << "IS=" << inputShape << "_InPRC=" << inputPrecision << "_Axis=" << axis
<< "_Num_splits=" << num_splits << "_Scale_factor=" << scale_factor;
results << "_targetDevice=" << targetName;
return results.str();
@ -53,11 +50,12 @@ protected:
size_t num_of_concat_inputs = num_splits * scale_factor;
const auto param = std::make_shared<opset6::Parameter>(inputPrecision, inputShape);
auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
const auto param = std::make_shared<ov::op::v0::Parameter>(inputPrecision, inputShape);
auto split_axis_op =
std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
auto split = std::make_shared<ov::op::v1::Split>(param, split_axis_op, num_splits);
ngraph::OutputVector concat_inputs_vec(num_of_concat_inputs);
ov::OutputVector concat_inputs_vec(num_of_concat_inputs);
for (size_t split_output_port = 0; split_output_port < num_splits; ++split_output_port) {
for (size_t j = 0; j < scale_factor; ++j) {
concat_inputs_vec[split_output_port * scale_factor + j] = split->output(split_output_port);
@ -66,58 +64,47 @@ protected:
const auto concat = std::make_shared<ov::op::v0::Concat>(concat_inputs_vec, axis);
ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(concat)};
function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{param}, "FuseSplitConcatPairToInterpolate");
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "FuseSplitConcatPairToInterpolate");
}
};
TEST_P(FuseSplitConcatPairToInterpolateTest, CompareWithRefs) {
Run();
run();
}
namespace {
std::vector<Shape> inputShapes4D {
{1, 2, 6, 6}
};
std::vector<Shape> inputShapes4D{{1, 2, 6, 6}};
std::vector<size_t> num_of_outputs_of_split {
2, 3, 6
};
std::vector<size_t> num_of_outputs_of_split{2, 3, 6};
std::vector<size_t> scale_factors {
2, 3, 4
};
std::vector<size_t> scale_factors{2, 3, 4};
std::vector<int> axes4D {
2, 3
};
std::vector<int> axes4D{2, 3};
std::vector<Shape> inputShapes5D {
{1, 3, 10, 6, 6}
};
std::vector<Shape> inputShapes5D{{1, 3, 10, 6, 6}};
std::vector<int> axes5D {
3, 4
};
std::vector<int> axes5D{3, 4};
INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate4D, FuseSplitConcatPairToInterpolateTest,
::testing::Combine(
::testing::ValuesIn(inputShapes4D),
::testing::Values(element::f32),
::testing::ValuesIn(axes4D),
::testing::ValuesIn(num_of_outputs_of_split),
::testing::ValuesIn(scale_factors),
::testing::Values(ov::test::utils::DEVICE_CPU)),
FuseSplitConcatPairToInterpolateTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate4D,
FuseSplitConcatPairToInterpolateTest,
::testing::Combine(::testing::ValuesIn(inputShapes4D),
::testing::Values(element::f32),
::testing::ValuesIn(axes4D),
::testing::ValuesIn(num_of_outputs_of_split),
::testing::ValuesIn(scale_factors),
::testing::Values(ov::test::utils::DEVICE_CPU)),
FuseSplitConcatPairToInterpolateTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate5D, FuseSplitConcatPairToInterpolateTest,
::testing::Combine(
::testing::ValuesIn(inputShapes5D),
::testing::Values(element::f32),
::testing::ValuesIn(axes5D),
::testing::ValuesIn(num_of_outputs_of_split),
::testing::ValuesIn(scale_factors),
::testing::Values(ov::test::utils::DEVICE_CPU)),
FuseSplitConcatPairToInterpolateTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate5D,
FuseSplitConcatPairToInterpolateTest,
::testing::Combine(::testing::ValuesIn(inputShapes5D),
::testing::Values(element::f32),
::testing::ValuesIn(axes5D),
::testing::ValuesIn(num_of_outputs_of_split),
::testing::ValuesIn(scale_factors),
::testing::Values(ov::test::utils::DEVICE_CPU)),
FuseSplitConcatPairToInterpolateTest::getTestCaseName);
} // namespace
} // namespace CPUSubgraphTestsDefinitions
} // namespace test
} // namespace ov