Upgrade CPU func tests to 2.0 (#21344)
* [CPU Plugin][Func Test] Upgrade ConvPoolActivTest to API 2.0
* [CPU Plugin][Func Test] Upgrade Conv3dReshapeTest to API 2.0
* [CPU Plugin][Func Test] Upgrade ConvsAndSums to API 2.0
* [CPU Plugin][Func Test] Upgrade FQScaleshiftWithConstantShiftTest to API 2.0
* [CPU Plugin][Func Test] Upgrade FullyConnectedStridedInputsOutputsTest to API 2.0
* [CPU Plugin][Func Test] Upgrade FuseScaleShiftAndFakeQuantizeTest to API 2.0
* [CPU Plugin][Func Test] Upgrade FuseSplitConcatPairToInterpolateTest to API 2.0
* [CPU Plugin][Func Test] Remove ngraph namespace
* [CPU Plugin][Func Test] Fix error

Signed-off-by: Zhai, Xuejun <xuejun.zhai@intel.com>
parent 1b43b3c566
commit c1a28d0942
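The same API 2.0 migration pattern repeats across all seven test files in the diff below: fixtures drop LayerTestsUtils::LayerTestsCommon and the InferenceEngine/ngraph usings in favour of ov::test::SubgraphBaseStaticTest inside namespace ov::test, graphs are built as ov::Model from ov::op::vN ops and the ov::test::utils node builders, and Run()/executableNetwork become run()/compiledModel. The following minimal sketch, assembled only from constructs that appear in this diff, shows the shape of an upgraded fixture; the name MyCpuSubgraphTest is illustrative and not part of this change.

// Minimal sketch of the API 2.0 fixture style used throughout this PR.
// Assumes the OpenVINO CPU func-test utility headers shown in the diff;
// "MyCpuSubgraphTest" is a hypothetical name for illustration only.
#include "common_test_utils/node_builders/convolution.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"

namespace ov {
namespace test {

class MyCpuSubgraphTest : virtual public SubgraphBaseStaticTest {
protected:
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;

        // Parameters and ops come from ov::op::vN / ov::test::utils instead of ngraph::builder.
        ov::ParameterVector inputs{
            std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 40, 40})};
        auto conv = ov::test::utils::make_convolution(inputs[0],
                                                      ov::element::f32,
                                                      {3, 3},  // kernel
                                                      {1, 1},  // strides
                                                      {0, 0},  // pads_begin
                                                      {0, 0},  // pads_end
                                                      {1, 1},  // dilations
                                                      ov::op::PadType::EXPLICIT,
                                                      16);     // output channels
        auto result = std::make_shared<ov::op::v0::Result>(conv);

        // The graph is an ov::Model now, not an ngraph::Function.
        function = std::make_shared<ov::Model>(ov::ResultVector{result}, inputs, "MyCpuSubgraph");
    }
};

// The base class provides run(); the old LayerTestsCommon::Run() is gone.
TEST_F(MyCpuSubgraphTest, smoke_CompareWithRefs) {
    run();
}

}  // namespace test
}  // namespace ov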
Conv3dReshapeTest:

@@ -2,21 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "common_test_utils/node_builders/convolution.hpp"
+#include "common_test_utils/node_builders/group_convolution.hpp"
 #include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
 
-namespace SubgraphTestsDefinitions {
+namespace ov {
+namespace test {
 
-using Conv3dReshapeTestParams = std::tuple<nodeType,
-                                           size_t>;
+using Conv3dReshapeTestParams = std::tuple<nodeType, size_t>;
 
 class Conv3dReshapeTest : public testing::WithParamInterface<Conv3dReshapeTestParams>,
-                          virtual public LayerTestsUtils::LayerTestsCommon {
+                          virtual public SubgraphBaseStaticTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<Conv3dReshapeTestParams> obj) {
         nodeType conv;
@@ -31,7 +31,7 @@ public:
     }
 
 protected:
     std::string cpuNodeType;
 
     void SetUp() override {
         targetDevice = ov::test::utils::DEVICE_CPU;
@@ -41,7 +41,8 @@ protected:
 
         cpuNodeType = nodeType2PluginType(convType);
 
-        ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1024, 64})};
+        ov::ParameterVector inputParams{
+            std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1024, 64})};
 
         std::shared_ptr<Node> conv;
         const std::vector<size_t> kernelSize = {1};
@@ -53,43 +54,64 @@ protected:
         const size_t numOfGroups = 2;
         const op::PadType paddingType = op::PadType::EXPLICIT;
         switch (convType) {
-        case nodeType::convolution : {
-            conv = builder::makeConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels);
-            break;
-        }
-        case nodeType::groupConvolution : {
-            conv = builder::makeGroupConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels,
-                                                 numOfGroups);
-            break;
-        }
-        default: {
-            throw std::runtime_error("Conv3dReshapeTest doesn't support this type of operation");
-        }
+        case nodeType::convolution: {
+            conv = ov::test::utils::make_convolution(inputParams[0],
+                                                     element::f32,
+                                                     kernelSize,
+                                                     strides,
+                                                     padBegin,
+                                                     padEnd,
+                                                     dilation,
+                                                     paddingType,
+                                                     numOutChannels);
+            break;
+        }
+        case nodeType::groupConvolution: {
+            conv = ov::test::utils::make_group_convolution(inputParams[0],
+                                                           element::f32,
+                                                           kernelSize,
+                                                           strides,
+                                                           padBegin,
+                                                           padEnd,
+                                                           dilation,
+                                                           paddingType,
+                                                           numOutChannels,
+                                                           numOfGroups);
+            break;
+        }
+        default: {
+            throw std::runtime_error("Conv3dReshapeTest doesn't support this type of operation");
+        }
         }
 
         ResultVector results;
         for (size_t i = 0; i < numOut; i++) {
-            auto mockNode = std::make_shared<opset5::Multiply>(conv->output(0), opset5::Constant::create(element::f32, Shape{1}, {1}));
-            results.push_back(std::make_shared<opset5::Result>(mockNode));
+            auto mockNode =
+                std::make_shared<ov::op::v1::Multiply>(conv->output(0),
+                                                       ov::op::v0::Constant::create(element::f32, Shape{1}, {1}));
+            results.push_back(std::make_shared<ov::op::v0::Result>(mockNode));
         }
 
-        function = std::make_shared<ngraph::Function>(results, inputParams, "Conv3dReshape");
+        function = std::make_shared<ov::Model>(results, inputParams, "Conv3dReshape");
     }
 };
 
 TEST_P(Conv3dReshapeTest, CompareWithRefs) {
-    Run();
+    run();
 }
 
 namespace {
 
-const std::vector<nodeType> convType = { nodeType::convolution, nodeType::groupConvolution };
-const std::vector<size_t> numOut = { 1, 2, 5 };
-const auto conv3dReshapeParams = ::testing::Combine(::testing::ValuesIn(convType),
-                                                    ::testing::ValuesIn(numOut));
+const std::vector<nodeType> convType = {nodeType::convolution, nodeType::groupConvolution};
+const std::vector<size_t> numOut = {1, 2, 5};
+const auto conv3dReshapeParams = ::testing::Combine(::testing::ValuesIn(convType), ::testing::ValuesIn(numOut));
 
-INSTANTIATE_TEST_SUITE_P(smoke_Conv3dReshapeTest, Conv3dReshapeTest, conv3dReshapeParams, Conv3dReshapeTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_Conv3dReshapeTest,
+                         Conv3dReshapeTest,
+                         conv3dReshapeParams,
+                         Conv3dReshapeTest::getTestCaseName);
 
 } // namespace
 
-} // namespace SubgraphTestsDefinitions
+} // namespace test
+} // namespace ov
ConvPoolActivTest:

@@ -2,21 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "test_utils/fusing_test_utils.hpp"
+#include "common_test_utils/node_builders/convolution.hpp"
 #include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/fusing_test_utils.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
 
-namespace SubgraphTestsDefinitions {
+namespace ov {
+namespace test {
 
-using ConvPoolActivTestParams = fusingSpecificParams;
-
-class ConvPoolActivTest : public testing::WithParamInterface<ConvPoolActivTestParams>, public CpuTestWithFusing,
-                          virtual public LayerTestsUtils::LayerTestsCommon {
+class ConvPoolActivTest : public testing::WithParamInterface<fusingSpecificParams>,
+                          public CpuTestWithFusing,
+                          virtual public SubgraphBaseStaticTest {
 public:
-    static std::string getTestCaseName(testing::TestParamInfo<ConvPoolActivTestParams> obj) {
+    static std::string getTestCaseName(testing::TestParamInfo<fusingSpecificParams> obj) {
         fusingSpecificParams fusingParams = obj.param;
 
         std::ostringstream result;
@@ -32,7 +32,8 @@ protected:
         fusingSpecificParams fusingParams = this->GetParam();
         std::tie(postOpMgrPtr, fusedOps) = fusingParams;
 
-        ov::ParameterVector inputParams{std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 40, 40})};
+        ov::ParameterVector inputParams{
+            std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 40, 40})};
 
         std::shared_ptr<Node> conv;
         {
@@ -43,7 +44,15 @@ protected:
             const std::vector<size_t> dilation = {1, 1};
             const size_t numOutChannels = 16;
             const op::PadType paddingType = op::PadType::EXPLICIT;
-            conv = builder::makeConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels);
+            conv = ov::test::utils::make_convolution(inputParams[0],
+                                                     element::f32,
+                                                     kernelSize,
+                                                     strides,
+                                                     padBegin,
+                                                     padEnd,
+                                                     dilation,
+                                                     paddingType,
+                                                     numOutChannels);
         }
         std::shared_ptr<Node> pooling;
         {
@@ -52,8 +61,14 @@ protected:
             const std::vector<size_t> padBegin = {0, 0};
            const std::vector<size_t> padEnd = {0, 0};
             const op::PadType paddingType = op::PadType::EXPLICIT;
-            ngraph::op::RoundingType roundingType = ngraph::op::RoundingType::CEIL;
-            pooling = std::make_shared<ov::op::v1::MaxPool>(conv, strides, padBegin, padEnd, kernelSize, roundingType, paddingType);
+            ov::op::RoundingType roundingType = ov::op::RoundingType::CEIL;
+            pooling = std::make_shared<ov::op::v1::MaxPool>(conv,
+                                                            strides,
+                                                            padBegin,
+                                                            padEnd,
+                                                            kernelSize,
+                                                            roundingType,
+                                                            paddingType);
         }
 
         selectedType = makeSelectedTypeStr(getPrimitiveType(), element::f32);
@@ -66,27 +81,26 @@ protected:
         if (isaType == "")
             return primType == "ref";
         else
-            return primType == makeSelectedTypeStr(std::string("jit_") + isaType, element::f32)
-                || primType == makeSelectedTypeStr(std::string("brgconv_") + isaType, element::f32);
+            return primType == makeSelectedTypeStr(std::string("jit_") + isaType, element::f32) ||
+                   primType == makeSelectedTypeStr(std::string("brgconv_") + isaType, element::f32);
     }
 };
 
 TEST_P(ConvPoolActivTest, CompareWithRefs) {
-    Run();
-    CheckPluginRelatedResults(executableNetwork, "Convolution");
+    run();
+    CheckPluginRelatedResults(compiledModel, "Convolution");
 }
 
 namespace {
 
-const std::vector<fusingSpecificParams> fusingParamsSet {
-    emptyFusingSpec,
-    fusingRelu,
-    fusingSwish,
-    fusingSigmoid
-};
+const std::vector<fusingSpecificParams> fusingParamsSet{emptyFusingSpec, fusingRelu, fusingSwish, fusingSigmoid};
 
-INSTANTIATE_TEST_SUITE_P(smoke_Check, ConvPoolActivTest, ::testing::ValuesIn(fusingParamsSet), ConvPoolActivTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_Check,
                          ConvPoolActivTest,
+                         ::testing::ValuesIn(fusingParamsSet),
+                         ConvPoolActivTest::getTestCaseName);
 
 } // namespace
 
-} // namespace SubgraphTestsDefinitions
+} // namespace test
+} // namespace ov
ConvsAndSums:

@@ -2,14 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "common_test_utils/node_builders/activation.hpp"
+#include "common_test_utils/node_builders/eltwise.hpp"
 #include "ov_models/builders.hpp"
 #include "ov_models/utils/ov_helpers.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace ngraph;
-using ngraph::helpers::EltwiseTypes;
+namespace ov {
+namespace test {
 
-namespace SubgraphTestsDefinitions {
 
 /* We can't fuse EltwiseAdd several times into one convolution
@@ -28,53 +29,103 @@ namespace SubgraphTestsDefinitions {
    RESULT
 */
 
-class ConvsAndSums : virtual public LayerTestsUtils::LayerTestsCommon {
+class ConvsAndSums : virtual public SubgraphBaseStaticTest {
 protected:
     void SetUp() override {
-        InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32;
+        ov::element::Type netPrecision = ov::element::f32;
 
         targetDevice = ov::test::utils::DEVICE_CPU;
 
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, 512, 32}),
-                                   std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, 128, 32})};
+        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape{1, 512, 32}),
+                                   std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape{1, 128, 32})};
 
-        auto FQ = ngraph::builder::makeFakeQuantize(params[1], ngPrc, 256, {}, {-2.8215785026550293}, {2.799535036087036},
-                                                    {-2.8215785026550293}, {2.799535036087036});
-        auto FQ_0 = ngraph::builder::makeFakeQuantize(params[1], ngPrc, 256, {}, {-5.031249523162842}, {4.991942882537842},
-                                                      {-5.031249523162842}, {4.991942882537842});
+        auto FQ = ngraph::builder::makeFakeQuantize(params[1],
+                                                    netPrecision,
+                                                    256,
+                                                    {},
+                                                    {-2.8215785026550293},
+                                                    {2.799535036087036},
+                                                    {-2.8215785026550293},
+                                                    {2.799535036087036});
+        auto FQ_0 = ngraph::builder::makeFakeQuantize(params[1],
+                                                      netPrecision,
+                                                      256,
+                                                      {},
+                                                      {-5.031249523162842},
+                                                      {4.991942882537842},
+                                                      {-5.031249523162842},
+                                                      {4.991942882537842});
 
-        auto Add_0 = ngraph::builder::makeEltwise(FQ_0, FQ, EltwiseTypes::ADD);
+        auto Add_0 = ov::test::utils::makeEltwise(FQ_0, FQ, ov::test::utils::EltwiseTypes::ADD);
 
-        auto FQ_1 = ngraph::builder::makeFakeQuantize(params[0], ngPrc, 256, {}, {-2.122633457183838}, {2.106050491333008},
-                                                      {-2.122633457183838}, {2.106050491333008});
+        auto FQ_1 = ngraph::builder::makeFakeQuantize(params[0],
+                                                      netPrecision,
+                                                      256,
+                                                      {},
+                                                      {-2.122633457183838},
+                                                      {2.106050491333008},
+                                                      {-2.122633457183838},
+                                                      {2.106050491333008});
 
-        auto Const = ngraph::builder::makeConstant(ngPrc, {128, 512, 1}, std::vector<float>{-0.0512377955019474}, false);
-        auto FQ_2 = ngraph::builder::makeFakeQuantize(Const, ngPrc, 255, {128, 1, 1}, {-0.56387859582901}, {0.56387859582901},
-                                                      {-0.56387859582901}, {0.56387859582901});
+        auto Const =
+            ngraph::builder::makeConstant(netPrecision, {128, 512, 1}, std::vector<float>{-0.0512377955019474}, false);
+        auto FQ_2 = ngraph::builder::makeFakeQuantize(Const,
+                                                      netPrecision,
+                                                      255,
+                                                      {128, 1, 1},
+                                                      {-0.56387859582901},
+                                                      {0.56387859582901},
+                                                      {-0.56387859582901},
+                                                      {0.56387859582901});
 
-        auto Conv = std::make_shared<ngraph::opset1::Convolution>(FQ_1, FQ_2, Strides{1}, CoordinateDiff{0}, CoordinateDiff{0}, Strides{1});
+        auto Conv = std::make_shared<ov::op::v1::Convolution>(FQ_1,
+                                                              FQ_2,
+                                                              Strides{1},
+                                                              CoordinateDiff{0},
+                                                              CoordinateDiff{0},
+                                                              Strides{1});
 
-        auto Add = ngraph::builder::makeEltwise(Add_0, Conv, EltwiseTypes::ADD);
+        auto Add = ov::test::utils::makeEltwise(Add_0, Conv, ov::test::utils::EltwiseTypes::ADD);
 
-        auto FQ_11 = ngraph::builder::makeFakeQuantize(params[0], ngPrc, 256, {}, {-3.2050728797912598}, {3.1800332069396973},
-                                                       {-3.2050728797912598}, {3.1800332069396973});
+        auto FQ_11 = ngraph::builder::makeFakeQuantize(params[0],
+                                                       netPrecision,
+                                                       256,
+                                                       {},
+                                                       {-3.2050728797912598},
+                                                       {3.1800332069396973},
+                                                       {-3.2050728797912598},
+                                                       {3.1800332069396973});
 
-        auto Const_ = ngraph::builder::makeConstant(ngPrc, {128, 512, 1}, std::vector<float>{-0.001183388871140778}, false);
-        auto FQ_22 = ngraph::builder::makeFakeQuantize(Const_, ngPrc, 255, {128, 1, 1}, {-0.325547456741333}, {0.325547456741333},
-                                                       {-0.325547456741333}, {0.325547456741333});
+        auto Const_ = ngraph::builder::makeConstant(netPrecision,
+                                                    {128, 512, 1},
+                                                    std::vector<float>{-0.001183388871140778},
+                                                    false);
+        auto FQ_22 = ngraph::builder::makeFakeQuantize(Const_,
+                                                       netPrecision,
+                                                       255,
+                                                       {128, 1, 1},
+                                                       {-0.325547456741333},
+                                                       {0.325547456741333},
+                                                       {-0.325547456741333},
+                                                       {0.325547456741333});
 
-        auto Conv2 = std::make_shared<ngraph::opset1::Convolution>(FQ_11, FQ_22, Strides{1}, CoordinateDiff{0}, CoordinateDiff{0}, Strides{1});
-        auto Add2 = ngraph::builder::makeEltwise(Add, Conv2, EltwiseTypes::ADD);
-        auto relu3 = ngraph::builder::makeActivation(Add2, ngPrc, ngraph::helpers::ActivationTypes::Relu);
+        auto Conv2 = std::make_shared<ov::op::v1::Convolution>(FQ_11,
+                                                               FQ_22,
+                                                               Strides{1},
+                                                               CoordinateDiff{0},
+                                                               CoordinateDiff{0},
+                                                               Strides{1});
+        auto Add2 = ov::test::utils::makeEltwise(Add, Conv2, ov::test::utils::EltwiseTypes::ADD);
+        auto relu3 = ov::test::utils::make_activation(Add2, netPrecision, ov::test::utils::ActivationTypes::Relu);
 
-        auto result = std::make_shared<ngraph::opset1::Result>(relu3);
-        function = std::make_shared<ngraph::Function>(result, params, "SimpleNet");
+        auto result = std::make_shared<ov::op::v0::Result>(relu3);
+        function = std::make_shared<ov::Model>(result, params, "SimpleNet");
     }
 };
 
 TEST_F(ConvsAndSums, smoke_CompareWithRefs) {
-    Run();
+    run();
 }
 
-} // namespace SubgraphTestsDefinitions
+} // namespace test
+} // namespace ov
FQScaleshiftWithConstantShiftTest:

@@ -2,40 +2,38 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "common_test_utils/node_builders/eltwise.hpp"
 #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
-#include "test_utils/cpu_test_utils.hpp"
 #include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/cpu_test_utils.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
 
-namespace SubgraphTestsDefinitions {
+namespace ov {
+namespace test {
 
-using FQScaleshiftWithConstantShiftTestParams = Precision;
-
-class FQScaleshiftWithConstantShiftTest : public testing::WithParamInterface<FQScaleshiftWithConstantShiftTestParams>,
-                                          public CPUTestsBase,
-                                          virtual public LayerTestsUtils::LayerTestsCommon {
+class FQScaleshiftWithConstantShiftTest : public testing::WithParamInterface<ov::element::Type>,
+                                          public CPUTestsBase,
+                                          virtual public SubgraphBaseStaticTest {
 public:
-    static std::string getTestCaseName(testing::TestParamInfo<FQScaleshiftWithConstantShiftTestParams> obj) {
-        Precision netPrecision;
+    static std::string getTestCaseName(testing::TestParamInfo<ov::element::Type> obj) {
+        ov::element::Type netPrecision;
         netPrecision = obj.param;
         std::ostringstream result;
-        result << "netPRC=" << netPrecision.name() << "_";
+        result << "netPRC=" << netPrecision.get_type_name() << "_";
         return result.str();
     }
 
 protected:
     void SetUp() override {
         targetDevice = ov::test::utils::DEVICE_CPU;
-        Precision netPrecision;
+        ov::element::Type netPrecision;
         netPrecision = this->GetParam();
-        const auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
 
         ov::Shape mmShape{25, 14, 14, 768};
-        SizeVector mmShape2{768, 2304};
-        SizeVector sumShape{1, 1, 1, 2304};
+        ov::Shape mmShape2{768, 2304};
+        ov::Shape sumShape{1, 1, 1, 2304};
 
         // avoid eliminations
         std::vector<int> mmInData(768 * 2304);
@@ -44,16 +42,16 @@ protected:
         std::vector<int> sumConstData(1 * 1 * 1 * 2304);
         std::iota(sumConstData.begin(), sumConstData.end(), 0);
 
-        auto constShift = ngraph::opset5::Constant::create(ngraph::element::f32, sumShape, sumConstData);
-        auto mmConst = ngraph::opset5::Constant::create(ngraph::element::f32, mmShape2, mmInData);
-        ov::ParameterVector mmParams {std::make_shared<ov::op::v0::Parameter>(ngPrec, mmShape)};
+        auto constShift = ov::op::v0::Constant::create(ov::element::f32, sumShape, sumConstData);
+        auto mmConst = ov::op::v0::Constant::create(ov::element::f32, mmShape2, mmInData);
+        ov::ParameterVector mmParams{std::make_shared<ov::op::v0::Parameter>(netPrecision, mmShape)};
 
         const auto mm = std::make_shared<ov::op::v0::MatMul>(mmParams[0], mmConst, false, false);
-        auto sum = ngraph::builder::makeEltwise(constShift, mm, ngraph::helpers::EltwiseTypes::ADD);
-        auto fq = ngraph::builder::makeFakeQuantize(sum, ngraph::element::f32, 256, {}, {-8.0f}, {7.0f}, {-8.0f}, {7.0f});
+        auto sum = ov::test::utils::makeEltwise(constShift, mm, ov::test::utils::EltwiseTypes::ADD);
+        auto fq = ngraph::builder::makeFakeQuantize(sum, ov::element::f32, 256, {}, {-8.0f}, {7.0f}, {-8.0f}, {7.0f});
 
-        ngraph::ParameterVector inputParams = {mmParams[0]};
-        function = makeNgraphFunction(ngPrec, inputParams, fq, "FQScaleshiftWithConstantShift");
+        ov::ParameterVector inputParams = {mmParams[0]};
+        function = makeNgraphFunction(netPrecision, inputParams, fq, "FQScaleshiftWithConstantShift");
     }
 };
 
@@ -77,12 +75,13 @@ protected:
 */
 
 TEST_P(FQScaleshiftWithConstantShiftTest, CompareWithRefs) {
-    Run();
+    run();
 }
 
 namespace {
 INSTANTIATE_TEST_SUITE_P(smoke_Check, FQScaleshiftWithConstantShiftTest,
-                         ::testing::Values(Precision::FP32),
+                         ::testing::Values(ov::element::f32),
                          FQScaleshiftWithConstantShiftTest::getTestCaseName);
 } // namespace
-} // namespace SubgraphTestsDefinitions
+} // namespace test
+} // namespace ov
FullyConnectedStridedInputsOutputsTest:

@@ -2,31 +2,32 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "openvino/core/partial_shape.hpp"
-#include "test_utils/cpu_test_utils.hpp"
-#include "ov_models/builders.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "openvino/core/partial_shape.hpp"
+#include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/cpu_test_utils.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
 
-namespace SubgraphTestsDefinitions {
+namespace ov {
+namespace test {
 
-using FullyConnectedStridedInputsOutputsTestParams = std::tuple<Precision,
-                                                                size_t>; // rank (2D or 3D)
+using FullyConnectedStridedInputsOutputsTestParams = std::tuple<ov::element::Type,
+                                                                size_t>;  // rank (2D or 3D)
 
-class FullyConnectedStridedInputsOutputsTest : public testing::WithParamInterface<FullyConnectedStridedInputsOutputsTestParams>,
-                                               public CPUTestsBase,
-                                               virtual public LayerTestsUtils::LayerTestsCommon {
+class FullyConnectedStridedInputsOutputsTest
+    : public testing::WithParamInterface<FullyConnectedStridedInputsOutputsTestParams>,
+      public CPUTestsBase,
+      virtual public SubgraphBaseStaticTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<FullyConnectedStridedInputsOutputsTestParams> obj) {
-        Precision netPrecision;
+        ov::element::Type netPrecision;
         size_t rank;
         std::tie(netPrecision, rank) = obj.param;
 
         std::ostringstream result;
-        result << "netPRC=" << netPrecision.name() << "_";
+        result << "netPRC=" << netPrecision.get_type_name() << "_";
         result << "rank=" << rank;
 
         return result.str();
@@ -35,39 +36,42 @@ public:
 protected:
     void SetUp() override {
         targetDevice = ov::test::utils::DEVICE_CPU;
-        Precision netPrecision;
+        ov::element::Type netPrecision;
         size_t rank;
         std::tie(netPrecision, rank) = this->GetParam();
-        const auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
 
-        auto bcastTo3D = [](SizeVector& shape) {
+        auto bcastTo3D = [](ov::Shape& shape) {
             shape.insert(shape.begin(), 1);
         };
 
-        SizeVector splitShape{2, 16};
-        if (rank == 3) bcastTo3D(splitShape);
+        ov::Shape splitShape{2, 16};
+        if (rank == 3)
+            bcastTo3D(splitShape);
 
-        ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrec, ov::Shape(splitShape))};
+        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, ov::Shape(splitShape))};
 
         const auto splitAxis = rank == 3 ? 1 : 0;
-        auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{splitAxis});
+        auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64,
+                                                                    ov::Shape{},
+                                                                    std::vector<int64_t>{splitAxis});
         auto split = std::make_shared<ov::op::v1::Split>(params[0], split_axis_op, 2);
 
-        SizeVector fcWeightsShape{16, 8};
-        if (rank == 3) bcastTo3D(fcWeightsShape);
+        ov::Shape fcWeightsShape{16, 8};
+        if (rank == 3)
+            bcastTo3D(fcWeightsShape);
 
-        auto tensor = ov::test::utils::create_and_fill_tensor(ngPrec, fcWeightsShape);
+        auto tensor = ov::test::utils::create_and_fill_tensor(netPrecision, fcWeightsShape);
         auto fc1secondInput = std::make_shared<ov::op::v0::Constant>(tensor);
         const auto fc1 = std::make_shared<ov::op::v0::MatMul>(split->output(0), fc1secondInput, false, false);
 
-        auto tensorB = ov::test::utils::create_and_fill_tensor(ngPrec, fcWeightsShape);
+        auto tensorB = ov::test::utils::create_and_fill_tensor(netPrecision, fcWeightsShape);
         auto fc2secondInputB = std::make_shared<ov::op::v0::Constant>(tensorB);
         const auto fc2 = std::make_shared<ov::op::v0::MatMul>(split->output(1), fc2secondInputB, false, false);
 
         const auto fcConcatAxis = rank == 3 ? 1 : 0;
         const auto concatMatMuls = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{fc1, fc2}, fcConcatAxis);
 
-        function = makeNgraphFunction(ngPrec, params, concatMatMuls, "FullyConnectedStridedInputsOutputs");
+        function = makeNgraphFunction(netPrecision, params, concatMatMuls, "FullyConnectedStridedInputsOutputs");
     }
 };
 
@@ -96,16 +100,18 @@ protected:
 */
 
 TEST_P(FullyConnectedStridedInputsOutputsTest, CompareWithRefs) {
-    Run();
+    run();
 }
 
 namespace {
 
-INSTANTIATE_TEST_SUITE_P(smoke_Check, FullyConnectedStridedInputsOutputsTest,
-                         ::testing::Combine(::testing::Values(Precision::FP32, Precision::BF16),
+INSTANTIATE_TEST_SUITE_P(smoke_Check,
+                         FullyConnectedStridedInputsOutputsTest,
+                         ::testing::Combine(::testing::Values(ov::element::f32, ov::element::bf16),
                                             ::testing::Values(2, 3)),
                          FullyConnectedStridedInputsOutputsTest::getTestCaseName);
 
 } // namespace
 
-} // namespace SubgraphTestsDefinitions
+} // namespace test
+} // namespace ov
FuseScaleShiftAndFakeQuantizeTest:

@@ -2,26 +2,24 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "test_utils/cpu_test_utils.hpp"
 #include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/cpu_test_utils.hpp"
 
-using namespace ngraph;
-using FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc;
-
-namespace CPUSubgraphTestsDefinitions {
-typedef std::tuple<
-        Shape,                                              // Input shape
-        element::Type,                                      // Input precision
-        std::pair<std::vector<float>, std::vector<float>>,  // ScaleShift scales and shifts
-        std::vector<std::vector<float>>,                    // Quantize intervals
-        std::string                                         // Device name
-> FuseScaleShiftAndQuantizeTuple;
+namespace ov {
+namespace test {
+typedef std::tuple<Shape,                                               // Input shape
+                   element::Type,                                       // Input precision
+                   std::pair<std::vector<float>, std::vector<float>>,   // ScaleShift scales and shifts
+                   std::vector<std::vector<float>>,                     // Quantize intervals
+                   std::string                                          // Device name
+                   >
+    FuseScaleShiftAndQuantizeTuple;
 
 class FuseScaleShiftAndFakeQuantizeTest : public testing::WithParamInterface<FuseScaleShiftAndQuantizeTuple>,
-                                          virtual public LayerTestsUtils::LayerTestsCommon {
+                                          virtual public SubgraphBaseStaticTest {
 public:
-    static std::string getTestCaseName(const testing::TestParamInfo<FuseScaleShiftAndQuantizeTuple> &obj) {
+    static std::string getTestCaseName(const testing::TestParamInfo<FuseScaleShiftAndQuantizeTuple>& obj) {
         Shape inputShape;
         element::Type inputPrecision;
         std::pair<std::vector<float>, std::vector<float>> scaleShift;
@@ -30,13 +28,11 @@ public:
         std::tie(inputShape, inputPrecision, scaleShift, quantizeIntervals, targetName) = obj.param;
         std::ostringstream results;
 
-        results << "IS=" << inputShape
-                << "_InPRC=" << inputPrecision
-                << "_Scale=" << vector_to_string(scaleShift.first)
-                << "_Shift=" << vector_to_string(scaleShift.second)
-                << "_Intervals=";
+        results << "IS=" << inputShape << "_InPRC=" << inputPrecision
+                << "_Scale=" << ngraph::vector_to_string(scaleShift.first)
+                << "_Shift=" << ngraph::vector_to_string(scaleShift.second) << "_Intervals=";
         for (const auto& vecInt : quantizeIntervals) {
-            results << vector_to_string(vecInt) << ",";
+            results << ngraph::vector_to_string(vecInt) << ",";
         }
 
         results << "targetDevice=" << targetName;
@@ -46,73 +42,72 @@ public:
 
 protected:
     void SetUp() override {
-        threshold = 0.1f;
-
         Shape inputShape;
         element::Type inputPrecision;
         std::pair<std::vector<float>, std::vector<float>> scaleShift;
         std::vector<std::vector<float>> quantizeIntervals;
         std::tie(inputShape, inputPrecision, scaleShift, quantizeIntervals, targetDevice) = this->GetParam();
 
-        const auto param = std::make_shared<opset6::Parameter>(inputPrecision, inputShape);
+        const auto param = std::make_shared<ov::op::v0::Parameter>(inputPrecision, inputShape);
         Shape constShape = Shape(inputShape.size(), 1);
         constShape[1] = scaleShift.second.size();
-        const auto subtract = std::make_shared<opset1::Subtract>(
+        const auto subtract = std::make_shared<ov::op::v1::Subtract>(
             param,
-            std::make_shared<opset6::Constant>(inputPrecision, constShape, scaleShift.second));
-        const auto multiply = std::make_shared<opset1::Multiply>(
+            std::make_shared<ov::op::v0::Constant>(inputPrecision, constShape, scaleShift.second));
+        const auto multiply = std::make_shared<ov::op::v1::Multiply>(
             param,
-            std::make_shared<opset6::Constant>(inputPrecision, constShape, scaleShift.first));
+            std::make_shared<ov::op::v0::Constant>(inputPrecision, constShape, scaleShift.first));
         Shape inConstShape = Shape(inputShape.size(), 1);
         inConstShape[1] = quantizeIntervals[0].size();
-        const auto quantize = builder::makeFakeQuantize(
-            multiply,
-            inputPrecision,
-            256,
-            inConstShape,
-            quantizeIntervals[0],
-            quantizeIntervals[1],
-            quantizeIntervals[2],
-            quantizeIntervals[3]);
-        ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(quantize)};
-        function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{param}, "FuseScaleShiftAndQuantize");
+        const auto quantize = ngraph::builder::makeFakeQuantize(multiply,
+                                                                inputPrecision,
+                                                                256,
+                                                                inConstShape,
+                                                                quantizeIntervals[0],
+                                                                quantizeIntervals[1],
+                                                                quantizeIntervals[2],
+                                                                quantizeIntervals[3]);
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(quantize)};
+        function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "FuseScaleShiftAndQuantize");
     }
 };
 
 TEST_P(FuseScaleShiftAndFakeQuantizeTest, CompareWithRefs) {
-    Run();
+    run();
 }
 
 namespace {
-std::vector<Shape> inputShapes {
-    {1, 4, 16, 16}, {8, 4, 16, 16},
-    {1, 4, 16, 16, 16}, {8, 4, 16, 16, 16},
-    {1, 4, 16, 16, 16, 16}, {8, 4, 16, 16, 16, 16}
-};
+std::vector<Shape> inputShapes{{1, 4, 16, 16},
+                               {8, 4, 16, 16},
+                               {1, 4, 16, 16, 16},
+                               {8, 4, 16, 16, 16},
+                               {1, 4, 16, 16, 16, 16},
+                               {8, 4, 16, 16, 16, 16}};
 
-std::vector<std::pair<std::vector<float>, std::vector<float>>> scaleShifts {
-    { {30.f}, {17.f} },       // actually fused in LPT
-    { {-30.f}, {0.f} },       // fused with crop bound invert
-    { {-17.f}, {12.f} },      // fused with crop bound invert
-    { {-1.23e-44f}, {0.f} },  // fused with denormal handling
-    { {0.f}, {0.f} },         // not fused
-    { {0.f}, {18.f} },        // not fused
+std::vector<std::pair<std::vector<float>, std::vector<float>>> scaleShifts{
+    {{30.f}, {17.f}},       // actually fused in LPT
+    {{-30.f}, {0.f}},       // fused with crop bound invert
+    {{-17.f}, {12.f}},      // fused with crop bound invert
+    {{-1.23e-44f}, {0.f}},  // fused with denormal handling
+    {{0.f}, {0.f}},         // not fused
+    {{0.f}, {18.f}},        // not fused
 };
 
-std::vector<std::vector<std::vector<float>>> quantizes {
-    { {-1.f}, {5.f}, {-5.f}, {1.f} },
-    { {2.f}, {4.f}, {-4.f}, {-2.f} },
-    { {-1.28f}, {1.27f}, {-1.28f}, {1.27f} },
-    { {0.f}, {2.55f}, {0.f}, {2.55f} },
+std::vector<std::vector<std::vector<float>>> quantizes{
+    {{-1.f}, {5.f}, {-5.f}, {1.f}},
+    {{2.f}, {4.f}, {-4.f}, {-2.f}},
+    {{-1.28f}, {1.27f}, {-1.28f}, {1.27f}},
+    {{0.f}, {2.55f}, {0.f}, {2.55f}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_FuseScaleShiftAndFakeQuantize, FuseScaleShiftAndFakeQuantizeTest,
-                         ::testing::Combine(
-                             ::testing::ValuesIn(inputShapes),
-                             ::testing::Values(element::f32),
-                             ::testing::ValuesIn(scaleShifts),
-                             ::testing::ValuesIn(quantizes),
-                             ::testing::Values(ov::test::utils::DEVICE_CPU)),
+INSTANTIATE_TEST_SUITE_P(smoke_FuseScaleShiftAndFakeQuantize,
+                         FuseScaleShiftAndFakeQuantizeTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes),
+                                            ::testing::Values(element::f32),
+                                            ::testing::ValuesIn(scaleShifts),
+                                            ::testing::ValuesIn(quantizes),
+                                            ::testing::Values(ov::test::utils::DEVICE_CPU)),
                          FuseScaleShiftAndFakeQuantizeTest::getTestCaseName);
 } // namespace
-} // namespace CPUSubgraphTestsDefinitions
+} // namespace test
+} // namespace ov
FuseSplitConcatPairToInterpolateTest:

@@ -2,27 +2,27 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "test_utils/cpu_test_utils.hpp"
 #include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/cpu_test_utils.hpp"
 
-using namespace ngraph;
 using FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc;
 
-namespace CPUSubgraphTestsDefinitions {
-typedef std::tuple<
-        Shape,          // Input shape
+namespace ov {
+namespace test {
+typedef std::tuple<Shape,          // Input shape
                    element::Type,  // Input precision
                    int,            // Axis
                    size_t,         // num_splits
                    size_t,         // scale_factor
                    std::string     // Device name
-> FuseSplitConcatPairToInterpolateTuple;
+                   >
+    FuseSplitConcatPairToInterpolateTuple;
 
 class FuseSplitConcatPairToInterpolateTest : public testing::WithParamInterface<FuseSplitConcatPairToInterpolateTuple>,
-                                             virtual public LayerTestsUtils::LayerTestsCommon {
+                                             virtual public SubgraphBaseStaticTest {
 public:
-    static std::string getTestCaseName(const testing::TestParamInfo<FuseSplitConcatPairToInterpolateTuple> &obj) {
+    static std::string getTestCaseName(const testing::TestParamInfo<FuseSplitConcatPairToInterpolateTuple>& obj) {
         Shape inputShape;
         element::Type inputPrecision;
         int axis;
@@ -32,11 +32,8 @@ public:
         std::tie(inputShape, inputPrecision, axis, num_splits, scale_factor, targetName) = obj.param;
         std::ostringstream results;
 
-        results << "IS=" << inputShape
-                << "_InPRC=" << inputPrecision
-                << "_Axis=" << axis
-                << "_Num_splits=" << num_splits
-                << "_Scale_factor=" << scale_factor;
+        results << "IS=" << inputShape << "_InPRC=" << inputPrecision << "_Axis=" << axis
+                << "_Num_splits=" << num_splits << "_Scale_factor=" << scale_factor;
         results << "_targetDevice=" << targetName;
 
         return results.str();
@@ -53,11 +50,12 @@ protected:
 
         size_t num_of_concat_inputs = num_splits * scale_factor;
 
-        const auto param = std::make_shared<opset6::Parameter>(inputPrecision, inputShape);
-        auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
+        const auto param = std::make_shared<ov::op::v0::Parameter>(inputPrecision, inputShape);
+        auto split_axis_op =
+            std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
         auto split = std::make_shared<ov::op::v1::Split>(param, split_axis_op, num_splits);
 
-        ngraph::OutputVector concat_inputs_vec(num_of_concat_inputs);
+        ov::OutputVector concat_inputs_vec(num_of_concat_inputs);
         for (size_t split_output_port = 0; split_output_port < num_splits; ++split_output_port) {
             for (size_t j = 0; j < scale_factor; ++j) {
                 concat_inputs_vec[split_output_port * scale_factor + j] = split->output(split_output_port);
@@ -66,58 +64,47 @@ protected:
 
         const auto concat = std::make_shared<ov::op::v0::Concat>(concat_inputs_vec, axis);
 
-        ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(concat)};
-        function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{param}, "FuseSplitConcatPairToInterpolate");
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
+        function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "FuseSplitConcatPairToInterpolate");
     }
 };
 
 TEST_P(FuseSplitConcatPairToInterpolateTest, CompareWithRefs) {
-    Run();
+    run();
 }
 
 namespace {
-std::vector<Shape> inputShapes4D {
-    {1, 2, 6, 6}
-};
+std::vector<Shape> inputShapes4D{{1, 2, 6, 6}};
 
-std::vector<size_t> num_of_outputs_of_split {
-    2, 3, 6
-};
+std::vector<size_t> num_of_outputs_of_split{2, 3, 6};
 
-std::vector<size_t> scale_factors {
-    2, 3, 4
-};
+std::vector<size_t> scale_factors{2, 3, 4};
 
-std::vector<int> axes4D {
-    2, 3
-};
+std::vector<int> axes4D{2, 3};
 
-std::vector<Shape> inputShapes5D {
-    {1, 3, 10, 6, 6}
-};
+std::vector<Shape> inputShapes5D{{1, 3, 10, 6, 6}};
 
-std::vector<int> axes5D {
-    3, 4
-};
+std::vector<int> axes5D{3, 4};
 
-INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate4D, FuseSplitConcatPairToInterpolateTest,
-                         ::testing::Combine(
-                             ::testing::ValuesIn(inputShapes4D),
+INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate4D,
+                         FuseSplitConcatPairToInterpolateTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D),
                                             ::testing::Values(element::f32),
                                             ::testing::ValuesIn(axes4D),
                                             ::testing::ValuesIn(num_of_outputs_of_split),
                                             ::testing::ValuesIn(scale_factors),
                                             ::testing::Values(ov::test::utils::DEVICE_CPU)),
                          FuseSplitConcatPairToInterpolateTest::getTestCaseName);
 
-INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate5D, FuseSplitConcatPairToInterpolateTest,
-                         ::testing::Combine(
-                             ::testing::ValuesIn(inputShapes5D),
+INSTANTIATE_TEST_SUITE_P(smoke_FuseSplitConcatPairToInterpolate5D,
+                         FuseSplitConcatPairToInterpolateTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes5D),
                                             ::testing::Values(element::f32),
                                             ::testing::ValuesIn(axes5D),
                                             ::testing::ValuesIn(num_of_outputs_of_split),
                                             ::testing::ValuesIn(scale_factors),
                                             ::testing::Values(ov::test::utils::DEVICE_CPU)),
                          FuseSplitConcatPairToInterpolateTest::getTestCaseName);
 } // namespace
-} // namespace CPUSubgraphTestsDefinitions
+} // namespace test
+} // namespace ov