[CPU] Several CPU specific SL tests are extended with dynamic shapes (#8339)

Maksim Kutakov 2021-11-18 10:10:24 +03:00 committed by GitHub
parent f639e4e902
commit 60d2643b3e
15 changed files with 603 additions and 126 deletions

View File

@@ -73,13 +73,12 @@ bool MKLDNNMatMulNode::canFuse(const MKLDNNNodePtr& node) const {
EltwiseRoundHalfAwayFromZero, EltwiseAbs, EltwiseSqrt, EltwiseSoftRelu);
}
void MKLDNNMatMulNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights = false) const {
void MKLDNNMatMulNode::setPostOps(mkldnn::primitive_attr &attr, const VectorDims& dims, bool initWeights = false) const {
mkldnn::post_ops ops;
for (auto &node : fusedWith) {
if (auto* eltwiseNode = dynamic_cast<MKLDNNEltwiseNode *>(node.get())) {
// TODO [DS]: change to shape from memory
eltwiseNode->appendPostOps(ops, getOutputShapeAtPort(0).getStaticDims());
eltwiseNode->appendPostOps(ops, dims);
continue;
}
@@ -90,14 +89,19 @@ void MKLDNNMatMulNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights
}
MKLDNNNode::AttrPtr MKLDNNMatMulNode::initPrimitiveAttr() const {
MKLDNNNode::AttrPtr MKLDNNMatMulNode::initPrimitiveAttr(const VectorDims &dims) const {
auto attr = std::make_shared<mkldnn::primitive_attr>(mkldnn::primitive_attr());
setPostOps(*attr, true);
setPostOps(*attr, dims, true);
return attr;
}
MKLDNNNode::AttrPtr MKLDNNMatMulNode::initPrimitiveAttr() const {
auto dummyShape = MemoryDescUtils::makeDummyShape(getOutputShapeAtPort(0));
return initPrimitiveAttr(dummyShape.getStaticDims());
}
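For a dynamic node the real output dims are not known when the parameterless initPrimitiveAttr() runs, so it substitutes a dummy shape and delegates to the dims-aware overload. A minimal sketch of what such a dummy-shape substitution can look like (hypothetical helper; assumes the plugin's Shape exposes per-dimension min/max bounds, and the real MemoryDescUtils::makeDummyShape may choose its placeholder differently):

    // Hypothetical sketch: replace every dynamic dimension with a fixed
    // placeholder clamped into that dimension's [min, max] interval.
    Shape makeDummyShapeSketch(const Shape& shape, size_t dummyVal = 64) {
        const auto& minDims = shape.getMinDims();
        const auto& maxDims = shape.getMaxDims();
        VectorDims dummyDims(shape.getRank());
        for (size_t i = 0; i < dummyDims.size(); ++i)
            dummyDims[i] = std::min(std::max(dummyVal, minDims[i]), maxDims[i]);
        return Shape(dummyDims);
    }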
/* Example MatMul:
* 2x128x512(T) * 2x128x512 = 2x512x512
* First input 2x128x512(T) should be transposed
@@ -297,7 +301,7 @@ void MKLDNNMatMulNode::prepareParams() {
if (isDynamicNode()) {
if (!pAttr) {
pAttr = initPrimitiveAttr();
pAttr = initPrimitiveAttr(src0MemPtr->getStaticDims());
}
attr = pAttr;

View File

@@ -39,9 +39,10 @@ public:
protected:
AttrPtr initPrimitiveAttr() const override;
AttrPtr initPrimitiveAttr(const VectorDims& dims) const;
private:
void setPostOps(mkldnn::primitive_attr &attr, bool initWeights) const;
void setPostOps(mkldnn::primitive_attr &attr, const VectorDims& dims, bool initWeights) const;
std::string errorPrefix;

View File

@@ -102,15 +102,19 @@ protected:
TEST_P(DeconvolutionLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
if (!fusedOps.empty()) {
bool isSupportedParams = stride[stride.size() - 1] <= kernel[kernel.size() - 1];
if (stride.size() > 1)
isSupportedParams &= stride[stride.size() - 2] <= kernel[kernel.size() - 2];
if (stride.size() > 2)
isSupportedParams &= stride[stride.size() - 3] <= kernel[kernel.size() - 3];
if (!isSupportedParams) {
GTEST_SKIP() << "Fusing with strides more than kernel size was disabled, because oneDNN deconvolution doesn't support it" << std::endl;
}
}
Run();
CPUTestsBase::CheckPluginRelatedResults(executableNetwork, "Deconvolution");
bool isSupportedParams = stride[stride.size() - 1] <= kernel[kernel.size() - 1];
if (stride.size() > 1)
isSupportedParams &= stride[stride.size() - 2] <= kernel[kernel.size() - 2];
if (stride.size() > 2)
isSupportedParams &= stride[stride.size() - 3] <= kernel[kernel.size() - 3];
if (isSupportedParams)
CheckFusingResults(executableNetwork, "Deconvolution");
CheckPluginRelatedResults(executableNetwork, "Deconvolution");
}
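The per-dimension stride-vs-kernel guard above is spelled out element by element and repeats verbatim in the grouped variant below; a loop would express the same rule once. A possible refactor (hypothetical isFusingSupported helper, not part of this commit):

    // oneDNN deconvolution only supports fusing when stride <= kernel
    // in each of the (up to three) trailing spatial dimensions.
    static bool isFusingSupported(const std::vector<size_t>& stride,
                                  const std::vector<size_t>& kernel) {
        bool supported = true;
        const size_t n = std::min<size_t>(3, std::min(stride.size(), kernel.size()));
        for (size_t i = 1; i <= n; ++i)
            supported &= stride[stride.size() - i] <= kernel[kernel.size() - i];
        return supported;
    }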
namespace {

View File

@@ -101,15 +101,19 @@ protected:
TEST_P(GroupDeconvolutionLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
if (!fusedOps.empty()) {
bool isSupportedParams = stride[stride.size() - 1] <= kernel[kernel.size() - 1];
if (stride.size() > 1)
isSupportedParams &= stride[stride.size() - 2] <= kernel[kernel.size() - 2];
if (stride.size() > 2)
isSupportedParams &= stride[stride.size() - 3] <= kernel[kernel.size() - 3];
if (!isSupportedParams) {
GTEST_SKIP() << "Fusing with strides more than kernel size was disabled, because oneDNN deconvolution doesn't support it" << std::endl;
}
}
Run();
CPUTestsBase::CheckPluginRelatedResults(executableNetwork, "Deconvolution");
bool isSupportedParams = stride[stride.size() - 1] <= kernel[kernel.size() - 1];
if (stride.size() > 1)
isSupportedParams &= stride[stride.size() - 2] <= kernel[kernel.size() - 2];
if (stride.size() > 2)
isSupportedParams &= stride[stride.size() - 3] <= kernel[kernel.size() - 3];
if (isSupportedParams)
CheckFusingResults(executableNetwork, "Deconvolution");
CheckPluginRelatedResults(executableNetwork, "Deconvolution");
}
namespace {

View File

@@ -2,14 +2,14 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_layer/mat_mul.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/fusing_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace LayerTestsDefinitions;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
@@ -18,12 +18,27 @@ enum class MatMulNodeType {
FullyConnected
};
struct ShapeRelatedParams {
std::vector<InputShape> inputShapes;
std::pair<bool, bool> transpose;
};
typedef std::tuple<
ShapeRelatedParams,
ElementType, // Network precision
ElementType, // Input precision
ElementType, // Output precision
ngraph::helpers::InputLayerType, // Secondary input type
TargetDevice, // Device name
std::map<std::string, std::string> // Additional network configuration
> MatMulLayerTestParamsSet;
using MatMulLayerCPUTestParamSet = std::tuple<MatMulLayerTestParamsSet,
MatMulNodeType,
fusingSpecificParams>;
class MatMulLayerCPUTest : public testing::WithParamInterface<MatMulLayerCPUTestParamSet>,
virtual public LayerTestsUtils::LayerTestsCommon, public CpuTestWithFusing {
virtual public SubgraphBaseTest, public CpuTestWithFusing {
public:
static std::string getTestCaseName(const testing::TestParamInfo<MatMulLayerCPUTestParamSet>& obj) {
MatMulLayerTestParamsSet basicParamsSet;
@@ -32,10 +47,44 @@ public:
std::tie(basicParamsSet, nodeType, fusingParams) = obj.param;
ElementType netType;
ElementType inType, outType;
ShapeRelatedParams shapeRelatedParams;
ngraph::helpers::InputLayerType secondaryInputType;
TargetDevice targetDevice;
std::map<std::string, std::string> additionalConfig;
std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) =
basicParamsSet;
std::ostringstream result;
result << (nodeType == MatMulNodeType::MatMul ? "MatMul_" : "FullyConnected_");
result << LayerTestsDefinitions::MatMulTest::getTestCaseName(
testing::TestParamInfo<LayerTestsDefinitions::MatMulLayerTestParamsSet>(basicParamsSet, 0));
result << "IS=";
for (const auto& shape : shapeRelatedParams.inputShapes) {
result << CommonTestUtils::partialShape2str({shape.first}) << "_";
}
result << "TS=";
for (const auto& shape : shapeRelatedParams.inputShapes) {
result << "(";
if (!shape.second.empty()) {
auto itr = shape.second.begin();
do {
result << CommonTestUtils::vec2str(*itr);
} while (++itr != shape.second.end() && result << "_");
}
result << ")_";
}
result << "transpose_a=" << shapeRelatedParams.transpose.first << "_";
result << "transpose_b=" << shapeRelatedParams.transpose.second << "_";
result << "secondaryInputType=" << secondaryInputType << "_";
result << "netPRC=" << netType << "_";
result << "inPRC=" << inType << "_";
result << "outPRC=" << outType << "_";
result << "trgDev=" << targetDevice;
result << "config=(";
for (const auto& configEntry : additionalConfig) {
result << configEntry.first << ", " << configEntry.second << ":";
}
result << ")";
result << CpuTestWithFusing::getTestCaseName(fusingParams);
return result.str();
@@ -44,6 +93,12 @@ public:
protected:
std::string cpuNodeType;
template<typename T>
void transpose(T& shape) {
IE_ASSERT(shape.size() > 1);
std::swap(*(shape.end() - 1), *(shape.end() - 2));
}
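The helper is a template because it is applied both to ov::PartialShape (the dynamic input shapes) and to the static target shapes; in either case only the two innermost dimensions are swapped, matching MatMul's transpose_a/transpose_b semantics. For example:

    // Swapping the last two dims of 2x128x512 yields 2x512x128, the view
    // MatMul effectively consumes when the corresponding transpose flag is set.
    ov::Shape s{2, 128, 512};
    std::swap(*(s.end() - 1), *(s.end() - 2));  // s == {2, 512, 128}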
void SetUp() override {
MatMulLayerTestParamsSet basicParamsSet;
MatMulNodeType nodeType;
@@ -52,16 +107,32 @@ protected:
std::tie(basicParamsSet, nodeType, fusingParams) = this->GetParam();
ShapeRelatedParams shapeRelatedParams;
Precision netPrecision;
ElementType netType;
helpers::InputLayerType secondaryInputType;
std::map<std::string, std::string> additionalConfig;
std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet;
std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet;
SizeVector inShapeA = shapeRelatedParams.input1.first;
SizeVector inShapeB = shapeRelatedParams.input2.first;
bool transpA = shapeRelatedParams.input1.second;
bool transpB = shapeRelatedParams.input2.second;
init_input_shapes(shapeRelatedParams.inputShapes);
bool transpA = shapeRelatedParams.transpose.first;
bool transpB = shapeRelatedParams.transpose.second;
if (transpA) {
transpose(inputDynamicShapes[0]);
for (auto& shapes : targetStaticShapes) {
transpose(shapes[0]);
}
}
if (transpB) {
transpose(inputDynamicShapes[1]);
for (auto& shapes : targetStaticShapes) {
transpose(shapes[1]);
}
}
const auto& inShapeA = inputDynamicShapes[0];
const auto& inShapeB = inputDynamicShapes[1];
/* @todo
* Currently nodes are not fused through Reshape
@@ -73,29 +144,22 @@ protected:
configuration.insert(additionalConfig.begin(), additionalConfig.end());
if (additionalConfig[PluginConfigParams::KEY_ENFORCE_BF16] == PluginConfigParams::YES)
inPrc = outPrc = netPrecision = Precision::BF16;
inType = outType = netType = ElementType::bf16;
else
inPrc = outPrc = netPrecision;
inType = outType = netType;
cpuNodeType = nodeType == MatMulNodeType::MatMul ? "MatMul" : "FullyConnected";
selectedType = makeSelectedTypeStr("jit_gemm", outType);
auto transpose = [](SizeVector& shape) {
IE_ASSERT(shape.size() > 1);
std::swap(*(shape.end() - 1), *(shape.end() - 2));
};
auto params = builder::makeDynamicParams(netType, {inShapeA});
if (transpA) transpose(inShapeA);
if (transpB) transpose(inShapeB);
auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = builder::makeParams(ngPrec, {inShapeA});
auto matrixB = builder::makeInputLayer(ngPrec, secondaryInputType, inShapeB);
auto matrixB = builder::makeDynamicInputLayer(netType, secondaryInputType, inShapeB);
if (secondaryInputType == helpers::InputLayerType::PARAMETER) {
params.push_back(std::dynamic_pointer_cast<opset1::Parameter>(matrixB));
}
auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes<opset1::Parameter>(params));
auto matMul = builder::makeMatMul(paramOuts[0], matrixB, transpA, transpB);
function = makeNgraphFunction(ngPrec, params, matMul, cpuNodeType);
function = makeNgraphFunction(netType, params, matMul, cpuNodeType);
checkFusingPosition = false;
}
};
@@ -103,25 +167,21 @@ protected:
TEST_P(MatMulLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Run();
CheckFusingResults(executableNetwork, cpuNodeType);
run();
CheckPluginRelatedResults(executableNetwork, cpuNodeType);
}
namespace {
/* ============= Common params ============= */
const std::vector<bool> transpose = {
true, false
};
std::vector<std::map<std::string, std::string>> additionalConfig {
std::map<std::string, std::string>{/* empty config */},
{{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}}
};
const std::vector<Precision> netPRCs {
Precision::FP32,
Precision::BF16
const std::vector<ElementType> netPRCs {
ElementType::f32,
ElementType::bf16
};
/* ============= FullyConnected ============= */
@@ -133,26 +193,26 @@ const auto fusingBiasFC = fusingSpecificParams{std::make_shared<postNodesMgr>(st
return std::make_shared<opset1::Add>(inpNode, bias);
}, "fusingBiasFC"}}), {"Add"}};
const std::vector<ShapeRelatedParams> IS2D {
{{{59, 1}, false}, {{1, 120}, false}},
{{{59, 1}, true}, {{1, 120}, false}},
{{{59, 1}, false}, {{1, 120}, true}},
{{{59, 1}, true}, {{1, 120}, true}},
const std::vector<ShapeRelatedParams> IS2D = {
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}},
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}},
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}},
{static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}},
{{{59, 120}, false}, {{120, 1}, false}},
{{{59, 120}, true}, {{120, 1}, false}},
{{{59, 120}, false}, {{120, 1}, true}},
{{{59, 120}, true}, {{120, 1}, true}},
{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}},
{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}},
{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}},
{static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, true}},
{{{1, 120}, false}, {{120, 59}, false}},
{{{1, 120}, true}, {{120, 59}, false}},
{{{1, 120}, false}, {{120, 59}, true}},
{{{1, 120}, true}, {{120, 59}, true}},
{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}},
{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}},
{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}},
{static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}},
{{{71, 128}, false}, {{128, 20}, false}},
{{{71, 128}, true}, {{128, 20}, false}},
{{{71, 128}, false}, {{128, 20}, true}},
{{{71, 128}, true}, {{128, 20}, true}},
{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}},
{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}},
{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}},
{static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}},
};
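All static cases above go through static_shapes_to_test_representation, which wraps each plain static shape into the InputShape form (a PartialShape plus its target static shapes) consumed by the dynamic-shape test base. A sketch of the presumed behaviour (the actual helper lives in the shared test utilities):

    // Presumed behaviour: each static shape becomes a trivially dynamic
    // InputShape whose partial shape is fully defined and whose single
    // target shape is the shape itself.
    std::vector<InputShape> staticShapesSketch(const std::vector<ov::Shape>& shapes) {
        std::vector<InputShape> result;
        for (const auto& s : shapes)
            result.push_back({ov::PartialShape(s), {s}});
        return result;
    }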
std::vector<fusingSpecificParams> fusingParamsSet2D {
@@ -165,9 +225,8 @@ std::vector<fusingSpecificParams> fusingParamsSet2D {
const auto fullyConnectedParams2D = ::testing::Combine(::testing::ValuesIn(IS2D),
::testing::ValuesIn(netPRCs),
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(additionalConfig));
@@ -179,17 +238,17 @@ const auto testParams2D = ::testing::Combine(fullyConnectedParams2D,
INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerCPUTest, testParams2D, MatMulLayerCPUTest::getTestCaseName);
const std::vector<ShapeRelatedParams> IS3D = {
{{{1, 32, 120}, false}, {{120, 5}, false}},
{{{1, 32, 120}, true}, {{120, 5}, false}},
{{{1, 32, 120}, false}, {{120, 5}, true}},
{{{1, 32, 120}, true}, {{120, 5}, true}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}},
{{{7, 32, 120}, false}, {{120, 50}, false}},
{{{7, 32, 120}, true}, {{120, 50}, false}},
{{{7, 32, 120}, false}, {{120, 50}, true}},
{{{7, 32, 120}, true}, {{120, 50}, true}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}},
{static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}},
{{{1, 429}, false}, {{1, 429, 1}, true}},
{static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}}
};
std::vector<fusingSpecificParams> fusingParamsSet3D {
@@ -199,9 +258,8 @@ std::vector<fusingSpecificParams> fusingParamsSet3D {
const auto fullyConnectedParams3D = ::testing::Combine(::testing::ValuesIn(IS3D),
::testing::ValuesIn(netPRCs),
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::CONSTANT),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(additionalConfig));
@@ -212,44 +270,189 @@ const auto testParams3D = ::testing::Combine(fullyConnectedParams3D,
INSTANTIATE_TEST_SUITE_P(smoke_FC_3D, MatMulLayerCPUTest, testParams3D, MatMulLayerCPUTest::getTestCaseName);
}; // namespace fullyConnected
} // namespace fullyConnected
/* ============= MatMul ============= */
namespace matmul {
const std::vector<ShapeRelatedParams> IS = {
{{{1, 2, 32, 120}, false}, {{120, 5}, false}},
{{{1, 2, 32, 120}, true}, {{120, 5}, false}},
{{{1, 2, 32, 120}, false}, {{120, 5}, true}},
{{{1, 2, 32, 120}, true}, {{120, 5}, true}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}},
{static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}},
{{{7, 32, 120}, false}, {{3, 7, 120, 50}, false}},
{{{7, 32, 120}, true}, {{3, 7, 120, 50}, false}},
{{{7, 32, 120}, false}, {{3, 7, 120, 50}, true}},
{{{7, 32, 120}, true}, {{3, 7, 120, 50}, true}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}},
{static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}},
{{{10, 10, 10}, false}, {{10, 10, 10}, false}},
{{{10, 10, 10}, true}, {{10, 10, 10}, false}},
{{{10, 10, 10}, false}, {{10, 10, 10}, true}},
{{{10, 10, 10}, true}, {{10, 10, 10}, true}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}},
{static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}},
{{{55, 12}, false}, {{12, 55}, false}},
{{{55, 12}, true}, {{12, 55}, false}},
{{{55, 12}, false}, {{12, 55}, true}},
{{{55, 12}, true}, {{12, 55}, true}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}},
{static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1}, {{55, 12}, {33, 7}}}, // input 0
{{-1, -1}, {{12, 55}, {7, 33}}} // input 1
},
{false, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1}, {{55, 12}, {33, 7}}}, // input 0
{{-1, -1}, {{12, 55}, {7, 33}}} // input 1
},
{true, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1}, {{55, 12}, {33, 7}}}, // input 0
{{-1, -1}, {{12, 55}, {7, 33}}} // input 1
},
{false, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1}, {{55, 12}, {33, 7}}}, // input 0
{{-1, -1}, {{12, 55}, {7, 33}}} // input 1
},
{true, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1, -1}, {{1, 2, 32, 60}, {1, 2, 32, 30}}}, // input 0
{{-1, -1}, {{60, 5}, {30, 5}}} // input 1
},
{false, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1, -1}, {{1, 2, 32, 60}, {1, 2, 32, 30}}}, // input 0
{{-1, -1}, {{60, 5}, {30, 5}}} // input 1
},
{true, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1, -1}, {{1, 2, 32, 60}, {1, 2, 32, 30}}}, // input 0
{{-1, -1}, {{60, 5}, {30, 5}}} // input 1
},
{false, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1, -1}, {{1, 2, 32, 60}, {1, 2, 32, 30}}}, // input 0
{{-1, -1}, {{60, 5}, {30, 5}}} // input 1
},
{true, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{7, 32, 60}, {7, 32, 30}}}, // input 0
{{-1, -1, -1, -1}, {{3, 7, 60, 25}, {3, 7, 30, 25}}} // input 1
},
{false, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{7, 32, 60}, {7, 32, 30}}}, // input 0
{{-1, -1, -1, -1}, {{3, 7, 60, 25}, {3, 7, 30, 25}}} // input 1
},
{true, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{7, 32, 60}, {7, 32, 30}}}, // input 0
{{-1, -1, -1, -1}, {{3, 7, 60, 25}, {3, 7, 30, 25}}} // input 1
},
{false, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{7, 32, 60}, {7, 32, 30}}}, // input 0
{{-1, -1, -1, -1}, {{3, 7, 60, 25}, {3, 7, 30, 25}}} // input 1
},
{true, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{false, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{true, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{false, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{-1, -1, -1}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{true, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{false, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{true, false}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{false, true}
},
{
{ // dynamic case: each input is described by a pair {dynamic shape, {static shape case 1, static shape case 2, ...}}
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}}, // input 0
{{{1, 15}, {1, 15}, {1, 15}}, {{10, 10, 10}, {5, 5, 5}}} // input 1
},
{true, true}
},
};
std::vector<fusingSpecificParams> matmulFusingParams {
emptyFusingSpec,
fusingElu,
fusingSqrt
};
const auto matMulParams = ::testing::Combine(::testing::ValuesIn(IS),
::testing::ValuesIn(netPRCs),
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(ElementType::undefined),
::testing::Values(ElementType::undefined),
::testing::Values(helpers::InputLayerType::PARAMETER),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(additionalConfig));
@@ -260,7 +463,7 @@ const auto testParams = ::testing::Combine(matMulParams,
INSTANTIATE_TEST_SUITE_P(smoke_MM, MatMulLayerCPUTest, testParams, MatMulLayerCPUTest::getTestCaseName);
}; // namespace matmul
} // namespace matmul
} // namespace

View File

@@ -0,0 +1,175 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
typedef std::tuple<
InputShape, // Input shape definition
ElementType // Net precision
> NonZeroLayerTestParams;
typedef std::tuple<
NonZeroLayerTestParams,
CPUSpecificParams> NonZeroLayerCPUTestParamsSet;
class NonZeroLayerCPUTest : public testing::WithParamInterface<NonZeroLayerCPUTestParamsSet>,
virtual public SubgraphBaseTest, public CPUTestsBase {
public:
static std::string getTestCaseName(testing::TestParamInfo<NonZeroLayerCPUTestParamsSet> obj) {
NonZeroLayerTestParams basicParamsSet;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = obj.param;
std::string td;
ElementType netType = ElementType::undefined;
InputShape inputShape;
std::tie(inputShape, netType) = basicParamsSet;
std::ostringstream result;
result << "IS=";
result << CommonTestUtils::partialShape2str({inputShape.first}) << "_";
result << "TS=(";
for (const auto& shape : inputShape.second) {
result << CommonTestUtils::vec2str(shape) << "_";
}
result << ")_";
result << "netPRC=" << netType;
result << CPUTestsBase::getTestCaseName(cpuParams);
return result.str();
}
protected:
void SetUp() override {
targetDevice = CommonTestUtils::DEVICE_CPU;
NonZeroLayerTestParams basicParamsSet;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = this->GetParam();
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
ElementType netType = ElementType::undefined;
InputShape inputShape;
std::tie(inputShape, netType) = basicParamsSet;
init_input_shapes({inputShape});
auto inputParams = ngraph::builder::makeDynamicParams(netType, inputDynamicShapes);
auto nonZero = std::make_shared<ngraph::opset3::NonZero>(inputParams[0]);
// I8 is used as a special placeholder when calculating the primitive type if the input is U8;
// the real runtime precision is still U8.
selectedType = makeSelectedTypeStr("ref_", netType == ElementType::u8 ? ElementType::i8 : netType);
inputParams[0]->set_friendly_name("input");
function = makeNgraphFunction(netType, inputParams, nonZero, "NonZero");
}
};
TEST_P(NonZeroLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
CheckPluginRelatedResults(executableNetwork, "NonZero");
}
namespace {
/* CPU PARAMS */
std::vector<CPUSpecificParams> filterCPUInfoForDevice() {
return std::vector<CPUSpecificParams> {CPUSpecificParams{{}, {nc}, {}, {}}};
}
const std::vector<ElementType> netPrecisions = {
ElementType::f32,
ElementType::bf16,
ElementType::i32,
ElementType::i8,
ElementType::u8
};
std::vector<InputShape> inShapesDynamic = {
{
//dynamic shape
{-1},
{ //target static shapes
{100},
{200},
{300}
}
},
{
//dynamic shape
{-1, -1},
{ //target static shapes
{4, 100},
{4, 200},
{4, 300}
}
},
{
//dynamic shape
{-1, -1, -1},
{ //target static shapes
{4, 4, 100},
{4, 4, 200},
{4, 4, 300}
}
},
{
//dynamic shape
{-1, -1, -1, -1},
{ //target static shapes
{4, 4, 4, 100},
{4, 4, 4, 200},
{4, 4, 4, 300}
}
},
{
//dynamic shape
{-1, {1, 10}, -1, {1, 500}},
{ //target static shapes
{4, 4, 4, 100},
{4, 4, 4, 200},
{4, 4, 4, 300}
}
},
{
//dynamic shape
{{1, 10}, {1, 10}, {1, 10}, {1, 500}},
{ //target static shapes
{4, 4, 4, 100},
{4, 4, 4, 200},
{4, 4, 4, 300}
}
}
};
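In the dynamic shapes above, -1 denotes a fully dynamic dimension and {min, max} an interval dimension; every listed target static shape must satisfy those bounds. Spelled out with ov::Dimension, the bounded four-dimensional case reads roughly as follows (illustrative only):

    // -1 maps to ov::Dimension::dynamic(), {min, max} to a bounded dimension.
    ov::PartialShape bounded{ov::Dimension::dynamic(),  // -1
                             ov::Dimension(1, 10),      // {1, 10}
                             ov::Dimension::dynamic(),  // -1
                             ov::Dimension(1, 500)};    // {1, 500}
    // e.g. the target static shape {4, 4, 4, 100} fits these bounds.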
std::vector<ngraph::Shape> inShapesStatic = {
{ 100 },
{ 4, 100 },
{ 4, 2, 100 },
{ 4, 4, 2, 100 },
{ 4, 4, 4, 2, 100 }
};
const auto paramsStatic = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic)),
::testing::ValuesIn(netPrecisions)),
::testing::ValuesIn(filterCPUInfoForDevice()));
const auto paramsDynamic = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inShapesDynamic),
::testing::ValuesIn(netPrecisions)),
::testing::ValuesIn(filterCPUInfoForDevice()));
INSTANTIATE_TEST_SUITE_P(smoke_NonZeroStaticCPUTest, NonZeroLayerCPUTest,
paramsStatic, NonZeroLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_NonZeroDynamicCPUTest, NonZeroLayerCPUTest,
paramsDynamic, NonZeroLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions

View File

@@ -58,6 +58,8 @@ protected:
const auto outputNodes = helpers::convert2OutputVector(helpers::castOps2Nodes<op::Parameter>(inputParams));
const auto matMul = builder::makeMatMul(outputNodes[0], outputNodes[1], false, false);
selectedType = makeSelectedTypeStr("jit_gemm", ngPrec);
function = makeNgraphFunction(ngPrec, inputParams, matMul, "AlignMatMulInputRanks");
}
@@ -69,7 +71,7 @@ TEST_P(AlignMatMulInputRanksTest, CompareWithRefs) {
Run();
CheckNodeOfTypeCount(executableNetwork, "Reshape", expectedNumOfReshapes); // Squeeze / Unsqueeze turns into Reshape
CheckFusingResults(executableNetwork, "MatMul");
CheckPluginRelatedResults(executableNetwork, "MatMul");
}
namespace {

View File

@@ -58,6 +58,8 @@ protected:
pooling = builder::makePooling(conv, strides, padBegin, padEnd, kernelSize, roundingType, paddingType, false, poolType);
}
selectedType = makeSelectedTypeStr(getPrimitiveType(), element::f32);
function = makeNgraphFunction(element::f32, inputParams, pooling, "ConvPoolActiv");
}
};
@@ -66,7 +68,7 @@ TEST_P(ConvPoolActivTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Run();
CheckFusingResults(executableNetwork, "Convolution");
CheckPluginRelatedResults(executableNetwork, "Convolution");
}
namespace {

View File

@@ -59,7 +59,10 @@ protected:
auto matrixB = builder::makeConstant<float>(element::f32, isB, {}, true);
auto matMul = builder::makeMatMul(reshape, matrixB, false, transpB);
function = makeNgraphFunction(element::f32, inputParams, matMul, "ReshapeFC");
const auto netType = element::f32;
selectedType = makeSelectedTypeStr("jit_gemm", netType);
function = makeNgraphFunction(netType, inputParams, matMul, "ReshapeFC");
}
};
@@ -68,7 +71,7 @@ TEST_P(ReshapeFCTest, CompareWithRefs) {
Run();
CheckNodeOfTypeCount(executableNetwork, "Reshape", 0);
CheckFusingResults(executableNetwork, "FullyConnected");
CheckPluginRelatedResults(executableNetwork, "FullyConnected");
}
namespace {

View File

@@ -3,6 +3,7 @@
//
#include "cpu_test_utils.hpp"
#include "ie_ngraph_utils.hpp"
#include "utils/rt_info/memory_formats_attribute.hpp"
#include <cstdint>
@@ -119,6 +120,18 @@ void CPUTestsBase::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork
ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined.";
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
auto function = execGraphInfo.getFunction();
CheckPluginRelatedResultsImpl(function, std::move(nodeType));
}
void CPUTestsBase::CheckPluginRelatedResults(ov::runtime::ExecutableNetwork &execNet, std::string nodeType) const {
if (nodeType.empty()) return;
ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined.";
auto function = execNet.get_runtime_function();
CheckPluginRelatedResultsImpl(function, std::move(nodeType));
}
void CPUTestsBase::CheckPluginRelatedResultsImpl(std::shared_ptr<const ov::Function> function, std::string nodeType) const {
ASSERT_NE(nullptr, function);
for (const auto &node : function->get_ops()) {
const auto & rtInfo = node->get_rt_info();
@@ -281,6 +294,12 @@ CPUTestsBase::modifyGraph(const ngraph::element::Type &ngPrc, ngraph::ParameterV
return lastNode;
}
std::string CPUTestsBase::makeSelectedTypeStr(std::string implString, ngraph::element::Type_t elType) {
implString.push_back('_');
implString += InferenceEngine::details::convertPrecision(elType).name();
return implString;
}
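makeSelectedTypeStr simply suffixes the implementation string with the precision name, so fixtures can build the expected primitive descriptor string in one call. Presumed usage (assuming convertPrecision maps element::f32 to the name "FP32"):

    // Yields "jit_gemm_FP32" under the assumption above.
    std::string selected = CPUTestsBase::makeSelectedTypeStr("jit_gemm", ngraph::element::f32);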
std::vector<CPUSpecificParams> filterCPUSpecificParams(std::vector<CPUSpecificParams> &paramsVector) {
auto adjustBlockedFormatByIsa = [](std::vector<cpu_memory_format_t>& formats) {
for (int i = 0; i < formats.size(); i++) {

View File

@@ -9,6 +9,7 @@
#include "ie_system_conf.h"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include <exec_graph_info.hpp>
#include <openvino/runtime/executable_network.hpp>
#include "ie_system_conf.h"
namespace CPUTestUtils {
@@ -126,6 +127,7 @@ public:
static CPUInfo makeCPUInfo(std::vector<cpu_memory_format_t> inFmts,
std::vector<cpu_memory_format_t> outFmts,
std::vector<std::string> priority);
static std::string makeSelectedTypeStr(std::string implString, ngraph::element::Type_t elType);
CPUInfo getCPUInfo() const;
std::shared_ptr<ngraph::Function> makeNgraphFunction(const ngraph::element::Type &ngPrc,
@@ -133,8 +135,11 @@ public:
const std::shared_ptr<ngraph::Node> &lastNode,
std::string name) const;
void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const;
void CheckPluginRelatedResults(ov::runtime::ExecutableNetwork &execNet, std::string nodeType) const;
protected:
virtual void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const;
virtual void CheckPluginRelatedResultsImpl(std::shared_ptr<const ov::Function> function, std::string nodeType) const;
/**
* @brief This function modifies the initial single layer test graph to add any necessary modifications that are specific to the cpu test scope.
* @param ngPrc Graph precision.

View File

@@ -36,9 +36,7 @@ CpuTestWithFusing::modifyGraph(const ngraph::element::Type &ngPrc, ngraph::Param
return retNode;
}
void CpuTestWithFusing::CheckFusingResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const {
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
auto function = execGraphInfo.getFunction();
void CpuTestWithFusing::CheckFusingResults(std::shared_ptr<const ov::Function> function, std::string nodeType) const {
ASSERT_NE(nullptr, function);
bool isNodeFound = false;
for (const auto & op : function->get_ops()) {
@@ -69,9 +67,9 @@ void CpuTestWithFusing::CheckFusingResults(InferenceEngine::ExecutableNetwork &e
ASSERT_TRUE(isNodeFound) << "Node type name: \"" << nodeType << "\" has not been found.";
}
void CpuTestWithFusing::CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const {
CPUTestsBase::CheckPluginRelatedResults(execNet, nodeType);
CheckFusingResults(execNet, nodeType);
void CpuTestWithFusing::CheckPluginRelatedResultsImpl(std::shared_ptr<const ov::Function> function, std::string nodeType) const {
CPUTestsBase::CheckPluginRelatedResultsImpl(function, nodeType);
CheckFusingResults(function, nodeType);
}
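Taken together with the header changes in the surrounding files, the checks now follow a template-method layout: the public CheckPluginRelatedResults overloads extract the runtime ov::Function once and dispatch to a single virtual Impl, which the fusing fixture extends. A condensed sketch of the resulting flow (signatures as in the diff, error handling elided):

    // Base class: both public overloads funnel into the virtual Impl.
    void CPUTestsBase::CheckPluginRelatedResults(ov::runtime::ExecutableNetwork &execNet, std::string nodeType) const {
        CheckPluginRelatedResultsImpl(execNet.get_runtime_function(), std::move(nodeType));
    }
    // Fusing fixture: base checks first, then fusing checks, on the same function.
    void CpuTestWithFusing::CheckPluginRelatedResultsImpl(std::shared_ptr<const ov::Function> function, std::string nodeType) const {
        CPUTestsBase::CheckPluginRelatedResultsImpl(function, nodeType);
        CheckFusingResults(function, nodeType);
    }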
std::shared_ptr<ngraph::Node>

View File

@@ -64,8 +64,10 @@ protected:
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode) const override;
void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const override;
void CheckFusingResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const;
void CheckPluginRelatedResultsImpl(std::shared_ptr<const ov::Function> function, std::string nodeType) const override;
private:
void CheckFusingResults(std::shared_ptr<const ov::Function> function, std::string nodeType) const;
protected:
std::shared_ptr<postOpMgr> postOpMgrPtr;

View File

@@ -5,6 +5,8 @@
#include <signal.h>
#include <fstream>
#include <transformations/utils/utils.hpp>
#include <transformations/convert_precision.hpp>
#include <ngraph_functions/utils/ngraph_helpers.hpp>
#ifdef _WIN32
#include <process.h>
@@ -19,8 +21,6 @@
#include "functional_test_utils/ov_tensor_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace ov {
@@ -187,8 +187,50 @@ void SubgraphBaseTest::infer() {
}
std::vector<ov::runtime::Tensor> SubgraphBaseTest::calculate_refs() {
functionRefs->validate_nodes_and_infer_types();
return ngraph::helpers::interpretFunction(functionRefs, inputs);
using InputsMap = std::map<std::shared_ptr<ov::Node>, ov::runtime::Tensor>;
auto functionToProcess = ov::clone_function(*functionRefs);
// TODO: remove these conversions as soon as the function interpreter fully supports bf16 and f16
static const precisions_array precisions = {
{ ngraph::element::bf16, ngraph::element::f32 },
{ ngraph::element::f16, ngraph::element::f32}
};
pass::Manager manager;
manager.register_pass<ngraph::pass::ConvertPrecision>(precisions);
manager.run_passes(functionToProcess);
functionToProcess->validate_nodes_and_infer_types();
ov::preprocess::PrePostProcessor p(functionToProcess);
const auto& inputNodes = functionToProcess->inputs();
for (size_t i = 0; i < inputNodes.size(); ++i) {
auto itr = std::find_if(inputs.begin(), inputs.end(),
[&](const InputsMap::value_type& item) {
return item.first->get_friendly_name() == inputNodes[i].get_node_shared_ptr()->get_friendly_name();
});
if (itr != inputs.end()) {
auto elementType = itr->second.get_element_type();
if (inputNodes[i].get_element_type() != elementType) {
p.input(ov::preprocess::InputInfo(i).tensor(ov::preprocess::InputTensorInfo().set_element_type(elementType)));
}
} else {
std::stringstream errMsg;
errMsg << "Couldn't find input with name " << inputNodes[i].get_node_shared_ptr()->get_friendly_name();
errMsg << " in the inputs map";
throw std::runtime_error(errMsg.str());
}
}
const auto& outputs = functionToProcess->outputs();
for (size_t i = 0; i < outputs.size(); ++i) {
if (outType != ElementType::undefined && outType != outputs[i].get_element_type()) {
p.output(ov::preprocess::OutputInfo(i).tensor(ov::preprocess::OutputTensorInfo().set_element_type(outType)));
}
}
functionToProcess = p.build();
return ngraph::helpers::interpretFunction(functionToProcess, inputs);
}
std::vector<ov::runtime::Tensor> SubgraphBaseTest::get_plugin_outputs() {

View File

@@ -28,5 +28,18 @@ std::shared_ptr<ngraph::Node> makeInputLayer(const element::Type &type, ngraph::
}
return input;
}
std::shared_ptr<ngraph::Node> makeDynamicInputLayer(const element::Type &type, ngraph::helpers::InputLayerType inputType,
const PartialShape& shape) {
if (shape.is_static()) {
return makeInputLayer(type, inputType, shape.get_shape());
}
if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
return ngraph::builder::makeDynamicParams(type, {shape}).front();
}
throw std::runtime_error("Could not make input layer. Unsupported inputType for dynamic shape");
}
} // namespace builder
} // namespace ngraph
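Possible usage of the new helper: only PARAMETER inputs can be created for a genuinely dynamic shape, while static shapes fall back to the original makeInputLayer path, so CONSTANT inputs keep working there. Illustrative call:

    // Builds a Parameter for a fully dynamic 2D input; a CONSTANT request
    // with a dynamic shape would hit the runtime_error branch instead.
    auto dynInput = ngraph::builder::makeDynamicInputLayer(
        ngraph::element::f32,
        ngraph::helpers::InputLayerType::PARAMETER,
        ov::PartialShape{-1, -1});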