templateFuncTests
* Remove StaticShape and add DynamicShape suffixes so that existing test cases are static by default
* Fix typos

cpuFuncTests
* Add functionRefs to each case

This commit is contained in:
parent c3b87f098d
commit 5710141cd8
@@ -58,7 +58,7 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
);

// ! [test_convolution:instantiate]
-INSTANTIATE_TEST_SUITE_P(Convolution2D_ExplicitPaddingStaticShape, ConvolutionLayerTest,
+INSTANTIATE_TEST_SUITE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
@@ -71,7 +71,7 @@ INSTANTIATE_TEST_SUITE_P(Convolution2D_ExplicitPaddingStaticShape, ConvolutionLa
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
+INSTANTIATE_TEST_SUITE_P(Convolution2D_ExplicitPaddingDynamicShape, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
@@ -87,7 +87,7 @@ INSTANTIATE_TEST_SUITE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
ConvolutionLayerTest::getTestCaseName);
// ! [test_convolution:instantiate]

-INSTANTIATE_TEST_SUITE_P(Convolution2D_AutoPadValidStaticShape, ConvolutionLayerTest,
+INSTANTIATE_TEST_SUITE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid,
::testing::ValuesIn(netPrecisions),
@@ -131,7 +131,7 @@ const auto conv3DParams_AutoPadValid = ::testing::Combine(
::testing::Values(ngraph::op::PadType::VALID)
);

-INSTANTIATE_TEST_SUITE_P(smoke_Convolution3D_ExplicitPaddingStaticShape, ConvolutionLayerTest,
+INSTANTIATE_TEST_SUITE_P(smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine(
conv3DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
@@ -144,7 +144,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution3D_ExplicitPaddingStaticShape, Convolu
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(nightly_Convolution3D_AutoPadValidStaticShape, ConvolutionLayerTest,
+INSTANTIATE_TEST_SUITE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine(
conv3DParams_AutoPadValid,
::testing::ValuesIn(netPrecisions),
@@ -12,7 +12,7 @@ using namespace LayerTestsDefinitions;

namespace {

-const std::vector<InferenceEngine::Precision> netPrecision = {
+const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};

@@ -38,8 +38,8 @@ const std::vector<size_t> axis2D = {
0, 1
};

-const auto params2DStaticShape = testing::Combine(
-testing::ValuesIn(netPrecision),
+const auto params2D = testing::Combine(
+testing::ValuesIn(netPrecisions),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::ValuesIn(inputLayouts2D),
@@ -51,8 +51,8 @@ const auto params2DStaticShape = testing::Combine(
testing::Values(std::map<std::string, std::string>())
);

-const auto params2D = testing::Combine(
-testing::ValuesIn(netPrecision),
+const auto params2DDynamicShape = testing::Combine(
+testing::ValuesIn(netPrecisions),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::ValuesIn(inputLayouts2D),
@@ -65,16 +65,16 @@ const auto params2D = testing::Combine(
);

INSTANTIATE_TEST_SUITE_P(
-smoke_SoftMax2DStaticShape,
+smoke_SoftMax2D,
SoftMaxLayerTest,
-params2DStaticShape,
+params2D,
SoftMaxLayerTest::getTestCaseName
);

INSTANTIATE_TEST_SUITE_P(
-smoke_SoftMax2D,
+smoke_SoftMax2DDynamicShape,
SoftMaxLayerTest,
-params2D,
+params2DDynamicShape,
SoftMaxLayerTest::getTestCaseName
);

@@ -94,8 +94,8 @@ const std::vector<std::vector<InferenceEngine::SizeVector>> targetShapes4D = {

const std::vector<size_t> axis4D = {0, 1, 2, 3};

-const auto params4DStaticShape = testing::Combine(
-testing::ValuesIn(netPrecision),
+const auto params4D = testing::Combine(
+testing::ValuesIn(netPrecisions),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Layout::NCHW),
@@ -107,8 +107,8 @@ const auto params4DStaticShape = testing::Combine(
testing::Values(std::map<std::string, std::string>())
);

-const auto params4D = testing::Combine(
-testing::ValuesIn(netPrecision),
+const auto params4DDynamicShape = testing::Combine(
+testing::ValuesIn(netPrecisions),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Layout::NCHW),
@@ -120,13 +120,6 @@ const auto params4D = testing::Combine(
testing::Values(std::map<std::string, std::string>())
);

-INSTANTIATE_TEST_SUITE_P(
-smoke_SoftMax4DStaticShape,
-SoftMaxLayerTest,
-params4DStaticShape,
-SoftMaxLayerTest::getTestCaseName
-);
-
INSTANTIATE_TEST_SUITE_P(
smoke_SoftMax4D,
SoftMaxLayerTest,
@@ -134,4 +127,11 @@ INSTANTIATE_TEST_SUITE_P(
SoftMaxLayerTest::getTestCaseName
);

+INSTANTIATE_TEST_SUITE_P(
+smoke_SoftMax4DDynamicShape,
+SoftMaxLayerTest,
+params4DDynamicShape,
+SoftMaxLayerTest::getTestCaseName
+);
+
} // namespace
@@ -70,6 +70,7 @@ protected:
ngraph::NodeVector {bias_2},
ngraph::ParameterVector {input},
"SimpleNet");
+functionRefs = ngraph::clone_function(*function);
}
};
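The `functionRefs = ngraph::clone_function(*function);` line recurs throughout this commit. A plausible reading, assuming a LayerTestsCommon-style harness where reference outputs are computed on a separate graph (Function and clone_function below are simplified stand-ins for the ngraph API, not the real definitions):

// Hedged sketch of the functionRefs pattern, assuming the harness evaluates
// references on a separate copy of the graph. Function and clone_function are
// simplified stand-ins for ngraph::Function and ngraph::clone_function.
#include <memory>

struct Function { /* graph of operations */ };

std::shared_ptr<Function> clone_function(const Function& f) {
    return std::make_shared<Function>(f);  // the real API performs a deep copy
}

struct LayerTestBase {
    std::shared_ptr<Function> function;      // graph handed to the plugin
    std::shared_ptr<Function> functionRefs;  // pristine copy for reference results

    void SetUp() {
        function = std::make_shared<Function>();
        // Snapshot before the plugin compiles or transforms `function`, so the
        // reference path still evaluates the original graph:
        functionRefs = clone_function(*function);
    }
};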
@@ -19,10 +19,13 @@ const std::vector<InferenceEngine::Layout> inputLayouts2D = {
InferenceEngine::Layout::NC,
};

-const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
-InferenceEngine::SizeVector {1, 100},
-InferenceEngine::SizeVector {100, 1},
-InferenceEngine::SizeVector {10, 10},
+const std::vector<std::vector<std::pair<size_t, size_t>>> inputStaticShape2D = {
+{NULL_RANGE}
+};
+const std::vector<std::vector<InferenceEngine::SizeVector>> inputShapes2D = {
+{InferenceEngine::SizeVector {1, 100}},
+{InferenceEngine::SizeVector {100, 1}},
+{InferenceEngine::SizeVector {10, 10}},
};

const std::vector<size_t> axis2D = {
@@ -35,6 +38,7 @@ const auto params2D = testing::Combine(
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::ValuesIn(inputLayouts2D),
testing::Values(InferenceEngine::Layout::ANY),
+testing::ValuesIn(inputStaticShape2D),
testing::ValuesIn(inputShapes2D),
testing::ValuesIn(axis2D),
testing::Values(CommonTestUtils::DEVICE_CPU),
@@ -48,10 +52,14 @@ INSTANTIATE_TEST_SUITE_P(
SoftMaxLayerTest::getTestCaseName
);

-const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
-InferenceEngine::SizeVector {1, 100, 1, 1},
-InferenceEngine::SizeVector {1, 3, 4, 3},
-InferenceEngine::SizeVector {2, 3, 4, 5},
+const std::vector<std::vector<std::pair<size_t, size_t>>> inputStaticShape4D = {
+{NULL_RANGE}
+};
+
+const std::vector<std::vector<InferenceEngine::SizeVector>> inputShapes4D = {
+{InferenceEngine::SizeVector {1, 100, 1, 1}},
+{InferenceEngine::SizeVector {1, 3, 4, 3}},
+{InferenceEngine::SizeVector {2, 3, 4, 5}},
};

const std::vector<size_t> axis4D = {0, 1, 2, 3};
@@ -62,6 +70,7 @@ const auto params4D = testing::Combine(
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Layout::NCHW),
testing::Values(InferenceEngine::Layout::ANY),
+testing::ValuesIn(inputStaticShape4D),
testing::ValuesIn(inputShapes4D),
testing::ValuesIn(axis4D),
testing::Values(CommonTestUtils::DEVICE_CPU),
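The new inputStaticShape vectors above use NULL_RANGE, whose definition is not shown in this diff. The sketch below assumes it is a test-utils sentinel pair meaning "no dynamic bounds"; the macro definition is illustrative only.

// Hedged sketch of the split between dynamic-shape descriptors and concrete
// target shapes. NULL_RANGE is assumed (not confirmed by the diff) to be a
// sentinel (min, max) pair marking a fully static case.
#include <cstddef>
#include <utility>
#include <vector>

#define NULL_RANGE std::pair<size_t, size_t>(0, 0)  // hypothetical definition

using DynShape     = std::vector<std::pair<size_t, size_t>>;  // (min, max) per dim
using TargetShapes = std::vector<std::vector<size_t>>;        // shapes to infer on

const DynShape staticCase = {NULL_RANGE};          // no dynamic dimensions
const TargetShapes shapes = {{1, 100}, {10, 10}};  // concrete softmax inputs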
@@ -92,6 +92,7 @@ protected:
ngraph::ResultVector outputs;
outputs.push_back(std::make_shared<ngraph::opset1::Result>(outputNode));
function = std::make_shared<ngraph::Function>(outputs, inputs);
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -71,6 +71,7 @@ protected:
auto activation = ngraph::builder::makeActivation(params[0], ngPrc, activationType, shapes.second, constantsValue);
activation->get_rt_info() = getCPUInfo();
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{activation}, params, "Activation");
+functionRefs = ngraph::clone_function(*function);
}

InferenceEngine::Precision netPrecision;

@@ -81,6 +81,7 @@ protected:
threshold = 1e-2;
function = (mode == "max" ? std::make_shared<ngraph::Function>(adapoolMax->outputs(), params, "AdaPoolMax") :
std::make_shared<ngraph::Function>(adapoolAvg->outputs(), params, "AdaPoolAvg"));
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -55,6 +55,7 @@ protected:
b2s->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(b2s)};
function = std::make_shared<ngraph::Function>(results, params, "BatchToSpace");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -56,6 +56,7 @@ protected:
auto concat = std::make_shared<ngraph::opset1::Concat>(paramOuts, axis);

function = makeNgraphFunction(ngPrc, params, concat, "concat");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -63,6 +63,7 @@ protected:
auto powerStatic = ngraph::builder::makeEltwise(inputs[0], inputs[1], nodeType);

function = std::make_shared<ngraph::Function>(powerStatic, ParameterVector{param}, "ConvertToPluginSpecificNode");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -93,9 +93,10 @@ protected:
isBias = (postOpMgrPtr->getFusedOpsNames() == "Add(PerChannel)" && selectedType != "jit_avx512_winograd");

convSpecificParams convParams;
-std::vector<size_t> inputShape;
+std::vector<std::pair<size_t, size_t>> inputDynamicShape;
+std::vector<std::vector<size_t>> inputShape;
auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
-std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = basicParamsSet;
+std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputDynamicShape, inputShape, targetDevice) = basicParamsSet;

if (inPrc == Precision::UNSPECIFIED) {
selectedType += std::string("_") + Precision(Precision::FP32).name();
@@ -119,6 +120,7 @@ protected:
padEnd, dilation, padType, convOutChannels);

function = makeNgraphFunction(ngPrc, inputParams, convolutionNode, "Convolution");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -190,8 +192,8 @@ const std::vector<SizeVector> strides2d = { {1, 1}, {2, 2} };
const std::vector<std::vector<ptrdiff_t>> padBegins2d = { {0, 0}, {1, 1} };
const std::vector<std::vector<ptrdiff_t>> padEnds2d = { {0, 0} };
const std::vector<SizeVector> dilations2d = { {1, 1}, {2, 2} };
-const std::vector<SizeVector> inputShapes2d = { {1, 64, 7, 7}, {1, 67, 7, 7} };
-const std::vector<SizeVector> inputShapesPlain2Blocked2d = { {1, 1, 7, 7}, {1, 2, 7, 7}, {1, 3, 7, 7} };
+const std::vector<std::vector<SizeVector>> inputShapes2d = { { {1, 64, 7, 7}, {1, 67, 7, 7} } };
+const std::vector<std::vector<SizeVector>> inputShapesPlain2Blocked2d = { { {1, 1, 7, 7}, {1, 2, 7, 7}, {1, 3, 7, 7} } };

/* ============= Convolution params (3D) ============= */
const std::vector<SizeVector> kernels3d = { {3, 3, 3}, {1, 1, 1} };
@@ -199,8 +201,8 @@ const std::vector<SizeVector> strides3d = { {1, 1, 1}, {2, 2, 2} };
const std::vector<std::vector<ptrdiff_t>> padBegins3d = { {0, 0, 0}, {1, 1, 1} };
const std::vector<std::vector<ptrdiff_t>> padEnds3d = { {0, 0, 0} };
const std::vector<SizeVector> dilations3d = { {1, 1, 1}, {2, 2, 2} };
-const std::vector<SizeVector> inputShapes3d = { {1, 64, 7, 7, 7}, {1, 67, 7, 7, 7} };
-const std::vector<SizeVector> inputShapesPlain2Blocked3d = { {1, 1, 7, 7, 7}, {1, 2, 7, 7, 7}, {1, 3, 7, 7, 7} };
+const std::vector<std::vector<SizeVector>> inputShapes3d = { { {1, 64, 7, 7, 7}, {1, 67, 7, 7, 7} } };
+const std::vector<std::vector<SizeVector>> inputShapesPlain2Blocked3d = { { {1, 1, 7, 7, 7}, {1, 2, 7, 7, 7}, {1, 3, 7, 7, 7} } };
/* ============= */

/* INSTANCES */
@@ -229,7 +231,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 12, 7, 7 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 12, 7, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)),
::testing::ValuesIn(fusingParamsSet),
@@ -245,7 +248,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_BF16, ConvolutionLayerCPUTest,
::testing::Values(Precision::BF16),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 12, 7, 7 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 12, 7, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)),
::testing::ValuesIn(fusingParamsSetBF16),
@@ -261,7 +265,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_I8, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 12, 7, 7 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 12, 7, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)),
::testing::Values(fusingSum),
@@ -293,7 +298,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 12, 7, 7, 7 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 12, 7, 7, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)),
::testing::ValuesIn(fusingParamsSet),
@@ -309,7 +315,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_BF16, ConvolutionLayerCPUTest,
::testing::Values(Precision::BF16),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 12, 7, 7, 7 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 12, 7, 7, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)),
::testing::ValuesIn(fusingParamsSetBF16),
@@ -325,7 +332,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_I8, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 12, 7, 7, 7 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 12, 7, 7, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)),
::testing::Values(fusingSum),
@@ -361,6 +369,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)),
@@ -377,6 +386,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_BF16, ConvolutionLayerCPUTest,
::testing::Values(Precision::BF16),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx512_2D_nspc})),
@@ -393,6 +403,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_I8, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)),
@@ -415,6 +426,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_PlainToBlocked_2D_FP32, ConvolutionLayerCPUT
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapesPlain2Blocked2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D_plain_to_blocked)),
@@ -431,6 +443,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_PlainToBlocked_2D_BF16, ConvolutionLayerCPUT
::testing::Values(Precision::BF16, Precision::FP32),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapesPlain2Blocked2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_2D})),
@@ -466,6 +479,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes3d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)),
@@ -482,6 +496,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_BF16, ConvolutionLayerCPUTest,
::testing::Values(Precision::BF16),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes3d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D, conv_avx512_3D_nspc})),
@@ -498,6 +513,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_I8, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes3d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)),
@@ -519,6 +535,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_PlainToBlocked_3D_FP32, ConvolutionLayerCPUT
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapesPlain2Blocked3d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D_plain_to_blocked)),
@@ -535,6 +552,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_PlainToBlocked_3D_BF16, ConvolutionLayerCPUT
::testing::Values(Precision::BF16, Precision::FP32),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapesPlain2Blocked3d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_3D})),
@@ -572,6 +590,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1x1_2D)),
@@ -588,6 +607,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_BF16, ConvolutionLayerCPUTest,
::testing::Values(Precision::BF16),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1, conv_avx512_2D_1x1_nspc})),
@@ -604,6 +624,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_I8, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1x1_2D)),
@@ -644,7 +665,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 2, 64, 7})),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 2, 64, 7 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)),
::testing::Values(fusingAddPerChannel),
@@ -679,6 +701,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Jit_Planar_2D_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes2d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Jit_Planar_2D)),
@@ -712,6 +735,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Jit_Planar_3D_FP32, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
::testing::ValuesIn(inputShapes3d),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Jit_Planar_3D)),
@@ -761,7 +785,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_winograd, ConvolutionLayerCPUTest,
::testing::Values(Precision::UNSPECIFIED),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
-::testing::Values(std::vector<size_t >({ 1, 16, 10, 10 })),
+::testing::Values(std::vector<std::pair<size_t, size_t>>(NULL_RANGE)),
+::testing::Values(std::vector<std::vector<size_t>>({{ 1, 16, 10, 10 }})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
::testing::ValuesIn(filterCPUInfoForDevice(std::vector<CPUSpecificParams>{conv_winograd})),
::testing::ValuesIn(fusingParamsSet),
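The conv test parameter tuple gains a dynamic-shape slot ahead of the (now multi-shape) input shapes. A hedged sketch of the widened unpacking; the tuple typedef itself is an assumption, with the precision and layout fields elided, but the field order follows the std::tie line in the diff above.

// Hedged sketch of the widened basicParamsSet; only the fields relevant to
// the shape change are kept.
#include <cstddef>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using DynShape = std::vector<std::pair<size_t, size_t>>;  // per-dim (min, max)
using Shapes   = std::vector<std::vector<size_t>>;        // concrete target shapes

using BasicParams = std::tuple<DynShape, Shapes, std::string /*targetDevice*/>;

void unpack(const BasicParams& basicParamsSet) {
    DynShape inputDynamicShape;  // new slot added by this commit
    Shapes inputShape;           // was a single std::vector<size_t>
    std::string targetDevice;
    std::tie(inputDynamicShape, inputShape, targetDevice) = basicParamsSet;
}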
@@ -95,6 +95,7 @@ protected:
}

function = makeNgraphFunction(ngPrc, inputParams, deconvolutionNode, "convolutionBackpropData");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -55,6 +55,7 @@ protected:
d2s->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(d2s)};
function = std::make_shared<ngraph::Function>(results, params, "DepthToSpace");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -100,6 +100,7 @@ protected:
auto eltwise = ngraph::builder::makeEltwise(input[0], secondaryInput, eltwiseType);

function = makeNgraphFunction(ngPrc, input, eltwise, "Eltwise");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -53,6 +53,7 @@ protected:
inputNode, ngraph::Shape(kernel), ngraph::Strides(strides), ngraph::Shape(rates), pad_type);
ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(extImgPatches)};
function = std::make_shared<ngraph::Function>(results, params, "ExtractImagePatches");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -123,6 +123,7 @@ protected:
fq->get_rt_info() = getCPUInfo();

function = std::make_shared<Function>(fq, params, "FakeQuantizeCPU");
+functionRefs = ngraph::clone_function(*function);
}

private:

@@ -60,6 +60,7 @@ protected:
auto activation = ngraph::builder::makeGatherElements(params[0], indicesShape, ngIPrc, axis);
activation->get_rt_info() = getCPUInfo();
function = std::make_shared<ngraph::Function>(ngraph::NodeVector{activation}, params, "GatherElements");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -104,6 +104,7 @@ protected:
ngraph::builder::makeGroupConvolution(paramOuts[0], ngPrc, kernel, stride, padBegin,
padEnd, dilation, padType, convOutChannels, numGroups));
function = makeNgraphFunction(ngPrc, params, groupConv, "groupConvolution");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -94,6 +94,7 @@ protected:
padEnd, dilation, padType, convOutChannels, numGroups, false, outputPadding));
}
function = makeNgraphFunction(ngPrc, params, groupConv, "groupConvolutionBackpropData");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -88,6 +88,7 @@ protected:
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(gru_cell->output(0))};

function = makeNgraphFunction(ngPrc, params, gru_cell, "gru_cell");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -137,6 +137,8 @@ protected:
bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function);
EXPECT_EQ(ti_found, false);
}
+
+functionRefs = ngraph::clone_function(*function);
}

void GenerateInputs() override {

@@ -107,6 +107,7 @@ protected:
selectedType += "BF16";
else
selectedType += netPrecision.name();
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -72,6 +72,7 @@ protected:
logicalNode->get_rt_info() = getCPUInfo();

function = std::make_shared<ngraph::Function>(logicalNode, inputs, "Logical");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -87,6 +87,7 @@ protected:
std::make_shared<ngraph::opset1::Result>(lstm_cell->output(1))};

function = makeNgraphFunction(ngPrc, params, lstm_cell, "lstm_cell");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -144,6 +144,8 @@ protected:
bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function);
EXPECT_EQ(ti_found, false);
}
+
+functionRefs = ngraph::clone_function(*function);
}

void GenerateInputs() override {

@@ -98,6 +98,7 @@ protected:
auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes<opset1::Parameter>(params));
auto matMul = builder::makeMatMul(paramOuts[0], matrixB, transpA, transpB);
function = makeNgraphFunction(ngPrec, params, matMul, cpuNodeType);
+functionRefs = ngraph::clone_function(*function);
checkFusingPosition = false;
}
};
@@ -71,6 +71,7 @@ protected:

threshold = 0.015f;
function = makeNgraphFunction(netPrc, param, mvn, "mvn");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -57,6 +57,7 @@ protected:
auto normalize = builder::makeNormalizeL2(paramOuts[0], axes, eps, eps_mode);

function = makeNgraphFunction(netPrc, params, normalize, "Normalize");
+functionRefs = ngraph::clone_function(*function);

selectedType = "unknown_" + std::string(inPrc.name());
threshold = 0.015f;

@@ -73,6 +73,7 @@ protected:

auto oneHot = std::make_shared<ngraph::opset5::OneHot>(inputParams.front(), depthConst, onConst, offConst, axis);
function = makeNgraphFunction(ngPrc, inputParams, oneHot, "OneHot");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -58,6 +58,7 @@ protected:
pad->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pad)};
function = std::make_shared<ngraph::Function>(results, params, "pad");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -86,6 +86,7 @@ protected:


function = makeNgraphFunction(ngPrc, params, pooling, "Pooling");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -97,6 +97,7 @@ protected:
threshold = 1e-2;
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(psroi)};
function = std::make_shared<ngraph::Function>(results, params, "PSROIPooling");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -74,6 +74,7 @@ protected:

const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(reduce)};
function = std::make_shared<ngraph::Function>(results, params, "Reduce");
+functionRefs = ngraph::clone_function(*function);
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override {
if (ngraph::helpers::ReductionType::Prod == reductionType) {

@@ -77,6 +77,7 @@ protected:
attributes.do_softmax, mask, attributes.start_axis, attributes.end_axis);

function = makeNgraphFunction(ngPrc, paramRegionYolo, region_yolo, "RegionYolo");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -82,6 +82,7 @@ protected:
WRB, hidden_size, activations, {}, {}, clip);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(rnn_cell)};
function = makeNgraphFunction(ngPrc, params, rnn_cell, "rnn_cell");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -117,6 +117,7 @@ protected:
bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function);
EXPECT_EQ(ti_found, false);
}
+functionRefs = ngraph::clone_function(*function);
}

void GenerateInputs() override {

@@ -144,6 +144,7 @@ protected:
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(roi_pooling)};

function = makeNgraphFunction(ngPrc, params, roi_pooling, "roi_pooling");
+functionRefs = ngraph::clone_function(*function);

selectedType += "_";
selectedType += netPrecision.name();

@@ -98,6 +98,7 @@ protected:
threshold = 1e-2;
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(roialign)};
function = std::make_shared<ngraph::Function>(results, params, "ROIAlign");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -63,6 +63,8 @@ protected:
}
selectedType.push_back('_');
selectedType += netPrecision.name();
+
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -67,6 +67,7 @@ protected:
const auto softMax = std::make_shared<ngraph::opset1::Softmax>(paramOuts.at(0), config.axis);

function = makeNgraphFunction(ngPrc, params, softMax, "SoftMax");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -60,6 +60,7 @@ protected:
s2b->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(s2b)};
function = std::make_shared<ngraph::Function>(results, params, "SpaceToBatch");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -55,6 +55,7 @@ protected:
d2s->get_rt_info() = getCPUInfo();
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(d2s)};
function = std::make_shared<ngraph::Function>(results, params, "SpaceToDepth");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -79,6 +79,7 @@ protected:
}
split->get_rt_info() = getCPUInfo();
function = std::make_shared<ngraph::Function>(results, params, "split");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -71,6 +71,7 @@ protected:

ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(ss)};
function = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -67,6 +67,7 @@ protected:
transpose->get_rt_info() = getCPUInfo();
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(transpose)};
function = std::make_shared<ngraph::Function>(results, params, "Transpose");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -35,6 +35,7 @@ public:
auto gather = std::make_shared<ngraph::opset3::Gather>(paramOuts[0], indicesNode, axisNode);
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(gather)};
function = std::make_shared<ngraph::Function>(results, params, "gather");
+functionRefs = ngraph::clone_function(*function);
}
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> CalculateRefs() override {
// Convert the second input constant precision to i64 to run the reference function

@@ -42,6 +42,7 @@ public:

ngraph::ResultVector results{std::make_shared<ngraph::opset8::Result>(concat)};
function = std::make_shared<ngraph::Function>(results, inputParams, "ConcatConstantInPlace");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -75,6 +75,7 @@ protected:
}

function = std::make_shared<ngraph::Function>(results, inputParams, "Conv3dReshape");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -109,6 +109,7 @@ void ConvConcatSubgraphTest::SetUp() {

ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(concat)};
function = std::make_shared<ngraph::Function>(results, inputParams, "convolutionConcat");
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConvConcatSubgraphTest, CompareWithRefs) {

@@ -59,6 +59,7 @@ protected:
}

function = makeNgraphFunction(element::f32, inputParams, pooling, "ConvPoolActiv");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -105,6 +105,7 @@ protected:
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(eltwiseOps[eltwiseOps.size() - 1])};
function = std::make_shared<ngraph::Function>(results, ngraphParam, "eltwise_chain");
}
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -29,6 +29,7 @@ void FuseMulAddAndEwSimpleTest::SetUp() {

std::tie(inputShape, inPrec) = this->GetParam();
CreateGraph();
+functionRefs = ngraph::clone_function(*function);
}

const auto mulAddAndEwSimpleCommonParams = ::testing::Combine(

@@ -76,6 +76,7 @@ protected:
quantizeIntervals[3]);
ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(quantize)};
function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{param}, "FuseScaleShiftAndQuantize");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -48,6 +48,7 @@ void FuseTransposeAndReorderTest::SetUp() {

std::tie(inputShape, inPrec) = this->GetParam();
CreateGraph();
+functionRefs = ngraph::clone_function(*function);
}

const auto fuseTransposeAndReorderCommonParams = ::testing::Combine(

@@ -33,6 +33,7 @@ protected:
auto eltwise = ngraph::builder::makeEltwise(input[0], secondaryInput, eltwiseType);

function = makeNgraphFunction(ngPrc, input, eltwise, "Eltwise");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -36,6 +36,7 @@ protected:

NodeVector results{postOpCandidate, secondConsumpt};
function = std::make_shared<ngraph::Function>(results, inputParams, "NotFusedConvSimpleOp");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -60,6 +60,7 @@ protected:
auto matMul = builder::makeMatMul(reshape, matrixB, false, transpB);

function = makeNgraphFunction(element::f32, inputParams, matMul, "ReshapeFC");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -29,6 +29,7 @@ protected:

NodeVector results{add1, add2};
function = std::make_shared<ngraph::Function>(results, inputParams, "TileWithTwoOutputEdges");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -131,6 +131,7 @@ protected:
auto relu2 = std::make_shared<ngraph::opset1::Relu>(conv);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(relu2)};
function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
+functionRefs = ngraph::clone_function(*function);
}

private:

@@ -96,6 +96,7 @@ protected:
relu->add_control_dependency(mem_w);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(relu)};
function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
+functionRefs = ngraph::clone_function(*function);
}

private:

@@ -72,6 +72,7 @@ class Eltwise4dBroadcast : public testing::WithParamInterface<eltwiseParams>,

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "Eltwise4dBroadcast");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -120,6 +121,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape3) };
function = std::make_shared<ngraph::Function>(results, params, "Eltwise4dMultipleInput");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -90,6 +90,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(maxpool)};
function = std::make_shared<ngraph::Function>(results, inputVector, "ActMaxpoolReordering");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -64,6 +64,7 @@ protected:
auto add = std::make_shared<ngraph::opset1::Add>(fakeQuantize1, fakeQuantize2);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(add)};
function = std::make_shared<ngraph::Function>(results, params, "BroadcastConstWithFq");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -118,6 +118,7 @@ protected:

auto result = std::make_shared<Result>(lastOp);
function = std::make_shared<Function>(ResultVector{result}, ParameterVector{input});
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -200,6 +200,7 @@ protected:

auto result = std::make_shared<Result>(lastOp);
function = std::make_shared<Function>(ResultVector{result}, ParameterVector{input});
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -199,6 +199,7 @@ protected:

auto result = std::make_shared<Result>(lastOp);
function = std::make_shared<Function>(ResultVector{result}, ParameterVector{input});
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -56,6 +56,7 @@ protected:

auto mul = ngraph::builder::makeEltwise(params[0], const_mult2, ngraph::helpers::EltwiseTypes::MULTIPLY);
function = std::make_shared<ngraph::Function>(mul, params, "EltwiseSplitOverChannelsPassTest");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -87,6 +87,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reluFQNode) };
function = std::make_shared<ngraph::Function>(results, inputVector, "FQActivation");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -99,6 +99,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset7::Result>(add3)};
function = std::make_shared<ngraph::Function>(results, params, "FQFusionWithMultipleWeights");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -41,6 +41,7 @@ protected:
auto mul3 = ngraph::builder::makeEltwise(mul2, fake3, ngraph::helpers::EltwiseTypes::ADD);
auto result = std::make_shared<ngraph::opset7::Result>(mul3);
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "fq_fusion_with_sigmoid");
+functionRefs = ngraph::clone_function(*function);
}
public:
static std::string getTestCaseName(const testing::TestParamInfo<fqFusionWithSigmoidParams> &obj) {

@@ -104,6 +104,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(maxpool)};
function = std::make_shared<ngraph::Function>(results, inputVector, "FQMaxPoolReorder");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -89,6 +89,7 @@ protected:
results.push_back(std::make_shared<ngraph::opset8::Result>(reluFQNode));
}
function = std::make_shared<ngraph::Function>(results, inputVector, "FQOutputsActivation");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -77,6 +77,7 @@ protected:
ngraph::ResultVector results{std::make_shared<ngraph::opset8::Result>(reshape2),
std::make_shared<ngraph::opset8::Result>(reshape3)};
function = std::make_shared<ngraph::Function>(results, params, "FQFusionWithMultipleWeights");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -80,6 +80,7 @@ protected:
results.push_back(std::make_shared<ngraph::opset1::Result>(relu));
}
function = std::make_shared<ngraph::Function>(results, params, "InsertCopyBeforeSelfConcat");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -91,6 +91,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(matmul)};
function = std::make_shared<ngraph::Function>(results, params, "InsertTransposeBeforeMatmul");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -100,6 +100,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape3)};
function = std::make_shared<ngraph::Function>(results, params, "InsertTransposeBetweenConvs");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -178,6 +179,7 @@ protected:

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape3)};
function = std::make_shared<ngraph::Function>(results, params, "InsertTransposeBetweenConvs");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -75,6 +75,7 @@ protected:
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, configuration, targetDevice) = this->GetParam();
function = T::createTopology(netPrecision);
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -102,6 +102,7 @@ class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface<

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "RemovePermutationPass");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -145,6 +146,7 @@ protected:
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(permute2) };

function = std::make_shared<ngraph::Function>(results, params, "RemovePermutationPass4DOutput");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -240,6 +242,7 @@ class RemovePermutationsWithPoolAndActTest : public testing::WithParamInterface<

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "RemovePermutationPass");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -330,6 +333,7 @@ class RemovePermutationsWithTwoConvTest : public testing::WithParamInterface<rem

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "RemovePermutationPass");
+functionRefs = ngraph::clone_function(*function);
}
};
@@ -428,6 +432,7 @@ class RemovePermutationsWithEltwiseTest : public testing::WithParamInterface<rem

ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape3) };
function = std::make_shared<ngraph::Function>(results, params, "RemovePermutationPass");
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -256,6 +256,7 @@ protected:
break;
}
}
+functionRefs = ngraph::clone_function(*function);
}
};

@@ -69,6 +69,7 @@ TEST_P(TrivialLoopTest, PassThroughBody) {
function = std::make_shared<ngraph::Function>(
ngraph::OutputVector {loop},
ngraph::ParameterVector {start});
+functionRefs = ngraph::clone_function(*function);

// Precalculated ref blobs
auto blob = make_blob_with_precision({iePrc, ieShape, InferenceEngine::TensorDesc::getLayoutByDims(ieShape)});
@@ -113,6 +114,7 @@ TEST_P(TrivialLoopTest, UnusedInputBody) {
function = std::make_shared<ngraph::Function>(
ngraph::OutputVector {loop},
ngraph::ParameterVector {start});
+functionRefs = ngraph::clone_function(*function);

// Precalculated ref blobs
auto blob = make_blob_with_precision({iePrc, ieShape, InferenceEngine::TensorDesc::getLayoutByDims(ieShape)});

@@ -141,6 +141,7 @@ void LoadNetworkCacheTestBase::SetUp() {
} catch (...) {
GTEST_SKIP();
}
+functionRefs = ngraph::clone_function(*function);

std::stringstream ss;
auto hash = std::hash<std::string>()(GetTestName());

@@ -61,18 +61,21 @@ void DetectNetworkBatch::LoadNetwork() {
TEST_P(DetectNetworkBatch, InferWithOneInput) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
function = ngraph::builder::subgraph::makeSplitConvConcat();
+functionRefs = ngraph::clone_function(*function);
Run();
};

TEST_P(DetectNetworkBatch, InferWithMultipleInputs_DiffDims) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
function = makeNNWithMultipleInputsDiffDims();
+functionRefs = ngraph::clone_function(*function);
Run();
};

TEST_P(DetectNetworkBatch, InferWithMultipleInputs_SameDims) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
function = makeNNWithMultipleInputsSameDims();
+functionRefs = ngraph::clone_function(*function);
Run();
};

@@ -71,6 +71,7 @@ namespace ConfigurationTestsDefinitions {
inputs.push_back(blob);
}
reference_inputs.push_back(inputs);
+functionRefs = ngraph::clone_function(*function);
reference_outputs.push_back(CalculateRefs());
}

@@ -91,6 +91,7 @@ void ProposalBehTest::SetUp() {

ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(proposal)};
function = std::make_shared<ngraph::Function>(results, params, "proposal");
+functionRefs = ngraph::clone_function(*function);
}

void ProposalBehTest::Run() {

@@ -106,6 +106,7 @@ void SetBlobTest::SetUp() {
auto cumSum = std::dynamic_pointer_cast<ngraph::opset4::CumSum>(ngraph::builder::makeCumSum(paramOuts[0], axisNode, false, false));
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(cumSum)};
function = std::make_shared<ngraph::Function>(results, params, "InferSetBlob");
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(SetBlobTest, CompareWithRefs) {

@@ -21,6 +21,7 @@ std::string MultipleAllocations::getTestCaseName(const testing::TestParamInfo<Mu
void MultipleAllocations::SetUp() {
std::tie(targetDevice, m_allocationsCount) = this->GetParam();
function = ngraph::builder::subgraph::makeSplitConvConcat();
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(MultipleAllocations, InferWorksCorrectAfterAllocations) {

@@ -14,6 +14,7 @@ void QueryNetworkTest::SetUp() {
auto& param = GetParam();
targetDevice = std::get<Plugin>(param);
function = std::get<Function>(param);
+functionRefs = ngraph::clone_function(*function);
cnnNetwork = InferenceEngine::CNNNetwork{function};
}
@@ -63,6 +63,7 @@ void AddTransformation::SetUp() {
param.fakeQuantize1, param.fakeQuantize2);

ngraph::pass::InitNodeInfo().run_on_function(function);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(AddTransformation, CompareWithRefImpl) {

@@ -41,6 +41,7 @@ void ClampTransformation::SetUp() {
param.fakeQuantize,
param.clampLowConst,
param.clampHighConst);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ClampTransformation, CompareWithRefImpl) {

@@ -52,6 +52,7 @@ void ConcatTransformation::SetUp() {
inputShape,
testValues.fqOnData1,
testValues.fqOnData2);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConcatTransformation, CompareWithRefImpl) {

@@ -54,6 +54,7 @@ void ConcatWithChildAndOutputTransformation::SetUp() {

function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithChildAndOutput(
netPrecision, inputShapes, param.fqOnData1, param.fqOnData2);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConcatWithChildAndOutputTransformation, CompareWithRefImpl) {

@@ -56,6 +56,7 @@ void ConcatWithDifferentChildrenTransformation::SetUp() {

function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChildren(
netPrecision, inputShapes, param.axis, param.fqOnData1, param.fqOnData2);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConcatWithDifferentChildrenTransformation, CompareWithRefImpl) {

@@ -72,6 +72,7 @@ void ConcatWithIntermediateTransformation::SetUp() {
transparentIntermediate,
{ 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
{ 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} });
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConcatWithIntermediateTransformation, CompareWithRefImpl) {

@@ -55,6 +55,7 @@ void ConcatWithNeighborsGraphTransformation::SetUp() {
{ 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 3.f} },
"concat",
"");
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConcatWithNeighborsGraphTransformation, CompareWithRefImpl) {

@@ -65,6 +65,7 @@ void ConcatWithSplitTransformation::SetUp() {
param.fqOnData1,
param.fqOnData2,
true);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConcatWithSplitTransformation, CompareWithRefImpl) {

@@ -65,6 +65,7 @@ void ConvolutionBackpropDataTransformation::SetUp() {
outputShape,
param.fakeQuantizeOnData,
weights);
+functionRefs = ngraph::clone_function(*function);
}

void ConvolutionBackpropDataTransformation::Run() {

@@ -53,6 +53,7 @@ void ConvolutionQDqTransformation::SetUp() {
param.convertOnWeights,
param.dequantizationOnWeights,
{});
+functionRefs = ngraph::clone_function(*function);
}

void ConvolutionQDqTransformation::Run() {

@@ -50,6 +50,7 @@ void ConvolutionTransformation::SetUp() {
// TODO: pass from test parameters
param.fakeQuantizeOnData,
param.fakeQuantizeOnWeights);
+functionRefs = ngraph::clone_function(*function);
}

void ConvolutionTransformation::Run() {

@@ -51,6 +51,7 @@ void ConvolutionWIthIncorrectWeightsTransformation::SetUp() {
param.fakeQuantizeOnWeights,
param.fakeQuantizeOnData,
param.isCorrect);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(ConvolutionWIthIncorrectWeightsTransformation, CompareWithRefImpl) {

@@ -64,6 +64,7 @@ void DepthToSpaceTransformation::SetUp() {
}

function = ngraph::builder::subgraph::DepthToSpaceFunction::getOriginal(precision, inputShape, mode, blockSize);
+functionRefs = ngraph::clone_function(*function);
}

TEST_P(DepthToSpaceTransformation, CompareWithRefImpl) {
Some files were not shown because too many files have changed in this diff.