[LPT] Added kernel verification to CPU-plugin tests for MatMul & Convolution (#2870)

Vladislav Golubev 2020-11-25 20:57:47 +03:00 committed by GitHub
parent 1174ef64a4
commit 94c6aefbfe
8 changed files with 77 additions and 11 deletions


@@ -26,26 +26,34 @@ const std::vector<LayerTestsDefinitions::ConvolutionTransformationParam> params
         { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         false,
         {},
-        false
+        false,
+        "output",
+        "FP32"
     },
     {
         {},
         false,
         { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
-        false
+        false,
+        "output",
+        "FP32"
     },
     {
         { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },
         false,
         { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
-        false
+        false,
+        "output_original",
+        "I8"
     },
     {
-        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.75f }, { 6.375f } },
+        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } },
         true,
         { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
-        false
-    }
+        false,
+        "output_original",
+        "I8"
+    },
 };

 const std::vector<ngraph::Shape> shapes = {

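For readability, here is the first entry above with its positional initializers annotated against the ConvolutionTransformationParam fields declared in the header diff further down. The first field's name is not visible in that excerpt, so fakeQuantizeOnData is an assumption; the comments are illustrative, not part of the commit:

    // Field-by-field reading of one test entry (annotation only):
    {
        { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 25.5f } },  // fakeQuantizeOnData (assumed name)
        false,       // asymmetricQuantizationOnData
        {},          // fakeQuantizeOnWeights (empty: weights are not quantized)
        false,       // asymmetricQuantizationOnWeights
        "output",    // layerName: friendly name looked up in the execution graph
        "FP32"       // expectedKernelType: runtime precision the test expects
    },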

@@ -18,7 +18,9 @@ std::vector<MatMulWithConstantTransformationTestValues> testValues = {
         { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} },
         { 32, 10 },
         std::vector<float>(32 * 10, 1.f),
-        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} }
+        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
+        "matMul/1",
+        "I8"
     }
 };

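The same kind of annotation for the MatMul entry, using the MatMulWithConstantTransformationTestValues fields shown in the header diff below; the role of the first initializer as the FakeQuantize on data is an assumption, since that field is not in the excerpt:

    // Field-by-field reading (annotation only):
    {
        { 256ul, ngraph::Shape({}), {0.f}, {25.5f}, {0.f}, {25.5f} },        // FakeQuantize on data (assumed)
        { 32, 10 },                                                          // weightsConstShape
        std::vector<float>(32 * 10, 1.f),                                    // weightsConstValues
        { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },  // fqOnWeights
        "matMul/1",                                                          // layerName
        "I8"                                                                 // expectedKernelType
    }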

@@ -19,6 +19,8 @@ public:
     bool asymmetricQuantizationOnData;
     ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights;
     bool asymmetricQuantizationOnWeights;
+    std::string layerName;
+    std::string expectedKernelType;
 };

 typedef std::tuple<
@@ -38,6 +40,8 @@ public:
 protected:
     void SetUp() override;
+    void Run() override;
+
 private:
     void validateNGraph();
 };


@@ -21,6 +21,8 @@ public:
     ngraph::Shape weightsConstShape;
     std::vector<float> weightsConstValues;
     ngraph::builder::subgraph::FakeQuantizeOnWeights fqOnWeights;
+    std::string layerName;
+    std::string expectedKernelType;
 };

 typedef std::tuple<
@@ -37,6 +39,8 @@ public:
 protected:
     void SetUp() override;
+    void Run() override;
 };

 } // namespace LayerTestsDefinitions


@@ -1,4 +1,4 @@
 // Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -54,6 +54,14 @@ void ConvolutionTransformation::SetUp() {
     validateNGraph();
 }

+void ConvolutionTransformation::Run() {
+    LayerTestsCommon::Run();
+
+    const auto params = std::get<4>(GetParam());
+    const auto actualType = getRuntimePrecision(params.layerName);
+    EXPECT_EQ(actualType, params.expectedKernelType);
+}
+
 void ConvolutionTransformation::validateNGraph() {
     ngraph::element::Type netPrecision;
     ngraph::Shape inputShape;

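Run() pulls the param struct out of the test's parameter tuple with std::get<4>(GetParam()). The tuple typedef is truncated in the header excerpt above ("typedef std::tuple<"), so the layout below is a sketch consistent with that index, not the verbatim declaration; only the index-4 element is confirmed by the call:

    // Assumed parameter-tuple layout for the convolution test:
    typedef std::tuple<
        ngraph::element::Type,           // network precision
        ngraph::Shape,                   // input shape
        std::string,                     // target device
        ngraph::pass::low_precision::LayerTransformation::Params,
        ConvolutionTransformationParam   // index 4: carries layerName / expectedKernelType
    > ConvolutionTransformationParams;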

@@ -1,4 +1,4 @@
 // Copyright (C) 2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -71,6 +71,15 @@ void MatMulWithConstantTransformation::SetUp() {
     ngraph::pass::InitNodeInfo().run_on_function(function);
 }

+void MatMulWithConstantTransformation::Run() {
+    LayerTestsCommon::Run();
+
+    const auto params = std::get<2>(GetParam());
+    const auto actualType = getRuntimePrecision(params.layerName);
+    EXPECT_EQ(actualType, params.expectedKernelType);
+}
+
 TEST_P(MatMulWithConstantTransformation, CompareWithRefImpl) {
     Run();
 };


@@ -1,4 +1,4 @@
 // Copyright (C) 2019-2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 #include <fstream>
@@ -7,6 +7,8 @@
 #include <transformations/op_conversions/convert_space_to_batch.hpp>
 #include <ngraph/opsets/opset.hpp>
 #include <pugixml.hpp>
+#include "ngraph/variant.hpp"

 #include "layer_test_utils.hpp"
 #include "plugin_config.hpp"
@@ -408,6 +410,33 @@ void LayerTestsCommon::Validate() {
     Compare(expectedOutputs, actualOutputs);
 }

+std::string LayerTestsCommon::getRuntimePrecision(const std::string& layerName) {
+    const auto execGraph = executableNetwork.GetExecGraphInfo();
+    const auto function = execGraph.getFunction();
+
+    for (const auto& op : function->get_ops()) {
+        const auto name = op->get_friendly_name();
+        if (name == layerName) {
+            const auto& rtInfo = op->get_rt_info();
+            const auto& it = rtInfo.find("runtimePrecision");
+            if (it == rtInfo.end()) {
+                // WA: CPU impl doesn't contain runtimePrecision attribute
+                const auto& it1 = rtInfo.find("primitiveType");
+                const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it1->second);
+                const std::string kernel = rtPrecisionPtr->get();
+                const std::string kernelPrecision = kernel.substr(kernel.find_last_of("_") + 1ul);
+                return kernelPrecision;
+            } else {
+                const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
+                return rtPrecisionPtr->get();
+            }
+        }
+    }
+
+    return "";
+}
+
 void LayerTestsCommon::SetRefMode(RefMode mode) {
     refMode = mode;
 }

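When the runtimePrecision attribute is absent, getRuntimePrecision() falls back to primitiveType and parses the precision out of the primitive name by taking everything after the last underscore. A minimal standalone sketch of that parsing; the "jit_avx512_I8" sample is an assumed primitive name in the typical CPU-plugin style, not a value taken from this commit:

    #include <iostream>
    #include <string>

    int main() {
        // Hypothetical primitiveType value; real names depend on the CPU plugin build.
        const std::string kernel = "jit_avx512_I8";
        // Same extraction as getRuntimePrecision(): suffix after the last '_'.
        // If no '_' exists, find_last_of returns npos and npos + 1 wraps to 0,
        // so the whole string comes back unchanged.
        const std::string kernelPrecision = kernel.substr(kernel.find_last_of("_") + 1ul);
        std::cout << kernelPrecision << "\n";  // prints: I8
        return 0;
    }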

@@ -1,4 +1,4 @@
 // Copyright (C) 2019-2020 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -149,6 +149,8 @@ public:
     std::map<std::string, std::string>& GetConfiguration();

+    std::string getRuntimePrecision(const std::string& layerName);
+
 protected:
     LayerTestsCommon();