[CPU] Runtime precision for execution graph (#3886)
parent e82018221c
commit 2bfc941cf1
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -137,3 +137,15 @@ std::string MKLDNNExtensionUtils::getReorderArgs(const InferenceEngine::TensorDe
     }
     return inArgs + "_" + outArgs;
 }
+
+InferenceEngine::Precision MKLDNNExtensionUtils::getMaxPrecision(std::vector<InferenceEngine::Precision> precisions) {
+    if (!precisions.empty()) {
+        std::sort(precisions.begin(), precisions.end(),
+                  [](const InferenceEngine::Precision &lhs, const InferenceEngine::Precision &rhs) {
+                      return lhs.size() > rhs.size();
+                  });
+        return precisions[0];
+    }
+
+    return InferenceEngine::Precision::UNSPECIFIED;
+}
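Note: getMaxPrecision picks the widest type by byte size, so a mixed FP32/U8 input set resolves to FP32. A standalone sketch of the same rule, with a stand-in struct replacing InferenceEngine::Precision (all names here are illustrative only, not the plugin's API):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Prec { std::string name; std::size_t size; };  // stand-in for InferenceEngine::Precision

    Prec getMaxPrecision(std::vector<Prec> precisions) {
        if (!precisions.empty()) {
            // Sort by byte width, widest first, and take the front element.
            std::sort(precisions.begin(), precisions.end(),
                      [](const Prec& lhs, const Prec& rhs) { return lhs.size > rhs.size; });
            return precisions[0];
        }
        return {"UNSPECIFIED", 0};  // nothing known yet
    }

    int main() {
        std::cout << getMaxPrecision({{"U8", 1}, {"FP32", 4}}).name << "\n";  // prints FP32
    }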
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -23,6 +23,7 @@ public:
     static InferenceEngine::TensorDesc getUninitTensorDesc(const InferenceEngine::TensorDesc& desc);
     static bool initTensorsAreEqual(const InferenceEngine::TensorDesc &desc1, const InferenceEngine::TensorDesc &desc2);
     static std::string getReorderArgs(const InferenceEngine::TensorDesc &parentDesc, const InferenceEngine::TensorDesc &childDesc);
+    static InferenceEngine::Precision getMaxPrecision(std::vector<InferenceEngine::Precision> precisions);
 };

 } // namespace MKLDNNPlugin
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -281,6 +281,8 @@ std::map<std::string, std::string> extract_node_metadata(const MKLDNNNodePtr &no

     serialization_info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex());

+    serialization_info[ExecGraphInfoSerialization::RUNTIME_PRECISION] = node->getRuntimePrecision().name();
+
     return serialization_info;
 }
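Note: once extract_node_metadata adds this entry, the value surfaces in the execution graph as the rt_info attribute "runtimePrecision"; the serialized XML below and the new tests both rely on it. A sketch of reading it back, assuming an execGraph obtained via GetExecGraphInfo().getFunction() as in the test added later in this commit:

    // Iterate exec-graph nodes and print each one's reported runtime precision.
    for (const auto& op : execGraph->get_ops()) {
        const auto& rtInfo = op->get_rt_info();
        const auto it = rtInfo.find("runtimePrecision");  // ExecGraphInfoSerialization::RUNTIME_PRECISION
        if (it != rtInfo.end()) {
            const auto prc = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
            std::cout << op->get_friendly_name() << ": " << prc->get() << std::endl;
        }
    }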
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -1130,6 +1130,45 @@ void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops) {
     THROW_IE_EXCEPTION << "Fusing of " << this->getType() << " operation is not implemented";
 }

+std::vector<InferenceEngine::Precision> MKLDNNNode::getInputPrecisions() const {
+    std::vector<InferenceEngine::Precision> inputPrecisions;
+    for (size_t i = 0; i < getParentEdges().size(); i++) {
+        auto parentEdge = getParentEdgeAt(i);
+        if (parentEdge && parentEdge->getStatus() == MKLDNNEdge::Status::Validated) {
+            inputPrecisions.emplace_back(MKLDNNExtensionUtils::DataTypeToIEPrecision((parentEdge->getMemoryPtr()->GetDataType())));
+        }
+    }
+    return inputPrecisions;
+}
+
+std::vector<InferenceEngine::Precision> MKLDNNNode::getOutputPrecisions() const {
+    std::vector<InferenceEngine::Precision> outputPrecisions;
+    for (size_t i = 0; i < getChildEdges().size(); i++) {
+        auto childEdge = getChildEdgeAt(i);
+        if (childEdge && childEdge->getStatus() == MKLDNNEdge::Status::Validated) {
+            outputPrecisions.emplace_back(MKLDNNExtensionUtils::DataTypeToIEPrecision((childEdge->getMemoryPtr()->GetDataType())));
+        }
+    }
+    return outputPrecisions;
+}
+
+InferenceEngine::Precision MKLDNNNode::getRuntimePrecision() const {
+    // The base implementation considers precision only on the data path and
+    // assumes it is placed on the 0-th port (which is true for almost all layers)
+    InferenceEngine::Precision runtimePrecision = Precision::UNSPECIFIED;
+    auto inputPrecisions = getInputPrecisions();
+    if (!inputPrecisions.empty()) {
+        runtimePrecision = inputPrecisions[0];
+    } else {
+        auto outputPrecisions = getOutputPrecisions();
+        if (!outputPrecisions.empty()) {
+            runtimePrecision = outputPrecisions[0];
+        }
+    }
+
+    return runtimePrecision;
+}
+
 MKLDNNNode* MKLDNNNode::NodesFactory::create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
         const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
     MKLDNNNode *newNode = nullptr;
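Note: the base rule is input port 0 first, then output port 0, then UNSPECIFIED. A toy model of that fallback order (plain strings instead of Precision, purely illustrative):

    #include <string>
    #include <vector>

    std::string resolveRuntimePrecision(const std::vector<std::string>& inputPrecisions,
                                        const std::vector<std::string>& outputPrecisions) {
        if (!inputPrecisions.empty())
            return inputPrecisions[0];    // data path, port 0
        if (!outputPrecisions.empty())
            return outputPrecisions[0];   // e.g. an Input node with no parents
        return "UNSPECIFIED";             // edges not validated yet
    }

    // resolveRuntimePrecision({"BF16", "FP32"}, {"FP32"}) == "BF16"
    // resolveRuntimePrecision({}, {"FP32"})               == "FP32"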
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -517,6 +517,12 @@ public:
         return profiling;
     }

+    /**
+     * @brief Returns runtime node precision based on input/output data types or the data type used for computations
+     * @return Runtime node precision
+     */
+    virtual InferenceEngine::Precision getRuntimePrecision() const;
+
 protected:
     // TODO: It is necessary only in order to avoid modifications of cnnLayers and original topology
     std::vector<MKLDNNDims> outDims;
@@ -591,6 +597,18 @@ protected:

     InferenceEngine::Layout getWeightsLayoutByDims(InferenceEngine::SizeVector dims, bool isGrouped);

+    /**
+     * @brief Auxiliary function to get node input precisions
+     * @return Vector of precisions based on information from node input edges. Returns an empty vector if the edges are not initialized yet.
+     */
+    virtual std::vector<InferenceEngine::Precision> getInputPrecisions() const;
+
+    /**
+     * @brief Auxiliary function to get node output precisions
+     * @return Vector of precisions based on information from node output edges. Returns an empty vector if the edges are not initialized yet.
+     */
+    virtual std::vector<InferenceEngine::Precision> getOutputPrecisions() const;
+
 private:
     std::vector<MKLDNNEdgeWeakPtr> parentEdges;
     std::vector<MKLDNNEdgeWeakPtr> childEdges;
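Note: the per-node headers below all follow the same pattern: override the virtual getRuntimePrecision and, where useful, combine the helpers declared above with getMaxPrecision. A sketch with a hypothetical subclass (MyNode is not part of the commit; the surrounding plugin headers are assumed):

    class MyNode : public MKLDNNNode {
    public:
        InferenceEngine::Precision getRuntimePrecision() const override {
            // Widest validated input precision, as Concat/Gemm/DeformableConvolution do below.
            return MKLDNNExtensionUtils::getMaxPrecision(getInputPrecisions());
        }
    };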
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -653,4 +653,8 @@ void MKLDNNConcatNode::execute(mkldnn::stream strm) {
     }
 }

+InferenceEngine::Precision MKLDNNConcatNode::getRuntimePrecision() const {
+    return MKLDNNExtensionUtils::getMaxPrecision(getInputPrecisions());
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -26,6 +26,8 @@ public:

     bool isOptimized() const;

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
 private:
     size_t axis = 0;
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -913,4 +913,18 @@ const mkldnn::memory& MKLDNNConvolutionNode::getBias() const {
     return baseInputsNumber > 2 ? getParentEdgeAt(2)->getMemory().GetPrimitive() : internalBlobMemory[1]->GetPrimitive();
 }

+InferenceEngine::Precision MKLDNNConvolutionNode::getRuntimePrecision() const {
+    std::vector<InferenceEngine::Precision> inputPrecisions;
+    // Don't take bias precision into account
+    size_t inputsNumLimit = 2;
+    for (size_t i = 0; i < std::min(getParentEdges().size(), inputsNumLimit); i++) {
+        auto parentEdge = getParentEdgeAt(i);
+        if (parentEdge && parentEdge->getStatus() == MKLDNNEdge::Status::Validated) {
+            inputPrecisions.emplace_back(MKLDNNExtensionUtils::DataTypeToIEPrecision((parentEdge->getMemoryPtr()->GetDataType())));
+        }
+    }
+
+    return MKLDNNExtensionUtils::getMaxPrecision(inputPrecisions);
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNConvolutionNode, Convolution);
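Note: Convolution (and likewise Deconvolution and FullyConnected below) caps the scan at the first two parent edges, data and weights, so an FP32 bias cannot lift an otherwise 8-bit node's reported precision. A standalone check of that rule, using byte widths in place of Precision values (U8=1, I8=1, FP32=4; names are illustrative):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    int widestOfFirst(std::vector<int> sizes, std::size_t limit) {
        sizes.resize(std::min(sizes.size(), limit));  // ignore bias and anything beyond
        return sizes.empty() ? 0 : *std::max_element(sizes.begin(), sizes.end());
    }

    int main() {
        // data=U8(1), weights=I8(1), bias=FP32(4): bias is ignored, widest is 1 byte.
        assert(widestOfFirst({1, 1, 4}, 2) == 1);
    }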
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -49,6 +49,8 @@ public:

     bool canBeExecutedInInt8();

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
     std::vector<uint8_t> inputZeroPoints;
     std::vector<float> weightsZeroPoints;
     std::vector<int32_t> outputCompensation;
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -283,4 +283,18 @@ const mkldnn::memory& MKLDNNDeconvolutionNode::getWeights() const {
     return getParentEdges().size() > 1 ? getParentEdgeAt(1)->getMemory().GetPrimitive() : internalBlobMemory[0]->GetPrimitive();
 }

+InferenceEngine::Precision MKLDNNDeconvolutionNode::getRuntimePrecision() const {
+    std::vector<InferenceEngine::Precision> inputPrecisions;
+    // Don't take bias precision into account
+    size_t inputsNumLimit = 2;
+    for (size_t i = 0; i < std::min(getParentEdges().size(), inputsNumLimit); i++) {
+        auto parentEdge = getParentEdgeAt(i);
+        if (parentEdge && parentEdge->getStatus() == MKLDNNEdge::Status::Validated) {
+            inputPrecisions.emplace_back(MKLDNNExtensionUtils::DataTypeToIEPrecision((parentEdge->getMemoryPtr()->GetDataType())));
+        }
+    }
+
+    return MKLDNNExtensionUtils::getMaxPrecision(inputPrecisions);
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNDeconvolutionNode, Deconvolution);
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -36,6 +36,8 @@ public:
     MKLDNNMemoryDesc getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) override;
     MKLDNNMemoryDesc getDstMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) override;

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
 private:
     bool withGroups = false;
     bool isDW = false;
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -279,4 +279,9 @@ void MKLDNNDeformableConvolutionNode::initDescriptor(const InferenceEngine::Laye
     }
     selectedPD->getConfig() = rightConfig;
 }

+InferenceEngine::Precision MKLDNNDeformableConvolutionNode::getRuntimePrecision() const {
+    return MKLDNNExtensionUtils::getMaxPrecision(getInputPrecisions());
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNDeformableConvolutionNode, DeformableConvolution);
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -28,6 +28,8 @@ public:
         return false;
     }

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
 private:
     bool withBiases = false;
     bool isDW = false;
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -1788,4 +1788,17 @@ bool MKLDNNEltwiseNode::canFuse(const MKLDNNNodePtr& node) const {
     return false;
 }

+InferenceEngine::Precision MKLDNNEltwiseNode::getRuntimePrecision() const {
+    std::vector<InferenceEngine::Precision> inputPrecisions;
+    // Don't take the precision of constant (fused) inputs into account
+    for (size_t i = 0; i < getParentEdges().size(); i++) {
+        auto parentEdge = getParentEdgeAt(i);
+        if (parentEdge && parentEdge->getStatus() == MKLDNNEdge::Status::Validated && !parentEdge->getParent()->isConstant()) {
+            inputPrecisions.emplace_back(MKLDNNExtensionUtils::DataTypeToIEPrecision((parentEdge->getMemoryPtr()->GetDataType())));
+        }
+    }
+
+    return MKLDNNExtensionUtils::getMaxPrecision(inputPrecisions);
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNEltwiseNode, Eltwise);
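Note: Eltwise scans all parents rather than the first two, but skips constant ones, so blobs attached for fused operations do not affect the result. A toy version of that filter (stand-in types, illustrative only):

    #include <string>
    #include <vector>

    struct Edge { std::string precision; bool parentIsConstant; };

    std::vector<std::string> nonConstantPrecisions(const std::vector<Edge>& parents) {
        std::vector<std::string> result;
        for (const auto& e : parents)
            if (!e.parentIsConstant)      // mirrors !parentEdge->getParent()->isConstant()
                result.push_back(e.precision);
        return result;
    }

    // nonConstantPrecisions({{"U8", false}, {"FP32", true}}) == {"U8"}:
    // the constant FP32 input does not influence the reported precision.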
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -130,6 +130,8 @@ public:

     void appendPostOps(mkldnn::post_ops& ops) override;

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
 private:
     void init() override;
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -438,4 +438,18 @@ const mkldnn::memory& MKLDNNFullyConnectedNode::getBias() const {
     return baseInputsNumber > 2 ? getParentEdgeAt(2)->getMemory().GetPrimitive() : internalBlobMemory[1]->GetPrimitive();
 }

+InferenceEngine::Precision MKLDNNFullyConnectedNode::getRuntimePrecision() const {
+    std::vector<InferenceEngine::Precision> inputPrecisions;
+    // Don't take bias precision into account
+    size_t inputsNumLimit = 2;
+    for (size_t i = 0; i < std::min(getParentEdges().size(), inputsNumLimit); i++) {
+        auto parentEdge = getParentEdgeAt(i);
+        if (parentEdge && parentEdge->getStatus() == MKLDNNEdge::Status::Validated) {
+            inputPrecisions.emplace_back(MKLDNNExtensionUtils::DataTypeToIEPrecision((parentEdge->getMemoryPtr()->GetDataType())));
+        }
+    }
+
+    return MKLDNNExtensionUtils::getMaxPrecision(inputPrecisions);
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNFullyConnectedNode, FullyConnected);
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -37,6 +37,8 @@ public:
     const mkldnn::memory& getWeights() const;
     const mkldnn::memory& getBias() const;

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
 protected:
     std::shared_ptr<mkldnn::primitive_attr> initPrimitiveAttr();
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -322,4 +322,9 @@ int MKLDNNGemmNode::getMaxBatch() {
         return outDims[0][0];
     return 0;
 }

+InferenceEngine::Precision MKLDNNGemmNode::getRuntimePrecision() const {
+    return MKLDNNExtensionUtils::getMaxPrecision(getInputPrecisions());
+}
+
 REG_MKLDNN_PRIM_FOR(MKLDNNGemmNode, Gemm);
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -24,6 +24,8 @@ public:
     bool created() const override;
     int getMaxBatch() override;

+    InferenceEngine::Precision getRuntimePrecision() const override;
+
 private:
     float alpha = 1.0f;
     float beta = 1.0f;
@@ -1,4 +1,4 @@
-// Copyright (C) 2018-2020 Intel Corporation
+// Copyright (C) 2018-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -369,7 +369,7 @@ void MKLDNNQuantizeNode::initSupportedPrimitiveDescriptors() {
         if (i == 0) {
             dataConfig.desc = MKLDNNMemoryDesc(getParentEdgeAt(i)->getDims(), inputDataType, fmt);
         } else {
-            dataConfig.desc = MKLDNNMemoryDesc(getParentEdgeAt(i)->getDims(), inputDataType, MKLDNNMemory::GetPlainFormat(getParentEdgeAt(i)->getDims()));
+            dataConfig.desc = MKLDNNMemoryDesc(getParentEdgeAt(i)->getDims(), memory::f32, MKLDNNMemory::GetPlainFormat(getParentEdgeAt(i)->getDims()));
         }
         config.inConfs.push_back(dataConfig);
     }
@@ -0,0 +1,32 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "execution_graph_tests/runtime_precision.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace ExecutionGraphTests;
+using namespace InferenceEngine;
+
+namespace {
+
+const std::vector<RuntimePrecisionSpecificParams> params = {
+    /* {Ngraph function builder, function input precisions, expected runtime precisions} */
+    {makeEltwiseFunction, {Precision::FP32, Precision::FP32}, {{"Eltwise", Precision::FP32}}},
+    {makeEltwiseFunction, {Precision::U16, Precision::U16}, {{"Eltwise", Precision::I32}}},
+    {makeEltwiseFunction, {Precision::BF16, Precision::BF16}, {{"Eltwise", Precision::BF16}}},
+    {makeEltwiseFunction, {Precision::U8, Precision::U8}, {{"Eltwise", Precision::U8}}},
+    {makeEltwiseFunction, {Precision::I8, Precision::I8}, {{"Eltwise", Precision::I8}}},
+    {makeFakeQuantizeReluFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"Relu", Precision::U8}}},
+    {makeFakeQuantizeReluFunction, {Precision::U8}, {{"FakeQuantize", Precision::U8}, {"Relu", Precision::U8}}},
+    {makeFakeQuantizeBinaryConvolutionFunction, {Precision::FP32}, {{"FakeQuantize", Precision::FP32}, {"BinaryConvolution", Precision::BIN}}},
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_ExecGraph, ExecGraphRuntimePrecision,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(params),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        ExecGraphRuntimePrecision::getTestCaseName);
+}  // namespace
@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2019-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -44,7 +44,7 @@ const std::vector<LayerTestsDefinitions::ConvolutionTransformationParam> params
         { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
         false,
         "output_original",
-        "I8"
+        "U8"
     },
     {
         { 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -12.7f }, { 12.8f } },
@@ -52,7 +52,7 @@ const std::vector<LayerTestsDefinitions::ConvolutionTransformationParam> params
         { 255ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 254.f }, { -12.7f }, { 12.7f } },
         false,
         "output_original",
-        "I8"
+        "U8"
     },
     {
         { 256ul, ngraph::Shape { 1 }, { 0.f }, { 255.f }, { -18.7f }, { 18.8f } },
@@ -60,7 +60,7 @@ const std::vector<LayerTestsDefinitions::ConvolutionTransformationParam> params
         { 255ul, ngraph::Shape { 1 }, { 0.f }, { 254.f }, { -18.7f }, { 18.7f } },
         false,
         "output_original",
-        "I8"
+        "U8"
     },
 };
@@ -1,4 +1,4 @@
-// Copyright (C) 2020 Intel Corporation
+// Copyright (C) 2020-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

@@ -20,7 +20,7 @@ std::vector<MatMulWithConstantTransformationTestValues> testValues = {
         std::vector<float>(32 * 10, 1.f),
         { 256ul, ngraph::Shape({}), {-12.8f}, {12.7f}, {-12.8f}, {12.7f} },
         "matMul/1",
-        "I8"
+        "U8"
     }
 };
@@ -0,0 +1,45 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+
+namespace ExecutionGraphTests {
+
+std::shared_ptr<ngraph::Function> makeEltwiseFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions);
+std::shared_ptr<ngraph::Function> makeFakeQuantizeReluFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions);
+std::shared_ptr<ngraph::Function> makeFakeQuantizeBinaryConvolutionFunction(const std::vector<InferenceEngine::Precision> &inputPrecisions);
+
+struct RuntimePrecisionSpecificParams {
+    std::function<std::shared_ptr<ngraph::Function>(const std::vector<InferenceEngine::Precision>& inputPrecisions)> makeFunction;
+    std::vector<InferenceEngine::Precision> inputPrecisions;
+    std::map<std::string, InferenceEngine::Precision> expectedPrecisions;
+};
+
+using ExecGraphRuntimePrecisionParams = std::tuple<
+    RuntimePrecisionSpecificParams,
+    std::string // Target Device
+>;
+
+class ExecGraphRuntimePrecision : public testing::WithParamInterface<ExecGraphRuntimePrecisionParams>,
+                                  public CommonTestUtils::TestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<ExecGraphRuntimePrecisionParams> obj);
+    std::string targetDevice;
+    std::shared_ptr<ngraph::Function> fnPtr;
+    std::map<std::string, InferenceEngine::Precision> expectedPrecisions;
+protected:
+    void SetUp() override;
+
+    void TearDown() override;
+};
+
+}  // namespace ExecutionGraphTests
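Note: extending this suite means adding a builder with the same signature next to the three above and referencing it from a params row in the instantiation file. A hypothetical declaration, not part of the commit:

    // Hypothetical: a fourth topology builder for additional runtime-precision cases.
    std::shared_ptr<ngraph::Function> makeMyTopologyFunction(
            const std::vector<InferenceEngine::Precision>& inputPrecisions);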
@@ -139,7 +139,7 @@ const char expected_serialized_model[] = R"V0G0N(
 <net name="addmul_abc" version="10">
     <layers>
         <layer id="0" name="C" type="Input">
-            <data execOrder="3" execTimeMcs="not_executed" originalLayersNames="C" outputLayouts="x" outputPrecisions="FP32" primitiveType="unknown_FP32" />
+            <data execOrder="3" execTimeMcs="not_executed" originalLayersNames="C" outputLayouts="x" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32" />
             <output>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -147,7 +147,7 @@ const char expected_serialized_model[] = R"V0G0N(
             </output>
         </layer>
         <layer id="1" name="B" type="Input">
-            <data execOrder="1" execTimeMcs="not_executed" originalLayersNames="B" outputLayouts="x" outputPrecisions="FP32" primitiveType="unknown_FP32" />
+            <data execOrder="1" execTimeMcs="not_executed" originalLayersNames="B" outputLayouts="x" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32"/>
             <output>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -155,7 +155,7 @@ const char expected_serialized_model[] = R"V0G0N(
             </output>
         </layer>
         <layer id="2" name="A" type="Input">
-            <data execOrder="0" execTimeMcs="not_executed" originalLayersNames="A" outputLayouts="x" outputPrecisions="FP32" primitiveType="unknown_FP32" />
+            <data execOrder="0" execTimeMcs="not_executed" originalLayersNames="A" outputLayouts="x" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32"/>
             <output>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -163,7 +163,7 @@ const char expected_serialized_model[] = R"V0G0N(
             </output>
         </layer>
         <layer id="3" name="add_node2" type="Eltwise">
-            <data execOrder="2" execTimeMcs="not_executed" originalLayersNames="add_node2" outputLayouts="x" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" />
+            <data execOrder="2" execTimeMcs="not_executed" originalLayersNames="add_node2" outputLayouts="x" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" runtimePrecision="FP32"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -179,7 +179,7 @@ const char expected_serialized_model[] = R"V0G0N(
             </output>
         </layer>
         <layer id="4" name="add_node1" type="Eltwise">
-            <data execOrder="4" execTimeMcs="not_executed" originalLayersNames="add_node1,add_node3,add_node4" outputLayouts="x" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" />
+            <data execOrder="4" execTimeMcs="not_executed" originalLayersNames="add_node1,add_node3,add_node4" outputLayouts="x" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" runtimePrecision="FP32"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -201,7 +201,7 @@ const char expected_serialized_model[] = R"V0G0N(
             </output>
         </layer>
         <layer id="5" name="Y" type="Eltwise">
-            <data execOrder="5" execTimeMcs="not_executed" originalLayersNames="Y" outputLayouts="x" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" />
+            <data execOrder="5" execTimeMcs="not_executed" originalLayersNames="Y" outputLayouts="x" outputPrecisions="FP32" primitiveType="jit_avx512_FP32" runtimePrecision="FP32"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -217,7 +217,7 @@ const char expected_serialized_model[] = R"V0G0N(
             </output>
         </layer>
         <layer id="6" name="out_Y" type="Output">
-            <data execOrder="6" execTimeMcs="not_executed" originalLayersNames="" outputLayouts="undef" outputPrecisions="FP32" primitiveType="unknown_FP32" />
+            <data execOrder="6" execTimeMcs="not_executed" originalLayersNames="" outputLayouts="undef" outputPrecisions="FP32" primitiveType="unknown_FP32" runtimePrecision="FP32"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -0,0 +1,136 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+#include <tuple>
+#include <vector>
+#include <unordered_set>
+#include <string>
+#include <functional>
+
+#include <ie_core.hpp>
+#include <ngraph/function.hpp>
+#include <exec_graph_info.hpp>
+#include <ngraph/variant.hpp>
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+
+#include "execution_graph_tests/runtime_precision.hpp"
+
+namespace ExecutionGraphTests {
+
+std::shared_ptr<ngraph::Function> makeEltwiseFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions) {
+    IE_ASSERT(inputPrecisions.size() == 2);
+
+    auto inputs = ngraph::builder::makeParams(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[0]), {{1, 16, 5, 4}});
+    auto secondaryInput = ngraph::builder::makeInputLayer(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[1]),
+                                                          ngraph::helpers::InputLayerType::PARAMETER, {{1, 16, 5, 4}});
+    inputs.push_back(std::dynamic_pointer_cast<ngraph::opset3::Parameter>(secondaryInput));
+
+    auto eltwise = ngraph::builder::makeEltwise(inputs[0], secondaryInput, ngraph::helpers::EltwiseTypes::ADD);
+    eltwise->set_friendly_name("Eltwise");
+
+    auto function = std::make_shared<ngraph::Function>(eltwise, inputs, "EltwiseWithTwoDynamicInputs");
+    return function;
+}
+
+std::shared_ptr<ngraph::Function> makeFakeQuantizeReluFunction(const std::vector<InferenceEngine::Precision>& inputPrecisions) {
+    IE_ASSERT(inputPrecisions.size() == 1);
+
+    auto inputs = ngraph::builder::makeParams(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[0]), {{1, 16, 5, 4}});
+
+    auto inputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {0});
+    auto inputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {255});
+    auto outputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {0});
+    auto outputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {255});
+    auto fakeQuantize = std::make_shared<ngraph::opset1::FakeQuantize>(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 256);
+    fakeQuantize->set_friendly_name("FakeQuantize");
+
+    auto relu = std::make_shared<ngraph::op::Relu>(fakeQuantize);
+    relu->set_friendly_name("Relu");
+
+    auto function = std::make_shared<ngraph::Function>(relu, inputs, "FakeQuantizeRelu");
+    return function;
+}
+
+std::shared_ptr<ngraph::Function> makeFakeQuantizeBinaryConvolutionFunction(const std::vector<InferenceEngine::Precision> &inputPrecisions) {
+    IE_ASSERT(inputPrecisions.size() == 1);
+
+    auto inputs = ngraph::builder::makeParams(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecisions[0]), {{1, 16, 5, 4}});
+
+    auto inputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {1});
+    auto inputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {1});
+    auto outputLowNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {0});
+    auto outputHighNode = ngraph::builder::makeConstant<float>(ngraph::element::f32, {1, 1, 1, 1}, {1});
+    auto fakeQuantize = std::make_shared<ngraph::opset1::FakeQuantize>(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 2);
+    fakeQuantize->set_friendly_name("FakeQuantize");
+
+    auto binConv = ngraph::builder::makeBinaryConvolution(fakeQuantize, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 32, 0);
+    binConv->set_friendly_name("BinaryConvolution");
+
+    auto function = std::make_shared<ngraph::Function>(binConv, inputs, "FakeQuantizeBinaryConvolution");
+    return function;
+}
+
+std::string ExecGraphRuntimePrecision::getTestCaseName(testing::TestParamInfo<ExecGraphRuntimePrecisionParams> obj) {
+    RuntimePrecisionSpecificParams specificParams;
+    std::string targetDevice;
+    std::tie(specificParams, targetDevice) = obj.param;
+
+    std::ostringstream result;
+    result << "Function=" << specificParams.makeFunction(specificParams.inputPrecisions)->get_friendly_name() << "_";
+    result << "InPrcs=" << CommonTestUtils::vec2str(specificParams.inputPrecisions) << "_";
+    result << "targetDevice=" << targetDevice;
+
+    return result.str();
+}
+
+void ExecGraphRuntimePrecision::SetUp() {
+    RuntimePrecisionSpecificParams specificParams;
+    std::tie(specificParams, targetDevice) = this->GetParam();
+    expectedPrecisions = specificParams.expectedPrecisions;
+    fnPtr = specificParams.makeFunction(specificParams.inputPrecisions);
+}
+
+void ExecGraphRuntimePrecision::TearDown() {
+}
+
+TEST_P(ExecGraphRuntimePrecision, CheckRuntimePrecision) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    InferenceEngine::CNNNetwork cnnNet(fnPtr);
+    auto ie = PluginCache::get().ie();
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
+    const auto execGraph = execNet.GetExecGraphInfo().getFunction();
+
+    auto ops = execGraph->get_ops();
+    for (auto expectedPrc : expectedPrecisions) {
+        auto opIter = std::find_if(ops.begin(), ops.end(), [&expectedPrc](std::shared_ptr<ngraph::Node> op) {
+            return op->get_friendly_name() == expectedPrc.first;
+        });
+
+        if (opIter == ops.end())
+            FAIL() << "Execution graph doesn't contain node with name: " << expectedPrc.first;
+
+        const auto& rtInfo = opIter->get()->get_rt_info();
+        const auto& rtIter = rtInfo.find("runtimePrecision");
+
+        if (rtIter == rtInfo.end())
+            FAIL() << "Runtime precision is not found for node: " << opIter->get()->get_friendly_name();
+
+        const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(rtIter->second);
+
+        if (expectedPrc.second.name() != rtPrecisionPtr->get())
+            FAIL() << "`" << expectedPrc.first << "' node runtime precision mismatch: actual = " <<
+                rtPrecisionPtr->get() << ", expected = " << expectedPrc.second.name();
+    }
+
+    fnPtr.reset();
+};
+
+}  // namespace ExecutionGraphTests
@@ -1,4 +1,4 @@
-// Copyright (C) 2019-2020 Intel Corporation
+// Copyright (C) 2019-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 #include <fstream>
@@ -449,19 +449,12 @@ std::string LayerTestsCommon::getRuntimePrecision(const std::string& layerName)
             const auto& rtInfo = op->get_rt_info();
             const auto& it = rtInfo.find("runtimePrecision");

-            if (it == rtInfo.end()) {
-                // WA: CPU impl doesn't contain runtimePrecision attribute
-                const auto& it1 = rtInfo.find("primitiveType");
-                const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it1->second);
-                const std::string kernel = rtPrecisionPtr->get();
-                const std::string kernelPrecision = kernel.substr(kernel.find_last_of("_") + 1ul);
-                return kernelPrecision;
-            } else {
+            IE_ASSERT(it != rtInfo.end()) << "Runtime precision is not found for node: " << name;

             const auto rtPrecisionPtr = ngraph::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
             return rtPrecisionPtr->get();
-            }
         }
     }

     return "";
 }
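Note: the deleted fallback parsed the precision out of the primitiveType suffix because CPU nodes previously carried no runtimePrecision attribute; with this commit every node reports one, so the helper asserts instead. What the removed workaround did, as a standalone snippet:

    #include <iostream>
    #include <string>

    int main() {
        const std::string kernel = "jit_avx512_FP32";  // a primitiveType value
        // Take the suffix after the last '_' as the precision name.
        const std::string kernelPrecision = kernel.substr(kernel.find_last_of('_') + 1);
        std::cout << kernelPrecision << "\n";           // prints FP32
    }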