[LPT] Eltwise Prod transformation fix (#1135)

* [LPT] Eltwise Prod transformation fix

* [LPT] ngraph Multiply lp transformation test
Vladimir Zinoviev, 2020-07-09 10:19:52 +03:00 (committed by GitHub)
parent 1f8a8ab33c
commit 77dc21cbdf
8 changed files with 422 additions and 0 deletions


@@ -202,9 +202,11 @@ void EltwiseTransformation::transform(TransformationContext& context, CNNLayer&
} else if (eltwiseLayer->_operation == EltwiseLayer::eOperation::Prod) {
for (size_t i = 0ul; i < emptyPathDequantizationScales.size(); ++i) {
fullPathDequantizationScales[i] = fullPathDequantizationScales[i] * emptyPathDequantizationScales[i];
fullPathDequantizationShifts[i] = fullPathDequantizationShifts[i] * emptyPathDequantizationScales[i];
}
CNNNetworkHelper::updateBlobs(*fullPathDequantizationLayer, "weights", fullPathDequantizationScales);
CNNNetworkHelper::updateBlobs(*fullPathDequantizationLayer, "biases", fullPathDequantizationShifts);
} else {
THROW_IE_EXCEPTION << "unexpected operation '" << eltwiseLayer->_operation << "'";
}
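
The Prod branch folds the empty-path dequantization scales into both the scales and the shifts of the full-path dequantization layer. Below is a minimal standalone sketch of that folding arithmetic; the variable names are illustrative only and not the LPT/CNNNetworkHelper API.

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
    // The full path carries an affine dequantization (scale + shift); the empty path
    // carries only scales. For an element-wise product, both the scale and the shift
    // on the full path must absorb the empty-path scale, which is what the shift
    // update in the hunk above ensures.
    std::vector<float> fullPathScales  = { 0.5f, 0.25f };
    std::vector<float> fullPathShifts  = { 1.0f, 2.0f };
    std::vector<float> emptyPathScales = { 2.0f, 4.0f };

    for (size_t i = 0ul; i < emptyPathScales.size(); ++i) {
        fullPathScales[i] = fullPathScales[i] * emptyPathScales[i];
        fullPathShifts[i] = fullPathShifts[i] * emptyPathScales[i];
    }

    assert(fullPathScales[0] == 1.0f && fullPathShifts[0] == 2.0f);
    assert(fullPathScales[1] == 1.0f && fullPathShifts[1] == 8.0f);
    return 0;
}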


@@ -0,0 +1,77 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "low_precision_transformations/multiply_transformation.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<LayerTestsDefinitions::MultiplyTestValues> params = {
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } },
false,
{InferenceEngine::Precision::I8}, {InferenceEngine::Precision::FP32, InferenceEngine::Precision::I8}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
false,
{InferenceEngine::Precision::I8}, {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f } },
true,
{InferenceEngine::Precision::I8}, {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
true,
{InferenceEngine::Precision::I8}, {InferenceEngine::Precision::I8, InferenceEngine::Precision::FP32}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } },
false,
{InferenceEngine::Precision::U8}, {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
false,
{InferenceEngine::Precision::U8}, {InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -127.f }, { 128.f } },
true,
{InferenceEngine::Precision::U8}, {InferenceEngine::Precision::U8, InferenceEngine::Precision::FP32}
},
{
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { -128.f }, { 127.f }, { -128.f }, { 127.f } },
{ 256ul, ngraph::Shape { 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f } },
true,
{InferenceEngine::Precision::U8}, {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32}
},
{ {}, {}, false }, { {}, {}, true },
};
INSTANTIATE_TEST_CASE_P(LPT, MultiplyTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(params)),
MultiplyTransformation::getTestCaseName);
} // namespace


@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <memory>
#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp"
#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp"
namespace LayerTestsDefinitions {
class MultiplyTestValues {
public:
ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize1;
ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2;
bool broadcast;
std::vector<InferenceEngine::Precision> precisionOnActivations;
std::vector<InferenceEngine::Precision> expectedPrecisions;
};
typedef std::tuple<
InferenceEngine::Precision,
InferenceEngine::SizeVector,
std::string,
MultiplyTestValues
> MultiplyTransformationParams;
class MultiplyTransformation :
public testing::WithParamInterface<MultiplyTransformationParams>,
public LayerTestsUtils::LayerTransformation {
public:
static std::string getTestCaseName(testing::TestParamInfo<MultiplyTransformationParams> obj);
protected:
void SetUp() override;
private:
void validate();
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,121 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "low_precision_transformations/multiply_transformation.hpp"
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/low_precision_transformations/multiply_function.hpp"
namespace LayerTestsDefinitions {
std::string MultiplyTransformation::getTestCaseName(testing::TestParamInfo<MultiplyTransformationParams> obj) {
InferenceEngine::Precision netPrecision;
InferenceEngine::SizeVector inputShape;
std::string targetDevice;
InferenceEngine::details::LayerTransformation::Params params;
MultiplyTestValues param;
std::tie(netPrecision, inputShape, targetDevice, param) = obj.param;
std::ostringstream result;
result << netPrecision.name() << "_" <<
CommonTestUtils::vec2str(inputShape) << "_" <<
targetDevice << "_" <<
param.precisionOnActivations <<
(param.broadcast ? "_broadcast" : "");
if (!param.fakeQuantize1.empty()) {
result << "_on_branch1_" <<
param.fakeQuantize1.inputLowValues[0] << "_" <<
param.fakeQuantize1.inputHighValues[0] << "_" <<
param.fakeQuantize1.outputLowValues[0] << "_" <<
param.fakeQuantize1.outputHighValues[0];
}
if (!param.fakeQuantize2.empty()) {
result << "_on_branch2_" <<
param.fakeQuantize2.inputLowValues[0] << "_" <<
param.fakeQuantize2.inputHighValues[0] << "_" <<
param.fakeQuantize2.outputLowValues[0] << "_" <<
param.fakeQuantize2.outputHighValues[0];
}
return result.str();
}
void MultiplyTransformation::SetUp() {
threshold = 0.01f;
InferenceEngine::Precision netPrecision;
InferenceEngine::SizeVector inputShape1;
InferenceEngine::details::LayerTransformation::Params params;
MultiplyTestValues param;
std::tie(netPrecision, inputShape1, targetDevice, param) = this->GetParam();
auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
InferenceEngine::SizeVector inputShape2 = inputShape1;
if (param.broadcast) {
inputShape2[2] = 1;
inputShape2[3] = 1;
}
function = ngraph::builder::subgraph::MultiplyFunction::getOriginal(
precision,
inputShape1,
inputShape2,
param.fakeQuantize1,
param.fakeQuantize2);
validate();
}
void MultiplyTransformation::validate() {
InferenceEngine::Precision netPrecision;
InferenceEngine::SizeVector inputShape;
std::string targetDevice;
InferenceEngine::details::LayerTransformation::Params params = LayerTestsUtils::LayerTransformationParamsFactory::createParams();
MultiplyTestValues param;
std::tie(netPrecision, inputShape, targetDevice, param) = this->GetParam();
params.precisionsOnActivations = param.precisionOnActivations;
const InferenceEngine::CNNNetwork network = transform(params);
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("Eltwise", outputLayer->type);
if (!((param.fakeQuantize1.empty()) || (param.fakeQuantize2.empty())) && params.updatePrecisions) {
const InferenceEngine::Precision precision1 =
InferenceEngine::details::CNNNetworkHelper::getParents(*outputLayer)[0]->outData[0]->getPrecision();
const InferenceEngine::Precision precision2 =
InferenceEngine::details::CNNNetworkHelper::getParents(*outputLayer)[1]->outData[0]->getPrecision();
EXPECT_EQ(precision1, param.expectedPrecisions[0]);
EXPECT_EQ(precision2, param.expectedPrecisions[1]);
}
IE_SUPPRESS_DEPRECATED_END
}
TEST_P(MultiplyTransformation, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,58 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <memory>
#include <ngraph/ngraph.hpp>
namespace ngraph {
namespace builder {
namespace subgraph {
class FakeQuantizeOnData {
public:
FakeQuantizeOnData();
FakeQuantizeOnData(
const size_t quantizationLevel,
const ngraph::Shape& constantShape,
const std::vector<float>& inputLowValues,
const std::vector<float>& inputHighValues,
const std::vector<float>& outputLowValues,
const std::vector<float>& outputHighValues);
virtual ~FakeQuantizeOnData();
bool isSigned() const;
virtual bool empty() const;
size_t quantizationLevel;
ngraph::Shape constantShape;
std::vector<float> inputLowValues;
std::vector<float> inputHighValues;
std::vector<float> outputLowValues;
std::vector<float> outputHighValues;
};
inline std::ostream& operator<<(std::ostream& os, const std::vector<float>& values) {
os << "{ ";
for (size_t i = 0; i < values.size(); ++i) {
os << values[i];
if (i != (values.size() - 1ul)) {
os << ", ";
}
}
os << " }";
return os;
}
inline std::ostream& operator<<(std::ostream& out, const FakeQuantizeOnData& data) {
return out << "_" << data.constantShape << "_" << data.outputLowValues << "_" << data.outputHighValues;
}
} // namespace subgraph
} // namespace builder
} // namespace ngraph
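
A small usage sketch for this helper, assuming only the header above is on the include path; the exact printed form of ngraph::Shape depends on ngraph's own stream operator.

#include <iostream>
#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp"

int main() {
    using ngraph::builder::subgraph::FakeQuantizeOnData;

    // Same constructor arguments as the signed entries in the test parameters above.
    const FakeQuantizeOnData signedFq(
        256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f });
    const FakeQuantizeOnData noFq;  // default-constructed: all members empty

    std::cout << std::boolalpha
        << signedFq.isSigned() << std::endl   // true: an output low value is negative
        << signedFq.empty() << std::endl      // false
        << noFq.empty() << std::endl;         // true
    std::cout << signedFq << std::endl;       // prints the constant shape and the output range
    return 0;
}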


@@ -0,0 +1,27 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <ngraph/ngraph.hpp>
#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp"
namespace ngraph {
namespace builder {
namespace subgraph {
class MultiplyFunction {
public:
static std::shared_ptr<ngraph::Function> getOriginal(
const ngraph::element::Type precision,
const ngraph::Shape& inputShape1,
const ngraph::Shape& inputShape2,
const FakeQuantizeOnData& fakeQuantize1,
const FakeQuantizeOnData& fakeQuantize2);
};
} // namespace subgraph
} // namespace builder
} // namespace ngraph


@@ -0,0 +1,49 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp"
#include <ngraph/opsets/opset1.hpp>
namespace ngraph {
namespace builder {
namespace subgraph {
FakeQuantizeOnData::FakeQuantizeOnData() {
quantizationLevel = 0ul;
}
FakeQuantizeOnData::FakeQuantizeOnData(
const size_t quantizationLevel,
const ngraph::Shape& constantShape,
const std::vector<float>& inputLowValues,
const std::vector<float>& inputHighValues,
const std::vector<float>& outputLowValues,
const std::vector<float>& outputHighValues) :
quantizationLevel(quantizationLevel),
constantShape(constantShape),
inputLowValues(inputLowValues),
inputHighValues(inputHighValues),
outputLowValues(outputLowValues),
outputHighValues(outputHighValues)
{}
FakeQuantizeOnData::~FakeQuantizeOnData() {}
bool FakeQuantizeOnData::isSigned() const {
return std::any_of(outputLowValues.begin(), outputLowValues.end(), [](const float value) { return value < 0.f; }) ||
std::any_of(outputHighValues.begin(), outputHighValues.end(), [](const float value) { return value < 0.f; });
}
bool FakeQuantizeOnData::empty() const {
return (quantizationLevel == 0ul) &&
constantShape.empty() &&
inputLowValues.empty() &&
inputHighValues.empty() &&
outputLowValues.empty() &&
outputHighValues.empty();
}
} // namespace subgraph
} // namespace builder
} // namespace ngraph


@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/low_precision_transformations/multiply_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
namespace subgraph {
std::shared_ptr<ngraph::Function> MultiplyFunction::getOriginal(
const ngraph::element::Type precision,
const ngraph::Shape& inputShape1,
const ngraph::Shape& inputShape2,
const FakeQuantizeOnData& fq1,
const FakeQuantizeOnData& fq2) {
const auto input1 = std::make_shared<ngraph::opset1::Parameter>(precision, ngraph::Shape(inputShape1));
const auto fakeQuantize1 = fq1.empty() ?
nullptr :
ngraph::builder::makeFakeQuantize(
input1, precision, fq1.quantizationLevel, fq1.constantShape,
fq1.inputLowValues, fq1.inputHighValues, fq1.outputLowValues, fq1.outputHighValues);
const auto input2 = std::make_shared<ngraph::opset1::Parameter>(precision, ngraph::Shape(inputShape2));
const auto fakeQuantize2 = fq2.empty() ?
nullptr :
ngraph::builder::makeFakeQuantize(
input2, precision, fq2.quantizationLevel, fq2.constantShape,
fq2.inputLowValues, fq2.inputHighValues, fq2.outputLowValues, fq2.outputHighValues);
const auto multiply = std::make_shared<ngraph::opset1::Multiply>(
fq1.empty() ? input1 : fakeQuantize1,
fq2.empty() ? input2 : fakeQuantize2);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(multiply) };
return std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{ input1, input2 }, "MultiplyTransformation");
}
} // namespace subgraph
} // namespace builder
} // namespace ngraph
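
A minimal sketch of how the builder above can be invoked, mirroring what MultiplyTransformation::SetUp does for the broadcast case; the shapes and checks below are illustrative and not part of the commit.

#include <memory>
#include <ngraph/ngraph.hpp>
#include "ngraph_functions/low_precision_transformations/multiply_function.hpp"

int main() {
    using ngraph::builder::subgraph::FakeQuantizeOnData;
    using ngraph::builder::subgraph::MultiplyFunction;

    const FakeQuantizeOnData fq1(256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { 0.f }, { 255.f });
    const FakeQuantizeOnData fq2(256ul, ngraph::Shape{ 1, 1, 1, 1 }, { 0.f }, { 255.f }, { -128.f }, { 127.f });

    // Second input spatially collapsed to 1x1, as SetUp does when broadcast is requested.
    const std::shared_ptr<ngraph::Function> function = MultiplyFunction::getOriginal(
        ngraph::element::f32,
        ngraph::Shape{ 1, 3, 16, 16 },
        ngraph::Shape{ 1, 3, 1, 1 },
        fq1,
        fq2);

    // Two Parameter inputs and a single Multiply result are expected.
    return (function->get_parameters().size() == 2 && function->get_results().size() == 1) ? 0 : 1;
}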