[CPU] fix set up config for bin conv fused (#608)

Maxim Andronov 2020-06-09 09:59:29 +03:00 committed by GitHub
parent e91453e006
commit d8e82d56d2
7 changed files with 218 additions and 30 deletions
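
In short: the fused binary convolution node previously assumed exactly one data input. The change below introduces a baseInputsNumber member that, for a BinaryConvolution layer, is taken from the original layer's input count and is then used to compute the expected number of input edges, to locate the weight and bias edges of a fused depthwise convolution, and to gate the extra depthwise weight/bias input configs on whether the fused convolution node itself consumes them as inputs. A new functional test checks that the executed-graph BinaryConvolution node still reports a single input after fusing. A minimal sketch of the edge-count logic (the helper name and parameters are illustrative, not the plugin's API):

    // Illustrative sketch only, not plugin code: how the expected number of input
    // edges is derived once the original layer's input count is taken into account.
    int expectedInputEdges(bool isBinaryConvolutionLayer, int layerInputCount, bool fusedWithEltwise) {
        int baseInputsNumber = 1;                    // default, as set in the node constructor
        if (isBinaryConvolutionLayer)
            baseInputsNumber = layerInputCount;      // getCnnLayer()->insData.size() in the plugin
        return baseInputsNumber + (fusedWithEltwise ? 1 : 0);
    }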

@@ -35,10 +35,13 @@ using namespace InferenceEngine;
MKLDNNBinaryConvolutionNode::MKLDNNBinaryConvolutionNode(const InferenceEngine::CNNLayerPtr& layer,
                                                         const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
-        : MKLDNNNode(layer, eng, cache) {
+        : MKLDNNNode(layer, eng, cache), baseInputsNumber(1) {
    internalBlobDesc.emplace_back([&](primitive_desc_iterator &primitive_desc_it, size_t idx) -> MKLDNNMemoryDesc {
        return MKLDNNMemoryDesc(primitive_desc_it.weights_primitive_desc(0).desc());
    });
+
+    if (getCnnLayer()->type == "BinaryConvolution")
+        baseInputsNumber = getCnnLayer().get()->insData.size();
}

void MKLDNNBinaryConvolutionNode::getSupportedDescriptors() {
@@ -135,7 +138,7 @@ void MKLDNNBinaryConvolutionNode::getSupportedDescriptors() {
#endif
    }

-    int expectedInputEdgesNum = 1 + isFusedWith(Eltwise);
+    int expectedInputEdgesNum = baseInputsNumber + isFusedWith(Eltwise);
    for (int i = 0; i < fusedWith.size(); i++) {
        auto *convolutionNode = dynamic_cast<MKLDNNConvolutionNode *>(fusedWith[i].get());
        if (convolutionNode) {
@@ -319,8 +322,8 @@ void MKLDNNBinaryConvolutionNode::setPostOps(mkldnn::primitive_attr &attr, bool
            ops.append_dw_conv(dw_conv_ih, dw_conv_iw, dw_conv_kernel[Y_AXIS], dw_conv_kernel[X_AXIS],
                               dw_conv_strides[Y_AXIS], dw_conv_strides[X_AXIS],
                               mkldnn::memory::convert_to_c(dw_conv_in_dt),
-                              static_cast<float *>(getParentEdgeAt(1)->getMemory().GetData()),
-                              static_cast<float *>(getParentEdgeAt(2)->getMemory().GetData()));
+                              static_cast<float *>(getParentEdgeAt(baseInputsNumber + 0)->getMemory().GetData()),
+                              static_cast<float *>(getParentEdgeAt(baseInputsNumber + 1)->getMemory().GetData()));
            }
        } else {
            ops.append_dw_conv(dw_conv_ih, dw_conv_iw, dw_conv_kernel[Y_AXIS], dw_conv_kernel[X_AXIS],
@@ -363,6 +366,15 @@ void MKLDNNBinaryConvolutionNode::initSupportedPrimitiveDescriptors() {
        }

        if (withDWConv) {
+            int convNumInput = 1;
+            for (auto &node : fusedWith) {
+                auto* convolutionNode = dynamic_cast<MKLDNNConvolutionNode *>(node.get());
+                if (convolutionNode) {
+                    convNumInput = convolutionNode->getBaseIntputsNumber();
+                    break;
+                }
+            }
+            if (convNumInput > 1) {
            auto weightsPrc = memory::data_type::f32;
            auto biasPrc = memory::data_type::f32;
@@ -380,6 +392,7 @@ void MKLDNNBinaryConvolutionNode::initSupportedPrimitiveDescriptors() {
            dataConfig.desc = MKLDNNMemoryDesc(dwBiasesDims, biasPrc, memory::format::x);
            config.inConfs.push_back(dataConfig);
        }
+        }

        std::vector<memory::format> outFormats;
        for (size_t i = 0; i < desc.outputNumbers(); i++) {
@@ -481,6 +494,15 @@ void MKLDNNBinaryConvolutionNode::initDescriptor(const InferenceEngine::LayerCon
        }

        if (withDWConv) {
+            int convNumInput = 1;
+            for (auto &node : fusedWith) {
+                auto* convolutionNode = dynamic_cast<MKLDNNConvolutionNode *>(node.get());
+                if (convolutionNode) {
+                    convNumInput = convolutionNode->getBaseIntputsNumber();
+                    break;
+                }
+            }
+            if (convNumInput > 1) {
            auto weightsPrc = memory::data_type::f32;
            auto biasPrc = memory::data_type::f32;
@@ -498,6 +520,7 @@ void MKLDNNBinaryConvolutionNode::initDescriptor(const InferenceEngine::LayerCon
            dataConfig.desc = MKLDNNMemoryDesc(dwBiasesDims, biasPrc, memory::format::x);
            cfg.inConfs.push_back(dataConfig);
        }
+        }

        for (size_t j = 0; j < desc.outputNumbers(); j++) {
            InferenceEngine::DataConfig dataConfig;

@@ -51,6 +51,8 @@ private:
    mkldnn::memory::data_type dw_conv_in_dt = mkldnn::memory::data_type::data_undef;
    std::vector<MKLDNNMemoryPtr> PostOpsIntBlobMemory;

+    int baseInputsNumber;
+
    float pad_value = 0.f;
};

@@ -0,0 +1,11 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "execution_graph_tests/num_inputs_fusing_bin_conv.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

INSTANTIATE_TEST_CASE_P(inputsNumFusingBinConv, ExecGraphInputsFusingBinConv, ::testing::Values(CommonTestUtils::DEVICE_CPU),
                        ExecGraphInputsFusingBinConv::getTestCaseName);

@@ -0,0 +1,25 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <string>

#include "ngraph_functions/builders.hpp"
#include "functional_test_utils/layer_test_utils.hpp"

namespace LayerTestsDefinitions {

class ExecGraphInputsFusingBinConv : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::string> {
public:
    static std::string getTestCaseName(testing::TestParamInfo<std::string> obj);
    std::shared_ptr<ngraph::Function> fnPtr;
    std::string targetDevice;

protected:
    void SetUp() override;
    void TearDown() override;
};

} // namespace LayerTestsDefinitions

@@ -0,0 +1,72 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include <ie_core.hpp>

#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "execution_graph_tests/num_inputs_fusing_bin_conv.hpp"

#include "network_serializer.h"

namespace LayerTestsDefinitions {

std::string ExecGraphInputsFusingBinConv::getTestCaseName(testing::TestParamInfo<std::string> obj) {
    std::string targetDevice = obj.param;
    return "targetDevice=" + targetDevice;
}

void ExecGraphInputsFusingBinConv::SetUp() {
    const InferenceEngine::SizeVector inputShapes = { 1, 16, 30, 30}, binConvKernelSize = {2, 2}, convKernelSize = {3, 3};
    const size_t numOutChannels = 16, numGroups = 16;
    const std::vector<size_t > strides = {1, 1}, dilations = {1, 1};
    const std::vector<ptrdiff_t> padsBegin = {1, 1}, padsEnd = {0, 0};
    const ngraph::op::PadType paddingType = ngraph::op::PadType::EXPLICIT;
    const float padValue = 1.0;
    targetDevice = this->GetParam();

    auto params = ngraph::builder::makeParams(ngraph::element::f32, {inputShapes});
    auto binConv = ngraph::builder::makeBinaryConvolution(params[0], binConvKernelSize, strides, padsBegin, padsEnd, dilations, paddingType, numOutChannels,
                                                          padValue);
    auto conv = ngraph::builder::makeGroupConvolution(binConv, ngraph::element::f32, convKernelSize, strides, padsBegin, padsEnd, dilations, paddingType,
                                                      numOutChannels, numGroups);
    auto biasNode = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, std::vector<size_t>{16, 1, 1});
    auto add = std::make_shared<ngraph::opset1::Add>(conv, biasNode);
    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(add)};
    fnPtr = std::make_shared<ngraph::Function>(results, params, "BinConvFuseConv");
}

void ExecGraphInputsFusingBinConv::TearDown() {
    if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
        PluginCache::get().reset();
    }
}

TEST_P(ExecGraphInputsFusingBinConv, CheckNumInputsInBinConvFusingWithConv) {
    InferenceEngine::CNNNetwork cnnNet(fnPtr);
    auto ie = PluginCache::get().ie();
    auto execNet = ie->LoadNetwork(cnnNet, targetDevice);

    IE_SUPPRESS_DEPRECATED_START
    InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
    std::vector<InferenceEngine::CNNLayerPtr> nodes;
    ASSERT_NO_THROW(nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo));
    for (auto &node : nodes) {
        if (node->type == "BinaryConvolution") {
            std::string originalLayersNames = node->params["originalLayersNames"];
            ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
            ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
            ASSERT_EQ(node->insData.size(), 1);
        }
    }
    IE_SUPPRESS_DEPRECATED_END

    fnPtr.reset();
};

} // namespace LayerTestsDefinitions

@@ -98,6 +98,17 @@ std::shared_ptr<ngraph::Node> makeGroupConvolutionBackpropData(const ngraph::Out
                                                               bool addBiases = false,
                                                               const std::vector<float> &biasesWeights = {});

+std::shared_ptr<ngraph::Node> makeBinaryConvolution(const ngraph::Output<Node> &in,
+                                                    const std::vector<size_t> &filterSize,
+                                                    const std::vector<size_t> &strides,
+                                                    const std::vector<ptrdiff_t> &padsBegin,
+                                                    const std::vector<ptrdiff_t> &padsEnd,
+                                                    const std::vector<size_t> &dilations,
+                                                    const op::PadType &autoPad,
+                                                    size_t numOutChannels,
+                                                    float padValue,
+                                                    const std::vector<int8_t> &filterWeihgts = {});
+
std::shared_ptr<ngraph::Node> makeSplit(const ngraph::Output<Node> &in,
                                        const element::Type &type,
                                        size_t numSplits,
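
For reference, the test above calls the new builder roughly as follows; the values mirror its SetUp(), and the snippet is illustrative rather than additional committed code:

    // Illustrative usage of the new makeBinaryConvolution builder (values taken from the test above).
    auto params  = ngraph::builder::makeParams(ngraph::element::f32, {{1, 16, 30, 30}});
    auto binConv = ngraph::builder::makeBinaryConvolution(params[0],
                                                          {2, 2},                         // filterSize
                                                          {1, 1},                         // strides
                                                          {1, 1},                         // padsBegin
                                                          {0, 0},                         // padsEnd
                                                          {1, 1},                         // dilations
                                                          ngraph::op::PadType::EXPLICIT,  // autoPad
                                                          16,                             // numOutChannels
                                                          1.0f);                          // padValue; filter weights default to generated values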

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <memory>

#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/data_utils.hpp"

namespace ngraph {
namespace builder {

std::shared_ptr<Node> makeBinaryConvolution(const Output<Node> &in,
                                            const std::vector<size_t> &filterSize,
                                            const std::vector<size_t> &strides,
                                            const std::vector<ptrdiff_t> &padsBegin,
                                            const std::vector<ptrdiff_t> &padsEnd,
                                            const std::vector<size_t> &dilations,
                                            const op::PadType &autoPad,
                                            size_t numOutChannels,
                                            float padValue,
                                            const std::vector<int8_t> &filterWeihgts) {
    auto shape = in.get_shape();
    std::vector<size_t> filterWeightsShape = {numOutChannels, shape[1]};
    filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
    auto filterWeightsNode = std::make_shared<op::Constant>(element::u1, filterWeightsShape);
    size_t byteNum = ngraph::shape_size(filterWeightsShape) / sizeof(int8_t);
    int8_t *buffer = const_cast<int8_t *>(filterWeightsNode->get_data_ptr<int8_t>());
    if (filterWeihgts.size() == 0) {
        std::vector<int8_t> weihgts = NGraphFunctions::Utils::generateVector<element::Type_t::i8>(byteNum);
        for (size_t i = 0; i < byteNum; i++)
            buffer[i] = weihgts[i];
    } else {
        for (size_t i = 0; i < byteNum; i++)
            buffer[i] = filterWeihgts[i];
    }
    auto conv = std::make_shared<opset1::BinaryConvolution>(in, filterWeightsNode, strides, padsBegin, padsEnd, dilations,
                                                            opset1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT, padValue, autoPad);
    return conv;
}

}  // namespace builder
}  // namespace ngraph