Added memory format attribute (#3395)

* [CPU] Added memory format attribute
Maxim Andronov 2020-12-24 10:25:53 +03:00 committed by GitHub
parent 6d320d7162
commit cdf7f5eff5
11 changed files with 177 additions and 29 deletions

View File

@@ -113,6 +113,7 @@ file(GLOB SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/utils/rt_info/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/*.cpp
${LAYERS}
${OS_SPECIFIC_SRC}
@@ -124,6 +125,7 @@ file(GLOB HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.h
${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.hpp
${CMAKE_CURRENT_SOURCE_DIR}/utils/*.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/utils/rt_info/*.hpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/*.h
${CMAKE_CURRENT_SOURCE_DIR}/nodes/*.hpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/*.h

View File

@@ -49,6 +49,7 @@
#include "nodes/common/cpu_memcpy.h"
#include "mkldnn_debug.h"
#include "utils/rt_info/memory_formats_attribute.hpp"
using namespace mkldnn;
using namespace MKLDNNPlugin;
@@ -190,22 +191,29 @@ MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::
THROW_IE_EXCEPTION << "Unsupported CPU implementation " << str << " for node " << getName();
}
}
if (layer->params.find("InputMemoryFormats") != layer->params.end()) {
std::istringstream stream(layer->params["InputMemoryFormats"]);
std::string str;
while (getline(stream, str, ',')) {
if (str.substr(0, 4) != "cpu:")
continue;
inputMemoryFormatsFilter.push_back(mkldnn_str2fmt(str.substr(4, str.size()).c_str()));
+ auto ngraphNode = layer->getNode();
+ if (ngraphNode != nullptr) {
+ std::string inputMemoryFormats = ngraph::getMLKDNNInputMemoryFormats(ngraphNode);
+ if (!inputMemoryFormats.empty()) {
+ std::istringstream stream(inputMemoryFormats);
+ std::string str;
+ while (getline(stream, str, ',')) {
+ if (str.substr(0, 4) != "cpu:")
+ continue;
+ inputMemoryFormatsFilter.push_back(mkldnn_str2fmt(str.substr(4, str.size()).c_str()));
}
}
}
if (layer->params.find("OutputMemoryFormats") != layer->params.end()) {
std::istringstream stream(layer->params["OutputMemoryFormats"]);
std::string str;
while (getline(stream, str, ',')) {
if (str.substr(0, 4) != "cpu:")
continue;
outputMemoryFormatsFilter.push_back(mkldnn_str2fmt(str.substr(4, str.size()).c_str()));
+ std::string outputMemoryFormats = ngraph::getMLKDNNOutputMemoryFormats(ngraphNode);
+ if (!outputMemoryFormats.empty()) {
+ std::istringstream stream(outputMemoryFormats);
+ std::string str;
+ while (getline(stream, str, ',')) {
+ if (str.substr(0, 4) != "cpu:")
+ continue;
+ outputMemoryFormatsFilter.push_back(mkldnn_str2fmt(str.substr(4, str.size()).c_str()));
}
}
}
}
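
For reference, a minimal standalone sketch of the parsing logic introduced above: the attribute value is a comma-separated list such as cpu:nChw8c,cpu:nchw, entries without the cpu: prefix are skipped, and the rest of each entry names the layout. The helper name parseCpuMemoryFormats is illustrative only; the real code feeds each stripped entry to mkldnn_str2fmt instead of collecting strings.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Illustrative helper: split a memory-formats attribute value and keep only
// the entries addressed to the CPU plugin, with the "cpu:" prefix removed.
std::vector<std::string> parseCpuMemoryFormats(const std::string& attrValue) {
    std::vector<std::string> formats;
    std::istringstream stream(attrValue);
    std::string str;
    while (getline(stream, str, ',')) {
        if (str.substr(0, 4) != "cpu:")
            continue;                      // entry is meant for another plugin
        formats.push_back(str.substr(4));  // the plugin converts this via mkldnn_str2fmt
    }
    return formats;
}

int main() {
    for (const auto& fmt : parseCpuMemoryFormats("cpu:nChw8c,gpu:bfyx,cpu:nchw"))
        std::cout << fmt << "\n";          // prints "nChw8c" and "nchw"
}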

View File

@@ -23,6 +23,7 @@
#include "mkldnn_weights_cache.hpp"
#include "mkldnn.hpp"
#include <openvino/itt.hpp>
+ #include <ngraph/node.hpp>
namespace MKLDNNPlugin {

View File

@@ -0,0 +1,27 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/node.hpp>
#include <ngraph/variant.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "memory_formats_attribute.hpp"
namespace ngraph {
template class ngraph::MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats>;
constexpr VariantTypeInfo VariantWrapper<MLKDNNInputMemoryFormats>::type_info;
std::string getMLKDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node> & node) {
return MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats>::getMemoryFormats(node);
}
template class ngraph::MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats>;
constexpr VariantTypeInfo VariantWrapper<MLKDNNOutputMemoryFormats>::type_info;
std::string getMLKDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node> & node) {
return MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats>::getMemoryFormats(node);
}
} // namespace ngraph
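
The template class lines above pair with the extern template class declarations in the header shown below: the header promises that the helper is instantiated in exactly one translation unit, and this file provides that instantiation together with the out-of-class definitions of the constexpr type_info members. A minimal, self-contained sketch of the same idiom (all names here are illustrative only):

#include <iostream>

// Header-like part: a template plus a promise that the <int> instantiation
// is compiled elsewhere, so other translation units do not re-instantiate it.
template <typename T>
struct FormatsHolder {
    T value;
    T get() const { return value; }
};
extern template struct FormatsHolder<int>;

// Source-like part (one .cpp): fulfil the promise with an explicit instantiation.
template struct FormatsHolder<int>;

int main() {
    FormatsHolder<int> holder{42};
    std::cout << holder.get() << "\n";  // prints 42
}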

View File

@@ -0,0 +1,104 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <set>
#include <ngraph/node.hpp>
#include <ngraph/variant.hpp>
namespace ngraph {
constexpr const char *MLKDNNInputMemoryFormatsAttr = "MLKDNNInputMemoryFormats";
constexpr const char *MLKDNNOutputMemoryFormatsAttr = "MLKDNNOutputMemoryFormats";
class MLKDNNMemoryFormats {
protected:
std::string memory_format;
public:
MLKDNNMemoryFormats() = default;
explicit MLKDNNMemoryFormats(const std::string &_memory_format) : memory_format(_memory_format) {}
std::string getMemoryFormats() const { return memory_format; }
};
template <typename MemoryFormatsType>
class MLKDNNMemoryFormatsHelper : public VariantImpl<MemoryFormatsType> {
public:
MLKDNNMemoryFormatsHelper(const MemoryFormatsType& value) : VariantImpl<MemoryFormatsType>(value) {}
static std::string getMemoryFormats(const std::shared_ptr<ngraph::Node>& node) {
const auto &rtInfo = node->get_rt_info();
using MemoryFormatsWraper = VariantWrapper<MemoryFormatsType>;
if (!rtInfo.count(MemoryFormatsWraper::type_info.name)) return "";
const auto &attr = rtInfo.at(MemoryFormatsWraper::type_info.name);
MemoryFormatsType mem_format = as_type_ptr<MemoryFormatsWraper>(attr)->get();
return mem_format.getMemoryFormats();
}
std::shared_ptr<ngraph::Variant> merge(const ngraph::NodeVector & nodes) override {
std::set<std::string> unique_mem_format;
for (auto &node : nodes) {
std::string mem_format = getMemoryFormats(node);
if (!mem_format.empty()) unique_mem_format.insert(mem_format);
}
if (unique_mem_format.size() > 1) {
throw ngraph_error(std::string(VariantWrapper<MemoryFormatsType>::type_info.name) + " no rule defined for multiple values.");
}
std::string final_mem_format;
if (unique_mem_format.size() == 1) {
final_mem_format = *unique_mem_format.begin();
}
return std::make_shared<VariantWrapper<MemoryFormatsType>>(MemoryFormatsType(final_mem_format));
}
std::shared_ptr<ngraph::Variant> init(const std::shared_ptr<ngraph::Node> & node) override {
throw ngraph_error(std::string(VariantWrapper<MemoryFormatsType>::type_info.name) + " has no default initialization.");
}
};
class MLKDNNInputMemoryFormats : public MLKDNNMemoryFormats {
public:
MLKDNNInputMemoryFormats() = default;
explicit MLKDNNInputMemoryFormats(const std::string &_memory_format) : MLKDNNMemoryFormats(_memory_format) {}
};
extern template class MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats>;
template<>
class VariantWrapper<MLKDNNInputMemoryFormats> : public MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats> {
public:
static constexpr VariantTypeInfo type_info{MLKDNNInputMemoryFormatsAttr, 0};
const VariantTypeInfo &get_type_info() const override { return type_info; }
VariantWrapper(const MLKDNNInputMemoryFormats &value) : MLKDNNMemoryFormatsHelper<MLKDNNInputMemoryFormats>(value) {}
};
std::string getMLKDNNInputMemoryFormats(const std::shared_ptr<ngraph::Node>& node);
class MLKDNNOutputMemoryFormats : public MLKDNNMemoryFormats {
public:
MLKDNNOutputMemoryFormats() = default;
explicit MLKDNNOutputMemoryFormats(const std::string &_memory_format) : MLKDNNMemoryFormats(_memory_format) {}
};
extern template class MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats>;
template<>
class VariantWrapper<MLKDNNOutputMemoryFormats> : public MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats> {
public:
static constexpr VariantTypeInfo type_info{MLKDNNOutputMemoryFormatsAttr, 0};
const VariantTypeInfo &get_type_info() const override { return type_info; }
VariantWrapper(const MLKDNNOutputMemoryFormats &value) : MLKDNNMemoryFormatsHelper<MLKDNNOutputMemoryFormats>(value) {}
};
std::string getMLKDNNOutputMemoryFormats(const std::shared_ptr<ngraph::Node>& node);
} // namespace ngraph
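
To illustrate how the attribute is meant to be consumed, here is a hedged sketch of attaching an input-formats hint to a node's rt_info and reading it back through the helper declared above. It assumes an ngraph build matching this commit (where Node::get_rt_info() returns a map of Variant pointers) and that the new header is on the include path; the shape and the cpu:nChw8c value are illustrative only.

#include <iostream>
#include <memory>

#include <ngraph/opsets/opset1.hpp>
#include "memory_formats_attribute.hpp"

int main() {
    using namespace ngraph;

    // Any node works; a Parameter is enough for the sketch.
    auto param = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 3, 8, 8});

    // Attach the CPU-specific input memory format hint to the node's rt_info,
    // mirroring what makeCPUInfo() does in the CPU functional tests below.
    param->get_rt_info()[MLKDNNInputMemoryFormatsAttr] =
        std::make_shared<VariantWrapper<MLKDNNInputMemoryFormats>>(
            MLKDNNInputMemoryFormats("cpu:nChw8c"));

    // Read it back through the helper; prints "cpu:nChw8c".
    std::cout << getMLKDNNInputMemoryFormats(param) << "\n";
}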

View File

@@ -67,7 +67,7 @@ public:
/**
* @ingroup ie_runtime_attr_api
- * @brief getPrimitivesPriority return string with dequantization value
+ * @brief getDequantization return string with dequantization value
* @param[in] node The node will be used to get Dequantization attribute
*/
TRANSFORMATIONS_API std::string getDequantization(const std::shared_ptr<ngraph::Node>& node);

View File

@@ -71,8 +71,7 @@ ngraph::pass::ConvertGroupConvolution::ConvertGroupConvolution() {
} else {
weights = std::make_shared<ngraph::opset1::Reshape>(gconv->input_value(1),
op::Constant::create(element::i64, Shape{reshape_shape.size()}, reshape_shape), true);
- // FIXME: 42956
- // ngraph::copy_runtime_info(gconv, weights.get_node_shared_ptr());
+ ngraph::copy_runtime_info(gconv, weights.get_node_shared_ptr());
}
auto conv_ie = std::make_shared<ngraph::op::ConvolutionIE>(gconv->input_value(0),
weights,
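
Re-enabling copy_runtime_info here matters for the new attribute: rt_info copied from the original node travels to its replacement, so a memory-format hint set on the source graph is not lost by this transformation (the FIXME above references ticket 42956; dropping it presumably became possible once the memory-format attributes gained well-defined copy/merge behavior). A hedged sketch, assuming the same ngraph build and the header added above:

#include <ngraph/opsets/opset1.hpp>
#include <ngraph/rt_info.hpp>
#include "memory_formats_attribute.hpp"

int main() {
    using namespace ngraph;
    auto original    = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 3, 8, 8});
    auto replacement = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 3, 8, 8});

    // Hint set on the original node only.
    original->get_rt_info()[MLKDNNInputMemoryFormatsAttr] =
        std::make_shared<VariantWrapper<MLKDNNInputMemoryFormats>>(
            MLKDNNInputMemoryFormats("cpu:nChw8c"));

    // Copy rt_info the same way the pass above does; the attribute comes along.
    copy_runtime_info(original, replacement);

    return getMLKDNNInputMemoryFormats(replacement) == "cpu:nChw8c" ? 0 : 1;
}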

View File

@@ -1,18 +1,24 @@
- # Copyright (C) 2019 Intel Corporation
+ # Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME cpuFuncTests)
+ add_library(cpuSpecificRtInfo STATIC ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin/utils/rt_info/memory_formats_attribute.hpp
+ ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin/utils/rt_info/memory_formats_attribute.cpp)
+ target_link_libraries(cpuSpecificRtInfo PRIVATE ${NGRAPH_LIBRARIES})
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
INCLUDES ${CMAKE_CURRENT_SOURCE_DIR}
+ ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin
DEPENDENCIES
MKLDNNPlugin
LINK_LIBRARIES
funcSharedTests
+ cpuSpecificRtInfo
ADD_CPPLINT
LABELS
CPU

View File

@@ -38,7 +38,7 @@ std::string ConvConcatSubgraphTest::getTestCaseName(testing::TestParamInfo<convC
result << CPUTestsBase::getTestCaseName(cpuParams);
result << "axis=" << axis;
result << "_axis=" << axis;
return result.str();
}

View File

@@ -3,6 +3,7 @@
//
#include "cpu_test_utils.hpp"
#include "utils/rt_info/memory_formats_attribute.hpp"
namespace CPUTestUtils {
@@ -155,10 +156,12 @@ CPUTestsBase::makeCPUInfo(std::vector<cpu_memory_format_t> inFmts, std::vector<c
CPUInfo cpuInfo;
if (!inFmts.empty()) {
cpuInfo.insert({"InputMemoryFormats", std::make_shared<ngraph::VariantWrapper<std::string>>(fmts2str(inFmts))});
cpuInfo.insert({std::string(ngraph::MLKDNNInputMemoryFormatsAttr),
std::make_shared<ngraph::VariantWrapper<ngraph::MLKDNNInputMemoryFormats>>(ngraph::MLKDNNInputMemoryFormats(fmts2str(inFmts)))});
}
if (!outFmts.empty()) {
cpuInfo.insert({"OutputMemoryFormats", std::make_shared<ngraph::VariantWrapper<std::string>>(fmts2str(outFmts))});
cpuInfo.insert({std::string(ngraph::MLKDNNOutputMemoryFormatsAttr),
std::make_shared<ngraph::VariantWrapper<ngraph::MLKDNNOutputMemoryFormats>>(ngraph::MLKDNNOutputMemoryFormats(fmts2str(outFmts)))});
}
if (!priority.empty()) {
cpuInfo.insert({"PrimitivesPriority", std::make_shared<ngraph::VariantWrapper<std::string>>(impls2str(priority))});

View File

@@ -8,10 +8,8 @@
#include <ngraph/variant.hpp>
#include "ie_system_conf.h"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include <ngraph/function.hpp>
#include <ngraph/variant.hpp>
#include <exec_graph_info.hpp>
#include "ie_system_conf.h"
namespace CPUTestUtils {
typedef enum {
@ -29,10 +27,10 @@ namespace CPUTestUtils {
} cpu_memory_format_t;
using CPUSpecificParams = std::tuple<
- std::vector<cpu_memory_format_t>, //input memomry format
- std::vector<cpu_memory_format_t>, //output memory format
- std::vector<std::string>, //priority
- std::string // selected primitive type
+ std::vector<cpu_memory_format_t>, // input memomry format
+ std::vector<cpu_memory_format_t>, // output memory format
+ std::vector<std::string>, // priority
+ std::string // selected primitive type
>;
class CPUTestsBase {
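
For context, a test would typically fill this tuple along the lines of the sketch below. The enum values and the jit_avx2 strings are illustrative assumptions, since the body of cpu_memory_format_t and the available implementation names lie outside this hunk; stand-in declarations are included so the snippet compiles on its own.

#include <string>
#include <tuple>
#include <vector>

// Stand-ins mirroring the declarations above (illustrative only).
typedef enum { nchw, nChw8c } cpu_memory_format_t;
using CPUSpecificParams = std::tuple<
        std::vector<cpu_memory_format_t>,   // input memory formats
        std::vector<cpu_memory_format_t>,   // output memory formats
        std::vector<std::string>,           // priority
        std::string>;                       // selected primitive type

int main() {
    // Ask for blocked input/output layouts and prefer a jit_avx2 implementation.
    CPUSpecificParams params{{nChw8c}, {nChw8c}, {"jit_avx2"}, "jit_avx2"};
    return std::get<3>(params) == "jit_avx2" ? 0 : 1;
}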