Add fake quantize support for convolution padding (#6577)

* [GNA] Add fake quantize support for convolution padding

Combine seven ngraph matcher passes into two.
Remove max pool size checking.
Add ngraph reference tests for subgraphs which were processed by POT.
Fix remaining issues with Max Pooling output calculations.
Add setting of default compile target based on execution target.

* [GNA] Remove redundant subgraph matcher

* [GNA] Remove redundant subgraph matcher
This commit is contained in:
Szymon Irzabek 2021-07-14 15:29:10 +02:00 committed by GitHub
parent ae25f5f581
commit 34c20ad9a8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 256 additions and 497 deletions

View File

@ -1361,7 +1361,7 @@ uint32_t GNAPluginNS::backend::AMIntelDNN::CountLayers() {
}
#if GNA_LIB_VER == 2
void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(Gna2Model *gnaModel) {
void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(Gna2Model *gnaModel, const std::string& gnaCompileTarget) {
Gna2Operation * gnaOperation;
if (gnaModel == nullptr)
THROW_GNA_EXCEPTION << "Invalid input parameter";
@ -1677,7 +1677,11 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
const auto fltStride = fltStrideShape->Dimensions[0];
const auto outFromConv = outputFromConv(inVecCnt, nFltSize, fltStride);
// FLAT input matrix, pooled outputs per filter
outputTensor.Shape.Dimensions[1] = outputFromPoolingLegacy(outFromConv, poolStride->Dimensions[0]);
if (gnaCompileTarget == InferenceEngine::GNAConfigParams::GNA_TARGET_3_0) {
outputTensor.Shape.Dimensions[1] = outputFromPooling(outFromConv, poolWindow->Dimensions[0], poolStride->Dimensions[0]);
} else {
outputTensor.Shape.Dimensions[1] = outputFromPoolingLegacy(outFromConv, poolStride->Dimensions[0]);
}
} else { // kDnnConvolutional2dOp
// Override GNA operation output pointer with the one from pooling component
outputTensor.Data = comp.ptr_outputs;
@ -1743,7 +1747,8 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
|| (component[i - 1].operation == kDnnConvolutional1dOp)
|| (component[i - 1].operation == kDnnConvolutional2dOp)
|| ((component[i - 1].operation == kDnnMaxPoolOp) &&
(component[i - 2].operation == kDnnConvolutional1dOp))) {
(component[i - 2].operation == kDnnConvolutional1dOp
|| component[i - 2].operation == kDnnConvolutional2dOp))) {
if (gnaOperation->Operands[PwlOpIdx] == nullptr) {
HelperGna2OperationSetOperand(gnaOperation, gnaUserAllocator, gnaUserFree, PwlOpIdx, createGna2TensorPwl(1, nullptr));
}

View File

@ -15,6 +15,7 @@
#if GNA_LIB_VER == 2
#include <gna2-model-api.h>
#include <gna/gna_config.hpp>
#endif
namespace GNAPluginNS {
@ -293,7 +294,7 @@ public:
#if GNA_LIB_VER == 2
void InitGNAStruct(Gna2Model *gnaModel);
void InitGNAStruct(Gna2Model *gnaModel, const std::string& gnaCompileTarget = InferenceEngine::GNAConfigParams::GNA_TARGET_2_0);
void DestroyGNAStruct(Gna2Model *gnaModel);
#else

View File

@ -680,12 +680,6 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
manager.register_pass<ngraph::pass::ConvertPriorBox>();
manager.register_pass<ngraph::pass::CommonOptimizations>();
manager.register_pass<ConvertPadded2ValidConv>();
manager.register_pass<ConvertPaddedWithBias2ValidConv>();
manager.register_pass<ConvertPaddedWithBiasAF2ValidConv>();
manager.register_pass<ConvertPaddedWithBiasMaxPool2ValidConv>();
manager.register_pass<ConvertPaddedWithBiasMaxPoolAF2ValidConv>();
manager.register_pass<ConvertPaddedTransposedWithBias2ValidConv>();
manager.register_pass<ConvertPaddedTransposedWithBiasAF2ValidConv>();
// TODO enable this transformation for networks with convolutions
if (!ngraph::op::util::has_op_with_type<ngraph::opset7::Convolution>(graph)) {
manager.register_pass<ConvertMatmulWithFqToPointWiseConvolution>();
@ -992,7 +986,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
if (!gnaFlags->sw_fp32 && !graphCompiler.dnnComponents.components.empty()) {
// number of layer gets calculated inside that InitGNAStruct function
#if GNA_LIB_VER == 2
dnn->InitGNAStruct(&std::get<0>(gnaModels.front())->obj);
dnn->InitGNAStruct(&std::get<0>(gnaModels.front())->obj, config.gnaCompileTarget);
#else
dnn->InitGNAStruct(&std::get<0>(nnets.front())->obj);
#endif
@ -1003,7 +997,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
#if GNA_LIB_VER == 2
gnaModels.push_back(std::make_tuple(make_shared<CPPWrapper<Gna2Model>>()));
// this can be improved by just copy all structures, but we are too lazy
dnn->InitGNAStruct(&std::get<0>(gnaModels.back())->obj);
dnn->InitGNAStruct(&std::get<0>(gnaModels.back())->obj, config.gnaCompileTarget);
#else
nnets.emplace_back(make_shared<CPPWrapper<intel_nnet_type_t>>(), -1, InferenceEngine::BlobMap());
dnn->InitGNAStruct(&std::get<0>(nnets.back())->obj);

View File

@ -131,7 +131,13 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
if (supportedTargets.count(value) == 0) {
THROW_GNA_EXCEPTION << "Unsupported GNA config value (key, value): (" << key << ", " << value << ")";
}
(key == GNA_CONFIG_KEY(EXEC_TARGET) ? gnaExecTarget : gnaCompileTarget) = value;
if (key == GNA_CONFIG_KEY(EXEC_TARGET)) {
gnaExecTarget = value;
if (gnaCompileTarget == "")
gnaCompileTarget = value;
} else {
gnaCompileTarget = value;
}
} else if (key == GNA_CONFIG_KEY(COMPACT_MODE)) {
if (value == PluginConfigParams::YES) {
gnaFlags.compact_mode = true;

View File

@ -10,6 +10,7 @@
#include <ngraph/opsets/opset7.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/pattern/op/or.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pass/manager.hpp>
#include <ie_common.h>
@ -18,12 +19,6 @@
using namespace GNAPluginNS;
NGRAPH_RTTI_DEFINITION(ConvertPadded2ValidConv, "ConvertPadded2ValidConv", 0);
NGRAPH_RTTI_DEFINITION(ConvertPaddedWithBias2ValidConv, "ConvertPaddedWithBias2ValidConv", 0);
NGRAPH_RTTI_DEFINITION(ConvertPaddedWithBiasAF2ValidConv, "ConvertPaddedWithBiasAF2ValidConv", 0);
NGRAPH_RTTI_DEFINITION(ConvertPaddedWithBiasMaxPool2ValidConv, "ConvertPaddedWithBiasMaxPool2ValidConv", 0);
NGRAPH_RTTI_DEFINITION(ConvertPaddedWithBiasMaxPoolAF2ValidConv, "ConvertPaddedWithBiasMaxPoolAF2ValidConv", 0);
NGRAPH_RTTI_DEFINITION(ConvertPaddedTransposedWithBias2ValidConv, "ConvertPaddedTransposedWithBias2ValidConv", 0);
NGRAPH_RTTI_DEFINITION(ConvertPaddedTransposedWithBiasAF2ValidConv, "ConvertPaddedTransposedWithBiasAF2ValidConv", 0);
struct ConvData {
size_t input_height;
@ -92,26 +87,16 @@ static bool VerifyBias(std::shared_ptr<ngraph::opset7::Add> bias, const size_t&
if (!add_const)
add_const = std::dynamic_pointer_cast<ngraph::opset7::Constant>(bias->input_value(1).get_node_shared_ptr());
// The add may be a normal add not conv bias, then we just go further
// The add may be a normal add not convolution bias, then we just go further
return (add_const && shape_size(add_const->get_shape()) == filter_count);
}
static bool VerifyMaxPool(std::shared_ptr<ngraph::opset7::MaxPool> max_pool) {
auto pool_strides = max_pool->get_strides();
auto pool_kernel = max_pool->get_kernel();
// Check if MaxPool vertical stride == pool size
// Check if padding is VALID
return (max_pool->get_auto_pad() == ngraph::op::PadType::VALID &&
pool_kernel.size() == 2 && pool_strides.size() == 2);
}
static std::shared_ptr<ngraph::opset7::StridedSlice> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
return std::make_shared<ngraph::opset7::StridedSlice>(
input, // data
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{ 2 }, { (size_t)0, offset }), // begin slice index
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{ 2 }, { (size_t)0, offset + size }), // end slice index
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{ 2 }, { (size_t)1, (size_t)1 }), // strides
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin slice index
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset + size}), // end slice index
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides
std::vector<int64_t>{1, 0}, // begin mask
std::vector<int64_t>{1, 0}); // end mask
}
@ -142,11 +127,11 @@ static std::shared_ptr<ngraph::Node> CreatePaddedNet(std::shared_ptr<ngraph::ops
}
auto flat_input = std::make_shared<ngraph::opset7::Reshape>(leading_transpose->input_value(0),
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{ 2 },
ngraph::Shape{ 1ull, shape_size(leading_transpose->input_value(0).get_shape()) }), false);
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2},
ngraph::Shape{1ull, shape_size(leading_transpose->input_value(0).get_shape())}), false);
// Constant with zero padding
auto const_holding_padding = std::make_shared<ngraph::opset7::Constant>(conv_data.element_type, ngraph::Shape{ 1, biggest_padding }, 0);
auto const_holding_padding = std::make_shared<ngraph::opset7::Constant>(conv_data.element_type, ngraph::Shape{1, biggest_padding}, 0);
copy_runtime_info(conv, const_holding_padding);
std::shared_ptr<ngraph::Node> original_row = flat_input;
@ -213,21 +198,21 @@ static void GeneratePadding(std::shared_ptr<ngraph::opset7::Transpose> leading_t
auto padded_input_plane = CreatePaddedNet(leading_transpose, conv, conv_data);
auto padded_input_plane_reshaped = std::make_shared<ngraph::opset7::Reshape>(padded_input_plane,
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { static_cast<size_t>(1),
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {static_cast<size_t>(1),
conv_data.pads_begin_height + conv_data.input_height + conv_data.pads_end_height,
conv_data.pads_begin_width + conv_data.input_width + conv_data.pads_end_width,
conv_data.input_channel_count }), false);
conv_data.input_channel_count}), false);
// NHWC => NCHW
auto transposed2chw = std::make_shared<ngraph::opset7::Transpose>(padded_input_plane_reshaped,
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0ull, 3ull, 1ull, 2ull })->output(0));
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0ull, 3ull, 1ull, 2ull})->output(0));
auto conv_copy = std::make_shared<ngraph::opset7::Convolution>(
transposed2chw->output(0),
conv->input_value(1),
conv->get_strides(),
ngraph::CoordinateDiff{ 0, 0 },
ngraph::CoordinateDiff{ 0, 0 },
ngraph::CoordinateDiff{0, 0},
ngraph::CoordinateDiff{0, 0},
conv->get_dilations(),
ngraph::op::PadType::EXPLICIT);
@ -237,9 +222,7 @@ static void GeneratePadding(std::shared_ptr<ngraph::opset7::Transpose> leading_t
static bool Convert(std::shared_ptr<ngraph::Node> leading_transpose,
std::shared_ptr<ngraph::Node> conv,
std::shared_ptr<ngraph::Node> trailing_transpose,
std::shared_ptr<ngraph::Node> bias,
std::shared_ptr<ngraph::Node> af,
std::shared_ptr<ngraph::Node> max_pool) {
std::shared_ptr<ngraph::Node> bias) {
ConvData conv_data;
@ -248,18 +231,15 @@ static bool Convert(std::shared_ptr<ngraph::Node> leading_transpose,
// We are looking for Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC)
// or similar cases, so required network must be in NHWC order like in TF
if (!TransposeOrderMatches(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(leading_transpose), { 0, 3, 1, 2 }))
if (!TransposeOrderMatches(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(leading_transpose), {0, 3, 1, 2}))
return false;
if (!TransposeOrderMatches(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(trailing_transpose), { 0, 2, 3, 1 }))
if (!TransposeOrderMatches(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(trailing_transpose), {0, 2, 3, 1}))
return false;
if (bias && !VerifyBias(std::dynamic_pointer_cast<ngraph::opset7::Add>(bias), conv_data.filter_count))
return false;
if (max_pool && !VerifyMaxPool(std::dynamic_pointer_cast<ngraph::opset7::MaxPool>(max_pool)))
return false;
GeneratePadding(std::dynamic_pointer_cast<ngraph::opset7::Transpose>(leading_transpose),
std::dynamic_pointer_cast<ngraph::opset7::Convolution>(conv), conv_data);
@ -276,184 +256,46 @@ ConvertPadded2ValidConv::ConvertPadded2ValidConv() {
MATCHER_SCOPE(ConvertPadded2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ngraph::pattern::any_input(), const_input},
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
{leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant, ngraph::opset7::FakeQuantize>(ngraph::pattern::rank_equals(4))},
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ conv, const_input },
consumers_and_rank(1, 4));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), nullptr, nullptr, nullptr);
};
auto m = std::make_shared<ngraph::pattern::Matcher>(trailing_transpose, matcher_name);
this->register_matcher(m, callback);
}
ConvertPaddedWithBias2ValidConv::ConvertPaddedWithBias2ValidConv() {
MATCHER_SCOPE(ConvertPaddedWithBias2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({conv, const_input},
ngraph::pattern::consumers_count(1));
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({ conv, const_input },
auto fq = ngraph::pattern::wrap_type<ngraph::opset7::FakeQuantize>({bias, const_input, const_input, const_input, const_input},
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ bias, const_input },
consumers_and_rank(1, 4));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), nullptr, nullptr);
};
auto m = std::make_shared<ngraph::pattern::Matcher>(trailing_transpose, matcher_name);
this->register_matcher(m, callback);
}
ConvertPaddedWithBiasAF2ValidConv::ConvertPaddedWithBiasAF2ValidConv() {
MATCHER_SCOPE(ConvertPaddedWithBiasAF2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
auto max_pool1 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({bias},
ngraph::pattern::consumers_count(1));
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({ conv, const_input },
auto max_pool2 = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({fq},
ngraph::pattern::consumers_count(1));
auto af = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
auto af1 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
ngraph::opset7::Sign, ngraph::opset7::Clamp>({ bias },
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ af, const_input },
consumers_and_rank(1, 4));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(),
pattern_map.at(af).get_node_shared_ptr(), nullptr);
};
auto m = std::make_shared<ngraph::pattern::Matcher>(trailing_transpose, matcher_name);
this->register_matcher(m, callback);
}
ConvertPaddedWithBiasMaxPool2ValidConv::ConvertPaddedWithBiasMaxPool2ValidConv() {
MATCHER_SCOPE(ConvertPaddedWithBiasMaxPool2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
ngraph::pattern::consumers_count(1));
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({ conv, const_input },
ngraph::pattern::consumers_count(1));
auto max_pool = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({ bias },
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ max_pool, const_input },
consumers_and_rank(1, 4));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(),
nullptr, pattern_map.at(max_pool).get_node_shared_ptr());
};
auto m = std::make_shared<ngraph::pattern::Matcher>(trailing_transpose, matcher_name);
this->register_matcher(m, callback);
}
ConvertPaddedWithBiasMaxPoolAF2ValidConv::ConvertPaddedWithBiasMaxPoolAF2ValidConv() {
MATCHER_SCOPE(ConvertPaddedWithBiasMaxPoolAF2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
ngraph::pattern::consumers_count(1));
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({ conv, const_input },
ngraph::pattern::consumers_count(1));
auto max_pool = ngraph::pattern::wrap_type<ngraph::opset7::MaxPool>({ bias },
ngraph::pattern::consumers_count(1));
auto af = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
ngraph::opset7::Sign, ngraph::opset7::Clamp>({bias}, ngraph::pattern::consumers_count(1));
auto af2 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
ngraph::opset7::Sign, ngraph::opset7::Clamp>({ max_pool },
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ af, const_input },
ngraph::opset7::Sign, ngraph::opset7::Clamp>({fq}, ngraph::pattern::consumers_count(1));
auto af3 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
ngraph::opset7::Sign, ngraph::opset7::Clamp>({max_pool1}, ngraph::pattern::consumers_count(1));
auto af4 = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
ngraph::opset7::Sign, ngraph::opset7::Clamp>({max_pool2}, ngraph::pattern::consumers_count(1));
auto transpose_input = std::make_shared<ngraph::pattern::op::Or>(ngraph::OutputVector{conv, bias, max_pool1, max_pool2, fq, af1, af2, af3, af4});
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({transpose_input, const_input},
consumers_and_rank(1, 4));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
auto conv_output = conv->output(0).get_node_shared_ptr();
IE_ASSERT(conv_output != nullptr);
auto bias_node = std::dynamic_pointer_cast<ngraph::opset7::Add>(conv_output);
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(),
pattern_map.at(af).get_node_shared_ptr(), pattern_map.at(max_pool).get_node_shared_ptr());
pattern_map.at(trailing_transpose).get_node_shared_ptr(), bias_node);
};
auto m = std::make_shared<ngraph::pattern::Matcher>(trailing_transpose, matcher_name);
this->register_matcher(m, callback);
}
ConvertPaddedTransposedWithBias2ValidConv::ConvertPaddedTransposedWithBias2ValidConv() {
MATCHER_SCOPE(ConvertPaddedTransposedWithBias2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ conv, const_input },
consumers_and_rank(1, 4));
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({ trailing_transpose, const_input },
ngraph::pattern::consumers_count(1));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(), nullptr, nullptr);
};
auto m = std::make_shared<ngraph::pattern::Matcher>(bias, matcher_name);
this->register_matcher(m, callback);
}
ConvertPaddedTransposedWithBiasAF2ValidConv::ConvertPaddedTransposedWithBiasAF2ValidConv() {
MATCHER_SCOPE(ConvertPaddedTransposedWithBiasAF2ValidConv);
auto const_input = ngraph::pattern::wrap_type<ngraph::opset7::Constant>();
auto leading_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ ngraph::pattern::any_input(), const_input },
consumers_and_rank(1, 4));
auto conv = ngraph::pattern::wrap_type<ngraph::opset7::Convolution>(
{ leading_transpose, ngraph::pattern::wrap_type<ngraph::opset7::Constant>(ngraph::pattern::rank_equals(4)) },
ngraph::pattern::consumers_count(1));
auto trailing_transpose = ngraph::pattern::wrap_type<ngraph::opset7::Transpose>({ conv, const_input },
consumers_and_rank(1, 4));
auto bias = ngraph::pattern::wrap_type<ngraph::opset7::Add>({ trailing_transpose, const_input },
ngraph::pattern::consumers_count(1));
auto af = ngraph::pattern::wrap_type<ngraph::opset7::Relu, ngraph::opset7::Sigmoid,
ngraph::opset7::Tanh, ngraph::opset7::Abs, ngraph::opset7::Log, ngraph::opset7::Exp,
ngraph::opset7::Sign, ngraph::opset7::Clamp>({ bias },
ngraph::pattern::consumers_count(1));
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
return Convert(pattern_map.at(leading_transpose).get_node_shared_ptr(), pattern_map.at(conv).get_node_shared_ptr(),
pattern_map.at(trailing_transpose).get_node_shared_ptr(), pattern_map.at(bias).get_node_shared_ptr(),
pattern_map.at(af).get_node_shared_ptr(), nullptr);
};
auto m = std::make_shared<ngraph::pattern::Matcher>(af, matcher_name);
this->register_matcher(m, callback);
}

View File

@ -8,159 +8,30 @@
namespace GNAPluginNS {
/**
* @brief Convert a padded convolution, wrapped with transposes,
* to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
*
*/
/**
* @brief Convert a padded convolution with bias, max pooling and activation function
* wrapped with transposes, to a valid convolution with padding added before the leading transpose,
* POT processed models are supported (fake quantized layers omitted below for clarity):
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Broadcast Bias (optional) Broadcast Bias (optional)
* | |
* Max Pooling (optional) Max Pooling (optional)
* | |
* Activation Function (optional) Activation Function (optional)
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
*
*/
class ConvertPadded2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPadded2ValidConv();
};
/**
* @brief Convert a padded convolution with bias, wrapped with transposes,
* to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Broadcast Bias Broadcast Bias
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
*
*/
class ConvertPaddedWithBias2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPaddedWithBias2ValidConv();
};
/**
* @brief Convert a padded convolution with bias and an activation function,
* wrapped with transposes, to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Broadcast Bias Broadcast Bias
* | |
* Activation Function Activation Function
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
*
*/
class ConvertPaddedWithBiasAF2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPaddedWithBiasAF2ValidConv();
};
/**
* @brief Convert a padded convolution with bias and max pooling,
* wrapped with transposes, to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Broadcast Bias Broadcast Bias
* | |
* Max Pooling Max Pooling
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
*
*/
class ConvertPaddedWithBiasMaxPool2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPaddedWithBiasMaxPool2ValidConv();
};
/**
* @brief Convert a padded convolution with bias, max pooling and activation function
* wrapped with transposes, to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Broadcast Bias Broadcast Bias
* | |
* Max Pooling Max Pooling
* | |
* Activation Function Activation Function
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
*
*/
class ConvertPaddedWithBiasMaxPoolAF2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPaddedWithBiasMaxPoolAF2ValidConv();
};
/**
* @brief Convert a padded convolution wrapped with transposes, with bias after trailing transpose,
* to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
* | |
* Broadcast Bias Broadcast Bias
*
*/
class ConvertPaddedTransposedWithBias2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPaddedTransposedWithBias2ValidConv();
};
/**
* @brief Convert a padded convolution wrapped with transposes, with bias
* and activation function after trailing transpose, to a valid convolution with padding added before the leading transpose:
*
* Padding
* |
* Transpose (NHWC -> NCHW) Transpose (NHWC -> NCHW)
* | |
* Convolution with padding Convolution with padding
* | |
* Transpose (NCHW -> NHWC) Transpose (NCHW -> NHWC)
* | |
* Broadcast Bias Broadcast Bias
* | |
* Activation Function Activation Function
*
*/
class ConvertPaddedTransposedWithBiasAF2ValidConv : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertPaddedTransposedWithBiasAF2ValidConv();
};
} // namespace GNAPluginNS

View File

@ -117,19 +117,19 @@ protected:
std::tie(bias, transpBias, maxpoolPool, maxpoolStride) = miscParams;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
Shape biasShape{ bias };
Shape transpBiasShape{ transpBias };
Shape maxpoolShape{ maxpoolPool };
Strides maxpoolStrides{ maxpoolStride };
Shape biasShape{bias};
Shape transpBiasShape{transpBias};
Shape maxpoolShape{maxpoolPool};
Strides maxpoolStrides{maxpoolStride};
auto input = builder::makeParams(ngPrc, { inputShape });
auto transposeInOrder = op::Constant::create(element::i64, Shape{ 4 }, { 0, 3, 1, 2 });
auto input = builder::makeParams(ngPrc, {inputShape});
auto transposeInOrder = op::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2});
auto transposeIn = std::make_shared<Transpose>(input[0], transposeInOrder);
auto filterSize = std::accumulate(std::begin(kernel), std::end(kernel), 1ull, std::multiplies<size_t>());
auto filterWeights = CommonTestUtils::generate_float_numbers(numOutChannels * inputShape[3] * filterSize, -0.05f, 0.05f);
auto conv = builder::makeConvolution(transposeIn, ngPrc, kernel, stride, padBegin,
padEnd, dilation, padType, numOutChannels, false, filterWeights);
auto transposeOutOrder = op::Constant::create(element::i64, Shape{ 4 }, { 0, 2, 3, 1 });
auto transposeOutOrder = op::Constant::create(element::i64, Shape{4}, {0, 2, 3, 1});
auto biasWeights = CommonTestUtils::generate_float_numbers(shape_size(biasShape), -1.5f, 1.5f);
Output<Node> biasConst = std::make_shared<Constant>(ngPrc, biasShape, biasWeights);
Output<Node> lastOp = std::make_shared<Transpose>(conv, transposeOutOrder);
@ -145,9 +145,10 @@ protected:
case modelType::TranspConvBcastAddMaxPoolTransp:
{
auto bcastAdd = std::make_shared<Add>(conv, biasConst);
auto maxpool = std::make_shared<MaxPool>(bcastAdd, maxpoolStrides, Shape{ 0, 0 }, Shape{ 0, 0 }, maxpoolShape,
auto maxpool = std::make_shared<MaxPool>(bcastAdd, maxpoolStrides, Shape{0, 0}, Shape{0, 0}, maxpoolShape,
op::RoundingType::FLOOR, op::PadType::VALID);
lastOp = std::make_shared<Transpose>(maxpool, transposeOutOrder);
auto transpose = std::make_shared<Transpose>(maxpool, transposeOutOrder);
auto lastOp = std::make_shared<Relu>(transpose);
}
break;
@ -162,7 +163,7 @@ protected:
case modelType::TranspConvBcastAddMaxPoolActTransp:
{
auto bcastAdd = std::make_shared<Add>(conv, biasConst);
auto maxpool = std::make_shared<MaxPool>(bcastAdd, maxpoolStrides, Shape{ 0, 0 }, Shape{ 0, 0 }, maxpoolShape,
auto maxpool = std::make_shared<MaxPool>(bcastAdd, maxpoolStrides, Shape{0, 0}, Shape{0, 0}, maxpoolShape,
op::RoundingType::FLOOR, op::PadType::VALID);
auto activation = std::make_shared<Relu>(maxpool);
lastOp = std::make_shared<Transpose>(activation, transposeOutOrder);
@ -190,7 +191,7 @@ protected:
}
auto result = std::make_shared<Result>(lastOp);
function = std::make_shared<Function>(ResultVector{ result }, ParameterVector{ input });
function = std::make_shared<Function>(ResultVector{result}, ParameterVector{input});
}
};
@ -259,34 +260,33 @@ const std::vector<modelType> models = {
modelType::TranspConvBcastAddActTransp,
modelType::TranspConvTranspBcastAdd,
modelType::TranspConvTranspBcastAddAct,
//TODO: enable when 50386 and 50379 are fixed
//modelType::TranspConvBcastAddMaxPoolTransp,
//modelType::TranspConvBcastAddMaxPoolActTransp,
modelType::TranspConvBcastAddMaxPoolTransp,
modelType::TranspConvBcastAddMaxPoolActTransp
};
const std::vector<std::vector<size_t>> input1DNHWC = { {1, 1, 16, 8} };
const std::vector<std::vector<size_t >> kernels1D = { {1, 2}, {1, 3}, {1, 4} };
const std::vector<std::vector<size_t >> strides1D = { {1, 1} };
const std::vector<std::vector<ptrdiff_t>> padBegins1D = { {0, 2} };
const std::vector<std::vector<ptrdiff_t>> padEnds1D = { {0, 3} };
const std::vector<std::vector<size_t >> dilations1D = { {1, 1} };
const std::vector<size_t> numOutChannels1D = { 4 };
const std::vector<std::vector<size_t >> biases1D = { {1, 4, 1, 1} };
const std::vector<std::vector<size_t >> transpBiases1D = { {1, 1, 1, 4} };
const std::vector<std::vector<size_t >> maxpool1DPools = { {1, 2} };
const std::vector<std::vector<size_t >> maxpool1DStrides = { {1, 1} };
const std::vector<std::vector<size_t>> input1DNHWC = {{1, 1, 16, 8}};
const std::vector<std::vector<size_t >> kernels1D = {{1, 2}, {1, 3}, {1, 4}};
const std::vector<std::vector<size_t >> strides1D = {{1, 1}};
const std::vector<std::vector<ptrdiff_t>> padBegins1D = {{0, 2}};
const std::vector<std::vector<ptrdiff_t>> padEnds1D = {{0, 3}};
const std::vector<std::vector<size_t >> dilations1D = {{1, 1}};
const std::vector<size_t> numOutChannels1D = {4};
const std::vector<std::vector<size_t >> biases1D = {{1, 4, 1, 1}};
const std::vector<std::vector<size_t >> transpBiases1D = {{1, 1, 1, 4}};
const std::vector<std::vector<size_t >> maxpool1DPools = {{1, 2}};
const std::vector<std::vector<size_t >> maxpool1DStrides = {{1, 1}};
const std::vector<std::vector<size_t>> input2DNHWC = { {1, 16, 16, 32} };
const std::vector<std::vector<size_t >> kernels2D = { {2, 2}, {4, 1}, {1, 3} };
const std::vector<std::vector<size_t >> strides2D = { {1, 1}, {1, 2}, {2, 1}, {2, 2} };
const std::vector<std::vector<ptrdiff_t>> padBegins2D = { {1, 2} };
const std::vector<std::vector<ptrdiff_t>> padEnds2D = { {3, 1} };
const std::vector<std::vector<size_t >> dilations2D = { {1, 1} };
const std::vector<size_t> numOutChannels2D = { 8 };
const std::vector<std::vector<size_t >> biases2D = { {1, 8, 1, 1} };
const std::vector<std::vector<size_t >> transpBiases2D = { {1, 1, 1, 8} };
const std::vector<std::vector<size_t >> maxpool2DPools = { {2, 2} };
const std::vector<std::vector<size_t >> maxpool2DStrides = { {2, 1} };
const std::vector<std::vector<size_t>> input2DNHWC = {{1, 16, 16, 32}};
const std::vector<std::vector<size_t >> kernels2D = {{2, 2}, {4, 1}, {1, 3}};
const std::vector<std::vector<size_t >> strides2D = {{1, 1}, {1, 2}, {2, 1}, {2, 2}};
const std::vector<std::vector<ptrdiff_t>> padBegins2D = {{1, 2}};
const std::vector<std::vector<ptrdiff_t>> padEnds2D = {{3, 1}};
const std::vector<std::vector<size_t >> dilations2D = {{1, 1}};
const std::vector<size_t> numOutChannels2D = {8};
const std::vector<std::vector<size_t >> biases2D = {{1, 8, 1, 1}};
const std::vector<std::vector<size_t >> transpBiases2D = {{1, 1, 1, 8}};
const std::vector<std::vector<size_t >> maxpool2DPools = {{2, 2}};
const std::vector<std::vector<size_t >> maxpool2DStrides = {{2, 1}};
const auto conv1DParams = ::testing::Combine(
::testing::ValuesIn(kernels1D),

View File

@ -24,9 +24,28 @@ enum class modelType {
TranspConvBcastAddActTransp, /* Transpose(NHWC->NCHW) => Conv => Broadcasted Add (Bias) => Activation Function => Transpose(NCHW->NHWC) */
TranspConvBcastAddMaxPoolActTransp, /* Transpose(NHWC->NCHW) => Conv => Broadcasted Add (Bias) => MaxPool => Activation Function => Transpose(NCHW->NHWC) */
TranspConvTranspBcastAdd, /* Transpose(NHWC->NCHW) => conv => Transpose(NCHW->NHWC) => Bias */
TranspConvTranspBcastAddAct /* Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC) => Bias => Activation Function */
TranspConvTranspBcastAddAct, /* Transpose(NHWC->NCHW) => Conv => Transpose(NCHW->NHWC) => Bias => Activation Function */
};
typedef std::tuple<
modelType, // Test model
ngraph::PartialShape, // Input shape
ngraph::Shape, // Convolution filter shape
ngraph::Strides, // Convolution stride
ngraph::CoordinateDiff, // Convolution pads begin
ngraph::CoordinateDiff, // Convolution pads end
ngraph::Strides, // Convolution dilation
ngraph::Shape, // Bias shape
ngraph::Strides, // Max Pool stride
ngraph::Shape, // Max Pool shape
ngraph::op::PadType // Padding type
> padded2ValidParams;
typedef std::tuple<
bool, // With / without Fake Quantize layers
padded2ValidParams // Test parameters
> fqPadded2ValidParams;
struct ConvData {
size_t input_height;
size_t input_width;
@ -47,40 +66,66 @@ void GetConvParams(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvData&
conv_data.pads_end_width = conv->get_pads_end()[1];
}
std::shared_ptr<ngraph::opset7::Result> createFunction(const modelType& model,
const ngraph::Output<ngraph::Node>& input_node,
const ngraph::Shape& filters_shape,
const ngraph::Strides& conv_stride,
const ngraph::CoordinateDiff& pads_begin,
const ngraph::CoordinateDiff& pads_end,
const ngraph::Strides& conv_dilation,
const ngraph::Shape& bias_shape,
const ngraph::Strides& maxpool_stride,
const ngraph::Shape& maxpool_shape,
const ngraph::op::PadType& pad_type,
ConvData* conv_data) {
auto transpose_in_order = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2});
std::shared_ptr<ngraph::opset7::FakeQuantize> createFQ(ngraph::Output<ngraph::Node>& in_node) {
auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {5});
auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
return std::make_shared<ngraph::opset7::FakeQuantize>(in_node, input_low, input_high, output_low, output_high, 11);
}
// Adds a broadcasted bias (Add with bias_const) on top of in_node and, when fq is set,
// follows it with a FakeQuantize layer (see createFQ for the fixed quantization range).
// Returns the output of the last node created (Add, or FakeQuantize when fq == true).
// NOTE(review): bias_const is read-only, so it is taken by const&; fq is a cheap scalar,
// so it is passed by value instead of const bool&.
ngraph::Output<ngraph::Node> createBiasFQ(const ngraph::Output<ngraph::Node>& in_node,
    const std::shared_ptr<ngraph::opset7::Constant>& bias_const, bool fq) {
    ngraph::Output<ngraph::Node> bcast_add = std::make_shared<ngraph::opset7::Add>(in_node, bias_const);

    if (fq) {
        bcast_add = createFQ(bcast_add);
    }

    return bcast_add;
}
std::shared_ptr<ngraph::opset7::Result> createFunction(const bool& fq,
const modelType& model,
const ngraph::Output<ngraph::Node>& input_node,
const ngraph::Shape& filters_shape,
const ngraph::Strides& conv_stride,
const ngraph::CoordinateDiff& pads_begin,
const ngraph::CoordinateDiff& pads_end,
const ngraph::Strides& conv_dilation,
const ngraph::Shape& bias_shape,
const ngraph::Strides& maxpool_stride,
const ngraph::Shape& maxpool_shape,
const ngraph::op::PadType& pad_type,
ConvData* conv_data) {
auto transpose_in_order = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, ngraph::Shape{4}, std::vector<int64_t>{0, 3, 1, 2});
auto transpose_in = std::make_shared<ngraph::opset7::Transpose>(input_node, transpose_in_order);
auto filters = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64,
ngraph::Output<ngraph::Node> filters = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64,
ngraph::Shape{4, input_node.get_shape()[3], filters_shape[0], filters_shape[1]});
if (fq) {
filters = createFQ(filters);
}
auto conv = std::make_shared<ngraph::opset7::Convolution>(transpose_in, filters, conv_stride, pads_begin, pads_end, conv_dilation, pad_type);
if (conv_data)
GetConvParams(conv, *conv_data);
auto transpose_out_order = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1});
auto transpose_out_order = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, ngraph::Shape{4}, std::vector<int64_t>{0, 2, 3, 1});
auto bias_const = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, bias_shape);
ngraph::Output<ngraph::Node> last_op = std::make_shared<ngraph::opset7::Transpose>(conv, transpose_out_order);
switch (model) {
case modelType::TranspConvBcastAddTransp:
{
auto bcast_add = std::make_shared<ngraph::opset7::Add>(conv, bias_const);
auto bcast_add = createBiasFQ(conv, bias_const, fq);
last_op = std::make_shared<ngraph::opset7::Transpose>(bcast_add, transpose_out_order);
}
break;
case modelType::TranspConvBcastAddMaxPoolTransp:
{
auto bcast_add = std::make_shared<ngraph::opset7::Add>(conv, bias_const);
auto bcast_add = createBiasFQ(conv, bias_const, fq);
auto maxpool = std::make_shared<ngraph::opset7::MaxPool>(bcast_add, maxpool_stride, ngraph::Shape{0, 0}, ngraph::Shape{0, 0}, maxpool_shape,
ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID);
auto transpose = std::make_shared<ngraph::opset7::Transpose>(maxpool, transpose_out_order);
@ -90,7 +135,7 @@ std::shared_ptr<ngraph::opset7::Result> createFunction(const modelType& model,
case modelType::TranspConvBcastAddActTransp:
{
auto bcast_add = std::make_shared<ngraph::opset7::Add>(conv, bias_const);
auto bcast_add = createBiasFQ(conv, bias_const, fq);
auto activation = std::make_shared<ngraph::opset7::Relu>(bcast_add);
last_op = std::make_shared<ngraph::opset7::Transpose>(activation, transpose_out_order);
}
@ -98,7 +143,7 @@ std::shared_ptr<ngraph::opset7::Result> createFunction(const modelType& model,
case modelType::TranspConvBcastAddMaxPoolActTransp:
{
auto bcast_add = std::make_shared<ngraph::opset7::Add>(conv, bias_const);
auto bcast_add = createBiasFQ(conv, bias_const, fq);
auto maxpool = std::make_shared<ngraph::opset7::MaxPool>(bcast_add, maxpool_stride, ngraph::Shape{0, 0}, ngraph::Shape{0, 0}, maxpool_shape,
ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID);
auto activation = std::make_shared<ngraph::opset7::Relu>(maxpool);
@ -108,13 +153,13 @@ std::shared_ptr<ngraph::opset7::Result> createFunction(const modelType& model,
case modelType::TranspConvTranspBcastAdd:
{
last_op = std::make_shared<ngraph::opset7::Add>(last_op, bias_const);
last_op = createBiasFQ(last_op, bias_const, fq);
}
break;
case modelType::TranspConvTranspBcastAddAct:
{
auto bcast_add = std::make_shared<ngraph::opset7::Add>(last_op, bias_const);
auto bcast_add = createBiasFQ(last_op, bias_const, fq);
last_op = std::make_shared<ngraph::opset7::Relu>(bcast_add);
}
break;
@ -127,7 +172,8 @@ std::shared_ptr<ngraph::opset7::Result> createFunction(const modelType& model,
return std::make_shared<ngraph::opset7::Result>(last_op);
}
std::shared_ptr<ngraph::Function> get_initial_function(const modelType& model,
std::shared_ptr<ngraph::Function> get_initial_function(const bool& fq,
const modelType& model,
const ngraph::PartialShape& input_shape,
const ngraph::Shape& filters_shape,
const ngraph::Strides& conv_stride,
@ -140,7 +186,7 @@ std::shared_ptr<ngraph::Function> get_initial_function(const modelType& model,
const ngraph::op::PadType& pad_type,
ConvData& conv_data) {
auto inputParams = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
auto result = createFunction(model, inputParams, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape,
auto result = createFunction(fq, model, inputParams, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape,
maxpool_stride, maxpool_shape, pad_type, &conv_data);
return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{inputParams});
}
@ -148,11 +194,7 @@ std::shared_ptr<ngraph::Function> get_initial_function(const modelType& model,
// ---------------------------------------------------------------------------------------------------------------------
class ConvertPadded2ValidConvTestInvalidFixture : public CommonTestUtils::TestsCommon,
public ::testing::WithParamInterface<std::tuple<modelType, ngraph::PartialShape, ngraph::Shape, ngraph::Strides,
ngraph::CoordinateDiff, ngraph::CoordinateDiff,
ngraph::Strides, ngraph::Shape,
ngraph::Strides, ngraph::Shape,
ngraph::op::PadType>> {
public ::testing::WithParamInterface<fqPadded2ValidParams> {
public:
void SetUp() override;
public:
@ -161,32 +203,32 @@ public:
};
void ConvertPadded2ValidConvTestInvalidFixture::SetUp() {
bool fq;
padded2ValidParams params;
ngraph::PartialShape input_shape;
ngraph::Shape filters_shape, bias_shape, maxpool_shape;
ngraph::Strides conv_stride, conv_dilation, maxpool_stride;
ngraph::CoordinateDiff pads_begin, pads_end;
ngraph::op::PadType pad_type;
ConvData conv_data;
std::tie(fq, params) = this->GetParam();
std::tie(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
bias_shape, maxpool_stride, maxpool_shape, pad_type) = this->GetParam();
bias_shape, maxpool_stride, maxpool_shape, pad_type) = params;
function = get_initial_function(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
function = get_initial_function(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
reference_function = get_initial_function(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
reference_function = get_initial_function(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
}
// ---------------------------------------------------------------------------------------------------------------------
class ConvertPadded2ValidConvTestFixture: public CommonTestUtils::TestsCommon,
public ::testing::WithParamInterface<std::tuple<modelType, ngraph::PartialShape, ngraph::Shape, ngraph::Strides,
ngraph::CoordinateDiff, ngraph::CoordinateDiff,
ngraph::Strides, ngraph::Shape,
ngraph::Strides, ngraph::Shape,
ngraph::op::PadType>> {
public ::testing::WithParamInterface<fqPadded2ValidParams> {
public:
void SetUp() override;
std::shared_ptr<ngraph::Function> get_reference(const modelType& model,
std::shared_ptr<ngraph::Function> get_reference(const bool& fq,
const modelType& model,
const ngraph::PartialShape& input_shape,
const ngraph::Shape& filters_shape,
const ngraph::Strides& conv_stride,
@ -204,18 +246,21 @@ public:
};
void ConvertPadded2ValidConvTestFixture::SetUp() {
bool fq;
padded2ValidParams params;
ngraph::PartialShape input_shape;
ngraph::Shape filters_shape, bias_shape, maxpool_shape;
ngraph::Strides conv_stride, conv_dilation, maxpool_stride;
ngraph::CoordinateDiff pads_begin, pads_end;
ngraph::op::PadType pad_type;
ConvData conv_data;
std::tie(fq, params) = this->GetParam();
std::tie(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
bias_shape, maxpool_stride, maxpool_shape, pad_type) = this->GetParam();
bias_shape, maxpool_stride, maxpool_shape, pad_type) = params;
function = get_initial_function(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
function = get_initial_function(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
reference_function = get_reference(model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
reference_function = get_reference(fq, model, input_shape, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation,
bias_shape, maxpool_stride, maxpool_shape, pad_type, conv_data);
}
@ -309,7 +354,8 @@ std::shared_ptr<ngraph::Node> CreatePaddedNet(const ngraph::Output<ngraph::Node>
return padded_input_plane;
}
std::shared_ptr<ngraph::Function> ConvertPadded2ValidConvTestFixture::get_reference(const modelType& model,
std::shared_ptr<ngraph::Function> ConvertPadded2ValidConvTestFixture::get_reference(const bool& fq,
const modelType& model,
const ngraph::PartialShape& input_shape,
const ngraph::Shape& filters_shape,
const ngraph::Strides& conv_stride,
@ -343,12 +389,12 @@ std::shared_ptr<ngraph::Function> ConvertPadded2ValidConvTestFixture::get_refere
conv_data.pads_begin_width + conv_data.input_width + conv_data.pads_end_width,
conv_data.input_channel_count});
auto padded_input_plane_reshaped = std::make_shared<ngraph::opset7::Reshape>(padded_input_plane, shape_const, false);
result = createFunction(model, padded_input_plane_reshaped, filters_shape, conv_stride,
result = createFunction(fq, model, padded_input_plane_reshaped, filters_shape, conv_stride,
ngraph::CoordinateDiff{0, 0}, ngraph::CoordinateDiff{0, 0}, conv_dilation, bias_shape,
maxpool_stride, maxpool_shape, ngraph::op::PadType::EXPLICIT, nullptr);
} else {
// Valid padding
result = createFunction(model, inputParams, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape,
result = createFunction(fq, model, inputParams, filters_shape, conv_stride, pads_begin, pads_end, conv_dilation, bias_shape,
maxpool_stride, maxpool_shape, pad_type, nullptr);
}
@ -364,25 +410,13 @@ void execute_test(const modelType& model, std::shared_ptr<ngraph::Function> func
switch (model) {
default:
case modelType::TranspConvTransp:
manager.register_pass<GNAPluginNS::ConvertPadded2ValidConv>();
break;
case modelType::TranspConvBcastAddTransp:
manager.register_pass<GNAPluginNS::ConvertPaddedWithBias2ValidConv>();
break;
case modelType::TranspConvBcastAddMaxPoolTransp:
manager.register_pass<GNAPluginNS::ConvertPaddedWithBiasMaxPool2ValidConv>();
break;
case modelType::TranspConvBcastAddActTransp:
manager.register_pass<GNAPluginNS::ConvertPaddedWithBiasAF2ValidConv>();
break;
case modelType::TranspConvBcastAddMaxPoolActTransp:
manager.register_pass<GNAPluginNS::ConvertPaddedWithBiasMaxPoolAF2ValidConv>();
break;
case modelType::TranspConvTranspBcastAdd:
manager.register_pass<GNAPluginNS::ConvertPaddedTransposedWithBias2ValidConv>();
break;
case modelType::TranspConvTranspBcastAddAct:
manager.register_pass<GNAPluginNS::ConvertPaddedTransposedWithBiasAF2ValidConv>();
manager.register_pass<GNAPluginNS::ConvertPadded2ValidConv>();
break;
}
@ -397,56 +431,62 @@ TEST_P(ConvertPadded2ValidConvTestFixture, CompareFunctions) {
}
INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvTestSuite, ConvertPadded2ValidConvTestFixture,
::testing::Values(
std::make_tuple(modelType::TranspConvTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddActTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_LOWER),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolActTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_UPPER),
std::make_tuple(modelType::TranspConvTranspBcastAdd, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvTranspBcastAddAct, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT)));
::testing::Combine(
// With / without Fake Quantize layers
::testing::Values(true, false),
::testing::Values(
std::make_tuple(modelType::TranspConvTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddActTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_LOWER),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolActTransp, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_UPPER),
std::make_tuple(modelType::TranspConvTranspBcastAdd, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvTranspBcastAddAct, ngraph::PartialShape{1, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT))));
TEST_P(ConvertPadded2ValidConvTestInvalidFixture, CompareFunctions) {
execute_test(model, function, reference_function);
}
INSTANTIATE_TEST_SUITE_P(ConvertPadded2ValidConvInvalidTestSuite, ConvertPadded2ValidConvTestInvalidFixture,
::testing::Values(
std::make_tuple(modelType::TranspConvTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_UPPER),
std::make_tuple(modelType::TranspConvBcastAddTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolTransp, ngraph::PartialShape{2, 16, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{5, 1}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddActTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_LOWER),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolActTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 5}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 4}, ngraph::op::PadType::SAME_UPPER),
std::make_tuple(modelType::TranspConvTranspBcastAdd, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvTranspBcastAddAct, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT)));
::testing::Combine(
// With / without Fake Quantize layers
::testing::Values(true, false),
::testing::Values(
std::make_tuple(modelType::TranspConvTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_UPPER),
std::make_tuple(modelType::TranspConvBcastAddTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolTransp, ngraph::PartialShape{2, 16, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{5, 1}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvBcastAddActTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::SAME_LOWER),
std::make_tuple(modelType::TranspConvBcastAddMaxPoolActTransp, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 5}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 4, 1, 1}, ngraph::Strides{1, 1}, ngraph::Shape{1, 4}, ngraph::op::PadType::SAME_UPPER),
std::make_tuple(modelType::TranspConvTranspBcastAdd, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT),
std::make_tuple(modelType::TranspConvTranspBcastAddAct, ngraph::PartialShape{2, 1, 16, 8}, ngraph::Shape{1, 2}, ngraph::Strides{1, 1},
ngraph::CoordinateDiff{0, 2}, ngraph::CoordinateDiff{0, 3}, ngraph::Strides{1, 1},
ngraph::Shape{1, 1, 1, 4}, ngraph::Strides{1, 1}, ngraph::Shape{1, 2}, ngraph::op::PadType::EXPLICIT))));
} // namespace