Add DilatedConvolutionConverter, BatchToSpaceFusion and SpaceToBatchFusion (#4689)

Mateusz Tabaka 2021-03-24 16:00:30 +01:00 committed by GitHub
parent f0e574903a
commit c9d0292929
14 changed files with 1011 additions and 9 deletions

@ -0,0 +1,37 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
namespace ngraph {
namespace pass {
class TRANSFORMATIONS_API BatchToSpaceFusion;
} // namespace pass
} // namespace ngraph
/**
* @ingroup ie_transformation_common_api
* @brief BatchToSpaceFusion transformation replaces the following graph:
* Transpose (or Reshape) -> DepthToSpace -> StridedSlice -> Transpose (or Reshape)
* with a single BatchToSpace node
* Restrictions:
* - input rank must be 4
* - Transpose permutation must be [1, 0, 2, 3]
* - DepthToSpaceMode must be BLOCKS_FIRST
*/
class ngraph::pass::BatchToSpaceFusion: public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
BatchToSpaceFusion();
};
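
The tests added later in this commit drive the pass through ngraph::pass::Manager. A minimal usage sketch (the helper name run_batch_to_space_fusion is illustrative, not part of this commit):

#include <memory>
#include <ngraph/function.hpp>
#include <ngraph/pass/manager.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/common_optimizations/batch_to_space_fusion.hpp>

// Runs the fusion on a function that contains the matched subgraph;
// on a match the subgraph collapses into a single BatchToSpace node.
void run_batch_to_space_fusion(const std::shared_ptr<ngraph::Function>& f) {
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::InitNodeInfo>();
    manager.register_pass<ngraph::pass::BatchToSpaceFusion>();
    manager.run_passes(f);
}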

@ -0,0 +1,35 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
namespace ngraph {
namespace pass {
class TRANSFORMATIONS_API DilatedConvolutionConverter;
} // namespace pass
} // namespace ngraph
/**
* @ingroup ie_transformation_common_api
* @brief DilatedConvolutionConverter transformation replaces the following graph:
* SpaceToBatch -> Convolution -> BatchToSpace
* with a single Convolution node that has updated pads and dilations
* Restrictions:
* - pads in SpaceToBatch must be 0 in the first and second positions
*/
class ngraph::pass::DilatedConvolutionConverter: public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
DilatedConvolutionConverter();
};
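
As an illustration of the arithmetic this pass performs (numbers taken from the DilatedConvolutionConverter test below): the Convolution dilations are copied from block_shape[2:], and the new pads are the SpaceToBatch pads minus the BatchToSpace crops over the spatial dimensions. A minimal standalone sketch, not tied to the nGraph API:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Values match the DilatedConvolutionConverter test in this commit.
    std::vector<std::ptrdiff_t> stb_pads_begin{0, 0, 1, 1};  // SpaceToBatch pads_begin
    std::vector<std::ptrdiff_t> stb_pads_end{0, 0, 1, 1};    // SpaceToBatch pads_end
    std::vector<std::ptrdiff_t> bts_crops_begin{0, 0, 0, 0}; // BatchToSpace crops_begin
    std::vector<std::ptrdiff_t> bts_crops_end{0, 0, 2, 3};   // BatchToSpace crops_end
    std::vector<std::size_t> block_shape{1, 1, 2, 2};        // SpaceToBatch block_shape
    for (std::size_t i = 2; i < stb_pads_begin.size(); ++i) {
        std::cout << "spatial dim " << i - 2
                  << ": dilation=" << block_shape[i]                          // 2, 2
                  << " pad_begin=" << stb_pads_begin[i] - bts_crops_begin[i]  // 1, 1
                  << " pad_end=" << stb_pads_end[i] - bts_crops_end[i]        // -1, -2
                  << "\n";
    }
    return 0;
}

These are exactly the pads and dilations of the reference Convolution in that test.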

@ -0,0 +1,38 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <memory>
#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
namespace ngraph {
namespace pass {
class TRANSFORMATIONS_API SpaceToBatchFusion;
} // namespace pass
} // namespace ngraph
/**
* @ingroup ie_transformation_common_api
* @brief SpaceToBatchFusion transformation replaces the following graph:
* Transpose (or Reshape) -> Pad -> SpaceToDepth -> Transpose (or Reshape)
* with a single SpaceToBatch node
* Restrictions:
* - input rank must be 4
* - Transpose permutation must be [1, 0, 2, 3]
* - pad value must be 0 and PadMode must be CONSTANT
* - SpaceToDepthMode must be BLOCKS_FIRST
*/
class ngraph::pass::SpaceToBatchFusion: public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
SpaceToBatchFusion();
};
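
In the fused pattern the Pad's pads_begin/pads_end are forwarded as the SpaceToBatch pads, and the SpaceToDepth block_size b becomes a block_shape of {1, 1, b, b}. A minimal sketch of the node the pass builds, using the values from the SpaceToBatchFusionTranspose test below (the helper name is illustrative):

#include <memory>
#include <ngraph/opsets/opset6.hpp>

// Equivalent SpaceToBatch for block_size = 2, Pad pads_begin = {1, 1, 1, 1}
// and pads_end = {2, 2, 3, 3}; 'data' is the original 4D input.
std::shared_ptr<ngraph::Node> make_fused_space_to_batch(const ngraph::Output<ngraph::Node>& data) {
    using namespace ngraph;
    auto block_shape = opset6::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2});
    auto pads_begin = opset6::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1});
    auto pads_end = opset6::Constant::create(element::i64, Shape{4}, {2, 2, 3, 3});
    return std::make_shared<opset6::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
}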

@ -0,0 +1,119 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/common_optimizations/batch_to_space_fusion.hpp"
#include "transformations/utils/utils.hpp"
#include <memory>
#include <vector>
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/pattern/op/or.hpp>
#include "itt.hpp"
NGRAPH_RTTI_DEFINITION(ngraph::pass::BatchToSpaceFusion, "BatchToSpaceFusion", 0);
ngraph::pass::BatchToSpaceFusion::BatchToSpaceFusion() {
MATCHER_SCOPE(BatchToSpaceFusion);
auto data_pattern = pattern::any_input(pattern::has_static_shape());
auto reshape_before_pattern = pattern::wrap_type<opset6::Reshape>({data_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto trans_before_pattern = pattern::wrap_type<opset6::Transpose>({data_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto reshape_or_transpose_before_pattern = std::make_shared<pattern::op::Or>(OutputVector{reshape_before_pattern, trans_before_pattern});
auto depth_to_space_pattern = pattern::wrap_type<opset6::DepthToSpace>({reshape_or_transpose_before_pattern});
auto starts_pattern = pattern::wrap_type<opset6::Constant>();
auto ends_pattern = pattern::wrap_type<opset6::Constant>();
auto slice_pattern = pattern::wrap_type<opset6::StridedSlice>({depth_to_space_pattern, starts_pattern, ends_pattern,
pattern::wrap_type<opset6::Constant>()});
auto reshape_after_pattern = pattern::wrap_type<opset6::Reshape>({slice_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto trans_after_pattern = pattern::wrap_type<opset6::Transpose>({slice_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto reshape_or_transpose_after_pattern = std::make_shared<pattern::op::Or>(OutputVector{reshape_after_pattern, trans_after_pattern});
ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
auto get_reshape_or_transpose = [&pattern_map] (const std::shared_ptr<Node>& reshape_pattern,
const std::shared_ptr<Node>& trans_pattern) -> std::shared_ptr<Node> {
if (pattern_map.count(reshape_pattern))
return pattern_map.at(reshape_pattern).get_node_shared_ptr();
if (pattern_map.count(trans_pattern))
return pattern_map.at(trans_pattern).get_node_shared_ptr();
return nullptr;
};
auto check_input_output_shape = [] (const std::shared_ptr<Node>& node) -> bool {
const auto& input_shape = node->get_input_shape(0);
const auto& output_shape = node->get_output_shape(0);
// Transpose permutation has to be [1, 0, 2, 3]
return input_shape[0] == output_shape[1] &&
input_shape[1] == output_shape[0] &&
input_shape[2] == output_shape[2] &&
input_shape[3] == output_shape[3];
};
std::shared_ptr<Node> reshape_or_trans_before = get_reshape_or_transpose(reshape_before_pattern, trans_before_pattern);
if (!reshape_or_trans_before)
return false;
if (!check_input_output_shape(reshape_or_trans_before))
return false;
std::shared_ptr<Node> reshape_or_trans_after = get_reshape_or_transpose(reshape_after_pattern, trans_after_pattern);
if (!reshape_or_trans_after)
return false;
if (!check_input_output_shape(reshape_or_trans_after))
return false;
auto depth_to_space = std::dynamic_pointer_cast<opset6::DepthToSpace>(pattern_map.at(depth_to_space_pattern).get_node_shared_ptr());
if (!depth_to_space)
return false;
if (depth_to_space->get_mode() != opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST)
return false;
const auto& dts_shape = depth_to_space->get_shape();
if (dts_shape.size() != 4)
return false;
auto block_size = static_cast<int64_t>(depth_to_space->get_block_size());
auto block_shape = op::Constant::create(element::i64, Shape{4},
std::vector<int64_t>{1, 1, block_size, block_size});
auto starts = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(starts_pattern).get_node_shared_ptr());
if (!starts)
return false;
auto ends = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(ends_pattern).get_node_shared_ptr());
if (!ends)
return false;
auto starts_value = starts->cast_vector<int64_t>();
auto ends_value = ends->cast_vector<int64_t>();
// Convert StridedSlice's 'ends' input to BatchToSpace's 'crops_end' input
for (size_t i = 0; i < ends_value.size(); i++) {
if (ends_value[i] < 0) {
// negative ends become positive crops
// e.g. ends[i] == -2 means cropping i-th dimension by 2 from the back
ends_value[i] = -ends_value[i];
} else if (ends_value[i] > static_cast<int64_t>(dts_shape[i])) {
// no cropping from the back if ends[i] > shape[i]
ends_value[i] = 0;
} else {
// otherwise ends[i] lies within [0, shape[i]] - crop the difference: shape[i] - ends[i]
ends_value[i] = dts_shape[i] - ends_value[i];
}
}
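// Worked example (matches the BatchToSpaceFusionTranspose test): with
// dts_shape = {3, 3, 8, 16} and ends = {2, 1, -1, 2} the loop above yields
// crops_end = {3 - 2, 3 - 1, -(-1), 16 - 2} = {1, 2, 1, 14}.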
auto crops_begin = op::Constant::create(element::i64, Shape{4}, starts_value);
auto crops_end = op::Constant::create(element::i64, Shape{4}, ends_value);
auto batch_to_space = register_new_node<ngraph::opset6::BatchToSpace>(pattern_map.at(data_pattern), block_shape, crops_begin, crops_end);
batch_to_space->set_friendly_name(reshape_or_trans_after->get_friendly_name());
copy_runtime_info({
reshape_or_trans_before,
depth_to_space,
pattern_map.at(slice_pattern).get_node_shared_ptr(),
reshape_or_trans_after
},
batch_to_space);
replace_node(reshape_or_trans_after, batch_to_space);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(reshape_or_transpose_after_pattern, matcher_name);
this->register_matcher(m, callback);
}

@ -35,6 +35,9 @@
#include "transformations/common_optimizations/mvn_fusion.hpp"
#include "transformations/common_optimizations/binarize_weights.hpp"
#include "transformations/common_optimizations/conv_to_binary_conv.hpp"
#include "transformations/common_optimizations/space_to_batch_fusion.hpp"
#include "transformations/common_optimizations/batch_to_space_fusion.hpp"
#include "transformations/common_optimizations/dilated_convolution_converter.hpp"
#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
#include "transformations/op_conversions/convert_pad_to_group_conv.hpp"
#include "transformations/op_conversions/convert_divide.hpp"
@ -104,6 +107,9 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::
common_fusions->add_matcher<ngraph::pass::PadFusion>();
common_fusions->add_matcher<ngraph::pass::SoftmaxFusion>();
common_fusions->add_matcher<ngraph::pass::MVNFusion>();
common_fusions->add_matcher<ngraph::pass::SpaceToBatchFusion>();
common_fusions->add_matcher<ngraph::pass::BatchToSpaceFusion>();
common_fusions->add_matcher<ngraph::pass::DilatedConvolutionConverter>();
common_fusions->set_name("ngraph::pass::CommonFusions");
manager.register_pass<ngraph::pass::ConvertPadToGroupConvolution, false>();

@ -0,0 +1,92 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/common_optimizations/dilated_convolution_converter.hpp"
#include "transformations/utils/utils.hpp"
#include <memory>
#include <vector>
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include "itt.hpp"
NGRAPH_RTTI_DEFINITION(ngraph::pass::DilatedConvolutionConverter, "DilatedConvolutionConverter", 0);
ngraph::pass::DilatedConvolutionConverter::DilatedConvolutionConverter() {
MATCHER_SCOPE(DilatedConvolutionConverter);
auto data_pattern = pattern::any_input();
auto block_shape_pattern = pattern::wrap_type<opset6::Constant>();
auto pads_begin_pattern = pattern::wrap_type<opset6::Constant>();
auto pads_end_pattern = pattern::wrap_type<opset6::Constant>();
auto space_to_batch_pattern = pattern::wrap_type<opset6::SpaceToBatch>({data_pattern, block_shape_pattern, pads_begin_pattern, pads_end_pattern});
auto conv_pattern = pattern::wrap_type<opset6::Convolution>({space_to_batch_pattern, pattern::any_input()});
auto crops_begin_pattern = pattern::wrap_type<opset6::Constant>();
auto crops_end_pattern = pattern::wrap_type<opset6::Constant>();
auto batch_to_space_pattern = pattern::wrap_type<opset6::BatchToSpace>({conv_pattern, pattern::any_input(),
crops_begin_pattern, crops_end_pattern});
matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
auto block_shape = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(block_shape_pattern).get_node_shared_ptr());
if (!block_shape)
return false;
auto pads_begin = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(pads_begin_pattern).get_node_shared_ptr());
if (!pads_begin)
return false;
auto pads_end = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(pads_end_pattern).get_node_shared_ptr());
if (!pads_end)
return false;
auto crops_begin = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(crops_begin_pattern).get_node_shared_ptr());
if (!crops_begin)
return false;
auto crops_end = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(crops_end_pattern).get_node_shared_ptr());
if (!crops_end)
return false;
auto conv = std::dynamic_pointer_cast<opset6::Convolution>(pattern_map.at(conv_pattern).get_node_shared_ptr());
if (!conv)
return false;
auto block_shape_val = block_shape->cast_vector<size_t>();
auto dilations = conv->get_dilations();
for (size_t i = 0; i < dilations.size(); i++)
dilations[i] = block_shape_val[i + 2];
auto pads_begin_val = pads_begin->cast_vector<std::ptrdiff_t>();
auto pads_end_val = pads_end->cast_vector<std::ptrdiff_t>();
if (!(pads_begin_val[0] == 0 &&
pads_begin_val[1] == 0 &&
pads_end_val[0] == 0 &&
pads_end_val[1] == 0))
return false;
auto crops_begin_val = crops_begin->cast_vector<std::ptrdiff_t>();
auto crops_end_val = crops_end->cast_vector<std::ptrdiff_t>();
std::vector<std::ptrdiff_t> new_pads_begin;
for (size_t i = 2; i < pads_begin_val.size(); i++)
new_pads_begin.push_back(pads_begin_val[i] - crops_begin_val[i]);
std::vector<std::ptrdiff_t> new_pads_end;
for (size_t i = 2; i < pads_end_val.size(); i++)
new_pads_end.push_back(pads_end_val[i] - crops_end_val[i]);
auto new_conv = register_new_node<opset6::Convolution>(pattern_map.at(data_pattern), conv->input_value(1),
conv->get_strides(), new_pads_begin, new_pads_end, dilations, op::PadType::EXPLICIT);
auto batch_to_space = pattern_map.at(batch_to_space_pattern).get_node_shared_ptr();
new_conv->set_friendly_name(batch_to_space->get_friendly_name());
copy_runtime_info({
pattern_map.at(space_to_batch_pattern).get_node_shared_ptr(),
pattern_map.at(conv_pattern).get_node_shared_ptr(),
batch_to_space,
},
new_conv);
replace_node(batch_to_space, new_conv);
return true;
};
auto m = std::make_shared<pattern::Matcher>(batch_to_space_pattern, matcher_name);
this->register_matcher(m, callback);
}

@ -0,0 +1,103 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/common_optimizations/space_to_batch_fusion.hpp"
#include "transformations/utils/utils.hpp"
#include <memory>
#include <vector>
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/pattern/op/or.hpp>
#include "itt.hpp"
NGRAPH_RTTI_DEFINITION(ngraph::pass::SpaceToBatchFusion, "SpaceToBatchFusion", 0);
ngraph::pass::SpaceToBatchFusion::SpaceToBatchFusion() {
MATCHER_SCOPE(SpaceToBatchFusion);
auto data_pattern = pattern::any_input();
auto reshape_before_pattern = pattern::wrap_type<opset6::Reshape>({data_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto trans_before_pattern = pattern::wrap_type<opset6::Transpose>({data_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto reshape_or_transpose_before_pattern = std::make_shared<pattern::op::Or>(OutputVector{reshape_before_pattern, trans_before_pattern});
auto pads_begin_pattern = pattern::wrap_type<opset6::Constant>();
auto pads_end_pattern = pattern::wrap_type<opset6::Constant>();
auto pad_value = pattern::wrap_type<opset6::Constant>();
auto pad_pattern = pattern::wrap_type<opset6::Pad>({reshape_or_transpose_before_pattern, pads_begin_pattern, pads_end_pattern, pad_value});
auto space_to_depth_pattern = pattern::wrap_type<opset6::SpaceToDepth>({pad_pattern}, pattern::has_static_shape());
auto reshape_after_pattern = pattern::wrap_type<opset6::Reshape>({space_to_depth_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto trans_after_pattern = pattern::wrap_type<opset6::Transpose>({space_to_depth_pattern, pattern::wrap_type<opset6::Constant>()}, pattern::rank_equals(4));
auto reshape_or_transpose_after_pattern = std::make_shared<pattern::op::Or>(OutputVector{reshape_after_pattern, trans_after_pattern});
matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
auto get_reshape_or_transpose = [&pattern_map] (const std::shared_ptr<Node>& reshape_pattern,
const std::shared_ptr<Node>& trans_pattern) -> std::shared_ptr<Node> {
if (pattern_map.count(reshape_pattern))
return pattern_map.at(reshape_pattern).get_node_shared_ptr();
if (pattern_map.count(trans_pattern))
return pattern_map.at(trans_pattern).get_node_shared_ptr();
return nullptr;
};
auto check_input_output_shape = [] (const std::shared_ptr<Node>& node) -> bool {
const auto& input_shape = node->get_input_shape(0);
const auto& output_shape = node->get_output_shape(0);
// Transpose permutation has to be [1, 0, 2, 3]
return input_shape[0] == output_shape[1] &&
input_shape[1] == output_shape[0] &&
input_shape[2] == output_shape[2] &&
input_shape[3] == output_shape[3];
};
std::shared_ptr<Node> reshape_or_trans_before = get_reshape_or_transpose(reshape_before_pattern, trans_before_pattern);
if (!reshape_or_trans_before)
return false;
std::shared_ptr<Node> reshape_or_trans_after = get_reshape_or_transpose(reshape_after_pattern, trans_after_pattern);
if (!reshape_or_trans_after)
return false;
if (!check_input_output_shape(reshape_or_trans_before))
return false;
if (!check_input_output_shape(reshape_or_trans_after))
return false;
auto pad = std::dynamic_pointer_cast<opset6::Pad>(pattern_map.at(pad_pattern).get_node_shared_ptr());
if (!pad || pad->get_pad_mode() != op::PadMode::CONSTANT)
return false;
auto pad_value_const = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(pad_value).get_node_shared_ptr());
if (!pad_value_const)
return false;
auto pad_value = pad_value_const->cast_vector<float>();
if (pad_value.size() != 1 || pad_value[0] != 0.0f)
return false;
auto space_to_depth = std::dynamic_pointer_cast<opset6::SpaceToDepth>(pattern_map.at(space_to_depth_pattern).get_node_shared_ptr());
if (!space_to_depth)
return false;
if (space_to_depth->get_mode() != opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST)
return false;
auto block_size = static_cast<int64_t>(space_to_depth->get_block_size());
auto block_shape = op::Constant::create(element::i64, Shape{4},
std::vector<int64_t>{1, 1, block_size, block_size});
auto space_to_batch = register_new_node<opset6::SpaceToBatch>(pattern_map.at(data_pattern), block_shape,
pattern_map.at(pads_begin_pattern), pattern_map.at(pads_end_pattern));
space_to_batch->set_friendly_name(reshape_or_trans_after->get_friendly_name());
copy_runtime_info({
reshape_or_trans_before,
pad,
space_to_depth,
reshape_or_trans_after,
},
space_to_batch);
replace_node(reshape_or_trans_after, space_to_batch);
return true;
};
auto m = std::make_shared<pattern::Matcher>(reshape_or_transpose_after_pattern, matcher_name);
this->register_matcher(m, callback);
}

@ -0,0 +1,207 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include <memory>
#include <queue>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset6.hpp>
#include <transformations/common_optimizations/batch_to_space_fusion.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
using namespace testing;
using namespace ngraph;
TEST(TransformationTests, BatchToSpaceFusionTranspose) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 1, -1, 2}),
std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::BatchToSpaceFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto batch_to_space = std::make_shared<opset6::BatchToSpace>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 1}),
op::Constant::create(element::i64, Shape{4}, {1, 2, 1, 14}));
f_ref = std::make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, BatchToSpaceFusionReshape) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{4, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{4}, {0, 0, 3, 0}),
op::Constant::create(element::i64, Shape{4}, {2, 1, 7, -2}),
std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
auto reshape_after = std::make_shared<opset6::Reshape>(slice, op::Constant::create(element::i64, Shape{4}, {1, 2, 4, 14}), false);
f = std::make_shared<Function>(NodeVector{reshape_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::BatchToSpaceFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{4, 3, 4, 8});
auto batch_to_space = std::make_shared<opset6::BatchToSpace>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 3, 0}),
op::Constant::create(element::i64, Shape{4}, {1, 0, 1, 2}));
f_ref = std::make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeBatchToSpaceFusionInvalidTransposePerm) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {2, 0, 1, 3}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 1, -1, 2}),
std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::BatchToSpaceFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {2, 0, 1, 3}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 1, -1, 2}),
std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeBatchToSpaceFusionInvalidMode) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 1, -1, 2}),
std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::BatchToSpaceFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 1, -1, 2}),
std::vector<int64_t>{0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeBatchToSpaceFusionInvalidRank) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{5}, {0, 0, 2, 1, 1}),
op::Constant::create(element::i64, Shape{5}, {2, 1, -1, 2, 2}),
std::vector<int64_t>{0, 0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::BatchToSpaceFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
op::Constant::create(element::i64, Shape{5}, {0, 0, 2, 1, 1}),
op::Constant::create(element::i64, Shape{5}, {2, 1, -1, 2, 2}),
std::vector<int64_t>{0, 0, 0, 0, 0}, std::vector<int64_t>{0, 0, 0, 0, 0});
auto trans_after = std::make_shared<opset6::Transpose>(slice, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::BatchToSpaceFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}

@ -0,0 +1,101 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include <memory>
#include <queue>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset6.hpp>
#include <transformations/common_optimizations/dilated_convolution_converter.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
using namespace testing;
using namespace ngraph;
TEST(TransformationTests, DilatedConvolutionConverter) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 4, 10, 10});
auto filters = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 4, 3, 3});
auto space_to_batch = std::make_shared<opset6::SpaceToBatch>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}));
auto conv = std::make_shared<opset6::Convolution>(space_to_batch, filters,
Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});
auto batch_to_space = std::make_shared<opset6::BatchToSpace>(conv,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data, filters});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::DilatedConvolutionConverter>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 4, 10, 10});
auto filters = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 4, 3, 3});
auto conv = std::make_shared<opset6::Convolution>(data, filters,
Strides{1, 1}, CoordinateDiff{1, 1}, CoordinateDiff{-1, -2}, Strides{2, 2}, op::PadType::EXPLICIT);
f_ref = std::make_shared<Function>(NodeVector{conv}, ParameterVector{data, filters});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeDilatedConvolutionConverterNonZeroPadsForNC) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 4, 10, 10});
auto filters = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 5, 3, 3});
auto space_to_batch = std::make_shared<opset6::SpaceToBatch>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}));
auto conv = std::make_shared<opset6::Convolution>(space_to_batch, filters,
Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});
auto batch_to_space = std::make_shared<opset6::BatchToSpace>(conv,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data, filters});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::DilatedConvolutionConverter>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 4, 10, 10});
auto filters = std::make_shared<opset6::Parameter>(element::f32, Shape{1, 5, 3, 3});
auto space_to_batch = std::make_shared<opset6::SpaceToBatch>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}));
auto conv = std::make_shared<opset6::Convolution>(space_to_batch, filters,
Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});
auto batch_to_space = std::make_shared<opset6::BatchToSpace>(conv,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}),
op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 3}));
f_ref = std::make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data, filters});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}

@ -0,0 +1,236 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include <memory>
#include <queue>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset6.hpp>
#include <transformations/common_optimizations/space_to_batch_fusion.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
using namespace testing;
using namespace ngraph;
TEST(TransformationTests, SpaceToBatchFusionTranspose) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 2, 3, 3}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::SpaceToBatchFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto space_to_batch = std::make_shared<opset6::SpaceToBatch>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 2, 3, 3}));
f_ref = std::make_shared<Function>(NodeVector{space_to_batch}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, SpaceToBatchFusionReshape) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto reshape_before = std::make_shared<opset6::Reshape>(data, op::Constant::create(element::i64, Shape{4}, {3, 12, 4, 8}), false);
auto pad = std::make_shared<opset6::Pad>(reshape_before,
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 2, 3, 3}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::SpaceToBatchFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto space_to_batch = std::make_shared<opset6::SpaceToBatch>(data,
op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {2, 2, 3, 3}));
f_ref = std::make_shared<Function>(NodeVector{space_to_batch}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeSpaceToBatchFusionInvalidTransposePerm) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {3, 0, 2, 1}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 3, 2}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::SpaceToBatchFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {3, 0, 2, 1}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 3, 2}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeSpaceToBatchFusionInvalidPad) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {0, 1, 1, 0}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 3, 2}),
op::Constant::create(element::f32, Shape{}, {1}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::SpaceToBatchFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {0, 1, 1, 0}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 3, 2}),
op::Constant::create(element::f32, Shape{}, {1}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeSpaceToBatchFusionInvalidMode) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {0, 1, 1, 0}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 3, 2}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::SpaceToBatchFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{4}, {0, 1, 1, 0}),
op::Constant::create(element::i64, Shape{4}, {1, 1, 3, 2}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{4}, {1, 0, 2, 3}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, NegativeSpaceToBatchFusionInvalidRank) {
std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{5}, {0, 1, 1, 0, 0}),
op::Constant::create(element::i64, Shape{5}, {1, 1, 3, 2, 2}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
f = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
pass::Manager m;
m.register_pass<pass::InitNodeInfo>();
m.register_pass<pass::SpaceToBatchFusion>();
m.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
auto pad = std::make_shared<opset6::Pad>(trans_before,
op::Constant::create(element::i64, Shape{5}, {0, 1, 1, 0, 0}),
op::Constant::create(element::i64, Shape{5}, {1, 1, 3, 2, 2}),
op::Constant::create(element::f32, Shape{}, {0}), op::PadMode::CONSTANT);
auto space_to_depth = std::make_shared<opset6::SpaceToDepth>(pad, opset6::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
auto trans_after = std::make_shared<opset6::Transpose>(space_to_depth, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
f_ref = std::make_shared<Function>(NodeVector{trans_after}, ParameterVector{data});
}
auto res = compare_functions(f, f_ref, true);
ASSERT_TRUE(res.first) << res.second;
}

@ -64,6 +64,9 @@ namespace ngraph
NGRAPH_API
std::function<bool(Output<Node>)> has_static_rank();
NGRAPH_API
std::function<bool(Output<Node>)> rank_equals(const Dimension& expected_rank);
NGRAPH_API
std::function<bool(Output<Node>)> type_matches(const element::Type& type);
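
The new rank_equals predicate is what the fusions above use to restrict matches to 4D outputs. A minimal sketch of how it combines with wrap_type (the helper name and exact includes are approximations):

#include <memory>
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>

// Builds a pattern node that matches a Reshape with a 4D output fed by any
// input and a Constant, as BatchToSpaceFusion does.
std::shared_ptr<ngraph::Node> make_reshape_4d_pattern() {
    using namespace ngraph;
    return pattern::wrap_type<opset6::Reshape>(
        {pattern::any_input(), pattern::wrap_type<opset6::Constant>()},
        pattern::rank_equals(4));
}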

@ -96,6 +96,7 @@ namespace convert
TYPE_OUT_CASE(f16, arg, out);
TYPE_OUT_CASE(f32, arg, out);
TYPE_OUT_CASE(f64, arg, out);
TYPE_OUT_CASE(boolean, arg, out);
default: rc = false; break;
}
return rc;
@ -115,6 +116,7 @@ namespace convert
NGRAPH_TYPE_CASE(evaluate_convert, u64, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, f16, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, f32, arg, out);
NGRAPH_TYPE_CASE(evaluate_convert, boolean, arg, out);
default: rc = false; break;
}
return rc;

@ -102,6 +102,13 @@ namespace ngraph
};
}
std::function<bool(Output<Node>)> rank_equals(const Dimension& expected_rank)
{
return [=](Output<Node> output) -> bool {
return output.get_partial_shape().rank() == expected_rank;
};
}
std::function<bool(Output<Node>)> type_matches(const element::Type& type)
{
return [=](Output<Node> output) -> bool { return output.get_element_type() == type; };

@ -429,13 +429,11 @@ TEST(constant_folding, constant_unary_binary)
ASSERT_NO_THROW(pass_manager.run_passes(func_error));
}
template <typename T, typename U>
static void test_const_convert(const vector<T>& values_in, const vector<U>& values_expected)
{
auto constant = op::Constant::create(element::from<T>(), Shape{values_in.size()}, values_in);
auto convert = make_shared<op::Convert>(constant, element::from<U>());
convert->set_friendly_name("test");
auto f = make_shared<Function>(convert, ParameterVector{});
@ -450,13 +448,31 @@ TEST(constant_folding, const_convert)
as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_TRUE(new_const);
ASSERT_EQ(new_const->get_friendly_name(), "test");
ASSERT_EQ(new_const->get_output_element_type(0), element::from<U>());
auto values_out = new_const->template get_vector<U>();
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_convert)
{
{
vector<float> in{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7};
vector<uint64_t> expected{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7};
test_const_convert(in, expected);
}
{
vector<bool> in{false, true, true, false, false, false, true};
vector<float> expected{0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f};
test_const_convert(in, expected);
}
{
vector<float> in{1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f};
vector<bool> expected{true, false, true, false, true, false, true};
test_const_convert(in, expected);
}
}
TEST(constant_folding, shape_of_v0)
{
Shape input_shape{3, 4, 0, 22, 608, 909, 3};