[OV20] Preprocessing: convert_layout<std::vector<uint64_t>> implementation (#8213)

* Preprocessing: convert_layout<std::vector<uint64_t>> implementation

Users can call this overload without specifying a layout explicitly.

The same overload of convert_layout is added for post-processing.

The new convert_layout is now used in ie_network_reader.

* Fix review comment
Author: Mikhail Nosov, 2021-10-27 11:32:44 +03:00 (committed by GitHub)
Parent: ccffed468c, commit: d65e7d4d4f
14 changed files with 436 additions and 59 deletions
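For context, a minimal usage sketch of the new overload (following the ov::preprocess API as exercised by the tests in this PR; `function` is an existing std::shared_ptr<ov::Function>, and the include path is an assumption):

#include <openvino/core/preprocess/pre_post_process.hpp>  // assumed header location

using namespace ov::preprocess;

// User data is NHWC, the network input is NCHW. The permutation {0, 3, 1, 2}
// places source dimension dims[i] at position i, so no explicit Layout
// ("NHWC"/"NCHW") has to be specified.
function = PrePostProcessor()
               .input(InputInfo().preprocess(PreProcessSteps().convert_layout({0, 3, 1, 2})))
               .build(function);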

===== changed file =====

@@ -514,6 +514,72 @@ static RefPreprocessParams convert_layout_nhwc_to_net_no_tensor_shape() {
return res;
}
static RefPreprocessParams convert_layout_by_dims() {
RefPreprocessParams res("convert_layout_by_dims");
res.function = []() {
auto f = create_simple_function(element::u8, {1, 3, 2, 2});
f = PrePostProcessor()
.input(InputInfo()
.preprocess(PreProcessSteps().convert_layout({0, 3, 1, 2})))
.build(f);
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::u8, std::vector<uint8_t>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams convert_layout_by_dims_multi() {
RefPreprocessParams res("convert_layout_by_dims_multi");
res.function = []() {
auto f = create_simple_function(element::f32, {1, 3, 2, 2});
auto p = PreProcessSteps();
p.convert_layout({0, 1, 3, 2}); // NHWC->NHCW
p.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
f = PrePostProcessor()
.input(InputInfo().preprocess(std::move(p)))
.build(f);
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::f32, std::vector<float>{1, 2, 3, // [H=0, W=0]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::f32, std::vector<float>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams convert_layout_by_dims_multi_layout() {
RefPreprocessParams res("convert_layout_by_dims_multi_layout");
res.function = []() {
auto f = create_simple_function(element::f32, {1, 3, 2, 2});
auto p = PreProcessSteps();
p.convert_layout({0, 1, 3, 2}); // NHWC->NHCW
p.mean({1, 2, 2}); // Apply means to 'C' channel
p.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
f = PrePostProcessor()
.input(InputInfo().tensor(InputTensorInfo().set_layout("N??C"))
.preprocess(std::move(p)))
.build(f);
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::f32, std::vector<float>{1, 2, 3, // [H=0, W=0, RGB]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::f32, std::vector<float>{1-1, 4-1, 7-1, 10-1, // R
2-2, 5-2, 8-2, 11-2, // G
3-2, 6-2, 9-2, 12-2}); // B
return res;
}
static RefPreprocessParams resize_and_convert_layout() {
RefPreprocessParams res("resize_and_convert_layout");
res.function = []() {
@@ -719,6 +785,48 @@ static RefPreprocessParams postprocess_2_inputs_basic() {
return res;
}
static RefPreprocessParams post_convert_layout_by_dims() {
RefPreprocessParams res("post_convert_layout_by_dims");
res.function = []() {
auto f = create_simple_function(element::u8, {1, 2, 2, 3});
f = PrePostProcessor()
.output(OutputInfo()
.postprocess(PostProcessSteps().convert_layout({0, 3, 1, 2})))
.build(f);
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::u8, std::vector<uint8_t>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams post_convert_layout_by_dims_multi() {
RefPreprocessParams res("post_convert_layout_by_dims_multi");
res.function = []() {
auto f = create_simple_function(element::f32, {1, 2, 2, 3});
auto p = PostProcessSteps();
p.convert_layout({0, 1, 3, 2}); // NHWC->NHCW
p.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
f = PrePostProcessor()
.output(OutputInfo().postprocess(std::move(p)))
.build(f);
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::f32, std::vector<float>{1, 2, 3, // [H=0, W=0]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::f32, std::vector<float>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams pre_and_post_processing() {
RefPreprocessParams res("pre_and_post_processing");
res.function = []() {
@@ -862,12 +970,17 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
resize_lvalues(),
convert_layout_nhwc_to_nchw_lvalue(),
convert_layout_nhwc_to_net_no_tensor_shape(),
convert_layout_by_dims(),
convert_layout_by_dims_multi(),
convert_layout_by_dims_multi_layout(),
resize_and_convert_layout(),
convert_color_nv12_to_bgr_two_planes(),
convert_color_nv12_single_plane(),
convert_color_nv12_layout_resize(),
element_type_before_convert_color_nv12(),
postprocess_2_inputs_basic(),
post_convert_layout_by_dims(),
post_convert_layout_by_dims_multi(),
pre_and_post_processing(),
rgb_to_bgr(),
bgr_to_rgb(),

===== changed file =====

@@ -328,17 +328,9 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
if (old_api_type == ov::element::undefined)
old_api_type = parameter->get_element_type();
-std::stringstream tensorLayout, networkLayout;
-for (size_t i = 0; i < old_api_transpose_args.size(); ++i) {
-    tensorLayout << i;
-    networkLayout << old_api_transpose_args[i];
-}
-prepost.input(
-    ov::preprocess::InputInfo(i)
-        .tensor(
-            InputTensorInfo().set_element_type(old_api_type).set_layout(ov::Layout(tensorLayout.str())))
-        .network(InputNetworkInfo().set_layout(ov::Layout(networkLayout.str()))));
+prepost.input(ov::preprocess::InputInfo(i)
+                  .tensor(InputTensorInfo().set_element_type(old_api_type))
+                  .preprocess(PreProcessSteps().convert_layout(old_api_transpose_args)));
// Set version to 10
rt_info["version"] = std::make_shared<ov::VariantWrapper<int64_t>>(10);
@@ -363,34 +355,15 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
if (old_api_type == ov::element::undefined)
old_api_type = result->get_element_type();
-std::stringstream tensorLayout, networkLayout;
-for (size_t i = 0; i < old_api_transpose_args.size(); ++i) {
-    networkLayout << i;
-    tensorLayout << old_api_transpose_args[i];
-}
prepost.output(OutputInfo(i)
-    .network(OutputNetworkInfo().set_layout(ov::Layout(networkLayout.str())))
-    .tensor(OutputTensorInfo()
-        .set_element_type(old_api_type)
-        .set_layout(ov::Layout(tensorLayout.str()))));
+    .postprocess(PostProcessSteps().convert_layout(old_api_transpose_args))
+    .tensor(OutputTensorInfo().set_element_type(old_api_type)));
// remove old api once we applied it
rtInfo.erase(it);
}
function = prepost.build(function);
-// TODO: keep information about layout once we have an ability to
-// apply permutation to layout
-// restore layout information
-for (const auto& parameter : function->get_parameters()) {
-    parameter->set_layout({});
-}
-for (const auto& result : function->get_results()) {
-    result->set_layout({});
-}
}
}

===== changed file =====

@@ -473,30 +473,33 @@ TEST_F(RTInfoDeserialization, NodeV11) {
param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"});
-// TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit pre-processing
-auto convert_param = std::make_shared<opset8::Convert>(param, ngraph::element::f32);
-auto constant_param = std::make_shared<opset8::Constant>(ngraph::element::i64,
+// TODO: No guarantee that Transpose will use exactly 'uint64_t' constant
+auto constant_param = std::make_shared<opset8::Constant>(ngraph::element::u64,
ngraph::Shape{4},
-std::vector<int64_t>{0, 2, 3, 1});
-auto transpose_param = std::make_shared<opset8::Transpose>(convert_param, constant_param);
+std::vector<uint64_t>{0, 2, 3, 1});
+auto transpose_param = std::make_shared<opset8::Transpose>(param, constant_param);
-auto round = std::make_shared<opset8::Round>(transpose_param,
+// TODO: No guarantee that only 'convert' will be added by implicit pre-processing
+auto convert_param = std::make_shared<opset8::Convert>(transpose_param, ngraph::element::f32);
+auto round = std::make_shared<opset8::Round>(convert_param,
ngraph::opset8::Round::RoundMode::HALF_TO_EVEN);
// TODO: runtime information should migrate as well?
round->get_rt_info()[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("Round1,Round2"));
// TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit post-processing
-auto convert_result = std::make_shared<opset8::Convert>(round, type);
-auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::i64,
+auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::u64,
ngraph::Shape{4},
-std::vector<int64_t>{0, 3, 1, 2});
-auto transpose_result = std::make_shared<opset8::Transpose>(convert_result, constant_result);
-transpose_result->set_friendly_name("Round");
-transpose_result->get_output_tensor(0).set_names({"output_tensor"});
+std::vector<uint64_t>{0, 3, 1, 2});
+auto transpose_result = std::make_shared<opset8::Transpose>(round, constant_result);
-auto result = std::make_shared<opset8::Result>(transpose_result);
+auto convert_result = std::make_shared<opset8::Convert>(transpose_result, type);
+convert_result->set_friendly_name("Round");
+convert_result->get_output_tensor(0).set_names({"output_tensor"});
+auto result = std::make_shared<opset8::Result>(convert_result);
result->set_friendly_name("output");
auto f_10_ref =

===== changed file =====

@@ -90,5 +90,9 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*CanSetOutBlobWithDifferentPrecision/netPRC=BIN.*)",
// TODO: Issue: 68712
R"(.*.MatMul.*CompareWithRefs.*IS0=\(1.5\)_IS1=\(1.5\).*transpose_a=0.*transpose_b=1.*CONSTANT.*FP16.*UNSPECIFIED.*UNSPECIFIED.*ANY.*)",
// TODO: Issue 66685
R"(smoke_PrePostProcess.*resize_linear_nhwc.*)",
// TODO: Issue 69187
R"(smoke_PrePostProcess.*cvt_color_nv12.*)",
};
}

===== changed file =====

@@ -14,11 +14,25 @@ using namespace ov::builder::preprocess;
inline std::vector<preprocess_func> GPU_smoke_preprocess_functions() {
return std::vector<preprocess_func>{
-preprocess_func(mean_only, "mean_only", 0.01f),
-preprocess_func(scale_only, "scale_only", 0.01f),
-preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
-preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
-preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
+preprocess_func(mean_only, "mean_only", 0.01f),
+preprocess_func(scale_only, "scale_only", 0.01f),
+preprocess_func(mean_scale, "mean_scale", 0.01f),
+preprocess_func(scale_mean, "scale_mean", 0.01f),
+preprocess_func(mean_vector, "mean_vector", 0.01f),
+preprocess_func(scale_vector, "scale_vector", 0.01f),
+preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
+preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
+preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f),
+preprocess_func(tensor_layout, "tensor_layout", 0.01f),
+preprocess_func(resize_linear, "resize_linear", 0.01f),
+preprocess_func(resize_nearest, "resize_nearest", 0.01f),
+preprocess_func(resize_linear_nhwc, "resize_linear_nhwc", 0.01f),
+preprocess_func(resize_cubic, "resize_cubic", 0.01f),
+preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
+preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
+preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 2.f),
+preprocess_func(cvt_color_nv12_to_bgr_two_planes, "cvt_color_nv12_to_bgr_two_planes", 2.f),
+preprocess_func(cvt_color_nv12_cvt_layout_resize, "cvt_color_nv12_cvt_layout_resize", 2.f),
};
}

===== changed file =====

@@ -299,6 +299,17 @@ inline std::shared_ptr<Function> resize_and_convert_layout() {
return function;
}
inline std::shared_ptr<Function> convert_layout_by_dims() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 30, 20, 3});
function = PrePostProcessor()
.input(InputInfo()
.preprocess(PreProcessSteps()
.convert_layout({0, 3, 1, 2})))
.build(function);
return function;
}
inline std::shared_ptr<Function> resize_and_convert_layout_i8() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i8, PartialShape{1, 30, 20, 3});
@@ -376,6 +387,7 @@ inline std::vector<preprocess_func> generic_preprocess_functions() {
preprocess_func(resize_nearest, "resize_nearest", 0.01f),
preprocess_func(resize_linear_nhwc, "resize_linear_nhwc", 0.01f),
preprocess_func(resize_cubic, "resize_cubic", 0.01f),
preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
preprocess_func(resize_and_convert_layout_i8, "resize_and_convert_layout_i8", 0.01f),
preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 2.f),

===== changed file =====

@@ -20,8 +20,9 @@ class Layout;
namespace layout {
std::vector<int64_t> find_permutation(const Layout& src_layout, const Rank& src_shape_rank, const Layout& dst_layout);
+Layout apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims);
-}
+}  // namespace layout
class OPENVINO_API Layout {
public:
@@ -84,6 +85,8 @@ private:
int64_t m_left_size = 0;
int64_t m_right_size = 0;
friend Layout layout::apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims);
friend std::vector<int64_t> layout::find_permutation(const Layout& src_layout,
const Rank& src_shape_rank,
const Layout& dst_layout);

===== changed file =====

@@ -86,6 +86,24 @@ public:
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
PostProcessSteps&& convert_layout(const Layout& dst_layout = {}) &&;
/// \brief Add convert layout operation by direct specification of transposed dimensions.
///
/// Example: the network produces an output with shape [1, 3, 480, 640], but the user needs an
/// interleaved output image [1, 480, 640, 3]. Post-processing may look like this:
///
/// \code{.cpp} auto proc =
///     PrePostProcessor()
///         .output(OutputInfo()
///             .postprocess(PostProcessSteps()
///                 .convert_layout({0, 2, 3, 1})
///         );
/// \endcode
///
/// \param dims Permutation of dimensions: element i selects the source dimension placed at position i.
/// If not empty, the array size (N) must match the output shape rank and shall contain each value from
/// 0 to N-1 exactly once. If empty, no actual conversion will be added.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PostProcessSteps& convert_layout(const std::vector<uint64_t>& dims) &;
PostProcessSteps&& convert_layout(const std::vector<uint64_t>& dims) &&;
/// \brief Signature for custom postprocessing operation. Custom postprocessing operation takes one output node and
/// produces one output node. For more advanced cases, client's code can use transformation passes over ov::Function
/// directly

===== changed file =====

@@ -220,15 +220,26 @@ public:
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& convert_layout(const Layout& dst_layout = {}) &;
/// \brief Add convert layout operation to network dimensions - Rvalue version.
///
/// \param dst_layout New layout after conversion. If not specified - destination layout is obtained from
/// appropriate network input properties.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps&& convert_layout(const Layout& dst_layout = {}) &&;
/// \brief Add convert layout operation by direct specification of transposed dimensions.
///
/// Example: the user data is an interleaved RGB image {1x480x640x3}, but the network expects a
/// planar input image ('NCHW', [1, 3, 480, 640]). Preprocessing may look like this:
///
/// \code{.cpp} auto proc =
///     PrePostProcessor()
///         .input(InputInfo()
///             .preprocess(PreProcessSteps()
///                 .convert_layout({0, 3, 1, 2})
///         );
/// \endcode
///
/// \param dims Permutation of dimensions: element i selects the source dimension placed at position i.
/// If not empty, the array size (N) must match the input shape rank and shall contain each value from
/// 0 to N-1 exactly once. If empty, no actual conversion will be added.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& convert_layout(const std::vector<uint64_t>& dims) &;
PreProcessSteps&& convert_layout(const std::vector<uint64_t>& dims) &&;
/// \brief Reverse channels operation - Lvalue version.
///
/// \details Adds appropriate operation which reverses channels layout. Operation requires layout having 'C'

===== changed file =====

@@ -240,6 +240,40 @@ std::string Layout::to_string() const {
namespace layout {
Layout apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims) {
{ // Validate dims
std::vector<bool> used(dims.size(), false);
for (size_t i = 0; i < dims.size(); i++) {
auto dim = dims[i];
OPENVINO_ASSERT(dim < dims.size(), "Convert layout: dimension ", dim, " is out of bounds");
OPENVINO_ASSERT(!used[dim],
"Convert layout: dimension ",
dim,
" is used more than once in convert arguments");
used[dim] = true;
}
}
if (src_layout.empty()) {
return src_layout; // Can return immediately
}
// No way to calculate layout from [N...C] with permutation {0, 3, 1, 2}
OPENVINO_ASSERT(!src_layout.m_dynamic,
"Layout conversion by indexes is not supported for dynamic layout: ",
src_layout.to_string());
Layout res;
res.m_dynamic = false;
res.m_left_size = src_layout.m_left_size;
for (size_t i = 0; i < dims.size(); i++) {
auto it = src_layout.m_index_map.find(static_cast<int64_t>(dims[i]));
if (it == src_layout.m_index_map.end()) {
continue;
}
res.m_index_map[static_cast<int64_t>(i)] = it->second;
res.m_names[it->second] = static_cast<int64_t>(i);
}
return res;
}
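To make the semantics concrete, here is a standalone sketch of the name mapping that apply_permutation performs (illustration only, not the library code; permute_layout is a hypothetical helper): result position i takes the dimension name found at source position dims[i].

#include <cassert>
#include <string>
#include <vector>

// res[i] = src[dims[i]] -- the same convention as ov::op::v1::Transpose,
// where output dimension i is taken from input dimension dims[i].
std::string permute_layout(const std::string& src, const std::vector<size_t>& dims) {
    std::string res(src.size(), '?');
    for (size_t i = 0; i < dims.size(); ++i)
        res[i] = src[dims[i]];
    return res;
}

int main() {
    assert(permute_layout("NCHW", {0, 2, 3, 1}) == "NHWC");  // NCHW -> NHWC
    assert(permute_layout("NHWC", {0, 3, 1, 2}) == "NCHW");  // NHWC -> NCHW
    return 0;
}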
std::vector<int64_t> find_permutation(const Layout& src_layout, const Rank& rank, const Layout& dst) {
// Basic implementation so far, can support partially-specified layouts later (shape rank will be needed for dynamic
// layouts)

===== changed file =====

@@ -380,6 +380,8 @@ std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function
});
new_param_shape = PartialShape(dims);
}
} else if (input->m_preprocess) {
new_param_shape = input->m_preprocess->calculate_param_shape(new_param_shape);
}
if (input->m_tensor_data->is_spatial_shape_set()) {
auto height_idx = get_and_check_height_idx(input->m_tensor_data->get_layout(), new_param_shape);
@@ -754,6 +756,16 @@ PreProcessSteps&& PreProcessSteps::convert_layout(const Layout& dst_layout) && {
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::convert_layout(const std::vector<uint64_t>& dims) & {
m_impl->add_convert_layout_impl(dims);
return *this;
}
PreProcessSteps&& PreProcessSteps::convert_layout(const std::vector<uint64_t>& dims) && {
m_impl->add_convert_layout_impl(dims);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::convert_color(const ov::preprocess::ColorFormat& dst_format) & {
m_impl->add_convert_color_impl(dst_format);
return *this;
@@ -869,6 +881,16 @@ PostProcessSteps&& PostProcessSteps::convert_layout(const Layout& dst_layout) &&
return std::move(*this);
}
PostProcessSteps& PostProcessSteps::convert_layout(const std::vector<uint64_t>& dims) & {
m_impl->add_convert_layout_impl(dims);
return *this;
}
PostProcessSteps&& PostProcessSteps::convert_layout(const std::vector<uint64_t>& dims) && {
m_impl->add_convert_layout_impl(dims);
return std::move(*this);
}
PostProcessSteps& PostProcessSteps::custom(const CustomPostprocessOp& postprocess_cb) & {
// 'true' indicates that custom postprocessing step will trigger validate_and_infer_types
m_impl->actions().emplace_back([postprocess_cb](const Output<ov::Node>& node, PostprocessingContext&) {

===== changed file =====

@@ -177,6 +177,27 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) {
});
}
void PreStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
if (dims.empty()) {
return;
}
m_layout_converts.emplace_front(dims);
m_actions.emplace_back([dims](const std::vector<Output<Node>>& nodes,
const std::shared_ptr<Function>& function,
PreprocessingContext& context) {
OPENVINO_ASSERT(!nodes.empty(), "Internal error: Can't convert layout for empty input.");
OPENVINO_ASSERT(nodes.size() == 1,
"Can't convert layout for multi-plane input. Suggesting to convert current image to "
"RGB/BGR color format using 'convert_color'");
auto new_layout = layout::apply_permutation(context.layout(), dims);
auto perm_constant = op::v0::Constant::create<uint64_t>(element::u64, Shape{dims.size()}, dims);
auto transpose = std::make_shared<op::v1::Transpose>(nodes[0], perm_constant);
auto res = std::make_tuple(std::vector<Output<Node>>{transpose}, true);
context.layout() = std::move(new_layout); // Update context's current layout
return res;
});
}
void PreStepsList::add_convert_color_impl(const ColorFormat& dst_format) {
m_actions.emplace_back([dst_format](const std::vector<Output<Node>>& nodes,
const std::shared_ptr<Function>& function,
@@ -310,5 +331,19 @@ void PostStepsList::add_convert_layout_impl(const Layout& layout) {
});
}
void PostStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
if (dims.empty()) {
return;
}
m_actions.emplace_back([dims](const Output<Node>& node, PostprocessingContext& context) {
auto perm_constant = op::v0::Constant::create<uint64_t>(element::u64, Shape{dims.size()}, dims);
auto new_layout = layout::apply_permutation(context.layout(), dims);
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
auto res = std::make_tuple(Output<Node>(transpose), true);
context.layout() = std::move(new_layout); // Update context's current layout
return res;
});
}
} // namespace preprocess
} // namespace ov

===== changed file =====

@@ -154,6 +154,7 @@ public:
void add_convert_impl(const element::Type& type);
void add_resize_impl(ResizeAlgorithm alg, int dst_height, int dst_width);
void add_convert_layout_impl(const Layout& layout);
void add_convert_layout_impl(const std::vector<uint64_t>& dims);
void add_convert_color_impl(const ColorFormat& dst_format);
void add_reverse_channels();
@@ -164,6 +165,27 @@ public:
return m_actions;
}
PartialShape calculate_param_shape(const PartialShape& network_shape) const {
if (network_shape.rank().is_dynamic()) {
return network_shape;
}
std::vector<Dimension> old_dims(network_shape.rank().get_length());
std::vector<Dimension> dims(network_shape.rank().get_length());
for (size_t i = 0; i < network_shape.rank().get_length(); i++) {
dims[i] = network_shape[i];
}
for (const auto& convert : m_layout_converts) {
old_dims = dims;
dims = std::vector<Dimension>(network_shape.rank().get_length());
for (size_t i = 0; i < convert.size(); i++) {
OPENVINO_ASSERT(convert[i] < dims.size(), "Convert dimension ", convert[i], " is out of bounds.");
dims[convert[i]] = old_dims[i];
}
}
return {dims};
}
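A worked example of the shape propagation above (standalone sketch, not the library code; invert_convert is a hypothetical helper): the assignment dims[convert[i]] = old_dims[i] applies the inverse of the Transpose permutation, so walking the recorded converts recovers the tensor shape the user must provide for a given network shape.

#include <cassert>
#include <cstdint>
#include <vector>

// Inverse of one convert_layout step: network dimension i came from user
// dimension convert[i], so the user shape receives network_shape[i] at
// position convert[i].
std::vector<int64_t> invert_convert(const std::vector<int64_t>& network_shape,
                                    const std::vector<size_t>& convert) {
    std::vector<int64_t> dims(network_shape.size());
    for (size_t i = 0; i < convert.size(); ++i)
        dims[convert[i]] = network_shape[i];
    return dims;
}

int main() {
    // Network expects NCHW {1, 3, 480, 640}; with convert_layout({0, 3, 1, 2})
    // the parameter (user) shape becomes NHWC {1, 480, 640, 3}, matching the
    // preprocess_convert_layout_dims test below.
    assert(invert_convert({1, 3, 480, 640}, {0, 3, 1, 2}) == (std::vector<int64_t>{1, 480, 640, 3}));
    return 0;
}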
private:
static std::tuple<std::vector<Output<Node>>, bool> reverse_channels(const std::vector<Output<Node>>& nodes,
const std::shared_ptr<Function>& function,
@@ -171,6 +193,7 @@ private:
private:
std::list<InternalPreprocessOp> m_actions;
std::list<std::vector<uint64_t>> m_layout_converts;
};
class PreProcessSteps::PreProcessStepsImpl : public PreStepsList {};
@@ -189,6 +212,7 @@ class PostStepsList {
public:
void add_convert_impl(const element::Type& type);
void add_convert_layout_impl(const Layout& layout);
void add_convert_layout_impl(const std::vector<uint64_t>& dims);
const std::list<InternalPostprocessOp>& actions() const {
return m_actions;

===== changed file =====

@@ -697,6 +697,58 @@ TEST(pre_post_process, preprocess_convert_layout_same) {
EXPECT_EQ(size_old, f->get_ordered_ops().size());
}
TEST(pre_post_process, preprocess_convert_layout_dims) {
auto f = create_simple_function(element::f32, Shape{1, 3, 480, 640});
f = PrePostProcessor().input(InputInfo().preprocess(PreProcessSteps().convert_layout({0, 3, 1, 2}))).build(f);
EXPECT_EQ(f->input().get_partial_shape(), (PartialShape{1, 480, 640, 3}));
}
TEST(pre_post_process, preprocess_convert_layout_dims_empty) {
auto f = create_simple_function(element::f32, Shape{1, 3, 480, 640});
f = PrePostProcessor()
.input(InputInfo().preprocess(PreProcessSteps().convert_layout(std::vector<uint64_t>{})))
.build(f);
EXPECT_EQ(f->input().get_partial_shape(), (PartialShape{1, 3, 480, 640}));
}
TEST(pre_post_process, preprocess_convert_layout_dims_dyn_shape) {
auto f = create_simple_function(element::f32, PartialShape::dynamic());
f = PrePostProcessor().input(InputInfo().preprocess(PreProcessSteps().convert_layout({0, 3, 1, 2}))).build(f);
EXPECT_EQ(f->input().get_partial_shape(), (PartialShape::dynamic()));
}
TEST(pre_post_process, preprocess_convert_layout_invalid_dims) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
EXPECT_THROW(
f = PrePostProcessor().input(InputInfo().preprocess(PreProcessSteps().convert_layout({0, 3, 2, 2}))).build(f),
ov::AssertFailure);
EXPECT_THROW(f = PrePostProcessor()
.input(InputInfo().preprocess(
PreProcessSteps().convert_layout({0, 3, 1, std::numeric_limits<uint64_t>::max()})))
.build(f),
ov::AssertFailure);
}
TEST(pre_post_process, preprocess_convert_layout_invalid_dims_dyn_shape) {
auto f = create_simple_function(element::f32, PartialShape::dynamic());
EXPECT_THROW(
f = PrePostProcessor().input(InputInfo().preprocess(PreProcessSteps().convert_layout({0, 3, 2, 2}))).build(f),
ov::AssertFailure);
EXPECT_THROW(f = PrePostProcessor()
.input(InputInfo().preprocess(
PreProcessSteps().convert_layout({0, 3, 1, std::numeric_limits<uint64_t>::max()})))
.build(f),
ov::AssertFailure);
}
TEST(pre_post_process, preprocess_reverse_channels_multiple_planes) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
EXPECT_THROW(
@@ -899,6 +951,65 @@ TEST(pre_post_process, postprocess_convert_layout_default_error) {
ov::AssertFailure);
}
TEST(pre_post_process, postprocess_convert_layout_dims) {
auto f = create_simple_function(element::f32, Shape{1, 3, 480, 640});
f = PrePostProcessor().output(OutputInfo().postprocess(PostProcessSteps().convert_layout({0, 2, 3, 1}))).build(f);
EXPECT_EQ(f->output().get_partial_shape(), (PartialShape{1, 480, 640, 3}));
}
TEST(pre_post_process, postprocess_convert_layout_dims_empty) {
auto f = create_simple_function(element::f32, Shape{1, 3, 480, 640});
f = PrePostProcessor()
.output(OutputInfo().postprocess(PostProcessSteps().convert_layout(std::vector<uint64_t>{})))
.build(f);
EXPECT_EQ(f->output().get_partial_shape(), (PartialShape{1, 3, 480, 640}));
}
TEST(pre_post_process, postprocess_convert_layout_has_layout) {
auto f = create_simple_function(element::f32, Shape{1, 3, 480, 640});
auto p = PostProcessSteps();
p.convert_layout({0, 2, 3, 1});
f = PrePostProcessor()
.output(OutputInfo().network(OutputNetworkInfo().set_layout("NC??")).postprocess(std::move(p)))
.build(f);
EXPECT_EQ(f->output().get_partial_shape(), (PartialShape{1, 480, 640, 3}));
EXPECT_EQ(f->get_results()[0]->get_layout(), "N??C");
}
TEST(pre_post_process, postprocess_convert_layout_invalid_dims) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
EXPECT_THROW(f = PrePostProcessor()
.output(OutputInfo().postprocess(PostProcessSteps().convert_layout({0, 3, 2, 2})))
.build(f),
ov::AssertFailure);
EXPECT_THROW(f = PrePostProcessor()
.output(OutputInfo().postprocess(
PostProcessSteps().convert_layout({0, 3, 1, std::numeric_limits<uint64_t>::max()})))
.build(f),
ov::AssertFailure);
}
TEST(pre_post_process, postprocess_convert_layout_invalid_dims_dyn_shape) {
auto f = create_simple_function(element::f32, PartialShape::dynamic());
EXPECT_THROW(f = PrePostProcessor()
.output(OutputInfo().postprocess(PostProcessSteps().convert_layout({0, 3, 2, 2})))
.build(f),
ov::AssertFailure);
EXPECT_THROW(f = PrePostProcessor()
.output(OutputInfo().postprocess(
PostProcessSteps().convert_layout({0, 3, 1, std::numeric_limits<uint64_t>::max()})))
.build(f),
ov::AssertFailure);
}
// Postprocessing - other
TEST(pre_post_process, postprocess_custom_step) {