[OV2.0] Preprocessing: support convert HWC->NCHW layout (#9540)

* Initial implementation

* Template reference tests

* CPU & GPU tests for HWC -> NCHW conversion
Mikhail Nosov 2022-01-11 13:13:37 +03:00 committed by GitHub
parent 062523c9e1
commit bd3a996239
8 changed files with 347 additions and 6 deletions
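
In practice the feature lets a caller declare an HWC tensor for an NCHW model input, and PrePostProcessor inserts the required Unsqueeze and Transpose. A minimal usage sketch, mirroring the tests in this commit ('model' is an assumed, already-created ov::Model with a {1, 3, H, W} input):

auto p = ov::preprocess::PrePostProcessor(model);
p.input().tensor().set_layout("HWC").set_element_type(ov::element::u8);
p.input().model().set_layout("NCHW");
model = p.build();  // input is now a u8 tensor of shape {H, W, 3}; layout/type conversion happens inside the model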


@@ -430,6 +430,68 @@ static RefPreprocessParams convert_layout_nhwc_to_nchw() {
return res;
}
static RefPreprocessParams convert_layout_nhwc_to_nchw_fully_dynamic() {
RefPreprocessParams res("convert_layout_nhwc_to_nchw_fully_dynamic");
res.function = []() {
auto f = create_simple_function(element::u8, PartialShape::dynamic());
f->get_parameters()[0]->set_layout("NCHW");
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NHWC");
p.input().preprocess().convert_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(element::u8, Shape{1, 2, 2, 3}, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::u8, std::vector<uint8_t>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams convert_layout_hwc_to_nchw() {
RefPreprocessParams res("convert_layout_hwc_to_nchw");
res.function = []() {
auto f = create_simple_function(element::f32, {Dimension::dynamic(), 3, 2, 2});
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("HWC").set_element_type(element::u8);
p.input().model().set_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(Shape{2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::f32, std::vector<float>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams convert_layout_hwc_to_nchw_fully_dynamic() {
RefPreprocessParams res("convert_layout_hwc_to_nchw_fully_dynamic");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape::dynamic());
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("HWC").set_element_type(element::u8);
p.input().model().set_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(element::u8, Shape{2, 2, 3}, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
4, 5, 6, // [H=0, W=1]
7, 8, 9, // [H=1, W=0]
10, 11, 12}); // [H=1, W=1]
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::f32, std::vector<float>{1, 4, 7, 10, // R
2, 5, 8, 11, // G
3, 6, 9, 12}); // B
return res;
}
static RefPreprocessParams convert_layout_nhwc_to_net_no_tensor_shape() {
RefPreprocessParams res("convert_layout_nhwc_to_net_no_tensor_shape");
res.function = []() {
@@ -1056,10 +1118,13 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
resize_to_network_width_height(),
resize_to_specified_width_height(),
convert_layout_nhwc_to_nchw(),
convert_layout_nhwc_to_nchw_fully_dynamic(),
convert_layout_nhwc_to_net_no_tensor_shape(),
convert_layout_by_dims(),
convert_layout_by_dims_multi(),
convert_layout_by_dims_multi_layout(),
convert_layout_hwc_to_nchw(),
convert_layout_hwc_to_nchw_fully_dynamic(),
resize_and_convert_layout(),
convert_color_nv12_to_bgr_two_planes(),
convert_color_nv12_single_plane(),


@@ -245,6 +245,12 @@ public:
static std::vector<int64_t> find_permutation(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout);
static std::tuple<PartialShape, Layout> find_squeeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout);
static std::tuple<PartialShape, Layout, size_t> find_unsqueeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout);
};
Layout LayoutUtils::apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims) {
@@ -335,7 +341,10 @@ std::vector<int64_t> LayoutUtils::find_permutation(const Layout& src_layout,
auto src_static = to_static(src_layout, rank);
auto dst_static = to_static(dst, rank);
OPENVINO_ASSERT(src_static.m_left_size == dst_static.m_left_size,
"Conversion is not supported for layouts with different sizes");
"Conversion is not supported for layouts with different sizes, ",
src_layout.to_string(),
" <-> ",
dst.to_string());
OPENVINO_ASSERT(rank.is_dynamic() || src_static.m_left_size == rank.get_length(),
"Conversion layout ",
src_layout.to_string(),
@@ -393,6 +402,121 @@ std::vector<int64_t> LayoutUtils::find_permutation(const Layout& src_layout,
return check_trivial(res);
}
std::tuple<PartialShape, Layout> LayoutUtils::find_squeeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout) {
if (src_layout.m_dynamic || dst_layout.m_dynamic || src_layout.m_left_size <= dst_layout.m_left_size) {
return {src_shape, src_layout};
}
// Don't allow conversions like model_layout=NC??, tensor_layout=HWC
// (such conversions may become supported in the future)
OPENVINO_ASSERT(src_layout.m_left_size == src_layout.m_index_map.size(),
"Layout conversion ",
dst_layout.to_string(),
" <-> ",
src_layout.to_string(),
" is not supported. Please use fully specified model layout, current is ",
src_layout.to_string());
// Don't allow conversions like model_layout=NCHW, tensor_layout=?HW
OPENVINO_ASSERT(dst_layout.m_left_size == dst_layout.m_index_map.size(),
"Layout conversion ",
dst_layout.to_string(),
" <-> ",
src_layout.to_string(),
" is not supported. Please use fully specified tensor layout, current is ",
dst_layout.to_string());
bool rank_dynamic = src_shape.rank().is_dynamic();
OPENVINO_ASSERT(rank_dynamic || src_shape.rank().get_length() == src_layout.m_left_size,
"Model input layout ",
src_layout.to_string(),
" is inconsistent with input shape ",
src_shape,
". Layout and shape shall have same rank, got ",
src_layout.m_left_size,
" != ",
src_shape.rank().get_length());
// At this point src_layout and dst_layout don't have '...' or '?'
std::vector<Dimension> res_dims(dst_layout.m_left_size);
Layout res;
res.m_dynamic = false;
res.m_left_size = dst_layout.m_left_size;
int64_t dst_idx = 0;
for (int64_t src_idx = 0; src_idx < src_layout.m_left_size; src_idx++) {
auto src_dim_name = src_layout.m_index_map.at(src_idx);
if (dst_layout.has_name(src_dim_name)) {
if (!rank_dynamic) {
res_dims[dst_idx] = src_shape[src_idx];
}
res.m_index_map[dst_idx] = src_dim_name;
res.m_names[src_dim_name] = dst_idx;
dst_idx++;
}
}
if (dst_idx != dst_layout.m_left_size) {
std::stringstream missing_names;
missing_names << "( ";
for (const auto& dst_item : dst_layout.m_names) {
const auto& key = dst_item.first;
if (!res.m_names.count(key)) {
missing_names << "'" << key << "' ";
}
}
missing_names << ")";
OPENVINO_ASSERT(dst_idx == dst_layout.m_left_size,
"Layout conversion failed. Tensor layout",
dst_layout.to_string(),
" has dimensions missing in model layout ",
src_layout.to_string(),
". Missing dimensions are ",
missing_names.str());
}
if (rank_dynamic) {
return {PartialShape::dynamic(), res};
} else {
return {PartialShape(res_dims), res};
}
}
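A worked sketch of find_squeeze (a hypothetical call through the public wrapper added below; result values traced from the implementation above, assuming using namespace ov):

PartialShape sq_shape;
Layout sq_layout;
std::tie(sq_shape, sq_layout) = ov::layout::utils::find_squeeze(
    Layout("NCHW"), PartialShape{Dimension::dynamic(), 3, 480, 640}, Layout("HWC"));
// sq_shape == {3, 480, 640}, sq_layout == "CHW";
// find_permutation("CHW", sq_shape, "HWC") then yields {1, 2, 0}.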
std::tuple<PartialShape, Layout, size_t> LayoutUtils::find_unsqueeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout) {
if (src_layout.m_dynamic || dst_layout.m_dynamic || src_layout.m_left_size >= dst_layout.m_left_size) {
return {src_shape, src_layout, {}};
}
// find_squeeze already performed necessary validation, no need to repeat here
bool rank_dynamic = src_shape.rank().is_dynamic();
auto dims_cnt = dst_layout.m_left_size - src_layout.m_left_size;
std::vector<Dimension> res_dims(dst_layout.m_left_size, 1);
Layout res;
res.m_dynamic = false;
res.m_left_size = dst_layout.m_left_size;
int64_t unset_idx = 0;
for (auto i = 0; i < dst_layout.m_left_size; i++) {
auto dim_name = dst_layout.m_index_map.at(i);
if (src_layout.has_name(dim_name)) {
auto src_idx = src_layout.get_index_by_name(dim_name);
res.m_names[dim_name] = src_idx + dims_cnt;
res.m_index_map[src_idx + dims_cnt] = dim_name;
if (!rank_dynamic) {
res_dims[src_idx + dims_cnt] = src_shape[src_idx];
}
} else {
res.m_names[dim_name] = unset_idx;
res.m_index_map[unset_idx] = dim_name;
unset_idx++;
}
}
if (rank_dynamic) {
return {PartialShape::dynamic(), res, dims_cnt};
} else {
return {PartialShape(res_dims), res, dims_cnt};
}
}
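And the matching sketch for find_unsqueeze (a hypothetical call; values traced from the loop above):

PartialShape unsq_shape;
Layout unsq_layout;
size_t axes_cnt = 0;
std::tie(unsq_shape, unsq_layout, axes_cnt) = ov::layout::utils::find_unsqueeze(
    Layout("HWC"), PartialShape{480, 640, 3}, Layout("NCDHW"));
// unsq_shape == {1, 1, 480, 640, 3}, unsq_layout == "NDHWC", axes_cnt == 2;
// find_permutation("NDHWC", unsq_shape, "NCDHW") then yields {0, 4, 1, 2, 3}.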
namespace layout {
namespace utils {
Layout apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims) {
@@ -404,6 +528,19 @@ std::vector<int64_t> find_permutation(const Layout& src_layout,
const Layout& dst_layout) {
return LayoutUtils::find_permutation(src_layout, src_shape, dst_layout);
}
std::tuple<PartialShape, Layout> find_squeeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout) {
return LayoutUtils::find_squeeze(src_layout, src_shape, dst_layout);
}
std::tuple<PartialShape, Layout, size_t> find_unsqueeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout) {
return LayoutUtils::find_unsqueeze(src_layout, src_shape, dst_layout);
}
} // namespace utils
// Helper functions


@@ -12,6 +12,22 @@ namespace ov {
namespace layout {
namespace utils {
// Example: NCHW to HWC. The user's shape needs to be calculated from (?, 3, 480, 640) to (480, 640, 3).
// src_layout shall be 'bigger' than dst_layout.
// Returns the shape and layout after 'squeeze' (here CHW). The next step is to apply "find_permutation" CHW->HWC.
std::tuple<PartialShape, Layout> find_squeeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout);
// Example: HWC to NCDHW. The user's shape also needs to be calculated, from (480, 640, 3) to (1, 3, 1, 480, 640).
// src_layout shall be 'smaller' than dst_layout.
// Returns the shape and layout after 'unsqueeze', plus the number of unsqueezed axes.
// In this example the function returns: shape {1, 1, 480, 640, 3}, layout "NDHWC", axes count = 2.
// The next step is to apply "find_permutation" NDHWC->NCDHW.
std::tuple<PartialShape, Layout, size_t> find_unsqueeze(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout);
std::vector<int64_t> find_permutation(const Layout& src_layout,
const PartialShape& src_shape,
const Layout& dst_layout);


@@ -397,10 +397,15 @@ std::shared_ptr<Model> PrePostProcessor::build() {
}
if (input->get_tensor_data()->is_layout_set() && !param->get_layout().empty() &&
param->get_layout() != input->get_tensor_data()->get_layout()) {
auto sq_layout = Layout();
// Find if some squeeze is needed between model and tensor
// E.g. model=NCHW, tensor=HWC
std::tie(new_param_shape, sq_layout) =
layout::utils::find_squeeze(param->get_layout(), net_shape, input->get_tensor_data()->get_layout());
// Find transpose between model and tensor layouts and update tensor shape
auto net_to_tensor =
-layout::utils::find_permutation(param->get_layout(), net_shape, input->get_tensor_data()->get_layout());
-if (!net_to_tensor.empty()) {
+layout::utils::find_permutation(sq_layout, new_param_shape, input->get_tensor_data()->get_layout());
+if (!net_to_tensor.empty() && new_param_shape.rank().is_static()) {
std::vector<ov::Dimension> dims(new_param_shape.size());
std::transform(net_to_tensor.begin(), net_to_tensor.end(), dims.begin(), [&](int64_t v) {
return new_param_shape[v];
@@ -525,7 +530,9 @@ std::shared_ptr<Model> PrePostProcessor::build() {
"Resulting shape '",
node.get_partial_shape(),
"' after preprocessing is not aligned with original parameter's shape: ",
-param->get_partial_shape());
+param->get_partial_shape(),
+", input parameter: ",
+param->get_friendly_name());
// Replace parameter
for (auto consumer : consumers) {


@@ -178,7 +178,24 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) {
"Can't convert layout for multi-plane input. Suggesting to convert current image to "
"RGB/BGR color format using 'convert_color'");
Layout dst_layout = layout.empty() ? context.target_layout() : layout;
-auto permutation = layout::utils::find_permutation(context.layout(), nodes[0].get_partial_shape(), dst_layout);
+auto node = nodes[0];
auto shape = node.get_partial_shape();
size_t add_cnt;
Layout unsqueeze_layout;
std::tie(shape, unsqueeze_layout, add_cnt) = layout::utils::find_unsqueeze(context.layout(), shape, dst_layout);
if (add_cnt) {
std::vector<size_t> dims;
dims.push_back(add_cnt);
Shape const_shape(dims);
std::vector<int64_t> vals(add_cnt);
for (size_t i = 0; i < add_cnt; i++) {  // unsqueeze axes are 0..add_cnt-1
vals[i] = static_cast<int64_t>(i);
}
auto axes = op::v0::Constant::create<int64_t>(element::i64, const_shape, vals);
// Add unsqueeze on top
node = std::make_shared<opset8::Unsqueeze>(node, axes);
}
auto permutation = layout::utils::find_permutation(unsqueeze_layout, shape, dst_layout);
if (permutation.empty()) {
// No transpose is needed, just update layout
if (!layout.empty()) {
@@ -187,7 +204,7 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) {
return std::make_tuple(nodes, false);
}
auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
-auto transpose = std::make_shared<op::v1::Transpose>(nodes[0], perm_constant);
+auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
context.layout() = dst_layout; // Update context's current layout
// return false to avoid excess function revalidations as layout conversion
// doesn't require shape or type propagation.
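Net effect of this step for the HWC -> NCHW case (an illustrative trace; shapes are assumed):

// input                         : {480, 640, 3}    layout "HWC"
// Unsqueeze(axes = {0})         : {1, 480, 640, 3} layout "NHWC" (from find_unsqueeze)
// Transpose(perm = {0, 3, 1, 2}): {1, 3, 480, 640} layout "NCHW" (from find_permutation)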


@@ -967,6 +967,93 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_trivial) {
EXPECT_EQ(ops_num, f->get_ordered_ops().size());
}
TEST(pre_post_process, preprocess_convert_layout_squeeze) {
auto f = create_n_inputs<3>(element::f32, Shape{1, 3, 1, 480, 640});
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("HWC");
p.input(0).model().set_layout("NCDHW");
p.input(1).tensor().set_layout("NHWC");
p.input(1).model().set_layout("NCDHW");
p.input(2).tensor().set_layout("WCHD");
p.input(2).model().set_layout("NCDHW");
p.build();
EXPECT_EQ(ov::layout::get_layout(f->input(0)), "HWC");
EXPECT_EQ(f->input(0).get_partial_shape(), (PartialShape{480, 640, 3}));
EXPECT_EQ(ov::layout::get_layout(f->input(1)), "NHWC");
EXPECT_EQ(f->input(1).get_partial_shape(), (PartialShape{1, 480, 640, 3}));
EXPECT_EQ(ov::layout::get_layout(f->input(2)), "WCHD");
EXPECT_EQ(f->input(2).get_partial_shape(), (PartialShape{640, 3, 480, 1}));
}
TEST(pre_post_process, preprocess_convert_layout_squeeze_dynamic) {
auto f = create_n_inputs<2>(element::f32, PartialShape{Dimension::dynamic(), 3, 1, 480, 640});
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("HWC");
p.input(0).model().set_layout("NCDHW");
p.input(1).tensor().set_layout("NHWC");
p.input(1).model().set_layout("NCDHW");
p.build();
EXPECT_EQ(ov::layout::get_layout(f->input(0)), "HWC");
EXPECT_EQ(f->input(0).get_partial_shape(), (PartialShape{480, 640, 3}));
EXPECT_EQ(ov::layout::get_layout(f->input(1)), "NHWC");
EXPECT_EQ(f->input(1).get_partial_shape(), (PartialShape{Dimension::dynamic(), 480, 640, 3}));
}
TEST(pre_post_process, preprocess_convert_layout_squeeze_unsupported) {
auto f = create_n_inputs<1>(element::f32, PartialShape{Dimension::dynamic(), 3, 1, 480, 640});
EXPECT_THROW(
{
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("NCDHWS");
p.input(0).model().set_layout("NCDHW");
p.build();
},
ov::AssertFailure);
EXPECT_THROW(
{
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("HWC");
p.input(0).model().set_layout("?????");
p.build();
},
ov::AssertFailure);
EXPECT_THROW(
{
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("...S");
p.input(0).model().set_layout("NCDHW");
p.build();
},
ov::AssertFailure);
EXPECT_THROW(
{
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("HWC");
p.input(0).model().set_layout("...NCDHW");
p.build();
},
ov::AssertFailure);
EXPECT_THROW(
{
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("HW?");
p.input(0).model().set_layout("NCDHW");
p.build();
},
ov::AssertFailure);
}
TEST(pre_post_process, preprocess_convert_layout_partially_defined_error) {
auto f = create_simple_function(element::f32, Shape{1, 2, 3, 4, 5});


@@ -30,6 +30,7 @@ inline std::vector<preprocess_func> GPU_smoke_preprocess_functions() {
preprocess_func(resize_cubic, "resize_cubic", 0.01f),
preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, { ov::Shape {1, 3, 123, 123} }),
preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
preprocess_func(convert_layout_hwc_to_nchw, "convert_layout_hwc_to_nchw", 0.01f),
preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 1.f),
preprocess_func(cvt_color_nv12_to_bgr_two_planes, "cvt_color_nv12_to_bgr_two_planes", 1.f),


@@ -302,6 +302,16 @@ inline std::shared_ptr<Model> convert_layout_by_dims() {
return function;
}
inline std::shared_ptr<Model> convert_layout_hwc_to_nchw() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 3, 30, 20});
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("HWC").set_element_type(element::u8);
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}
inline std::shared_ptr<Model> resize_and_convert_layout_i8() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i8, PartialShape{1, 30, 20, 3});
@@ -413,6 +423,7 @@ inline std::vector<preprocess_func> generic_preprocess_functions() {
preprocess_func(resize_cubic, "resize_cubic", 0.01f),
preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, { Shape {1, 3, 123, 123} }),
preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
preprocess_func(convert_layout_hwc_to_nchw, "convert_layout_hwc_to_nchw", 0.01f),
preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
preprocess_func(resize_and_convert_layout_i8, "resize_and_convert_layout_i8", 0.01f),
preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 1.f),