[OV2.0] Calculate model layout based on 'tensor' layout and convert steps (#9550)

* Calculate model layout based on 'tensor' layout and convert steps

Previously, 'model layout' was set to '...' by default,
so no shape conversion happened when the tensor layout was set to 'NHWC' and an explicit convert_layout "NCHW" step followed.

Now "model layout" is calculated based on tensor layout and conversion steps:
Examples:
 1) Tensor: NHWC, Convert: NCHW. Result: NCHW
 2) Tensor: NHWC, Convert: 0312. Result: NCHW

* Fix for set_shape + resize case
This commit is contained in:
Mikhail Nosov 2022-01-12 10:52:02 +03:00 committed by GitHub
parent c1d7535044
commit 26a78fcb5d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 125 additions and 15 deletions

View File

@ -858,6 +858,28 @@ static RefPreprocessParams set_shape_custom_crop() {
return res;
}
// Reference test: tensor().set_shape() combined with resize().
// The tensor is NHWC {1,2,2,3}; preprocessing resizes it to the model's
// spatial size and converts the layout to the model's NCHW {1,3,1,1}.
static RefPreprocessParams set_shape_with_resize() {
    RefPreprocessParams res("set_shape_with_resize");
    res.function = []() {
        auto f = create_simple_function(element::f32, PartialShape{1, 3, 1, 1});
        auto p = PrePostProcessor(f);
        p.input().tensor().set_shape({1, 2, 2, 3}).set_layout("NHWC");
        p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
        p.input().model().set_layout("NCHW");
        p.build();
        return f;
    };
    // Every one of the 4 HW positions holds the same RGB triple {1,2,3}, so the
    // linear resize down to 1x1 must reproduce exactly {1,2,3} per channel.
    // (Removed dead code: an iota-filled 'input_values' vector was created here
    // but never used — the literal vector below is what the test consumes.)
    res.inputs.emplace_back(element::f32,
                            Shape{1, 2, 2, 3},
                            std::vector<float>{1, 2, 3,
                                               1, 2, 3,
                                               1, 2, 3,
                                               1, 2, 3});
    res.expected.emplace_back(Shape{1, 3, 1, 1}, element::f32, std::vector<float>{1, 2, 3});
    return res;
}
static RefPreprocessParams postprocess_2_inputs_basic() {
RefPreprocessParams res("postprocess_2_inputs_basic");
res.function = []() {
@ -1133,6 +1155,7 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
convert_color_i420_to_bgr_three_planes(),
convert_color_i420_single_plane(),
set_shape_custom_crop(),
set_shape_with_resize(),
postprocess_2_inputs_basic(),
post_convert_layout_by_dims(),
post_convert_layout_by_dims_multi(),

View File

@ -392,16 +392,17 @@ std::shared_ptr<Model> PrePostProcessor::build() {
auto net_shape = param->get_partial_shape();
auto new_param_shape = net_shape;
if (input->get_tensor_data()->is_shape_set()) {
new_param_shape = input->get_tensor_data()->get_shape();
auto model_layout = param->get_layout();
if (model_layout.empty() && input->get_tensor_data()->is_layout_set()) {
model_layout = input->get_preprocess()->propagate_layout(input->get_tensor_data()->get_layout());
}
if (input->get_tensor_data()->is_layout_set() && !param->get_layout().empty() &&
param->get_layout() != input->get_tensor_data()->get_layout()) {
if (input->get_tensor_data()->is_layout_set() && !model_layout.empty() &&
model_layout != input->get_tensor_data()->get_layout()) {
auto sq_layout = Layout();
// Find if some squeeze is needed between model and tensor
// E.g. model=NCHW, tensor=HWC
std::tie(new_param_shape, sq_layout) =
layout::utils::find_squeeze(param->get_layout(), net_shape, input->get_tensor_data()->get_layout());
layout::utils::find_squeeze(model_layout, net_shape, input->get_tensor_data()->get_layout());
// Find transpose between model and tensor layouts and update tensor shape
auto net_to_tensor =
layout::utils::find_permutation(sq_layout, new_param_shape, input->get_tensor_data()->get_layout());
@ -415,14 +416,15 @@ std::shared_ptr<Model> PrePostProcessor::build() {
} else {
Layout new_layout;
std::tie(new_param_shape, new_layout) =
input->get_preprocess()->calculate_param_shape(new_param_shape, param->get_layout());
input->get_preprocess()->calculate_param_shape(new_param_shape, model_layout);
if (!input->get_tensor_data()->is_layout_set()) {
// Reusing param's layout according to converted calculated layout
input->get_tensor_data()->set_layout(new_layout);
}
}
if (input->get_tensor_data()->is_spatial_shape_set()) {
if (input->get_tensor_data()->is_shape_set()) {
new_param_shape = input->get_tensor_data()->get_shape();
} else if (input->get_tensor_data()->is_spatial_shape_set()) {
auto height_idx = get_and_check_height_idx(input->get_tensor_data()->get_layout(), new_param_shape);
auto width_idx = get_and_check_width_idx(input->get_tensor_data()->get_layout(), new_param_shape);
if (input->get_tensor_data()->is_spatial_shape_dynamic()) {
@ -487,7 +489,7 @@ std::shared_ptr<Model> PrePostProcessor::build() {
PreprocessingContext context(input->get_tensor_data()->get_layout());
context.color_format() = input->get_tensor_data()->get_color_format();
context.target_layout() = param->get_layout();
context.target_layout() = model_layout;
context.model_shape() = param->get_partial_shape();
context.target_element_type() = param->get_element_type();

View File

@ -28,7 +28,11 @@ static Shape construct_mean_scale_shape(const Output<Node>& node,
"Number of channels and mean/values size mismatch: Channels = ",
node_shape[channels_index].get_length(),
", mean/scale = ",
values_size);
values_size,
", shape = ",
node_shape,
", layout = ",
context.layout().to_string());
v[channels_index] = values_size;
return {v};
}
@ -169,7 +173,18 @@ void PreStepsList::add_resize_impl(ResizeAlgorithm alg, int dst_height, int dst_
});
}
// Derive the effective model-side layout from the tensor layout.
// Starts from the most recent explicitly requested layout if one was set
// (via convert_layout(Layout)), otherwise from the tensor layout itself,
// then replays every dimension-permutation convert registered after it.
Layout PreStepsList::propagate_layout(const Layout& tensor_layout) const {
    Layout result = tensor_layout;
    if (m_last_explicit_layout_set) {
        result = m_last_explicit_layout;
    }
    for (const auto& permutation : m_forward_layout_converts) {
        result = layout::utils::apply_permutation(result, permutation);
    }
    return result;
}
void PreStepsList::add_convert_layout_impl(const Layout& layout) {
m_forward_layout_converts.clear();
m_last_explicit_layout = layout;
m_last_explicit_layout_set = true;
m_actions.emplace_back([layout](const std::vector<Output<Node>>& nodes,
const std::shared_ptr<Model>& function,
PreprocessingContext& context) {
@ -217,6 +232,7 @@ void PreStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
return;
}
m_layout_converts.emplace_front(dims);
m_forward_layout_converts.emplace_back(dims);
m_actions.emplace_back([dims](const std::vector<Output<Node>>& nodes,
const std::shared_ptr<Model>& function,
PreprocessingContext& context) {

View File

@ -167,6 +167,8 @@ public:
return m_actions;
}
Layout propagate_layout(const Layout& tensor_layout) const;
private:
static std::tuple<std::vector<Output<Node>>, bool> reverse_channels(const std::vector<Output<Node>>& nodes,
const std::shared_ptr<Model>& function,
@ -179,6 +181,9 @@ private:
private:
std::list<InternalPreprocessOp> m_actions;
std::list<std::vector<uint64_t>> m_layout_converts;
std::list<std::vector<uint64_t>> m_forward_layout_converts;
Layout m_last_explicit_layout;
bool m_last_explicit_layout_set = false;
};
class PreProcessSteps::PreProcessStepsImpl : public PreStepsList {};

View File

@ -489,8 +489,60 @@ TEST(pre_post_process, convert_layout_implicit_several_time) {
preprocessor.output().tensor().set_layout("NHCW");
preprocessor.output().tensor().set_layout("NCHW");
f = preprocessor.build();
EXPECT_EQ(f->get_parameters().front()->get_layout().to_string(), "[N,H,W,C]");
EXPECT_EQ(f->get_results().front()->get_layout().to_string(), "[N,C,H,W]");
EXPECT_EQ(f->get_parameters().front()->get_layout(), Layout("[N,H,W,C]"));
EXPECT_EQ(f->get_results().front()->get_layout(), Layout("[N,C,H,W]"));
}
// Checks that the resulting parameter shape is derived correctly from the
// tensor layout plus the chain of convert_layout steps (model layout is
// deduced when not set explicitly).
TEST(pre_post_process, tensor_set_layout) {
auto f = create_n_inputs<6>(element::f32, Shape{1, 3, 480, 640});
PrePostProcessor preprocessor(f);
// Input 0: tensor layout matches model order — shape stays NCHW.
preprocessor.input(0).tensor().set_layout("NCHW");
preprocessor.input(0).preprocess().mean({1.0, 2.0, 3.0});
// Input 1: explicit convert to NCHW — parameter becomes NHWC-shaped.
preprocessor.input(1).tensor().set_layout("NHWC");
preprocessor.input(1).preprocess().mean({1.0, 2.0, 3.0}).convert_layout("NCHW");
// Input 2: model layout set explicitly — conversion NHWC->NCHW implied.
preprocessor.input(2).tensor().set_layout("NHWC");
preprocessor.input(2).model().set_layout("NCHW");
// Input 3: only model layout set — no tensor-side reshape.
preprocessor.input(3).model().set_layout("NCHW");
preprocessor.input(4).tensor().set_layout("NHWC");
// Model layout will be calculated as "HWCN" -> "3,2,0,1" = NCHW
preprocessor.input(4)
.preprocess()
.mean({1.0, 2.0, 3.0})
.convert_layout({3, 2, 1, 0})
.convert_layout("HWCN")
.convert_layout({3, 2, 0, 1});
// Input 5: model layout deduced purely from dim-index permutations.
preprocessor.input(5).tensor().set_layout("NHWC");
preprocessor.input(5)
.preprocess()
.mean({1.0, 2.0, 3.0})
.convert_layout({3, 2, 1, 0}) // NHWC -> CWHN
.convert_layout({3, 0, 2, 1}); // CWHN -> NCHW
f = preprocessor.build();
EXPECT_EQ(f->get_parameters()[0]->get_partial_shape(), (Shape{1, 3, 480, 640}));
EXPECT_EQ(f->get_parameters()[1]->get_partial_shape(), (Shape{1, 480, 640, 3}));
EXPECT_EQ(f->get_parameters()[2]->get_partial_shape(), (Shape{1, 480, 640, 3}));
EXPECT_EQ(f->get_parameters()[3]->get_partial_shape(), (Shape{1, 3, 480, 640}));
EXPECT_EQ(f->get_parameters()[4]->get_partial_shape(), (Shape{1, 480, 640, 3}));
EXPECT_EQ(f->get_parameters()[5]->get_partial_shape(), (Shape{1, 480, 640, 3}));
}
// Checks output-side layout handling: setting the model layout on an output
// plus a postprocess convert_layout changes the result shape; setting model
// layout alone leaves the result untouched.
TEST(pre_post_process, postprocess_set_model_layout) {
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 224, 224});
PrePostProcessor p(f);
// Output 0: NCHW model result converted to NHWC for the caller.
p.output(0).model().set_layout("NCHW");
p.output(0).postprocess().convert_layout("NHWC");
// Output 1: model layout set with no conversion — shape unchanged.
p.output(1).model().set_layout("NCHW");
f = p.build();
EXPECT_EQ(f->get_results()[0]->get_shape(), (Shape{1, 224, 224, 3}));
EXPECT_EQ(f->get_results()[1]->get_shape(), (Shape{1, 3, 224, 224}));
}
TEST(pre_post_process, unsupported_model_color_format) {
@ -715,14 +767,15 @@ TEST(pre_post_process, mean_vector_dynamic_channels_shape) {
EXPECT_EQ(f->get_output_element_type(0), element::f32);
}
// 'resize' with only a tensor layout set: model layout is now deduced, so this no longer throws
// Resize with only the tensor layout set now succeeds: the model layout is
// deduced from the tensor layout, so build() no longer throws.
// NOTE(review): the test name 'resize_no_model_layout' predates this change
// and now describes a supported case rather than an error — consider renaming.
TEST(pre_post_process, resize_no_model_layout) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NHWC");
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
// Previously this threw ov::AssertFailure; deduction makes it valid.
EXPECT_NO_THROW(p.build());
}
// Error cases for 'resize'
TEST(pre_post_process, tensor_spatial_shape_no_layout_dims) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);
@ -735,6 +788,17 @@ TEST(pre_post_process, tensor_spatial_shape_no_layout_dims) {
p.build(), ov::AssertFailure);
}
// set_shape + resize: the user-provided tensor shape {1,720,1280,3} (NHWC)
// becomes the model input shape, and resize maps it onto the original
// model spatial size; the output shape stays that of the original model.
TEST(pre_post_process, tensor_set_shape_for_resize) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);
p.input().tensor().set_shape({1, 720, 1280, 3}).set_layout("NHWC");
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().model().set_layout("NCHW");
p.build();
// Input reflects the tensor shape; output keeps the model's NCHW shape.
EXPECT_EQ(f->input().get_partial_shape(), (Shape{1, 720, 1280, 3}));
EXPECT_EQ(f->output().get_partial_shape(), (Shape{1, 3, 224, 224}));
}
TEST(pre_post_process, tensor_set_shape_incompatible) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);