diff --git a/docs/template_plugin/tests/functional/subgraph_reference/preprocess.cpp b/docs/template_plugin/tests/functional/subgraph_reference/preprocess.cpp
index d1840156bdd..7bc8fcea486 100644
--- a/docs/template_plugin/tests/functional/subgraph_reference/preprocess.cpp
+++ b/docs/template_plugin/tests/functional/subgraph_reference/preprocess.cpp
@@ -858,6 +858,28 @@ static RefPreprocessParams set_shape_custom_crop() {
     return res;
 }
 
+static RefPreprocessParams set_shape_with_resize() {
+    RefPreprocessParams res("set_shape_with_resize");
+    res.function = []() {
+        auto f = create_simple_function(element::f32, PartialShape{1, 3, 1, 1});
+        auto p = PrePostProcessor(f);
+        p.input().tensor().set_shape({1, 2, 2, 3}).set_layout("NHWC");
+        p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
+        p.input().model().set_layout("NCHW");
+        p.build();
+        return f;
+    };
+    auto input_size = 1 * 2 * 2 * 3;
+    std::vector<float> input_values(input_size);
+    std::iota(input_values.begin(), input_values.end(), 0);
+    res.inputs.emplace_back(element::f32, Shape{1, 2, 2, 3}, std::vector<float>{1, 2, 3,
+                                                                                1, 2, 3,
+                                                                                1, 2, 3,
+                                                                                1, 2, 3});
+    res.expected.emplace_back(Shape{1, 3, 1, 1}, element::f32, std::vector<float>{1, 2, 3});
+    return res;
+}
+
 static RefPreprocessParams postprocess_2_inputs_basic() {
     RefPreprocessParams res("postprocess_2_inputs_basic");
     res.function = []() {
@@ -1133,6 +1155,7 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
         convert_color_i420_to_bgr_three_planes(),
         convert_color_i420_single_plane(),
         set_shape_custom_crop(),
+        set_shape_with_resize(),
         postprocess_2_inputs_basic(),
         post_convert_layout_by_dims(),
         post_convert_layout_by_dims_multi(),
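
Note: the reference test above doubles as the usage pattern this change enables. Previously, build() substituted the tensor shape for the parameter shape up front, before the resize-aware shape calculation ran; it is now applied afterwards (see the pre_post_process.cpp hunk below), so tensor().set_shape() and preprocess().resize() compose. A minimal user-side sketch of the now-working flow, assuming the standard OpenVINO 2.0 API and a single-input f32 "NCHW" model loaded elsewhere (function name is hypothetical):

    #include <openvino/core/preprocess/pre_post_process.hpp>
    #include <openvino/openvino.hpp>

    // 'model' is any ov::Model with one f32 input, e.g. shape {1, 3, 224, 224}.
    std::shared_ptr<ov::Model> add_resize_preprocessing(std::shared_ptr<ov::Model> model) {
        using namespace ov::preprocess;
        PrePostProcessor p(model);
        // Declare the real input buffer: full shape plus layout.
        p.input().tensor().set_shape({1, 720, 1280, 3}).set_layout("NHWC");
        // Resize from the tensor's H/W (720x1280) to the model's H/W.
        p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
        // The model layout tells resize where H and W live on the model side.
        p.input().model().set_layout("NCHW");
        return p.build();  // the parameter shape becomes {1, 720, 1280, 3}
    }
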
diff --git a/src/core/src/preprocess/pre_post_process.cpp b/src/core/src/preprocess/pre_post_process.cpp
index 34af0657458..c779481f9aa 100644
--- a/src/core/src/preprocess/pre_post_process.cpp
+++ b/src/core/src/preprocess/pre_post_process.cpp
@@ -392,16 +392,17 @@ std::shared_ptr<Model> PrePostProcessor::build() {
         auto net_shape = param->get_partial_shape();
         auto new_param_shape = net_shape;
-        if (input->get_tensor_data()->is_shape_set()) {
-            new_param_shape = input->get_tensor_data()->get_shape();
+        auto model_layout = param->get_layout();
+        if (model_layout.empty() && input->get_tensor_data()->is_layout_set()) {
+            model_layout = input->get_preprocess()->propagate_layout(input->get_tensor_data()->get_layout());
         }
-        if (input->get_tensor_data()->is_layout_set() && !param->get_layout().empty() &&
-            param->get_layout() != input->get_tensor_data()->get_layout()) {
+        if (input->get_tensor_data()->is_layout_set() && !model_layout.empty() &&
+            model_layout != input->get_tensor_data()->get_layout()) {
             auto sq_layout = Layout();
             // Find if some squeeze is needed between model and tensor
             // E.g. model=NCHW, tensor=HWC
             std::tie(new_param_shape, sq_layout) =
-                layout::utils::find_squeeze(param->get_layout(), net_shape, input->get_tensor_data()->get_layout());
+                layout::utils::find_squeeze(model_layout, net_shape, input->get_tensor_data()->get_layout());
             // Find transpose between model and tensor layouts and update tensor shape
             auto net_to_tensor =
                 layout::utils::find_permutation(sq_layout, new_param_shape, input->get_tensor_data()->get_layout());
@@ -415,14 +416,15 @@ std::shared_ptr<Model> PrePostProcessor::build() {
         } else {
             Layout new_layout;
             std::tie(new_param_shape, new_layout) =
-                input->get_preprocess()->calculate_param_shape(new_param_shape, param->get_layout());
+                input->get_preprocess()->calculate_param_shape(new_param_shape, model_layout);
             if (!input->get_tensor_data()->is_layout_set()) {
                 // Reusing param's layout according to converted calculated layout
                 input->get_tensor_data()->set_layout(new_layout);
             }
         }
-
-        if (input->get_tensor_data()->is_spatial_shape_set()) {
+        if (input->get_tensor_data()->is_shape_set()) {
+            new_param_shape = input->get_tensor_data()->get_shape();
+        } else if (input->get_tensor_data()->is_spatial_shape_set()) {
             auto height_idx = get_and_check_height_idx(input->get_tensor_data()->get_layout(), new_param_shape);
             auto width_idx = get_and_check_width_idx(input->get_tensor_data()->get_layout(), new_param_shape);
             if (input->get_tensor_data()->is_spatial_shape_dynamic()) {
@@ -487,7 +489,7 @@ std::shared_ptr<Model> PrePostProcessor::build() {
 
         PreprocessingContext context(input->get_tensor_data()->get_layout());
         context.color_format() = input->get_tensor_data()->get_color_format();
-        context.target_layout() = param->get_layout();
+        context.target_layout() = model_layout;
         context.model_shape() = param->get_partial_shape();
         context.target_element_type() = param->get_element_type();
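
The key behavioral change in build() above: when model().set_layout() was not called, the model-side layout is no longer left empty but is inferred by pushing the tensor layout forward through the recorded convert_layout steps (propagate_layout, defined in the next file). A self-contained sketch of that forward propagation, with hypothetical names and std::string standing in for ov::Layout:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Hypothetical model of PreStepsList::propagate_layout: dims[i] is the source
    // index of destination dimension i (ov::op::v1::Transpose convention).
    std::string propagate(std::string layout, const std::vector<std::vector<uint64_t>>& converts) {
        for (const auto& dims : converts) {
            std::string next(layout.size(), '?');
            for (size_t i = 0; i < dims.size(); ++i) {
                next[i] = layout[dims[i]];  // pick the source dim for each destination slot
            }
            layout = next;
        }
        return layout;
    }

    // propagate("NHWC", {{3, 2, 1, 0}, {3, 0, 2, 1}}) == "NCHW"
    // (NHWC -> CWHN -> NCHW, matching input(5) of the new tensor_set_layout test).
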
diff --git a/src/core/src/preprocess/preprocess_steps_impl.cpp b/src/core/src/preprocess/preprocess_steps_impl.cpp
index 6e8d8b4bbfa..92341d5e118 100644
--- a/src/core/src/preprocess/preprocess_steps_impl.cpp
+++ b/src/core/src/preprocess/preprocess_steps_impl.cpp
@@ -28,7 +28,11 @@ static Shape construct_mean_scale_shape(const Output<Node>& node,
                     "Number of channels and mean/values size mismatch: Channels = ",
                     node_shape[channels_index].get_length(),
                     ", mean/scale = ",
-                    values_size);
+                    values_size,
+                    ", shape = ",
+                    node_shape,
+                    ", layout = ",
+                    context.layout().to_string());
     v[channels_index] = values_size;
     return {v};
 }
@@ -169,7 +173,18 @@ void PreStepsList::add_resize_impl(ResizeAlgorithm alg, int dst_height, int dst_width) {
     });
 }
 
+Layout PreStepsList::propagate_layout(const Layout& tensor_layout) const {
+    auto res = m_last_explicit_layout_set ? m_last_explicit_layout : tensor_layout;
+    for (const auto& convert : m_forward_layout_converts) {
+        res = layout::utils::apply_permutation(res, convert);
+    }
+    return res;
+}
+
 void PreStepsList::add_convert_layout_impl(const Layout& layout) {
+    m_forward_layout_converts.clear();
+    m_last_explicit_layout = layout;
+    m_last_explicit_layout_set = true;
     m_actions.emplace_back([layout](const std::vector<Output<Node>>& nodes,
                                     const std::shared_ptr<Model>& function,
                                     PreprocessingContext& context) {
@@ -217,6 +232,7 @@ void PreStepsList::add_convert_layout_impl(const std::vector<uint64_t>& dims) {
         return;
     }
     m_layout_converts.emplace_front(dims);
+    m_forward_layout_converts.emplace_back(dims);
    m_actions.emplace_back([dims](const std::vector<Output<Node>>& nodes,
                                   const std::shared_ptr<Model>& function,
                                   PreprocessingContext& context) {
diff --git a/src/core/src/preprocess/preprocess_steps_impl.hpp b/src/core/src/preprocess/preprocess_steps_impl.hpp
index 3f177d871cf..f01b88debe6 100644
--- a/src/core/src/preprocess/preprocess_steps_impl.hpp
+++ b/src/core/src/preprocess/preprocess_steps_impl.hpp
@@ -167,6 +167,8 @@ public:
         return m_actions;
     }
 
+    Layout propagate_layout(const Layout& tensor_layout) const;
+
 private:
     static std::tuple<std::vector<Output<Node>>, bool> reverse_channels(const std::vector<Output<Node>>& nodes,
                                                                         const std::shared_ptr<Model>& function,
@@ -179,6 +181,9 @@ private:
 private:
     std::list<InternalPreprocessOp> m_actions;
     std::list<std::vector<uint64_t>> m_layout_converts;
+    std::list<std::vector<uint64_t>> m_forward_layout_converts;
+    Layout m_last_explicit_layout;
+    bool m_last_explicit_layout_set = false;
 };
 
 class PreProcessSteps::PreProcessStepsImpl : public PreStepsList {};
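
One subtlety in the new bookkeeping: an explicit convert_layout("...") pins the layout absolutely, so add_convert_layout_impl(const Layout&) clears m_forward_layout_converts and records the target in m_last_explicit_layout; only dims-style converts added after it are replayed by propagate_layout. A small check of that rule against input(4) of the test below, reusing the hypothetical propagate() sketch above:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Declared in the earlier sketch.
    std::string propagate(std::string layout, const std::vector<std::vector<uint64_t>>& converts);

    int main() {
        // Tensor layout "NHWC"; convert_layout({3,2,1,0}) is recorded, then
        // convert_layout("HWCN") clears the list and pins "HWCN", then
        // convert_layout({3,2,0,1}) is recorded again. Propagation therefore
        // starts from "HWCN" and applies only {3, 2, 0, 1}:
        assert(propagate("HWCN", {{3, 2, 0, 1}}) == "NCHW");
        return 0;
    }
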
diff --git a/src/core/tests/preprocess.cpp b/src/core/tests/preprocess.cpp
index 2fbbef7d2e4..eedbabdf6b8 100644
--- a/src/core/tests/preprocess.cpp
+++ b/src/core/tests/preprocess.cpp
@@ -489,8 +489,60 @@ TEST(pre_post_process, convert_layout_implicit_several_time) {
     preprocessor.output().tensor().set_layout("NHCW");
     preprocessor.output().tensor().set_layout("NCHW");
     f = preprocessor.build();
-    EXPECT_EQ(f->get_parameters().front()->get_layout().to_string(), "[N,H,W,C]");
-    EXPECT_EQ(f->get_results().front()->get_layout().to_string(), "[N,C,H,W]");
+    EXPECT_EQ(f->get_parameters().front()->get_layout(), Layout("[N,H,W,C]"));
+    EXPECT_EQ(f->get_results().front()->get_layout(), Layout("[N,C,H,W]"));
+}
+
+TEST(pre_post_process, tensor_set_layout) {
+    auto f = create_n_inputs<6>(element::f32, Shape{1, 3, 480, 640});
+    PrePostProcessor preprocessor(f);
+    preprocessor.input(0).tensor().set_layout("NCHW");
+    preprocessor.input(0).preprocess().mean({1.0, 2.0, 3.0});
+
+    preprocessor.input(1).tensor().set_layout("NHWC");
+    preprocessor.input(1).preprocess().mean({1.0, 2.0, 3.0}).convert_layout("NCHW");
+
+    preprocessor.input(2).tensor().set_layout("NHWC");
+    preprocessor.input(2).model().set_layout("NCHW");
+
+    preprocessor.input(3).model().set_layout("NCHW");
+
+    preprocessor.input(4).tensor().set_layout("NHWC");
+    // Model layout will be calculated as "HWCN" -> "3,2,0,1" = NCHW
+    preprocessor.input(4)
+        .preprocess()
+        .mean({1.0, 2.0, 3.0})
+        .convert_layout({3, 2, 1, 0})
+        .convert_layout("HWCN")
+        .convert_layout({3, 2, 0, 1});
+
+    preprocessor.input(5).tensor().set_layout("NHWC");
+    preprocessor.input(5)
+        .preprocess()
+        .mean({1.0, 2.0, 3.0})
+        .convert_layout({3, 2, 1, 0})  // NHWC -> CWHN
+        .convert_layout({3, 0, 2, 1});  // CWHN -> NCHW
+
+    f = preprocessor.build();
+    EXPECT_EQ(f->get_parameters()[0]->get_partial_shape(), (Shape{1, 3, 480, 640}));
+    EXPECT_EQ(f->get_parameters()[1]->get_partial_shape(), (Shape{1, 480, 640, 3}));
+    EXPECT_EQ(f->get_parameters()[2]->get_partial_shape(), (Shape{1, 480, 640, 3}));
+    EXPECT_EQ(f->get_parameters()[3]->get_partial_shape(), (Shape{1, 3, 480, 640}));
+    EXPECT_EQ(f->get_parameters()[4]->get_partial_shape(), (Shape{1, 480, 640, 3}));
+    EXPECT_EQ(f->get_parameters()[5]->get_partial_shape(), (Shape{1, 480, 640, 3}));
+}
+
+TEST(pre_post_process, postprocess_set_model_layout) {
+    auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 224, 224});
+    PrePostProcessor p(f);
+    p.output(0).model().set_layout("NCHW");
+    p.output(0).postprocess().convert_layout("NHWC");
+
+    p.output(1).model().set_layout("NCHW");
+
+    f = p.build();
+    EXPECT_EQ(f->get_results()[0]->get_shape(), (Shape{1, 224, 224, 3}));
+    EXPECT_EQ(f->get_results()[1]->get_shape(), (Shape{1, 3, 224, 224}));
 }
 
 TEST(pre_post_process, unsupported_model_color_format) {
@@ -715,14 +767,15 @@ TEST(pre_post_process, mean_vector_dynamic_channels_shape) {
     EXPECT_EQ(f->get_output_element_type(0), element::f32);
 }
 
-// Error cases for 'resize'
 TEST(pre_post_process, resize_no_model_layout) {
     auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
     auto p = PrePostProcessor(f);
-    EXPECT_THROW(p.input().tensor().set_layout("NHWC"); p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
-                 p.build(), ov::AssertFailure);
+    p.input().tensor().set_layout("NHWC");
+    p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
+    EXPECT_NO_THROW(p.build());
 }
 
+// Error cases for 'resize'
 TEST(pre_post_process, tensor_spatial_shape_no_layout_dims) {
     auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
     auto p = PrePostProcessor(f);
@@ -735,6 +788,17 @@ TEST(pre_post_process, tensor_spatial_shape_no_layout_dims) {
                  p.build(), ov::AssertFailure);
 }
 
+TEST(pre_post_process, tensor_set_shape_for_resize) {
+    auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
+    auto p = PrePostProcessor(f);
+    p.input().tensor().set_shape({1, 720, 1280, 3}).set_layout("NHWC");
+    p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
+    p.input().model().set_layout("NCHW");
+    p.build();
+    EXPECT_EQ(f->input().get_partial_shape(), (Shape{1, 720, 1280, 3}));
+    EXPECT_EQ(f->output().get_partial_shape(), (Shape{1, 3, 224, 224}));
+}
+
 TEST(pre_post_process, tensor_set_shape_incompatible) {
     auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
     auto p = PrePostProcessor(f);
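
Worth calling out: resize_no_model_layout flipped from EXPECT_THROW to EXPECT_NO_THROW. With the tensor layout set to "NHWC" and no convert_layout steps, propagate_layout now yields "NHWC" for the model side too, so resize can locate H and W and build() succeeds. A minimal repro sketch of the new behavior (hypothetical function name, assuming a single-input f32 model):

    #include <openvino/core/preprocess/pre_post_process.hpp>
    #include <openvino/openvino.hpp>

    // 'model' is assumed to be a single-input f32 ov::Model, e.g. {1, 3, 224, 224}.
    void resize_without_model_layout(std::shared_ptr<ov::Model>& model) {
        using namespace ov::preprocess;
        PrePostProcessor p(model);
        p.input().tensor().set_layout("NHWC");
        p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
        model = p.build();  // no longer throws ov::AssertFailure
    }
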