Rename "network" to "model" in preprocessing API (#9054)

This commit is contained in:
Mikhail Nosov 2021-12-07 19:26:27 +03:00 committed by GitHub
parent 70b5e28979
commit 20bf5fcc4a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 222 additions and 224 deletions

View File

@ -284,7 +284,7 @@ static RefPreprocessParams resize_to_network_height() {
p.input()
.tensor().set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
p.build();
return f;
};
@ -301,7 +301,7 @@ static RefPreprocessParams resize_to_network_width() {
p.input()
.tensor().set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
return f;
};
@ -319,7 +319,7 @@ static RefPreprocessParams resize_from_spatial_dims() {
p.input()
.tensor().set_spatial_static_shape(1, 4);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
return f;
};
@ -337,7 +337,7 @@ static RefPreprocessParams resize_i8() {
.tensor()
.set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
return f;
};
@ -356,7 +356,7 @@ static RefPreprocessParams resize_to_network_width_height() {
p.input()
.tensor().set_spatial_static_shape(5, 5);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST);
p.input().network().set_layout("...HW");
p.input().model().set_layout("...HW");
p.build();
return f;
};
@ -386,7 +386,7 @@ static RefPreprocessParams resize_to_specified_width_height() {
p.input()
.tensor().set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST, 4, 4);
p.input().network().set_layout("...HW");
p.input().model().set_layout("...HW");
p.build();
return f;
};
@ -524,7 +524,7 @@ static RefPreprocessParams resize_and_convert_layout() {
p.input().preprocess()
.resize(ResizeAlgorithm::RESIZE_LINEAR)
.convert_layout();
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
p.build();
return f;
};
@ -639,7 +639,7 @@ static RefPreprocessParams convert_color_nv12_layout_resize() {
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_NEAREST);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
return f;
};
@ -676,7 +676,7 @@ static RefPreprocessParams element_type_before_convert_color_nv12() {
p.input().preprocess()
.convert_element_type(element::f32)
.convert_color(ColorFormat::RGB);
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
p.build();
return f;
};
@ -802,7 +802,7 @@ static RefPreprocessParams postprocess_2_inputs_basic() {
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 2});
auto p = PrePostProcessor(f);
p.output("tensor_output1")
.network().set_layout("NCHW");
.model().set_layout("NCHW");
p.output("tensor_output1").postprocess().convert_layout();
p.output("tensor_output1").tensor().set_layout("NHWC");
p.output("tensor_output2")
@ -868,7 +868,7 @@ static RefPreprocessParams pre_and_post_processing() {
p.input(0).preprocess().convert_element_type(element::f32).mean(1.f);
p.input(1).preprocess().scale(2.f);
p.output("tensor_output1")
.network().set_layout("NCHW");
.model().set_layout("NCHW");
p.output("tensor_output1").postprocess().convert_layout();
p.output("tensor_output1").tensor().set_layout("NHWC");
p.output("tensor_output2")

View File

@ -103,7 +103,7 @@ TEST_F(ReferencePreprocessLegacyTest, resize) {
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NCHW").set_spatial_static_shape(42, 30);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
auto &preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess();
@ -126,7 +126,7 @@ TEST_F(ReferencePreprocessLegacyTest, bgrx_to_bgr) {
auto& input = p.input();
input.tensor().set_color_format(ColorFormat::BGRX).set_element_type(element::u8);
input.preprocess().convert_color(ColorFormat::BGR);
input.network().set_layout("NCHW");
input.model().set_layout("NCHW");
function = p.build();
inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data());
@ -156,7 +156,7 @@ TEST_F(ReferencePreprocessLegacyTest, rgbx_to_bgr) {
auto& input = p.input();
input.tensor().set_color_format(ColorFormat::RGBX).set_element_type(element::u8);
input.preprocess().convert_color(ColorFormat::BGR);
input.network().set_layout("NCHW");
input.model().set_layout("NCHW");
function = p.build();
inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data());
@ -184,7 +184,7 @@ public:
auto p = PrePostProcessor(function);
p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
p.input().preprocess().convert_color(ColorFormat::BGR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
const auto &param = function->get_parameters()[0];
@ -261,7 +261,7 @@ public:
auto& input_info = p.input();
input_info.tensor().set_color_format(ColorFormat::I420_SINGLE_PLANE);
input_info.preprocess().convert_color(ColorFormat::BGR);
input_info.network().set_layout("NCHW");
input_info.model().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];

View File

@ -165,7 +165,7 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_u8_simple_linear) {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(2, 2);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];
@ -203,7 +203,7 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_u8_large_picture_linear) {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(input_height, input_width);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];
@ -240,7 +240,7 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_f32_large_picture_linear) {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(input_height, input_width);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];
@ -268,7 +268,7 @@ TEST_F(PreprocessOpenCVReferenceTest, DISABLED_resize_f32_large_picture_cubic_sm
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(input_height, input_width);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
inputData.emplace_back(element_type, input_shape, input_img.data());

View File

@ -232,7 +232,7 @@ inline std::shared_ptr<Function> resize_linear() {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(20, 20);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}
@ -243,7 +243,7 @@ inline std::shared_ptr<Function> resize_nearest() {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(20, 20);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}
@ -254,7 +254,7 @@ inline std::shared_ptr<Function> resize_linear_nhwc() {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(20, 20);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
function = p.build();
return function;
}
@ -265,7 +265,7 @@ inline std::shared_ptr<Function> resize_cubic() {
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(10, 10);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}
@ -276,7 +276,7 @@ inline std::shared_ptr<Function> resize_and_convert_layout() {
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NHWC").set_spatial_static_shape(40, 30);
p.input().preprocess().convert_layout().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}
@ -296,7 +296,7 @@ inline std::shared_ptr<Function> resize_and_convert_layout_i8() {
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NHWC").set_spatial_static_shape(40, 30);
p.input().preprocess().convert_layout().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}
@ -334,7 +334,7 @@ inline std::shared_ptr<Function> cvt_color_nv12_cvt_layout_resize() {
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
function = p.build();
return function;
}

View File

@ -363,9 +363,9 @@ def apply_preprocessing(ov_function: Function, argv: argparse.Namespace):
for node_name, layout_value in layout_values.items():
if layout_value.get('source_layout'):
if layout_value.get('is_input'):
prep.input(node_name).network().set_layout(Layout(layout_value['source_layout']))
prep.input(node_name).model().set_layout(Layout(layout_value['source_layout']))
else:
prep.output(node_name).network().set_layout(Layout(layout_value['source_layout']))
prep.output(node_name).model().set_layout(Layout(layout_value['source_layout']))
if layout_value.get('target_layout'):
if layout_value.get('is_input'):
prep.input(node_name).tensor().set_layout(Layout(layout_value['target_layout']))

View File

@ -110,7 +110,7 @@ int main(int argc, char* argv[]) {
// - layout of data is 'NHWC'
input_info.tensor().set_element_type(ov::element::u8).set_layout(tensor_layout);
// 3) Here we suppose model has 'NCHW' layout for input
input_info.network().set_layout("NCHW");
input_info.model().set_layout("NCHW");
// 4) output() with no args assumes a model has a single result
// - output() with no args assumes a model has a single result
// - precision of tensor is supposed to be 'f32'

View File

@ -86,7 +86,7 @@ int tmain(int argc, tchar* argv[]) {
// - apply linear resize from tensor spatial dims to model spatial dims
preproc.input().preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
// 4) Here we suppose model has 'NCHW' layout for input
preproc.input().network().set_layout("NCHW");
preproc.input().model().set_layout("NCHW");
// 5) Set output tensor information:
// - precision of tensor is supposed to be 'f32'
preproc.output().tensor().set_element_type(ov::element::f32);

View File

@ -284,7 +284,7 @@ int main(int argc, char* argv[]) {
// - precision of tensor is supposed to be 'u8'
input_info.tensor().set_layout(tensor_layout).set_element_type(element::u8);
// 3) Here we suppose model has 'NCHW' layout for input
input_info.network().set_layout("NCHW");
input_info.model().set_layout("NCHW");
// 4) Once the build() method is called, the preprocessing steps
// for layout and precision conversions are inserted automatically
model = proc.build();

View File

@ -40,8 +40,8 @@ from openvino.pyopenvino.preprocess import InputInfo
from openvino.pyopenvino.preprocess import OutputInfo
from openvino.pyopenvino.preprocess import InputTensorInfo
from openvino.pyopenvino.preprocess import OutputTensorInfo
from openvino.pyopenvino.preprocess import InputNetworkInfo
from openvino.pyopenvino.preprocess import OutputNetworkInfo
from openvino.pyopenvino.preprocess import InputModelInfo
from openvino.pyopenvino.preprocess import OutputModelInfo
from openvino.pyopenvino.preprocess import PrePostProcessor
from openvino.pyopenvino.preprocess import PreProcessSteps
from openvino.pyopenvino.preprocess import PostProcessSteps

View File

@ -236,7 +236,7 @@ static void regclass_graph_InputTensorInfo(py::module m) {
},
py::arg("type"),
R"(
Set initial client's tensor element type. If type is not the same as network's element type,
Set initial client's tensor element type. If type is not the same as model's element type,
conversion of element type will be done automatically.
Parameters
----------
@ -284,7 +284,7 @@ static void regclass_graph_OutputTensorInfo(py::module m) {
},
py::arg("type"),
R"(
Set client's output tensor element type. If type is not the same as network's element type,
Set client's output tensor element type. If type is not the same as model's element type,
conversion of element type will be done automatically.
Parameters
----------
@ -310,8 +310,8 @@ static void regclass_graph_InputInfo(py::module m) {
inp.def("preprocess", [](ov::preprocess::InputInfo& me) {
return &me.preprocess();
});
inp.def("network", [](ov::preprocess::InputInfo& me) {
return &me.network();
inp.def("model", [](ov::preprocess::InputInfo& me) {
return &me.model();
});
}
@ -325,29 +325,29 @@ static void regclass_graph_OutputInfo(py::module m) {
out.def("postprocess", [](ov::preprocess::OutputInfo& me) {
return &me.postprocess();
});
out.def("network", [](ov::preprocess::OutputInfo& me) {
return &me.network();
out.def("model", [](ov::preprocess::OutputInfo& me) {
return &me.model();
});
}
static void regclass_graph_OutputNetworkInfo(py::module m) {
py::class_<ov::preprocess::OutputNetworkInfo, Common::ref_wrapper<ov::preprocess::OutputNetworkInfo>> info(
static void regclass_graph_OutputModelInfo(py::module m) {
py::class_<ov::preprocess::OutputModelInfo, Common::ref_wrapper<ov::preprocess::OutputModelInfo>> info(
m,
"OutputNetworkInfo");
info.doc() = "openvino.impl.preprocess.OutputNetworkInfo wraps ov::preprocess::OutputNetworkInfo";
"OutputModelInfo");
info.doc() = "openvino.impl.preprocess.OutputModelInfo wraps ov::preprocess::OutputModelInfo";
info.def("set_layout", [](ov::preprocess::OutputNetworkInfo& me, const ov::Layout& layout) {
info.def("set_layout", [](ov::preprocess::OutputModelInfo& me, const ov::Layout& layout) {
return &me.set_layout(layout);
});
}
static void regclass_graph_InputNetworkInfo(py::module m) {
py::class_<ov::preprocess::InputNetworkInfo, Common::ref_wrapper<ov::preprocess::InputNetworkInfo>> info(
static void regclass_graph_InputModelInfo(py::module m) {
py::class_<ov::preprocess::InputModelInfo, Common::ref_wrapper<ov::preprocess::InputModelInfo>> info(
m,
"InputNetworkInfo");
info.doc() = "openvino.impl.preprocess.InputNetworkInfo wraps ov::preprocess::InputNetworkInfo";
"InputModelInfo");
info.doc() = "openvino.impl.preprocess.InputModelInfo wraps ov::preprocess::InputModelInfo";
info.def("set_layout", [](ov::preprocess::InputNetworkInfo& me, const ov::Layout& layout) {
info.def("set_layout", [](ov::preprocess::InputModelInfo& me, const ov::Layout& layout) {
return &me.set_layout(layout);
});
}
@ -381,8 +381,8 @@ void regclass_graph_PrePostProcessor(py::module m) {
regclass_graph_OutputInfo(m);
regclass_graph_InputTensorInfo(m);
regclass_graph_OutputTensorInfo(m);
regclass_graph_InputNetworkInfo(m);
regclass_graph_OutputNetworkInfo(m);
regclass_graph_InputModelInfo(m);
regclass_graph_OutputModelInfo(m);
regenum_graph_ColorFormat(m);
regenum_graph_ResizeAlgorithm(m);
py::class_<ov::preprocess::PrePostProcessor, std::shared_ptr<ov::preprocess::PrePostProcessor>> proc(

View File

@ -186,10 +186,10 @@ def test_ngraph_preprocess_spatial_static_shape():
inp = p.input()
inp.tensor().set_layout(layout).set_spatial_static_shape(2, 2).set_color_format(color_format, [])
inp.preprocess().convert_element_type(Type.f32).mean([1., 2.])
inp.network().set_layout(layout)
inp.model().set_layout(layout)
out = p.output()
out.tensor().set_layout(layout).set_element_type(Type.f32)
out.network().set_layout(layout)
out.model().set_layout(layout)
function = p.build()
input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.int32)

View File

@ -45,10 +45,10 @@
// Preprocessing
#include "openvino/core/preprocess/color_format.hpp"
#include "openvino/core/preprocess/input_info.hpp"
#include "openvino/core/preprocess/input_network_info.hpp"
#include "openvino/core/preprocess/input_model_info.hpp"
#include "openvino/core/preprocess/input_tensor_info.hpp"
#include "openvino/core/preprocess/output_info.hpp"
#include "openvino/core/preprocess/output_network_info.hpp"
#include "openvino/core/preprocess/output_model_info.hpp"
#include "openvino/core/preprocess/output_tensor_info.hpp"
#include "openvino/core/preprocess/postprocess_steps.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"

View File

@ -5,7 +5,7 @@
#pragma once
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/preprocess/input_network_info.hpp"
#include "openvino/core/preprocess/input_model_info.hpp"
#include "openvino/core/preprocess/input_tensor_info.hpp"
#include "openvino/core/preprocess/preprocess_steps.hpp"
@ -16,7 +16,7 @@ namespace preprocess {
/// From preprocessing pipeline perspective, each input can be represented as:
/// - User's input parameter info (InputInfo::tensor)
/// - Preprocessing steps applied to user's input (InputInfo::preprocess)
/// - Network's input info, which is a final info after preprocessing (InputInfo::network)
/// - Model's input info, which is the final input info after preprocessing (InputInfo::model)
///
class OPENVINO_API InputInfo final {
class InputInfoImpl;
@ -46,10 +46,10 @@ public:
/// \return Reference to current preprocess steps structure
PreProcessSteps& preprocess();
/// \brief Get current input network/model information with ability to change original network's input data
/// \brief Get current input model information with ability to change original model's input data
///
/// \return Reference to current network's input information structure
InputNetworkInfo& network();
/// \return Reference to current model's input information structure
InputModelInfo& model();
};
} // namespace preprocess

View File

@ -11,38 +11,38 @@
namespace ov {
namespace preprocess {
/// \brief Information about network's input tensor. If all information is already included to loaded network, this info
/// may not be needed. However it can be set to specify additional information about network, like 'layout'.
/// \brief Information about model's input tensor. If all information is already included to loaded model, this info
/// may not be needed. However it can be set to specify additional information about model, like 'layout'.
///
/// Example of usage of network 'layout':
/// Support network has input parameter with shape {1, 3, 224, 224} and user needs to resize input image to network's
/// Example of usage of model 'layout':
/// Support model has input parameter with shape {1, 3, 224, 224} and user needs to resize input image to model's
/// dimensions. It can be done like this
///
/// \code{.cpp}
/// <network has input parameter with shape {1, 3, 224, 224}>
/// <model has input parameter with shape {1, 3, 224, 224}>
/// auto proc = PrePostProcessor(function);
/// proc.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
/// proc.input().network().set_layout("NCHW");
/// proc.input().model().set_layout("NCHW");
/// \endcode
class OPENVINO_API InputNetworkInfo final {
class InputNetworkInfoImpl;
std::unique_ptr<InputNetworkInfoImpl> m_impl;
class OPENVINO_API InputModelInfo final {
class InputModelInfoImpl;
std::unique_ptr<InputModelInfoImpl> m_impl;
friend class InputInfo;
/// \brief Default empty constructor
InputNetworkInfo();
InputModelInfo();
public:
/// \brief Default destructor
~InputNetworkInfo();
~InputModelInfo();
/// \brief Set layout for network's input tensor
/// \brief Set layout for model's input tensor
/// This version allows chaining for Lvalue objects
///
/// \param layout Layout for network's input tensor.
/// \param layout Layout for model's input tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputNetworkInfo& set_layout(const ov::Layout& layout);
InputModelInfo& set_layout(const ov::Layout& layout);
};
} // namespace preprocess

View File

@ -30,7 +30,7 @@ public:
};
/// \brief Information about user's input tensor. By default, it will be initialized to same data (type/shape/etc) as
/// network's input parameter. User application can override particular parameters (like 'element_type') according to
/// model's input parameter. User application can override particular parameters (like 'element_type') according to
/// application's data and specify appropriate conversions in pre-processing steps
///
/// \code{.cpp}
@ -63,14 +63,14 @@ public:
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputTensorInfo& set_layout(const ov::Layout& layout);
/// \brief By default, input image shape is inherited from network input shape. This method specifies that user's
/// \brief By default, input image shape is inherited from model input shape. This method specifies that user's
/// input image has dynamic spatial dimensions (width & height). This can be useful for adding resize preprocessing
/// from any input image to network's expected dimensions.
/// from any input image to model's expected dimensions.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo& set_spatial_dynamic_shape();
/// \brief By default, input image shape is inherited from network input shape. Use this method to specify different
/// \brief By default, input image shape is inherited from model input shape. Use this method to specify different
/// width and height of user's input image. In case if input image size is not known, use
/// `set_spatial_dynamic_shape` method.
///

View File

@ -5,7 +5,7 @@
#pragma once
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/preprocess/output_network_info.hpp"
#include "openvino/core/preprocess/output_model_info.hpp"
#include "openvino/core/preprocess/output_tensor_info.hpp"
#include "openvino/core/preprocess/postprocess_steps.hpp"
@ -14,7 +14,7 @@ namespace preprocess {
/// \brief Class holding postprocessing information for one output
/// From postprocessing pipeline perspective, each output can be represented as:
/// - Network's output info, (OutputInfo::network)
/// - Model's output info, (OutputInfo::model)
/// - Postprocessing steps applied to user's input (OutputInfo::postprocess)
/// - User's desired output parameter information, which is a final one after preprocessing (OutputInfo::tensor)
class OPENVINO_API OutputInfo final {
@ -35,10 +35,10 @@ public:
/// \brief Default destructor
~OutputInfo();
/// \brief Get current output network/model information with ability to change original network's output data
/// \brief Get current output model information with ability to change original model's output data
///
/// \return Reference to current network's output information structure
OutputNetworkInfo& network();
/// \return Reference to current model's output information structure
OutputModelInfo& model();
/// \brief Get current output post-process information with ability to add more post-processing steps
///

View File

@ -10,38 +10,38 @@
namespace ov {
namespace preprocess {
/// \brief Information about network's output tensor. If all information is already included to loaded network, this
/// info may not be needed. However it can be set to specify additional information about network, like 'layout'.
/// \brief Information about model's output tensor. If all information is already included to loaded model, this
/// info may not be needed. However it can be set to specify additional information about model's output, like 'layout'.
///
/// Example of usage of network 'layout':
/// Support network has output result with shape {1, 3, 224, 224} and `NHWC` layout. User may need to transpose
/// Example of usage of model's 'layout':
/// Suppose model has output result with shape {1, 3, 224, 224} and `NHWC` layout. User may need to transpose
/// output picture to interleaved format {1, 224, 224, 3}. This can be done with the following code
///
/// \code{.cpp}
/// <network has output result with shape {1, 3, 224, 224}>
/// <model has output result with shape {1, 3, 224, 224}>
/// auto proc = PrePostProcessor(function);
/// proc.output().network().set_layout("NCHW");
/// proc.output().model().set_layout("NCHW");
/// proc.output().postprocess().convert_layout("NHWC");
/// function = proc.build();
/// \endcode
class OPENVINO_API OutputNetworkInfo final {
class OutputNetworkInfoImpl;
std::unique_ptr<OutputNetworkInfoImpl> m_impl;
class OPENVINO_API OutputModelInfo final {
class OutputModelInfoImpl;
std::unique_ptr<OutputModelInfoImpl> m_impl;
friend class OutputInfo;
/// \brief Default internal empty constructor
OutputNetworkInfo();
OutputModelInfo();
public:
/// \brief Default destructor
~OutputNetworkInfo();
~OutputModelInfo();
/// \brief Set layout for network's output tensor
/// \brief Set layout for model's output tensor
///
/// \param layout Layout for network's output tensor.
/// \param layout Layout for model's output tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
OutputNetworkInfo& set_layout(const ov::Layout& layout);
OutputModelInfo& set_layout(const ov::Layout& layout);
};
} // namespace preprocess

View File

@ -12,7 +12,7 @@ namespace ov {
namespace preprocess {
/// \brief Information about user's desired output tensor. By default, it will be initialized to same data
/// (type/shape/etc) as network's output parameter. User application can override particular parameters (like
/// (type/shape/etc) as model's output parameter. User application can override particular parameters (like
/// 'element_type') according to application's data and specify appropriate conversions in post-processing steps
///
/// \code{.cpp}

View File

@ -42,14 +42,14 @@ public:
/// \brief Add 'convert layout' operation to specified layout.
///
/// \details Adds appropriate 'transpose' operation between network layout and user's desired layout.
/// \details Adds appropriate 'transpose' operation between model layout and user's desired layout.
/// Current implementation requires source and destination layout to have same number of dimensions
///
/// \example Example: when network data has output in 'NCHW' layout ([1, 3, 224, 224]) but user needs
/// \example Example: when model data has output in 'NCHW' layout ([1, 3, 224, 224]) but user needs
/// interleaved output image ('NHWC', [1, 224, 224, 3]). Post-processing may look like this:
///
/// \code{.cpp} auto proc = PrePostProcessor(function);
/// proc.output().network(OutputTensorInfo().set_layout("NCHW"); // Network output is NCHW
/// proc.output().model().set_layout("NCHW"); // Model output is NCHW
/// proc.output().postprocess().convert_layout("NHWC"); // User needs output as NHWC
/// \endcode
///
@ -61,7 +61,7 @@ public:
/// \brief Add convert layout operation by direct specification of transposed dimensions.
///
/// \example Example: network produces output with shape [1, 3, 480, 640] and user's needs
/// \example Example: model produces output with shape [1, 3, 480, 640] and user's needs
/// interleaved output image [1, 480, 640, 3]. Post-processing may look like this:
///
/// \code{.cpp} auto proc = PrePostProcessor(function);

View File

@ -45,11 +45,11 @@ public:
/// \brief Default destructor
~PrePostProcessor();
/// \brief Gets input pre-processing data structure. Should be used only if network/function has only one input
/// \brief Gets input pre-processing data structure. Should be used only if model/function has only one input
/// Using returned structure application's code is able to set user's tensor data (e.g layout), preprocess steps,
/// target model's data
///
/// \return Reference to network's input information structure
/// \return Reference to model's input information structure
InputInfo& input();
/// \brief Gets input pre-processing data structure for input identified by it's tensor name
@ -57,21 +57,21 @@ public:
/// \param tensor_name Tensor name of specific input. Throws if tensor name is not associated with any input in a
/// model
///
/// \return Reference to network's input information structure
/// \return Reference to model's input information structure
InputInfo& input(const std::string& tensor_name);
/// \brief Gets input pre-processing data structure for input identified by it's order in a model
///
/// \param input_index Input index of specific input. Throws if input index is out of range for associated function
///
/// \return Reference to network's input information structure
/// \return Reference to model's input information structure
InputInfo& input(size_t input_index);
/// \brief Gets output post-processing data structure. Should be used only if network/function has only one output
/// \brief Gets output post-processing data structure. Should be used only if model/function has only one output
/// Using returned structure application's code is able to set model's output data, post-process steps, user's
/// tensor data (e.g layout)
///
/// \return Reference to network's output information structure
/// \return Reference to model's output information structure
OutputInfo& output();
/// \brief Gets output post-processing data structure for output identified by it's tensor name
@ -79,7 +79,7 @@ public:
/// \param tensor_name Tensor name of specific output. Throws if tensor name is not associated with any input in a
/// model
///
/// \return Reference to network's output information structure
/// \return Reference to model's output information structure
OutputInfo& output(const std::string& tensor_name);
/// \brief Gets output post-processing data structure for output identified by it's order in a model
@ -87,7 +87,7 @@ public:
/// \param output_index Output index of specific output. Throws if output index is out of range for associated
/// function
///
/// \return Reference to network's output information structure
/// \return Reference to model's output information structure
OutputInfo& output(size_t output_index);
/// \brief Adds pre/post-processing operations to function passed in constructor

View File

@ -108,7 +108,7 @@ public:
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width);
/// \brief Add resize operation to network's dimensions.
/// \brief Add resize operation to model's dimensions.
///
/// \param alg Resize algorithm.
///
@ -120,23 +120,23 @@ public:
/// \details Adds appropriate 'transpose' operation between user layout and target layout.
/// Current implementation requires source and destination layout to have same number of dimensions
///
/// \example Example: when user data has 'NHWC' layout (example is RGB image, [1, 224, 224, 3]) but network expects
/// \example Example: when user data has 'NHWC' layout (example is RGB image, [1, 224, 224, 3]) but model expects
/// planar input image ('NCHW', [1, 3, 224, 224]). Preprocessing may look like this:
///
/// \code{.cpp} auto proc = PrePostProcessor(function);
/// proc.input().tensor().set_layout("NHWC"); // User data is NHWC
/// proc.input().preprocess().convert_layout("NCHW")) // Network expects input as NCHW
/// proc.input().preprocess().convert_layout("NCHW")) // model expects input as NCHW
/// \endcode
///
/// \param dst_layout New layout after conversion. If not specified - destination layout is obtained from
/// appropriate network input properties.
/// appropriate model input properties.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& convert_layout(const Layout& dst_layout = {});
/// \brief Add convert layout operation by direct specification of transposed dimensions.
///
/// \example Example: when user data has input RGB image {1x480x640x3} but network expects
/// \example Example: when user data has input RGB image {1x480x640x3} but model expects
/// planar input image ('NCHW', [1, 3, 480, 640]). Preprocessing may look like this:
///
/// \code{.cpp}
@ -154,7 +154,7 @@ public:
/// \details Adds appropriate operation which reverses channels layout. Operation requires layout having 'C'
/// dimension Operation convert_color (RGB<->BGR) does reversing of channels also, but only for NHWC layout
///
/// \example Example: when user data has 'NCHW' layout (example is [1, 3, 224, 224] RGB order) but network expects
/// \example Example: when user data has 'NCHW' layout (example is [1, 3, 224, 224] RGB order) but model expects
/// BGR planes order. Preprocessing may look like this:
///
/// \code{.cpp}

View File

@ -163,10 +163,10 @@ private:
class OutputTensorInfo::OutputTensorInfoImpl : public TensorInfoImplBase {};
/// \brief InputNetworkInfoImpl - internal data structure
class NetworkInfoImpl {
/// \brief ModelInfoImpl - internal data structure
class ModelInfoImpl {
public:
NetworkInfoImpl() = default;
ModelInfoImpl() = default;
void set_layout(const Layout& layout) {
m_layout = layout;
@ -184,9 +184,9 @@ private:
bool m_layout_set = false;
};
class InputNetworkInfo::InputNetworkInfoImpl : public NetworkInfoImpl {};
class InputModelInfo::InputModelInfoImpl : public ModelInfoImpl {};
class OutputNetworkInfo::OutputNetworkInfoImpl : public NetworkInfoImpl {};
class OutputModelInfo::OutputModelInfoImpl : public ModelInfoImpl {};
/// \brief InputInfoImpl - internal data structure
struct InputInfo::InputInfoImpl {
@ -200,13 +200,13 @@ struct InputInfo::InputInfoImpl {
return m_preprocess.m_impl;
}
std::unique_ptr<InputNetworkInfo::InputNetworkInfoImpl>& get_network() {
return m_network_data.m_impl;
std::unique_ptr<InputModelInfo::InputModelInfoImpl>& get_model() {
return m_model_data.m_impl;
}
InputTensorInfo m_tensor_info;
PreProcessSteps m_preprocess;
InputNetworkInfo m_network_data;
InputModelInfo m_model_data;
std::shared_ptr<op::v0::Parameter> m_resolved_param;
};
@ -222,13 +222,13 @@ struct OutputInfo::OutputInfoImpl {
return m_postprocess.m_impl;
}
std::unique_ptr<OutputNetworkInfo::OutputNetworkInfoImpl>& get_network_data() {
return m_network_info.m_impl;
std::unique_ptr<OutputModelInfo::OutputModelInfoImpl>& get_model_data() {
return m_model_info.m_impl;
}
OutputTensorInfo m_tensor_info;
PostProcessSteps m_postprocess;
OutputNetworkInfo m_network_info;
OutputModelInfo m_model_info;
ov::Output<ov::Node> m_output_node;
};
@ -247,8 +247,8 @@ PreProcessSteps& InputInfo::preprocess() {
return m_impl->m_preprocess;
}
InputNetworkInfo& InputInfo::network() {
return m_impl->m_network_data;
InputModelInfo& InputInfo::model() {
return m_impl->m_model_data;
}
//-------------- OutputInfo ------------------
@ -257,8 +257,8 @@ OutputInfo::OutputInfo(OutputInfo&& other) noexcept = default;
OutputInfo& OutputInfo::operator=(OutputInfo&& other) noexcept = default;
OutputInfo::~OutputInfo() = default;
OutputNetworkInfo& OutputInfo::network() {
return m_impl->m_network_info;
OutputModelInfo& OutputInfo::model() {
return m_impl->m_model_info;
}
PostProcessSteps& OutputInfo::postprocess() {
@ -365,9 +365,9 @@ std::shared_ptr<Function> PrePostProcessor::build() {
bool tensor_data_updated = false;
for (const auto& input_info : m_impl->m_inputs) {
auto& input = input_info.m_impl;
// Set parameter layout from 'network' information
if (input->get_network()->is_layout_set() && input->m_resolved_param->get_layout().empty()) {
input->m_resolved_param->set_layout(input->get_network()->get_layout());
// Set parameter layout from 'model' information
if (input->get_model()->is_layout_set() && input->m_resolved_param->get_layout().empty()) {
input->m_resolved_param->set_layout(input->get_model()->get_layout());
}
}
auto results = function->get_results();
@ -397,7 +397,7 @@ std::shared_ptr<Function> PrePostProcessor::build() {
}
if (input->get_tensor_data()->is_layout_set() && !param->get_layout().empty() &&
param->get_layout() != input->get_tensor_data()->get_layout()) {
// Find transpose between network and tensor layouts and update tensor shape
// Find transpose between model and tensor layouts and update tensor shape
auto net_to_tensor =
layout::find_permutation(param->get_layout(), net_shape.rank(), input->get_tensor_data()->get_layout());
if (!net_to_tensor.empty()) {
@ -445,7 +445,7 @@ std::shared_ptr<Function> PrePostProcessor::build() {
std::get<0>(existing_names).count(new_name) == 0,
"Error while trying to create plane input with name '",
new_name,
"' - name already exists in network. Please specify another sub-name for set_color_format");
"' - name already exists in model. Please specify another sub-name for set_color_format");
plane_tensor_names.insert(new_name);
}
plane_param->get_default_output().get_tensor().set_names(plane_tensor_names);
@ -476,7 +476,7 @@ std::shared_ptr<Function> PrePostProcessor::build() {
PreprocessingContext context(input->get_tensor_data()->get_layout());
context.color_format() = input->get_tensor_data()->get_color_format();
context.target_layout() = param->get_layout();
context.network_shape() = param->get_partial_shape();
context.model_shape() = param->get_partial_shape();
context.target_element_type() = param->get_element_type();
// 2. Apply preprocessing
@ -487,12 +487,12 @@ std::shared_ptr<Function> PrePostProcessor::build() {
}
OPENVINO_ASSERT(nodes.size() == 1,
"Multiple plane input is not allowed as network input. Consider using of convert_color "
"Multiple plane input is not allowed as model input. Consider using of convert_color "
"preprocessing operation. Current format is '",
color_format_name(context.color_format()),
"'");
OPENVINO_ASSERT(is_rgb_family(context.color_format()) || context.color_format() == ColorFormat::UNDEFINED,
"Network shall have RGB/BGR color format. Consider add 'convert_color' preprocessing operation "
"model shall have RGB/BGR color format. Consider add 'convert_color' preprocessing operation "
"to convert current color format '",
color_format_name(context.color_format()),
"'to RGB/BGR");
@ -556,9 +556,9 @@ std::shared_ptr<Function> PrePostProcessor::build() {
auto start_out_node_names = node.get_tensor().get_names();
node.get_tensor().set_names({});
result = std::dynamic_pointer_cast<op::v0::Result>(node.get_node_shared_ptr());
// Set result layout from 'network' information
if (output->get_network_data()->is_layout_set() && result->get_layout().empty()) {
result->set_layout(output->get_network_data()->get_layout());
// Set result layout from 'model' information
if (output->get_model_data()->is_layout_set() && result->get_layout().empty()) {
result->set_layout(output->get_model_data()->get_layout());
}
auto parent = result->get_input_source_output(0);
PostprocessingContext context(result->get_layout());
@ -656,11 +656,11 @@ InputTensorInfo& InputTensorInfo::set_spatial_static_shape(size_t height, size_t
return *this;
}
// --------------------- InputNetworkInfo ------------------
InputNetworkInfo::InputNetworkInfo() : m_impl(std::unique_ptr<InputNetworkInfoImpl>(new InputNetworkInfoImpl())) {}
InputNetworkInfo::~InputNetworkInfo() = default;
// --------------------- InputModelInfo ------------------
InputModelInfo::InputModelInfo() : m_impl(std::unique_ptr<InputModelInfoImpl>(new InputModelInfoImpl())) {}
InputModelInfo::~InputModelInfo() = default;
InputNetworkInfo& InputNetworkInfo::set_layout(const Layout& layout) {
InputModelInfo& InputModelInfo::set_layout(const Layout& layout) {
m_impl->set_layout(layout);
return *this;
}
@ -771,11 +771,11 @@ OutputTensorInfo& OutputTensorInfo::set_layout(const Layout& layout) {
return *this;
}
// --------------------- OutputNetworkInfo ------------------
OutputNetworkInfo::OutputNetworkInfo() : m_impl(std::unique_ptr<OutputNetworkInfoImpl>(new OutputNetworkInfoImpl())) {}
OutputNetworkInfo::~OutputNetworkInfo() = default;
// --------------------- OutputModelInfo ------------------
OutputModelInfo::OutputModelInfo() : m_impl(std::unique_ptr<OutputModelInfoImpl>(new OutputModelInfoImpl())) {}
OutputModelInfo::~OutputModelInfo() = default;
OutputNetworkInfo& OutputNetworkInfo::set_layout(const Layout& layout) {
OutputModelInfo& OutputModelInfo::set_layout(const Layout& layout) {
m_impl->set_layout(layout);
return *this;
}

View File

@ -142,11 +142,11 @@ void PreStepsList::add_resize_impl(ResizeAlgorithm alg, int dst_height, int dst_
auto height_idx = static_cast<int64_t>(get_and_check_height_idx(layout, node.get_partial_shape()));
auto width_idx = static_cast<int64_t>(get_and_check_width_idx(layout, node.get_partial_shape()));
if (dst_height < 0 || dst_width < 0) {
OPENVINO_ASSERT(ctxt.network_shape().rank().is_static(),
"Resize is not fully specified while target network shape is dynamic");
OPENVINO_ASSERT(ctxt.model_shape().rank().is_static(),
"Resize is not fully specified while target model shape is dynamic");
}
int new_image_width = dst_width < 0 ? static_cast<int>(ctxt.get_network_width_for_resize()) : dst_width;
int new_image_height = dst_height < 0 ? static_cast<int>(ctxt.get_network_height_for_resize()) : dst_height;
int new_image_width = dst_width < 0 ? static_cast<int>(ctxt.get_model_width_for_resize()) : dst_width;
int new_image_height = dst_height < 0 ? static_cast<int>(ctxt.get_model_height_for_resize()) : dst_height;
auto target_spatial_shape =
op::v0::Constant::create<int64_t>(element::i64, Shape{2}, {new_image_height, new_image_width});

View File

@ -75,7 +75,7 @@ public:
}
// Final layout. Needed if user specified convert_layout without arguments
// For preprocessing it is parameter's network layout
// For preprocessing it is parameter's model layout
// For post-processing it is result's tensor layout
const Layout& target_layout() const {
return m_target_layout;
@ -105,26 +105,26 @@ class PreprocessingContext : public PrePostProcessingContextBase {
public:
explicit PreprocessingContext(const Layout& layout) : PrePostProcessingContextBase(layout) {}
const PartialShape& network_shape() const {
return m_network_shape;
const PartialShape& model_shape() const {
return m_model_shape;
}
PartialShape& network_shape() {
return m_network_shape;
PartialShape& model_shape() {
return m_model_shape;
}
size_t get_network_height_for_resize() const {
auto network_height_idx = get_and_check_height_idx(target_layout(), network_shape());
OPENVINO_ASSERT(network_shape()[network_height_idx].is_static(),
"Dynamic resize: Network height dimension shall be static");
return network_shape()[network_height_idx].get_length();
size_t get_model_height_for_resize() const {
auto model_height_idx = get_and_check_height_idx(target_layout(), model_shape());
OPENVINO_ASSERT(model_shape()[model_height_idx].is_static(),
"Dynamic resize: Model height dimension shall be static");
return model_shape()[model_height_idx].get_length();
}
size_t get_network_width_for_resize() const {
auto network_width_idx = get_and_check_width_idx(target_layout(), network_shape());
OPENVINO_ASSERT(network_shape()[network_width_idx].is_static(),
"Dynamic resize: Network width dimension shall be static");
return network_shape()[network_width_idx].get_length();
size_t get_model_width_for_resize() const {
auto model_width_idx = get_and_check_width_idx(target_layout(), model_shape());
OPENVINO_ASSERT(model_shape()[model_width_idx].is_static(),
"Dynamic resize: Model width dimension shall be static");
return model_shape()[model_width_idx].get_length();
}
const ColorFormat& color_format() const {
@ -136,8 +136,8 @@ public:
}
private:
PartialShape m_network_shape;
Layout m_network_layout;
PartialShape m_model_shape;
Layout m_model_layout;
ColorFormat m_color_format = ColorFormat::UNDEFINED;
};
@ -165,19 +165,19 @@ public:
return m_actions;
}
PartialShape calculate_param_shape(const PartialShape& network_shape) const {
if (network_shape.rank().is_dynamic()) {
return network_shape;
PartialShape calculate_param_shape(const PartialShape& model_shape) const {
if (model_shape.rank().is_dynamic()) {
return model_shape;
}
std::vector<Dimension> old_dims(network_shape.rank().get_length());
std::vector<Dimension> dims(network_shape.rank().get_length());
for (size_t i = 0; i < network_shape.rank().get_length(); i++) {
dims[i] = network_shape[i];
std::vector<Dimension> old_dims(model_shape.rank().get_length());
std::vector<Dimension> dims(model_shape.rank().get_length());
for (size_t i = 0; i < model_shape.rank().get_length(); i++) {
dims[i] = model_shape[i];
}
for (const auto& convert : m_layout_converts) {
old_dims = dims;
dims = std::vector<Dimension>(network_shape.rank().get_length());
dims = std::vector<Dimension>(model_shape.rank().get_length());
for (size_t i = 0; i < convert.size(); i++) {
OPENVINO_ASSERT(convert[i] < dims.size(), "Convert dimension ", convert[i], " is out of bounds.");
dims[convert[i]] = old_dims[i];

View File

@ -434,7 +434,7 @@ TEST(pre_post_process, convert_color_duplicate_internal_subnames_mean) {
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NHWC");
p.input().preprocess().convert_layout("NCHW");
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
f = p.build();
}
f = create_simple_function(element::f32, PartialShape{1, 2, 2, 3});
@ -444,11 +444,11 @@ TEST(pre_post_process, convert_color_duplicate_internal_subnames_mean) {
}
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().tensor().set_spatial_static_shape(480, 640);
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
EXPECT_NO_THROW(f = p.build());
}
TEST(pre_post_process, unsupported_network_color_format) {
TEST(pre_post_process, unsupported_model_color_format) {
auto f = create_simple_function(element::f32, PartialShape{1, 4, 4, 3});
EXPECT_THROW(auto p = PrePostProcessor(f); p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
f = p.build(), ov::AssertFailure);
@ -469,7 +469,7 @@ TEST(pre_post_process, unsupported_network_color_format) {
f = p.build(), ov::AssertFailure);
}
TEST(pre_post_process, unsupported_network_color_format_i420) {
TEST(pre_post_process, unsupported_model_color_format_i420) {
auto f = create_simple_function(element::f32, PartialShape{1, 4, 4, 3});
EXPECT_THROW(
{
@ -525,7 +525,7 @@ TEST(pre_post_process, test_2_inputs_basic) {
EXPECT_EQ(f->get_output_element_type(1), element::f32);
}
TEST(pre_post_process, reuse_network_layout_no_tensor_info) {
TEST(pre_post_process, reuse_model_layout_no_tensor_info) {
auto f = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 3, 2, 1});
f->get_parameters().front()->set_layout("NC??");
auto p = PrePostProcessor(f);
@ -534,7 +534,7 @@ TEST(pre_post_process, reuse_network_layout_no_tensor_info) {
EXPECT_EQ(f->get_parameters().front()->get_layout(), "NC??");
}
TEST(pre_post_process, reuse_network_layout_tensor_info) {
TEST(pre_post_process, reuse_model_layout_tensor_info) {
auto f = create_simple_function(element::u8, PartialShape{Dimension::dynamic(), 3, 2, 1});
f->get_parameters().front()->set_layout("NC??");
auto p = PrePostProcessor(f);
@ -617,7 +617,7 @@ TEST(pre_post_process, mean_vector_dynamic_channels_shape) {
}
// Error cases for 'resize'
TEST(pre_post_process, resize_no_network_layout) {
TEST(pre_post_process, resize_no_model_layout) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);
EXPECT_THROW(p.input().tensor().set_layout("NHWC"); p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
@ -676,7 +676,7 @@ TEST(pre_post_process, resize_no_tensor_height) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);
EXPECT_THROW(p.input().tensor().set_layout("N?WC"); p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
p.build(), ov::AssertFailure);
}
@ -684,7 +684,7 @@ TEST(pre_post_process, resize_no_tensor_width) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
auto p = PrePostProcessor(f);
EXPECT_THROW(p.input().tensor().set_layout("NH?C"); p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NHWC");
p.input().model().set_layout("NHWC");
p.build(), ov::AssertFailure);
}
@ -697,7 +697,7 @@ TEST(pre_post_process, preprocess_convert_layout_implicit) {
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NHWC");
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NHWC");
EXPECT_EQ(f->get_parameters()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 2, 2, 3}));
@ -713,7 +713,7 @@ TEST(pre_post_process, preprocess_convert_layout_default) {
p.input().tensor().set_layout("NHWC");
p.input().preprocess().convert_layout();
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NHWC");
EXPECT_EQ(f->get_parameters()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 2, 2, 3}));
@ -731,7 +731,7 @@ TEST(pre_post_process, preprocess_convert_layout_same_various) {
stream << "]";
auto l = stream.str();
p.input().tensor().set_layout(ov::Layout(l));
p.input().network().set_layout(ov::Layout(std::string(i, '?')));
p.input().model().set_layout(ov::Layout(std::string(i, '?')));
EXPECT_NO_THROW(p.build());
}
}
@ -744,7 +744,7 @@ TEST(pre_post_process, preprocess_convert_layout_same) {
p.input().tensor().set_layout("NCHW");
p.input().preprocess().convert_layout("NCHW");
p.input().network().set_layout("NCHW");
p.input().model().set_layout("NCHW");
p.build();
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NCHW");
EXPECT_EQ(f->get_parameters()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 3, 2, 2}));
@ -806,28 +806,28 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined) {
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("nc???");
p.input(0).network().set_layout("????c");
p.input(0).model().set_layout("????c");
p.input(1).tensor().set_layout("...c??");
p.input(1).network().set_layout("ndhwc");
p.input(1).model().set_layout("ndhwc");
p.input(2).tensor().set_layout("?cwh...");
p.input(2).network().set_layout("...hwc");
p.input(2).model().set_layout("...hwc");
p.input(3).tensor().set_layout("...c");
p.input(3).network().set_layout("c...");
p.input(3).model().set_layout("c...");
p.input(4).tensor().set_layout("...");
p.input(4).network().set_layout("c...");
p.input(4).model().set_layout("c...");
p.input(5).tensor().set_layout("...c");
p.input(5).network().set_layout("...");
p.input(5).model().set_layout("...");
p.input(6).tensor().set_layout("ndhwc");
p.input(6).network().set_layout("ndh?c");
p.input(6).model().set_layout("ndh?c");
p.input(7).tensor().set_layout("ndh?c");
p.input(7).network().set_layout("ndhwc");
p.input(7).model().set_layout("ndhwc");
f = p.build();
EXPECT_EQ(f->input(0).get_partial_shape(), (PartialShape{1, 5, 2, 3, 4}));
@ -846,16 +846,16 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_trivial) {
auto p = PrePostProcessor(f);
p.input(0).tensor().set_layout("...");
p.input(0).network().set_layout("c...");
p.input(0).model().set_layout("c...");
p.input(1).tensor().set_layout("...c");
p.input(1).network().set_layout("...");
p.input(1).model().set_layout("...");
p.input(2).tensor().set_layout("ndhwc");
p.input(2).network().set_layout("ndh?c");
p.input(2).model().set_layout("ndh?c");
p.input(3).tensor().set_layout("ndh?c");
p.input(3).network().set_layout("ndhwc");
p.input(3).model().set_layout("ndhwc");
f = p.build();
EXPECT_EQ(f->input(0).get_partial_shape(), (PartialShape{1, 2, 3, 4, 5}));
@ -873,7 +873,7 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_error) {
{
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("nch??");
p.input().network().set_layout("???wc");
p.input().model().set_layout("???wc");
f = p.build();
},
ov::AssertFailure);
@ -882,7 +882,7 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_error) {
{
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("nch??");
p.input().network().set_layout("???wc?");
p.input().model().set_layout("???wc?");
f = p.build();
},
ov::AssertFailure);
@ -899,7 +899,7 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_error_dyn_ran
{
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("nchw");
p.input().network().set_layout("...wc");
p.input().model().set_layout("...wc");
f = p.build();
},
ov::AssertFailure);
@ -908,7 +908,7 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_error_dyn_ran
{
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("nchw");
p.input().network().set_layout("??wc?");
p.input().model().set_layout("??wc?");
f = p.build();
},
ov::AssertFailure);
@ -1099,10 +1099,10 @@ TEST(pre_post_process, preprocess_keep_params_order) {
}
// --- PostProcess - set/convert layout ---
TEST(pre_post_process, postprocess_set_layout_network) {
TEST(pre_post_process, postprocess_set_layout_model) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
auto p = PrePostProcessor(f);
p.output().network().set_layout("NCHW");
p.output().model().set_layout("NCHW");
p.build();
EXPECT_EQ(f->get_results()[0]->get_layout(), "NCHW");
}
@ -1112,7 +1112,7 @@ TEST(pre_post_process, postprocess_convert_layout_implicit) {
auto p = PrePostProcessor(f);
p.output().network().set_layout("NCHW");
p.output().model().set_layout("NCHW");
p.output().tensor().set_layout("NHWC");
p.build();
EXPECT_EQ(f->get_results()[0]->get_layout(), "NHWC");
@ -1123,7 +1123,7 @@ TEST(pre_post_process, postprocess_convert_layout_explicit_no_target) {
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
auto p = PrePostProcessor(f);
p.output(1).network().set_layout("NCHW");
p.output(1).model().set_layout("NCHW");
p.output(1).postprocess().convert_layout("NHWC");
p.build();
EXPECT_EQ(f->get_results()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 3, 2, 2}));
@ -1135,7 +1135,7 @@ TEST(pre_post_process, postprocess_convert_layout_default) {
auto p = PrePostProcessor(f);
p.output().network().set_layout("NCHW");
p.output().model().set_layout("NCHW");
p.output().postprocess().convert_layout();
p.output().tensor().set_layout("NHWC");
p.build();
@ -1148,7 +1148,7 @@ TEST(pre_post_process, postprocess_convert_layout_default_getters) {
auto p = PrePostProcessor(f);
auto& out = p.output();
out.network().set_layout("NCHW");
out.model().set_layout("NCHW");
out.postprocess().convert_layout();
out.tensor().set_layout("NHWC");
f = p.build();
@ -1162,7 +1162,7 @@ TEST(pre_post_process, postprocess_convert_layout_same) {
auto p = PrePostProcessor(f);
p.output().network().set_layout("NCHW");
p.output().model().set_layout("NCHW");
p.output().postprocess().convert_layout("NCHW");
p.output().tensor().set_layout("NCHW");
p.build();
@ -1198,7 +1198,7 @@ TEST(pre_post_process, postprocess_convert_layout_has_layout) {
auto p = PrePostProcessor(f);
p.output().network().set_layout("NC??");
p.output().model().set_layout("NC??");
p.output().postprocess().convert_layout({0, 2, 3, 1});
p.build();
@ -1270,7 +1270,7 @@ TEST(pre_post_process, postprocess_implicit_convert_element_type_and_layout) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
auto p = PrePostProcessor(f);
p.output().network().set_layout("NCHW");
p.output().model().set_layout("NCHW");
p.output().tensor().set_layout("NHWC").set_element_type(element::u8);
p.build();
EXPECT_EQ(f->get_results()[0]->get_element_type(), element::u8);
@ -1292,8 +1292,8 @@ TEST(pre_post_process, postprocess_keep_results_order) {
auto names2 = f->output(2).get_tensor().get_names();
auto p = PrePostProcessor(f);
p.output(0).network().set_layout("NCHW");
p.output(1).network().set_layout("NCHW");
p.output(0).model().set_layout("NCHW");
p.output(1).model().set_layout("NCHW");
p.output(1).tensor().set_layout("NHWC").set_element_type(element::u8);
p.build();
ASSERT_EQ(f->get_results().size(), 3);
@ -1319,7 +1319,7 @@ TEST(pre_post_process, postprocess_many) {
bool custom_called = false;
auto p = PrePostProcessor(f);
p.output("tensor_output1").network().set_layout("NCHW");
p.output("tensor_output1").model().set_layout("NCHW");
p.output("tensor_output1")
.postprocess()
.convert_layout()

View File

@ -29,8 +29,6 @@
#include "ngraph/variant.hpp"
#include "openvino/core/deprecated.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/preprocess/input_network_info.hpp"
#include "openvino/core/preprocess/input_tensor_info.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/util/shared_object.hpp"

View File

@ -114,7 +114,7 @@ def pre_post_processing(function: Function, app_inputs_info, input_precision: st
# set layout for model input
for port, info in enumerate(app_inputs_info):
pre_post_processor.input(port).network().set_layout(info.layout)
pre_post_processor.input(port).model().set_layout(info.layout)
function = pre_post_processor.build()