[OV2.0] Preprocessing api cleanup (#8898)

* Removed 'inline' Preprocessing API

Even though this API provided a way to specify all pre/post-processing in one line, it was considered inconvenient.
With the 'getters' API, preprocessing code looks clearer to the user, so the old 'inline' API is removed.

* Fix pyopenvino build issues

* Update after merged PR#8717
This commit is contained in:
Mikhail Nosov
2021-11-30 12:30:13 +03:00
committed by GitHub
parent f6df0a9c13
commit e2172cd38a
19 changed files with 970 additions and 2018 deletions

View File

@@ -21,12 +21,12 @@ namespace {
struct RefPreprocessParams {
RefPreprocessParams(const std::string& val): name(val) {}
std::function<std::shared_ptr<ov::Function>()> function;
std::vector<Tensor> inputs;
std::vector<Tensor> expected;
float abs_threshold = 0.01f;
float rel_threshold = 0.01f;
std::string name;
std::function<std::shared_ptr<ov::Function>()> function;
std::vector<Tensor> inputs;
std::vector<Tensor> expected;
float abs_threshold = 0.01f;
float rel_threshold = 0.01f;
std::string name;
};
class ReferencePreprocessTest : public testing::TestWithParam<RefPreprocessParams>, public CommonReferenceTest {
@@ -95,7 +95,8 @@ static RefPreprocessParams simple_mean_scale() {
RefPreprocessParams res("simple_mean_scale");
res.function = []() {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
f = PrePostProcessor(f).input(InputInfo().preprocess(PreProcessSteps().mean(1.f).scale(2.f))).build();
auto p = PrePostProcessor(f);
p.input().preprocess().mean(1.f).scale(2.f); p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 3, 2, 2}, element::f32, std::vector<float>{1., 3., 5., 7., 9., 11., 13., 15., 17., 19., 21., 23.});
@@ -107,7 +108,8 @@ static RefPreprocessParams scale_then_mean() {
RefPreprocessParams res("scale_then_mean");
res.function = []() {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
f = PrePostProcessor(f).input(InputInfo().preprocess(PreProcessSteps().scale(2.0f).mean(2.0f))).build();
auto p = PrePostProcessor(f);
p.input().preprocess().scale(2.0f).mean(2.0f); p.build();
return f;
};
@@ -120,14 +122,15 @@ static RefPreprocessParams convert_only() {
RefPreprocessParams res("convert_only");
res.function = []() {
auto f = create_simple_function(element::f32, Shape{1, 1, 2, 2});
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::i16))
.preprocess(PreProcessSteps()
auto p = PrePostProcessor(f);
p.input()
.tensor().set_element_type(element::i16);
p.input().preprocess()
.convert_element_type(element::f32)
.scale(3.f)
.convert_element_type(element::u8)
.convert_element_type(element::f32)))
.build();
.convert_element_type(element::f32);
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 1, 2, 2}, element::i16, std::vector<int16_t>{2, 3, 4, 5});
@@ -139,14 +142,14 @@ static RefPreprocessParams convert_element_type_and_scale() {
RefPreprocessParams res("convert_element_type_and_scale");
res.function = []() {
auto f = create_simple_function(element::u8, Shape{1, 3, 2, 2});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::i16))
.preprocess(PreProcessSteps()
.convert_element_type(element::f32)
.scale(2.f)
.convert_element_type(element::u8)))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_element_type(element::i16);
p.input().preprocess()
.convert_element_type(element::f32)
.scale(2.f)
.convert_element_type(element::u8);
p.build();
return f;
};
@@ -161,11 +164,11 @@ static RefPreprocessParams tensor_element_type_and_scale() {
RefPreprocessParams res("tensor_element_type_and_scale");
res.function = []() {
auto f = create_simple_function(element::i8, Shape{1, 3, 1, 1});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::f32))
.preprocess(PreProcessSteps().scale(2.0f).convert_element_type(element::i8)))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_element_type(element::f32);
p.input().preprocess().scale(2.0f).convert_element_type(element::i8);
p.build();
return f;
};
@@ -178,13 +181,13 @@ static RefPreprocessParams custom_preprocessing() {
RefPreprocessParams res("custom_preprocessing");
res.function = []() {
auto f = create_simple_function(element::i32, Shape{1, 3, 1, 1});
f = PrePostProcessor(f)
.input(InputInfo().preprocess(PreProcessSteps().custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
})))
.build();
auto p = PrePostProcessor(f);
p.input().preprocess().custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
p.build();
return f;
};
@@ -193,42 +196,24 @@ static RefPreprocessParams custom_preprocessing() {
return res;
}
static RefPreprocessParams test_lvalue() {
RefPreprocessParams res("test_lvalue");
static RefPreprocessParams test_multiple() {
RefPreprocessParams res("test_multiple");
res.function = []() {
auto f = create_simple_function(element::i8, Shape{1, 3, 1, 1});
auto p = PrePostProcessor(f);
auto p1 = std::move(p);
p = std::move(p1);
auto inputInfo = InputInfo();
auto inputInfo2 = std::move(inputInfo);
inputInfo = std::move(inputInfo2);
{
auto inputTensorInfo = InputTensorInfo();
auto inputTensorInfo2 = std::move(inputTensorInfo);
inputTensorInfo = std::move(inputTensorInfo2);
auto &same = inputTensorInfo.set_element_type(element::f32);
same.set_layout("?CHW");
inputInfo.tensor(std::move(same));
}
{
auto preprocessSteps = PreProcessSteps();
auto preprocessSteps2 = std::move(preprocessSteps);
preprocessSteps = std::move(preprocessSteps2);
preprocessSteps.mean(1.f);
preprocessSteps.scale(2.f);
preprocessSteps.mean({1.f, 2.f, 3.f});
preprocessSteps.scale({2.f, 3.f, 4.f});
preprocessSteps.custom([](const Output<Node> &node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
auto &same = preprocessSteps.convert_element_type(element::i8);
inputInfo.preprocess(std::move(same));
}
p.input(std::move(inputInfo));
f = p.build();
p1.input().tensor().set_element_type(element::f32).set_layout("?CHW");
p1.input().preprocess().mean(1.f);
p1.input().preprocess().scale(2.f);
p1.input().preprocess().mean({1.f, 2.f, 3.f});
p1.input().preprocess().scale({2.f, 3.f, 4.f});
p1.input().preprocess().custom([](const Output<Node> &node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
p1.input().preprocess().convert_element_type(element::i8);
f = p1.build();
return f;
};
@@ -241,16 +226,12 @@ static RefPreprocessParams test_2_inputs_basic() {
RefPreprocessParams res("test_2_inputs_basic");
res.function = []() {
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 1});
f = PrePostProcessor(f).input(InputInfo(0)
.preprocess(
PreProcessSteps()
.mean(1.f)))
.input(
InputInfo("tensor_input2")
.preprocess(PreProcessSteps()
.mean(1.f)
.scale(2.0f)))
.build();
auto p = PrePostProcessor(f);
p.input(0).preprocess().mean(1.f);
p.input("tensor_input2").preprocess()
.mean(1.f)
.scale(2.0f);
p.build();
return f;
};
@@ -265,11 +246,11 @@ static RefPreprocessParams mean_scale_vector_tensor_layout() {
RefPreprocessParams res("mean_scale_vector_tensor_layout");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 3, 2, 1});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_layout("NC??"))
.preprocess(PreProcessSteps().mean({1.f, 2.f, 3.f}).scale({2.f, 3.f, 4.f})))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_layout("NC??");
p.input().preprocess().mean({1.f, 2.f, 3.f}).scale({2.f, 3.f, 4.f});
p.build();
return f;
};
@@ -282,11 +263,11 @@ static RefPreprocessParams mean_scale_dynamic_layout() {
RefPreprocessParams res("mean_scale_dynamic_layout");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 2, 1, 3});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_layout("N...C"))
.preprocess(PreProcessSteps().mean({1.f, 2.f, 3.f}).scale({2.f, 3.f, 4.f})))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_layout("N...C");
p.input().preprocess().mean({1.f, 2.f, 3.f}).scale({2.f, 3.f, 4.f});
p.build();
return f;
};
@@ -299,13 +280,12 @@ static RefPreprocessParams resize_to_network_height() {
RefPreprocessParams res("resize_to_network_height");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 2, 1, 1});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_dynamic_shape())
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NHWC"))
)
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NHWC");
p.build();
return f;
};
res.inputs.emplace_back(element::f32, Shape{1, 4, 1, 1}, std::vector<float>{0., 2., 4., 6.});
@@ -317,12 +297,12 @@ static RefPreprocessParams resize_to_network_width() {
RefPreprocessParams res("resize_to_network_width");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 1, 2, 2});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_dynamic_shape())
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(element::f32, Shape{1, 1, 2, 6}, std::vector<float>{0., 1., 2., 3., 4., 5.,
@@ -335,14 +315,12 @@ static RefPreprocessParams resize_from_spatial_dims() {
RefPreprocessParams res("resize_from_spatial_dims");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 1, 1, 1});
auto t = InputTensorInfo();
t.set_spatial_static_shape(1, 4);
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(std::move(t))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_CUBIC))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_spatial_static_shape(1, 4);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
p.input().network().set_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(element::f32, Shape{1, 1, 1, 7}, std::vector<float>{0., 0.25, 1., 2.25, 4., 6.25, 9});
@@ -354,13 +332,13 @@ static RefPreprocessParams resize_i8() {
RefPreprocessParams res("resize_i8");
res.function = []() {
auto f = create_simple_function(element::i8, PartialShape{1, 3, 1, 1});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_spatial_dynamic_shape())
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor()
.set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(element::i8, Shape{1, 3, 2, 2}, std::vector<int8_t>{0, 0, 0, 0,
@@ -374,12 +352,12 @@ static RefPreprocessParams resize_to_network_width_height() {
RefPreprocessParams res("resize_to_network_width_height");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 1, 4, 4});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(5, 5))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_NEAREST))
.network(InputNetworkInfo().set_layout("...HW")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_spatial_static_shape(5, 5);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST);
p.input().network().set_layout("...HW");
p.build();
return f;
};
@@ -404,12 +382,12 @@ static RefPreprocessParams resize_to_specified_width_height() {
RefPreprocessParams res("resize_to_specified_width_height");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 1, Dimension::dynamic(), Dimension::dynamic()});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_dynamic_shape())
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_NEAREST, 4, 4))
.network(InputNetworkInfo().set_layout("...HW")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor().set_spatial_dynamic_shape();
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST, 4, 4);
p.input().network().set_layout("...HW");
p.build();
return f;
};
@@ -430,52 +408,16 @@ static RefPreprocessParams resize_to_specified_width_height() {
return res;
}
static RefPreprocessParams resize_lvalues() {
RefPreprocessParams res("resize_lvalues");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 1, 1, 2});
f->get_parameters().front()->set_layout("NCHW");
auto t = InputTensorInfo();
t.set_spatial_dynamic_shape();
auto s = PreProcessSteps();
s.resize(ResizeAlgorithm::RESIZE_LINEAR, 1, 6); // to specified shape
s.resize(ResizeAlgorithm::RESIZE_LINEAR); // to network's shape
auto n = InputNetworkInfo();
n.set_layout("NCHW");
auto i = InputInfo();
i.tensor(std::move(t));
i.preprocess(std::move(s));
i.network(std::move(n));
f = PrePostProcessor(f)
.input(std::move(i))
.build();
return f;
};
// clang-format off
res.inputs.emplace_back(element::f32, Shape{1, 1, 1, 18}, std::vector<float>{0., 0., 0.,
1., 1., 1.,
2., 2., 2.,
3., 3., 3.,
4., 4., 4.,
5., 5., 5.});
// clang-format on
res.expected.emplace_back(Shape{1, 1, 2, 1}, element::f32, std::vector<float>{1., 4.});
return res;
}
static RefPreprocessParams convert_layout_nhwc_to_nchw_lvalue() {
RefPreprocessParams res("convert_layout_nhwc_to_nchw_lvalue");
static RefPreprocessParams convert_layout_nhwc_to_nchw() {
RefPreprocessParams res("convert_layout_nhwc_to_nchw");
res.function = []() {
auto f = create_simple_function(element::u8, {1, 3, 2, 2});
f->get_parameters()[0]->set_layout("NCHW");
auto p = PreProcessSteps();
p.convert_layout("NCHW");
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_layout("NHWC"))
.preprocess(std::move(p)))
.build();
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NHWC");
p.input().preprocess().convert_layout("NCHW");
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
@@ -493,13 +435,10 @@ static RefPreprocessParams convert_layout_nhwc_to_net_no_tensor_shape() {
res.function = []() {
auto f = create_simple_function(element::u8, {1, 3, 2, 2});
f->get_parameters()[0]->set_layout("NCHW");
auto p = PreProcessSteps();
p.convert_layout();
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo().set_layout("NHWC"))
.preprocess(std::move(p)))
.build();
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NHWC");
p.input().preprocess().convert_layout();
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
@@ -516,10 +455,9 @@ static RefPreprocessParams convert_layout_by_dims() {
RefPreprocessParams res("convert_layout_by_dims");
res.function = []() {
auto f = create_simple_function(element::u8, {1, 3, 2, 2});
f = PrePostProcessor(f)
.input(InputInfo()
.preprocess(PreProcessSteps().convert_layout({0, 3, 1, 2})))
.build();
auto p = PrePostProcessor(f);
p.input().preprocess().convert_layout({0, 3, 1, 2});
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
@@ -536,12 +474,10 @@ static RefPreprocessParams convert_layout_by_dims_multi() {
RefPreprocessParams res("convert_layout_by_dims_multi");
res.function = []() {
auto f = create_simple_function(element::f32, {1, 3, 2, 2});
auto p = PreProcessSteps();
p.convert_layout({0, 1, 3, 2}); // NHWC->NHCW
p.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
f = PrePostProcessor(f)
.input(InputInfo().preprocess(std::move(p)))
.build();
auto p = PrePostProcessor(f);
p.input().preprocess().convert_layout({0, 1, 3, 2}) // NHWC->NHCW
.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::f32, std::vector<float>{1, 2, 3, // [H=0, W=0]
@@ -558,14 +494,12 @@ static RefPreprocessParams convert_layout_by_dims_multi_layout() {
RefPreprocessParams res("convert_layout_by_dims_multi_layout");
res.function = []() {
auto f = create_simple_function(element::f32, {1, 3, 2, 2});
auto p = PreProcessSteps();
p.convert_layout({0, 1, 3, 2}); // NHWC->NHCW
p.mean({1, 2, 2}); // Apply means to 'C' channel
p.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
f = PrePostProcessor(f)
.input(InputInfo().tensor(InputTensorInfo().set_layout("N??C"))
.preprocess(std::move(p)))
.build();
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("N??C");
p.input().preprocess().convert_layout({0, 1, 3, 2}) // NHWC->NHCW
.mean({1, 2, 2}) // Apply means to 'C' channel
.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::f32, std::vector<float>{1, 2, 3, // [H=0, W=0, RGB]
@@ -582,16 +516,16 @@ static RefPreprocessParams resize_and_convert_layout() {
RefPreprocessParams res("resize_and_convert_layout");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 2, 2, 2});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout("NCHW")
.set_spatial_dynamic_shape())
.preprocess(PreProcessSteps()
.resize(ResizeAlgorithm::RESIZE_LINEAR)
.convert_layout())
.network(InputNetworkInfo().set_layout("NHWC")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor()
.set_layout("NCHW")
.set_spatial_dynamic_shape();
p.input().preprocess()
.resize(ResizeAlgorithm::RESIZE_LINEAR)
.convert_layout();
p.input().network().set_layout("NHWC");
p.build();
return f;
};
@@ -620,13 +554,13 @@ static RefPreprocessParams convert_color_nv12_to_bgr_two_planes() {
res.rel_threshold = 1.f; // Ignore relative pixel values comparison (100%)
res.function = []() {
auto f = create_simple_function(element::u8, PartialShape{1, 4, 4, 3});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_color_format(ColorFormat::NV12_TWO_PLANES))
.preprocess(PreProcessSteps()
.convert_color(ColorFormat::BGR)))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor()
.set_color_format(ColorFormat::NV12_TWO_PLANES);
p.input().preprocess()
.convert_color(ColorFormat::BGR);
p.build();
return f;
};
@@ -659,13 +593,13 @@ static RefPreprocessParams convert_color_nv12_single_plane() {
res.rel_threshold = 1.f; // Ignore relative pixel values comparison (100%)
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 4, 4, 3});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_color_format(ColorFormat::NV12_SINGLE_PLANE))
.preprocess(PreProcessSteps()
.convert_color(ColorFormat::RGB)))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor()
.set_color_format(ColorFormat::NV12_SINGLE_PLANE);
p.input().preprocess()
.convert_color(ColorFormat::RGB);
p.build();
return f;
};
@@ -680,7 +614,7 @@ static RefPreprocessParams convert_color_nv12_single_plane() {
255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0, // RRGG
0, 0, 255, 0, 0, 255, 255, 0, 0, 255, 0, 0, // BBRR
0, 0, 255, 0, 0, 255, 255, 0, 0, 255, 0, 0, // BBRR
};
};
auto out_shape = Shape{1, 4, 4, 3};
// clang-format on
res.inputs.emplace_back(element::f32, input_shape, input);
@@ -694,19 +628,19 @@ static RefPreprocessParams convert_color_nv12_layout_resize() {
res.rel_threshold = 1.f; // Ignore relative pixel values comparison (100%)
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 3, 2, 2});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_color_format(ColorFormat::NV12_SINGLE_PLANE)
.set_element_type(element::u8)
.set_spatial_dynamic_shape())
.preprocess(PreProcessSteps()
.convert_color(ColorFormat::RGB)
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_NEAREST))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor()
.set_color_format(ColorFormat::NV12_SINGLE_PLANE)
.set_element_type(element::u8)
.set_spatial_dynamic_shape();
p.input().preprocess()
.convert_color(ColorFormat::RGB)
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_NEAREST);
p.input().network().set_layout("NCHW");
p.build();
return f;
};
@@ -734,16 +668,16 @@ static RefPreprocessParams element_type_before_convert_color_nv12() {
res.rel_threshold = 1.f; // Ignore relative pixel values comparison (100%)
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 2, 2, 3});
f = PrePostProcessor(f)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_element_type(element::u8)
.set_color_format(ColorFormat::NV12_TWO_PLANES))
.preprocess(PreProcessSteps()
.convert_element_type(element::f32)
.convert_color(ColorFormat::RGB))
.network(InputNetworkInfo().set_layout("NHWC")))
.build();
auto p = PrePostProcessor(f);
p.input()
.tensor()
.set_element_type(element::u8)
.set_color_format(ColorFormat::NV12_TWO_PLANES);
p.input().preprocess()
.convert_element_type(element::f32)
.convert_color(ColorFormat::RGB);
p.input().network().set_layout("NHWC");
p.build();
return f;
};
@@ -836,15 +770,15 @@ static RefPreprocessParams postprocess_2_inputs_basic() {
RefPreprocessParams res("postprocess_2_inputs_basic");
res.function = []() {
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 2});
f = PrePostProcessor(f)
.output(OutputInfo("tensor_output1")
.network(OutputNetworkInfo().set_layout("NCHW"))
.postprocess(PostProcessSteps().convert_layout())
.tensor(OutputTensorInfo().set_layout("NHWC")))
.output(OutputInfo("tensor_output2")
.postprocess(PostProcessSteps().convert_element_type())
.tensor(OutputTensorInfo().set_element_type(element::u8)))
.build();
auto p = PrePostProcessor(f);
p.output("tensor_output1")
.network().set_layout("NCHW");
p.output("tensor_output1").postprocess().convert_layout();
p.output("tensor_output1").tensor().set_layout("NHWC");
p.output("tensor_output2")
.postprocess().convert_element_type();
p.output("tensor_output2").tensor().set_element_type(element::u8);
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 3, 1, 2}, element::f32, std::vector<float>{1.1, 2.1, 3.1, 4.1, 5.1, 6.1});
@@ -858,10 +792,10 @@ static RefPreprocessParams post_convert_layout_by_dims() {
RefPreprocessParams res("post_convert_layout_by_dims");
res.function = []() {
auto f = create_simple_function(element::u8, {1, 2, 2, 3});
f = PrePostProcessor(f)
.output(OutputInfo()
.postprocess(PostProcessSteps().convert_layout({0, 3, 1, 2})))
.build();
auto p = PrePostProcessor(f);
p.output()
.postprocess().convert_layout({0, 3, 1, 2});
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::u8, std::vector<uint8_t>{1, 2, 3, // [H=0, W=0, RGB]
@@ -878,12 +812,10 @@ static RefPreprocessParams post_convert_layout_by_dims_multi() {
RefPreprocessParams res("post_convert_layout_by_dims_multi");
res.function = []() {
auto f = create_simple_function(element::f32, {1, 2, 2, 3});
auto p = PostProcessSteps();
p.convert_layout({0, 1, 3, 2}); // NHWC->NHCW
p.convert_layout({0, 2, 1, 3}); // NHCW->NCHW
f = PrePostProcessor(f)
.output(OutputInfo().postprocess(std::move(p)))
.build();
auto p = PrePostProcessor(f);
p.output().postprocess().convert_layout({0, 1, 3, 2}); // NHWC->NHCW;
p.output().postprocess().convert_layout({0, 2, 1, 3}); // NHCW->NCHW;
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 2, 2, 3}, element::f32, std::vector<float>{1, 2, 3, // [H=0, W=0]
@@ -900,20 +832,19 @@ static RefPreprocessParams pre_and_post_processing() {
RefPreprocessParams res("pre_and_post_processing");
res.function = []() {
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 2});
f = PrePostProcessor(f)
.input(InputInfo(0)
.tensor(InputTensorInfo().set_element_type(element::u8))
.preprocess(PreProcessSteps().convert_element_type(element::f32).mean(1.f)))
.input(InputInfo(1)
.preprocess(PreProcessSteps().scale(2.f)))
.output(OutputInfo("tensor_output1")
.network(OutputNetworkInfo().set_layout("NCHW"))
.postprocess(PostProcessSteps().convert_layout())
.tensor(OutputTensorInfo().set_layout("NHWC")))
.output(OutputInfo("tensor_output2")
.postprocess(PostProcessSteps().convert_element_type())
.tensor(OutputTensorInfo().set_element_type(element::u8)))
.build();
auto p = PrePostProcessor(f);
p.input(0)
.tensor().set_element_type(element::u8);
p.input(0).preprocess().convert_element_type(element::f32).mean(1.f);
p.input(1).preprocess().scale(2.f);
p.output("tensor_output1")
.network().set_layout("NCHW");
p.output("tensor_output1").postprocess().convert_layout();
p.output("tensor_output1").tensor().set_layout("NHWC");
p.output("tensor_output2")
.postprocess().convert_element_type();
p.output("tensor_output2").tensor().set_element_type(element::u8);
p.build();
return f;
};
res.inputs.emplace_back(Shape{1, 3, 1, 2}, element::u8, std::vector<uint8_t>{1, 2, 3, 4, 5, 6});
@@ -927,9 +858,10 @@ static RefPreprocessParams rgb_to_bgr() {
RefPreprocessParams res("rgb_to_bgr");
res.function = []() {
auto f = create_simple_function(element::f32, Shape{2, 1, 1, 3});
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_color_format(ColorFormat::RGB))
.preprocess(PreProcessSteps().convert_color(ColorFormat::BGR))).build();
auto p = PrePostProcessor(f);
p.input().tensor().set_color_format(ColorFormat::RGB);
p.input().preprocess().convert_color(ColorFormat::BGR);
p.build();
return f;
};
@@ -942,9 +874,10 @@ static RefPreprocessParams bgr_to_rgb() {
RefPreprocessParams res("bgr_to_rgb");
res.function = []() {
auto f = create_simple_function(element::f32, Shape{2, 1, 1, 3});
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_color_format(ColorFormat::BGR))
.preprocess(PreProcessSteps().convert_color(ColorFormat::RGB))).build();
auto p = PrePostProcessor(f);
p.input().tensor().set_color_format(ColorFormat::BGR);
p.input().preprocess().convert_color(ColorFormat::RGB);
p.build();
return f;
};
@@ -957,9 +890,10 @@ static RefPreprocessParams reverse_channels_nchw() {
RefPreprocessParams res("reverse_channels_nchw");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 2, 2, 2});
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_layout("NCHW"))
.preprocess(PreProcessSteps().reverse_channels())).build();
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NCHW");
p.input().preprocess().reverse_channels();
p.build();
return f;
};
@@ -1004,14 +938,13 @@ static RefPreprocessParams color_cut_last_channel() {
return res;
}
static RefPreprocessParams reverse_channels_dyn_layout() {
RefPreprocessParams res("reverse_channels_dyn_layout");
res.function = []() {
auto f = create_simple_function(element::f32, PartialShape{1, 1, 3, 2});
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_color_format(ColorFormat::BGR).set_layout("...CN"))
.preprocess(PreProcessSteps().convert_color(ColorFormat::RGB))).build();
auto p = PrePostProcessor(f);
p.input().tensor().set_color_format(ColorFormat::BGR).set_layout("...CN");
p.input().preprocess().convert_color(ColorFormat::RGB); p.build();
return f;
};
@@ -1024,12 +957,13 @@ static RefPreprocessParams reverse_dyn_shape() {
RefPreprocessParams res("reverse_dyn_shape");
res.function = []() {
auto f = create_simple_function(element::u8, PartialShape{Dimension::dynamic(),
Dimension::dynamic(),
Dimension::dynamic(),
Dimension::dynamic()});
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_layout("NCHW"))
.preprocess(PreProcessSteps().reverse_channels())).build();
Dimension::dynamic(),
Dimension::dynamic(),
Dimension::dynamic()});
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("NCHW");
p.input().preprocess().reverse_channels();
p.build();
return f;
};
@@ -1042,11 +976,10 @@ static RefPreprocessParams reverse_fully_dyn_shape() {
RefPreprocessParams res("reverse_fully_dyn_shape");
res.function = []() {
auto f = create_simple_function(element::u8, PartialShape::dynamic());
auto p = PreProcessSteps();
p.reverse_channels();
f = PrePostProcessor(f).input(InputInfo()
.tensor(InputTensorInfo().set_layout("...C??"))
.preprocess(std::move(p))).build();
auto p = PrePostProcessor(f);
p.input().tensor().set_layout("...C??");
p.input().preprocess().reverse_channels();
p.build();
return f;
};
@@ -1057,47 +990,46 @@ static RefPreprocessParams reverse_fully_dyn_shape() {
std::vector<RefPreprocessParams> allPreprocessTests() {
return std::vector<RefPreprocessParams> {
simple_mean_scale(),
scale_then_mean(),
convert_only(),
convert_element_type_and_scale(),
tensor_element_type_and_scale(),
custom_preprocessing(),
test_lvalue(),
test_2_inputs_basic(),
mean_scale_vector_tensor_layout(),
mean_scale_dynamic_layout(),
resize_to_network_height(),
resize_to_network_width(),
resize_from_spatial_dims(),
resize_i8(),
resize_to_network_width_height(),
resize_to_specified_width_height(),
resize_lvalues(),
convert_layout_nhwc_to_nchw_lvalue(),
convert_layout_nhwc_to_net_no_tensor_shape(),
convert_layout_by_dims(),
convert_layout_by_dims_multi(),
convert_layout_by_dims_multi_layout(),
resize_and_convert_layout(),
convert_color_nv12_to_bgr_two_planes(),
convert_color_nv12_single_plane(),
convert_color_nv12_layout_resize(),
element_type_before_convert_color_nv12(),
convert_color_i420_to_bgr_three_planes(),
convert_color_i420_single_plane(),
postprocess_2_inputs_basic(),
post_convert_layout_by_dims(),
post_convert_layout_by_dims_multi(),
pre_and_post_processing(),
rgb_to_bgr(),
bgr_to_rgb(),
color_cut_last_channel(),
reverse_channels_nchw(),
reverse_channels_dyn_layout(),
reverse_dyn_shape(),
reverse_fully_dyn_shape()
};
simple_mean_scale(),
scale_then_mean(),
convert_only(),
convert_element_type_and_scale(),
tensor_element_type_and_scale(),
custom_preprocessing(),
test_multiple(),
test_2_inputs_basic(),
mean_scale_vector_tensor_layout(),
mean_scale_dynamic_layout(),
resize_to_network_height(),
resize_to_network_width(),
resize_from_spatial_dims(),
resize_i8(),
resize_to_network_width_height(),
resize_to_specified_width_height(),
convert_layout_nhwc_to_nchw(),
convert_layout_nhwc_to_net_no_tensor_shape(),
convert_layout_by_dims(),
convert_layout_by_dims_multi(),
convert_layout_by_dims_multi_layout(),
resize_and_convert_layout(),
convert_color_nv12_to_bgr_two_planes(),
convert_color_nv12_single_plane(),
convert_color_nv12_layout_resize(),
element_type_before_convert_color_nv12(),
convert_color_i420_to_bgr_three_planes(),
convert_color_i420_single_plane(),
postprocess_2_inputs_basic(),
post_convert_layout_by_dims(),
post_convert_layout_by_dims_multi(),
pre_and_post_processing(),
rgb_to_bgr(),
bgr_to_rgb(),
color_cut_last_channel(),
reverse_channels_nchw(),
reverse_channels_dyn_layout(),
reverse_dyn_shape(),
reverse_fully_dyn_shape()
};
}
INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferencePreprocessTest,

View File

@@ -57,7 +57,9 @@ static std::shared_ptr<Function> create_simple_function_yuv(const PartialShape&
TEST_F(ReferencePreprocessLegacyTest, mean) {
function = create_simple_function(element::f32, Shape{1, 3, 2, 2});
function = PrePostProcessor(function).input(InputInfo().preprocess(PreProcessSteps().mean(1.f))).build();
auto p = PrePostProcessor(function);
p.input().preprocess().mean(1.f);
p.build();
auto f2 = create_simple_function(element::f32, Shape{1, 3, 2, 2});
legacy_network = InferenceEngine::CNNNetwork(f2);
@@ -75,7 +77,9 @@ TEST_F(ReferencePreprocessLegacyTest, mean) {
TEST_F(ReferencePreprocessLegacyTest, mean_scale) {
function = create_simple_function(element::f32, Shape{1, 3, 20, 20});
function = PrePostProcessor(function).input(InputInfo().preprocess(PreProcessSteps().scale(2.f))).build();
auto p = PrePostProcessor(function);
p.input().preprocess().scale(2.f);
p.build();
auto f2 = create_simple_function(element::f32, Shape{1, 3, 20, 20});
legacy_network = InferenceEngine::CNNNetwork(f2);
@@ -96,11 +100,11 @@ TEST_F(ReferencePreprocessLegacyTest, resize) {
auto f2 = create_simple_function(element::f32, Shape{1, 3, 5, 5});
legacy_network = InferenceEngine::CNNNetwork(f2);
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_layout("NCHW").set_spatial_static_shape(42, 30))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NCHW").set_spatial_static_shape(42, 30);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
p.build();
auto &preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess();
preProcess.setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR);
@@ -177,12 +181,11 @@ public:
inputData.clear();
legacy_input_blobs.clear();
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_color_format(
ColorFormat::NV12_SINGLE_PLANE))
.preprocess(PreProcessSteps().convert_color(ColorFormat::BGR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
p.input().preprocess().convert_color(ColorFormat::BGR);
p.input().network().set_layout("NCHW");
p.build();
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), ov20_input_yuv.data());

View File

@@ -109,11 +109,10 @@ TEST_F(PreprocessOpenCVReferenceTest_YUV, convert_nv12_full_color_range) {
inputData.clear();
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_color_format(
ColorFormat::NV12_SINGLE_PLANE))
.preprocess(PreProcessSteps().convert_color(ColorFormat::BGR)))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
p.input().preprocess().convert_color(ColorFormat::BGR);
function = p.build();
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), ov20_input_yuv.data());
@@ -138,12 +137,10 @@ TEST_F(PreprocessOpenCVReferenceTest_YUV, convert_nv12_colored) {
inputData.clear();
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_color_format(
ColorFormat::NV12_SINGLE_PLANE))
.preprocess(PreProcessSteps().convert_color(ColorFormat::BGR))
)
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
p.input().preprocess().convert_color(ColorFormat::BGR);
function = p.build();
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_yuv.data());
@@ -165,12 +162,11 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_u8_simple_linear) {
inputData.clear();
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(2, 2))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(2, 2);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
@@ -204,12 +200,11 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_u8_large_picture_linear) {
inputData.clear();
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(input_height, input_width))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(input_height, input_width);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
@@ -242,12 +237,11 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_f32_large_picture_linear) {
inputData.clear();
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(input_height, input_width))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(input_height, input_width);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
const auto &param = function->get_parameters()[0];
inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
@@ -271,12 +265,11 @@ TEST_F(PreprocessOpenCVReferenceTest, DISABLED_resize_f32_large_picture_cubic_sm
auto element_type = element::f32;
auto input_img = std::vector<float> {1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f, 1.f, 1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f, 1.f};
function = create_simple_function(element_type, func_shape);
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(input_height, input_width))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_CUBIC))
.network(InputNetworkInfo().set_layout("NCHW"))
)
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(input_height, input_width);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
p.input().network().set_layout("NCHW");
function = p.build();
inputData.emplace_back(element_type, input_shape, input_img.data());

View File

@@ -136,8 +136,7 @@ public:
continue;
}
if (inType != ov::element::Type_t::undefined) {
p.input(ov::preprocess::InputInfo(i)
.tensor(ov::preprocess::InputTensorInfo().set_element_type(inType)));
p.input(i).tensor().set_element_type(inType);
}
}
}
@@ -145,8 +144,7 @@ public:
auto results = function->get_results();
for (size_t i = 0; i < results.size(); i++) {
if (outType != ov::element::Type_t::undefined) {
p.output(ov::preprocess::OutputInfo(i)
.tensor(ov::preprocess::OutputTensorInfo().set_element_type(outType)));
p.output(i).tensor().set_element_type(outType);
}
}
}

View File

@@ -76,12 +76,11 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
auto ie = ov::runtime::Core();
using namespace ov::preprocess;
auto function = PrePostProcessor(fn_ptr)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(ov::element::i8))
.preprocess(PreProcessSteps().convert_element_type(ov::element::f32)))
.build();
auto p = PrePostProcessor(fn_ptr);
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
auto exec_net = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
RemoteTensorSharingType sharing_type = GetParam();
@@ -258,11 +257,10 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserContext) {
auto ie = ov::runtime::Core();
using namespace ov::preprocess;
auto function = PrePostProcessor(fn_ptr)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(ov::element::i8))
.preprocess(PreProcessSteps().convert_element_type(ov::element::f32)))
.build();
auto p = PrePostProcessor(fn_ptr);
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
auto exec_net_regular = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
auto input = function->get_parameters().at(0);
@@ -302,11 +300,10 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserContextWithMultipleDevices) {
auto ie = ov::runtime::Core();
using namespace ov::preprocess;
auto function = PrePostProcessor(fn_ptr)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(ov::element::i8))
.preprocess(PreProcessSteps().convert_element_type(ov::element::f32)))
.build();
auto p = PrePostProcessor(fn_ptr);
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
auto exec_net_regular = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
auto input = function->get_parameters().at(0);
@@ -351,11 +348,10 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserQueue_out_of_order) {
auto ie = ov::runtime::Core();
using namespace ov::preprocess;
auto function = PrePostProcessor(fn_ptr)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(ov::element::i8))
.preprocess(PreProcessSteps().convert_element_type(ov::element::f32)))
.build();
auto p = PrePostProcessor(fn_ptr);
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
auto exec_net_regular = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
auto input = function->get_parameters().at(0);
@@ -431,11 +427,10 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserQueue_in_order) {
auto ie = ov::runtime::Core();
using namespace ov::preprocess;
auto function = PrePostProcessor(fn_ptr)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(ov::element::i8))
.preprocess(PreProcessSteps().convert_element_type(ov::element::f32)))
.build();
auto p = PrePostProcessor(fn_ptr);
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
auto exec_net_regular = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
auto input = function->get_parameters().at(0);
@@ -541,11 +536,10 @@ TEST_P(OVRemoteTensorBatched_Test, DISABLED_canInputNV12) {
auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
using namespace ov::preprocess;
auto function = PrePostProcessor(fn_ptr_remote)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(ov::element::i8).set_color_format(ov::preprocess::ColorFormat::NV12_TWO_PLANES))
.preprocess(PreProcessSteps().convert_element_type(ov::element::f32)))
.build();
auto p = PrePostProcessor(fn_ptr_remote);
p.input().tensor().set_element_type(ov::element::i8).set_color_format(ov::preprocess::ColorFormat::NV12_TWO_PLANES);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
auto exec_net_b = ie.compile_model(fn_ptr_remote, CommonTestUtils::DEVICE_GPU);
auto inf_req_remote = exec_net_b.create_infer_request();

View File

@@ -140,8 +140,7 @@ void SubgraphBaseTest::configure_model() {
auto& params = function->get_parameters();
for (size_t i = 0; i < params.size(); i++) {
if (inType != ov::element::Type_t::undefined) {
p.input(ov::preprocess::InputInfo(i)
.tensor(ov::preprocess::InputTensorInfo().set_element_type(inType)));
p.input(i).tensor().set_element_type(inType);
}
}
}
@@ -151,8 +150,7 @@ void SubgraphBaseTest::configure_model() {
auto results = function->get_results();
for (size_t i = 0; i < results.size(); i++) {
if (outType != ov::element::Type_t::undefined) {
p.output(ov::preprocess::OutputInfo(i)
.tensor(ov::preprocess::OutputTensorInfo().set_element_type(outType)));
p.output(i).tensor().set_element_type(outType);
}
}
}
@@ -220,7 +218,7 @@ std::vector<ov::runtime::Tensor> SubgraphBaseTest::calculate_refs() {
if (itr != inputs.end()) {
auto elementType = itr->second.get_element_type();
if (inputNodes[i].get_element_type() != elementType) {
p.input(ov::preprocess::InputInfo(i).tensor(ov::preprocess::InputTensorInfo().set_element_type(elementType)));
p.input(i).tensor().set_element_type(elementType);
}
} else {
std::stringstream errMsg;
@@ -233,7 +231,7 @@ std::vector<ov::runtime::Tensor> SubgraphBaseTest::calculate_refs() {
const auto& outputs = functionToProcess->outputs();
for (size_t i = 0; i < outputs.size(); ++i) {
if (outType != ElementType::undefined && outType != outputs[i].get_element_type()) {
p.output(ov::preprocess::OutputInfo(i).tensor(ov::preprocess::OutputTensorInfo().set_element_type(outType)));
p.output(i).tensor().set_element_type(outType);
}
}

View File

@@ -81,120 +81,108 @@ inline std::shared_ptr<Function> create_preprocess_2inputs_trivial() {
inline std::shared_ptr<Function> mean_only() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, Shape{1, 3, 24, 24});
function = PrePostProcessor(function).input(InputInfo().preprocess(PreProcessSteps().mean(1.1f))).build();
auto p = PrePostProcessor(function);
p.input().preprocess().mean(1.1f);
function = p.build();
return function;
}
inline std::shared_ptr<Function> scale_only() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, Shape{1, 3, 24, 24});
function = PrePostProcessor(function).input(InputInfo().preprocess(PreProcessSteps().scale(2.1f))).build();
auto p = PrePostProcessor(function);
p.input().preprocess().scale(2.1f);
function = p.build();
return function;
}
inline std::shared_ptr<Function> mean_scale() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, Shape{1, 3, 24, 24});
function = PrePostProcessor(function).input(InputInfo().preprocess(PreProcessSteps().mean(1.1f).scale(2.1f))).build();
auto p = PrePostProcessor(function);
p.input().preprocess().mean(1.1f).scale(2.1f);
function = p.build();
return function;
}
inline std::shared_ptr<Function> scale_mean() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, Shape{1, 3, 24, 24});
function = PrePostProcessor(function).input(InputInfo().preprocess(PreProcessSteps().scale(2.1f).mean(1.1f))).build();
auto p = PrePostProcessor(function);
p.input().preprocess().scale(2.1f).mean(1.1f);
function = p.build();
return function;
}
inline std::shared_ptr<Function> mean_vector() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, Shape{1, 3, 24, 24});
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_layout("NCHW"))
.preprocess(PreProcessSteps().mean({2.2f, 3.3f, 4.4f}))).build();
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NCHW");
p.input().preprocess().mean({2.2f, 3.3f, 4.4f});
function = p.build();
return function;
}
inline std::shared_ptr<Function> scale_vector() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, Shape{1, 3, 24, 24});
function = PrePostProcessor(function).input(InputInfo()
.tensor(InputTensorInfo().set_layout("NCHW"))
.preprocess(PreProcessSteps().scale({2.2f, 3.3f, 4.4f}))).build();
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NCHW");
p.input().preprocess().scale({2.2f, 3.3f, 4.4f});
function = p.build();
return function;
}
inline std::shared_ptr<Function> convert_element_type_and_mean() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::u8, Shape{1, 3, 24, 24});
function = PrePostProcessor(function)
.input(InputInfo()
.preprocess(PreProcessSteps()
.convert_element_type(element::f32)
.mean(0.2f)
.convert_element_type(element::u8)))
.build();
auto p = PrePostProcessor(function);
p.input().preprocess().convert_element_type(element::f32).mean(0.2f).convert_element_type(element::u8);
function = p.build();
return function;
}
inline std::shared_ptr<Function> tensor_element_type_and_mean() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::u8, Shape{1, 3, 12, 12});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::f32))
.preprocess(PreProcessSteps().mean(0.1f).convert_element_type(element::u8)))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_element_type(element::f32);
p.input().preprocess().mean(0.1f).convert_element_type(element::u8);
function = p.build();
return function;
}
inline std::shared_ptr<Function> custom_preprocessing() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i32, Shape{3, 4, 10, 20});
function = PrePostProcessor(function)
.input(InputInfo().preprocess(PreProcessSteps().custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
})))
.build();
auto p = PrePostProcessor(function);
p.input().preprocess().custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
function = p.build();
return function;
}
inline std::shared_ptr<Function> lvalues_multiple_ops() {
inline std::shared_ptr<Function> multiple_ops() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::u8, Shape{1, 3, 3, 3});
auto p = PrePostProcessor(function);
auto p1 = std::move(p);
p = std::move(p1);
auto inputInfo = InputInfo();
auto inputInfo2 = std::move(inputInfo);
inputInfo = std::move(inputInfo2);
{
auto inputTensorInfo = InputTensorInfo();
auto inputTensorInfo2 = std::move(inputTensorInfo);
inputTensorInfo = std::move(inputTensorInfo2);
auto& same = inputTensorInfo.set_element_type(element::f32);
same.set_layout("?CHW");
inputInfo.tensor(std::move(same));
}
{
auto preprocessSteps = PreProcessSteps();
auto preprocessSteps2 = std::move(preprocessSteps);
preprocessSteps = std::move(preprocessSteps2);
preprocessSteps.mean(1.f);
preprocessSteps.scale(2.f);
preprocessSteps.mean({1.1f, 2.2f, 3.3f});
preprocessSteps.scale({2.f, 3.f, 4.f});
preprocessSteps.custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
auto& same = preprocessSteps.convert_element_type(element::u8);
inputInfo.preprocess(std::move(same));
}
p.input(std::move(inputInfo));
p.input().tensor().set_element_type(element::f32).set_layout("?CHW");
p.input().preprocess().mean(1.f)
.scale(2.f)
.mean({1.1f, 2.2f, 3.3f})
.scale({2.f, 3.f, 4.f})
.custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
});
p.input().preprocess().convert_element_type(element::u8);
function = p.build();
return function;
}
@@ -202,14 +190,18 @@ inline std::shared_ptr<Function> lvalues_multiple_ops() {
inline std::shared_ptr<Function> two_inputs_basic() {
using namespace ov::preprocess;
auto function = create_preprocess_2inputs(element::f32, Shape{1, 3, 1, 1});
function = PrePostProcessor(function).input(InputInfo(1).preprocess(PreProcessSteps().mean(1.f).scale(2.0f))).build();
auto p = PrePostProcessor(function);
p.input(1).preprocess().mean(1.f).scale(2.0f);
function = p.build();
return function;
}
inline std::shared_ptr<Function> two_inputs_trivial() {
using namespace ov::preprocess;
auto function = create_preprocess_2inputs_trivial();
function = PrePostProcessor(function).input(InputInfo(1).preprocess(PreProcessSteps().mean(1.f).scale(2.0f))).build();
auto p = PrePostProcessor(function);
p.input(1).preprocess().mean(1.f).scale(2.0f);
function = p.build();
return function;
}
@@ -217,9 +209,9 @@ inline std::shared_ptr<Function> reuse_network_layout() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{4, 3, 2, 1});
function->get_parameters().front()->set_layout("NC??");
function = PrePostProcessor(function)
.input(InputInfo().preprocess(PreProcessSteps().mean({1.1f, 2.2f, 3.3f}).scale({2.f, 3.f, 4.f})))
.build();
auto p = PrePostProcessor(function);
p.input().preprocess().mean({1.1f, 2.2f, 3.3f}).scale({2.f, 3.f, 4.f});
function = p.build();
return function;
}
@@ -227,143 +219,123 @@ inline std::shared_ptr<Function> tensor_layout() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{4, 3, 2, 1});
function->get_parameters().front()->set_layout("NC??");
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_layout("NC??"))
.preprocess(PreProcessSteps().mean({1.1f, 2.2f, 3.3f}).scale({2.f, 3.f, 4.f})))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NC??");
p.input().preprocess().mean({1.1f, 2.2f, 3.3f}).scale({2.f, 3.f, 4.f});
function = p.build();
return function;
}
inline std::shared_ptr<Function> resize_linear() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 3, 10, 10});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(20, 20))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(20, 20);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
return function;
}
inline std::shared_ptr<Function> resize_nearest() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 3, 10, 10});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(20, 20))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_NEAREST))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(20, 20);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST);
p.input().network().set_layout("NCHW");
function = p.build();
return function;
}
inline std::shared_ptr<Function> resize_linear_nhwc() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 10, 10, 3});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(20, 20))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NHWC")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(20, 20);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NHWC");
function = p.build();
return function;
}
inline std::shared_ptr<Function> resize_cubic() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 3, 20, 20});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_spatial_static_shape(10, 10))
.preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_CUBIC))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_spatial_static_shape(10, 10);
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
p.input().network().set_layout("NCHW");
function = p.build();
return function;
}
inline std::shared_ptr<Function> resize_and_convert_layout() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 30, 20, 3});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout("NHWC")
.set_spatial_static_shape(40, 30))
.preprocess(PreProcessSteps()
.convert_layout()
.resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NHWC").set_spatial_static_shape(40, 30);
p.input().preprocess().convert_layout().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
return function;
}
inline std::shared_ptr<Function> convert_layout_by_dims() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 30, 20, 3});
function = PrePostProcessor(function)
.input(InputInfo()
.preprocess(PreProcessSteps()
.convert_layout({0, 3, 1, 2})))
.build();
auto p = PrePostProcessor(function);
p.input().preprocess().convert_layout({0, 3, 1, 2});
function = p.build();
return function;
}
inline std::shared_ptr<Function> resize_and_convert_layout_i8() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i8, PartialShape{1, 30, 20, 3});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout("NHWC")
.set_spatial_static_shape(40, 30))
.preprocess(PreProcessSteps()
.convert_layout()
.resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_layout("NHWC").set_spatial_static_shape(40, 30);
p.input().preprocess().convert_layout().resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
return function;
}
inline std::shared_ptr<Function> cvt_color_nv12_to_rgb_single_plane() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 20, 20, 3});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_color_format(ColorFormat::NV12_SINGLE_PLANE))
.preprocess(PreProcessSteps().convert_color(ColorFormat::RGB)))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
p.input().preprocess().convert_color(ColorFormat::RGB);
function = p.build();
return function;
}
inline std::shared_ptr<Function> cvt_color_nv12_to_bgr_two_planes() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 20, 20, 3});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo().set_color_format(ColorFormat::NV12_TWO_PLANES))
.preprocess(PreProcessSteps().convert_color(ColorFormat::BGR)))
.build();
auto p = PrePostProcessor(function);
p.input().tensor().set_color_format(ColorFormat::NV12_TWO_PLANES);
p.input().preprocess().convert_color(ColorFormat::BGR);
function = p.build();
return function;
}
inline std::shared_ptr<Function> cvt_color_nv12_cvt_layout_resize() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{1, 3, 10, 10});
function = PrePostProcessor(function)
.input(InputInfo()
.tensor(InputTensorInfo()
.set_color_format(ColorFormat::NV12_TWO_PLANES)
.set_element_type(element::u8)
.set_spatial_static_shape(20, 20))
.preprocess(PreProcessSteps()
.convert_color(ColorFormat::RGB)
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_LINEAR))
.network(InputNetworkInfo().set_layout("NCHW")))
.build();
auto p = PrePostProcessor(function);
p.input().tensor()
.set_color_format(ColorFormat::NV12_TWO_PLANES)
.set_element_type(element::u8)
.set_spatial_static_shape(20, 20);
p.input().preprocess()
.convert_color(ColorFormat::RGB)
.convert_layout()
.convert_element_type(element::f32)
.resize(ResizeAlgorithm::RESIZE_LINEAR);
p.input().network().set_layout("NCHW");
function = p.build();
return function;
}
@@ -407,7 +379,7 @@ inline std::vector<preprocess_func> generic_preprocess_functions() {
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
preprocess_func(tensor_element_type_and_mean, "tensor_element_type_and_mean", 0.01f),
preprocess_func(custom_preprocessing, "custom_preprocessing", 0.01f),
preprocess_func(lvalues_multiple_ops, "lvalues_multiple_ops", 0.01f),
preprocess_func(multiple_ops, "multiple_ops", 0.01f),
preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f),

View File

@@ -276,23 +276,18 @@ int main(int argc, char* argv[]) {
const Layout tensor_layout{"NHWC"};
// apply preprocessing
// clang-format off
model = ov::preprocess::PrePostProcessor(model)
// 1) InputInfo() with no args assumes a model has a single input
.input(ov::preprocess::InputInfo()
// 2) Set input tensor information:
// - precision of tensor is supposed to be 'u8'
// - layout of data is 'NHWC'
.tensor(ov::preprocess::InputTensorInfo()
.set_layout(tensor_layout)
.set_element_type(element::u8))
// 3) Here we suppose model has 'NCHW' layout for input
.network(ov::preprocess::InputNetworkInfo()
.set_layout("NCHW")))
auto proc = ov::preprocess::PrePostProcessor(model);
// 1) InputInfo() with no args assumes a model has a single input
auto& input_info = proc.input();
// 2) Set input tensor information:
// - layout of data is 'NHWC'
// - precision of tensor is supposed to be 'u8'
input_info.tensor().set_layout(tensor_layout).set_element_type(element::u8);
// 3) Here we suppose model has 'NCHW' layout for input
input_info.network().set_layout("NCHW");
// 4) Once the build() method is called, the preprocessing steps
// for layout and precision conversions are inserted automatically
.build();
// clang-format on
model = proc.build();
// -------- Step 4. Read input images --------

View File

@@ -23,31 +23,15 @@ class OPENVINO_API InputInfo final {
std::unique_ptr<InputInfoImpl> m_impl;
friend class PrePostProcessor;
public:
/// \brief Empty constructor. Should be used only if network will have only one input
///
/// \todo Consider remove it (don't allow user to create standalone objects)
/// \brief Empty constructor for internal usage
InputInfo();
/// \brief Constructor for particular input index of model
///
/// \todo Consider remove it (don't allow user to create standalone objects)
///
/// \param input_index Index to address specified input parameter of model
explicit InputInfo(size_t input_index);
public:
/// \brief Move constructor
InputInfo(InputInfo&& other) noexcept;
/// \brief Constructor for particular output of model addressed by it's input name
///
/// \todo Consider remove it (don't allow user to create standalone objects)
///
/// \param input_tensor_name Name of input tensor name
explicit InputInfo(const std::string& input_tensor_name);
/// \brief Default move constructor
InputInfo(InputInfo&&) noexcept;
/// \brief Default move assignment operator
InputInfo& operator=(InputInfo&&) noexcept;
/// \brief Move assignment operator
InputInfo& operator=(InputInfo&& other) noexcept;
/// \brief Default destructor
~InputInfo();
@@ -66,61 +50,6 @@ public:
///
/// \return Reference to current network's input information structure
InputNetworkInfo& network();
/// \brief Set input tensor information for input - Lvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Input tensor information.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputInfo& tensor(InputTensorInfo&& builder) &;
/// \brief Set input tensor information for input - Rvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Input tensor information.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
InputInfo&& tensor(InputTensorInfo&& builder) &&;
/// \brief Set preprocessing operations for input - Lvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Preprocessing operations.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputInfo& preprocess(PreProcessSteps&& builder) &;
/// \brief Set preprocessing operations for input - Rvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Preprocessing operations.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
InputInfo&& preprocess(PreProcessSteps&& builder) &&;
/// \brief Set network's tensor information for input - Lvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Input network tensor information.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputInfo& network(InputNetworkInfo&& builder) &;
/// \brief Set input tensor information for input - Rvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Input network tensor information.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
InputInfo&& network(InputNetworkInfo&& builder) &&;
};
} // namespace preprocess

View File

@@ -16,34 +16,23 @@ namespace preprocess {
///
/// Example of usage of network 'layout':
/// Suppose network has input parameter with shape {1, 3, 224, 224} and user needs to resize input image to network's
/// dimensions It can be done like this
/// dimensions. It can be done like this
///
/// \code{.cpp}
/// <network has input parameter with shape {1, 3, 224, 224}>
/// auto proc =
/// PrePostProcessor(function)
/// .input(InputInfo()
/// .tensor(<input tensor info>)
/// .preprocess(PreProcessSteps().resize(ResizeAlgorithm::RESIZE_LINEAR))
/// .network(InputNetworkInfo()
/// .set_layout("NCHW"))
/// );
/// auto proc = PrePostProcessor(function);
/// proc.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
/// proc.input().network().set_layout("NCHW");
/// \endcode
class OPENVINO_API InputNetworkInfo final {
class InputNetworkInfoImpl;
std::unique_ptr<InputNetworkInfoImpl> m_impl;
friend class InputInfo;
public:
/// \brief Default empty constructor
InputNetworkInfo();
/// \brief Default move constructor
InputNetworkInfo(InputNetworkInfo&&) noexcept;
/// \brief Default move assignment
InputNetworkInfo& operator=(InputNetworkInfo&&) noexcept;
public:
/// \brief Default destructor
~InputNetworkInfo();
@@ -53,15 +42,7 @@ public:
/// \param layout Layout for network's input tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputNetworkInfo& set_layout(const ov::Layout& layout) &;
/// \brief Set layout for network's input tensor
/// This version allows chaining for Rvalue objects
///
/// \param layout Layout for network's input tensor.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
InputNetworkInfo&& set_layout(const ov::Layout& layout) &&;
InputNetworkInfo& set_layout(const ov::Layout& layout);
};
} // namespace preprocess

View File

@@ -28,111 +28,56 @@ public:
};
/// \brief Information about user's input tensor. By default, it will be initialized to same data (type/shape/etc) as
/// network's input parameter User application can override particular parameters (like 'element_type') according to
/// network's input parameter. User application can override particular parameters (like 'element_type') according to
/// application's data and specify appropriate conversions in pre-processing steps
///
/// \code{.cpp}
/// auto proc =
/// PrePostProcessor(function)
/// .input(InputInfo()
/// .tensor(InputTensorInfo()
/// .set_element_type(ov::element::u8))
/// .preprocess(<add steps + conversion to network's input element type>)
/// );
/// auto proc = PrePostProcessor(function);
/// proc.input().tensor().set_element_type(ov::element::u8);
/// \endcode
class OPENVINO_API InputTensorInfo final {
class InputTensorInfoImpl;
std::unique_ptr<InputTensorInfoImpl> m_impl;
friend class InputInfo;
public:
/// \brief Default empty constructor
/// \brief Default internal empty constructor
InputTensorInfo();
/// \brief Default move constructor
InputTensorInfo(InputTensorInfo&&) noexcept;
/// \brief Default move assignment
InputTensorInfo& operator=(InputTensorInfo&&) noexcept;
public:
/// \brief Default destructor
~InputTensorInfo();
/// \brief Set element type for user's input tensor
/// This version allows chaining for Lvalue objects
///
/// \param type Element type for user's input tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputTensorInfo& set_element_type(const ov::element::Type& type) &;
/// \brief Set element type for user's input tensor
/// This version allows chaining for Rvalue objects
///
/// \param type Element type for user's input tensor.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
InputTensorInfo&& set_element_type(const ov::element::Type& type) &&;
InputTensorInfo& set_element_type(const ov::element::Type& type);
/// \brief Set layout for user's input tensor
/// This version allows chaining for Lvalue objects
///
/// \param layout Layout for user's input tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputTensorInfo& set_layout(const ov::Layout& layout) &;
/// \brief Set layout for user's input tensor
/// This version allows chaining for Rvalue objects
///
/// \param layout Layout for user's input tensor.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
InputTensorInfo&& set_layout(const ov::Layout& layout) &&;
InputTensorInfo& set_layout(const ov::Layout& layout);
/// \brief By default, input image shape is inherited from network input shape. This method specifies that user's
/// input image has dynamic spatial dimensions (width & height). This can be useful for adding resize preprocessing
/// from any input image to network's expected dimensions.
///
/// This version allows chaining for Lvalue objects.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo& set_spatial_dynamic_shape() &;
/// \brief By default, input image shape is inherited from network input shape. This method specifies that user's
/// input image has dynamic spatial dimensions (width & height). This can be useful for adding resize preprocessing
/// from any input image to network's expected dimensions.
///
/// This version allows chaining for Rvalue objects.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo&& set_spatial_dynamic_shape() &&;
InputTensorInfo& set_spatial_dynamic_shape();
/// \brief By default, input image shape is inherited from network input shape. Use this method to specify different
/// width and height of user's input image. In case if input image size is not known, use
/// `set_spatial_dynamic_shape` method.
///
/// This version allows chaining for Lvalue objects.
///
/// \param height Set fixed user's input image height.
///
/// \param width Set fixed user's input image width.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo& set_spatial_static_shape(size_t height, size_t width) &;
/// \brief By default, input image shape is inherited from network input shape. Use this method to specify different
/// width and height of user's input image. In case if input image size is not known, use
/// `set_spatial_dynamic_shape` method.
///
/// This version allows chaining for Rvalue objects.
///
/// \param height Set fixed user's input image height.
///
/// \param width Set fixed user's input image width.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo&& set_spatial_static_shape(size_t height, size_t width) &&;
InputTensorInfo& set_spatial_static_shape(size_t height, size_t width);
/// \brief Set color format for user's input tensor.
///
@@ -142,8 +87,6 @@ public:
/// place of original parameter. This means that all parameters located after will shift their positions accordingly
/// (e.g. {param1, param2} will become {param1/Y, param1/UV, param2})
///
/// This version allows chaining for Lvalue objects.
///
/// \param format Color format of input image.
///
/// \param sub_names Optional list of sub-names assigned for each plane (e.g. {"Y", "UV"}). If specified, number of
@@ -152,35 +95,14 @@ public:
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo& set_color_format(const ov::preprocess::ColorFormat& format,
const std::vector<std::string>& sub_names = {}) &;
/// \brief Set color format for user's input tensor.
///
/// In general way, some formats support multi-plane input, e.g. NV12 image can be represented as 2 separate tensors
/// (planes): Y plane and UV plane. set_color_format API also allows to set sub_names for such parameters for
/// convenient usage of plane parameters. During build stage, new parameters for each plane will be inserted to the
/// place of original parameter. This means that all parameters located after will shift their positions accordingly
/// (e.g. {param1, param2} will become {param1/Y, param1/UV, param2})
///
/// This version allows chaining for Rvalue objects.
///
/// \param format Color format of input image.
///
/// \param sub_names Optional list of sub-names assigned for each plane (e.g. {"Y", "UV"}). If specified, number of
/// sub-names shall match with number of planes. If not specified, friendly name and tensor name for plane
/// parameters will be empty. It is not allowed to specify sub-names for single-plane inputs.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo&& set_color_format(const ov::preprocess::ColorFormat& format,
const std::vector<std::string>& sub_names = {}) &&;
const std::vector<std::string>& sub_names = {});
/// \brief Set memory type runtime information for user's input tensor
///
/// \param memory_type Memory type. Refer to specific plugin's documentation for exact string format
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
InputTensorInfo& set_memory_type(const std::string& memory_type) &;
InputTensorInfo&& set_memory_type(const std::string& memory_type) &&;
InputTensorInfo& set_memory_type(const std::string& memory_type);
};
} // namespace preprocess

View File

@@ -22,31 +22,15 @@ class OPENVINO_API OutputInfo final {
std::unique_ptr<OutputInfoImpl> m_impl;
friend class PrePostProcessor;
public:
/// \brief Empty constructor. Should be used only if network has exactly one output
///
/// \todo Consider making this private to not allow user to create standalone object
/// \brief Empty internal default constructor
OutputInfo();
/// \brief Constructor for particular output index of model
///
/// \todo Consider remove it (don't allow user to create standalone objects)
///
/// \param output_index Index to address specified output parameter of model
explicit OutputInfo(size_t output_index);
public:
/// \brief Move constructor
OutputInfo(OutputInfo&& other) noexcept;
/// \brief Constructor for particular output of model addressed by its output tensor name
///
/// \todo Consider remove it (don't allow user to create standalone objects)
///
/// \param output_tensor_name Name of output tensor
explicit OutputInfo(const std::string& output_tensor_name);
/// \brief Default move constructor
OutputInfo(OutputInfo&&) noexcept;
/// \brief Default move assignment operator
OutputInfo& operator=(OutputInfo&&) noexcept;
/// \brief Move assignment operator
OutputInfo& operator=(OutputInfo&& other) noexcept;
/// \brief Default destructor
~OutputInfo();
@@ -65,61 +49,6 @@ public:
///
/// \return Reference to current output tensor structure
OutputTensorInfo& tensor();
/// \brief Set network's tensor information for output - Lvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Output network tensor information.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
OutputInfo& network(OutputNetworkInfo&& builder) &;
/// \brief Set network's tensor information for output - Rvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Output network tensor information.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
OutputInfo&& network(OutputNetworkInfo&& builder) &&;
/// \brief Set postprocessing operations for output - Lvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Postprocessing operations.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
OutputInfo& postprocess(PostProcessSteps&& builder) &;
/// \brief Set postprocessing operations for output - Rvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Postprocessing operations.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
OutputInfo&& postprocess(PostProcessSteps&& builder) &&;
/// \brief Set final output tensor information for output after postprocessing - Lvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Output tensor information.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
OutputInfo& tensor(OutputTensorInfo&& builder) &;
/// \brief Set final output tensor information for output after postprocessing - Rvalue version
///
/// \todo Consider removing it in future
///
/// \param builder Output tensor information.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
OutputInfo&& tensor(OutputTensorInfo&& builder) &&;
};
} // namespace preprocess

View File

@@ -29,34 +29,19 @@ class OPENVINO_API OutputNetworkInfo final {
std::unique_ptr<OutputNetworkInfoImpl> m_impl;
friend class OutputInfo;
public:
/// \brief Default empty constructor
/// \brief Default internal empty constructor
OutputNetworkInfo();
/// \brief Default move constructor
OutputNetworkInfo(OutputNetworkInfo&&) noexcept;
/// \brief Default move assignment
OutputNetworkInfo& operator=(OutputNetworkInfo&&) noexcept;
public:
/// \brief Default destructor
~OutputNetworkInfo();
/// \brief Set layout for network's output tensor
/// This version allows chaining for Lvalue objects
///
/// \param layout Layout for network's output tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
OutputNetworkInfo& set_layout(const ov::Layout& layout) &;
/// \brief Set layout for network's output tensor
/// This version allows chaining for Rvalue objects
///
/// \param layout Layout for network's output tensor.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
OutputNetworkInfo&& set_layout(const ov::Layout& layout) &&;
OutputNetworkInfo& set_layout(const ov::Layout& layout);
};
} // namespace preprocess

View File

@@ -27,52 +27,26 @@ class OPENVINO_API OutputTensorInfo final {
std::unique_ptr<OutputTensorInfoImpl> m_impl;
friend class OutputInfo;
public:
/// \brief Default empty constructor
///
/// \todo Consider making this private to not allow user to create standalone object
/// \brief Default empty internal constructor
OutputTensorInfo();
/// \brief Default move constructor
OutputTensorInfo(OutputTensorInfo&&) noexcept;
/// \brief Default move assignment
OutputTensorInfo& operator=(OutputTensorInfo&&) noexcept;
public:
/// \brief Default destructor
~OutputTensorInfo();
/// \brief Set element type for user's desired output tensor.
/// This version allows chaining for Lvalue objects.
///
/// \param type Element type for user's output tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
OutputTensorInfo& set_element_type(const ov::element::Type& type) &;
/// \brief Set element type for user's desired output tensor.
/// This version allows chaining for Rvalue objects.
///
/// \param type Element type for user's output tensor.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
OutputTensorInfo&& set_element_type(const ov::element::Type& type) &&;
OutputTensorInfo& set_element_type(const ov::element::Type& type);
/// \brief Set layout for user's output tensor.
/// This version allows chaining for Lvalue objects
///
/// \param layout Layout for user's output tensor.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
OutputTensorInfo& set_layout(const ov::Layout& layout) &;
/// \brief Set layout for user's output tensor.
/// This version allows chaining for Rvalue objects
///
/// \param layout Layout for user's output tensor.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
OutputTensorInfo&& set_layout(const ov::Layout& layout) &&;
OutputTensorInfo& set_layout(const ov::Layout& layout);
};
} // namespace preprocess

View File

@@ -26,36 +26,21 @@ class OPENVINO_API PostProcessSteps final {
std::unique_ptr<PostProcessStepsImpl> m_impl;
friend class OutputInfo;
public:
/// \brief Default empty constructor
///
/// \todo Consider remove it (don't allow user to create standalone objects)
/// \brief Default empty internal constructor
PostProcessSteps();
/// \brief Default move constructor
PostProcessSteps(PostProcessSteps&&) noexcept;
/// \brief Default move assignment operator
PostProcessSteps& operator=(PostProcessSteps&&) noexcept;
public:
/// \brief Default destructor
~PostProcessSteps();
/// \brief Add convert element type post-process operation - Lvalue version
/// \brief Add convert element type post-process operation
///
/// \param type Desired type of output. If not specified, type will be obtained from 'tensor' output information
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PostProcessSteps& convert_element_type(const ov::element::Type& type = {}) &;
PostProcessSteps& convert_element_type(const ov::element::Type& type = {});
/// \brief Add convert element type post-process operation - Rvalue version
///
/// \param type Desired type of output. If not specified, type will be obtained from 'tensor' output information
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PostProcessSteps&& convert_element_type(const ov::element::Type& type = {}) &&;
/// \brief Add 'convert layout' operation to specified layout - Lvalue version.
/// \brief Add 'convert layout' operation to specified layout.
///
/// \details Adds appropriate 'transpose' operation between network layout and user's desired layout.
/// Current implementation requires source and destination layout to have same number of dimensions
@@ -63,46 +48,32 @@ public:
/// \example Example: when network data has output in 'NCHW' layout ([1, 3, 224, 224]) but user needs
/// interleaved output image ('NHWC', [1, 224, 224, 3]). Post-processing may look like this:
///
/// \code{.cpp} auto proc =
/// PrePostProcessor(function)
/// .output(OutputInfo()
/// .network(OutputTensorInfo().set_layout("NCHW")) // Network output is NCHW
/// .postprocess(PostProcessSteps()
/// .convert_layout("NHWC")) // User needs output as NHWC
/// );
/// \code{.cpp} auto proc = PrePostProcessor(function);
/// proc.output().network().set_layout("NCHW"); // Network output is NCHW
/// proc.output().postprocess().convert_layout("NHWC"); // User needs output as NHWC
/// \endcode
///
/// \param dst_layout New layout after conversion. If not specified - destination layout is obtained from
/// appropriate tensor output properties.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PostProcessSteps& convert_layout(const Layout& dst_layout = {}) &;
/// \brief Add convert_layout operation to network dimensions - Rvalue version.
///
/// \param dst_layout New layout after conversion. If not specified - destination layout is obtained from
/// appropriate tensor output properties.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
PostProcessSteps&& convert_layout(const Layout& dst_layout = {}) &&;
PostProcessSteps& convert_layout(const Layout& dst_layout = {});
/// \brief Add convert layout operation by direct specification of transposed dimensions.
///
/// \example Example: network produces output with shape [1, 3, 480, 640] and user needs
/// interleaved output image [1, 480, 640, 3]. Post-processing may look like this:
///
/// \code{.cpp} auto proc =
/// PrePostProcessor(function)
/// .output(OutputInfo()
/// .postprocess(PostProcessSteps()
/// .convert_layout({0, 2, 3, 1})
/// );
/// \code{.cpp} auto proc = PrePostProcessor(function);
/// proc.output().postprocess().convert_layout({0, 2, 3, 1});
/// function = proc.build();
/// \endcode
///
/// \param dims Dimensions array specifying places for new axis. If not empty, array size (N) must match to input
/// shape rank. Array values shall contain all values from 0 to N-1. If empty, no actual conversion will be added.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PostProcessSteps& convert_layout(const std::vector<uint64_t>& dims) &;
PostProcessSteps&& convert_layout(const std::vector<uint64_t>& dims) &&;
PostProcessSteps& convert_layout(const std::vector<uint64_t>& dims);
/// \brief Signature for custom postprocessing operation. Custom postprocessing operation takes one output node and
/// produces one output node. For more advanced cases, client's code can use transformation passes over ov::Function
@@ -113,21 +84,13 @@ public:
/// \return New node after applying custom post-processing operation
using CustomPostprocessOp = std::function<ov::Output<ov::Node>(const ov::Output<ov::Node>& node)>;
/// \brief Add custom post-process operation - Lvalue version
/// \brief Add custom post-process operation.
/// Client application can specify callback function for custom action
///
/// \param postprocess_cb Client's custom postprocess operation.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PostProcessSteps& custom(const CustomPostprocessOp& postprocess_cb) &;
/// \brief Add custom post-process operation - Rvalue version
/// Client application can specify callback function for custom action
///
/// \param postprocess_cb Client's custom postprocess operation.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PostProcessSteps&& custom(const CustomPostprocessOp& postprocess_cb) &&;
PostProcessSteps& custom(const CustomPostprocessOp& postprocess_cb);
};
} // namespace preprocess

View File

@@ -94,44 +94,6 @@ public:
///
/// \return Function with added pre/post-processing operations
std::shared_ptr<Function> build();
//------------------ TODO: consider removal of rest --------
/// \brief Adds pre-processing information and steps to input of model.
///
/// \todo TODO: Consider remove this in sake of `InputInfo& input(...)` version
///
/// \param builder Pre-processing data for input tensor of model.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PrePostProcessor& input(InputInfo&& builder) &;
/// \brief Adds pre-processing information and steps to input of model - Rvalue version.
///
/// \todo TODO: Consider remove this in sake of `InputInfo& input(...)` version
///
/// \param builder Pre-processing data for input tensor of model.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PrePostProcessor&& input(InputInfo&& builder) &&;
/// \brief Adds post-processing information and steps to output of model.
///
/// \todo TODO: Consider remove this in sake of `OutputInfo& output(...)` version
///
/// \param builder Post-processing data for output tensor of model.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PrePostProcessor& output(OutputInfo&& builder) &;
/// \brief Adds pre-processing information and steps to input of model - Rvalue version.
///
/// \todo TODO: Consider remove this in sake of `OutputInfo& output(...)` version
///
/// \param builder Post-processing data for output tensor of model.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PrePostProcessor&& output(OutputInfo&& builder) &&;
};
} // namespace preprocess

View File

@@ -18,124 +18,67 @@ namespace preprocess {
/// \brief Preprocessing steps. Each step typically intends adding of some operation to input parameter
/// User application can specify sequence of preprocessing steps in a builder-like manner
/// \code{.cpp}
/// auto proc = PrePostProcessor(function)
/// .input(InputInfo()
/// .preprocess(PreProcessSteps()
/// auto proc = PrePostProcessor(function);
/// proc.input().preprocess()
/// .mean(0.2f) // Subtract 0.2 from each element
/// .scale(2.3f)) // then divide each element to 2.3
/// );
/// .scale(2.3f)); // then divide each element by 2.3
/// \endcode
class OPENVINO_API PreProcessSteps final {
class PreProcessStepsImpl;
std::unique_ptr<PreProcessStepsImpl> m_impl;
friend class InputInfo;
public:
/// \brief Default empty constructor
/// \brief Default empty internal constructor
PreProcessSteps();
/// \brief Default move constructor
PreProcessSteps(PreProcessSteps&&) noexcept;
/// \brief Default move assignment operator
PreProcessSteps& operator=(PreProcessSteps&&) noexcept;
public:
/// \brief Default destructor
~PreProcessSteps();
/// \brief Add convert element type preprocess operation - Lvalue version
/// \brief Add convert element type preprocess operation
///
/// \param type Desired type of input.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& convert_element_type(const ov::element::Type& type = {}) &;
/// \brief Add convert element type preprocess operation - Rvalue version
///
/// \param type Desired type of input.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& convert_element_type(const ov::element::Type& type = {}) &&;
PreProcessSteps& convert_element_type(const ov::element::Type& type = {});
/// \brief Converts color format for user's input tensor. Requires source color format to be specified by
/// InputTensorInfo::set_color_format.
///
/// This version allows chaining for Lvalue objects
///
/// \param dst_format Destination color format of input image
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& convert_color(const ov::preprocess::ColorFormat& dst_format) &;
PreProcessSteps& convert_color(const ov::preprocess::ColorFormat& dst_format);
/// \brief Converts color format for user's input tensor. Requires source color format to be specified by
/// InputTensorInfo::set_color_format.
///
/// This version allows chaining for Rvalue objects.
///
/// \param dst_format Color format of input image.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& convert_color(const ov::preprocess::ColorFormat& dst_format) &&;
/// \brief Add scale preprocess operation - Lvalue version
/// \brief Add scale preprocess operation
/// Divide each element of input by specified value
///
/// \param value Scaling value.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& scale(float value) &;
PreProcessSteps& scale(float value);
/// \brief Add scale preprocess operation - Rvalue version
/// Divide each element of input by specified value
///
/// \param value Scaling value.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& scale(float value) &&;
/// \brief Add scale preprocess operation - Lvalue version
/// \brief Add scale preprocess operation by specified array of scale values for each channel
///
/// \param values Scaling values. Layout runtime info with channels dimension must be specified for input tensor
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& scale(const std::vector<float>& values) &;
PreProcessSteps& scale(const std::vector<float>& values);
/// \brief Add scale preprocess operation - Rvalue version
///
/// \param values Scaling values. Layout runtime info with channels dimension must be specified for input tensor
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& scale(const std::vector<float>& values) &&;
/// \brief Add mean preprocess operation - Lvalue version
/// \brief Add mean preprocess operation
/// Subtract specified value from each element of input
///
/// \param value Value to subtract from each element.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& mean(float value) &;
PreProcessSteps& mean(float value);
/// \brief Add mean preprocess operation - Rvalue version
/// Subtract specified value from each element of input
///
/// \param value Value to subtract from each element.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& mean(float value) &&;
/// \brief Add mean preprocess operation - Lvalue version
/// \brief Add mean preprocess operation by specified array of mean values for each channel
///
/// \param values Mean values. Layout runtime info with channels dimension must be specified for input tensor
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& mean(const std::vector<float>& values) &;
/// \brief Add mean preprocess operation - Rvalue version
///
/// \param values Mean values. Layout runtime info with channels dimension must be specified for input tensor
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& mean(const std::vector<float>& values) &&;
PreProcessSteps& mean(const std::vector<float>& values);
/// \brief Signature for custom preprocessing operation. Custom preprocessing operation takes one input node and
/// produces one output node. For more advanced cases, client's code can use transformation passes over ov::Function
@@ -146,21 +89,13 @@ public:
/// \return New node after applying custom preprocessing operation
using CustomPreprocessOp = std::function<Output<Node>(const Output<Node>& node)>;
/// \brief Add custom preprocess operation - Lvalue version
/// \brief Add custom preprocess operation
/// Client application can specify callback function for custom action
///
/// \param preprocess_cb Client's custom preprocess operation.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps& custom(const CustomPreprocessOp& preprocess_cb) &;
/// \brief Add custom preprocess operation - Rvalue version
/// Client application can specify callback function for custom action
///
/// \param preprocess_cb Client's custom preprocess operation.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner
PreProcessSteps&& custom(const CustomPreprocessOp& preprocess_cb) &&;
PreProcessSteps& custom(const CustomPreprocessOp& preprocess_cb);
/// \brief Add resize operation to known dimensions - Lvalue version.
///
@@ -171,34 +106,16 @@ public:
/// \param dst_width Desired width of resized image.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width) &;
PreProcessSteps& resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width);
/// \brief Add resize operation to known dimensions - Rvalue version.
///
/// \param alg Resize algorithm.
///
/// \param dst_height Desired height of resized image.
///
/// \param dst_width Desired width of resized image.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps&& resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width) &&;
/// \brief Add resize operation to network dimensions - Lvalue version.
/// \brief Add resize operation to network's dimensions.
///
/// \param alg Resize algorithm.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& resize(ResizeAlgorithm alg) &;
PreProcessSteps& resize(ResizeAlgorithm alg);
/// \brief Add resize operation to network dimensions - Rvalue version.
///
/// \param alg Resize algorithm.
///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps&& resize(ResizeAlgorithm alg) &&;
/// \brief Add 'convert layout' operation to specified layout - Lvalue version.
/// \brief Add 'convert layout' operation to specified layout.
///
/// \details Adds appropriate 'transpose' operation between user layout and target layout.
/// Current implementation requires source and destination layout to have same number of dimensions
@@ -206,41 +123,33 @@ public:
/// \example Example: when user data has 'NHWC' layout (example is RGB image, [1, 224, 224, 3]) but network expects
/// planar input image ('NCHW', [1, 3, 224, 224]). Preprocessing may look like this:
///
/// \code{.cpp} auto proc =
/// PrePostProcessor(function)
/// .input(InputInfo()
/// .tensor(InputTensorInfo().set_layout("NHWC")) // User data is NHWC
/// .preprocess(PreProcessSteps()
/// .convert_layout("NCHW")) // Network expects input as NCHW
/// );
/// \code{.cpp} auto proc = PrePostProcessor(function);
/// proc.input().tensor().set_layout("NHWC"); // User data is NHWC
/// proc.input().preprocess().convert_layout("NCHW")) // Network expects input as NCHW
/// \endcode
///
/// \param dst_layout New layout after conversion. If not specified - destination layout is obtained from
/// appropriate network input properties.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& convert_layout(const Layout& dst_layout = {}) &;
PreProcessSteps&& convert_layout(const Layout& dst_layout = {}) &&;
PreProcessSteps& convert_layout(const Layout& dst_layout = {});
/// \brief Add convert layout operation by direct specification of transposed dimensions.
///
/// \example Example: when user data has input RGB image {1x480x640x3} but network expects
/// planar input image ('NCHW', [1, 3, 480, 640]). Preprocessing may look like this:
///
/// \code{.cpp} auto proc =
/// PrePostProcessor(function)
/// .input(InputInfo()
/// .preprocess(PreProcessSteps()
/// .convert_layout({0, 3, 1, 2})
/// );
/// \code{.cpp}
/// auto proc = PrePostProcessor(function);
/// proc.input().preprocess().convert_layout({0, 3, 1, 2});
///
/// \param dims Dimensions array specifying places for new axis. If not empty, array size (N) must match to input
/// shape rank. Array values shall contain all values from 0 to N-1. If empty, no actual conversion will be added.
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& convert_layout(const std::vector<uint64_t>& dims) &;
PreProcessSteps&& convert_layout(const std::vector<uint64_t>& dims) &&;
PreProcessSteps& convert_layout(const std::vector<uint64_t>& dims);
/// \brief Reverse channels operation - Lvalue version.
/// \brief Reverse channels operation.
///
/// \details Adds appropriate operation which reverses channels layout. Operation requires layout having 'C'
/// dimension Operation convert_color (RGB<->BGR) does reversing of channels also, but only for NHWC layout
@@ -248,18 +157,14 @@ public:
/// \example Example: when user data has 'NCHW' layout (example is [1, 3, 224, 224] RGB order) but network expects
/// BGR planes order. Preprocessing may look like this:
///
/// \code{.cpp} auto proc =
/// PrePostProcessor(function)
/// .input(InputInfo()
/// .tensor(InputTensorInfo().set_layout("NCHW")) // User data is NCHW
/// .preprocess(PreProcessSteps()
/// .reverse_channels()
/// );
/// \code{.cpp}
/// auto proc = PrePostProcessor(function);
/// proc.input().tensor().set_layout("NCHW"); // User data is NCHW
/// proc.input().preprocess().reverse_channels();
/// \endcode
///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
PreProcessSteps& reverse_channels() &;
PreProcessSteps&& reverse_channels() &&;
PreProcessSteps& reverse_channels();
};
} // namespace preprocess

View File

@@ -170,16 +170,6 @@ class OutputNetworkInfo::OutputNetworkInfoImpl : public NetworkInfoImpl {};
/// \brief InputInfoImpl - internal data structure
struct InputInfo::InputInfoImpl {
InputInfoImpl() = default;
explicit InputInfoImpl(size_t idx) : m_has_index(true), m_index(idx) {}
explicit InputInfoImpl(std::string name) : m_has_name(true), m_name(std::move(name)) {}
bool has_index() const {
return m_has_index;
}
bool has_name() const {
return m_has_name;
}
std::unique_ptr<InputTensorInfo::InputTensorInfoImpl>& get_tensor_data() {
return m_tensor_info.m_impl;
@@ -193,10 +183,6 @@ struct InputInfo::InputInfoImpl {
return m_network_data.m_impl;
}
bool m_has_index = false;
size_t m_index = 0;
bool m_has_name = false;
std::string m_name;
InputTensorInfo m_tensor_info;
PreProcessSteps m_preprocess;
InputNetworkInfo m_network_data;
@@ -206,16 +192,6 @@ struct InputInfo::InputInfoImpl {
/// \brief OutputInfoImpl - internal data structure
struct OutputInfo::OutputInfoImpl {
OutputInfoImpl() = default;
explicit OutputInfoImpl(size_t idx) : m_has_index(true), m_index(idx) {}
explicit OutputInfoImpl(std::string name) : m_has_name(true), m_name(std::move(name)) {}
bool has_index() const {
return m_has_index;
}
bool has_name() const {
return m_has_name;
}
std::unique_ptr<OutputTensorInfo::OutputTensorInfoImpl>& get_tensor_data() {
return m_tensor_info.m_impl;
@@ -229,28 +205,18 @@ struct OutputInfo::OutputInfoImpl {
return m_network_info.m_impl;
}
bool m_has_index = false;
size_t m_index = 0;
bool m_has_name = false;
std::string m_name;
OutputTensorInfo m_tensor_info;
PostProcessSteps m_postprocess;
OutputNetworkInfo m_network_info;
ov::Output<ov::Node> m_output_node;
};
//-------------- InputInfo ------------------
InputInfo::InputInfo() : m_impl(std::unique_ptr<InputInfoImpl>(new InputInfoImpl)) {}
InputInfo::InputInfo(size_t input_index) : m_impl(std::unique_ptr<InputInfoImpl>(new InputInfoImpl(input_index))) {}
InputInfo::InputInfo(const std::string& input_tensor_name)
: m_impl(std::unique_ptr<InputInfoImpl>(new InputInfoImpl(input_tensor_name))) {}
InputInfo::InputInfo(InputInfo&&) noexcept = default;
InputInfo& InputInfo::operator=(InputInfo&&) noexcept = default;
InputInfo::~InputInfo() = default;
InputInfo::InputInfo(InputInfo&& other) noexcept = default;
InputInfo& InputInfo::operator=(InputInfo&& other) noexcept = default;
InputInfo& InputInfo::tensor(InputTensorInfo&& builder) & {
m_impl->m_tensor_info = std::move(builder);
return *this;
}
InputInfo::~InputInfo() = default;
InputTensorInfo& InputInfo::tensor() {
return m_impl->m_tensor_info;
@@ -264,40 +230,10 @@ InputNetworkInfo& InputInfo::network() {
return m_impl->m_network_data;
}
InputInfo&& InputInfo::tensor(InputTensorInfo&& builder) && {
m_impl->m_tensor_info = std::move(builder);
return std::move(*this);
}
InputInfo&& InputInfo::preprocess(PreProcessSteps&& builder) && {
m_impl->m_preprocess = std::move(builder);
return std::move(*this);
}
InputInfo& InputInfo::preprocess(PreProcessSteps&& builder) & {
m_impl->m_preprocess = std::move(builder);
return *this;
}
InputInfo& InputInfo::network(InputNetworkInfo&& builder) & {
m_impl->m_network_data = std::move(builder);
return *this;
}
InputInfo&& InputInfo::network(InputNetworkInfo&& builder) && {
m_impl->m_network_data = std::move(builder);
return std::move(*this);
}
//-------------- OutputInfo ------------------
OutputInfo::OutputInfo() : m_impl(std::unique_ptr<OutputInfoImpl>(new OutputInfoImpl)) {}
OutputInfo::OutputInfo(size_t output_index)
: m_impl(std::unique_ptr<OutputInfoImpl>(new OutputInfoImpl(output_index))) {}
OutputInfo::OutputInfo(const std::string& output_tensor_name)
: m_impl(std::unique_ptr<OutputInfoImpl>(new OutputInfoImpl(output_tensor_name))) {}
OutputInfo::OutputInfo(OutputInfo&&) noexcept = default;
OutputInfo& OutputInfo::operator=(OutputInfo&&) noexcept = default;
OutputInfo::OutputInfo(OutputInfo&& other) noexcept = default;
OutputInfo& OutputInfo::operator=(OutputInfo&& other) noexcept = default;
OutputInfo::~OutputInfo() = default;
OutputNetworkInfo& OutputInfo::network() {
@@ -312,106 +248,44 @@ OutputTensorInfo& OutputInfo::tensor() {
return m_impl->m_tensor_info;
}
// TODO: remove this in future
OutputInfo& OutputInfo::tensor(OutputTensorInfo&& builder) & {
m_impl->m_tensor_info = std::move(builder);
return *this;
}
// TODO: remove this in future
OutputInfo&& OutputInfo::tensor(OutputTensorInfo&& builder) && {
m_impl->m_tensor_info = std::move(builder);
return std::move(*this);
}
// TODO: remove this in future
OutputInfo&& OutputInfo::postprocess(PostProcessSteps&& builder) && {
m_impl->m_postprocess = std::move(builder);
return std::move(*this);
}
// TODO: remove this in future
OutputInfo& OutputInfo::postprocess(PostProcessSteps&& builder) & {
m_impl->m_postprocess = std::move(builder);
return *this;
}
// TODO: remove this in future
OutputInfo& OutputInfo::network(OutputNetworkInfo&& builder) & {
m_impl->m_network_info = std::move(builder);
return *this;
}
// TODO: remove this in future
OutputInfo&& OutputInfo::network(OutputNetworkInfo&& builder) && {
m_impl->m_network_info = std::move(builder);
return std::move(*this);
}
// ------------------------ PrePostProcessor --------------------
struct PrePostProcessor::PrePostProcessorImpl {
public:
PrePostProcessorImpl() = default;
explicit PrePostProcessorImpl(const std::shared_ptr<ov::Function>& f) : m_function(f) {
OPENVINO_ASSERT(f, "Function can't be nullptr for PrePostProcessor");
m_inputs.reserve(m_function->inputs().size());
for (size_t i = 0; i < m_function->inputs().size(); i++) {
m_inputs.emplace_back(i);
for (size_t i = 0; i < m_function->inputs().size(); ++i) {
auto info = InputInfo();
info.m_impl->m_resolved_param = m_function->get_parameters()[i];
m_inputs.push_back(std::move(info));
}
m_outputs.reserve(m_function->outputs().size());
for (size_t i = 0; i < m_function->outputs().size(); i++) {
m_outputs.emplace_back(i);
for (size_t i = 0; i < m_function->outputs().size(); ++i) {
auto info = OutputInfo();
info.m_impl->m_output_node = m_function->output(i);
m_outputs.push_back(std::move(info));
}
}
size_t find_input_index(const std::string& tensor_name) {
InputInfo& find_input(const std::string& tensor_name) {
size_t index;
for (index = 0; index < m_function->inputs().size(); index++) {
if (m_function->input(index).get_names().count(tensor_name)) {
break;
if (m_function->input(index).get_tensor().get_names().count(tensor_name)) {
return m_inputs[index];
}
}
OPENVINO_ASSERT(index < m_inputs.size(), "Function doesn't have input with name ", tensor_name);
return index;
OPENVINO_ASSERT(false, "Function doesn't have input with name ", tensor_name);
}
size_t find_output_index(const std::string& tensor_name) {
OutputInfo& find_output(const std::string& tensor_name) {
size_t index;
for (index = 0; index < m_function->outputs().size(); index++) {
if (m_function->output(index).get_names().count(tensor_name)) {
break;
if (m_function->output(index).get_tensor().get_names().count(tensor_name)) {
return m_outputs[index];
}
}
OPENVINO_ASSERT(index < m_outputs.size(), "Function doesn't have output with name ", tensor_name);
return index;
OPENVINO_ASSERT(false, "Function doesn't have output with name ", tensor_name);
}
// TODO: this is created for compatibility, consider to remove
void add_input_info(InputInfo&& builder) {
if (builder.m_impl->m_has_index) {
OPENVINO_ASSERT(builder.m_impl->m_index < m_inputs.size(), "Index is out of range");
m_inputs[builder.m_impl->m_index] = std::move(builder);
} else if (builder.m_impl->m_has_name) {
size_t index = find_input_index(builder.m_impl->m_name);
m_inputs[index] = std::move(builder);
} else {
OPENVINO_ASSERT(1 == m_inputs.size(), "Function shall have only one input");
m_inputs[0] = std::move(builder);
}
}
// TODO: this is created for compatibility, consider to remove
void add_output_info(OutputInfo&& builder) {
if (builder.m_impl->m_has_index) {
OPENVINO_ASSERT(builder.m_impl->m_index < m_inputs.size(), "Output index is out of range");
m_outputs[builder.m_impl->m_index] = std::move(builder);
} else if (builder.m_impl->m_has_name) {
size_t index = find_output_index(builder.m_impl->m_name);
m_outputs[index] = std::move(builder);
} else {
OPENVINO_ASSERT(1 == m_inputs.size(), "Function shall have only one output");
m_outputs[0] = std::move(builder);
}
}
std::vector<InputInfo> m_inputs;
std::vector<OutputInfo> m_outputs;
std::shared_ptr<Function> m_function = nullptr;
@@ -440,8 +314,7 @@ InputInfo& PrePostProcessor::input(size_t input_index) {
}
InputInfo& PrePostProcessor::input(const std::string& tensor_name) {
size_t index = m_impl->find_input_index(tensor_name);
return m_impl->m_inputs[index];
return m_impl->find_input(tensor_name);
}
OutputInfo& PrePostProcessor::output() {
@@ -461,32 +334,7 @@ OutputInfo& PrePostProcessor::output(size_t output_index) {
}
OutputInfo& PrePostProcessor::output(const std::string& tensor_name) {
size_t index = m_impl->find_output_index(tensor_name);
return m_impl->m_outputs[index];
}
// TODO: consider to remove
PrePostProcessor& PrePostProcessor::input(InputInfo&& builder) & {
m_impl->add_input_info(std::move(builder));
return *this;
}
// TODO: consider to remove
PrePostProcessor&& PrePostProcessor::input(InputInfo&& builder) && {
m_impl->add_input_info(std::move(builder));
return std::move(*this);
}
// TODO: consider remove
PrePostProcessor& PrePostProcessor::output(OutputInfo&& builder) & {
m_impl->add_output_info(std::move(builder));
return *this;
}
// TODO: consider remove
PrePostProcessor&& PrePostProcessor::output(OutputInfo&& builder) && {
m_impl->add_output_info(std::move(builder));
return std::move(*this);
return m_impl->find_output(tensor_name);
}
std::shared_ptr<Function> PrePostProcessor::build() {
@@ -496,22 +344,10 @@ std::shared_ptr<Function> PrePostProcessor::build() {
bool tensor_data_updated = false;
for (const auto& input_info : m_impl->m_inputs) {
auto& input = input_info.m_impl;
std::shared_ptr<op::v0::Parameter> param;
Output<Node> node;
OPENVINO_ASSERT(input, "Internal error: Invalid preprocessing input, please report a problem");
if (input->has_index()) {
node = function->input(input->m_index);
} else if (input->has_name()) {
node = function->input(input->m_name);
} else {
node = function->input();
}
param = std::dynamic_pointer_cast<op::v0::Parameter>(node.get_node_shared_ptr());
// Set parameter layout from 'network' information
if (input->get_network()->is_layout_set() && param->get_layout().empty()) {
param->set_layout(input->get_network()->get_layout());
if (input->get_network()->is_layout_set() && input->m_resolved_param->get_layout().empty()) {
input->m_resolved_param->set_layout(input->get_network()->get_layout());
}
input->m_resolved_param = param;
}
auto results = function->get_results();
auto parameters_list = std::list<std::shared_ptr<op::v0::Parameter>>(function->get_parameters().begin(),
@@ -690,15 +526,7 @@ std::shared_ptr<Function> PrePostProcessor::build() {
for (const auto& output_info : m_impl->m_outputs) {
const auto& output = output_info.m_impl;
std::shared_ptr<op::v0::Result> result;
Output<Node> node;
OPENVINO_ASSERT(output, "Internal error: Invalid postprocessing output, please report a problem");
if (output->has_index()) {
node = function->output(output->m_index);
} else if (output->has_name()) {
node = function->output(output->m_name);
} else {
node = function->output();
}
Output<Node> node = output->m_output_node;
auto start_out_node_names = node.get_tensor().get_names();
node.get_tensor().set_names({});
result = std::dynamic_pointer_cast<op::v0::Result>(node.get_node_shared_ptr());
@@ -773,145 +601,79 @@ std::shared_ptr<Function> PrePostProcessor::build() {
// --------------------- InputTensorInfo ------------------
InputTensorInfo::InputTensorInfo() : m_impl(std::unique_ptr<InputTensorInfoImpl>(new InputTensorInfoImpl())) {}
InputTensorInfo::InputTensorInfo(InputTensorInfo&&) noexcept = default;
InputTensorInfo& InputTensorInfo::operator=(InputTensorInfo&&) noexcept = default;
InputTensorInfo::~InputTensorInfo() = default;
InputTensorInfo& InputTensorInfo::set_element_type(const element::Type& type) & {
InputTensorInfo& InputTensorInfo::set_element_type(const element::Type& type) {
m_impl->set_element_type(type);
return *this;
}
InputTensorInfo&& InputTensorInfo::set_element_type(const element::Type& type) && {
m_impl->set_element_type(type);
return std::move(*this);
}
InputTensorInfo& InputTensorInfo::set_layout(const Layout& layout) & {
InputTensorInfo& InputTensorInfo::set_layout(const Layout& layout) {
m_impl->set_layout(layout);
return *this;
}
InputTensorInfo&& InputTensorInfo::set_layout(const Layout& layout) && {
m_impl->set_layout(layout);
return std::move(*this);
}
InputTensorInfo& InputTensorInfo::set_spatial_dynamic_shape() & {
InputTensorInfo& InputTensorInfo::set_spatial_dynamic_shape() {
m_impl->set_spatial_dynamic_shape();
return *this;
}
InputTensorInfo&& InputTensorInfo::set_spatial_dynamic_shape() && {
m_impl->set_spatial_dynamic_shape();
return std::move(*this);
}
InputTensorInfo& InputTensorInfo::set_spatial_static_shape(size_t height, size_t width) & {
InputTensorInfo& InputTensorInfo::set_spatial_static_shape(size_t height, size_t width) {
m_impl->set_spatial_static_shape(height, width);
return *this;
}
InputTensorInfo&& InputTensorInfo::set_spatial_static_shape(size_t height, size_t width) && {
m_impl->set_spatial_static_shape(height, width);
return std::move(*this);
}
// --------------------- InputNetworkInfo ------------------
InputNetworkInfo::InputNetworkInfo() : m_impl(std::unique_ptr<InputNetworkInfoImpl>(new InputNetworkInfoImpl())) {}
InputNetworkInfo::InputNetworkInfo(InputNetworkInfo&&) noexcept = default;
InputNetworkInfo& InputNetworkInfo::operator=(InputNetworkInfo&&) noexcept = default;
InputNetworkInfo::~InputNetworkInfo() = default;
InputNetworkInfo& InputNetworkInfo::set_layout(const Layout& layout) & {
InputNetworkInfo& InputNetworkInfo::set_layout(const Layout& layout) {
m_impl->set_layout(layout);
return *this;
}
InputNetworkInfo&& InputNetworkInfo::set_layout(const Layout& layout) && {
m_impl->set_layout(layout);
return std::move(*this);
}
InputTensorInfo& InputTensorInfo::set_color_format(const ov::preprocess::ColorFormat& format,
const std::vector<std::string>& sub_names) & {
const std::vector<std::string>& sub_names) {
m_impl->set_color_format(format, sub_names);
return *this;
}
InputTensorInfo&& InputTensorInfo::set_color_format(const ov::preprocess::ColorFormat& format,
const std::vector<std::string>& sub_names) && {
m_impl->set_color_format(format, sub_names);
return std::move(*this);
}
InputTensorInfo& InputTensorInfo::set_memory_type(const std::string& memory_type) & {
InputTensorInfo& InputTensorInfo::set_memory_type(const std::string& memory_type) {
m_impl->set_memory_type(memory_type);
return *this;
}
InputTensorInfo&& InputTensorInfo::set_memory_type(const std::string& memory_type) && {
m_impl->set_memory_type(memory_type);
return std::move(*this);
}
// --------------------- PreProcessSteps ------------------
PreProcessSteps::PreProcessSteps() : m_impl(std::unique_ptr<PreProcessStepsImpl>(new PreProcessStepsImpl())) {}
PreProcessSteps::PreProcessSteps(PreProcessSteps&&) noexcept = default;
PreProcessSteps& PreProcessSteps::operator=(PreProcessSteps&&) noexcept = default;
PreProcessSteps::~PreProcessSteps() = default;
PreProcessSteps& PreProcessSteps::scale(float value) & {
PreProcessSteps& PreProcessSteps::scale(float value) {
m_impl->add_scale_impl(std::vector<float>{value});
return *this;
}
PreProcessSteps&& PreProcessSteps::scale(float value) && {
m_impl->add_scale_impl(std::vector<float>{value});
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::scale(const std::vector<float>& values) & {
PreProcessSteps& PreProcessSteps::scale(const std::vector<float>& values) {
m_impl->add_scale_impl(values);
return *this;
}
PreProcessSteps&& PreProcessSteps::scale(const std::vector<float>& values) && {
m_impl->add_scale_impl(values);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::mean(float value) & {
PreProcessSteps& PreProcessSteps::mean(float value) {
m_impl->add_mean_impl(std::vector<float>{value});
return *this;
}
PreProcessSteps&& PreProcessSteps::mean(float value) && {
m_impl->add_mean_impl(std::vector<float>{value});
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::mean(const std::vector<float>& values) & {
PreProcessSteps& PreProcessSteps::mean(const std::vector<float>& values) {
m_impl->add_mean_impl(values);
return *this;
}
PreProcessSteps&& PreProcessSteps::mean(const std::vector<float>& values) && {
m_impl->add_mean_impl(values);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::convert_element_type(const element::Type& type) & {
PreProcessSteps& PreProcessSteps::convert_element_type(const element::Type& type) {
m_impl->add_convert_impl(type);
return *this;
}
PreProcessSteps&& PreProcessSteps::convert_element_type(const element::Type& type) && {
m_impl->add_convert_impl(type);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width) & {
PreProcessSteps& PreProcessSteps::resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width) {
OPENVINO_ASSERT(dst_height <= std::numeric_limits<int>::max() && dst_width <= std::numeric_limits<int>::max(),
"Resize: Width/Height dimensions cannot be greater than ",
std::to_string(std::numeric_limits<int>::max()));
@@ -919,55 +681,27 @@ PreProcessSteps& PreProcessSteps::resize(ResizeAlgorithm alg, size_t dst_height,
return *this;
}
PreProcessSteps&& PreProcessSteps::resize(ResizeAlgorithm alg, size_t dst_height, size_t dst_width) && {
OPENVINO_ASSERT(dst_height <= std::numeric_limits<int>::max() && dst_width <= std::numeric_limits<int>::max(),
"Resize: Width/Height dimensions cannot be greater than ",
std::to_string(std::numeric_limits<int>::max()));
m_impl->add_resize_impl(alg, static_cast<int>(dst_height), static_cast<int>(dst_width));
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::resize(ResizeAlgorithm alg) & {
PreProcessSteps& PreProcessSteps::resize(ResizeAlgorithm alg) {
m_impl->add_resize_impl(alg, -1, -1);
return *this;
}
PreProcessSteps&& PreProcessSteps::resize(ResizeAlgorithm alg) && {
m_impl->add_resize_impl(alg, -1, -1);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::convert_layout(const Layout& dst_layout) & {
PreProcessSteps& PreProcessSteps::convert_layout(const Layout& dst_layout) {
m_impl->add_convert_layout_impl(dst_layout);
return *this;
}
PreProcessSteps&& PreProcessSteps::convert_layout(const Layout& dst_layout) && {
m_impl->add_convert_layout_impl(dst_layout);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::convert_layout(const std::vector<uint64_t>& dims) & {
PreProcessSteps& PreProcessSteps::convert_layout(const std::vector<uint64_t>& dims) {
m_impl->add_convert_layout_impl(dims);
return *this;
}
PreProcessSteps&& PreProcessSteps::convert_layout(const std::vector<uint64_t>& dims) && {
m_impl->add_convert_layout_impl(dims);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::convert_color(const ov::preprocess::ColorFormat& dst_format) & {
PreProcessSteps& PreProcessSteps::convert_color(const ov::preprocess::ColorFormat& dst_format) {
m_impl->add_convert_color_impl(dst_format);
return *this;
}
PreProcessSteps&& PreProcessSteps::convert_color(const ov::preprocess::ColorFormat& dst_format) && {
m_impl->add_convert_color_impl(dst_format);
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb) & {
PreProcessSteps& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb) {
// 'true' indicates that custom preprocessing step will trigger validate_and_infer_types
m_impl->actions().emplace_back([preprocess_cb](const std::vector<Output<Node>>& nodes,
const std::shared_ptr<ov::Function>&,
@@ -980,109 +714,55 @@ PreProcessSteps& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb
return *this;
}
PreProcessSteps&& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb) && {
// 'true' indicates that custom preprocessing step will trigger validate_and_infer_types
m_impl->actions().emplace_back([preprocess_cb](const std::vector<Output<Node>>& nodes,
const std::shared_ptr<ov::Function>&,
PreprocessingContext&) {
OPENVINO_ASSERT(nodes.size() == 1,
"Can't apply custom preprocessing step for multi-plane input. Suggesting to convert "
"current image to RGB/BGR color format using 'convert_color'");
return std::make_tuple(std::vector<Output<Node>>{preprocess_cb(nodes[0])}, true);
});
return std::move(*this);
}
PreProcessSteps& PreProcessSteps::reverse_channels() & {
PreProcessSteps& PreProcessSteps::reverse_channels() {
m_impl->add_reverse_channels();
return *this;
}
PreProcessSteps&& PreProcessSteps::reverse_channels() && {
m_impl->add_reverse_channels();
return std::move(*this);
}
// --------------------- OutputTensorInfo ------------------
OutputTensorInfo::OutputTensorInfo() : m_impl(std::unique_ptr<OutputTensorInfoImpl>(new OutputTensorInfoImpl())) {}
OutputTensorInfo::OutputTensorInfo(OutputTensorInfo&&) noexcept = default;
OutputTensorInfo& OutputTensorInfo::operator=(OutputTensorInfo&&) noexcept = default;
OutputTensorInfo::~OutputTensorInfo() = default;
OutputTensorInfo& OutputTensorInfo::set_element_type(const element::Type& type) & {
OutputTensorInfo& OutputTensorInfo::set_element_type(const element::Type& type) {
m_impl->set_element_type(type);
return *this;
}
OutputTensorInfo&& OutputTensorInfo::set_element_type(const element::Type& type) && {
m_impl->set_element_type(type);
return std::move(*this);
}
OutputTensorInfo& OutputTensorInfo::set_layout(const Layout& layout) & {
OutputTensorInfo& OutputTensorInfo::set_layout(const Layout& layout) {
m_impl->set_layout(layout);
return *this;
}
OutputTensorInfo&& OutputTensorInfo::set_layout(const Layout& layout) && {
m_impl->set_layout(layout);
return std::move(*this);
}
// --------------------- OutputNetworkInfo ------------------
OutputNetworkInfo::OutputNetworkInfo() : m_impl(std::unique_ptr<OutputNetworkInfoImpl>(new OutputNetworkInfoImpl())) {}
OutputNetworkInfo::OutputNetworkInfo(OutputNetworkInfo&&) noexcept = default;
OutputNetworkInfo& OutputNetworkInfo::operator=(OutputNetworkInfo&&) noexcept = default;
OutputNetworkInfo::~OutputNetworkInfo() = default;
OutputNetworkInfo& OutputNetworkInfo::set_layout(const Layout& layout) & {
OutputNetworkInfo& OutputNetworkInfo::set_layout(const Layout& layout) {
m_impl->set_layout(layout);
return *this;
}
OutputNetworkInfo&& OutputNetworkInfo::set_layout(const Layout& layout) && {
m_impl->set_layout(layout);
return std::move(*this);
}
// --------------------- PostProcessSteps ------------------
PostProcessSteps::PostProcessSteps() : m_impl(std::unique_ptr<PostProcessStepsImpl>(new PostProcessStepsImpl())) {}
PostProcessSteps::PostProcessSteps(PostProcessSteps&&) noexcept = default;
PostProcessSteps& PostProcessSteps::operator=(PostProcessSteps&&) noexcept = default;
PostProcessSteps::~PostProcessSteps() = default;
PostProcessSteps& PostProcessSteps::convert_element_type(const element::Type& type) & {
PostProcessSteps& PostProcessSteps::convert_element_type(const element::Type& type) {
m_impl->add_convert_impl(type);
return *this;
}
PostProcessSteps&& PostProcessSteps::convert_element_type(const element::Type& type) && {
m_impl->add_convert_impl(type);
return std::move(*this);
}
PostProcessSteps& PostProcessSteps::convert_layout(const Layout& dst_layout) & {
PostProcessSteps& PostProcessSteps::convert_layout(const Layout& dst_layout) {
m_impl->add_convert_layout_impl(dst_layout);
return *this;
}
PostProcessSteps&& PostProcessSteps::convert_layout(const Layout& dst_layout) && {
m_impl->add_convert_layout_impl(dst_layout);
return std::move(*this);
}
PostProcessSteps& PostProcessSteps::convert_layout(const std::vector<uint64_t>& dims) & {
PostProcessSteps& PostProcessSteps::convert_layout(const std::vector<uint64_t>& dims) {
m_impl->add_convert_layout_impl(dims);
return *this;
}
PostProcessSteps&& PostProcessSteps::convert_layout(const std::vector<uint64_t>& dims) && {
m_impl->add_convert_layout_impl(dims);
return std::move(*this);
}
PostProcessSteps& PostProcessSteps::custom(const CustomPostprocessOp& postprocess_cb) & {
PostProcessSteps& PostProcessSteps::custom(const CustomPostprocessOp& postprocess_cb) {
// 'true' indicates that custom postprocessing step will trigger validate_and_infer_types
m_impl->actions().emplace_back([postprocess_cb](const Output<ov::Node>& node, PostprocessingContext&) {
return std::make_tuple(postprocess_cb(node), true);
@@ -1090,13 +770,5 @@ PostProcessSteps& PostProcessSteps::custom(const CustomPostprocessOp& postproces
return *this;
}
PostProcessSteps&& PostProcessSteps::custom(const CustomPostprocessOp& postprocess_cb) && {
// 'true' indicates that custom postprocessing step will trigger validate_and_infer_types
m_impl->actions().emplace_back([postprocess_cb](const Output<ov::Node>& node, PostprocessingContext&) {
return std::make_tuple(postprocess_cb(node), true);
});
return std::move(*this);
}
} // namespace preprocess
} // namespace ov

File diff suppressed because it is too large Load Diff