Pszmel/bind preprocessing (#8791)

* Use reference_wrapper for preprocess bindings

* Update tests

* add bindings to I420_SINGLE_PLANE and I420_THREE_PLANES

* remove init from all classes except PrePostProcessor and add RGBX and BGRX to ColorFormat enum

* update test name

* add test for taking input and output by name

* move ref_wrapper to common.hpp

* add common.hpp include to PrePostProcessor

* update comment on ref_wrapper
This commit is contained in:
Piotr Szmelczynski 2021-11-25 21:40:19 +01:00 committed by GitHub
parent 342d835552
commit 387c35cc6e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 235 additions and 364 deletions

View File

@ -43,4 +43,17 @@ namespace Common
PyObject* parse_parameter(const InferenceEngine::Parameter& param);
uint32_t get_optimal_number_of_requests(const ov::runtime::ExecutableNetwork& actual);
// Non-owning pybind11 holder: wraps a reference to a C++ object whose lifetime
// is managed elsewhere (here: sub-objects returned by reference from the
// PrePostProcessor builder API).
// Use only with classes that are not creatable by users on Python's side,
// because objects created in Python that are wrapped with such wrapper will
// cause memory leaks (the wrapper never deletes what it points to).
template <typename T>
class ref_wrapper {
    std::reference_wrapper<T> impl;

public:
    // Precondition: `p` must be non-null — pybind11 constructs holders from
    // pointers returned by the bound lambdas, which always return `&ref`.
    explicit ref_wrapper(T* p) : impl(*p) {}

    // Holder accessor required by pybind11; returns the wrapped pointer.
    // Never throws, so marked noexcept.
    T* get() const noexcept {
        return &impl.get();
    }
};
}; // namespace Common

View File

@ -10,22 +10,23 @@
#include "openvino/core/function.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "pyopenvino/core/common.hpp"
namespace py = pybind11;
// Custom holder wrapping returned references to preprocessing objects
PYBIND11_DECLARE_HOLDER_TYPE(T, Common::ref_wrapper<T>)
static void regclass_graph_PreProcessSteps(py::module m) {
py::class_<ov::preprocess::PreProcessSteps, std::shared_ptr<ov::preprocess::PreProcessSteps>> steps(
py::class_<ov::preprocess::PreProcessSteps, Common::ref_wrapper<ov::preprocess::PreProcessSteps>> steps(
m,
"PreProcessSteps");
steps.doc() = "openvino.impl.preprocess.PreProcessSteps wraps ov::preprocess::PreProcessSteps";
steps.def(py::init<>());
steps.def(
"mean",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, float value) {
me->mean(value);
return me;
[](ov::preprocess::PreProcessSteps& me, float value) {
return &me.mean(value);
},
py::arg("value"),
R"(
@ -42,9 +43,8 @@ static void regclass_graph_PreProcessSteps(py::module m) {
)");
steps.def(
"mean",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, const std::vector<float>& values) {
me->mean(values);
return me;
[](ov::preprocess::PreProcessSteps& me, const std::vector<float>& values) {
return &me.mean(values);
},
py::arg("values"),
R"(
@ -61,9 +61,8 @@ static void regclass_graph_PreProcessSteps(py::module m) {
)");
steps.def(
"scale",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, float value) {
me->scale(value);
return me;
[](ov::preprocess::PreProcessSteps& me, float value) {
return &me.scale(value);
},
py::arg("value"),
R"(
@ -80,9 +79,8 @@ static void regclass_graph_PreProcessSteps(py::module m) {
)");
steps.def(
"scale",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, const std::vector<float>& values) {
me->scale(values);
return me;
[](ov::preprocess::PreProcessSteps& me, const std::vector<float>& values) {
return &me.scale(values);
},
py::arg("values"),
R"(
@ -99,9 +97,8 @@ static void regclass_graph_PreProcessSteps(py::module m) {
)");
steps.def(
"convert_element_type",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, ov::element::Type type) {
me->convert_element_type(type);
return me;
[](ov::preprocess::PreProcessSteps& me, ov::element::Type type) {
return &me.convert_element_type(type);
},
py::arg("type"),
R"(
@ -118,9 +115,8 @@ static void regclass_graph_PreProcessSteps(py::module m) {
)");
steps.def(
"custom",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, py::function op) {
me->custom(op.cast<const ov::preprocess::PreProcessSteps::CustomPreprocessOp>());
return me;
[](ov::preprocess::PreProcessSteps& me, py::function op) {
return &me.custom(op.cast<const ov::preprocess::PreProcessSteps::CustomPreprocessOp>());
},
py::arg("operation"),
R"(
@ -135,63 +131,54 @@ static void regclass_graph_PreProcessSteps(py::module m) {
)");
steps.def(
"convert_color",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, const ov::preprocess::ColorFormat& dst_format) {
me->convert_color(dst_format);
return me;
[](ov::preprocess::PreProcessSteps& me, const ov::preprocess::ColorFormat& dst_format) {
return &me.convert_color(dst_format);
},
py::arg("dst_format"));
steps.def(
"resize",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me,
[](ov::preprocess::PreProcessSteps& me,
ov::preprocess::ResizeAlgorithm alg,
size_t dst_height,
size_t dst_width) {
me->resize(alg, dst_height, dst_width);
return me;
return &me.resize(alg, dst_height, dst_width);
},
py::arg("alg"),
py::arg("dst_height"),
py::arg("dst_width"));
steps.def(
"resize",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, ov::preprocess::ResizeAlgorithm alg) {
me->resize(alg);
return me;
[](ov::preprocess::PreProcessSteps& me, ov::preprocess::ResizeAlgorithm alg) {
return &me.resize(alg);
},
py::arg("alg"));
steps.def(
"convert_layout",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, const ov::Layout& layout = {}) {
me->convert_layout(layout);
return me;
[](ov::preprocess::PreProcessSteps& me, const ov::Layout& layout = {}) {
return &me.convert_layout(layout);
},
py::arg("dst_layout"));
steps.def(
"convert_layout",
[](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me, const std::vector<uint64_t>& dims) {
me->convert_layout(dims);
return me;
[](ov::preprocess::PreProcessSteps& me, const std::vector<uint64_t>& dims) {
return &me.convert_layout(dims);
},
py::arg("dims"));
steps.def("reverse_channels", [](const std::shared_ptr<ov::preprocess::PreProcessSteps>& me) {
me->reverse_channels();
return me;
steps.def("reverse_channels", [](ov::preprocess::PreProcessSteps& me) {
return &me.reverse_channels();
});
}
static void regclass_graph_PostProcessSteps(py::module m) {
py::class_<ov::preprocess::PostProcessSteps, std::shared_ptr<ov::preprocess::PostProcessSteps>> steps(
py::class_<ov::preprocess::PostProcessSteps, Common::ref_wrapper<ov::preprocess::PostProcessSteps>> steps(
m,
"PostProcessSteps");
steps.doc() = "openvino.impl.preprocess.PostprocessSteps wraps ov::preprocess::PostProcessSteps";
steps.def(py::init<>());
steps.def(
"convert_element_type",
[](const std::shared_ptr<ov::preprocess::PostProcessSteps>& me, ov::element::Type type) {
me->convert_element_type(type);
return me;
[](ov::preprocess::PostProcessSteps& me, ov::element::Type type) {
return &me.convert_element_type(type);
},
py::arg("type"),
R"(
@ -208,23 +195,20 @@ static void regclass_graph_PostProcessSteps(py::module m) {
)");
steps.def(
"convert_layout",
[](const std::shared_ptr<ov::preprocess::PostProcessSteps>& me, const ov::Layout& layout = {}) {
me->convert_layout(layout);
return me;
[](ov::preprocess::PostProcessSteps& me, const ov::Layout& layout = {}) {
return &me.convert_layout(layout);
},
py::arg("dst_layout"));
steps.def(
"convert_layout",
[](const std::shared_ptr<ov::preprocess::PostProcessSteps>& me, const std::vector<uint64_t>& dims) {
me->convert_layout(dims);
return me;
[](ov::preprocess::PostProcessSteps& me, const std::vector<uint64_t>& dims) {
return &me.convert_layout(dims);
},
py::arg("dims"));
steps.def(
"custom",
[](const std::shared_ptr<ov::preprocess::PostProcessSteps>& me, py::function op) {
me->custom(op.cast<const ov::preprocess::PostProcessSteps::CustomPostprocessOp>());
return me;
[](ov::preprocess::PostProcessSteps& me, py::function op) {
return &me.custom(op.cast<const ov::preprocess::PostProcessSteps::CustomPostprocessOp>());
},
py::arg("operation"),
R"(
@ -240,18 +224,15 @@ static void regclass_graph_PostProcessSteps(py::module m) {
}
static void regclass_graph_InputTensorInfo(py::module m) {
py::class_<ov::preprocess::InputTensorInfo, std::shared_ptr<ov::preprocess::InputTensorInfo>> info(
py::class_<ov::preprocess::InputTensorInfo, Common::ref_wrapper<ov::preprocess::InputTensorInfo>> info(
m,
"InputTensorInfo");
info.doc() = "openvino.impl.preprocess.InputTensorInfo wraps ov::preprocess::InputTensorInfo";
info.def(py::init<>());
info.def(
"set_element_type",
[](const std::shared_ptr<ov::preprocess::InputTensorInfo>& me, const ov::element::Type& type) {
me->set_element_type(type);
return me;
[](ov::preprocess::InputTensorInfo& me, const ov::element::Type& type) {
return &me.set_element_type(type);
},
py::arg("type"),
R"(
@ -266,41 +247,34 @@ static void regclass_graph_InputTensorInfo(py::module m) {
tensor : InputTensorInfo
Reference to itself to allow chaining of calls in client's code in a builder-like manner.
)");
info.def("set_layout", [](const std::shared_ptr<ov::preprocess::InputTensorInfo>& me, const ov::Layout& layout) {
me->set_layout(layout);
return me;
info.def("set_layout", [](ov::preprocess::InputTensorInfo& me, const ov::Layout& layout) {
return &me.set_layout(layout);
});
info.def("set_spatial_dynamic_shape", [](const std::shared_ptr<ov::preprocess::InputTensorInfo>& me) {
me->set_spatial_dynamic_shape();
return me;
info.def("set_spatial_dynamic_shape", [](ov::preprocess::InputTensorInfo& me) {
return &me.set_spatial_dynamic_shape();
});
info.def("set_spatial_static_shape", [](ov::preprocess::InputTensorInfo& me, size_t height, size_t width) {
    // Return the address of `me` so the Common::ref_wrapper holder exposes the
    // same builder object to Python, enabling call chaining.
    // (Removed a stray empty statement `;` that followed the return.)
    return &me.set_spatial_static_shape(height, width);
});
info.def("set_spatial_static_shape",
[](const std::shared_ptr<ov::preprocess::InputTensorInfo>& me, size_t height, size_t width) {
me->set_spatial_static_shape(height, width);
return me;
});
info.def("set_color_format",
[](const std::shared_ptr<ov::preprocess::InputTensorInfo>& me,
[](ov::preprocess::InputTensorInfo& me,
const ov::preprocess::ColorFormat& format,
const std::vector<std::string>& sub_names = {}) {
me->set_color_format(format, sub_names);
return me;
return &me.set_color_format(format, sub_names);
});
}
static void regclass_graph_OutputTensorInfo(py::module m) {
py::class_<ov::preprocess::OutputTensorInfo, std::shared_ptr<ov::preprocess::OutputTensorInfo>> info(
py::class_<ov::preprocess::OutputTensorInfo, Common::ref_wrapper<ov::preprocess::OutputTensorInfo>> info(
m,
"OutputTensorInfo");
info.doc() = "openvino.impl.preprocess.OutputTensorInfo wraps ov::preprocess::OutputTensorInfo";
info.def(py::init<>());
info.def(
"set_element_type",
[](const std::shared_ptr<ov::preprocess::OutputTensorInfo>& me, const ov::element::Type& type) {
me->set_element_type(type);
return me;
[](ov::preprocess::OutputTensorInfo& me, const ov::element::Type& type) {
return &me.set_element_type(type);
},
py::arg("type"),
R"(
@ -315,150 +289,60 @@ static void regclass_graph_OutputTensorInfo(py::module m) {
tensor : OutputTensorInfo
Reference to itself to allow chaining of calls in client's code in a builder-like manner.
)");
info.def("set_layout", [](const std::shared_ptr<ov::preprocess::OutputTensorInfo>& me, const ov::Layout& layout) {
me->set_layout(layout);
return me;
info.def("set_layout", [](ov::preprocess::OutputTensorInfo& me, const ov::Layout& layout) {
return &me.set_layout(layout);
});
}
static void regclass_graph_InputInfo(py::module m) {
py::class_<ov::preprocess::InputInfo, std::shared_ptr<ov::preprocess::InputInfo>> inp(m, "InputInfo");
py::class_<ov::preprocess::InputInfo, Common::ref_wrapper<ov::preprocess::InputInfo>> inp(m, "InputInfo");
inp.doc() = "openvino.impl.preprocess.InputInfo wraps ov::preprocess::InputInfo";
inp.def(py::init<>(), R"(Default constructor, can be used only for networks with exactly one input)");
inp.def(py::init<size_t>(), R"(Constructor with parameter index as argument)");
inp.def(py::init<const std::string&>(), R"(Constructor with input tensor name as argument)");
inp.def(
"tensor",
[](const std::shared_ptr<ov::preprocess::InputInfo>& me,
const std::shared_ptr<ov::preprocess::InputTensorInfo>& inputTensorInfo) {
me->tensor(std::move(*inputTensorInfo));
return me;
},
py::arg("tensor"),
R"(
Adds builder for actual tensor information of client's input.
Parameters
----------
tensor : InputTensorInfo
Client's input tensor information. It's internal data will be moved to parent InputInfo object.
Returns
----------
tensor : InputInfo
Reference to itself to allow chaining of calls in client's code in a builder-like manner.
)");
inp.def(
"preprocess",
[](const std::shared_ptr<ov::preprocess::InputInfo>& me,
const std::shared_ptr<ov::preprocess::PreProcessSteps>& preProcessSteps) {
me->preprocess(std::move(*preProcessSteps));
return me;
},
py::arg("pre_process_steps"),
R"(
Adds builder for actual preprocessing steps for input parameter.
Steps can specify various actions, like 'mean', 'scale' and others.
Parameters
----------
pre_process_steps : PreProcessSteps
Preprocessing steps. It's internal data will be moved to parent InputInfo object.
Returns
----------
preprocess : InputInfo
Reference to itself to allow chaining of calls in client's code in a builder-like manner.
)");
inp.def(
"network",
[](const std::shared_ptr<ov::preprocess::InputInfo>& me,
const std::shared_ptr<ov::preprocess::InputNetworkInfo>& inputNetworkInfo) {
me->network(std::move(*inputNetworkInfo));
return me;
},
py::arg("input_network_info"));
inp.def("tensor", [](ov::preprocess::InputInfo& me) {
return &me.tensor();
});
inp.def("preprocess", [](ov::preprocess::InputInfo& me) {
return &me.preprocess();
});
inp.def("network", [](ov::preprocess::InputInfo& me) {
return &me.network();
});
}
static void regclass_graph_OutputInfo(py::module m) {
py::class_<ov::preprocess::OutputInfo, std::shared_ptr<ov::preprocess::OutputInfo>> out(m, "OutputInfo");
py::class_<ov::preprocess::OutputInfo, Common::ref_wrapper<ov::preprocess::OutputInfo>> out(m, "OutputInfo");
out.doc() = "openvino.impl.preprocess.OutputInfo wraps ov::preprocess::OutputInfo";
out.def(py::init<>(), R"(Default constructor, can be used only for networks with exactly one output)");
out.def(py::init<size_t>(), R"(Constructor with parameter index as argument)");
out.def(py::init<const std::string&>(), R"(Constructor with tensor name as argument)");
out.def(
"tensor",
[](const std::shared_ptr<ov::preprocess::OutputInfo>& me,
const std::shared_ptr<ov::preprocess::OutputTensorInfo>& outputTensorInfo) {
me->tensor(std::move(*outputTensorInfo));
return me;
},
py::arg("tensor"),
R"(
Adds builder for actual tensor information of client's output.
Parameters
----------
tensor : OutputTensorInfo
Client's output tensor information. It's internal data will be moved to parent OutputInfo object.
Returns
----------
tensor : OutputInfo
Reference to itself to allow chaining of calls in client's code in a builder-like manner.
)");
out.def(
"postprocess",
[](const std::shared_ptr<ov::preprocess::OutputInfo>& me,
const std::shared_ptr<ov::preprocess::PostProcessSteps>& postProcessSteps) {
me->postprocess(std::move(*postProcessSteps));
return me;
},
py::arg("post_process_steps"),
R"(
Adds builder for actual postprocessing steps for output parameter.
Parameters
----------
post_process_steps : PostProcessSteps
Postprocessing steps. It's internal data will be moved to parent OutputInfo object.
Returns
----------
preprocess : OutputInfo
Reference to itself to allow chaining of calls in client's code in a builder-like manner.
)");
out.def(
"network",
[](const std::shared_ptr<ov::preprocess::OutputInfo>& me,
const std::shared_ptr<ov::preprocess::OutputNetworkInfo>& outputNetworkInfo) {
me->network(std::move(*outputNetworkInfo));
return me;
},
py::arg("output_network_info"));
out.def("tensor", [](ov::preprocess::OutputInfo& me) {
return &me.tensor();
});
out.def("postprocess", [](ov::preprocess::OutputInfo& me) {
return &me.postprocess();
});
out.def("network", [](ov::preprocess::OutputInfo& me) {
return &me.network();
});
}
static void regclass_graph_OutputNetworkInfo(py::module m) {
py::class_<ov::preprocess::OutputNetworkInfo, std::shared_ptr<ov::preprocess::OutputNetworkInfo>> info(
py::class_<ov::preprocess::OutputNetworkInfo, Common::ref_wrapper<ov::preprocess::OutputNetworkInfo>> info(
m,
"OutputNetworkInfo");
info.doc() = "openvino.impl.preprocess.OutputNetworkInfo wraps ov::preprocess::OutputNetworkInfo";
info.def(py::init<>());
info.def("set_layout", [](const std::shared_ptr<ov::preprocess::OutputNetworkInfo>& me, const ov::Layout& layout) {
me->set_layout(layout);
return me;
info.def("set_layout", [](ov::preprocess::OutputNetworkInfo& me, const ov::Layout& layout) {
return &me.set_layout(layout);
});
}
static void regclass_graph_InputNetworkInfo(py::module m) {
py::class_<ov::preprocess::InputNetworkInfo, std::shared_ptr<ov::preprocess::InputNetworkInfo>> info(
py::class_<ov::preprocess::InputNetworkInfo, Common::ref_wrapper<ov::preprocess::InputNetworkInfo>> info(
m,
"InputNetworkInfo");
info.doc() = "openvino.impl.preprocess.InputNetworkInfo wraps ov::preprocess::InputNetworkInfo";
info.def(py::init<>());
info.def("set_layout", [](const std::shared_ptr<ov::preprocess::InputNetworkInfo>& me, const ov::Layout& layout) {
me->set_layout(layout);
return me;
info.def("set_layout", [](ov::preprocess::InputNetworkInfo& me, const ov::Layout& layout) {
return &me.set_layout(layout);
});
}
@ -467,8 +351,12 @@ static void regenum_graph_ColorFormat(py::module m) {
.value("UNDEFINED", ov::preprocess::ColorFormat::UNDEFINED)
.value("NV12_SINGLE_PLANE", ov::preprocess::ColorFormat::NV12_SINGLE_PLANE)
.value("NV12_TWO_PLANES", ov::preprocess::ColorFormat::NV12_TWO_PLANES)
.value("I420_SINGLE_PLANE", ov::preprocess::ColorFormat::I420_SINGLE_PLANE)
.value("I420_THREE_PLANES", ov::preprocess::ColorFormat::I420_THREE_PLANES)
.value("RGB", ov::preprocess::ColorFormat::RGB)
.value("BGR", ov::preprocess::ColorFormat::BGR)
.value("RGBX", ov::preprocess::ColorFormat::RGBX)
.value("BGRX", ov::preprocess::ColorFormat::BGRX)
.export_values();
}
@ -498,43 +386,35 @@ void regclass_graph_PrePostProcessor(py::module m) {
proc.def(py::init<const std::shared_ptr<ov::Function>&>());
proc.def("input", [](ov::preprocess::PrePostProcessor& me) {
return &me.input();
});
proc.def(
"input",
[](const std::shared_ptr<ov::preprocess::PrePostProcessor>& me,
const std::shared_ptr<ov::preprocess::InputInfo>& info) {
me->input(std::move(*info));
return me;
[](ov::preprocess::PrePostProcessor& me, const std::string& tensor_name) {
return &me.input(tensor_name);
},
py::arg("input_info"),
R"(
Adds builder for preprocessing info for input parameter.
Parameters
----------
input_info : InputInfo
Preprocessing info for input parameter. It's internal data will be moved to PreProcessing object.
Returns
----------
in : PrePostProcessor
Reference to itself to allow chaining of calls in client's code.
)");
py::arg("tensor_name"));
proc.def(
"input",
[](ov::preprocess::PrePostProcessor& me, size_t input_index) {
return &me.input(input_index);
},
py::arg("input_index"));
proc.def("output", [](ov::preprocess::PrePostProcessor& me) {
return &me.output();
});
proc.def(
"output",
[](const std::shared_ptr<ov::preprocess::PrePostProcessor>& me,
const std::shared_ptr<ov::preprocess::OutputInfo>& info) {
me->output(std::move(*info));
return me;
[](ov::preprocess::PrePostProcessor& me, const std::string& tensor_name) {
return &me.output(tensor_name);
},
py::arg("output_info"),
R"(
Adds builder for preprocessing info for output parameter.
Parameters
----------
output_info : OutputInfo
Preprocessing info for output parameter. It's internal data will be moved to PreProcessing object.
Returns
----------
in : PrePostProcessor
Reference to itself to allow chaining of calls in client's code.
)");
py::arg("tensor_name"));
proc.def(
"output",
[](ov::preprocess::PrePostProcessor& me, size_t output_index) {
return &me.output(output_index);
},
py::arg("output_index"));
proc.def("build", &ov::preprocess::PrePostProcessor::build);
}

View File

@ -10,9 +10,7 @@ from openvino.impl import Function, Output, Type
from openvino.utils.decorators import custom_preprocess_function
from openvino import Core
from tests.runtime import get_runtime
from openvino.preprocess import PrePostProcessor, InputInfo, PreProcessSteps, InputTensorInfo, \
OutputTensorInfo, OutputNetworkInfo, InputNetworkInfo, ColorFormat, OutputInfo, \
PostProcessSteps, ResizeAlgorithm
from openvino.preprocess import PrePostProcessor, ColorFormat, ResizeAlgorithm
def test_ngraph_preprocess_mean():
@ -21,13 +19,11 @@ def test_ngraph_preprocess_mean():
model = parameter_a
function = Function(model, [parameter_a], "TestFunction")
function = PrePostProcessor(function)\
.input(InputInfo()
.preprocess(PreProcessSteps()
.mean(1.)
)
)\
.build()
p = PrePostProcessor(function)
inp = p.input()
prep = inp.preprocess()
prep.mean(1.0)
function = p.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32)
expected_output = np.array([[0, 1], [2, 3]]).astype(np.float32)
@ -45,14 +41,10 @@ def test_ngraph_preprocess_mean_vector():
function = Function(model, [parameter_a], "TestFunction")
layout = ov.Layout("NCHW")
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo().set_layout(layout))
.preprocess(PreProcessSteps()
.mean([1., 2.])
)
)\
.build()
p = PrePostProcessor(function)
p.input().tensor().set_layout(layout)
p.input().preprocess().mean([1., 2.])
function = p.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32)
expected_output = np.array([[0, 0], [2, 2]]).astype(np.float32)
@ -70,14 +62,11 @@ def test_ngraph_preprocess_scale_vector():
function = Function(model, [parameter_a], "TestFunction")
layout = ov.Layout("NCHW")
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo().set_layout(layout))
.preprocess(PreProcessSteps()
.scale([0.5, 2.])
)
)\
.build()
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout)
inp.preprocess().scale([0.5, 2.0])
function = p.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32)
expected_output = np.array([[2, 1], [6, 2]]).astype(np.float32)
@ -98,24 +87,13 @@ def test_ngraph_preprocess_mean_scale_convert():
def custom_preprocess(output: Output):
return ops.abs(output)
function = PrePostProcessor(function) \
.input(InputInfo(1)
.tensor(InputTensorInfo()
.set_element_type(Type.i32))
.preprocess(PreProcessSteps()
.convert_element_type(Type.f32)
.mean(1.)
.scale(2.)
)
) \
.input(InputInfo(0)
.preprocess(PreProcessSteps()
.convert_element_type(Type.f32)
.mean(1.)
.custom(custom_preprocess)
)
) \
.build()
p = PrePostProcessor(function)
inp2 = p.input(1)
inp2.tensor().set_element_type(Type.i32)
inp2.preprocess().convert_element_type(Type.f32).mean(1.).scale(2.)
inp1 = p.input(0)
inp1.preprocess().convert_element_type(Type.f32).mean(1.).custom(custom_preprocess)
function = p.build()
input_data1 = np.array([[0, 1], [2, -2]]).astype(np.int32)
input_data2 = np.array([[1, 3], [5, 7]]).astype(np.int32)
@ -129,6 +107,40 @@ def test_ngraph_preprocess_mean_scale_convert():
assert np.equal(output2, expected_output2).all()
def test_ngraph_preprocess_input_output_by_name():
shape = [2, 2]
param1 = ops.parameter(shape, dtype=np.int32, name="A")
param2 = ops.parameter(shape, dtype=np.int32, name="B")
function = Function([param1, param2], [param1, param2], "TestFunction")
@custom_preprocess_function
def custom_preprocess(output: Output):
return ops.abs(output)
p = PrePostProcessor(function)
inp2 = p.input("B")
inp2.tensor().set_element_type(Type.i32)
inp2.preprocess().convert_element_type(Type.f32).mean(1.).scale(2.)
inp1 = p.input("A")
inp1.preprocess().convert_element_type(Type.f32).mean(1.)
out1 = p.output("A")
out1.postprocess().custom(custom_preprocess)
out2 = p.output("B")
out2.postprocess().custom(custom_preprocess)
function = p.build()
input_data1 = np.array([[0, 1], [2, -2]]).astype(np.int32)
input_data2 = np.array([[-1, 3], [5, 7]]).astype(np.int32)
expected_output1 = np.array([[1, 0], [1, 3]]).astype(np.float32)
expected_output2 = np.array([[1, 1], [2, 3]]).astype(np.float32)
runtime = get_runtime()
computation = runtime.computation(function)
[output1, output2] = computation(input_data1, input_data2)
assert np.equal(output1, expected_output1).all()
assert np.equal(output2, expected_output2).all()
def test_ngraph_preprocess_output_postprocess():
shape = [2, 2]
parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
@ -142,20 +154,15 @@ def test_ngraph_preprocess_output_postprocess():
def custom_postprocess(output: Output):
return ops.abs(output)
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo().set_layout(layout1))
.preprocess(PreProcessSteps()
.convert_element_type(Type.f32)
.mean([1., 2.])
)
) \
.output(OutputInfo().postprocess(PostProcessSteps()
.convert_element_type(Type.f32)
.convert_layout(layout2)
.convert_layout(layout3)
.custom(custom_postprocess))) \
.build()
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout1)
inp.preprocess().convert_element_type(Type.f32).mean([1., 2.])
out = p.output()
out.postprocess().convert_element_type(Type.f32) \
.convert_layout(layout2) \
.convert_layout(layout3).custom(custom_postprocess)
function = p.build()
input_data = np.array([[-1, -2], [-3, -4]]).astype(np.int32)
expected_output = np.array([[2, 4], [4, 6]]).astype(np.float32)
@ -174,24 +181,16 @@ def test_ngraph_preprocess_spatial_static_shape():
layout = ov.Layout("CHW")
color_format = ColorFormat.RGB
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout(layout)
.set_spatial_static_shape(2, 2)
.set_color_format(color_format, []))
.preprocess(PreProcessSteps()
.convert_element_type(Type.f32)
.mean([1., 2])
)
.network(InputNetworkInfo().set_layout(layout))
) \
.output(OutputInfo()
.tensor(OutputTensorInfo()
.set_layout(layout)
.set_element_type(Type.f32))
.network(OutputNetworkInfo().set_layout(layout))) \
.build()
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout).set_spatial_static_shape(2, 2).set_color_format(color_format, [])
inp.preprocess().convert_element_type(Type.f32).mean([1., 2.])
inp.network().set_layout(layout)
out = p.output()
out.tensor().set_layout(layout).set_element_type(Type.f32)
out.network().set_layout(layout)
function = p.build()
input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.int32)
expected_output = np.array([[[0, 1], [2, 3]], [[3, 4], [5, 6]]]).astype(np.float32)
@ -205,11 +204,20 @@ def test_ngraph_preprocess_spatial_static_shape():
@pytest.mark.parametrize(
"algorithm, color_format1, color_format2, is_failing",
[(ResizeAlgorithm.RESIZE_LINEAR, ColorFormat.UNDEFINED, ColorFormat.BGR, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.RGB, ColorFormat.I420_SINGLE_PLANE, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.RGB, ColorFormat.I420_THREE_PLANES, True),
(ResizeAlgorithm.RESIZE_LINEAR, ColorFormat.RGB, ColorFormat.NV12_SINGLE_PLANE, True),
(ResizeAlgorithm.RESIZE_LINEAR, ColorFormat.RGB, ColorFormat.RGBX, True),
(ResizeAlgorithm.RESIZE_LINEAR, ColorFormat.RGB, ColorFormat.BGRX, True),
(ResizeAlgorithm.RESIZE_LINEAR, ColorFormat.RGB, ColorFormat.NV12_TWO_PLANES, True),
(ResizeAlgorithm.RESIZE_LINEAR, ColorFormat.UNDEFINED, ColorFormat.I420_SINGLE_PLANE, True),
(ResizeAlgorithm.RESIZE_CUBIC, ColorFormat.RGB, ColorFormat.UNDEFINED, True),
(ResizeAlgorithm.RESIZE_CUBIC, ColorFormat.RGB, ColorFormat.BGR, False),
(ResizeAlgorithm.RESIZE_CUBIC, ColorFormat.BGR, ColorFormat.RGB, False),
(ResizeAlgorithm.RESIZE_CUBIC, ColorFormat.BGR, ColorFormat.RGBX, True),
(ResizeAlgorithm.RESIZE_CUBIC, ColorFormat.BGR, ColorFormat.BGRX, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.I420_SINGLE_PLANE, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.I420_THREE_PLANES, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.NV12_SINGLE_PLANE, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.NV12_TWO_PLANES, True),
(ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.UNDEFINED, True)])
@ -220,16 +228,12 @@ def test_ngraph_preprocess_steps(algorithm, color_format1, color_format2, is_fai
function = Function(model, [parameter_a], "TestFunction")
layout1 = ov.Layout("NCWH")
layout2 = ov.Layout("NCHW")
custom_processor = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout(layout1)
.set_color_format(color_format1, []))
.preprocess(PreProcessSteps()
.mean(1.)
.resize(algorithm, 3, 3)
.convert_layout(layout2)
.convert_color(color_format2)))
custom_processor = PrePostProcessor(function)
inp = custom_processor.input()
inp.tensor().set_layout(layout1).set_color_format(color_format1, [])
inp.preprocess().mean(1.).resize(algorithm, 3, 3).convert_layout(layout2).convert_color(color_format2)
if is_failing:
with pytest.raises(RuntimeError) as e:
function = custom_processor.build()
@ -253,20 +257,13 @@ def test_ngraph_preprocess_postprocess_layout():
layout1 = ov.Layout("NCWH")
layout2 = ov.Layout("NCHW")
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout(layout1))
.preprocess(PreProcessSteps()
.mean(1.)
.convert_layout(layout2)
.reverse_channels()
)
) \
.output(OutputInfo()
.postprocess(PostProcessSteps()
.convert_layout([0, 1, 2, 3]))) \
.build()
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout1)
inp.preprocess().mean(1.).convert_layout(layout2).reverse_channels()
out = p.output()
out.postprocess().convert_layout([0, 1, 2, 3])
function = p.build()
input_data = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]).astype(np.float32)
expected_output = np.array([[[[0, 3, 6], [1, 4, 7], [2, 5, 8]]]]).astype(np.float32)
@ -284,16 +281,11 @@ def test_ngraph_preprocess_reverse_channels():
function = Function(model, [parameter_a], "TestFunction")
layout1 = ov.Layout("NCWH")
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout(layout1))
.preprocess(PreProcessSteps()
.mean(1.)
.reverse_channels()
)
) \
.build()
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout1)
inp.preprocess().mean(1.).reverse_channels()
function = p.build()
input_data = np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)
expected_output = np.array([[[[4, 5], [6, 7]], [[0, 1], [2, 3]]]]).astype(np.float32)
@ -312,15 +304,11 @@ def test_ngraph_preprocess_resize_algorithm():
resize_alg = ResizeAlgorithm.RESIZE_CUBIC
layout1 = ov.Layout("NCWH")
function = PrePostProcessor(function)\
.input(InputInfo()
.tensor(InputTensorInfo()
.set_layout(layout1))
.preprocess(PreProcessSteps()
.mean(1.)
.resize(resize_alg, 3, 3))
)\
.build()
p = PrePostProcessor(function)
inp = p.input()
inp.tensor().set_layout(layout1)
inp.preprocess().mean(1.).resize(resize_alg, 3, 3)
function = p.build()
input_data = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]).astype(np.float32)
expected_output = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.float32)
@ -398,21 +386,11 @@ def test_ngraph_preprocess_model():
def custom_preprocess(output: Output):
return ops.abs(output)
function = PrePostProcessor(function) \
.input(InputInfo(1)
.preprocess(PreProcessSteps()
.convert_element_type(Type.f32)
.scale(0.5)
)
) \
.input(InputInfo(0)
.preprocess(PreProcessSteps()
.convert_element_type(Type.f32)
.mean(5.))) \
.output(OutputInfo(0)
.postprocess(PostProcessSteps()
.custom(custom_preprocess))) \
.build()
p = PrePostProcessor(function)
p.input(1).preprocess().convert_element_type(Type.f32).scale(0.5)
p.input(0).preprocess().convert_element_type(Type.f32).mean(5.)
p.output(0).postprocess().custom(custom_preprocess)
function = p.build()
input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.float32)
expected_output = np.array([[[2, 1], [4, 7]], [[10, 13], [16, 19]]]).astype(np.float32)