[Preprocess] InputTensorInfo::set_from implementation (#10839)

* InputTensorInfo::from implementation

If the user's application already has an `ov::runtime::Tensor` object created,
its basic characteristics (shape, precision) can be reused for the input via the InputTensorInfo::from method
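
A minimal sketch of the intended usage (using the final `set_from` name; the model path, tensor shape, and variable names are placeholders, not part of this change):

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");  // placeholder path

        // Tensor that already exists in the application; its type/shape should drive preprocessing.
        auto input_tensor = ov::Tensor(ov::element::u8, ov::Shape{1, 480, 640, 3});

        ov::preprocess::PrePostProcessor ppp(model);
        // Previously the characteristics had to be copied by hand:
        //   ppp.input().tensor().set_element_type(input_tensor.get_element_type());
        //   ppp.input().tensor().set_shape(input_tensor.get_shape());
        // set_from takes both element type and shape from the tensor in one call:
        ppp.input().tensor().set_from(input_tensor);
        model = ppp.build();
        return 0;
    }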

* Rename 'from' to 'set_from', as in Python the 'from' keyword is used for importing modules
Python bindings: set_from() accepts both an ov.Tensor and a numpy array

* Style fix (quotes)

* Apply suggestions from code review

Co-authored-by: Ilya Churaev <ilyachur@gmail.com>

* Fix code style

* Use set_from in hello_classification CPP sample

Co-authored-by: Ilya Churaev <ilyachur@gmail.com>
Mikhail Nosov 2022-03-14 18:02:51 +03:00 committed by GitHub
parent 4c4581940a
commit 72fe6082ea
8 changed files with 120 additions and 19 deletions

View File

@@ -66,7 +66,6 @@ int tmain(int argc, tchar* argv[]) {
     // just wrap image data by ov::Tensor without allocating of new memory
     ov::Tensor input_tensor = ov::Tensor(input_type, input_shape, input_data.get());
-    const ov::Shape tensor_shape = input_tensor.get_shape();
     const ov::Layout tensor_layout{"NHWC"};

     // -------- Step 4. Configure preprocessing --------
@@ -75,15 +74,9 @@ int tmain(int argc, tchar* argv[]) {
     // 1) Set input tensor information:
     // - input() provides information about a single model input
-    // - precision of tensor is supposed to be 'u8'
+    // - reuse precision and shape from already available `input_tensor`
     // - layout of data is 'NHWC'
-    // - set static spatial dimensions to input tensor to resize from
-    ppp.input()
-        .tensor()
-        .set_element_type(ov::element::u8)
-        .set_layout(tensor_layout)
-        .set_spatial_static_shape(tensor_shape[ov::layout::height_idx(tensor_layout)],
-                                  tensor_shape[ov::layout::width_idx(tensor_layout)]);
+    ppp.input().tensor().set_from(input_tensor).set_layout(tensor_layout);

     // 2) Adding explicit preprocessing steps:
     // - convert layout to 'NCHW' (from 'NHWC' specified above at tensor layout)
     // - apply linear resize from tensor spatial dims to model spatial dims
@@ -94,7 +87,7 @@ int tmain(int argc, tchar* argv[]) {
     // - precision of tensor is supposed to be 'f32'
     ppp.output().tensor().set_element_type(ov::element::f32);

-    // 6) Apply preprocessing modifing the original 'model'
+    // 6) Apply preprocessing modifying the original 'model'
     model = ppp.build();

     // -------- Step 5. Loading a model to the device --------

View File

@@ -54,13 +54,11 @@ def main():
     # 1) Set input tensor information:
     # - input() provides information about a single model input
-    # - precision of tensor is supposed to be 'u8'
+    # - reuse precision and shape from already available `input_tensor`
     # - layout of data is 'NHWC'
-    # - set static spatial dimensions to input tensor to resize from
     ppp.input().tensor() \
-        .set_element_type(Type.u8) \
-        .set_layout(Layout('NHWC')) \
-        .set_spatial_static_shape(h, w)  # noqa: ECE001, N400
+        .set_from(input_tensor) \
+        .set_layout(Layout('NHWC'))  # noqa: ECE001, N400

     # 2) Adding explicit preprocessing steps:
     # - apply linear resize from tensor spatial dims to model spatial dims
@@ -73,7 +71,7 @@ def main():
     # - precision of tensor is supposed to be 'f32'
     ppp.output().tensor().set_element_type(Type.f32)

-    # 5) Apply preprocessing modifing the original 'model'
+    # 5) Apply preprocessing modifying the original 'model'
     model = ppp.build()

     # --------------------------- Step 5. Loading model to the device -----------------------------------------------------

View File

@@ -295,6 +295,21 @@ static void regclass_graph_InputTensorInfo(py::module m) {
             return &self.set_memory_type(memory_type);
         },
         py::arg("memory_type"));
+    info.def(
+        "set_from",
+        [](ov::preprocess::InputTensorInfo& self, const ov::Tensor& tensor) {
+            return &self.set_from(tensor);
+        },
+        py::arg("runtime_tensor"));
+
+    info.def(
+        "set_from",
+        [](ov::preprocess::InputTensorInfo& self, py::array& numpy_array) {
+            // Convert to contiguous array if not already C-style.
+            return &self.set_from(Common::tensor_from_numpy(numpy_array, false));
+        },
+        py::arg("runtime_tensor"));
 }

 static void regclass_graph_OutputTensorInfo(py::module m) {

View File

@@ -237,6 +237,60 @@ def test_ngraph_preprocess_set_shape():
     assert np.equal(output, expected_output).all()


+def test_ngraph_preprocess_set_from_tensor():
+    shape = [1, 224, 224, 3]
+    inp_shape = [1, 480, 640, 3]
+    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
+    parameter_a.set_layout(ov.Layout("NHWC"))
+    model = parameter_a
+    function = Model(model, [parameter_a], "TestFunction")
+
+    input_data = ov.Tensor(Type.i32, inp_shape)
+    p = PrePostProcessor(function)
+    inp = p.input()
+    inp.tensor().set_from(input_data)
+    inp.preprocess().resize(ResizeAlgorithm.RESIZE_LINEAR)
+    function = p.build()
+    assert function.input().shape == ov.Shape(inp_shape)
+    assert function.input().element_type == Type.i32
+    assert function.output().shape == ov.Shape(shape)
+    assert function.output().element_type == Type.f32
+
+
+def test_ngraph_preprocess_set_from_np_infer():
+    shape = [1, 1, 1]
+    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
+    model = parameter_a
+    function = Model(model, [parameter_a], "TestFunction")
+
+    @custom_preprocess_function
+    def custom_crop(out_node: Output):
+        start = ops.constant(np.array([1, 1, 1]), dtype=np.int32)
+        stop = ops.constant(np.array([2, 2, 2]), dtype=np.int32)
+        step = ops.constant(np.array([1, 1, 1]), dtype=np.int32)
+        axis = ops.constant(np.array([0, 1, 2]), dtype=np.int32)
+        return ops.slice(out_node, start, stop, step, axis)
+
+    input_data = np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
+                           [[9, 10, 11], [12, 13, 14], [15, 16, 17]],
+                           [[18, 19, 20], [21, 22, 23], [24, 25, 26]]]).astype(np.int32)
+
+    p = PrePostProcessor(function)
+    inp = p.input()
+    inp.tensor().set_from(input_data)
+    inp.preprocess().convert_element_type().custom(custom_crop)
+    function = p.build()
+    assert function.input().shape == ov.Shape([3, 3, 3])
+    assert function.input().element_type == Type.i32
+
+    expected_output = np.array([[[13]]]).astype(np.float32)
+    runtime = get_runtime()
+    computation = runtime.computation(function)
+    output = computation(input_data)
+    assert np.equal(output, expected_output).all()
+
+
 def test_ngraph_preprocess_set_memory_type():
     shape = [1, 1, 1]
     parameter_a = ops.parameter(shape, dtype=np.int32, name="A")

View File

@@ -9,6 +9,7 @@
 #include "openvino/core/layout.hpp"
 #include "openvino/core/preprocess/color_format.hpp"
 #include "openvino/core/type/element_type.hpp"
+#include "openvino/runtime/tensor.hpp"

 namespace ov {
 namespace preprocess {
@@ -118,6 +119,20 @@ public:
     ///
     /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
     InputTensorInfo& set_shape(const ov::PartialShape& shape);
+
+    /// \brief Helper function to reuse element type and shape from user's created tensor. Use this only in case if
+    /// input tensor is already known and available before. Overwrites previously set element type & shape via
+    /// `set_element_type` and `set_shape`. Tensor's memory type is not reused, so if `runtime_tensor` represents remote
+    /// tensor with particular memory type - you should still specify appropriate memory type manually using
+    /// `set_memory_type`
+    ///
+    /// \note As for `InputTensorInfo::set_shape`, this method shall not be used together with methods
+    /// 'set_spatial_dynamic_shape' and 'set_spatial_static_shape', otherwise ov::AssertFailure exception will be thrown
+    ///
+    /// \param runtime_tensor User's created tensor.
+    ///
+    /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
+    InputTensorInfo& set_from(const ov::Tensor& runtime_tensor);
 };

 } // namespace preprocess
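
Since the note above rules out mixing this helper with the spatial-shape setters, a minimal sketch of that restriction follows (the function and variable names are illustrative, not part of the change):

    #include <openvino/openvino.hpp>

    // `model` is any model with at least one input; `user_tensor` is the application's existing tensor.
    void configure_input(const std::shared_ptr<ov::Model>& model, const ov::Tensor& user_tensor) {
        ov::preprocess::PrePostProcessor ppp(model);
        ppp.input().tensor().set_from(user_tensor);  // copies element type and shape

        // The spatial setters conflict with the shape already copied by set_from;
        // calling one of them now is expected to throw ov::AssertFailure:
        // ppp.input().tensor().set_spatial_static_shape(480, 640);

        // Memory type is not copied from the tensor; for a remote tensor it still has to be
        // set explicitly via set_memory_type(...).
        auto configured = ppp.build();
        (void)configured;
    }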

View File

@@ -249,6 +249,11 @@ InputTensorInfo& InputTensorInfo::set_shape(const PartialShape& shape) {
     return *this;
 }

+InputTensorInfo& InputTensorInfo::set_from(const ov::Tensor& runtime_tensor) {
+    m_impl->set_from(runtime_tensor);
+    return *this;
+}
+
 // --------------------- PreProcessSteps ------------------

 PreProcessSteps::PreProcessSteps() : m_impl(std::unique_ptr<PreProcessStepsImpl>(new PreProcessStepsImpl())) {}

View File

@@ -135,14 +135,15 @@ public:
     }

     void set_spatial_dynamic_shape() {
-        OPENVINO_ASSERT(!m_shape_set, "'set_spatial_dynamic_shape' and 'set_shape' shall not be used together");
+        OPENVINO_ASSERT(!m_shape_set,
+                        "'set_spatial_dynamic_shape' and 'set_shape/set_from' shall not be used together");
         m_spatial_shape_set = true;
         m_spatial_width = -1;
         m_spatial_height = -1;
     }

     void set_spatial_static_shape(size_t height, size_t width) & {
-        OPENVINO_ASSERT(!m_shape_set, "'set_spatial_static_shape' and 'set_shape' shall not be used together");
+        OPENVINO_ASSERT(!m_shape_set, "'set_spatial_static_shape' and 'set_shape/set_from' shall not be used together");
         m_spatial_shape_set = true;
         m_spatial_height = static_cast<int>(height);
         m_spatial_width = static_cast<int>(width);
@@ -193,11 +194,16 @@ public:
     void set_shape(const PartialShape& shape) {
         OPENVINO_ASSERT(
             !m_spatial_shape_set,
-            "'set_spatial_static_shape', 'set_spatial_dynamic_shape', 'set_shape' shall not be used together");
+            "'set_spatial_static_shape', 'set_spatial_dynamic_shape', 'set_shape/set_from' shall not be used together");
         m_shape = shape;
         m_shape_set = true;
     }

+    void set_from(const ov::runtime::Tensor& runtime_tensor) {
+        set_shape(runtime_tensor.get_shape());
+        set_element_type(runtime_tensor.get_element_type());
+    }
+
     bool is_shape_set() const {
         return m_shape_set;
     }

View File

@@ -1256,6 +1256,21 @@ TEST(pre_post_process, preprocess_memory_type_not_cleared) {
     EXPECT_EQ(var0, "abc");
 }

+TEST(pre_post_process, preprocess_from) {
+    auto t = ov::Tensor(element::u8, {1, 480, 640, 3});
+    auto f = create_simple_function(element::f32, Shape{1, 224, 224, 3});
+    ov::layout::set_layout(f->input(), "NHWC");
+
+    auto p = PrePostProcessor(f);
+    p.input().tensor().set_from(t);
+    p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
+    f = p.build();
+
+    EXPECT_EQ(f->input().get_element_type(), element::u8);
+    EXPECT_EQ(f->input().get_shape(), (Shape{1, 480, 640, 3}));
+    EXPECT_EQ(f->output().get_element_type(), element::f32);
+    EXPECT_EQ(f->output().get_shape(), (Shape{1, 224, 224, 3}));
+}
+
 TEST(pre_post_process, preprocess_crop) {
     auto model = create_n_inputs<1>(element::f32, PartialShape::dynamic());
     auto p = PrePostProcessor(model);