Squashed commit of the following: (#7930)

commit e692037384525e24f4f88d2b10fd5f5c09881d8a
Merge: a0ad24b16 82f8f19d1
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Wed Oct 13 15:06:20 2021 +0300

    Merge remote-tracking branch 'upstream/master' into ov20/remove_tensor_names

    # Conflicts:
    #	inference-engine/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp

commit a0ad24b16ab6ef7cf37dedb04084813178f0d627
Merge: 3c1f7067f db527fff4
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Wed Oct 13 13:45:39 2021 +0300

    Merge remote-tracking branch 'upstream/master' into ov20/remove_tensor_names

commit 3c1f7067f01634378cf66eee996d8793b635a6f2
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Wed Oct 13 13:45:13 2021 +0300

    Remove ignoring friendly names while comparing functions

commit f31e018d9a4694e83a3f12e685d8e90a9aca9045
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 23:50:20 2021 +0300

    Fix macos test

commit 2e3d0ceced3bde893e1e8237f7769c94bd193531
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 20:06:06 2021 +0300

    Fixes after merge

commit 5f047070af7c71a75f5ec019a146e17b3f95c062
Merge: 1568c9723 c323775f2
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 18:59:21 2021 +0300

    Merge remote-tracking branch 'upstream/master' into ov20/remove_tensor_names

    # Conflicts:
    #	inference-engine/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp
    #	inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
    #	ngraph/core/src/preprocess/pre_post_process.cpp
    #	ngraph/core/src/preprocess/preprocess_steps_impl.cpp
    #	ngraph/core/src/preprocess/preprocess_steps_impl.hpp
    #	ngraph/test/util/graph_comparator.cpp

commit 1568c97238c206a43cd8b14b6aba49fcee828386
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 13:56:48 2021 +0300

    Throw an error if 'InputInfo' is specified for the same parameter twice.

commit d1830ba61b750b12484322c97261c7062551f798
Merge: 4f01b396b d79020457
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 13:34:32 2021 +0300

    Merge remote-tracking branch 'upstream/master' into ov20/remove_tensor_names

commit 4f01b396ba4b68d88d583092a38836754e78531c
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 13:34:19 2021 +0300

    Insert multi-plane parameters into new parameters vector (don't append them)

commit 70f85e5f351a37f2b79cff118399778c55136845
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Tue Oct 12 00:26:43 2021 +0300

    Fixed failed CPU/GPU test, created skipped test to clearly reproduce the problem
    Don't generate friendly and tensor names for sub-planes. Test is updated
    Keep the order of created parameters and results after pre and post processing
    Added tests for params/results order

commit 8b2cbf6db6646f6b2a01fc9e75726b682aa87fc2
Merge: 6050afbce 6d322722c
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Mon Oct 11 17:55:25 2021 +0300

    Merge remote-tracking branch 'upstream/master' into ov20/remove_tensor_names

commit 6050afbce256f21430322ec7b0a38105e3f06066
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Sun Oct 10 23:54:40 2021 +0300

    IE reader: remove unnecessary convert_layout()

commit 0f43133f6dfddf41233835678872c9a80b631565
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Sun Oct 10 23:33:29 2021 +0300

    Update IE tests

commit 75afe69ccf36b9764dc9285d77fe1a65ae4146b4
Merge: c86366577 67cfc9beb
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Fri Oct 8 22:52:17 2021 +0300

    Merge commit '67cfc9beb5bb0c66916c91ab5820dc25ad164a70' of https://github.com/openvinotoolkit/openvino into ov20/remove_tensor_names

commit c86366577ede8491ed722f9d048b7556d3266c68
Author: Michael Nosov <mikhail.nosov@intel.com>
Date:   Fri Oct 8 22:26:17 2021 +0300

    First commit (IE func tests are failed)

commit 67cfc9beb5bb0c66916c91ab5820dc25ad164a70
Author: Ilya Lavrenov <ilya.lavrenov@intel.com>
Date:   Fri Oct 8 16:09:11 2021 +0300

    Removed explicit preprocessing steps
This commit is contained in:
Mikhail Nosov 2021-10-14 17:54:41 +03:00 committed by GitHub
parent 686c7fd57f
commit d21572d7cb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 307 additions and 219 deletions

View File

@ -278,11 +278,7 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
for (size_t i = 0; i < inputs.size(); ++i) { for (size_t i = 0; i < inputs.size(); ++i) {
const auto ngraph_type = inputs[i].get_element_type(); const auto ngraph_type = inputs[i].get_element_type();
const auto legacy_type = details::toLegacyType(ngraph_type, true); const auto legacy_type = details::toLegacyType(ngraph_type, true);
prepost.input(ov::preprocess::InputInfo(i) prepost.input(ov::preprocess::InputInfo(i).tensor(InputTensorInfo().set_element_type(legacy_type)));
.tensor(InputTensorInfo().set_element_type(legacy_type))
.preprocess(PreProcessSteps()
// TODO: remove explicit type
.convert_element_type(ngraph_type)));
} }
// in order to support the following scenarios for IR v10 cases: // in order to support the following scenarios for IR v10 cases:
@ -313,9 +309,7 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
const auto ngraph_type = outputs[i].get_element_type(); const auto ngraph_type = outputs[i].get_element_type();
const auto legacy_type = details::toLegacyType(ngraph_type, false); const auto legacy_type = details::toLegacyType(ngraph_type, false);
prepost.output(OutputInfo(i) prepost.output(OutputInfo(i).tensor(OutputTensorInfo().set_element_type(legacy_type)));
.postprocess(PostProcessSteps().convert_element_type())
.tensor(OutputTensorInfo().set_element_type(legacy_type)));
} }
function = prepost.build(function); function = prepost.build(function);
@ -350,18 +344,10 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
networkLayout << old_api_transpose_args[i]; networkLayout << old_api_transpose_args[i];
} }
PreProcessSteps steps;
// TODO: remove explicit type
steps.convert_element_type(parameter->get_element_type());
// TODO: move steps directly to builder once we allow Layout() -> Layout transpose
if (!old_api_transpose_args.empty())
steps.convert_layout();
prepost.input( prepost.input(
ov::preprocess::InputInfo(i) ov::preprocess::InputInfo(i)
.tensor( .tensor(
InputTensorInfo().set_element_type(old_api_type).set_layout(ov::Layout(tensorLayout.str()))) InputTensorInfo().set_element_type(old_api_type).set_layout(ov::Layout(tensorLayout.str())))
.preprocess(std::move(steps))
.network(InputNetworkInfo().set_layout(ov::Layout(networkLayout.str())))); .network(InputNetworkInfo().set_layout(ov::Layout(networkLayout.str()))));
// Set version to 10 // Set version to 10
@ -395,7 +381,6 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
prepost.output(OutputInfo(i) prepost.output(OutputInfo(i)
.network(OutputNetworkInfo().set_layout(ov::Layout(networkLayout.str()))) .network(OutputNetworkInfo().set_layout(ov::Layout(networkLayout.str())))
.postprocess(PostProcessSteps().convert_layout().convert_element_type())
.tensor(OutputTensorInfo() .tensor(OutputTensorInfo()
.set_element_type(old_api_type) .set_element_type(old_api_type)
.set_layout(ov::Layout(tensorLayout.str())))); .set_layout(ov::Layout(tensorLayout.str()))));

View File

@ -160,6 +160,7 @@ TEST_F(RTInfoDeserialization, NodeV10) {
param->set_friendly_name("in1"); param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor", param->get_friendly_name()}); param->get_output_tensor(0).set_names({"input_tensor", param->get_friendly_name()});
// TODO: No guarantee that exactly 'Convert' will be added
auto convert_param = std::make_shared<opset8::Convert>(param, ngraph::element::f16); auto convert_param = std::make_shared<opset8::Convert>(param, ngraph::element::f16);
auto round = std::make_shared<opset8::Round>(convert_param, auto round = std::make_shared<opset8::Round>(convert_param,
@ -316,8 +317,8 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
param->get_output_tensor(0).set_names({"input_tensor", param->get_friendly_name()}); param->get_output_tensor(0).set_names({"input_tensor", param->get_friendly_name()});
auto sum = std::make_shared<opset8::Add>(param, param); auto sum = std::make_shared<opset8::Add>(param, param);
sum->set_friendly_name("sum");
// TODO: No guarantee that exactly 'convert' will be added by post-processing
auto convert_result = std::make_shared<opset8::Convert>(sum, ngraph::element::i32); auto convert_result = std::make_shared<opset8::Convert>(sum, ngraph::element::i32);
convert_result->set_friendly_name("sum"); convert_result->set_friendly_name("sum");
convert_result->get_output_tensor(0).set_names({"output_tensor", convert_result->get_friendly_name()}); convert_result->get_output_tensor(0).set_names({"output_tensor", convert_result->get_friendly_name()});
@ -472,6 +473,7 @@ TEST_F(RTInfoDeserialization, NodeV11) {
param->set_friendly_name("in1"); param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"}); param->get_output_tensor(0).set_names({"input_tensor"});
// TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit pre-processing
auto convert_param = std::make_shared<opset8::Convert>(param, ngraph::element::f32); auto convert_param = std::make_shared<opset8::Convert>(param, ngraph::element::f32);
auto constant_param = std::make_shared<opset8::Constant>(ngraph::element::i64, auto constant_param = std::make_shared<opset8::Constant>(ngraph::element::i64,
@ -485,16 +487,16 @@ TEST_F(RTInfoDeserialization, NodeV11) {
round->get_rt_info()[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] = round->get_rt_info()[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("Round1,Round2")); std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("Round1,Round2"));
// TODO: No guarantee that exactly 'convert, then transpose' will be added by implicit post-processing
auto convert_result = std::make_shared<opset8::Convert>(round, type);
auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::i64, auto constant_result = std::make_shared<opset8::Constant>(ngraph::element::i64,
ngraph::Shape{4}, ngraph::Shape{4},
std::vector<int64_t>{0, 3, 1, 2}); std::vector<int64_t>{0, 3, 1, 2});
auto transpose_result = std::make_shared<opset8::Transpose>(round, constant_result); auto transpose_result = std::make_shared<opset8::Transpose>(convert_result, constant_result);
transpose_result->set_friendly_name("Round");
transpose_result->get_output_tensor(0).set_names({"output_tensor"});
auto convert_result = std::make_shared<opset8::Convert>(transpose_result, type); auto result = std::make_shared<opset8::Result>(transpose_result);
convert_result->set_friendly_name("Round");
convert_result->get_output_tensor(0).set_names({"output_tensor"});
auto result = std::make_shared<opset8::Result>(convert_result);
result->set_friendly_name("output"); result->set_friendly_name("output");
auto f_10_ref = auto f_10_ref =
@ -508,7 +510,9 @@ TEST_F(RTInfoDeserialization, NodeV11) {
check_version(f_10_core, 10); check_version(f_10_core, 10);
ASSERT_GT(cnn_core.getInputsInfo().count("in1"), 0);
EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getInputsInfo()["in1"]->getPrecision()); EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getInputsInfo()["in1"]->getPrecision());
ASSERT_GT(cnn_core.getOutputsInfo().count("Round"), 0);
EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getOutputsInfo()["Round"]->getPrecision()); EXPECT_EQ(InferenceEngine::Precision::FP32, cnn_core.getOutputsInfo()["Round"]->getPrecision());
const auto fc = FunctionsComparator::with_default() const auto fc = FunctionsComparator::with_default()

View File

@ -121,9 +121,8 @@ std::vector<std::string> disabledTestPatterns() {
R"(smoke_PrePostProcess.*resize_linear_nhwc.*)", R"(smoke_PrePostProcess.*resize_linear_nhwc.*)",
// Issue 67214 // Issue 67214
R"(smoke_PrePostProcess.*resize_and_convert_layout_i8.*)", R"(smoke_PrePostProcess.*resize_and_convert_layout_i8.*)",
// Issue 67910
// TODO: R"(.*smoke_PrePostProcess.*two_inputs_trivial.*)",
R"(smoke_PrePostProcess.*two_inputs_basic_Device.*)"
}; };
#define FIX_62820 0 #define FIX_62820 0

View File

@ -74,5 +74,7 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*smoke_PrePostProcess_GPU.*convert_element_type_and_mean.*)", R"(.*smoke_PrePostProcess_GPU.*convert_element_type_and_mean.*)",
// TODO: Issue 67408 // TODO: Issue 67408
R"(.*smoke_LSTMSequenceCommonClip.*LSTMSequenceTest.*CompareWithRefs.*)", R"(.*smoke_LSTMSequenceCommonClip.*LSTMSequenceTest.*CompareWithRefs.*)",
// TODO: Issue 67910
R"(.*smoke_PrePostProcess_GPU.*two_inputs_trivial.*)",
}; };
} }

View File

@ -17,6 +17,8 @@ inline std::vector<preprocess_func> GPU_smoke_preprocess_functions() {
preprocess_func(mean_only, "mean_only", 0.01f), preprocess_func(mean_only, "mean_only", 0.01f),
preprocess_func(scale_only, "scale_only", 0.01f), preprocess_func(scale_only, "scale_only", 0.01f),
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f), preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
}; };
} }

View File

@ -23,13 +23,15 @@ inline std::shared_ptr<Function> create_preprocess_1input(element::Type type,
data1->set_friendly_name("input1"); data1->set_friendly_name("input1");
data1->output(0).get_tensor().set_names({"input1"}); data1->output(0).get_tensor().set_names({"input1"});
std::shared_ptr<op::v0::Result> res; std::shared_ptr<op::v0::Result> res;
auto op1 = std::make_shared<op::v0::Abs>(data1);
if (type == element::f32) { if (type == element::f32) {
res = std::make_shared<op::v0::Result>(data1); res = std::make_shared<op::v0::Result>(op1);
} else { } else {
auto convert = std::make_shared<op::v0::Convert>(data1, element::f32); auto convert = std::make_shared<op::v0::Convert>(data1, element::f32);
res = std::make_shared<op::v0::Result>(convert); res = std::make_shared<op::v0::Result>(op1);
} }
res->set_friendly_name("Result"); res->set_friendly_name("Result1");
res->output(0).get_tensor().set_names({"Result1"});
return std::make_shared<Function>(ResultVector{res}, ParameterVector{data1}); return std::make_shared<Function>(ResultVector{res}, ParameterVector{data1});
} }
@ -42,17 +44,37 @@ inline std::shared_ptr<Function> create_preprocess_2inputs(element::Type type,
data2->set_friendly_name("input2"); data2->set_friendly_name("input2");
data2->output(0).get_tensor().set_names({"input2"}); data2->output(0).get_tensor().set_names({"input2"});
std::shared_ptr<op::v0::Result> res1, res2; std::shared_ptr<op::v0::Result> res1, res2;
auto op1 = std::make_shared<op::v0::Abs>(data1);
auto op2 = std::make_shared<op::v0::Abs>(data2);
if (type == element::f32) { if (type == element::f32) {
res1 = std::make_shared<op::v0::Result>(data1); res1 = std::make_shared<op::v0::Result>(op1);
res2 = std::make_shared<op::v0::Result>(data2); res2 = std::make_shared<op::v0::Result>(op2);
} else { } else {
auto convert1 = std::make_shared<op::v0::Convert>(data1, element::f32); auto convert1 = std::make_shared<op::v0::Convert>(op1, element::f32);
res1 = std::make_shared<op::v0::Result>(convert1); res1 = std::make_shared<op::v0::Result>(convert1);
auto convert2 = std::make_shared<op::v0::Convert>(data2, element::f32); auto convert2 = std::make_shared<op::v0::Convert>(op2, element::f32);
res2 = std::make_shared<op::v0::Result>(convert2); res2 = std::make_shared<op::v0::Result>(convert2);
} }
res1->set_friendly_name("Result1"); res1->set_friendly_name("Result1");
res1->output(0).get_tensor().set_names({"Result1"});
res2->set_friendly_name("Result2"); res2->set_friendly_name("Result2");
res2->output(0).get_tensor().set_names({"Result2"});
return std::make_shared<Function>(ResultVector{res1, res2}, ParameterVector{data1, data2});
}
inline std::shared_ptr<Function> create_preprocess_2inputs_trivial() {
auto data1 = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 3, 1, 1});
auto data2 = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 3, 1, 1});
data1->set_friendly_name("input1");
data1->output(0).get_tensor().set_names({"input1"});
data2->set_friendly_name("input2");
data2->output(0).get_tensor().set_names({"input2"});
auto res1 = std::make_shared<op::v0::Result>(data1);
auto res2 = std::make_shared<op::v0::Result>(data2);
return std::make_shared<Function>(ResultVector{res1, res2}, ParameterVector{data1, data2}); return std::make_shared<Function>(ResultVector{res1, res2}, ParameterVector{data1, data2});
} }
@ -184,6 +206,13 @@ inline std::shared_ptr<Function> two_inputs_basic() {
return function; return function;
} }
inline std::shared_ptr<Function> two_inputs_trivial() {
using namespace ov::preprocess;
auto function = create_preprocess_2inputs_trivial();
function = PrePostProcessor().input(InputInfo(1).preprocess(PreProcessSteps().mean(1.f).scale(2.0f))).build(function);
return function;
}
inline std::shared_ptr<Function> reuse_network_layout() { inline std::shared_ptr<Function> reuse_network_layout() {
using namespace ov::preprocess; using namespace ov::preprocess;
auto function = create_preprocess_1input(element::f32, PartialShape{4, 3, 2, 1}); auto function = create_preprocess_1input(element::f32, PartialShape{4, 3, 2, 1});
@ -340,6 +369,7 @@ inline std::vector<preprocess_func> generic_preprocess_functions() {
preprocess_func(custom_preprocessing, "custom_preprocessing", 0.01f), preprocess_func(custom_preprocessing, "custom_preprocessing", 0.01f),
preprocess_func(lvalues_multiple_ops, "lvalues_multiple_ops", 0.01f), preprocess_func(lvalues_multiple_ops, "lvalues_multiple_ops", 0.01f),
preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f), preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
preprocess_func(two_inputs_trivial, "two_inputs_trivial", 0.01f),
preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f), preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f),
preprocess_func(tensor_layout, "tensor_layout", 0.01f), preprocess_func(tensor_layout, "tensor_layout", 0.01f),
preprocess_func(resize_linear, "resize_linear", 0.01f), preprocess_func(resize_linear, "resize_linear", 0.01f),

View File

@ -11,7 +11,8 @@ namespace preprocess {
enum class ColorFormat { enum class ColorFormat {
UNDEFINED, UNDEFINED,
NV12_SINGLE_PLANE, // Image in NV12 format as single tensor NV12_SINGLE_PLANE, // Image in NV12 format as single tensor
NV12_TWO_PLANES, // Image in NV12 format represented as separate tensors for Y and UV planes /// \brief Image in NV12 format represented as separate tensors for Y and UV planes.
NV12_TWO_PLANES,
RGB, RGB,
BGR BGR
}; };

View File

@ -123,16 +123,17 @@ public:
/// ///
/// In general way, some formats support multi-plane input, e.g. NV12 image can be represented as 2 separate tensors /// In general way, some formats support multi-plane input, e.g. NV12 image can be represented as 2 separate tensors
/// (planes): Y plane and UV plane. set_color_format API also allows to set sub_names for such parameters for /// (planes): Y plane and UV plane. set_color_format API also allows to set sub_names for such parameters for
/// convenient usage of plane parameters. /// convenient usage of plane parameters. During build stage, new parameters for each plane will be inserted to the
/// place of original parameter. This means that all parameters located after will shift their positions accordingly
/// (e.g. {param1, param2} will become {param1/Y, param1/UV, param2})
/// ///
/// This version allows chaining for Lvalue objects. /// This version allows chaining for Lvalue objects.
/// ///
/// \param format Color format of input image. /// \param format Color format of input image.
/// ///
/// \param sub_names Optional list of sub-names assigned for each plane (e.g. {"Y", "UV"}). If not specified, /// \param sub_names Optional list of sub-names assigned for each plane (e.g. {"Y", "UV"}). If specified, number of
/// sub-names for plane parameters are auto-generated, exact names auto-generation rules depend on specific color /// sub-names shall match with number of planes. If not specified, friendly name and tensor name for plane
/// format, and client's code shall not rely on these rules. It is not allowed to specify sub-names for single-plane /// parameters will be empty. It is not allowed to specify sub-names for single-plane inputs.
/// inputs, also is specified, number of sub-names shall match with number of planes.
/// ///
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner. /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo& set_color_format(const ov::preprocess::ColorFormat& format, InputTensorInfo& set_color_format(const ov::preprocess::ColorFormat& format,
@ -142,16 +143,17 @@ public:
/// ///
/// In general way, some formats support multi-plane input, e.g. NV12 image can be represented as 2 separate tensors /// In general way, some formats support multi-plane input, e.g. NV12 image can be represented as 2 separate tensors
/// (planes): Y plane and UV plane. set_color_format API also allows to set sub_names for such parameters for /// (planes): Y plane and UV plane. set_color_format API also allows to set sub_names for such parameters for
/// convenient usage of plane parameters. /// convenient usage of plane parameters. During build stage, new parameters for each plane will be inserted to the
/// place of original parameter. This means that all parameters located after will shift their positions accordingly
/// (e.g. {param1, param2} will become {param1/Y, param1/UV, param2})
/// ///
/// This version allows chaining for Rvalue objects. /// This version allows chaining for Rvalue objects.
/// ///
/// \param format Color format of input image. /// \param format Color format of input image.
/// ///
/// \param sub_names Optional list of sub-names assigned for each plane (e.g. {"Y", "UV"}). If not specified, /// \param sub_names Optional list of sub-names assigned for each plane (e.g. {"Y", "UV"}). If specified, number of
/// sub-names for plane parameters are auto-generated, exact names auto-generation rules depend on specific color /// sub-names shall match with number of planes. If not specified, friendly name and tensor name for plane
/// format, and client's code shall not rely on these rules. It is not allowed to specify sub-names for single-plane /// parameters will be empty. It is not allowed to specify sub-names for single-plane inputs.
/// inputs, also is specified, number of sub-names shall match with number of planes.
/// ///
/// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner. /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner.
InputTensorInfo&& set_color_format(const ov::preprocess::ColorFormat& format, InputTensorInfo&& set_color_format(const ov::preprocess::ColorFormat& format,

View File

@ -60,19 +60,11 @@ public:
return calculate_shape(plane_num, image_src_shape); return calculate_shape(plane_num, image_src_shape);
} }
std::string friendly_suffix(size_t plane_num) const {
OPENVINO_ASSERT(plane_num < planes_count(),
"Internal error: incorrect plane number specified for color format");
return calc_name_suffix(plane_num);
}
protected: protected:
virtual PartialShape calculate_shape(size_t plane_num, const PartialShape& image_shape) const { virtual PartialShape calculate_shape(size_t plane_num, const PartialShape& image_shape) const {
return image_shape; return image_shape;
} }
virtual std::string calc_name_suffix(size_t plane_num) const {
return {};
}
explicit ColorFormatInfo(ColorFormat format) : m_format(format) {} explicit ColorFormatInfo(ColorFormat format) : m_format(format) {}
ColorFormat m_format; ColorFormat m_format;
}; };
@ -127,12 +119,6 @@ protected:
} }
return result; return result;
} }
std::string calc_name_suffix(size_t plane_num) const override {
if (plane_num == 0) {
return "/Y";
}
return "/UV";
}
Layout default_layout() const override { Layout default_layout() const override {
return "NHWC"; return "NHWC";

View File

@ -13,6 +13,8 @@ namespace preprocess {
class FunctionGuard { class FunctionGuard {
std::shared_ptr<Function> m_function; std::shared_ptr<Function> m_function;
ParameterVector m_parameters; ParameterVector m_parameters;
ResultVector m_results;
std::vector<std::unordered_set<std::string>> m_result_tensors;
std::map<std::shared_ptr<op::v0::Parameter>, std::set<Input<Node>>> m_backup; std::map<std::shared_ptr<op::v0::Parameter>, std::set<Input<Node>>> m_backup;
bool m_done = false; bool m_done = false;
@ -22,6 +24,10 @@ public:
for (const auto& param : f->get_parameters()) { for (const auto& param : f->get_parameters()) {
m_backup.insert({param, param->output(0).get_target_inputs()}); m_backup.insert({param, param->output(0).get_target_inputs()});
} }
m_results = f->get_results();
for (const auto& result : m_results) {
m_result_tensors.push_back(result->get_default_output().get_tensor().get_names());
}
} }
virtual ~FunctionGuard() { virtual ~FunctionGuard() {
if (!m_done) { if (!m_done) {
@ -39,6 +45,18 @@ public:
} }
} }
m_function->add_parameters(m_parameters); m_function->add_parameters(m_parameters);
auto results = m_function->get_results();
// Remove results added by postprocessing
for (const auto& result : results) {
m_function->remove_result(result);
}
// Restore removed tensor names
for (size_t i = 0; i < m_results.size(); ++i) {
m_results[i]->get_default_output().get_tensor().set_names(m_result_tensors[i]);
}
m_function->add_results(m_results);
} catch (std::exception& ex) { } catch (std::exception& ex) {
// Stress condition, can't recover function to original state // Stress condition, can't recover function to original state
std::cerr << "Unrecoverable error occurred during preprocessing. Function is corrupted, exiting\n"; std::cerr << "Unrecoverable error occurred during preprocessing. Function is corrupted, exiting\n";

View File

@ -324,6 +324,7 @@ PrePostProcessor&& PrePostProcessor::output(OutputInfo&& builder) && {
std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function>& function) { std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function>& function) {
FunctionGuard guard(function); FunctionGuard guard(function);
std::tuple<std::unordered_set<std::string>, bool> existing_names{std::unordered_set<std::string>{}, false};
bool tensor_data_updated = false; bool tensor_data_updated = false;
for (const auto& input : m_impl->in_contexts) { for (const auto& input : m_impl->in_contexts) {
std::shared_ptr<op::v0::Parameter> param; std::shared_ptr<op::v0::Parameter> param;
@ -344,7 +345,8 @@ std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function
input->m_resolved_param = param; input->m_resolved_param = param;
} }
auto results = function->get_results(); auto results = function->get_results();
auto parameters = function->get_parameters(); auto parameters_list = std::list<std::shared_ptr<op::v0::Parameter>>(function->get_parameters().begin(),
function->get_parameters().end());
for (const auto& input : m_impl->in_contexts) { for (const auto& input : m_impl->in_contexts) {
auto param = input->m_resolved_param; auto param = input->m_resolved_param;
@ -396,17 +398,33 @@ std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function
std::vector<Output<Node>> nodes; std::vector<Output<Node>> nodes;
std::vector<std::shared_ptr<op::v0::Parameter>> new_params; std::vector<std::shared_ptr<op::v0::Parameter>> new_params;
// Create separate parameter for each plane. Shape and friendly name is based on color format // Create separate parameter for each plane. Shape is based on color format
for (size_t plane = 0; plane < color_info->planes_count(); plane++) { for (size_t plane = 0; plane < color_info->planes_count(); plane++) {
auto plane_shape = color_info->shape(plane, new_param_shape); auto plane_shape = color_info->shape(plane, new_param_shape);
auto plane_param = auto plane_param =
std::make_shared<op::v0::Parameter>(input->m_tensor_data->get_element_type(), plane_shape); std::make_shared<op::v0::Parameter>(input->m_tensor_data->get_element_type(), plane_shape);
if (plane < input->m_tensor_data->planes_sub_names().size()) { if (plane < input->m_tensor_data->planes_sub_names().size()) {
auto sub_name = std::string("/") + input->m_tensor_data->planes_sub_names()[plane]; std::unordered_set<std::string> plane_tensor_names;
inherit_friendly_names(function, param, plane_param, sub_name, false); std::string sub_name;
} else { sub_name = std::string("/") + input->m_tensor_data->planes_sub_names()[plane];
auto sub_name = color_info->friendly_suffix(plane); if (!std::get<1>(existing_names)) {
inherit_friendly_names(function, param, plane_param, sub_name); existing_names = std::make_tuple(get_function_tensor_names(function), true);
}
for (const auto& tensor_name : param->get_default_output().get_tensor().get_names()) {
auto new_name = tensor_name + sub_name;
OPENVINO_ASSERT(
std::get<0>(existing_names).count(new_name) == 0,
"Error while trying to create plane input with name '",
new_name,
"' - name already exists in network. Please specify another sub-name for set_color_format");
plane_tensor_names.insert(new_name);
}
plane_param->get_default_output().get_tensor().set_names(plane_tensor_names);
plane_param->set_friendly_name(param->get_friendly_name() + sub_name);
} else if (color_info->planes_count() == 1) {
plane_param->get_default_output().get_tensor().set_names(
param->get_default_output().get_tensor().get_names());
plane_param->set_friendly_name(param->get_friendly_name());
} }
if (!input->m_tensor_data->get_layout().empty()) { if (!input->m_tensor_data->get_layout().empty()) {
plane_param->set_layout(input->m_tensor_data->get_layout()); plane_param->set_layout(input->m_tensor_data->get_layout());
@ -466,22 +484,24 @@ std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function
for (auto consumer : consumers) { for (auto consumer : consumers) {
consumer.replace_source_output(node); consumer.replace_source_output(node);
} }
for (size_t i = 0; i < parameters.size(); i++) { {
if (param == parameters[i]) { auto param_it = std::find(parameters_list.begin(), parameters_list.end(), param);
parameters[i] = new_params[0]; OPENVINO_ASSERT(param_it != parameters_list.end(),
break; "Parameter to replace has been replaced by previous steps of preprocessing. Use only one "
} "InputInfo for one input parameter");
} // Insert list of new parameters to the place of original parameter
for (size_t i = 1; i < new_params.size(); i++) { param_it = parameters_list.erase(param_it);
parameters.emplace_back(new_params[i]); parameters_list.insert(param_it, new_params.begin(), new_params.end());
} }
} }
// Add parameters with right order // Add parameters with right order
{ {
while (!function->get_parameters().empty()) while (!function->get_parameters().empty()) {
function->remove_parameter(*function->get_parameters().begin()); function->remove_parameter(*function->get_parameters().begin());
function->add_parameters(parameters); }
auto parameters_vec = ParameterVector(parameters_list.begin(), parameters_list.end());
function->add_parameters(parameters_vec);
} }
// Validate nodes after preprocessing if needed (no need to repeat it after post-processing) // Validate nodes after preprocessing if needed (no need to repeat it after post-processing)
if (tensor_data_updated) { if (tensor_data_updated) {
@ -501,6 +521,7 @@ std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function
node = function->output(); node = function->output();
} }
auto start_out_node_names = node.get_tensor().get_names(); auto start_out_node_names = node.get_tensor().get_names();
node.get_tensor().set_names({});
result = std::dynamic_pointer_cast<op::v0::Result>(node.get_node_shared_ptr()); result = std::dynamic_pointer_cast<op::v0::Result>(node.get_node_shared_ptr());
// Set result layout from 'network' information // Set result layout from 'network' information
if (output->m_network_data && output->m_network_data->is_layout_set() && result->get_layout().empty()) { if (output->m_network_data && output->m_network_data->is_layout_set() && result->get_layout().empty()) {
@ -539,26 +560,28 @@ std::shared_ptr<Function> PrePostProcessor::build(const std::shared_ptr<Function
auto action_result = action({node}, context); auto action_result = action({node}, context);
node = std::get<0>(action_result); node = std::get<0>(action_result);
} }
node.get_node_shared_ptr()->set_friendly_name(
result->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name());
// Create result // Create result
auto new_result = std::make_shared<ov::op::v0::Result>(node); auto new_result = std::make_shared<ov::op::v0::Result>(node);
new_result->set_friendly_name(result->get_friendly_name());
if (!context.layout().empty()) { if (!context.layout().empty()) {
new_result->set_layout(context.layout()); new_result->set_layout(context.layout());
} }
node.get_tensor().set_names(start_out_node_names); node.get_tensor().set_names(start_out_node_names);
for (size_t i = 0; i < results.size(); i++) {
if (result == results[i]) { for (auto& old_result : results) {
results[i] = new_result; if (result == old_result) {
old_result = new_result;
break; break;
} }
} }
} }
// Add results with right order // Add results with right order
{
while (!function->get_results().empty()) while (!function->get_results().empty())
function->remove_result(*function->get_results().begin()); function->remove_result(*function->get_results().begin());
function->add_results(results); function->add_results(results);
}
guard.reset(); guard.reset();
return function; return function;

View File

@ -46,10 +46,8 @@ void PreStepsList::add_scale_impl(const std::vector<float>& values) {
shape = construct_mean_scale_shape(nodes[0].get_node_shared_ptr(), values.size(), context); shape = construct_mean_scale_shape(nodes[0].get_node_shared_ptr(), values.size(), context);
} }
auto constant = op::v0::Constant::create(element::f32, shape, values); auto constant = op::v0::Constant::create(element::f32, shape, values);
// inherit_friendly_names(function, nodes[0].get_node_shared_ptr(), constant, "/scale/Divide_Factor");
auto new_op = std::make_shared<op::v1::Divide>(nodes[0], constant); auto new_op = std::make_shared<op::v1::Divide>(nodes[0], constant);
// inherit_friendly_names(function, nodes[0].get_node_shared_ptr(), new_op, "/scale/Divide");
return std::make_tuple(std::vector<Output<Node>>{new_op}, false); return std::make_tuple(std::vector<Output<Node>>{new_op}, false);
}); });
} }
@ -69,10 +67,8 @@ void PreStepsList::add_mean_impl(const std::vector<float>& values) {
shape = construct_mean_scale_shape(nodes[0], values.size(), context); shape = construct_mean_scale_shape(nodes[0], values.size(), context);
} }
auto constant = op::v0::Constant::create(element::f32, shape, values); auto constant = op::v0::Constant::create(element::f32, shape, values);
// inherit_friendly_names(function, nodes[0], constant, "/mean/Mean_Const");
auto new_op = std::make_shared<op::v1::Subtract>(nodes[0], constant); auto new_op = std::make_shared<op::v1::Subtract>(nodes[0], constant);
// inherit_friendly_names(function, nodes[0], new_op, "/mean/Subtract");
return std::make_tuple(std::vector<Output<Node>>{new_op}, false); return std::make_tuple(std::vector<Output<Node>>{new_op}, false);
}); });
} }
@ -93,7 +89,6 @@ void PreStepsList::add_convert_impl(const element::Type& type) {
"Can't insert 'convert_element_type' for dynamic source tensor type."); "Can't insert 'convert_element_type' for dynamic source tensor type.");
if (t != node.get_element_type()) { if (t != node.get_element_type()) {
auto convert = std::make_shared<op::v0::Convert>(node, t); auto convert = std::make_shared<op::v0::Convert>(node, t);
// inherit_friendly_names(function, node, convert, "/convert_element_type");
res.emplace_back(convert); res.emplace_back(convert);
convert_added = true; convert_added = true;
} else { } else {
@ -154,7 +149,6 @@ void PreStepsList::add_resize_impl(ResizeAlgorithm alg, int dst_height, int dst_
{0, 0}); {0, 0});
auto interp = std::make_shared<op::v4::Interpolate>(node, target_spatial_shape, scales, axes, attrs); auto interp = std::make_shared<op::v4::Interpolate>(node, target_spatial_shape, scales, axes, attrs);
// inherit_friendly_names(function, nodes[0], interp, "/resize");
return std::make_tuple(std::vector<Output<Node>>{interp}, true); return std::make_tuple(std::vector<Output<Node>>{interp}, true);
}); });
} }
@ -178,7 +172,6 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) {
} }
auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation); auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
auto transpose = std::make_shared<op::v1::Transpose>(nodes[0], perm_constant); auto transpose = std::make_shared<op::v1::Transpose>(nodes[0], perm_constant);
// inherit_friendly_names(function, nodes[0], transpose, "/convert_layout");
context.layout() = dst_layout; // Update context's current layout context.layout() = dst_layout; // Update context's current layout
return std::make_tuple(std::vector<Output<Node>>{transpose}, true); return std::make_tuple(std::vector<Output<Node>>{transpose}, true);
}); });
@ -207,7 +200,6 @@ void PreStepsList::add_convert_color_impl(const ColorFormat& dst_format) {
color_format_name(dst_format), color_format_name(dst_format),
"' format:"); "' format:");
} }
// inherit_friendly_names(function, nodes[0], convert, "/convert_color_nv12_single");
context.color_format() = dst_format; context.color_format() = dst_format;
return std::make_tuple(std::vector<Output<Node>>{convert}, true); return std::make_tuple(std::vector<Output<Node>>{convert}, true);
} else if (context.color_format() == ColorFormat::NV12_TWO_PLANES) { } else if (context.color_format() == ColorFormat::NV12_TWO_PLANES) {
@ -226,7 +218,6 @@ void PreStepsList::add_convert_color_impl(const ColorFormat& dst_format) {
color_format_name(dst_format), color_format_name(dst_format),
"' format:"); "' format:");
} }
// inherit_friendly_names(function, nodes[0], convert, "/convert_color_nv12_two_planes");
context.color_format() = dst_format; context.color_format() = dst_format;
return std::make_tuple(std::vector<Output<Node>>{convert}, true); return std::make_tuple(std::vector<Output<Node>>{convert}, true);
} }
@ -251,7 +242,6 @@ void PostStepsList::add_convert_impl(const element::Type& type) {
!t.is_dynamic() && t != element::undefined, !t.is_dynamic() && t != element::undefined,
"Can't convert to dynamic/unknown element type, consider using of InputTensorInfo::set_element_type"); "Can't convert to dynamic/unknown element type, consider using of InputTensorInfo::set_element_type");
auto convert = std::make_shared<op::v0::Convert>(node, t); auto convert = std::make_shared<op::v0::Convert>(node, t);
inherit_friendly_names_postprocess(convert, node);
return std::make_tuple(Output<Node>(convert), true); return std::make_tuple(Output<Node>(convert), true);
}); });
} }
@ -269,7 +259,6 @@ void PostStepsList::add_convert_layout_impl(const Layout& layout) {
} }
auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation); auto perm_constant = op::v0::Constant::create<int64_t>(element::i64, Shape{permutation.size()}, permutation);
auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant); auto transpose = std::make_shared<op::v1::Transpose>(node, perm_constant);
inherit_friendly_names_postprocess(transpose, node);
context.layout() = dst_layout; // Update context's current layout context.layout() = dst_layout; // Update context's current layout
return std::make_tuple(Output<Node>(transpose), true); return std::make_tuple(Output<Node>(transpose), true);
}); });

View File

@ -60,47 +60,6 @@ inline size_t get_and_check_channels_idx(const Layout& layout, const PartialShap
return idx; return idx;
} }
inline void inherit_friendly_names(const std::shared_ptr<ov::Function>& function,
const Output<ov::Node>& src_node,
const Output<ov::Node>& dst_node,
const std::string& suffix,
bool search_for_available_name = true) {
dst_node.get_node_shared_ptr()->set_friendly_name(src_node.get_node_shared_ptr()->get_friendly_name() + suffix);
std::unordered_set<std::string> new_names;
for (const auto& tensor_name : src_node.get_tensor().get_names()) {
auto new_tensor_name = tensor_name + suffix;
if (!suffix.empty()) {
// Verify that new names are unique for a function
if (!is_tensor_name_available(new_tensor_name, function) && search_for_available_name) {
// Search for available name
size_t idx = 0;
do {
new_tensor_name = tensor_name + suffix + std::to_string(idx++);
} while (!is_tensor_name_available(new_tensor_name, function));
}
}
new_names.emplace(new_tensor_name);
}
dst_node.get_tensor().set_names(new_names);
}
// TODO: add uniqueness check like for preprocessing (or remove from pre-processing)
inline void inherit_friendly_names_postprocess(const Output<ov::Node>& inserted_output,
const Output<ov::Node>& previous_output) {
inserted_output.get_node_shared_ptr()->set_friendly_name(
previous_output.get_node_shared_ptr()->get_friendly_name());
std::unordered_set<std::string> new_names; // New name for previous node
for (const auto& tensor_name : previous_output.get_tensor().get_names()) {
auto new_tensor_name = tensor_name;
new_names.emplace(new_tensor_name);
}
previous_output.get_tensor().set_names(new_names);
// reset names for original node
previous_output.get_node_shared_ptr()->set_friendly_name({});
previous_output.get_tensor().set_names({});
}
/// \brief Context passed to each pre/post-processing operation. /// \brief Context passed to each pre/post-processing operation.
/// This is internal structure which is not shared to custom operations yet. /// This is internal structure which is not shared to custom operations yet.
class PrePostProcessingContextBase { class PrePostProcessingContextBase {

View File

@ -9,21 +9,15 @@
namespace ov { namespace ov {
/// \brief Check that specified tensor name is unique for a given function. inline std::unordered_set<std::string> get_function_tensor_names(const std::shared_ptr<Function>& function) {
/// std::unordered_set<std::string> set;
/// \param tensor_name Name to check across all tensors in a function.
/// \param function Function.
/// \return False if tensor name is already used in some function's node, True otherwise
inline bool is_tensor_name_available(const std::string& tensor_name, const std::shared_ptr<Function>& function) {
for (const auto& node : function->get_ordered_ops()) { for (const auto& node : function->get_ordered_ops()) {
for (const auto& output : node->outputs()) { for (const auto& output : node->outputs()) {
const auto& tensor = output.get_tensor(); const auto& names = output.get_tensor().get_names();
if (tensor.get_names().count(tensor_name)) { set.insert(names.begin(), names.end());
return false;
} }
} }
} return set;
return true;
} }
} // namespace ov } // namespace ov

View File

@ -6,13 +6,10 @@
#include "ngraph/ngraph.hpp" #include "ngraph/ngraph.hpp"
#include "ngraph/ops.hpp" #include "ngraph/ops.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp" #include "openvino/core/preprocess/pre_post_process.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp" #include "util/test_tools.hpp"
using namespace ov; using namespace ov;
using namespace ov::preprocess; using namespace ov::preprocess;
using namespace ngraph::test;
static std::shared_ptr<Function> create_simple_function(element::Type type, const PartialShape& shape) { static std::shared_ptr<Function> create_simple_function(element::Type type, const PartialShape& shape) {
auto data1 = std::make_shared<op::v0::Parameter>(type, shape); auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
@ -27,24 +24,24 @@ static std::shared_ptr<Function> create_simple_function(element::Type type, cons
return std::make_shared<Function>(ResultVector{res}, ParameterVector{data1}); return std::make_shared<Function>(ResultVector{res}, ParameterVector{data1});
} }
static std::shared_ptr<Function> create_2inputs(element::Type type, const PartialShape& shape) { template <int N>
static std::shared_ptr<Function> create_n_inputs(element::Type type, const PartialShape& shape) {
ResultVector res;
ParameterVector params;
for (size_t i = 0; i < N; i++) {
auto index_str = std::to_string(i);
auto data1 = std::make_shared<op::v0::Parameter>(type, shape); auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
data1->set_friendly_name("input1"); data1->set_friendly_name("input" + index_str);
data1->get_output_tensor(0).set_names({"tensor_input1"}); data1->get_output_tensor(0).set_names({"tensor_input" + index_str});
auto op1 = std::make_shared<op::v0::Relu>(data1); auto op1 = std::make_shared<op::v0::Relu>(data1);
op1->set_friendly_name("Relu1"); op1->set_friendly_name("Relu" + index_str);
auto data2 = std::make_shared<op::v0::Parameter>(type, shape);
data2->set_friendly_name("input2");
data2->get_output_tensor(0).set_names({"tensor_input2"});
auto op2 = std::make_shared<op::v0::Relu>(data2);
op2->set_friendly_name("Relu2");
auto res1 = std::make_shared<op::v0::Result>(op1); auto res1 = std::make_shared<op::v0::Result>(op1);
res1->set_friendly_name("Result1"); res1->set_friendly_name("Result" + index_str);
res1->get_output_tensor(0).set_names({"tensor_output1"}); res1->get_output_tensor(0).set_names({"tensor_output" + index_str});
auto res2 = std::make_shared<op::v0::Result>(op2); params.push_back(data1);
res2->set_friendly_name("Result2"); res.push_back(res1);
res2->get_output_tensor(0).set_names({"tensor_output2"}); }
return std::make_shared<Function>(ResultVector{res1, res2}, ParameterVector{data1, data2}); return std::make_shared<Function>(res, params);
} }
TEST(pre_post_process, simple_mean_scale) { TEST(pre_post_process, simple_mean_scale) {
@ -118,7 +115,7 @@ TEST(pre_post_process, empty_preprocess) {
} }
TEST(pre_post_process, preprocess_assert_input_without_index) { TEST(pre_post_process, preprocess_assert_input_without_index) {
auto f = create_2inputs(element::f32, Shape{1, 3, 2, 2}); auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
auto inp = InputInfo(); auto inp = InputInfo();
EXPECT_ANY_THROW(f = PrePostProcessor().input(std::move(inp)).build(f)); EXPECT_ANY_THROW(f = PrePostProcessor().input(std::move(inp)).build(f));
inp = InputInfo("some_non_existing_name"); inp = InputInfo("some_non_existing_name");
@ -237,12 +234,6 @@ TEST(pre_post_process, convert_color_nv12_rgb_2_planes) {
EXPECT_EQ(f->get_parameters()[1]->get_element_type(), element::f32); EXPECT_EQ(f->get_parameters()[1]->get_element_type(), element::f32);
EXPECT_EQ(f->get_parameters()[0]->get_partial_shape(), (PartialShape{5, 2, 2, 1})); EXPECT_EQ(f->get_parameters()[0]->get_partial_shape(), (PartialShape{5, 2, 2, 1}));
EXPECT_EQ(f->get_parameters()[1]->get_partial_shape(), (PartialShape{5, 1, 1, 2})); EXPECT_EQ(f->get_parameters()[1]->get_partial_shape(), (PartialShape{5, 1, 1, 2}));
EXPECT_EQ(f->get_parameters()[0]->get_friendly_name(), "input1/Y");
EXPECT_EQ(*f->get_parameters()[0]->output(0).get_tensor().get_names().begin(), "tensor_input1/Y");
EXPECT_EQ(f->get_parameters()[1]->get_friendly_name(), "input1/UV");
EXPECT_EQ(*f->get_parameters()[1]->output(0).get_tensor().get_names().begin(), "tensor_input1/UV");
} }
TEST(pre_post_process, convert_color_nv12_bgr_2_planes_u8_lvalue) { TEST(pre_post_process, convert_color_nv12_bgr_2_planes_u8_lvalue) {
@ -328,8 +319,6 @@ TEST(pre_post_process, convert_color_unsupported) {
TEST(pre_post_process, convert_color_incorrect_subnames) { TEST(pre_post_process, convert_color_incorrect_subnames) {
auto f = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 2, 2, 3}); auto f = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 2, 2, 3});
auto name = f->get_parameters()[0]->get_friendly_name();
auto tensor_names = f->get_parameters().front()->get_output_tensor(0).get_names();
EXPECT_THROW( EXPECT_THROW(
f = PrePostProcessor() f = PrePostProcessor()
.input(InputInfo() .input(InputInfo()
@ -352,7 +341,7 @@ TEST(pre_post_process, convert_color_incorrect_subnames) {
} }
TEST(pre_post_process, convert_color_duplicate_subnames) { TEST(pre_post_process, convert_color_duplicate_subnames) {
auto f = create_2inputs(element::f32, PartialShape{1, 2, 2, 3}); auto f = create_n_inputs<2>(element::f32, PartialShape{1, 2, 2, 3});
f->get_parameters()[0]->get_output_tensor(0).set_names({"tensor_input1"}); f->get_parameters()[0]->get_output_tensor(0).set_names({"tensor_input1"});
f->get_parameters()[1]->get_output_tensor(0).set_names({"tensor_input1/CustomUV"}); f->get_parameters()[1]->get_output_tensor(0).set_names({"tensor_input1/CustomUV"});
EXPECT_THROW(f = PrePostProcessor() EXPECT_THROW(f = PrePostProcessor()
@ -438,9 +427,7 @@ TEST(pre_post_process, custom_preprocessing) {
auto f = create_simple_function(element::i32, Shape{1, 3, 1, 1}); auto f = create_simple_function(element::i32, Shape{1, 3, 1, 1});
f = PrePostProcessor() f = PrePostProcessor()
.input(InputInfo().preprocess(PreProcessSteps().custom([](const Output<Node>& node) { .input(InputInfo().preprocess(PreProcessSteps().custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node); return std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
}))) })))
.build(f); .build(f);
EXPECT_EQ(f->get_output_element_type(0), element::i32); EXPECT_EQ(f->get_output_element_type(0), element::i32);
@ -473,9 +460,7 @@ TEST(pre_post_process, test_lvalue) {
preprocessSteps.mean({1.f, 2.f, 3.f}); preprocessSteps.mean({1.f, 2.f, 3.f});
preprocessSteps.scale({2.f, 3.f, 4.f}); preprocessSteps.scale({2.f, 3.f, 4.f});
preprocessSteps.custom([](const Output<Node>& node) { preprocessSteps.custom([](const Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node); return std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node_shared_ptr()->get_friendly_name() + "/abs");
return abs;
}); });
auto& same = preprocessSteps.convert_element_type(element::i8); auto& same = preprocessSteps.convert_element_type(element::i8);
inputInfo.preprocess(std::move(same)); inputInfo.preprocess(std::move(same));
@ -490,7 +475,7 @@ TEST(pre_post_process, test_lvalue) {
} }
TEST(pre_post_process, test_2_inputs_basic) { TEST(pre_post_process, test_2_inputs_basic) {
auto f = create_2inputs(element::f32, Shape{1, 3, 1, 1}); auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 1});
{ f = PrePostProcessor().input(InputInfo(1).preprocess(PreProcessSteps().mean(1.f).scale(2.0f))).build(f); } { f = PrePostProcessor().input(InputInfo(1).preprocess(PreProcessSteps().mean(1.f).scale(2.0f))).build(f); }
EXPECT_EQ(f->get_output_element_type(0), element::f32); EXPECT_EQ(f->get_output_element_type(0), element::f32);
EXPECT_EQ(f->get_output_element_type(1), element::f32); EXPECT_EQ(f->get_output_element_type(1), element::f32);
@ -655,8 +640,21 @@ TEST(pre_post_process, resize_no_tensor_width) {
ov::AssertFailure); ov::AssertFailure);
} }
TEST(pre_post_process, double_input_info) {
auto f = create_simple_function(element::f32, Shape{1, 3, 224, 224});
// Parameter is replaced during first pre-processing, parameter for second step will not be resolved properly
EXPECT_THROW(f = PrePostProcessor()
.input(InputInfo().tensor(InputTensorInfo().set_element_type(element::u8)))
.input(InputInfo().tensor(InputTensorInfo().set_element_type(element::u8)))
.build(f),
ov::AssertFailure);
}
TEST(pre_post_process, preprocess_convert_layout_implicit) { TEST(pre_post_process, preprocess_convert_layout_implicit) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
auto name = f->get_results().front()->get_friendly_name();
auto name_last_op = f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name();
auto tensor_names = f->output().get_tensor().get_names();
f = PrePostProcessor() f = PrePostProcessor()
.input( .input(
@ -664,6 +662,10 @@ TEST(pre_post_process, preprocess_convert_layout_implicit) {
.build(f); .build(f);
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NHWC"); EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NHWC");
EXPECT_EQ(f->get_parameters()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 2, 2, 3})); EXPECT_EQ(f->get_parameters()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 2, 2, 3}));
EXPECT_EQ(name, f->get_results().front()->get_friendly_name());
EXPECT_EQ(name_last_op,
f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name());
EXPECT_EQ(tensor_names, f->output().get_tensor().get_names());
} }
TEST(pre_post_process, preprocess_convert_layout_default) { TEST(pre_post_process, preprocess_convert_layout_default) {
@ -699,6 +701,8 @@ TEST(pre_post_process, preprocess_convert_layout_same) {
TEST(pre_post_process, postprocess_convert_element_type_explicit) { TEST(pre_post_process, postprocess_convert_element_type_explicit) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
auto name = f->output().get_node_shared_ptr()->get_friendly_name();
auto name_last_op = f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name();
auto old_names = f->output().get_tensor().get_names(); auto old_names = f->output().get_tensor().get_names();
f = PrePostProcessor() f = PrePostProcessor()
.output(OutputInfo().postprocess(PostProcessSteps().convert_element_type(element::u8))) .output(OutputInfo().postprocess(PostProcessSteps().convert_element_type(element::u8)))
@ -708,7 +712,7 @@ TEST(pre_post_process, postprocess_convert_element_type_explicit) {
EXPECT_EQ(f->output().get_tensor().get_names(), old_names); EXPECT_EQ(f->output().get_tensor().get_names(), old_names);
EXPECT_EQ(old_names.count("tensor_output1"), 1); EXPECT_EQ(old_names.count("tensor_output1"), 1);
auto ops = f->get_ordered_ops(); auto ops = f->get_ordered_ops();
auto res_count = std::count_if(ops.begin(), ops.end(), [](std::shared_ptr<ov::Node> n) { auto res_count = std::count_if(ops.begin(), ops.end(), [](const std::shared_ptr<ov::Node>& n) {
return std::dynamic_pointer_cast<ov::op::v0::Result>(n) != nullptr; return std::dynamic_pointer_cast<ov::op::v0::Result>(n) != nullptr;
}); });
EXPECT_EQ(res_count, 1); EXPECT_EQ(res_count, 1);
@ -716,10 +720,16 @@ TEST(pre_post_process, postprocess_convert_element_type_explicit) {
return n->output(0).get_tensor().get_names().count("tensor_output1") > 0; return n->output(0).get_tensor().get_names().count("tensor_output1") > 0;
}); });
EXPECT_EQ(names_count, 2); // last node + result referencing to it EXPECT_EQ(names_count, 2); // last node + result referencing to it
EXPECT_EQ(name, f->output().get_node_shared_ptr()->get_friendly_name());
EXPECT_EQ(name_last_op,
f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name());
} }
TEST(pre_post_process, postprocess_convert_element_type_default) { TEST(pre_post_process, postprocess_convert_element_type_default) {
auto f = create_2inputs(element::f32, Shape{1, 3, 2, 2}); auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
auto name = f->output(1).get_node_shared_ptr()->get_friendly_name();
auto name_last_op = f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name();
auto tensor_names = f->output(1).get_tensor().get_names();
f = PrePostProcessor() f = PrePostProcessor()
.output(OutputInfo(1) .output(OutputInfo(1)
.postprocess(PostProcessSteps().convert_element_type()) .postprocess(PostProcessSteps().convert_element_type())
@ -727,6 +737,10 @@ TEST(pre_post_process, postprocess_convert_element_type_default) {
.build(f); .build(f);
EXPECT_EQ(f->get_results()[0]->get_element_type(), element::f32); EXPECT_EQ(f->get_results()[0]->get_element_type(), element::f32);
EXPECT_EQ(f->get_results()[1]->get_element_type(), element::u8); EXPECT_EQ(f->get_results()[1]->get_element_type(), element::u8);
EXPECT_EQ(name, f->output(1).get_node_shared_ptr()->get_friendly_name());
EXPECT_EQ(name_last_op,
f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name());
EXPECT_EQ(tensor_names, f->output(1).get_tensor().get_names());
} }
TEST(pre_post_process, postprocess_convert_element_type_same) { TEST(pre_post_process, postprocess_convert_element_type_same) {
@ -756,6 +770,37 @@ TEST(pre_post_process, postprocess_convert_element_type_implicit) {
EXPECT_EQ(f->get_results()[0]->get_element_type(), element::u8); EXPECT_EQ(f->get_results()[0]->get_element_type(), element::u8);
} }
TEST(pre_post_process, preprocess_keep_params_order) {
auto f = create_n_inputs<3>(element::f32, Shape{1, 2, 2, 3});
f = PrePostProcessor()
.input(InputInfo(1)
.tensor(InputTensorInfo().set_color_format(ColorFormat::NV12_TWO_PLANES, {"Y", "UV"}))
.preprocess(PreProcessSteps().convert_color(ColorFormat::RGB)))
.input(InputInfo(0).tensor(InputTensorInfo().set_layout("NCHW")))
.input(InputInfo(2)
.tensor(InputTensorInfo().set_color_format(ColorFormat::NV12_TWO_PLANES, {"Y", "UV"}))
.preprocess(PreProcessSteps().convert_color(ColorFormat::RGB)))
.build(f);
ASSERT_EQ(f->get_parameters().size(), 5);
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NCHW");
EXPECT_EQ(f->get_parameters()[1]->get_layout(), "NHWC");
EXPECT_EQ(f->get_parameters()[2]->get_layout(), "NHWC");
EXPECT_EQ(f->get_parameters()[3]->get_layout(), "NHWC");
EXPECT_EQ(f->get_parameters()[4]->get_layout(), "NHWC");
EXPECT_EQ(f->input(0).get_partial_shape(), (PartialShape{1, 2, 2, 3}));
EXPECT_EQ(f->input(1).get_partial_shape(), (PartialShape{1, 2, 2, 1}));
EXPECT_EQ(f->input(2).get_partial_shape(), (PartialShape{1, 1, 1, 2}));
EXPECT_EQ(f->input(3).get_partial_shape(), (PartialShape{1, 2, 2, 1}));
EXPECT_EQ(f->input(4).get_partial_shape(), (PartialShape{1, 1, 1, 2}));
EXPECT_EQ(f->input(0).get_tensor().get_names(), std::unordered_set<std::string>{"tensor_input0"});
EXPECT_EQ(f->input(1).get_tensor().get_names(), std::unordered_set<std::string>{"tensor_input1/Y"});
EXPECT_EQ(f->input(2).get_tensor().get_names(), std::unordered_set<std::string>{"tensor_input1/UV"});
EXPECT_EQ(f->input(3).get_tensor().get_names(), std::unordered_set<std::string>{"tensor_input2/Y"});
EXPECT_EQ(f->input(4).get_tensor().get_names(), std::unordered_set<std::string>{"tensor_input2/UV"});
}
// --- PostProcess - set/convert layout --- // --- PostProcess - set/convert layout ---
TEST(pre_post_process, postprocess_set_layout_network) { TEST(pre_post_process, postprocess_set_layout_network) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
@ -783,7 +828,7 @@ TEST(pre_post_process, postprocess_convert_layout_implicit) {
} }
TEST(pre_post_process, postprocess_convert_layout_explicit_no_target) { TEST(pre_post_process, postprocess_convert_layout_explicit_no_target) {
auto f = create_2inputs(element::f32, Shape{1, 3, 2, 2}); auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
f = PrePostProcessor() f = PrePostProcessor()
.output(OutputInfo(1) .output(OutputInfo(1)
.network(OutputNetworkInfo().set_layout("NCHW")) .network(OutputNetworkInfo().set_layout("NCHW"))
@ -837,18 +882,18 @@ TEST(pre_post_process, postprocess_convert_layout_default_error) {
TEST(pre_post_process, postprocess_custom_step) { TEST(pre_post_process, postprocess_custom_step) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
std::string name; bool hit = false;
f = PrePostProcessor() f = PrePostProcessor()
.output(OutputInfo().postprocess( .output(OutputInfo().postprocess(PostProcessSteps().custom([&hit](const ov::Output<Node>& node) {
PostProcessSteps().custom([&name](const ov::Output<Node>& node) -> ov::Output<Node> {
auto abs = std::make_shared<op::v0::Abs>(node); auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node()->get_friendly_name() + "/abs"); hit = true;
name = node.get_node()->get_friendly_name() + "/abs";
return abs; return abs;
}))) })))
.build(f); .build(f);
EXPECT_FALSE(name.empty()); EXPECT_TRUE(hit);
EXPECT_EQ(f->get_results()[0]->get_input_source_output(0).get_node()->get_friendly_name(), name);
EXPECT_EQ(std::string(f->get_results()[0]->get_input_source_output(0).get_node()->get_type_name()),
std::string(op::v0::Abs::get_type_info_static().name));
} }
TEST(pre_post_process, postprocess_implicit_convert_element_type_and_layout) { TEST(pre_post_process, postprocess_implicit_convert_element_type_and_layout) {
@ -864,13 +909,42 @@ TEST(pre_post_process, postprocess_implicit_convert_element_type_and_layout) {
} }
TEST(pre_post_process, postprocess_assert_output_without_index) { TEST(pre_post_process, postprocess_assert_output_without_index) {
auto f = create_2inputs(element::f32, Shape{1, 3, 2, 2}); auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
auto out = OutputInfo(); auto out = OutputInfo();
EXPECT_ANY_THROW(f = PrePostProcessor().output(std::move(out)).build(f)); EXPECT_ANY_THROW(f = PrePostProcessor().output(std::move(out)).build(f));
out = OutputInfo("some_non_existing_name"); out = OutputInfo("some_non_existing_name");
EXPECT_ANY_THROW(f = PrePostProcessor().output(std::move(out)).build(f)); EXPECT_ANY_THROW(f = PrePostProcessor().output(std::move(out)).build(f));
} }
TEST(pre_post_process, postprocess_keep_results_order) {
auto f = create_n_inputs<3>(element::f32, Shape{1, 3, 2, 2});
auto names0 = f->output(0).get_tensor().get_names();
auto names1 = f->output(1).get_tensor().get_names();
auto names2 = f->output(2).get_tensor().get_names();
f = PrePostProcessor()
.output(OutputInfo(0).network(OutputNetworkInfo().set_layout("NCHW")))
.output(OutputInfo(1)
.network(OutputNetworkInfo().set_layout("NCHW"))
.tensor(OutputTensorInfo().set_layout("NHWC").set_element_type(element::u8)))
.build(f);
ASSERT_EQ(f->get_results().size(), 3);
EXPECT_EQ(f->output(0).get_element_type(), element::f32);
EXPECT_EQ(f->output(1).get_element_type(), element::u8);
EXPECT_EQ(f->output(2).get_element_type(), element::f32);
EXPECT_EQ(f->get_results()[0]->get_layout(), "NCHW") << f->get_results()[0]->get_layout().to_string();
EXPECT_EQ(f->get_results()[1]->get_layout(), "NHWC") << f->get_results()[1]->get_layout().to_string();
EXPECT_EQ(f->get_results()[2]->get_layout(), "") << f->get_results()[2]->get_layout().to_string();
EXPECT_EQ(f->output(0).get_partial_shape(), (PartialShape{1, 3, 2, 2}));
EXPECT_EQ(f->output(1).get_partial_shape(), (PartialShape{1, 2, 2, 3}));
EXPECT_EQ(f->output(2).get_partial_shape(), (PartialShape{1, 3, 2, 2}));
EXPECT_EQ(f->output(0).get_tensor().get_names(), names0);
EXPECT_EQ(f->output(1).get_tensor().get_names(), names1);
EXPECT_EQ(f->output(2).get_tensor().get_names(), names2);
}
TEST(pre_post_process, postprocess_lvalues_1) { TEST(pre_post_process, postprocess_lvalues_1) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2}); auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
bool custom_called = false; bool custom_called = false;
@ -881,11 +955,9 @@ TEST(pre_post_process, postprocess_lvalues_1) {
auto steps = PostProcessSteps(); auto steps = PostProcessSteps();
steps.convert_layout(); steps.convert_layout();
steps.convert_element_type(); steps.convert_element_type();
steps.custom([&custom_called](const ov::Output<Node>& node) -> ov::Output<Node> { steps.custom([&custom_called](const ov::Output<Node>& node) {
auto abs = std::make_shared<op::v0::Abs>(node);
abs->set_friendly_name(node.get_node()->get_friendly_name() + "/abs");
custom_called = true; custom_called = true;
return abs; return std::make_shared<op::v0::Abs>(node);
}); });
auto tensorInfo = OutputTensorInfo(); auto tensorInfo = OutputTensorInfo();
@ -903,18 +975,23 @@ TEST(pre_post_process, postprocess_lvalues_1) {
f = p.build(f); f = p.build(f);
EXPECT_EQ(f->get_results().size(), 1); EXPECT_EQ(f->get_results().size(), 1);
EXPECT_EQ(f->output().get_tensor().get_names().count("tensor_output1"), 1); EXPECT_EQ(f->output().get_tensor().get_names().count("tensor_output1"), 1);
EXPECT_EQ(f->get_results()[0]->get_element_type(), element::u8); EXPECT_EQ(f->output().get_node_shared_ptr()->get_friendly_name(), "Result1");
EXPECT_EQ(f->output().get_element_type(), element::u8);
EXPECT_EQ(f->get_results()[0]->get_layout(), "NHWC"); EXPECT_EQ(f->get_results()[0]->get_layout(), "NHWC");
EXPECT_EQ(f->get_results()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 2, 2, 3})); EXPECT_EQ(f->output().get_partial_shape(), (PartialShape{1, 2, 2, 3}));
EXPECT_TRUE(custom_called); EXPECT_TRUE(custom_called);
} }
TEST(pre_post_process, exception_safety) { TEST(pre_post_process, exception_safety) {
auto f = create_2inputs(element::f32, Shape{1, 3, 224, 224}); auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 224, 224});
auto name0 = f->get_parameters()[0]->get_friendly_name(); auto name0 = f->input(0).get_node_shared_ptr()->get_friendly_name();
auto tensor_names0 = f->get_parameters()[0]->get_output_tensor(0).get_names(); auto tensor_names0 = f->input(0).get_tensor().get_names();
auto name1 = f->get_parameters()[1]->get_friendly_name(); auto name1 = f->input(1).get_node_shared_ptr()->get_friendly_name();
auto tensor_names1 = f->get_parameters()[1]->get_output_tensor(0).get_names(); auto tensor_names1 = f->input(1).get_tensor().get_names();
auto out_name0 = f->output(0).get_node_shared_ptr()->get_friendly_name();
auto out_tensor_names0 = f->output(0).get_tensor().get_names();
auto out_name1 = f->output(1).get_node_shared_ptr()->get_friendly_name();
auto out_tensor_names1 = f->output(1).get_tensor().get_names();
EXPECT_THROW(f = PrePostProcessor() EXPECT_THROW(f = PrePostProcessor()
.input(InputInfo(0) // this one is correct .input(InputInfo(0) // this one is correct
.tensor(InputTensorInfo().set_element_type(element::u8)) .tensor(InputTensorInfo().set_element_type(element::u8))
@ -926,15 +1003,32 @@ TEST(pre_post_process, exception_safety) {
}))) })))
.build(f), .build(f),
ov::AssertFailure); ov::AssertFailure);
EXPECT_THROW(
f = PrePostProcessor()
.output(OutputInfo(0) // this one is correct
.tensor(OutputTensorInfo().set_element_type(element::u8)))
.output(OutputInfo(1) // This one is not
.postprocess(PostProcessSteps().custom([](const Output<Node>& node) -> Output<Node> {
throw ngraph::ngraph_error("test error");
})))
.build(f),
ngraph::ngraph_error);
EXPECT_EQ(f->get_parameters().size(), 2); EXPECT_EQ(f->get_parameters().size(), 2);
EXPECT_EQ(f->get_parameters()[0]->get_element_type(), element::f32); EXPECT_EQ(f->input(0).get_element_type(), element::f32);
EXPECT_EQ(f->get_parameters()[0]->get_partial_shape(), (PartialShape{1, 3, 224, 224})); EXPECT_EQ(f->input(0).get_partial_shape(), (PartialShape{1, 3, 224, 224}));
EXPECT_EQ(f->get_parameters()[0]->get_friendly_name(), name0); EXPECT_EQ(f->input(0).get_node_shared_ptr()->get_friendly_name(), name0);
EXPECT_EQ(f->get_parameters()[0]->get_output_tensor(0).get_names(), tensor_names0); EXPECT_EQ(f->input(0).get_tensor().get_names(), tensor_names0);
EXPECT_EQ(f->get_parameters()[1]->get_element_type(), element::f32); EXPECT_EQ(f->input(1).get_element_type(), element::f32);
EXPECT_EQ(f->get_parameters()[1]->get_partial_shape(), (PartialShape{1, 3, 224, 224})); EXPECT_EQ(f->input(1).get_partial_shape(), (PartialShape{1, 3, 224, 224}));
EXPECT_EQ(f->get_parameters()[1]->get_friendly_name(), name1); EXPECT_EQ(f->input(1).get_node_shared_ptr()->get_friendly_name(), name1);
EXPECT_EQ(f->get_parameters()[1]->get_output_tensor(0).get_names(), tensor_names1); EXPECT_EQ(f->input(1).get_tensor().get_names(), tensor_names1);
EXPECT_EQ(f->output(0).get_node_shared_ptr()->get_friendly_name(), out_name0);
EXPECT_EQ(f->output(0).get_tensor().get_names(), out_tensor_names0);
EXPECT_EQ(f->output(1).get_node_shared_ptr()->get_friendly_name(), out_name1);
EXPECT_EQ(f->output(1).get_tensor().get_names(), out_tensor_names1);
} }