From e026d54ed23d02062a79f834335935336de24080 Mon Sep 17 00:00:00 2001
From: Gabriele Galiero Casay
Date: Thu, 21 Jan 2021 06:31:10 +0100
Subject: [PATCH] Develop ConvertLike Reference Implementation (#3857)

* ConvertLike: Develop reference implementation
* ConvertLike: Enable single layer tests for GPU plugin
* ConvertLike: Enable bf16 precision for evaluate method
* ConvertLike: Add unit tests
* ConvertLike: Add dynamic shape test case
* ConvertLike: Remove unnecessary ngraph namespace and using declaration for v1::ConvertLike
* ConvertLike: Simplified reference::convert by using std::enable_if
---
 .../skip_tests_config.cpp                   |   2 -
 .../core/include/ngraph/op/convert_like.hpp |   9 +-
 .../ngraph/runtime/reference/convert.hpp    |  17 +-
 ngraph/test/CMakeLists.txt                  |   1 +
 ngraph/test/backend/convert_like.in.cpp     | 189 +++++++++++++++++
 ngraph/test/runtime/ie/unit_test.manifest   |  11 +-
 .../runtime/interpreter/evaluates_map.cpp   | 197 +++++++++--------
 .../runtime/interpreter/opset_int_tbl.hpp   |   1 +
 8 files changed, 320 insertions(+), 107 deletions(-)
 create mode 100644 ngraph/test/backend/convert_like.in.cpp

diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
index b7ebcc31472..25938238a95 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
@@ -23,8 +23,6 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcess).*)",
         // TODO: Issue: 41467 -- "unsupported element type f16 op Convert"
         R"(.*(ConvertLayerTest).*targetPRC=FP16.*)",
-        // TODO: Issue: 41466 -- "Unsupported op 'ConvertLike'"
-        R"(.*(ConvertLikeLayerTest).*)",
         // TODO: Issue: 41462
         R"(.*(SoftMaxLayerTest).*axis=0.*)",
         // TODO: Issue: 41461
diff --git a/ngraph/core/include/ngraph/op/convert_like.hpp b/ngraph/core/include/ngraph/op/convert_like.hpp
index aadef91b268..cc6e57ffbe4 100644
--- a/ngraph/core/include/ngraph/op/convert_like.hpp
+++ b/ngraph/core/include/ngraph/op/convert_like.hpp
@@ -43,6 +43,9 @@ namespace ngraph
                 virtual std::shared_ptr<Node>
                     clone_with_new_inputs(const OutputVector& new_args) const override;
             };
-        }
-    }
-}
+
+        } // namespace v1
+
+    } // namespace op
+
+} // namespace ngraph
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp
index 558a2f9ed3f..8669a6048e0 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp
@@ -25,7 +25,8 @@ namespace ngraph
         namespace reference
         {
             template <typename TI, typename TO>
-            void convert(const TI* arg, TO* out, size_t count)
+            typename std::enable_if<!std::is_same<TO, char>::value>::type
+                convert(const TI* arg, TO* out, size_t count)
             {
                 for (size_t i = 0; i < count; ++i)
                 {
                     out[i] = static_cast<TO>(arg[i]);
                 }
             }
 
-            template <typename T>
-            void convert_to_bool(const T* arg, char* out, size_t count)
+            template <typename TI, typename TO>
+            typename std::enable_if<std::is_same<TO, char>::value>::type
+                convert(const TI* arg, TO* out, size_t count)
             {
                 for (size_t i = 0; i < count; ++i)
                 {
                     out[i] = static_cast<char>(static_cast<bool>(arg[i]));
                 }
             }
-        }
-    }
-}
+
+        } // namespace reference
+
+    } // namespace runtime
+
+} // namespace ngraph
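Note on the reference change above: nGraph stores element::boolean values as char, so converting a value such as 256 or 0.5 straight to char would yield 0; the std::enable_if overload selected for char outputs routes the value through static_cast<bool> first, so every non-zero input maps to 1. A minimal standalone sketch of the same dispatch outside the patched header (illustrative only; the name convert_sketch is hypothetical):

#include <cstddef>
#include <type_traits>

// Chosen whenever the output type is anything but char.
template <typename TI, typename TO>
typename std::enable_if<!std::is_same<TO, char>::value>::type
convert_sketch(const TI* arg, TO* out, std::size_t count)
{
    for (std::size_t i = 0; i < count; ++i)
        out[i] = static_cast<TO>(arg[i]);
}

// Chosen only for char (boolean storage): normalize through bool, so 256 becomes 1, not 0.
template <typename TI, typename TO>
typename std::enable_if<std::is_same<TO, char>::value>::type
convert_sketch(const TI* arg, TO* out, std::size_t count)
{
    for (std::size_t i = 0; i < count; ++i)
        out[i] = static_cast<char>(static_cast<bool>(arg[i]));
}

Because the two overloads are mutually exclusive for any given TO, callers keep writing a single convert(...) call and template argument deduction picks the right body, which is what let the patch drop the separate convert_to_bool helper.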
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 65829cd97f7..6fee2befb67 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -266,6 +266,7 @@ set(MULTI_TEST_SRC
     backend/concat.in.cpp
     backend/constant.in.cpp
     backend/convert.in.cpp
+    backend/convert_like.in.cpp
     backend/convolution.in.cpp
     backend/cos.in.cpp
     backend/cosh.in.cpp
diff --git a/ngraph/test/backend/convert_like.in.cpp b/ngraph/test/backend/convert_like.in.cpp
new file mode 100644
index 00000000000..3f7caf2499f
--- /dev/null
+++ b/ngraph/test/backend/convert_like.in.cpp
@@ -0,0 +1,189 @@
+//*****************************************************************************
+// Copyright 2017-2021 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/test_case.hpp"
+#include "util/test_control.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+using namespace std;
+using namespace ngraph;
+
+static string s_manifest = "${MANIFEST}";
+using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_float32_int32)
+{
+    Shape input_shape{2, 3, 1};
+    const auto data = make_shared<op::Parameter>(element::f32, input_shape);
+    const auto like = make_shared<op::Parameter>(element::i32, input_shape);
+    const auto convert_like = make_shared<op::v1::ConvertLike>(data, like);
+    const auto f = make_shared<Function>(convert_like, ParameterVector{data, like});
+
+    vector<float> data_vect = {-1.8f, 0.2f, 1.4f, 2.1f, 3.9f, 4.3f};
+    vector<int32_t> like_vect(shape_size(input_shape), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(input_shape, data_vect);
+    test_case.add_input<int32_t>(input_shape, like_vect);
+    test_case.add_expected_output<int32_t>(input_shape, {-1, 0, 1, 2, 3, 4});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_int32_float32)
+{
+    Shape shape{2, 2};
+    const auto data = make_shared<op::Parameter>(element::i32, shape);
+    const auto like = make_shared<op::Parameter>(element::f32, shape);
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data, like});
+
+    vector<int32_t> data_vect{281, 2, 3, 4};
+    vector<float> like_vect(shape_size(shape), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<int32_t>(shape, data_vect);
+    test_case.add_input<float>(shape, like_vect);
+    test_case.add_expected_output<float>(shape, {281, 2, 3, 4});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_uint16_float32)
+{
+    Shape shape{2, 2};
+    const auto data = make_shared<op::Parameter>(element::u16, shape);
+    const auto like = make_shared<op::Parameter>(element::f32, shape);
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data, like});
+
+    vector<uint16_t> data_vect{1, 2, 3, 4};
+    vector<float> like_vect(shape_size(shape), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<uint16_t>(shape, data_vect);
+    test_case.add_input<float>(shape, like_vect);
+    test_case.add_expected_output<float>(shape, {1, 2, 3, 4});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_int32_bool)
+{
+    Shape shape{2, 3};
+    const auto data = make_shared<op::Parameter>(element::i32, shape);
+    const auto like = make_shared<op::Parameter>(element::boolean, shape);
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data, like});
+
+    int32_t lowest = std::numeric_limits<int32_t>::lowest();
+    int32_t max = std::numeric_limits<int32_t>::max();
+
+    vector<int32_t> data_vect{0, 12, 23, 0, lowest, max};
+    vector<char> like_vect(shape_size(shape), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<int32_t>(shape, data_vect);
+    test_case.add_input<char>(shape, like_vect);
+    test_case.add_expected_output<char>(shape, {0, 1, 1, 0, 1, 1});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_float32_bool)
+{
+    Shape shape{3, 3};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto like = make_shared<op::Parameter>(element::boolean, shape);
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data, like});
+
+    float lowest = std::numeric_limits<float>::lowest();
+    float max = std::numeric_limits<float>::max();
+    float min = std::numeric_limits<float>::min();
+    float pos_inf = std::numeric_limits<float>::infinity();
+    float neg_inf = -std::numeric_limits<float>::infinity();
+
+    vector<float> data_vect{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf};
+    vector<char> like_vect(shape_size(shape), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(shape, data_vect);
+    test_case.add_input<char>(shape, like_vect);
+    test_case.add_expected_output<char>(shape, {0, 1, 1, 0, 1, 1, 1, 1, 1});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_float32_bfloat16)
+{
+    Shape shape{1, 1, 3, 5};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto like = make_shared<op::Parameter>(element::bf16, shape);
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data, like});
+
+    vector<float> data_vect{
+        0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};
+    vector<bfloat16> like_vect(shape_size(shape), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(shape, data_vect);
+    test_case.add_input<bfloat16>(shape, like_vect);
+    test_case.add_expected_output<bfloat16>(
+        shape,
+        vector<bfloat16>{
+            0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_bfloat16_float32)
+{
+    Shape shape_data{1, 1, 3, 5};
+    Shape shape_like{4};
+    const auto data = make_shared<op::Parameter>(element::bf16, shape_data);
+    const auto like = make_shared<op::Parameter>(element::f32, shape_like);
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data, like});
+
+    vector<bfloat16> data_vect{
+        0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};
+    vector<float> like_vect(shape_size(shape_like), 0);
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<bfloat16>(shape_data, data_vect);
+    test_case.add_input<float>(shape_like, like_vect);
+    test_case.add_expected_output<float>(
+        shape_data, {0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, convert_like_dyn_float16_to_int64)
+{
+    PartialShape pshape_data{Dimension::dynamic(), 2, 2, Dimension::dynamic()};
+    Shape shape_like{};
+    const auto data = make_shared<op::Parameter>(element::f16, pshape_data);
+    const auto like = op::Constant::create(element::i64, Shape{}, {0});
+    auto f = make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like),
+                                   ParameterVector{data});
+
+    vector<float16> data_vect = {-3.21f, 0.1f, 2.6f, 4.99f};
+    Shape shape_data{1, 2, 2, 1};
+
+    auto test_case = test::TestCase<TestEngine, test::TestCaseType::DYNAMIC>(f);
+    test_case.add_input<float16>(shape_data, data_vect);
+    test_case.add_expected_output<int64_t>(shape_data, vector<int64_t>{-3, 0, 2, 4});
+    test_case.run();
+}
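Note on the tests above: ConvertLike differs from v0 Convert only in where the destination type comes from; it is read from the element type of the second ("like") input rather than from an attribute, which is why the "like" tensor can stay all-zero in every test. A hypothetical usage sketch (not part of the patch), built with the same nGraph API the tests use:

#include <memory>
#include "ngraph/ngraph.hpp"

std::shared_ptr<ngraph::Function> make_convert_like_graph()
{
    using namespace ngraph;
    // The output element type of ConvertLike follows "like" (i32 here),
    // so the graph needs no explicit destination-type attribute.
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    auto like = std::make_shared<op::Parameter>(element::i32, Shape{2, 2});
    auto convert_like = std::make_shared<op::v1::ConvertLike>(data, like);
    return std::make_shared<Function>(convert_like, ParameterVector{data, like});
}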
diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest
index 226e503552f..20dfdb40694 100644
--- a/ngraph/test/runtime/ie/unit_test.manifest
+++ b/ngraph/test/runtime/ie/unit_test.manifest
@@ -1120,6 +1120,14 @@ IE_CPU.backwards_log
 # Unsupported op detected
 IE_CPU.backwards_batchmatmultranspose_tensor2_tensor2
 IE_CPU.round_int64
+IE_CPU.convert_like_float32_int32
+IE_CPU.convert_like_int32_float32
+IE_CPU.convert_like_uint16_float32
+IE_CPU.convert_like_int32_bool
+IE_CPU.convert_like_float32_bool
+IE_CPU.convert_like_float32_bfloat16
+IE_CPU.convert_like_bfloat16_float32
+IE_CPU.convert_like_dyn_float16_to_int64
 
 # Can't convert type f16 to IE Precision!
 IE_CPU.fused_clamp_float16
@@ -1519,8 +1527,9 @@ IE_GPU.region_yolo_v3_mxnet
 # Detected op not belonging to opset1!
 IE_GPU.round_int64
 
-# Unsupported collapse op with dynamic shape
+# Unsupported ops with dynamic shape
 IE_GPU.builder_opset1_collapse_dyn_shape
+IE_GPU.convert_like_dyn_float16_to_int64
 
 IE_GPU.onnx_model_fake_quantize_const_inputs_infer
 IE_GPU.onnx_model_fake_quantize_nonconst_inputs_infer
diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp
index cda6d31c77c..2c0f836115b 100644
--- a/ngraph/test/runtime/interpreter/evaluates_map.cpp
+++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp
@@ -1074,16 +1074,6 @@ namespace
     namespace convert_v0
     {
-        template <element::Type_t ti>
-        inline void evaluate_bool(const shared_ptr<op::Convert>& op,
-                                  const HostTensorVector& outputs,
-                                  const HostTensorVector& inputs)
-        {
-            using T = typename element_type_traits<ti>::value_type;
-            runtime::reference::convert_to_bool(inputs[0]->get_data_ptr<T>(),
-                                                outputs[0]->get_data_ptr<char>(),
-                                                shape_size(inputs[0]->get_shape()));
-        }
         template <element::Type_t ti, element::Type_t to>
         inline void evaluate(const shared_ptr<op::Convert>& op,
                              const HostTensorVector& outputs,
                              const HostTensorVector& inputs)
@@ -1102,91 +1092,109 @@
                   const HostTensorVector& outputs,
                   const HostTensorVector& inputs)
     {
-        if (OUT_ET == element::Type_t::boolean)
+        switch (inputs[0]->get_element_type())
         {
-            switch (inputs[0]->get_element_type())
-            {
-            case element::Type_t::boolean:
-                convert_v0::evaluate_bool<element::Type_t::boolean>(op, outputs, inputs);
-                break;
-            case element::Type_t::i8:
-                convert_v0::evaluate_bool<element::Type_t::i8>(op, outputs, inputs);
-                break;
-            case element::Type_t::i16:
-                convert_v0::evaluate_bool<element::Type_t::i16>(op, outputs, inputs);
-                break;
-            case element::Type_t::i32:
-                convert_v0::evaluate_bool<element::Type_t::i32>(op, outputs, inputs);
-                break;
-            case element::Type_t::i64:
-                convert_v0::evaluate_bool<element::Type_t::i64>(op, outputs, inputs);
-                break;
-            case element::Type_t::u8:
-                convert_v0::evaluate_bool<element::Type_t::u8>(op, outputs, inputs);
-                break;
-            case element::Type_t::u16:
-                convert_v0::evaluate_bool<element::Type_t::u16>(op, outputs, inputs);
-                break;
-            case element::Type_t::u32:
-                convert_v0::evaluate_bool<element::Type_t::u32>(op, outputs, inputs);
-                break;
-            case element::Type_t::u64:
-                convert_v0::evaluate_bool<element::Type_t::u64>(op, outputs, inputs);
-                break;
-            case element::Type_t::f16:
-                convert_v0::evaluate_bool<element::Type_t::f16>(op, outputs, inputs);
-                break;
-            case element::Type_t::f32:
-                convert_v0::evaluate_bool<element::Type_t::f32>(op, outputs, inputs);
-                break;
-            case element::Type_t::f64:
-                convert_v0::evaluate_bool<element::Type_t::f64>(op, outputs, inputs);
-                break;
-            default: return false;
-            }
+        case element::Type_t::boolean:
+            convert_v0::evaluate<element::Type_t::boolean, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i8:
+            convert_v0::evaluate<element::Type_t::i8, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i16:
+            convert_v0::evaluate<element::Type_t::i16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i32:
+            convert_v0::evaluate<element::Type_t::i32, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i64:
+            convert_v0::evaluate<element::Type_t::i64, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u8:
+            convert_v0::evaluate<element::Type_t::u8, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u16:
+            convert_v0::evaluate<element::Type_t::u16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u32:
+            convert_v0::evaluate<element::Type_t::u32, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u64:
+            convert_v0::evaluate<element::Type_t::u64, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::f16:
+            convert_v0::evaluate<element::Type_t::f16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::f32:
+            convert_v0::evaluate<element::Type_t::f32, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::f64:
+            convert_v0::evaluate<element::Type_t::f64, OUT_ET>(op, outputs, inputs);
+            break;
+        default: return false;
         }
-        else
+        return true;
+    }
+
+    namespace convert_like_v1
+    {
+        template <element::Type_t ti, element::Type_t to>
+        inline void evaluate(const shared_ptr<op::v1::ConvertLike>& op,
+                             const HostTensorVector& outputs,
+                             const HostTensorVector& inputs)
         {
-            switch (inputs[0]->get_element_type())
-            {
-            case element::Type_t::boolean:
-                convert_v0::evaluate<element::Type_t::boolean, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::i8:
-                convert_v0::evaluate<element::Type_t::i8, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::i16:
-                convert_v0::evaluate<element::Type_t::i16, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::i32:
-                convert_v0::evaluate<element::Type_t::i32, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::i64:
-                convert_v0::evaluate<element::Type_t::i64, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::u8:
-                convert_v0::evaluate<element::Type_t::u8, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::u16:
-                convert_v0::evaluate<element::Type_t::u16, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::u32:
-                convert_v0::evaluate<element::Type_t::u32, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::u64:
-                convert_v0::evaluate<element::Type_t::u64, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::f16:
-                convert_v0::evaluate<element::Type_t::f16, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::f32:
-                convert_v0::evaluate<element::Type_t::f32, OUT_ET>(op, outputs, inputs);
-                break;
-            case element::Type_t::f64:
-                convert_v0::evaluate<element::Type_t::f64, OUT_ET>(op, outputs, inputs);
-                break;
-            default: return false;
-            }
+            using TI = typename element_type_traits<ti>::value_type;
+            using TO = typename element_type_traits<to>::value_type;
+            runtime::reference::convert(inputs[0]->get_data_ptr<TI>(),
+                                        outputs[0]->get_data_ptr<TO>(),
+                                        shape_size(inputs[0]->get_shape()));
+        }
+
+    } // namespace convert_like_v1
+
+    template <element::Type_t OUT_ET>
+    bool evaluate(const shared_ptr<op::v1::ConvertLike>& op,
+                  const HostTensorVector& outputs,
+                  const HostTensorVector& inputs)
+    {
+        switch (inputs[0]->get_element_type())
+        {
+        case element::Type_t::boolean:
+            convert_like_v1::evaluate<element::Type_t::boolean, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u8:
+            convert_like_v1::evaluate<element::Type_t::u8, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u16:
+            convert_like_v1::evaluate<element::Type_t::u16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u32:
+            convert_like_v1::evaluate<element::Type_t::u32, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::u64:
+            convert_like_v1::evaluate<element::Type_t::u64, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i8:
+            convert_like_v1::evaluate<element::Type_t::i8, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i16:
+            convert_like_v1::evaluate<element::Type_t::i16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i32:
+            convert_like_v1::evaluate<element::Type_t::i32, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::i64:
+            convert_like_v1::evaluate<element::Type_t::i64, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::bf16:
+            convert_like_v1::evaluate<element::Type_t::bf16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::f16:
+            convert_like_v1::evaluate<element::Type_t::f16, OUT_ET>(op, outputs, inputs);
+            break;
+        case element::Type_t::f32:
+            convert_like_v1::evaluate<element::Type_t::f32, OUT_ET>(op, outputs, inputs);
+            break;
+        default: return false;
         }
         return true;
     }
@@ -1825,9 +1833,8 @@ namespace
         {
         case element::Type_t::boolean:
             return evaluate<element::Type_t::boolean>(as_type_ptr<T>(node), outputs, inputs);
-            ;
-            // case element::Type_t::bf16:
-            //     break;
+        case element::Type_t::bf16:
+            return evaluate<element::Type_t::bf16>(as_type_ptr<T>(node), outputs, inputs);
         case element::Type_t::f16:
            return evaluate<element::Type_t::f16>(as_type_ptr<T>(node), outputs, inputs);
         case element::Type_t::f64:
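Note on the interpreter changes above: the (input type, output type) pair is resolved in two steps; the outer dispatch fixes OUT_ET from the node's output element type, and the switch added here fixes the input type at run time, so each pair lands on exactly one instantiation of the reference convert. A simplified, self-contained analogue of that pattern (illustrative only; every name here is hypothetical, not the interpreter's):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

enum class ElemType { f32, i32 };

template <typename TI, typename TO>
void reference_convert(const TI* in, TO* out, std::size_t n)
{
    for (std::size_t i = 0; i < n; ++i)
        out[i] = static_cast<TO>(in[i]);
}

// Output type fixed by the template parameter, input type by the runtime switch.
template <typename TO>
bool evaluate_convert_like(ElemType input_type, const void* in, TO* out, std::size_t n)
{
    switch (input_type)
    {
    case ElemType::f32: reference_convert(static_cast<const float*>(in), out, n); return true;
    case ElemType::i32: reference_convert(static_cast<const int32_t*>(in), out, n); return true;
    default: return false;
    }
}

int main()
{
    std::vector<float> in{-1.8f, 0.2f, 3.9f};
    std::vector<int32_t> out(in.size());
    evaluate_convert_like<int32_t>(ElemType::f32, in.data(), out.data(), in.size());
    std::cout << out[0] << ' ' << out[1] << ' ' << out[2] << '\n'; // prints: -1 0 3
}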
diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
index d98208dbee7..816ae6e59a3 100644
--- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
+++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
@@ -48,6 +48,7 @@ NGRAPH_OP(TensorIterator, op::v0)
 NGRAPH_OP(ROIPooling, op::v0)
 
 NGRAPH_OP(AvgPool, op::v1)
+NGRAPH_OP(ConvertLike, op::v1)
 NGRAPH_OP(Convolution, ngraph::op::v1)
 NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1)
 NGRAPH_OP(LessEqual, op::v1)