Develop ConvertLike Reference Implementation (#3857)

* ConvertLike: Develop reference implementation

* ConvertLike: Enable single layer tests for GPU plugin

* ConvertLike: Enable bf16 precision for evaluate method

* ConvertLike: Add unit tests

* ConvertLike: Add dynamic shape test case

* ConvertLike: Remove unnecessary ngraph namespace and using declaration for v1::ConvertLike

* ConvertLike: Simplified reference::convert by using std::enable_if
This commit is contained in:
Gabriele Galiero Casay
2021-01-21 06:31:10 +01:00
committed by GitHub
parent ef72e21213
commit e026d54ed2
8 changed files with 320 additions and 107 deletions

View File

@@ -23,8 +23,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcess).*)",
// TODO: Issue: 41467 -- "unsupported element type f16 op Convert"
R"(.*(ConvertLayerTest).*targetPRC=FP16.*)",
// TODO: Issue: 41466 -- "Unsupported op 'ConvertLike'"
R"(.*(ConvertLikeLayerTest).*)",
// TODO: Issue: 41462
R"(.*(SoftMaxLayerTest).*axis=0.*)",
// TODO: Issue: 41461

View File

@@ -43,6 +43,9 @@ namespace ngraph
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};
}
}
}
} // namespace v1
} // namespace op
} // namespace ngraph

View File

@@ -25,7 +25,8 @@ namespace ngraph
namespace reference
{
template <typename TI, typename TO>
void convert(const TI* arg, TO* out, size_t count)
typename std::enable_if<!std::is_same<TO, char>::value>::type
convert(const TI* arg, TO* out, size_t count)
{
for (size_t i = 0; i < count; ++i)
{
@@ -33,14 +34,18 @@ namespace ngraph
}
}
template <typename T>
void convert_to_bool(const T* arg, char* out, size_t count)
template <typename TI, typename TO>
typename std::enable_if<std::is_same<TO, char>::value>::type
convert(const TI* arg, TO* out, size_t count)
{
for (size_t i = 0; i < count; ++i)
{
out[i] = static_cast<char>(static_cast<bool>(arg[i]));
}
}
}
}
}
} // namespace reference
} // namespace runtime
} // namespace ngraph

View File

@@ -266,6 +266,7 @@ set(MULTI_TEST_SRC
backend/concat.in.cpp
backend/constant.in.cpp
backend/convert.in.cpp
backend/convert_like.in.cpp
backend/convolution.in.cpp
backend/cos.in.cpp
backend/cosh.in.cpp

View File

@@ -0,0 +1,189 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
NGRAPH_TEST(${BACKEND_NAME}, convert_like_float32_int32)
{
    // ConvertLike with f32 data and an i32 "like" tensor: the output takes the
    // element type of the second input, and fractional values are truncated
    // toward zero on conversion to the integer target type.
    Shape input_shape{2, 3, 1};
    const auto data = make_shared<op::Parameter>(element::f32, input_shape);
    const auto like = make_shared<op::Parameter>(element::i32, input_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(data, like);
    const auto f = make_shared<Function>(convert_like, ParameterVector{data, like});

    // 'f' suffix added to -1.8: a plain double literal inside the braced
    // initializer of a vector<float> is a narrowing conversion, which is
    // ill-formed in list-initialization ([dcl.init.list]); every other
    // element here already carries the 'f' suffix.
    vector<float> data_vect = {-1.8f, 0.2f, 1.4f, 2.1f, 3.9f, 4.3f};
    // The "like" input only supplies the target type; its values are ignored.
    vector<int32_t> like_vect(shape_size(input_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(input_shape, data_vect);
    test_case.add_input<int32_t>(input_shape, like_vect);
    // int32_t instead of int for consistency with the declared element type.
    test_case.add_expected_output<int32_t>(input_shape, {-1, 0, 1, 2, 3, 4});
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_int32_float32)
{
    // ConvertLike taking i32 data and an f32 "like" input: integer values
    // must be reproduced exactly in the f32 output.
    const Shape tensor_shape{2, 2};
    const auto input = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto type_ref = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(input, type_ref);
    auto f = make_shared<Function>(convert_like, ParameterVector{input, type_ref});

    const vector<int32_t> input_values{281, 2, 3, 4};
    // The "like" tensor contributes only its element type; contents are zeros.
    const vector<float> type_ref_values(shape_size(tensor_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<int32_t>(tensor_shape, input_values);
    test_case.add_input<float>(tensor_shape, type_ref_values);
    test_case.add_expected_output<float>(tensor_shape, {281, 2, 3, 4});
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_uint16_float32)
{
    // ConvertLike from u16 data to f32, driven by the "like" input's type.
    const Shape tensor_shape{2, 2};
    const auto input = make_shared<op::Parameter>(element::u16, tensor_shape);
    const auto type_ref = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(input, type_ref);
    auto f = make_shared<Function>(convert_like, ParameterVector{input, type_ref});

    const vector<uint16_t> input_values{1, 2, 3, 4};
    // "like" values are ignored; only the element type matters.
    const vector<float> type_ref_values(shape_size(tensor_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<uint16_t>(tensor_shape, input_values);
    test_case.add_input<float>(tensor_shape, type_ref_values);
    test_case.add_expected_output<float>(tensor_shape, {1, 2, 3, 4});
    // Compare with minimal float tolerance rather than exact bit equality.
    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_int32_bool)
{
    // ConvertLike to boolean: any nonzero i32 value (including the extreme
    // limits of the type) maps to 1, zero maps to 0.
    const Shape tensor_shape{2, 3};
    const auto input = make_shared<op::Parameter>(element::i32, tensor_shape);
    const auto type_ref = make_shared<op::Parameter>(element::boolean, tensor_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(input, type_ref);
    auto f = make_shared<Function>(convert_like, ParameterVector{input, type_ref});

    const vector<int32_t> input_values{0,
                                       12,
                                       23,
                                       0,
                                       std::numeric_limits<int32_t>::lowest(),
                                       std::numeric_limits<int32_t>::max()};
    const vector<char> type_ref_values(shape_size(tensor_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<int32_t>(tensor_shape, input_values);
    test_case.add_input<char>(tensor_shape, type_ref_values);
    test_case.add_expected_output<char>(tensor_shape, {0, 1, 1, 0, 1, 1});
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_float32_bool)
{
    // ConvertLike f32 -> boolean: zero maps to 0, while every nonzero value
    // (denormal-boundary min, lowest, max, and both infinities) maps to 1.
    const Shape tensor_shape{3, 3};
    const auto input = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto type_ref = make_shared<op::Parameter>(element::boolean, tensor_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(input, type_ref);
    auto f = make_shared<Function>(convert_like, ParameterVector{input, type_ref});

    const vector<float> input_values{0.f,
                                     1.5745f,
                                     0.12352f,
                                     0.f,
                                     std::numeric_limits<float>::lowest(),
                                     std::numeric_limits<float>::max(),
                                     std::numeric_limits<float>::min(),
                                     std::numeric_limits<float>::infinity(),
                                     -std::numeric_limits<float>::infinity()};
    const vector<char> type_ref_values(shape_size(tensor_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(tensor_shape, input_values);
    test_case.add_input<char>(tensor_shape, type_ref_values);
    test_case.add_expected_output<char>(tensor_shape, {0, 1, 1, 0, 1, 1, 1, 1, 1});
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_float32_bfloat16)
{
    // ConvertLike f32 -> bf16: values chosen to be exactly representable in
    // bfloat16, so the round trip is lossless.
    const Shape tensor_shape{1, 1, 3, 5};
    const auto input = make_shared<op::Parameter>(element::f32, tensor_shape);
    const auto type_ref = make_shared<op::Parameter>(element::bf16, tensor_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(input, type_ref);
    auto f = make_shared<Function>(convert_like, ParameterVector{input, type_ref});

    const vector<float> input_values{
        0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};
    const vector<bfloat16> type_ref_values(shape_size(tensor_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>(tensor_shape, input_values);
    test_case.add_input<bfloat16>(tensor_shape, type_ref_values);
    test_case.add_expected_output<bfloat16>(
        tensor_shape,
        vector<bfloat16>{
            0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5});
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_bfloat16_float32)
{
    // ConvertLike bf16 -> f32 where the "like" input deliberately has a
    // different shape ({4}): the output keeps the data input's shape and only
    // takes the element type from "like".
    const Shape data_shape{1, 1, 3, 5};
    const Shape like_shape{4};
    const auto input = make_shared<op::Parameter>(element::bf16, data_shape);
    const auto type_ref = make_shared<op::Parameter>(element::f32, like_shape);
    const auto convert_like = make_shared<op::v1::ConvertLike>(input, type_ref);
    auto f = make_shared<Function>(convert_like, ParameterVector{input, type_ref});

    const vector<bfloat16> input_values{
        0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};
    const vector<float> type_ref_values(shape_size(like_shape), 0);

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<bfloat16>(data_shape, input_values);
    test_case.add_input<float>(like_shape, type_ref_values);
    test_case.add_expected_output<float>(
        data_shape, {0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5});
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_like_dyn_float16_to_int64)
{
    // ConvertLike with a dynamically-shaped f16 data input and a scalar i64
    // constant as the "like" input; conversion to int truncates toward zero.
    PartialShape pshape_data{Dimension::dynamic(), 2, 2, Dimension::dynamic()};
    const auto data = make_shared<op::Parameter>(element::f16, pshape_data);
    // Only the constant's element type matters; its value (0) is ignored.
    const auto like = op::Constant::create(element::i64, Shape{}, {0});
    auto f =
        make_shared<Function>(make_shared<op::v1::ConvertLike>(data, like), ParameterVector{data});

    // NOTE: the unused local `Shape shape_like{};` from the original was
    // removed — `like` is a scalar Constant created with Shape{} directly.
    vector<float16> data_vect = {-3.21f, 0.1f, 2.6f, 4.99f};
    // Concrete shape supplied at run time for the dynamic dimensions.
    Shape shape_data{1, 2, 2, 1};

    auto test_case = test::TestCase<TestEngine, ngraph::test::TestCaseType::DYNAMIC>(f);
    test_case.add_input<float16>(shape_data, data_vect);
    test_case.add_expected_output<int64_t>(shape_data, vector<int64_t>{-3, 0, 2, 4});
    test_case.run();
}

View File

@@ -1120,6 +1120,14 @@ IE_CPU.backwards_log
# Unsupported op detected
IE_CPU.backwards_batchmatmultranspose_tensor2_tensor2
IE_CPU.round_int64
IE_CPU.convert_like_float32_int32
IE_CPU.convert_like_int32_float32
IE_CPU.convert_like_uint16_float32
IE_CPU.convert_like_int32_bool
IE_CPU.convert_like_float32_bool
IE_CPU.convert_like_float32_bfloat16
IE_CPU.convert_like_bfloat16_float32
IE_CPU.convert_like_dyn_float16_to_int64
# Can't convert type f16 to IE Precision!
IE_CPU.fused_clamp_float16
@@ -1519,8 +1527,9 @@ IE_GPU.region_yolo_v3_mxnet
# Detected op not belonging to opset1!
IE_GPU.round_int64
# Unsupported collapse op with dynamic shape
# Unsupported ops with dynamic shape
IE_GPU.builder_opset1_collapse_dyn_shape
IE_GPU.convert_like_dyn_float16_to_int64
IE_GPU.onnx_model_fake_quantize_const_inputs_infer
IE_GPU.onnx_model_fake_quantize_nonconst_inputs_infer

View File

@@ -1074,16 +1074,6 @@ namespace
namespace convert_v0
{
template <element::Type_t ET>
inline void evaluate_bool(const shared_ptr<op::v0::Convert>& op,
const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
using T = typename element_type_traits<ET>::value_type;
runtime::reference::convert_to_bool<T>(inputs[0]->get_data_ptr<T>(),
outputs[0]->get_data_ptr<char>(),
shape_size(inputs[0]->get_shape()));
}
template <element::Type_t ti, element::Type_t to>
inline void evaluate(const shared_ptr<op::v0::Convert>& op,
const HostTensorVector& outputs,
@@ -1102,91 +1092,109 @@ namespace
const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
if (OUT_ET == element::Type_t::boolean)
switch (inputs[0]->get_element_type())
{
switch (inputs[0]->get_element_type())
{
case element::Type_t::boolean:
convert_v0::evaluate_bool<element::Type_t::boolean>(op, outputs, inputs);
break;
case element::Type_t::i8:
convert_v0::evaluate_bool<element::Type_t::i8>(op, outputs, inputs);
break;
case element::Type_t::i16:
convert_v0::evaluate_bool<element::Type_t::i16>(op, outputs, inputs);
break;
case element::Type_t::i32:
convert_v0::evaluate_bool<element::Type_t::i32>(op, outputs, inputs);
break;
case element::Type_t::i64:
convert_v0::evaluate_bool<element::Type_t::i64>(op, outputs, inputs);
break;
case element::Type_t::u8:
convert_v0::evaluate_bool<element::Type_t::u8>(op, outputs, inputs);
break;
case element::Type_t::u16:
convert_v0::evaluate_bool<element::Type_t::u16>(op, outputs, inputs);
break;
case element::Type_t::u32:
convert_v0::evaluate_bool<element::Type_t::u32>(op, outputs, inputs);
break;
case element::Type_t::u64:
convert_v0::evaluate_bool<element::Type_t::u64>(op, outputs, inputs);
break;
case element::Type_t::f16:
convert_v0::evaluate_bool<element::Type_t::f16>(op, outputs, inputs);
break;
case element::Type_t::f32:
convert_v0::evaluate_bool<element::Type_t::f32>(op, outputs, inputs);
break;
case element::Type_t::f64:
convert_v0::evaluate_bool<element::Type_t::f64>(op, outputs, inputs);
break;
default: return false;
}
case element::Type_t::boolean:
convert_v0::evaluate<element::Type_t::boolean, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i8:
convert_v0::evaluate<element::Type_t::i8, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i16:
convert_v0::evaluate<element::Type_t::i16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i32:
convert_v0::evaluate<element::Type_t::i32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i64:
convert_v0::evaluate<element::Type_t::i64, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u8:
convert_v0::evaluate<element::Type_t::u8, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u16:
convert_v0::evaluate<element::Type_t::u16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u32:
convert_v0::evaluate<element::Type_t::u32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u64:
convert_v0::evaluate<element::Type_t::u64, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f16:
convert_v0::evaluate<element::Type_t::f16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f32:
convert_v0::evaluate<element::Type_t::f32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f64:
convert_v0::evaluate<element::Type_t::f64, OUT_ET>(op, outputs, inputs);
break;
default: return false;
}
else
return true;
}
namespace convert_like_v1
{
template <element::Type_t ti, element::Type_t to>
inline void evaluate(const shared_ptr<op::v1::ConvertLike>& op,
const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
switch (inputs[0]->get_element_type())
{
case element::Type_t::boolean:
convert_v0::evaluate<element::Type_t::boolean, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i8:
convert_v0::evaluate<element::Type_t::i8, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i16:
convert_v0::evaluate<element::Type_t::i16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i32:
convert_v0::evaluate<element::Type_t::i32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i64:
convert_v0::evaluate<element::Type_t::i64, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u8:
convert_v0::evaluate<element::Type_t::u8, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u16:
convert_v0::evaluate<element::Type_t::u16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u32:
convert_v0::evaluate<element::Type_t::u32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u64:
convert_v0::evaluate<element::Type_t::u64, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f16:
convert_v0::evaluate<element::Type_t::f16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f32:
convert_v0::evaluate<element::Type_t::f32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f64:
convert_v0::evaluate<element::Type_t::f64, OUT_ET>(op, outputs, inputs);
break;
default: return false;
}
using TI = typename element_type_traits<ti>::value_type;
using TO = typename element_type_traits<to>::value_type;
runtime::reference::convert<TI, TO>(inputs[0]->get_data_ptr<TI>(),
outputs[0]->get_data_ptr<TO>(),
shape_size(inputs[0]->get_shape()));
}
} // namespace convert_like_v1
// Dispatches op::v1::ConvertLike evaluation on the *input* element type; the
// output type OUT_ET is already fixed by the outer dispatch switch. Each case
// forwards to convert_like_v1::evaluate<IN, OUT_ET>, which calls the
// reference convert kernel.
//
// Returns false for any input element type not listed below (notably f64 and
// the low-precision u1/i4/u4 types have no case here, so evaluation of those
// inputs is reported as unsupported). NOTE(review): f64 inputs falling
// through to `default: return false;` looks intentional but is worth
// confirming, since the v0 Convert dispatch above does handle f64.
template <element::Type_t OUT_ET>
bool evaluate(const shared_ptr<op::v1::ConvertLike>& op,
const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
switch (inputs[0]->get_element_type())
{
case element::Type_t::boolean:
convert_like_v1::evaluate<element::Type_t::boolean, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u8:
convert_like_v1::evaluate<element::Type_t::u8, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u16:
convert_like_v1::evaluate<element::Type_t::u16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u32:
convert_like_v1::evaluate<element::Type_t::u32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::u64:
convert_like_v1::evaluate<element::Type_t::u64, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i8:
convert_like_v1::evaluate<element::Type_t::i8, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i16:
convert_like_v1::evaluate<element::Type_t::i16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i32:
convert_like_v1::evaluate<element::Type_t::i32, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::i64:
convert_like_v1::evaluate<element::Type_t::i64, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::bf16:
convert_like_v1::evaluate<element::Type_t::bf16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f16:
convert_like_v1::evaluate<element::Type_t::f16, OUT_ET>(op, outputs, inputs);
break;
case element::Type_t::f32:
convert_like_v1::evaluate<element::Type_t::f32, OUT_ET>(op, outputs, inputs);
break;
// Unsupported input element type (e.g. f64, u1): signal failure to caller.
default: return false;
}
return true;
}
@@ -1825,9 +1833,8 @@ namespace
{
case element::Type_t::boolean:
return evaluate<element::Type_t::boolean>(as_type_ptr<T>(node), outputs, inputs);
;
// case element::Type_t::bf16:
// break;
case element::Type_t::bf16:
return evaluate<element::Type_t::bf16>(as_type_ptr<T>(node), outputs, inputs);
case element::Type_t::f16:
return evaluate<element::Type_t::f16>(as_type_ptr<T>(node), outputs, inputs);
case element::Type_t::f64:

View File

@@ -48,6 +48,7 @@ NGRAPH_OP(TensorIterator, op::v0)
NGRAPH_OP(ROIPooling, op::v0)
NGRAPH_OP(AvgPool, op::v1)
NGRAPH_OP(ConvertLike, op::v1)
NGRAPH_OP(Convolution, ngraph::op::v1)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1)
NGRAPH_OP(LessEqual, op::v1)