Revise Reference Implementation of Range op (#3409)

* Range: Align operator with spec and add unit tests

* Range: Remove output shape from range ref impl signature

* Range: Exclude backend unit tests for CPU and GPU due to unsupported dynamic ops

* Range: Add single layer test class for Range-4

* Range: Add unit test for shape inference

* Range: Add unit tests for i32 and f32

* Range: Refactor Range v0 backend test and add test for f32 type

* Range: Add floating-point tolerance in unit tests to avoid failures due to precision

* Range: Add subgraph tests for Range with element-wise Add

* Range: Refactor Range class for single-layer tests and add a Range-Add element-wise test with truncated inputs
Gabriele Galiero Casay authored on 2020-12-08 14:05:00 +01:00; committed by GitHub
parent e81201ea35
commit 6888ffa328
13 changed files with 932 additions and 54 deletions
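
For context, the revised Range-4 shape inference below computes the output length as max(ceil((stop - start) / step), 0), truncating real-valued inputs towards zero when the output type is integral. The following is a minimal standalone sketch of that rule under a hypothetical helper name, not code from this patch:

#include <cassert>
#include <cmath>
#include <cstddef>

// Hypothetical helper mirroring op::v4::Range::validate_and_infer_types().
std::size_t range_v4_num_elements(double start, double stop, double step, bool integral_output)
{
    // For integral output types the inputs are first truncated towards zero,
    // matching the std::trunc calls added in this commit.
    if (integral_output)
    {
        start = std::trunc(start);
        stop = std::trunc(stop);
        step = std::trunc(step);
    }
    // The number of elements is max(ceil((stop - start) / step), 0).
    if ((step > 0 && start >= stop) || (step < 0 && start <= stop))
    {
        return 0;
    }
    return static_cast<std::size_t>(std::ceil(std::fabs(stop - start) / std::fabs(step)));
}

int main()
{
    // Matches the range_v4_trunc_inputs test below: (1.2, 11.3, 1.6) with an
    // i32 output truncates to (1, 11, 1) and yields 10 elements.
    assert(range_v4_num_elements(1.2, 11.3, 1.6, true) == 10);
    // Positive step with start >= stop yields an empty range.
    assert(range_v4_num_elements(5, 1, 1, false) == 0);
    return 0;
}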

View File

@@ -30,6 +30,7 @@ std::vector<std::string> disabledTestPatterns() {
// TODO: Issue: 34518
R"(.*RangeLayerTest.*)",
R"(.*(RangeAddSubgraphTest).*Start=1.2.*Stop=(5.2|-5.2).*Step=(0.1|-0.1).*netPRC=FP16.*)",
R"(.*(RangeNumpyAddSubgraphTest).*netPRC=FP16.*)",
// TODO: Issue: 34083
#if (defined(_WIN32) || defined(_WIN64))
R"(.*(CoreThreadingTestsWithIterations).*(smoke_LoadNetworkAccuracy).*)",

View File

@@ -19,11 +19,17 @@ const std::vector<float> negativeStart = { 1.0f, 1.2f };
const std::vector<float> negativeStop = { -5.0f, -5.2f };
const std::vector<float> negativeStep = { -1.0f, -0.1f };
const std::vector<float> trunc_start = { 1.2f, 1.9f };
const std::vector<float> trunc_stop = { 11.4f, 11.8f };
const std::vector<float> trunc_step = { 1.3f, 2.8f };
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
InferenceEngine::Precision::FP16 // "[NOT_IMPLEMENTED] Input image format FP16 is not supported yet...
};
// ------------------------------ V0 ------------------------------
INSTANTIATE_TEST_CASE_P(smoke_BasicPositive, RangeAddSubgraphTest,
::testing::Combine(
::testing::ValuesIn(positiveStart),
@@ -49,4 +55,44 @@ INSTANTIATE_TEST_CASE_P(smoke_BasicNegative, RangeAddSubgraphTest,
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RangeAddSubgraphTest::getTestCaseName);
// ------------------------------ V4 ------------------------------
INSTANTIATE_TEST_CASE_P(smoke_BasicPositive, RangeNumpyAddSubgraphTest,
::testing::Combine(
::testing::ValuesIn(positiveStart),
::testing::ValuesIn(positiveStop),
::testing::ValuesIn(positiveStep),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RangeNumpyAddSubgraphTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BasicNegative, RangeNumpyAddSubgraphTest,
::testing::Combine(
::testing::ValuesIn(negativeStart),
::testing::ValuesIn(negativeStop),
::testing::ValuesIn(negativeStep),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RangeNumpyAddSubgraphTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BasicTruncateInputs, RangeNumpyAddSubgraphTest,
::testing::Combine(
::testing::ValuesIn(trunc_start),
::testing::ValuesIn(trunc_stop),
::testing::ValuesIn(trunc_step),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::I32),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
RangeNumpyAddSubgraphTest::getTestCaseName);
} // namespace

View File

@@ -36,4 +36,15 @@ protected:
void SetUp() override;
};
class RangeNumpyLayerTest : public testing::WithParamInterface<RangeParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<RangeParams> obj);
void Infer() override;
protected:
void SetUp() override;
private:
float start, stop, step;
};
} // namespace LayerTestsDefinitions

View File

@@ -16,6 +16,8 @@
namespace LayerTestsDefinitions {
// ------------------------------ V0 ------------------------------
class RangeAddSubgraphTest : public testing::WithParamInterface<RangeParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
@@ -23,4 +25,15 @@ public:
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions
// ------------------------------ V4 ------------------------------
class RangeNumpyAddSubgraphTest : public testing::WithParamInterface<RangeParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<RangeParams> obj);
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions

View File

@@ -74,4 +74,63 @@ TEST_P(RangeLayerTest, CompareWithRefs) {
Run();
}
std::string RangeNumpyLayerTest::getTestCaseName(testing::TestParamInfo<RangeParams> obj) {
InferenceEngine::Precision netPrc;
InferenceEngine::Precision paramPrc;
InferenceEngine::Precision outPrc;
InferenceEngine::Layout inLayout, outLayout;
float start, stop, step;
std::string targetDevice;
std::tie(start, stop, step, paramPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param;
std::ostringstream result;
const char separator = '_';
result << "Start=" << start << separator;
result << "Stop=" << stop << separator;
result << "Step=" << step << separator;
result << "paramPRC=" << paramPrc.name() << separator;
result << "netPRC=" << netPrc.name() << separator;
result << "inL=" << inLayout << separator;
result << "outL=" << outLayout << separator;
result << "trgDev=" << targetDevice;
return result.str();
}
void RangeNumpyLayerTest::Infer() {
inferRequest = executableNetwork.CreateInferRequest();
inputs.clear();
// attach freshly filled scalar blobs to the request so they are actually used
auto blobStart = inferRequest.GetBlob("start");
blobStart = FuncTestUtils::createAndFillBlobWithFloatArray(blobStart->getTensorDesc(), &start, 1);
inferRequest.SetBlob("start", blobStart);
auto blobStop = inferRequest.GetBlob("stop");
blobStop = FuncTestUtils::createAndFillBlobWithFloatArray(blobStop->getTensorDesc(), &stop, 1);
inferRequest.SetBlob("stop", blobStop);
auto blobStep = inferRequest.GetBlob("step");
blobStep = FuncTestUtils::createAndFillBlobWithFloatArray(blobStep->getTensorDesc(), &step, 1);
inferRequest.SetBlob("step", blobStep);
inferRequest.Infer();
}
void RangeNumpyLayerTest::SetUp() {
InferenceEngine::Precision netPrc;
InferenceEngine::Precision paramPrc;
std::tie(start, stop, step, paramPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam();
auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc);
auto ngParamPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(paramPrc);
auto params = ngraph::builder::makeParams(ngParamPrc, {std::vector<size_t>(), std::vector<size_t>(), std::vector<size_t>()});
params[0]->set_friendly_name("start");
params[1]->set_friendly_name("stop");
params[2]->set_friendly_name("step");
auto range = std::make_shared<ngraph::opset4::Range>(params[0], params[1], params[2], ngNetPrc);
const ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(range)};
function = std::make_shared<ngraph::Function>(results, params, "Range");
}
TEST_P(RangeNumpyLayerTest, CompareWithRefs) {
Run();
}
} // namespace LayerTestsDefinitions

View File

@@ -6,6 +6,8 @@
namespace LayerTestsDefinitions {
// ------------------------------ V0 ------------------------------
std::string RangeAddSubgraphTest::getTestCaseName(testing::TestParamInfo<RangeParams> obj) {
InferenceEngine::Precision netPrecision;
InferenceEngine::Precision inPrc, outPrc;
@@ -44,4 +46,50 @@ void RangeAddSubgraphTest::SetUp() {
TEST_P(RangeAddSubgraphTest, CompareWithRefs) {
Run();
}
} // namespace LayerTestsDefinitions
// ------------------------------ V4 ------------------------------
std::string RangeNumpyAddSubgraphTest::getTestCaseName(testing::TestParamInfo<RangeParams> obj) {
InferenceEngine::Precision netPrc;
InferenceEngine::Precision constPrc;
InferenceEngine::Precision outPrc;
InferenceEngine::Layout inLayout, outLayout;
float start, stop, step;
std::string targetDevice;
std::tie(start, stop, step, constPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param;
std::ostringstream result;
const char separator = '_';
result << "Start=" << start << separator;
result << "Stop=" << stop << separator;
result << "Step=" << step << separator;
result << "constPRC=" << constPrc.name() << separator;
result << "netPRC=" << netPrc.name() << separator;
result << "targetDevice=" << targetDevice;
return result.str();
}
void RangeNumpyAddSubgraphTest::SetUp() {
InferenceEngine::Precision netPrc;
InferenceEngine::Precision constPrc;
float start, stop, step;
std::tie(start, stop, step, constPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam();
auto ngConstPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(constPrc);
auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc);
auto startConstant = std::make_shared<ngraph::opset1::Constant>(ngConstPrc, ngraph::Shape{}, start);
auto stopConstant = std::make_shared<ngraph::opset1::Constant>(ngConstPrc, ngraph::Shape{}, stop);
auto stepConstant = std::make_shared<ngraph::opset1::Constant>(ngConstPrc, ngraph::Shape{}, step);
auto range = std::make_shared<ngraph::opset4::Range>(startConstant, stopConstant, stepConstant, ngNetPrc);
auto params = ngraph::builder::makeParams(ngNetPrc, {range->get_shape()});
auto eltwise = ngraph::builder::makeEltwise(params.front(), range, ngraph::helpers::EltwiseTypes::ADD);
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(eltwise)};
function = std::make_shared<ngraph::Function>(results, params, "RangeEltwise");
}
TEST_P(RangeNumpyAddSubgraphTest, CompareWithRefs) {
Run();
}
} // namespace LayerTestsDefinitions

View File

@@ -35,12 +35,12 @@ namespace ngraph
/// \brief Constructs a range operation.
///
/// \param start The tensor producing the start value. Must be a scalar of integer
/// element type, and same element type as `stop` and `step`.
/// \param stop The tensor producing the stop value. Must be a scalar of integer
/// element type, and same element type as `start` and `step`.
/// \param step The tensor producing the step value. Must be a scalar of integer
/// element type, and same element type as `start` and `stop`.
/// \param start The tensor producing the start value. Must be a scalar of numeric
/// element type.
/// \param stop The tensor producing the stop value. Must be a scalar of numeric
/// element type.
/// \param step The tensor producing the step value. Must be a scalar of numeric
/// element type.
/// \param output_type The type of the output.
Range(const Output<Node>& start,
const Output<Node>& stop,

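As a hedged usage sketch of the relaxed contract documented above (any numeric scalar inputs, output element type given explicitly), mirroring how the unit tests in this commit build the node:

// Sketch only: constructing a Range-4 node the way the unit tests in this
// commit do. Real-valued inputs are legal even for an integral output type;
// they are truncated towards zero during shape inference and evaluation.
auto start = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
auto stop = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
auto step = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
auto range = std::make_shared<ngraph::op::v4::Range>(start, stop, step, ngraph::element::i32);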
View File

@@ -37,9 +37,9 @@ namespace ngraph
typename std::enable_if<std::is_floating_point<T>::value ||
std::is_same<T, bfloat16>::value ||
std::is_same<T, float16>::value>::type
range(const T* start, const T* step, const Shape& out_shape, T* out)
range(const T* start, const T* step, const size_t& num_elem, T* out)
{
for (size_t i = 0; i < shape_size(out_shape); i++)
for (size_t i = 0; i < num_elem; i++)
{
out[i] = *start + (static_cast<T>(i) * (*step));
}
@@ -48,11 +48,11 @@ namespace ngraph
// Return type is `void`, only enabled if `T` is `is_integral`.
template <typename T>
typename std::enable_if<std::is_integral<T>::value>::type
range(const T* start, const T* step, const Shape& out_shape, T* out)
range(const T* start, const T* step, const size_t& num_elem, T* out)
{
T val = *start;
for (size_t i = 0; i < shape_size(out_shape); i++)
for (size_t i = 0; i < num_elem; i++)
{
out[i] = val;
val += *step;

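Callers of the reference kernel now pass an element count (typically shape_size(out_shape)) instead of the shape itself, as the evaluate hunks below show. A small sketch of calling the integral overload under the new signature, with values taken from one of the backend test cases:

#include <cstdint>
#include <vector>
#include "ngraph/runtime/reference/range.hpp"

void range_ref_example()
{
    // Values mirror the backend test case
    // RangeTest<int32_t>{-5, 6, 3, Shape{4}, {-5, -2, 1, 4}}.
    int32_t start = -5;
    int32_t step = 3;
    std::vector<int32_t> out(4); // num_elem = max(ceil((6 - (-5)) / 3), 0) = 4
    ngraph::runtime::reference::range(&start, &step, out.size(), out.data());
    // out now holds {-5, -2, 1, 4}
}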
View File

@@ -46,6 +46,11 @@ bool ngraph::op::v4::Range::visit_attributes(AttributeVisitor& visitor)
void op::v4::Range::validate_and_infer_types()
{
NODE_VALIDATION_CHECK(this,
m_output_type.is_integral_number() || m_output_type.is_real(),
"output tensor type should be a numeric type. Got: ",
m_output_type);
set_input_is_relevant_to_shape(0);
set_input_is_relevant_to_shape(1);
set_input_is_relevant_to_shape(2);
@@ -57,6 +62,22 @@ void op::v4::Range::validate_and_infer_types()
NODE_VALIDATION_CHECK(
this, get_input_partial_shape(2).compatible(Shape{}), "'step' input is not a scalar");
NODE_VALIDATION_CHECK(this,
get_input_element_type(0).is_integral_number() ||
get_input_element_type(0).is_real(),
"'start' input scalar should be a numeric type. Got: ",
get_input_element_type(0));
NODE_VALIDATION_CHECK(this,
get_input_element_type(1).is_integral_number() ||
get_input_element_type(1).is_real(),
"'stop' input scalar should be a numeric type. Got: ",
get_input_element_type(1));
NODE_VALIDATION_CHECK(this,
get_input_element_type(2).is_integral_number() ||
get_input_element_type(2).is_real(),
"'step' input scalar should be a numeric type. Got: ",
get_input_element_type(2));
auto const_start = as_type_ptr<op::Constant>(this->input_value(0).get_node_shared_ptr());
auto const_stop = as_type_ptr<op::Constant>(this->input_value(1).get_node_shared_ptr());
auto const_step = as_type_ptr<op::Constant>(this->input_value(2).get_node_shared_ptr());
@@ -96,8 +117,23 @@ void op::v4::Range::validate_and_infer_types()
if (const_start != nullptr && const_stop != nullptr && const_step != nullptr)
{
double span;
// all inputs must be cast to output_type before the element count is computed;
// rounding of the cast values is done towards zero
if (m_output_type.is_integral_number() && get_input_element_type(0).is_real())
{
start = std::trunc(start);
}
if (m_output_type.is_integral_number() && get_input_element_type(1).is_real())
{
stop = std::trunc(stop);
}
if (m_output_type.is_integral_number() && get_input_element_type(2).is_real())
{
step = std::trunc(step);
}
// the number of elements is: max(ceil((stop - start) / step), 0)
double span;
if ((step > 0 && start >= stop) || (step < 0 && start <= stop))
{
span = 0;
@@ -182,7 +218,8 @@ bool evaluate_v4_range(const HostTensorPtr& out,
}
Shape out_shape = Shape({static_cast<size_t>(out_size)});
out->set_shape(out_shape);
runtime::reference::range(&start_val, &step_val, out_shape, out->get_data_ptr<ET>());
runtime::reference::range(
&start_val, &step_val, shape_size(out_shape), out->get_data_ptr<ET>());
return true;
}
@@ -457,7 +494,8 @@ bool try_evaluate_range(const HostTensorPtr& out,
}
Shape out_shape = Shape({static_cast<size_t>(out_size)});
out->set_shape(out_shape);
runtime::reference::range(&start_val, &step_val, out_shape, out->get_data_ptr<ET>());
runtime::reference::range(
&start_val, &step_val, shape_size(out_shape), out->get_data_ptr<ET>());
return true;
}
else

View File

@@ -16,16 +16,17 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close_f.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
using namespace ngraph::test;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
template <typename T>
struct RangeTest
@@ -37,26 +38,94 @@ struct RangeTest
std::vector<T> expected_result;
};
// ------------------------------ V0 ------------------------------
// TODO(amprocte): We should test this with more than just int32, but there is a bug in the
// handling of element type-changing that is currently blocking doing that easily.
NGRAPH_TEST(${BACKEND_NAME}, range)
NGRAPH_TEST(${BACKEND_NAME}, range_v0_int32)
{
// Create a graph for f(start,stop,step) = Range(start,stop,step).
auto start = make_shared<op::Parameter>(element::i32, Shape{});
auto stop = make_shared<op::Parameter>(element::i32, Shape{});
auto step = make_shared<op::Parameter>(element::i32, Shape{});
element::Type_t et = element::i32;
std::vector<RangeTest<int32_t>> int32_tests = {
RangeTest<int32_t>{0, 10, 1, Shape{10}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
RangeTest<int32_t>{-5, 6, 3, Shape{4}, {-5, -2, 1, 4}},
RangeTest<int32_t>{10, 5, -3, Shape{2}, {10, 7}}};
auto range = make_shared<op::Range>(start, stop, step);
ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1)));
for (auto& test : int32_tests)
{
// Create a graph for f(start,stop,step) = Range(start,stop,step).
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<int32_t>{test.start});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<int32_t>{test.stop});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<int32_t>{test.step});
auto range = make_shared<op::Range>(start, stop, step);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
auto f = make_shared<Function>(NodeVector{range}, ParameterVector{});
auto f = make_shared<Function>(NodeVector{range}, ParameterVector{start, stop, step});
auto test_case = test::TestCase<TestEngine>(f);
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
test_case.add_expected_output<int32_t>(test.expected_result_shape, test.expected_result);
test_case.run();
}
}
auto ex = backend->compile(f);
NGRAPH_TEST(${BACKEND_NAME}, range_v0_float32)
{
element::Type_t et = element::f32;
std::vector<RangeTest<float>> float32_tests = {
RangeTest<float>{0, 1, 0.25, Shape{4}, {0.0f, 0.25f, 0.5f, 0.75f}},
RangeTest<float>{-1,
0.875,
0.2,
Shape{10},
{-1.0f, -0.8f, -0.6f, -0.4f, -0.2f, 0.0f, 0.2f, 0.4f, 0.6f, 0.8f}},
RangeTest<float>{
2, 0, -0.25, Shape{8}, {2.0f, 1.75f, 1.5f, 1.25f, 1.0f, 0.75f, 0.5f, 0.25f}}};
auto t_r = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic());
for (auto& test : float32_tests)
{
// Create a graph for f(start,stop,step) = Range(start,stop,step).
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<float>{test.start});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<float>{test.stop});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<float>{test.step});
auto range = make_shared<op::Range>(start, stop, step);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
auto f = make_shared<Function>(NodeVector{range}, ParameterVector{});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_expected_output<float>(test.expected_result_shape, test.expected_result);
test_case.run_with_tolerance_as_fp(1.0e-4f);
}
}
// ------------------------------ V4 ------------------------------
NGRAPH_TEST(${BACKEND_NAME}, range_v4_trunc_inputs)
{
auto start = make_shared<op::Parameter>(element::f32, Shape{});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Parameter>(element::f32, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
auto f = make_shared<Function>(range, ParameterVector{start, stop, step});
std::vector<float> start_vect{1.2f};
std::vector<float> stop_vect{11.3f};
std::vector<float> step_vect{1.6f};
auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(f);
test_case.add_input<float>(Shape{}, start_vect);
test_case.add_input<float>(Shape{}, stop_vect);
test_case.add_input<float>(Shape{}, step_vect);
test_case.add_expected_output<int32_t>(Shape{10},
std::vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, range_v4_int32)
{
element::Type_t et = element::i32;
std::vector<RangeTest<int32_t>> int32_tests = {
RangeTest<int32_t>{0, 10, 1, Shape{10}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
RangeTest<int32_t>{-5, 6, 3, Shape{4}, {-5, -2, 1, 4}},
@@ -65,21 +134,48 @@ NGRAPH_TEST(${BACKEND_NAME}, range)
for (auto& test : int32_tests)
{
auto t_start = backend->create_tensor(element::i32, Shape{});
auto t_stop = backend->create_tensor(element::i32, Shape{});
auto t_step = backend->create_tensor(element::i32, Shape{});
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<int32_t>{test.start});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<int32_t>{test.stop});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<int32_t>{test.step});
auto range = make_shared<op::v4::Range>(start, stop, step, et);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
auto f = make_shared<Function>(NodeVector{range}, ParameterVector{});
copy_data(t_start, std::vector<int32_t>{test.start});
copy_data(t_stop, std::vector<int32_t>{test.stop});
copy_data(t_step, std::vector<int32_t>{test.step});
auto test_case = test::TestCase<TestEngine>(f);
ex->call_with_validate({t_r}, {t_start, t_stop, t_step});
ASSERT_EQ(t_r->get_element_type(), element::i32);
ASSERT_EQ(t_r->get_shape(), test.expected_result_shape);
auto results = read_vector<int32_t>(t_r);
ASSERT_EQ(results, test.expected_result);
test_case.add_expected_output<int32_t>(test.expected_result_shape, test.expected_result);
test_case.run();
}
}
NGRAPH_TEST(${BACKEND_NAME}, range_v4_float32)
{
element::Type_t et = element::f32;
std::vector<RangeTest<float>> float32_tests = {
RangeTest<float>{0, 1, 0.25, Shape{4}, {0.0f, 0.25f, 0.5f, 0.75f}},
RangeTest<float>{-1,
0.875,
0.2,
Shape{10},
{-1.0f, -0.8f, -0.6f, -0.4f, -0.2f, 0.0f, 0.2f, 0.4f, 0.6f, 0.8f}},
RangeTest<float>{10, 0, 1, Shape{0}, {}},
RangeTest<float>{
2, 0, -0.25, Shape{8}, {2.0f, 1.75f, 1.5f, 1.25f, 1.0f, 0.75f, 0.5f, 0.25f}}};
for (auto& test : float32_tests)
{
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<float>{test.start});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<float>{test.stop});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<float>{test.step});
auto range = make_shared<op::v4::Range>(start, stop, step, et);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
auto f = make_shared<Function>(NodeVector{range}, ParameterVector{});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_expected_output<float>(test.expected_result_shape, test.expected_result);
test_case.run_with_tolerance_as_fp(1.0e-4f);
}
}

View File

@@ -671,9 +671,14 @@ relu_2Dbackprop
relu_4Dbackprop
# data [<name>] doesn't exist
range
parameter_as_output
# MKLDNNGraph::CreateGraph: No inputs for the topology
range_v0_int32
range_v0_float32
range_v4_int32
range_v4_float32
# Cannot cast ngraph node QuantizedDot to CNNLayer!
quantized_dot_u8u8
quantized_dot_int32_output
@@ -1144,6 +1149,9 @@ IE_CPU.nonmaxsuppression_two_classes
# Bug in CPU plugin for ROIPooling when pooled size is 1x1 and method is bilinear
IE_CPU.roi_pooling_1x1_bilinear
# Unsupported dynamic op
IE_CPU.range_v4_trunc_inputs
# output mismatch
IE_CPU.gather_nd_batch_1d_from_3d_negative
@@ -1513,3 +1521,4 @@ onnx_controlflow_loop_infinite
# unsupported dynamic ops
onnx_dyn_shapes_reduce_max_dynamic_input_rank_negative_axis
IE_GPU.range_v4_trunc_inputs

View File

@@ -49,7 +49,8 @@ std::shared_ptr<op::Constant> make_range_replacement(const element::Type& et,
NGRAPH_CHECK(start_vec.size() == 1 && step_vec.size() == 1);
runtime::reference::range<T>(start_vec.data(), step_vec.data(), shape, elements.data());
runtime::reference::range<T>(
start_vec.data(), step_vec.data(), shape_size(shape), elements.data());
return make_shared<op::Constant>(et, shape, elements);
}

View File

@@ -21,6 +21,16 @@
using namespace std;
using namespace ngraph;
struct RangeParams
{
double start;
double stop;
double step;
PartialShape expected_shape;
};
// ------------------------------ V0 ------------------------------
TEST(type_prop, range_nonconst_ok)
{
auto start = make_shared<op::Parameter>(element::i32, Shape{});
@@ -341,14 +351,6 @@ TEST(type_prop, range_all_const_zero_stride_fails)
}
}
struct RangeParams
{
double start;
double stop;
double step;
PartialShape expected_shape;
};
template <typename T>
void run_range_test(const element::Type& et, const RangeParams& params)
{
@@ -522,3 +524,557 @@ INSTANTIATE_TEST_CASE_P(type_prop,
RangeParams{-1, 1, 0.25, PartialShape{8}},
RangeParams{-1, 0.875, 0.25, PartialShape{8}}),
PrintToDummyParamName());
// ------------------------------ V4 ------------------------------
TEST(type_prop, range_v4_all_const_shape_inference)
{
int num_elems = 100;
int step_val = 5;
int start_val = 0;
int stop_val = num_elems * step_val + start_val;
element::Type_t et = element::i32;
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<int>{start_val});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<int>{stop_val});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<int>{step_val});
auto range = make_shared<op::v4::Range>(start, stop, step, et);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
ASSERT_TRUE(pshape_out.same_scheme(PartialShape{Dimension{num_elems}}));
}
TEST(type_prop, range_v4_some_const_shape_inference)
{
int step_val = 5;
int start_val = 0;
element::Type_t et = element::i32;
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<int>{start_val});
auto stop = make_shared<op::Parameter>(et, Shape{});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<int>{step_val});
auto range = make_shared<op::v4::Range>(start, stop, step, et);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
ASSERT_TRUE(pshape_out.same_scheme(PartialShape{Dimension::dynamic()}));
}
TEST(type_prop, range_v4_trunc_inputs_shape_inference)
{
element::Type_t et = element::f32;
auto start = make_shared<op::Constant>(et, Shape{}, std::vector<float>{0.9f});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<float>{10.3f});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<float>{1.7f});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
auto pshape_out = range->get_output_partial_shape(0);
ASSERT_TRUE(pshape_out.rank().is_static() && pshape_out.rank() == Dimension{1});
ASSERT_TRUE(pshape_out.same_scheme(PartialShape{Dimension{10}}));
}
TEST(type_prop, range_v4_invalid_inputs_elem_type)
{
// invalid element type for start scalar
try
{
auto start = make_shared<op::Parameter>(element::boolean, Shape{});
auto stop = make_shared<op::Parameter>(element::i32, Shape{});
auto step = make_shared<op::Parameter>(element::i32, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("'start' input scalar should be a numeric type"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
// invalid element type for stop scalar
try
{
auto start = make_shared<op::Parameter>(element::dynamic, Shape{});
auto stop = make_shared<op::Parameter>(element::boolean, Shape{});
auto step = make_shared<op::Parameter>(element::i32, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("'stop' input scalar should be a numeric type"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
// invalid element type for step scalar
try
{
auto start = make_shared<op::Parameter>(element::i32, Shape{});
auto stop = make_shared<op::Parameter>(element::undefined, Shape{});
auto step = make_shared<op::Parameter>(element::boolean, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::i32);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("'step' input scalar should be a numeric type"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
}
TEST(type_prop, range_v4_invalid_output_elem_type)
{
try
{
auto start = make_shared<op::Parameter>(element::f16, Shape{1});
auto stop = make_shared<op::Parameter>(element::f16, Shape{});
auto step = make_shared<op::Parameter>(element::f16, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::boolean);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("output tensor type should be a numeric type"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
}
TEST(type_prop, range_v4_invalid_inputs_non_scalar)
{
// start input not a scalar
try
{
auto start = make_shared<op::Parameter>(element::f32, Shape{1});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Parameter>(element::f32, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("'start' input is not a scalar"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
// stop input not a scalar
try
{
auto start = make_shared<op::Parameter>(element::f32, Shape{});
auto stop = make_shared<op::Parameter>(element::f32, PartialShape{Dimension::dynamic()});
auto step = make_shared<op::Parameter>(element::f32, Shape{});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("'stop' input is not a scalar"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
// step input not a scalar
try
{
auto start = make_shared<op::Parameter>(element::f32, Shape{});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(2));
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "Exception expected";
}
catch (const ngraph::NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("'step' input is not a scalar"));
}
catch (...)
{
FAIL() << "Unknown exception was thrown";
}
}
TEST(type_prop, range_v4_invalid_inputs_plus_inf)
{
// invalid start input scalar, +inf
try
{
auto start = make_shared<op::Constant>(
element::f32, Shape{}, std::vector<float>{std::numeric_limits<float>::infinity()});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "+Infinity start not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'start' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
// invalid stop input scalar, +inf
try
{
auto start = make_shared<op::Parameter>(element::f32, Shape{});
auto stop = make_shared<op::Constant>(
element::f32, Shape{}, std::vector<float>{std::numeric_limits<float>::infinity()});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "+Infinity stop not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'stop' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
// invalid step input scalar, +inf
try
{
auto start = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{3});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Constant>(
element::f32, Shape{}, std::vector<float>{std::numeric_limits<float>::infinity()});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "+Infinity step not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
}
TEST(type_prop, range_v4_invalid_inputs_minus_inf)
{
// invalid start input scalar, -inf
try
{
auto start = make_shared<op::Constant>(
element::f32, Shape{}, std::vector<float>{-std::numeric_limits<float>::infinity()});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "-Infinity start not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'start' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
// invalid stop input scalar, -inf
try
{
auto start = make_shared<op::Parameter>(element::f32, Shape{});
auto stop = make_shared<op::Constant>(
element::f32, Shape{}, std::vector<float>{-std::numeric_limits<float>::infinity()});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "-Infinity stop not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'stop' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
// invalid step input scalar, -inf
try
{
auto start = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{3});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Constant>(
element::f32, Shape{}, std::vector<float>{-std::numeric_limits<float>::infinity()});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "-Infinity step not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
}
TEST(type_prop, range_v4_invalid_inputs_nan)
{
// invalid start input scalar, nan
try
{
auto start =
make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{std::nanf("")});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "NaN start not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'start' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
// invalid stop input scalar, nan
try
{
auto start = make_shared<op::Parameter>(element::f32, Shape{});
auto stop =
make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{std::nanf("")});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "NaN stop not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'stop' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
// invalid step input scalar, nan
try
{
auto start = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto stop = make_shared<op::Parameter>(element::f32, Shape{});
auto step =
make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{std::nanf("")});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
FAIL() << "NaN step not detected";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "'step' cannot be nan or infinite.");
}
catch (...)
{
FAIL() << "Test failed for unexpected reason";
}
}
TEST(type_prop, range_v4_zero_output_elem_pos_step)
{
auto start = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{5});
auto stop = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
// if step is positive and start >= stop, number of output elements is zero
ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(0)}));
}
TEST(type_prop, range_v4_zero_output_elem_neg_step)
{
auto start = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{1});
auto stop = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{5});
auto step = make_shared<op::Constant>(element::f32, Shape{}, std::vector<float>{-1});
auto range = make_shared<op::v4::Range>(start, stop, step, element::f32);
// if step is negative and start <= stop, number of output elements is zero
ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(0)}));
}
template <typename T>
void run_range_v4_test(const element::Type& et, const RangeParams& params)
{
auto start =
make_shared<op::Constant>(et, Shape{}, std::vector<T>{static_cast<T>(params.start)});
auto stop = make_shared<op::Constant>(et, Shape{}, std::vector<T>{static_cast<T>(params.stop)});
auto step = make_shared<op::Constant>(et, Shape{}, std::vector<T>{static_cast<T>(params.step)});
auto range = make_shared<op::v4::Range>(start, stop, step, et);
EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(params.expected_shape))
<< "Expected shape " << params.expected_shape << " but got "
<< range->get_output_partial_shape(0);
}
struct RangeNumpyTest : ::testing::TestWithParam<RangeParams>
{
};
TEST_P(RangeNumpyTest, deduce_shape_i8)
{
run_range_v4_test<int8_t>(element::i8, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_i16)
{
run_range_v4_test<int16_t>(element::i16, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_i32)
{
run_range_v4_test<int32_t>(element::i32, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_i64)
{
run_range_v4_test<int64_t>(element::i64, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_u8)
{
run_range_v4_test<uint8_t>(element::u8, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_u16)
{
run_range_v4_test<uint16_t>(element::u16, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_u32)
{
run_range_v4_test<uint32_t>(element::u32, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_u64)
{
run_range_v4_test<uint64_t>(element::u64, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_bf16)
{
run_range_v4_test<bfloat16>(element::bf16, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_f16)
{
run_range_v4_test<float16>(element::f16, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_f32)
{
run_range_v4_test<float>(element::f32, GetParam());
}
TEST_P(RangeNumpyTest, deduce_shape_f64)
{
run_range_v4_test<double>(element::f64, GetParam());
}
INSTANTIATE_TEST_CASE_P(type_prop,
RangeNumpyTest,
::testing::Values(RangeParams{0, 5, 1, PartialShape{5}},
RangeParams{0, 22, 2, PartialShape{11}},
RangeParams{1, 23, 2, PartialShape{11}},
RangeParams{1, 22, 2, PartialShape{11}},
RangeParams{0, 0, 1, PartialShape{0}},
RangeParams{1, 0, 2, PartialShape{0}}),
PrintToDummyParamName());
struct RangeNumpyTestWithNegatives : ::testing::TestWithParam<RangeParams>
{
};
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_i8)
{
run_range_v4_test<int8_t>(element::i8, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_i16)
{
run_range_v4_test<int16_t>(element::i16, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_i32)
{
run_range_v4_test<int32_t>(element::i32, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_i64)
{
run_range_v4_test<int64_t>(element::i64, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_bf16)
{
run_range_v4_test<bfloat16>(element::bf16, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_f16)
{
run_range_v4_test<float16>(element::f16, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_f32)
{
run_range_v4_test<float>(element::f32, GetParam());
}
TEST_P(RangeNumpyTestWithNegatives, deduce_shape_f64)
{
run_range_v4_test<double>(element::f64, GetParam());
}
INSTANTIATE_TEST_CASE_P(type_prop,
RangeNumpyTestWithNegatives,
::testing::Values(RangeParams{2, 0, -2, PartialShape{1}},
RangeParams{2, 0, -1, PartialShape{2}},
RangeParams{-19, 19, 1, PartialShape{38}},
RangeParams{-19, 19, 3, PartialShape{13}},
RangeParams{20, -19, 1, PartialShape{0}}),
PrintToDummyParamName());
struct RangeNumpyTestFloating : ::testing::TestWithParam<RangeParams>
{
};
TEST_P(RangeNumpyTestFloating, deduce_shape_bf16)
{
run_range_v4_test<bfloat16>(element::bf16, GetParam());
}
TEST_P(RangeNumpyTestFloating, deduce_shape_f16)
{
run_range_v4_test<float16>(element::f16, GetParam());
}
TEST_P(RangeNumpyTestFloating, deduce_shape_f32)
{
run_range_v4_test<float>(element::f32, GetParam());
}
TEST_P(RangeNumpyTestFloating, deduce_shape_f64)
{
run_range_v4_test<double>(element::f64, GetParam());
}
INSTANTIATE_TEST_CASE_P(type_prop,
RangeNumpyTestFloating,
::testing::Values(RangeParams{0, 1, 0.25, PartialShape{4}},
RangeParams{-1, 1, 0.25, PartialShape{8}},
RangeParams{-1, 0.875, 0.25, PartialShape{8}}),
PrintToDummyParamName());