[GPU] Range new shape infer (#13582)

- move dynamic functional tests to dynamic folder
This commit is contained in:
Paul Youngsoo Ahn
2022-10-28 10:29:49 +09:00
committed by GitHub
parent 33c230a40d
commit 694d2f73eb
9 changed files with 571 additions and 40 deletions

View File

@@ -10,9 +10,17 @@ namespace cldnn {
struct range: public primitive_base<range> {
CLDNN_DECLARE_PRIMITIVE(range)
range(const primitive_id &id, const std::vector<primitive_id> &input, const layout &output_layout) :
primitive_base { id, input, output_layout.data_padding }, output_layout { output_layout } {
}
/// @brief Constructs range primitive.
/// @param id This primitive id.
/// @param inputs Input primitive id vector.
/// @param output_layout requested range output layout
range(const primitive_id &id,
const std::vector<primitive_id> &input,
const layout &output_layout)
: primitive_base{ id, input, output_layout.data_padding, output_layout.data_type },
output_layout { output_layout } { }
/// @brief requested range output layout
layout output_layout;
};
} // namespace cldnn

View File

@@ -12,15 +12,23 @@
namespace cldnn {
template <>
struct typed_program_node<range> : public typed_program_node_base<range> {
using typed_program_node_base::typed_program_node_base;
private:
using parent = typed_program_node_base<range>;
public:
using parent::parent;
program_node& input(std::size_t i = 0) const { return get_dependency(i); }
std::vector<size_t> get_shape_infer_dependencies() const override { return {0, 1, 2}; }
};
using range_node = typed_program_node<range>;
template <>
class typed_primitive_inst<range> : public typed_primitive_inst_base<range> {
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(range_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(range_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(range_node const& node);

View File

@@ -7,6 +7,8 @@
#include "json_object.h"
#include <string>
#include "range_shape_inference.hpp"
namespace cldnn {
namespace {
std::string lexical_cast(const json_base& j, int offset = 1) {
@@ -25,6 +27,40 @@ layout range_inst::calc_output_layout(range_node const& node, kernel_impl_params
return impl_param.typed_desc<range>()->output_layout;
}
// Shape-type-agnostic output layout calculation for the Range primitive.
// Defaults to a rank-1 dynamic shape; if all three scalar inputs
// (start, stop, step) are available as constant memory dependencies,
// ngraph's Range shape_infer is used to compute the exact output length.
template<typename ShapeType>
std::vector<layout> range_inst::calc_output_layouts(range_node const& /*node*/, kernel_impl_params const& impl_param) {
auto desc = impl_param.typed_desc<range>();
// Prefer the explicitly requested output data type; otherwise inherit from input 0.
auto output_data_type = desc->output_data_type.value_or(impl_param.get_input_layout().data_type);
ov::op::v4::Range op;
op.set_output_type(data_type_to_element_type(output_data_type));
// Fallback result: 1-D dynamic shape (used when any input value is unknown).
std::vector<ShapeType> output_shapes = {ShapeType::dynamic(1)};
// Range's start/stop/step inputs are scalars, hence three empty (rank-0) shapes.
std::vector<ShapeType> input_shapes = {ov::Shape(), ov::Shape(), ov::Shape()};
std::map<size_t, ngraph::HostTensorPtr> const_data;
auto& memory_deps = impl_param.memory_deps;
// Only run exact inference when all three inputs are present as constants.
if (memory_deps.count(0) > 0 && memory_deps.count(1) > 0 && memory_deps.count(2) > 0) {
auto start_mem = memory_deps.at(0);
// NOTE: the mem_lock guards must stay alive until shape_infer below has
// consumed the host tensors that alias the locked data.
cldnn::mem_lock<uint8_t, mem_lock_type::read> start_mem_lock(start_mem, impl_param.prog.get_stream());
const_data.emplace(0, make_host_tensor(start_mem->get_layout(), start_mem_lock.data()));
auto stop_mem = memory_deps.at(1);
cldnn::mem_lock<uint8_t, mem_lock_type::read> stop_mem_lock(stop_mem, impl_param.prog.get_stream());
const_data.emplace(1, make_host_tensor(stop_mem->get_layout(), stop_mem_lock.data()));
auto step_mem = memory_deps.at(2);
cldnn::mem_lock<uint8_t, mem_lock_type::read> step_mem_lock(step_mem, impl_param.prog.get_stream());
const_data.emplace(2, make_host_tensor(step_mem->get_layout(), step_mem_lock.data()));
shape_infer(&op, input_shapes, output_shapes, const_data);
}
// Format is taken from the previously recorded output layout.
return {layout({output_shapes[0], output_data_type, impl_param.output_layout.format})};
}
// Explicit instantiation for the PartialShape-based (dynamic-shape) path.
template std::vector<layout> range_inst::calc_output_layouts<ov::PartialShape>(range_node const& node, const kernel_impl_params& impl_param);
std::string range_inst::to_string(range_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();

View File

@@ -13,7 +13,8 @@ namespace {
CommonDispatchData SetDefault(const range_params &params) {
CommonDispatchData dispatchData;
dispatchData.gws = { 1, 1, params.outputs[0].X().v }; // TODO: these could be split better
const auto& out = params.outputs[0];
dispatchData.gws = { 1, 1, out.Batch().v * out.Feature().v * out.X().v * out.Y().v * out.W().v * out.Z().v }; // TODO: these could be split better
dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo);
return dispatchData;

View File

@@ -13,16 +13,21 @@ namespace intel_gpu {
static void CreateRangeOp(Program &p, const std::shared_ptr<ngraph::op::v4::Range> &op) {
validate_inputs_count(op, { 3 });
auto &outShape = op->get_output_shape(0);
{
auto r = outShape.size();
if (r != 1)
throw std::runtime_error { "range v4 output rank is " + std::to_string(r) };
auto output_pshape = op->get_output_partial_shape(0);
auto output_dtype = cldnn::element_type_to_data_type(op->get_output_element_type(0));
std::shared_ptr<cldnn::layout> outLayout = nullptr;
if (output_pshape.is_static()) {
OPENVINO_ASSERT(output_pshape.rank().get_length() == 1 , "[GPU] range v4 output rank should be 1");
auto& out_shape = op->get_output_shape(0);
outLayout = std::make_shared<cldnn::layout>(output_dtype, cldnn::format::bfyx, cldnn::tensor(cldnn::batch(out_shape[0])));
} else {
outLayout = std::make_shared<cldnn::layout>(output_pshape, output_dtype, cldnn::format::bfyx);
}
cldnn::tensor outTensor { cldnn::spatial(outShape[0]) };
auto outDataType = cldnn::element_type_to_data_type(op->get_output_element_type(0));
cldnn::layout outLayout { outDataType, cldnn::format::bfyx, outTensor };
cldnn::range prim { layer_type_name_ID(op), p.GetInputPrimitiveIDs(op), outLayout };
cldnn::range prim(layer_type_name_ID(op),
p.GetInputPrimitiveIDs(op),
*outLayout);
p.add_primitive(*op, prim);
}

View File

@@ -0,0 +1,122 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/crop.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "range_inst.h"
#include "program_wrapper.h"
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Parameters for a single Range shape-infer test case.
struct range_si_test_params {
ov::PartialShape input_pshape;       // shape of each of the three scalar inputs
ov::PartialShape expected_out_pshape; // expected inferred output shape
data_types out_data_type;            // requested output element type
std::vector<double> vals; // {start, stop, step}; empty => inputs treated as non-constant
};
// Pretty-printer used by gtest to label parameterized shape-infer cases.
std::ostream& operator<<(std::ostream& ost, const range_si_test_params& params) {
ost << data_type_traits::name(params.out_data_type) << ",";
if (params.vals.empty()) {
ost << "{},";
} else {
ost << "{START:" << params.vals[0] << ",STOP:" << params.vals[1] << ",STEP:" << params.vals[2] << "},";
}
ost << "IN: " << params.input_pshape << ", EXPECTED_OUT: " << params.expected_out_pshape;
return ost;
}
// Fixture for parameterized Range shape-infer tests.
class range_si_test : public testing::TestWithParam<range_si_test_params> { };
TEST_P(range_si_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
cldnn::program prog(engine);
std::vector<std::shared_ptr<primitive>> input_prims;
std::vector<std::string> input_prim_ids;
std::vector<layout> input_layouts;
// Range always takes exactly three inputs: start, stop, step.
const size_t num_inputs = 3;
for (size_t idx = 0; idx < num_inputs; idx++) {
auto in_layout = layout{p.input_pshape, p.out_data_type, format::bfyx};
input_layouts.push_back(in_layout);
auto prim_id = "const::data_" + std::to_string(idx);
auto const_data_prim = std::make_shared<input_layout>(prim_id, in_layout);
input_prims.push_back(const_data_prim);
input_prim_ids.push_back(prim_id);
}
auto range_prim = std::make_shared<range>("range", input_prim_ids, layout{p.expected_out_pshape, p.out_data_type, format::bfyx});
auto& range_node = prog.get_or_create(range_prim);
// Wire each input node into the range node of the test program.
for (auto& iprim : input_prims) {
auto& input_node = prog.get_or_create(iprim);
program_wrapper::add_connection(prog, input_node, range_node);
}
auto params = range_node.get_kernel_impl_params();
params->memory_deps.clear();
// Populate constant memory deps only for static layouts with provided values;
// dynamic-shape cases deliberately leave memory_deps empty so calc_output_layouts
// exercises its fallback (dynamic) path.
for (size_t idx = 0; idx < num_inputs; idx++) {
auto in_layout = input_layouts[idx];
if (in_layout.is_static() && (idx < p.vals.size())) {
auto prim_mem = engine.allocate_memory(in_layout);
ASSERT_NE(p.out_data_type, data_types::bin);
// Convert the double test value to the concrete element type under test.
switch (p.out_data_type) {
case data_types::f16:
set_values(prim_mem, {float_to_half(p.vals[idx])});
break;
case data_types::f32:
set_values(prim_mem, {static_cast<data_type_to_type<data_types::f32>::type>(p.vals[idx])});
break;
case data_types::i32:
set_values(prim_mem, {static_cast<data_type_to_type<data_types::i32>::type>(p.vals[idx])});
break;
case data_types::i64:
set_values(prim_mem, {static_cast<data_type_to_type<data_types::i64>::type>(p.vals[idx])});
break;
case data_types::i8:
set_values(prim_mem, {static_cast<data_type_to_type<data_types::i8>::type>(p.vals[idx])});
break;
case data_types::u8:
set_values(prim_mem, {static_cast<data_type_to_type<data_types::u8>::type>(p.vals[idx])});
break;
case data_types::bin:
default:
break;
}
params->memory_deps.emplace(idx, prim_mem);
}
}
// Run the new-style shape inference and compare against the expected layout.
auto res = range_inst::calc_output_layouts<ov::PartialShape>(range_node, *params);
auto expected_out_layout = layout{p.expected_out_pshape, p.out_data_type, format::bfyx};
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res[0], expected_out_layout);
}
// Static-input cases pin the exact inferred length; the two trailing cases
// use dynamic inputs (no values) and expect a dynamic rank-1 output.
INSTANTIATE_TEST_SUITE_P(smoke, range_si_test,
testing::ValuesIn(std::vector<range_si_test_params>{
{ov::PartialShape{}, ov::PartialShape{7}, data_types::i32, {2, 23, 3}},
{ov::PartialShape{}, ov::PartialShape{7}, data_types::i8, {2, 23, 3}},
{ov::PartialShape{}, ov::PartialShape{7}, data_types::u8, {2, 23, 3}},
{ov::PartialShape{}, ov::PartialShape{7}, data_types::i64, {23, 2, -3}},
{ov::PartialShape{}, ov::PartialShape{7}, data_types::i32, {23, 2, -3}},
{ov::PartialShape{}, ov::PartialShape{3}, data_types::f32, {1.0f, 2.5f, 0.5f}},
{ov::PartialShape{}, ov::PartialShape{3}, data_types::f16, {1.0f, 2.5f, 0.5f}},
{ov::PartialShape::dynamic(1), ov::PartialShape::dynamic(1), data_types::f16, {}},
{ov::PartialShape::dynamic(1), ov::PartialShape::dynamic(1), data_types::i8, {}}
}));
}; // shape_infer_tests

View File

@@ -6,6 +6,10 @@
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/range.hpp>
#include <intel_gpu/primitives/select.hpp>
#include <intel_gpu/primitives/data.hpp>
using namespace ::tests;
using namespace testing;
namespace cldnn {
namespace {
@@ -14,7 +18,7 @@ struct RangeArg {
primitive_id name;
memory::ptr p;
RangeArg(data_types dt, const char name[]) :
name { name }, p { tests::get_test_engine().allocate_memory( { dt, format::bfyx, tensor { spatial() } }) } {
name { name }, p { tests::get_test_engine().allocate_memory( { ov::PartialShape{}, dt, format::bfyx}) } {
}
void addTo(topology &t) const {
t.add(input_layout { name, p->get_layout() });
@@ -29,18 +33,19 @@ struct RangeArgs {
RangeArg start { dt, "start" };
RangeArg stop { dt, "stop" };
RangeArg step { dt, "step" };
explicit RangeArgs(data_types dt) :
dt { dt } {
}
memory::ptr run(int outLen) const {
explicit RangeArgs(data_types dt) : dt { dt } {}
memory::ptr run(int outLen, bool use_new_shape_infer) const {
topology topology;
start.addTo(topology);
stop.addTo(topology);
step.addTo(topology);
topology.add(range { "range", { start.name, stop.name, step.name }, { dt, format::bfyx,
tensor { spatial(outLen) } } });
topology.add(range { "range", { start.name, stop.name, step.name }, { dt, format::bfyx, tensor{batch(outLen)} } });
network network { tests::get_test_engine(), topology };
build_options bo;
bo.set_option(build_option::allow_new_shape_infer(use_new_shape_infer));
network network { tests::get_test_engine(), topology, bo };
start.setData(network);
stop.setData(network);
@@ -51,33 +56,171 @@ struct RangeArgs {
}
};
template<typename T> void doSmokeRange(T start, T stop, T step) {
RangeArgs args { type_to_data_type<T>::value };
struct range_test_params {
data_types d_types;
double start;
double stop;
double step;
bool use_new_shape_infer;
};
tests::set_values(args.start.p, { start });
tests::set_values(args.stop.p, { stop });
tests::set_values(args.step.p, { step });
std::ostream& operator<<(std::ostream& ost, const range_test_params& params) {
ost << data_type_traits::name(params.d_types) << ",";
ost << "{start:" << params.start << ",stop:" << params.stop << ",step:" << params.step << "},";
ost << " use_new_shape_infer(" << (params.use_new_shape_infer?"True":"False") << ")";
return ost;
}
T outLen = (stop - start) / step + 1;
template<typename T>
void doSmokeRange(range_test_params& params) {
auto output = args.run(outLen);
RangeArgs args(params.d_types);
T start_val = static_cast<T>(params.start);
T stop_val = static_cast<T>(params.stop);
T step_val = static_cast<T>(params.step);
tests::set_values(args.start.p, { start_val });
tests::set_values(args.stop.p, { stop_val });
tests::set_values(args.step.p, { step_val });
T outLen = (stop_val - start_val) / step_val;
auto output = args.run(outLen, params.use_new_shape_infer);
mem_lock<T> output_ptr { output, tests::get_test_stream() };
for (std::size_t i = 0; i < static_cast<size_t>(outLen); ++i)
EXPECT_EQ(start + i * step, output_ptr[i]);
for (std::size_t i = 0; i < static_cast<size_t>(outLen); ++i) {
EXPECT_EQ(start_val + i * step_val, output_ptr[i]);
}
}
void doSmokeRangeAllTypes(int start, int stop, int step) {
doSmokeRange<std::int8_t>(start, stop, step);
doSmokeRange<std::uint8_t>(start, stop, step);
doSmokeRange<int>(start, stop, step);
doSmokeRange<float>(start, stop, step);
doSmokeRange<std::int64_t>(start, stop, step);
// FP16 variant of the smoke test: values are converted through half precision
// on the way in and back to float for comparison on the way out.
void doSmokeRange_fp16(range_test_params& params) {
RangeArgs args(params.d_types);
auto start_val = static_cast<float>(params.start);
auto stop_val = static_cast<float>(params.stop);
auto step_val = static_cast<float>(params.step);
tests::set_values(args.start.p, { float_to_half(start_val) });
tests::set_values(args.stop.p, { float_to_half(stop_val) });
tests::set_values(args.step.p, { float_to_half(step_val) });
// NOTE(review): outLen is a float and is truncated when passed to run(int, ...)
// and again when cast to size_t below — assumes test params yield an integral
// (or intentionally truncated) length; confirm against the parameter sets.
auto outLen = (stop_val - start_val) / step_val;
auto output = args.run(outLen, params.use_new_shape_infer);
mem_lock<uint16_t> output_ptr { output, tests::get_test_stream() };
for (std::size_t i = 0; i < static_cast<size_t>(outLen); ++i) {
EXPECT_EQ(start_val + i * step_val, half_to_float(output_ptr[i]));
}
}
TEST(smoke, Range) {
doSmokeRangeAllTypes(1, 21, 2);
doSmokeRangeAllTypes(4, 0, -1);
// Parameterized smoke test dispatching on element type.
struct smoke_range_test : testing::TestWithParam<range_test_params> {};
TEST_P(smoke_range_test, basic) {
auto params = GetParam();
// Dispatch to the typed helper matching the requested data type;
// unsupported types fall through to default and are silently skipped.
switch(params.d_types) {
case data_types::f32:
doSmokeRange<float>(params);
break;
case data_types::i32:
doSmokeRange<int>(params);
break;
case data_types::i8:
doSmokeRange<std::int8_t>(params);
break;
case data_types::u8:
doSmokeRange<std::uint8_t>(params);
break;
case data_types::i64:
doSmokeRange<std::int64_t>(params);
break;
case data_types::f16:
doSmokeRange_fp16(params);
break;  // was missing: silent fallthrough into default (benign today, fragile)
default:
break;
}
}
struct range_test_param_generator : std::vector<range_test_params> {
range_test_param_generator& add(range_test_params params) {
push_back(params);
return *this;
}
range_test_param_generator& simple_params(std::vector<data_types>& data_types_list, double start, double stop, double step) {
std::vector<bool> flags_use_new_si = {true, false};
for (auto use_new_si : flags_use_new_si) {
for (auto type : data_types_list) {
push_back(range_test_params{ type, start, stop, step, use_new_si});
}
}
return *this;
}
};
// Data-type groups consumed by the parameter generator below.
std::vector<data_types> signed_types = {data_types::i8};
// NOTE(review): data_types::i32 is listed twice — the second entry was likely
// intended to be data_types::i64 (which the test dispatch supports); confirm.
std::vector<data_types> general_types = {data_types::u8, data_types::i32, data_types::i32, data_types::f16, data_types::f32};
std::vector<data_types> float_types = {data_types::f16, data_types::f32};
// Ascending, fractional (float-only), and descending (signed-only) ranges,
// each run with both the new and the legacy shape-infer paths.
INSTANTIATE_TEST_SUITE_P(range_gpu_test,
smoke_range_test,
testing::ValuesIn(
range_test_param_generator()
.simple_params(general_types, 2, 23, 3)
.simple_params(general_types, 1, 21, 2)
.simple_params(float_types, 1, 2.5f, 0.5f)
.simple_params(signed_types, 23, 2, -3)
.simple_params(signed_types, 4, 0, -1)
));
// End-to-end check: Range whose 'stop' input is produced dynamically by a
// Select node, exercising the new shape-infer path with a runtime-computed length.
TEST(range_gpu_test, range_with_select) {
auto& engine = get_test_engine();
int32_t start_val = 0;
int32_t step_val = 1;
int32_t expected_dim = 25;
auto select_input1 = engine.allocate_memory({ { 1 }, data_types::u8, format::bfyx });
auto select_input2 = engine.allocate_memory({ { }, data_types::i32, format::bfyx });
auto select_mask = engine.allocate_memory({ { 1 }, data_types::i32, format::bfyx });
auto input0 = engine.allocate_memory({ { }, data_types::i32, format::bfyx });
auto input2 = engine.allocate_memory({ { }, data_types::i32, format::bfyx });
topology topology;
topology.add(data("select_input1", select_input1));
topology.add(data("select_input2", select_input2));
topology.add(data("select_mask", select_mask));
topology.add(data("input0", input0));
topology.add(data("input2", input2));
topology.add(cldnn::select("select", "select_input1", "select_input2", "select_mask"));
topology.add(range { "range", { "input0", "select", "input2" }, { data_types::i32, format::bfyx, tensor{batch(expected_dim)} } });
// select_input1 == 0, so Select yields select_mask (= expected_dim) as 'stop'.
set_values<uint8_t>(select_input1, {0});
set_values<int32_t>(select_input2, {384});
set_values<int32_t>(select_mask, {expected_dim});
set_values<int32_t>(input0, {start_val});
set_values<int32_t>(input2, {step_val});
build_options bo;
bo.set_option(build_option::allow_new_shape_infer(true));
// NOTE(review): uses tests::get_test_engine() rather than the local 'engine';
// presumably the same singleton engine — confirm.
network network { tests::get_test_engine(), topology, bo };
auto outputs = network.execute();
auto output = outputs.at("range").get_memory();
mem_lock<int32_t> output_ptr { output, tests::get_test_stream() };
for (size_t i = 0; i < static_cast<size_t>(expected_dim); ++i) {
EXPECT_EQ(start_val + i * step_val, output_ptr[i]);
}
}
} // namespace
} // namespace cldnn

View File

@@ -0,0 +1,208 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_layer/shape_of.hpp"
#include "shared_test_classes/single_layer/strided_slice.hpp"
#include <shared_test_classes/single_layer/eltwise.hpp>
#include <common_test_utils/ov_tensor_utils.hpp>
using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
namespace GPULayerTestsDefinitions {
// Parameter tuple for the dynamic-shape Range functional tests.
typedef std::tuple<
std::vector<InputShape>, // input shapes
std::vector<float>, // input values
ElementType, // Network precision
TargetDevice, // Device name
std::map<std::string, std::string> // Additional network configuration
> RangeDynamicGPUTestParamsSet;
// Functional test building an ngraph Range(v4) with constant inputs and
// dynamic shapes, compared against the reference implementation.
class RangeDynamicGPUTest : public testing::WithParamInterface<RangeDynamicGPUTestParamsSet>,
virtual public SubgraphBaseTest {
public:
// Builds a human-readable test name from shapes, values, precision and device.
static std::string getTestCaseName(const testing::TestParamInfo<RangeDynamicGPUTestParamsSet>& obj) {
RangeDynamicGPUTestParamsSet basicParamsSet = obj.param;
std::ostringstream result;
std::vector<InputShape> inputShapes;
std::vector<float> inputValues;
ElementType netType;
TargetDevice targetDevice;
std::map<std::string, std::string> additionalConfig;
std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet;
result << "IS=";
for (const auto& shape : inputShapes) {
result << CommonTestUtils::partialShape2str({shape.first}) << "_";
for (const auto& actual_shape : shape.second) {
result << CommonTestUtils::partialShape2str({actual_shape}) << "_";
}
}
result << "IV=";
for (const auto& v : inputValues) {
result << v << "_";
}
result << "NetType=" << netType << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}
protected:
// Fills inputDynamicShapes/targetStaticShapes from the parameter shapes.
// Note: hides (does not override) the base-class helper of the same name.
void init_input_shapes(const std::vector<InputShape>& shapes) {
if (shapes.empty()) {
targetStaticShapes = {{}};
return;
}
// Pad each input's static-shape list to the longest one by repeating its last entry.
size_t targetStaticShapeSize = shapes.front().second.size();
for (size_t i = 1; i < shapes.size(); ++i) {
if (targetStaticShapeSize < shapes[i].second.size()) {
targetStaticShapeSize = shapes[i].second.size();
}
}
targetStaticShapes.resize(targetStaticShapeSize);
for (const auto& shape : shapes) {
auto dynShape = shape.first;
inputDynamicShapes.push_back(dynShape);
for (size_t i = 0; i < targetStaticShapeSize; ++i) {
targetStaticShapes[i].push_back(i < shape.second.size() ? shape.second.at(i) : shape.second.back());
}
}
}
// Creates a single-element constant of the requested type holding 'value'.
template<typename T>
std::shared_ptr<ov::Node> inline generate_constant(ElementType netType, ov::PartialShape& pshape, const float value) {
std::vector<T> data_vec = {static_cast<T>(value)};
return builder::makeConstant(netType, pshape.to_shape(), data_vec);
}
// Builds Range(start, stop, step) from three constants of type netType.
std::shared_ptr<ngraph::opset8::Range> generate_range_op(ElementType netType, std::vector<ov::PartialShape>& pshapes, std::vector<float>& values) {
const size_t num_inputs = 3;
std::vector<std::shared_ptr<ov::Node>> input_vec;
for (size_t idx = 0; idx < num_inputs; idx++) {
// Map the runtime element type to the matching C++ storage type.
#define CASE(X) case X: input_vec.push_back(generate_constant<element_type_traits<X>::value_type>(netType, inputDynamicShapes[idx], values[idx])); break;
switch (netType) {
CASE(ov::element::Type_t::boolean)
CASE(ov::element::Type_t::i8)
CASE(ov::element::Type_t::i16)
CASE(ov::element::Type_t::i32)
CASE(ov::element::Type_t::i64)
CASE(ov::element::Type_t::u8)
CASE(ov::element::Type_t::u16)
CASE(ov::element::Type_t::u32)
CASE(ov::element::Type_t::u64)
CASE(ov::element::Type_t::bf16)
CASE(ov::element::Type_t::f16)
CASE(ov::element::Type_t::f32)
CASE(ov::element::Type_t::f64)
case ov::element::Type_t::u1:
case ov::element::Type_t::i4:
case ov::element::Type_t::u4:
// Sub-byte types are stored via uint8_t backing storage.
input_vec.push_back(generate_constant<uint8_t>(netType, inputDynamicShapes[idx], values[idx])); break;
default: OPENVINO_UNREACHABLE("Unsupported element type: ", netType);
}
#undef CASE
}
return std::make_shared<ngraph::opset8::Range>(input_vec[0], input_vec[1], input_vec[2], netType);
}
void SetUp() override {
RangeDynamicGPUTestParamsSet basicParamsSet = this->GetParam();
std::vector<InputShape> inputShapes;
std::vector<float> inputValues;
ElementType netType;
std::map<std::string, std::string> additionalConfig;
inputValues.clear();
std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet;
// No dynamic Parameters: all Range inputs are constants.
auto params = builder::makeDynamicParams(netType, {});
init_input_shapes(inputShapes);
const auto range = generate_range_op(netType, inputDynamicShapes, inputValues);
ngraph::ResultVector results = {std::make_shared<ngraph::opset1::Result>(range)};
function = std::make_shared<ngraph::Function>(results, params, "shapeof_out");
}
};
// Runs the built function on GPU and compares against the reference path.
TEST_P(RangeDynamicGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
}
namespace {
std::map<std::string, std::string> emptyAdditionalConfig;
// Three scalar (rank-0, dynamic) inputs: start, stop, step.
const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
{
// Inputs for Range
{{ov::PartialShape::dynamic(0)}, {{}}},
{{ov::PartialShape::dynamic(0)}, {{}}},
{{ov::PartialShape::dynamic(0)}, {{}}}
}
};
// Integer test vectors: ascending and descending ranges.
const std::vector<std::vector<float>> inputValues = {
{
// Inputs for Range
{2, 23, 3},
{1, 21, 2},
{23, 2, -3},
{4, 0, -1},
}
};
const std::vector<ElementType> netPrecisions = {
ElementType::i8,
ElementType::i32,
ElementType::i64,
};
const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
::testing::ValuesIn(inputValues),
::testing::ValuesIn(netPrecisions), // netprec
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));
INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_01, RangeDynamicGPUTest,
testParams_smoke, RangeDynamicGPUTest::getTestCaseName);
// Floating-point vectors with fractional steps.
const std::vector<std::vector<float>> inputFloatValues = {
{
// Inputs for Range
{1.0f, 2.5f, 0.5f},
{23.0f, 5.0f, -2.0f},
}
};
const std::vector<ElementType> netFloatPrecisions = {
ElementType::f16,
ElementType::f32,
};
const auto testFloatParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
::testing::ValuesIn(inputFloatValues),
::testing::ValuesIn(netFloatPrecisions), // netprec
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::Values(emptyAdditionalConfig));
INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_02, RangeDynamicGPUTest,
testFloatParams_smoke, RangeDynamicGPUTest::getTestCaseName);
} // namespace
} // namespace GPULayerTestsDefinitions