[GPU] MVN shape infer support and dynamic tests (#12685)

This commit is contained in:
Roman Lyamin 2022-08-24 18:29:15 +04:00 committed by GitHub
parent 19fd77e3d8
commit 3339d5a372
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 165 additions and 27 deletions

View File

@ -59,7 +59,7 @@ std::vector<layout> border_inst::calc_output_layouts(border_node const& /*node*/
auto& memory_deps = impl_param.memory_deps;
std::map<size_t, ngraph::HostTensorPtr> const_data;
if (!memory_deps.empty()) {
if (memory_deps.count(1) && memory_deps.count(2)) {
auto pads_begin_mem = memory_deps.at(1);
cldnn::mem_lock<uint8_t, mem_lock_type::read> pads_begin_lock(pads_begin_mem, impl_param.prog.get_stream());
const_data.emplace(1, make_host_tensor(pads_begin_mem->get_layout(), pads_begin_lock.data()));

View File

@ -65,15 +65,15 @@ layout gather_inst::calc_output_layout(gather_node const& node, kernel_impl_para
}
template<typename ShapeType>
std::vector<layout> gather_inst::calc_output_layouts(gather_node const& node, const kernel_impl_params& impl_param) {
auto desc = node.get_primitive();
std::vector<layout> gather_inst::calc_output_layouts(gather_node const& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<gather>();
auto input0_layout = impl_param.get_input_layout(0);
auto input1_layout = impl_param.get_input_layout(1);
auto output_type = input0_layout.data_type;
if (node.has_fused_primitives()) {
output_type = node.get_fused_output_layout().data_type;
if (impl_param.has_fused_primitives()) {
output_type = impl_param.get_fused_output_layout().data_type;
}
ov::op::v8::Gather op;

View File

@ -23,6 +23,7 @@ public:
support_padding_all(true);
}
program_node& input() const { return get_dependency(0); }
std::vector<size_t> get_shape_infer_dependencies() const override { return {1, 2}; }
};
using border_node = typed_program_node<border>;
@ -32,9 +33,9 @@ class typed_primitive_inst<border> : public typed_primitive_inst_base<border> {
using parent = typed_primitive_inst_base<border>;
public:
static layout calc_output_layout(border_node const& node, kernel_impl_params const& impl_param);
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(border_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(border_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(border_node const& node);
typed_primitive_inst(network& network, border_node const& node);
};

View File

@ -28,7 +28,7 @@ class typed_primitive_inst<gather> : public typed_primitive_inst_base<gather> {
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(gather_node const& node, const kernel_impl_params& impl_param);
static std::vector<layout> calc_output_layouts(gather_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(gather_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(gather_node const& node);

View File

@ -19,6 +19,7 @@ public:
using parent::parent;
program_node& input() const { return get_dependency(0); }
std::vector<size_t> get_shape_infer_dependencies() const override { return {}; }
};
using mvn_node = typed_program_node<mvn>;
@ -28,6 +29,8 @@ class typed_primitive_inst<mvn> : public typed_primitive_inst_base<mvn> {
using parent = typed_primitive_inst_base<mvn>;
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(mvn_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(mvn_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(mvn_node const& node);

View File

@ -33,7 +33,7 @@ class typed_primitive_inst<one_hot> : public typed_primitive_inst_base<one_hot>
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(const one_hot_node& node, const kernel_impl_params& impl_param);
static std::vector<layout> calc_output_layouts(const one_hot_node& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(one_hot_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(one_hot_node const& node);
typed_primitive_inst(network& network, one_hot_node const& node);

View File

@ -23,6 +23,8 @@ public:
program_node& input() const { return get_dependency(0); }
program_node& input2() const { return get_dependency(1); }
std::vector<size_t> get_shape_infer_dependencies() const override { return {1}; }
};
using resample_node = typed_program_node<resample>;
@ -32,9 +34,9 @@ class typed_primitive_inst<resample> : public typed_primitive_inst_base<resample
using parent = typed_primitive_inst_base<resample>;
public:
static layout calc_output_layout(resample_node const& node, kernel_impl_params const& impl_param);
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(resample_node const& node, const kernel_impl_params& impl_param);
static std::vector<layout> calc_output_layouts(resample_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(resample_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(resample_node const& node);
public:

View File

@ -43,7 +43,7 @@ class typed_primitive_inst<reshape> : public typed_primitive_inst_base<reshape>
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(reshape_node const& node, const kernel_impl_params& impl_param);
static std::vector<layout> calc_output_layouts(reshape_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(reshape_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(reshape_node const& node);

View File

@ -79,7 +79,7 @@ class typed_primitive_inst<strided_slice> : public typed_primitive_inst_base<str
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(strided_slice_node const& node, const kernel_impl_params& impl_param);
static std::vector<layout> calc_output_layouts(strided_slice_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(strided_slice_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(strided_slice_node const& node);

View File

@ -20,6 +20,7 @@ public:
using parent::parent;
program_node& input() const { return get_dependency(0); }
std::vector<size_t> get_shape_infer_dependencies() const override { return {1}; }
};
using tile_node = typed_program_node<tile>;
@ -29,9 +30,9 @@ class typed_primitive_inst<tile> : public typed_primitive_inst_base<tile> {
using parent = typed_primitive_inst_base<tile>;
public:
static layout calc_output_layout(tile_node const& node, kernel_impl_params const& impl_param);
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(tile_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(tile_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(tile_node const& node);

View File

@ -26,6 +26,25 @@ layout mvn_inst::calc_output_layout(mvn_node const& node, kernel_impl_params con
return layout(output_type, input_node_layout.format, input_node_layout.get_tensor());
}
// Shape inference for MVN. Normalization is element-wise, so the output
// shape is always identical to the input shape (static or dynamic); only
// the data type may differ, via a forced output type or a fused primitive.
template<typename ShapeType>
std::vector<layout> mvn_inst::calc_output_layouts(mvn_node const& /*node*/, const kernel_impl_params& impl_param) {
    auto desc = impl_param.typed_desc<mvn>();
    auto input_layout = impl_param.get_input_layout(0);

    // Use the typed descriptor fetched above instead of re-reading
    // impl_param.desc (consistent with one_hot/gather shape inference).
    auto output_type = desc->output_data_type.value_or(input_layout.data_type);
    if (impl_param.has_fused_primitives()) {
        output_type = impl_param.get_fused_output_layout().data_type;
    }

    ShapeType output_shape = input_layout.get<ShapeType>();
    format output_format = format::adjust_to_rank(input_layout.format, output_shape.size());

    return { layout{output_shape, output_type, output_format} };
}
std::string mvn_inst::to_string(mvn_node const& node) {
auto node_info = node.desc_to_json();
auto desc = node.get_primitive();

View File

@ -48,7 +48,7 @@ layout one_hot_inst::calc_output_layout(one_hot_node const& node, kernel_impl_pa
}
template<typename ShapeType>
std::vector<layout> one_hot_inst::calc_output_layouts(const one_hot_node& node, const kernel_impl_params& impl_param) {
std::vector<layout> one_hot_inst::calc_output_layouts(const one_hot_node& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<one_hot>();
auto input_layout = impl_param.get_input_layout(0);
auto dt = desc->output_data_type.value_or(input_layout.data_type);

View File

@ -35,7 +35,7 @@ layout resample_inst::calc_output_layout(resample_node const& node, kernel_impl_
}
template<typename ShapeType>
std::vector<layout> resample_inst::calc_output_layouts(resample_node const& node, const kernel_impl_params& impl_param) {
std::vector<layout> resample_inst::calc_output_layouts(resample_node const& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<resample>();
auto input_layout = impl_param.get_input_layout(0);
@ -75,10 +75,10 @@ std::vector<layout> resample_inst::calc_output_layouts(resample_node const& node
ov::op::v4::correct_pads_attr(&op, pads_begin, pads_end, input_shapes);
auto pattern_data = desc->output_pattern;
if (!memory_deps.empty()) {
if (memory_deps.count(1)) {
auto pattern_mem = memory_deps.at(1);
cldnn::mem_lock<uint8_t, mem_lock_type::read> pattern_lock(pattern_mem, node.get_program().get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> pattern_lock(pattern_mem, impl_param.prog.get_stream());
auto pattern_ptr = pattern_lock.data();
auto pattern_tensor = make_host_tensor(pattern_mem->get_layout(), pattern_ptr);

View File

@ -49,8 +49,8 @@ layout reshape_inst::calc_output_layout(reshape_node const& node, kernel_impl_pa
}
template<typename ShapeType>
std::vector<layout> reshape_inst::calc_output_layouts(reshape_node const& node, const kernel_impl_params& impl_param) {
assert(static_cast<bool>(node.get_primitive()->output_data_type) == false &&
std::vector<layout> reshape_inst::calc_output_layouts(reshape_node const& /*node*/, const kernel_impl_params& impl_param) {
assert(static_cast<bool>(impl_param.typed_desc<reshape>()->output_data_type) == false &&
"Output data type forcing is not supported for reshape_node!");
auto prim = impl_param.typed_desc<reshape>();
auto input_layout = impl_param.get_input_layout(0);
@ -77,7 +77,7 @@ std::vector<layout> reshape_inst::calc_output_layouts(reshape_node const& node,
if (!memory_deps.empty()) {
auto pattern_mem = memory_deps.at(1);
cldnn::mem_lock<uint8_t, mem_lock_type::read> pattern_lock(pattern_mem, node.get_program().get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> pattern_lock(pattern_mem, impl_param.prog.get_stream());
auto pattern_ptr = pattern_lock.data();
auto pattern_tensor = make_host_tensor(pattern_mem->get_layout(), pattern_ptr);

View File

@ -33,7 +33,7 @@ layout strided_slice_inst::calc_output_layout(strided_slice_node const& node, ke
}
template<typename ShapeType>
std::vector<layout> strided_slice_inst::calc_output_layouts(strided_slice_node const& node, const kernel_impl_params& impl_param) {
std::vector<layout> strided_slice_inst::calc_output_layouts(strided_slice_node const& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<strided_slice>();
auto input0_layout = impl_param.get_input_layout(0);
@ -63,9 +63,9 @@ std::vector<layout> strided_slice_inst::calc_output_layouts(strided_slice_node c
auto mem2 = constant_mem.at(2);
auto mem3 = constant_mem.at(3);
cldnn::mem_lock<uint8_t, mem_lock_type::read> lock1(mem1, node.get_program().get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> lock2(mem2, node.get_program().get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> lock3(mem3, node.get_program().get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> lock1(mem1, impl_param.prog.get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> lock2(mem2, impl_param.prog.get_stream());
cldnn::mem_lock<uint8_t, mem_lock_type::read> lock3(mem3, impl_param.prog.get_stream());
auto tensor1 = make_host_tensor(mem1->get_layout(), lock1.data());
auto tensor2 = make_host_tensor(mem2->get_layout(), lock2.data());

View File

@ -55,7 +55,7 @@ std::vector<layout> tile_inst::calc_output_layouts(tile_node const& /*node*/, co
};
auto& constant_mem = impl_param.memory_deps;
if (!constant_mem.empty()) {
if (constant_mem.count(1)) {
auto repeats_mem = constant_mem.at(1);
cldnn::mem_lock<uint8_t, mem_lock_type::read> repeats_lock(repeats_mem, impl_param.prog.get_stream());
std::map<size_t, ngraph::HostTensorPtr> const_data = {

View File

@ -83,11 +83,23 @@ INSTANTIATE_TEST_SUITE_P(smoke, interpolate_test_two_inputs,
{0.5, 2.0}, {2, 3}, InterpolateAttrs{InterpolateOp::ShapeCalcMode::SCALES},
layout{ov::PartialShape{1, 2, 24, 160}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {-1, -1, -1, -1},
{0.5, 2.0}, {2, 3}, InterpolateAttrs{InterpolateOp::ShapeCalcMode::SCALES},
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{2, 2, 3, 2}, data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {2, 2, 2, 3},
{}, {}, InterpolateAttrs(InterpolateOp::ShapeCalcMode::SIZES),
layout{ov::PartialShape{2, 2, 2, 3}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {2, 2, 2, 3},
{}, {}, InterpolateAttrs(InterpolateOp::ShapeCalcMode::SIZES),
layout{ov::PartialShape{2, 2, 2, 3}, data_types::f32, format::bfyx}
}
}));
@ -124,11 +136,23 @@ INSTANTIATE_TEST_SUITE_P(smoke, interpolate_test_single_input,
{0.5, 2.0}, {2, 3}, InterpolateAttrs{InterpolateOp::ShapeCalcMode::SCALES},
layout{ov::PartialShape{1, 2, 24, 160}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {-1, -1, -1, -1},
{0.5, 2.0}, {2, 3}, InterpolateAttrs{InterpolateOp::ShapeCalcMode::SCALES},
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{2, 2, 3, 2}, data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {2, 2, 2, 3},
{}, {}, InterpolateAttrs(InterpolateOp::ShapeCalcMode::SIZES),
layout{ov::PartialShape{2, 2, 2, 3}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {2, 2, 2, 3},
{}, {}, InterpolateAttrs(InterpolateOp::ShapeCalcMode::SIZES),
layout{ov::PartialShape{2, 2, 2, 3}, data_types::f32, format::bfyx}
}
}));

View File

@ -0,0 +1,64 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/mvn.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "mvn_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Parameters for one MVN shape-inference case: the data-input layout plus
// the MVN attributes forwarded to the primitive. The expected output layout
// equals input_layout, since MVN is element-wise.
struct mvn_test_params {
    layout input_layout;      // layout of the single data input (may be dynamic)
    bool normalize_variance;  // divide by variance after mean subtraction
    float epsilon;            // numerical-stability term
    bool eps_inside_sqrt;     // apply epsilon inside (vs outside) the sqrt
    bool across_channels;     // normalize across the channel axis too
};

// Parameterized fixture; cases are supplied via INSTANTIATE_TEST_SUITE_P.
class mvn_test : public testing::TestWithParam<mvn_test_params> { };
// Builds a minimal two-node program (input_layout -> mvn) and checks that
// calc_output_layouts reproduces the input layout, for static and dynamic shapes.
TEST_P(mvn_test, shape_infer) {
    auto params = GetParam();

    auto& engine = get_test_engine();

    auto data_prim = std::make_shared<input_layout>("input", params.input_layout);
    auto mvn_prim = std::make_shared<mvn>("output", "input", params.normalize_variance, params.epsilon, params.eps_inside_sqrt, params.across_channels);

    cldnn::program prog(engine);
    auto& data_node = prog.get_or_create(data_prim);
    auto& out_node = prog.get_or_create(mvn_prim);
    program_wrapper::add_connection(prog, data_node, out_node);

    auto layouts = mvn_inst::calc_output_layouts<ov::PartialShape>(out_node, *out_node.get_kernel_impl_params());

    ASSERT_EQ(layouts.size(), 1);
    // MVN is element-wise: the inferred layout must match the input layout.
    ASSERT_EQ(layouts[0], params.input_layout);
}
// Two cases: a fully static 3D input and a fully dynamic 4D input.
// In both, shape inference must return the input layout unchanged.
INSTANTIATE_TEST_SUITE_P(smoke, mvn_test,
testing::ValuesIn(std::vector<mvn_test_params>{
{
layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx},
true, 1e-9f, true, true
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
true, 1e-9f, true, true
}
}));
} // shape_infer_tests

View File

@ -1,4 +1,4 @@
// Copyright (C) 2018-2022 Intel Corporation
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@ -61,7 +61,7 @@ TEST_P(pad_test_three_input, shape_infer) {
program_wrapper::add_connection(prog, pads_end_node, border_node);
auto params = border_node.get_kernel_impl_params();
params->memory_deps = {{1, pads_begin_mem}, {2, pads_end_mem}}; //mb add pad_value
params->memory_deps = {{1, pads_begin_mem}, {2, pads_end_mem}};
auto res = border_inst::calc_output_layouts<ov::PartialShape>(border_node, *params);
ASSERT_EQ(res.size(), 1);
@ -77,6 +77,13 @@ INSTANTIATE_TEST_SUITE_P(smoke, pad_test_three_input,
ov::op::PadMode::CONSTANT, 1.f,
layout{ov::PartialShape{2, 8, 37, 48}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {0, 5, 2, 1},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {1, 0, 3, 7},
ov::op::PadMode::CONSTANT, 1.f,
layout{ov::PartialShape{{1, -1},{5, -1},{5, -1},{8, -1}}, data_types::f32, format::bfyx}
}
}));
class pad_test_single_input : public testing::TestWithParam<pad_test_params> { };
@ -112,6 +119,13 @@ INSTANTIATE_TEST_SUITE_P(smoke, pad_test_single_input,
ov::op::PadMode::CONSTANT, 1.f,
layout{ov::PartialShape{2, 8, 37, 48}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {0, 5, 2, 1},
layout{ov::PartialShape{4}, data_types::i64, format::bfyx}, {1, 0, 3, 7},
ov::op::PadMode::CONSTANT, 1.f,
layout{ov::PartialShape{{1, -1},{5, -1},{5, -1},{8, -1}}, data_types::f32, format::bfyx}
}
}));
} // shape_infer_tests

View File

@ -63,6 +63,11 @@ INSTANTIATE_TEST_SUITE_P(smoke, tile_test_two_inputs,
layout{ov::PartialShape{2, 3, 4}, data_types::f32, format::bfyx},
layout{ov::PartialShape{3}, data_types::i64, format::bfyx}, {1, 2, 3},
layout{ov::PartialShape{2, 6, 12}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(3), data_types::f32, format::bfyx},
layout{ov::PartialShape{3}, data_types::i64, format::bfyx}, {1, 2, 3},
layout{ov::PartialShape::dynamic(3), data_types::f32, format::bfyx}
}
}));
@ -94,6 +99,11 @@ INSTANTIATE_TEST_SUITE_P(smoke, tile_test_single_input,
layout{ov::PartialShape{2, 3, 4}, data_types::f32, format::bfyx},
layout{ov::PartialShape{3}, data_types::i64, format::bfyx}, {1, 2, 3},
layout{ov::PartialShape{2, 6, 12}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(3), data_types::f32, format::bfyx},
layout{ov::PartialShape{3}, data_types::i64, format::bfyx}, {1, 2, 3},
layout{ov::PartialShape::dynamic(3), data_types::f32, format::bfyx}
}
}));