[GPU] new shape infer for several trivial primitives (#14803)

* [GPU] new shape infer for several primitives

* [GPU] Make forward_input0_shape method template
This commit is contained in:
Vladimir Paramuzov
2022-12-28 09:12:46 +04:00
committed by GitHub
parent 055e34a77a
commit 31d91033d9
23 changed files with 612 additions and 47 deletions

View File

@@ -45,10 +45,14 @@ class typed_primitive_inst<activation> : public typed_primitive_inst_base<activa
using parent::parent;
public:
template<typename ShapeType>
// Shape-agnostic inference for activation: the op is element-wise, so the
// output layout mirrors input0 (delegated to the shared forward_input0_shape
// helper; data type may still be overridden there by explicit/fused types).
static std::vector<layout> calc_output_layouts(activation_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(activation_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(activation_node const& node);
public:
typed_primitive_inst(network& network, activation_node const& node);
memory::ptr slope_memory() const { return dep_memory_ptr(1); }

View File

@@ -29,6 +29,11 @@ public:
using parent = typed_primitive_inst_base<bucketize>;
using parent::parent;
template<typename ShapeType>
// Bucketize output has the same shape as input0 (buckets input is only a
// lookup table), so inference forwards the input0 layout.
static std::vector<layout> calc_output_layouts(bucketize_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(const bucketize_node& node, kernel_impl_params const& impl_param);
static std::string to_string(const bucketize_node& node);
};

View File

@@ -19,10 +19,13 @@ class typed_primitive_inst<grn> : public typed_primitive_inst_base<grn> {
using parent::parent;
public:
template<typename ShapeType>
// GRN is a per-element normalization: output shape equals input0 shape,
// so inference forwards the input0 layout.
static std::vector<layout> calc_output_layouts(grn_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(grn_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(grn_node const& node);
public:
typed_primitive_inst(network& network, grn_node const& node);
};

View File

@@ -19,10 +19,13 @@ class typed_primitive_inst<lrn> : public typed_primitive_inst_base<lrn> {
using parent::parent;
public:
template<typename ShapeType>
// LRN preserves the input shape regardless of norm region, so inference
// forwards the input0 layout.
static std::vector<layout> calc_output_layouts(lrn_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(lrn_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(lrn_node const& node);
public:
typed_primitive_inst(network& network, lrn_node const& node);
};

View File

@@ -31,7 +31,9 @@ class typed_primitive_inst<mvn> : public typed_primitive_inst_base<mvn> {
public:
template<typename ShapeType>
// NOTE(review): both a declaration and a definition of calc_output_layouts
// appear below; in the rendered diff the semicolon-terminated line is the
// removed out-of-line declaration and the braced form is its replacement —
// confirm against the actual commit before relying on this text.
static std::vector<layout> calc_output_layouts(mvn_node const& /*node*/, const kernel_impl_params& impl_param);
// MVN normalizes values in place shape-wise, so the output layout mirrors
// input0 via the shared forward_input0_shape helper.
static std::vector<layout> calc_output_layouts(mvn_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(mvn_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(mvn_node const& node);

View File

@@ -30,10 +30,13 @@ class typed_primitive_inst<normalize> : public typed_primitive_inst_base<normali
using parent::parent;
public:
template<typename ShapeType>
// Normalize (L2) keeps the input shape (the scale input only supplies
// per-channel factors), so inference forwards the input0 layout.
static std::vector<layout> calc_output_layouts(normalize_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(normalize_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(normalize_node const& node);
public:
typed_primitive_inst(network& network, normalize_node const& node);
memory::ptr scale_memory() const { return dep_memory_ptr(1); }

View File

@@ -317,6 +317,19 @@ protected:
static std::string generic_to_string(program_node const& node, const char* type_name);
template<typename ShapeType>
// Generic shape-inference helper shared by primitives whose output shape is
// identical to input 0 (activation, lrn, mvn, softmax, etc.).
// The output data type is taken, in priority order, from:
//   1) the primitive's explicitly requested output_data_types[0],
//   2) the fused primitive's output type when fusion occurred,
//   3) otherwise the input's own data type.
// Format is inherited from the input layout unchanged.
static std::vector<layout> forward_input0_shape(const kernel_impl_params& impl_param) {
auto in_layout = impl_param.get_input_layout(0);
auto output_type = impl_param.desc->output_data_types[0].value_or(in_layout.data_type);
if (impl_param.has_fused_primitives()) {
// Fusion may change the output precision (e.g. quantize fused on top).
output_type = impl_param.get_fused_output_layout().data_type;
}
return { layout(in_layout.get<ShapeType>(), output_type, in_layout.format) };
}
// This could be implemented via single map std::unordered_map<instrumentation::perf_counter_key, std::tuple<int64_t, size_t>>
// but the overhead on using perf_counter_key as map key is too big, thus we use hash as map key
// and store mapping onto original perf_counter_key for further data analysis and dumps

View File

@@ -19,10 +19,13 @@ class typed_primitive_inst<reverse> : public typed_primitive_inst_base<reverse>
using parent::parent;
public:
template<typename ShapeType>
// Reverse only reorders elements along given axes; shape is unchanged, so
// inference forwards the input0 layout.
static std::vector<layout> calc_output_layouts(reverse_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(reverse_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(reverse_node const& node);
public:
typed_primitive_inst(network& network, reverse_node const& desc);
};

View File

@@ -19,10 +19,13 @@ class typed_primitive_inst<reverse_sequence> : public typed_primitive_inst_base<
using parent::parent;
public:
template<typename ShapeType>
// ReverseSequence permutes elements within sequences; output shape equals
// input0 shape, so inference forwards the input0 layout.
static std::vector<layout> calc_output_layouts(reverse_sequence_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(reverse_sequence_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(reverse_sequence_node const& node);
public:
typed_primitive_inst(network& network, reverse_sequence_node const& desc);
};

View File

@@ -16,6 +16,10 @@ public:
using parent = typed_primitive_inst_base<roll>;
using parent::parent;
template<typename ShapeType>
// Roll cyclically shifts data without changing dimensions, so inference
// forwards the input0 layout.
static std::vector<layout> calc_output_layouts(roll_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(const roll_node& node, kernel_impl_params const& impl_param);
static std::string to_string(const roll_node& node);
};

View File

@@ -20,7 +20,9 @@ class typed_primitive_inst<scatter_update> : public typed_primitive_inst_base<sc
public:
template<typename ShapeType>
// NOTE(review): both a declaration and a definition of calc_output_layouts
// appear below; in the rendered diff the semicolon-terminated line is the
// removed out-of-line declaration and the braced form is its replacement —
// confirm against the actual commit before relying on this text.
static std::vector<layout> calc_output_layouts(scatter_update_node const& /*node*/, const kernel_impl_params& impl_param);
// ScatterUpdate writes updates into a copy of input0, so the output layout
// mirrors input0 via the shared forward_input0_shape helper.
static std::vector<layout> calc_output_layouts(scatter_update_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(scatter_update_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(scatter_update_node const& node);

View File

@@ -27,10 +27,14 @@ class typed_primitive_inst<softmax> : public typed_primitive_inst_base<softmax>
using parent::parent;
public:
template<typename ShapeType>
// Softmax normalizes along one axis without changing shape, so inference
// forwards the input0 layout.
static std::vector<layout> calc_output_layouts(softmax_node const& /*node*/, const kernel_impl_params& impl_param) {
return forward_input0_shape<ShapeType>(impl_param);
}
static layout calc_output_layout(softmax_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(softmax_node const& node);
public:
typed_primitive_inst(network& network, softmax_node const& desc);
};

View File

@@ -23,26 +23,6 @@ layout mvn_inst::calc_output_layout(mvn_node const& node, kernel_impl_params con
return layout(output_type, input_node_layout.format, input_node_layout.get_tensor());
}
template<typename ShapeType>
// (Removed by this commit) MVN-specific shape inference, superseded by the
// generic forward_input0_shape helper in primitive_inst: it duplicated the
// same input0-forwarding logic (output type from desc or fused primitive,
// shape copied from input) with an extra format rank adjustment.
std::vector<layout> mvn_inst::calc_output_layouts(mvn_node const& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<mvn>();
auto input_layout = impl_param.get_input_layout(0);
auto output_type = impl_param.desc->output_data_types[0].value_or(input_layout.data_type);
if (impl_param.has_fused_primitives()) {
output_type = impl_param.get_fused_output_layout().data_type;
}
ShapeType input_shape = input_layout.get<ShapeType>();
ShapeType output_shape = input_shape;
// NOTE(review): the generic helper keeps the input format as-is, whereas
// this removed code adjusted format rank to the output shape — presumably
// equivalent for same-rank forwarding; confirm no rank-change cases relied
// on adjust_to_rank.
format output_format = format::adjust_to_rank(input_layout.format, output_shape.size());
return { layout{output_shape, output_type, output_format} };
}
// Explicit instantiation for the dynamic-shape path (also removed).
template std::vector<layout> mvn_inst::calc_output_layouts<ov::PartialShape>(mvn_node const& node, const kernel_impl_params& impl_param);
std::string mvn_inst::to_string(mvn_node const& node) {
auto node_info = node.desc_to_json();
auto desc = node.get_primitive();

View File

@@ -28,24 +28,6 @@ layout scatter_update_inst::calc_output_layout(scatter_update_node const& node,
return layout{output_type, input_format, output_shape};
}
template<typename ShapeType>
// (Removed by this commit) ScatterUpdate-specific shape inference,
// superseded by the generic forward_input0_shape helper in primitive_inst:
// it forwarded input0's shape/format and resolved the output type the same
// way (explicit desc type, then fused type, then input type).
std::vector<layout> scatter_update_inst::calc_output_layouts(scatter_update_node const& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<scatter_update>();
auto input_layout = impl_param.get_input_layout();
auto output_format = input_layout.format;
auto output_shape = input_layout.get<ShapeType>();
auto output_type = desc->output_data_types[0].value_or(input_layout.data_type);
if (impl_param.has_fused_primitives()) {
output_type = impl_param.get_fused_output_layout().data_type;
}
return { layout{output_shape, output_type, output_format} };
}
// Explicit instantiation for the dynamic-shape path (also removed).
template std::vector<layout> scatter_update_inst::calc_output_layouts<ov::PartialShape>(scatter_update_node const& node, const kernel_impl_params& impl_param);
std::string scatter_update_inst::to_string(scatter_update_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();

View File

@@ -0,0 +1,58 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/activation.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "activation_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: the input layout to feed and the activation function
// (the function must not affect the inferred shape).
struct activation_test_params {
layout input_layout;
activation_func func;
};
class activation_test : public testing::TestWithParam<activation_test_params> { };
// Builds a minimal input_layout -> activation program and checks that
// calc_output_layouts returns exactly the input layout (shape, type, format).
TEST_P(activation_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input_layout_prim = std::make_shared<input_layout>("input", p.input_layout);
auto activation_prim = std::make_shared<activation>("output", input_info("input"), p.func);
cldnn::program prog(engine);
auto& input_layout_node = prog.get_or_create(input_layout_prim);
auto& activation_node = prog.get_or_create(activation_prim);
program_wrapper::add_connection(prog, input_layout_node, activation_node);
auto res = activation_inst::calc_output_layouts<ov::PartialShape>(activation_node, *activation_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res[0], p.input_layout);
}
// Covers static and dynamic shapes of ranks 3-5 with several activations.
INSTANTIATE_TEST_SUITE_P(smoke, activation_test,
testing::ValuesIn(std::vector<activation_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, activation_func::relu },
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, activation_func::abs },
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, activation_func::elu },
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, activation_func::erf},
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, activation_func::swish}
}));
} // shape_infer_tests

View File

@@ -0,0 +1,65 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/bucketize.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "bucketize_inst.hpp"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout, requested output data type, and the
// with_right_bound flag (which must not affect the inferred shape).
struct bucketize_test_params {
layout input_layout;
data_types out_dt;
bool with_right_bound;
};
class bucketize_test : public testing::TestWithParam<bucketize_test_params> { };
// Builds data + buckets inputs feeding a bucketize node and checks that
// the inferred layout keeps input0's shape but switches to out_dt.
TEST_P(bucketize_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
// Buckets input: a small 1D boundary table; its shape is irrelevant to
// the output shape.
auto input1_layout_prim = std::make_shared<input_layout>("input1", layout({3}, p.input_layout.data_type, format::bfyx));
auto inputs = std::vector<input_info>{ input_info("input0"), input_info("input1")};
auto bucketize_prim = std::make_shared<bucketize>("output", inputs, p.out_dt, p.with_right_bound);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& input1_layout_node = prog.get_or_create(input1_layout_prim);
auto& bucketize_node = prog.get_or_create(bucketize_prim);
program_wrapper::add_connection(prog, input0_layout_node, bucketize_node);
program_wrapper::add_connection(prog, input1_layout_node, bucketize_node);
auto res = bucketize_inst::calc_output_layouts<ov::PartialShape>(bucketize_node, *bucketize_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
// Expected: same shape/format as input0 with the explicitly requested type.
auto expected_layout = p.input_layout;
expected_layout.data_type = p.out_dt;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5 with i32/i64 outputs and both bounds.
INSTANTIATE_TEST_SUITE_P(smoke, bucketize_test,
testing::ValuesIn(std::vector<bucketize_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, data_types::i64, true },
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, data_types::i64, false },
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, data_types::i32, true },
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, data_types::i64, true},
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, data_types::i32, false}
}));
} // shape_infer_tests

View File

@@ -0,0 +1,60 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/grn.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "grn_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout and the GRN primitive's requested output
// data type.
struct grn_test_params {
layout input_layout;
data_types out_dt;
};
class grn_test : public testing::TestWithParam<grn_test_params> { };
// Builds a minimal input -> grn program and checks that the inferred layout
// keeps input0's shape/format but carries the requested output type.
TEST_P(grn_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
auto grn_prim = std::make_shared<grn>("output", input_info("input0"), 0.1f, p.out_dt);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& grn_node = prog.get_or_create(grn_prim);
program_wrapper::add_connection(prog, input0_layout_node, grn_node);
auto res = grn_inst::calc_output_layouts<ov::PartialShape>(grn_node, *grn_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
expected_layout.data_type = p.out_dt;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5 including an f32 -> f16 type change.
INSTANTIATE_TEST_SUITE_P(smoke, grn_test,
testing::ValuesIn(std::vector<grn_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, data_types::f32},
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, data_types::f32},
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, data_types::f32},
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, data_types::f16},
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, data_types::f32}
}));
} // shape_infer_tests

View File

@@ -0,0 +1,63 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/lrn.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "lrn_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout plus the LRN hyper-parameters (size, k,
// alpha, beta, norm region) — none of which should affect the output shape.
struct lrn_test_params {
layout input_layout;
uint32_t size;
float k;
float alpha;
float beta;
lrn_norm_region norm_region;
};
class lrn_test : public testing::TestWithParam<lrn_test_params> { };
// Builds a minimal input -> lrn program and checks that calc_output_layouts
// returns exactly the input layout.
TEST_P(lrn_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
auto lrn_prim = std::make_shared<lrn>("output", input_info("input0"), p.size, p.k, p.alpha, p.beta, p.norm_region);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& lrn_node = prog.get_or_create(lrn_prim);
program_wrapper::add_connection(prog, input0_layout_node, lrn_node);
auto res = lrn_inst::calc_output_layouts<ov::PartialShape>(lrn_node, *lrn_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5 across both norm regions.
INSTANTIATE_TEST_SUITE_P(smoke, lrn_test,
testing::ValuesIn(std::vector<lrn_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, 5, 0.1f, 0.2f, 0.75f, lrn_norm_region::lrn_norm_region_across_channel},
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, 3, 0.1f, 0.2f, 0.75f, lrn_norm_region::lrn_norm_region_within_channel},
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, 6, 0.1f, 0.2f, 0.75f, lrn_norm_region::lrn_norm_region_across_channel},
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, 2, 0.1f, 0.2f, 0.75f, lrn_norm_region::lrn_norm_region_within_channel},
// NOTE(review): 0.75 below is a double literal while the other rows use
// 0.75f — harmless implicit conversion, but inconsistent.
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, 5, 0.1f, 0.2f, 0.75, lrn_norm_region::lrn_norm_region_across_channel}
}));
} // shape_infer_tests

View File

@@ -0,0 +1,63 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/normalize.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "normalize_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout and the across_spatial flag (which must not
// affect the inferred shape).
struct normalize_test_params {
layout input_layout;
bool across_spatial;
};
class normalize_test : public testing::TestWithParam<normalize_test_params> { };
// Builds input + constant scale feeding a normalize node and checks that
// calc_output_layouts returns exactly the input layout.
TEST_P(normalize_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
// Scale input is provided as constant data; its content is irrelevant here.
auto mem = engine.allocate_memory(layout({1}, p.input_layout.data_type, format::bfyx));
auto input1_layout_prim = std::make_shared<data>("input1", mem);
auto normalize_prim = std::make_shared<normalize>("output", input_info("input0"), "input1", p.across_spatial);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& input1_layout_node = prog.get_or_create(input1_layout_prim);
auto& normalize_node = prog.get_or_create(normalize_prim);
program_wrapper::add_connection(prog, input0_layout_node, normalize_node);
program_wrapper::add_connection(prog, input1_layout_node, normalize_node);
auto res = normalize_inst::calc_output_layouts<ov::PartialShape>(normalize_node, *normalize_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5 with both across_spatial settings.
INSTANTIATE_TEST_SUITE_P(smoke, normalize_test,
testing::ValuesIn(std::vector<normalize_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, false},
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, true},
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, false},
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, true},
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, false}
}));
} // shape_infer_tests

View File

@@ -0,0 +1,65 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/reverse_sequence.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "reverse_sequence_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout plus the sequence and batch axes.
struct reverse_sequence_test_params {
layout input_layout;
int32_t seq_axis;
int32_t batch_axis;
};
class reverse_sequence_test : public testing::TestWithParam<reverse_sequence_test_params> { };
// Builds data + sequence-lengths inputs feeding a reverse_sequence node and
// checks that calc_output_layouts returns exactly the input0 layout.
TEST_P(reverse_sequence_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
// Sequence-lengths input is a 1D i32 tensor sized by the batch dimension.
auto input1_layout = layout{{p.input_layout.get_partial_shape()[p.batch_axis]}, data_types::i32, format::bfyx};
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
auto input1_layout_prim = std::make_shared<input_layout>("input1", input1_layout);
auto reverse_sequence_prim = std::make_shared<reverse_sequence>("output", input_info("input0"), input_info("input1"),
p.seq_axis, p.batch_axis);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& input1_layout_node = prog.get_or_create(input1_layout_prim);
auto& reverse_sequence_node = prog.get_or_create(reverse_sequence_prim);
program_wrapper::add_connection(prog, input0_layout_node, reverse_sequence_node);
program_wrapper::add_connection(prog, input1_layout_node, reverse_sequence_node);
auto res = reverse_sequence_inst::calc_output_layouts<ov::PartialShape>(reverse_sequence_node, *reverse_sequence_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5 (seq_axis=1, batch_axis=0 throughout).
INSTANTIATE_TEST_SUITE_P(smoke, reverse_sequence_test,
testing::ValuesIn(std::vector<reverse_sequence_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, 1, 0},
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, 1, 0},
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, 1, 0},
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, 1, 0},
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, 1, 0}
}));
} // shape_infer_tests

View File

@@ -0,0 +1,63 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/reverse.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "reverse_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout and the reverse mode (index vs mask), which
// must not affect the inferred shape.
struct reverse_test_params {
layout input_layout;
reverse_mode mode;
};
class reverse_test : public testing::TestWithParam<reverse_test_params> { };
// Builds data + axes inputs feeding a reverse node and checks that
// calc_output_layouts returns exactly the input0 layout.
TEST_P(reverse_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
// Axes input: a 1-element i32 tensor; its content is irrelevant here.
auto input1_layout = layout{{1}, data_types::i32, format::bfyx};
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
auto input1_layout_prim = std::make_shared<input_layout>("input1", input1_layout);
auto reverse_prim = std::make_shared<reverse>("output", input_info("input0"), input_info("input1"), p.mode);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& input1_layout_node = prog.get_or_create(input1_layout_prim);
auto& reverse_node = prog.get_or_create(reverse_prim);
program_wrapper::add_connection(prog, input0_layout_node, reverse_node);
program_wrapper::add_connection(prog, input1_layout_node, reverse_node);
auto res = reverse_inst::calc_output_layouts<ov::PartialShape>(reverse_node, *reverse_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5 with both reverse modes.
INSTANTIATE_TEST_SUITE_P(smoke, reverse_test,
testing::ValuesIn(std::vector<reverse_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, reverse_mode::index },
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, reverse_mode::mask },
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, reverse_mode::index },
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, reverse_mode::mask },
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx}, reverse_mode::index }
}));
} // shape_infer_tests

View File

@@ -0,0 +1,58 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/roll.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "roll_inst.hpp"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: only the input layout (roll's shift amount cannot change
// the shape).
struct roll_test_params {
layout input_layout;
};
class roll_test : public testing::TestWithParam<roll_test_params> { };
// Builds a minimal input -> roll program and checks that calc_output_layouts
// returns exactly the input layout.
TEST_P(roll_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
// Shift of tensor(1) is arbitrary — any shift yields the same output shape.
auto roll_prim = std::make_shared<roll>("output", input_info("input0"), tensor(1));
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& roll_node = prog.get_or_create(roll_prim);
program_wrapper::add_connection(prog, input0_layout_node, roll_node);
auto res = roll_inst::calc_output_layouts<ov::PartialShape>(roll_node, *roll_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5.
INSTANTIATE_TEST_SUITE_P(smoke, roll_test,
testing::ValuesIn(std::vector<roll_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx} },
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx} },
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx} },
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx} },
{ layout{ov::PartialShape::dynamic(5), data_types::f32, format::bfzyx} }
}));
} // shape_infer_tests

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/softmax.hpp>
#include <intel_gpu/primitives/data.hpp>
#include "softmax_inst.h"
#include "program_wrapper.h"
#include <cmath>
#include <algorithm>
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
// Test parameters: input layout and the softmax axis (which must not affect
// the inferred shape).
struct softmax_test_params {
layout input_layout;
int64_t axis;
};
class softmax_si_test : public testing::TestWithParam<softmax_test_params> { };
// Builds a minimal input -> softmax program and checks that
// calc_output_layouts returns exactly the input layout.
TEST_P(softmax_si_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto input0_layout_prim = std::make_shared<input_layout>("input0", p.input_layout);
auto softmax_prim = std::make_shared<softmax>("output", input_info("input0"), p.axis);
cldnn::program prog(engine);
auto& input0_layout_node = prog.get_or_create(input0_layout_prim);
auto& softmax_node = prog.get_or_create(softmax_prim);
program_wrapper::add_connection(prog, input0_layout_node, softmax_node);
auto res = softmax_inst::calc_output_layouts<ov::PartialShape>(softmax_node, *softmax_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
auto expected_layout = p.input_layout;
ASSERT_EQ(res[0], expected_layout);
}
// Covers static and dynamic ranks 3-5, including a negative axis.
INSTANTIATE_TEST_SUITE_P(smoke, softmax_si_test,
testing::ValuesIn(std::vector<softmax_test_params>{
{ layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx}, 1},
{ layout{ov::PartialShape{1, 2, 3, 4}, data_types::f16, format::bfyx}, 2},
{ layout{ov::PartialShape{1, 2, 3, 4, 5}, data_types::f32, format::bfzyx}, 4},
{ layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}, 2},
{ layout{ov::PartialShape::dynamic(5), data_types::f16, format::bfzyx}, -1}
}));
} // shape_infer_tests