[GPU] Support MVN cases with axis=-1 w/o decomposition (#17020)

Vladimir Paramuzov 2023-04-25 12:59:03 +04:00 committed by GitHub
parent 0617ce9089
commit ca1102b855
14 changed files with 238 additions and 167 deletions

View File

@@ -15,7 +15,7 @@ struct mvn : public primitive_base<mvn> {
/// @brief Constructs mvn primitive.
/// @param id This primitive id.
/// @param input Input primitive id.
/// @param across_channels Determines if the normalization is done across or within channels. Default is within channels.
/// @param reduction_axes Determines the set of axes used for normalization.
/// @param normalize_variance Determines if normalize variance is applied. Default is true.
/// @param epsilon Epsilon for not dividing by zero while normalizing.
/// @param eps_inside_sqrt The mode of applying epsilon.
@@ -24,13 +24,13 @@ struct mvn : public primitive_base<mvn> {
const bool normalize_variance,
const float epsilon,
const bool eps_inside_sqrt,
const bool across_channels = false,
const std::vector<int64_t>& reduction_axes,
const padding& output_padding = padding())
: primitive_base(id, {input}, {output_padding}),
normalize_variance(normalize_variance),
epsilon(epsilon),
eps_inside_sqrt(eps_inside_sqrt),
across_channels(across_channels) {}
reduction_axes(reduction_axes) {}
/// @brief Determines if normalize variance is applied.
bool normalize_variance;
@@ -38,15 +38,15 @@ struct mvn : public primitive_base<mvn> {
float epsilon;
/// @brief The mode of applying epsilon.
bool eps_inside_sqrt;
/// @brief Determines if the normalization is done across or within channels.
bool across_channels;
/// @brief Determines the set of axes used for normalization.
std::vector<int64_t> reduction_axes;
size_t hash() const override {
size_t seed = primitive::hash();
seed = hash_combine(seed, normalize_variance);
seed = hash_combine(seed, epsilon);
seed = hash_combine(seed, eps_inside_sqrt);
seed = hash_combine(seed, across_channels);
seed = hash_range(seed, reduction_axes.begin(), reduction_axes.end());
return seed;
}
@@ -59,7 +59,31 @@ struct mvn : public primitive_base<mvn> {
return normalize_variance == rhs_casted.normalize_variance &&
epsilon == rhs_casted.epsilon &&
eps_inside_sqrt == rhs_casted.eps_inside_sqrt &&
across_channels == rhs_casted.across_channels;
reduction_axes == rhs_casted.reduction_axes;
}
bool across_channels() const {
    constexpr int64_t channel_axis = 1;
    return std::find(reduction_axes.begin(), reduction_axes.end(), channel_axis) != reduction_axes.end();
}
bool requires_alignment(const ov::PartialShape& shape) const {
auto rank = static_cast<int64_t>(shape.size());
auto axes = reduction_axes;
std::for_each(axes.begin(), axes.end(), [rank](int64_t& v) { v = (v < 0) ? v + rank : v; });
// If every axis from 2 to rank-1 is either part of the reduction scope or has extent 1,
// then this maps to the old MVN case and doesn't require alignment
for (int64_t i = 2; i < rank; i++) {
if (std::find_if(axes.begin(), axes.end(), [i, &shape](const int64_t& v){ return v == i || shape[i].get_max_length() == 1; }) == axes.end())
return true;
}
return false;
}
};
} // namespace cldnn
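
As a sanity check on the two helpers above, here is a minimal standalone sketch of their semantics (an illustrative reimplementation over plain std::vector shapes, not the cldnn header itself; the empty-axes edge case of requires_alignment is glossed over):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors mvn::across_channels(): true when the channel axis (1) is reduced.
bool across_channels(const std::vector<int64_t>& axes) {
    return std::find(axes.begin(), axes.end(), int64_t{1}) != axes.end();
}

// Mirrors mvn::requires_alignment(): alignment is needed unless every axis
// from 2 to rank-1 is either reduced or has extent 1.
bool requires_alignment(std::vector<int64_t> axes, const std::vector<int64_t>& shape) {
    const auto rank = static_cast<int64_t>(shape.size());
    for (auto& v : axes)
        v = (v < 0) ? v + rank : v;  // normalize negative axes
    for (int64_t i = 2; i < rank; i++) {
        const bool reduced = std::find(axes.begin(), axes.end(), i) != axes.end();
        if (!reduced && shape[i] != 1)
            return true;
    }
    return false;
}

int main() {
    std::cout << across_channels({1, 2, 3}) << '\n';                // 1: old ACROSS_CHANNELS mode
    std::cout << requires_alignment({2, 3}, {2, 16, 8, 8}) << '\n'; // 0: old WITHIN_CHANNELS mode
    std::cout << requires_alignment({-1}, {2, 16, 8, 8}) << '\n';   // 1: the new axis=-1 case
}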

View File

@@ -198,14 +198,13 @@ void add_required_reorders::run(program& p) {
}
}
if (!correct_layout_selected) {
throw std::runtime_error("Internal Error: no layout format available for " + usr->id() +
" (format: " + std::to_string(original_layout.format.value) +
", data_type: " + data_type_traits::name(original_layout.data_type) + ") "
"compatible with " + node.first->id() +
" (format: " + std::to_string(node.first->get_output_layout().format.value) +
", data_type: " + data_type_traits::name(node.first->get_output_layout().data_type) + ")");
}
OPENVINO_ASSERT(correct_layout_selected,
"[GPU] No layout format available for ", usr->id(), ", impl_type: ", usr->get_preferred_impl_type(),
" (format: ", original_layout.format.to_string(),
", data_type: ", data_type_traits::name(original_layout.data_type), ") ",
"compatible with ", node.first->id(),
" (format: ", node.first->get_output_layout().format.to_string(),
", data_type: ", data_type_traits::name(node.first->get_output_layout().data_type), ")");
}
}

View File

@@ -594,8 +594,10 @@ void prepare_primitive_fusing::fuse_simple_primitives(program &p) {
};
auto mvn_supports_fusings = [](mvn_node& node) -> bool {
auto in_dt = node.get_dependency(0).get_output_layout().data_type;
return data_type_traits::is_i8_u8(in_dt);
auto in_layout = node.get_dependency(0).get_output_layout();
if (node.get_primitive()->requires_alignment(in_layout.get_partial_shape()))
return false;
return data_type_traits::is_i8_u8(in_layout.data_type);
};
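// Note: fusing is likely rejected for alignment-requiring MVN because
// static_canonicalize_shapes() later flattens such tensors into a
// [non-reduced, 1, ..., reduced] shape, which would no longer line up with
// per-channel fused operands (an inference; the patch does not state the reason).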
auto dts_supports_fusings = [](depth_to_space_node& node) -> bool {

View File

@@ -28,8 +28,8 @@ struct mvn_impl : typed_primitive_impl_ocl<mvn> {
auto params = get_default_params<kernel_selector::mvn_params>(impl_param, is_shape_agnostic);
auto optional_params = get_default_optional_params<kernel_selector::mvn_optional_params>(impl_param.get_program());
params.mvnMode = primitive->across_channels ? kernel_selector::mvn_mode::ACROSS_CHANNELS
: kernel_selector::mvn_mode::WITHIN_CHANNELS;
params.mvnMode = primitive->across_channels() ? kernel_selector::mvn_mode::ACROSS_CHANNELS
: kernel_selector::mvn_mode::WITHIN_CHANNELS;
params.mvnNormalizeVariance = primitive->normalize_variance;
params.epsilon = primitive->epsilon;
@@ -38,6 +38,47 @@ struct mvn_impl : typed_primitive_impl_ocl<mvn> {
return {params, optional_params};
}
static kernel_impl_params static_canonicalize_shapes(const kernel_impl_params& impl_params) {
auto updated_impl_params = canonicalize_fused_shapes(impl_params);
const auto& prim = impl_params.typed_desc<mvn>();
auto& input_layout = updated_impl_params.input_layouts[0];
auto input_pshape = input_layout.get_partial_shape();
auto input_rank = input_pshape.size();
if (prim->requires_alignment(input_pshape)) {
auto axes = prim->reduction_axes;
auto min_it = std::min_element(axes.begin(), axes.end());
auto min = min_it == axes.end() ? 1 : *min_it;
auto new_rank = std::max<size_t>(4, input_rank);
ov::PartialShape shape = ov::PartialShape::dynamic(new_rank);
auto& output_layout = updated_impl_params.output_layouts[0];
if (input_pshape.is_static()) {
for (size_t i = 0; i < new_rank; i++) {
shape[i] = 1;
}
// Split all dimensions into 2 parts:
// 1. normalized dimensions, which are flattened and written to the last dim
// 2. non-normalized dimensions, which are flattened and written to the first dim
for (size_t i = 0; i < input_rank; i++) {
shape[static_cast<int64_t>(i) < min ? 0 : (new_rank - 1)] *= input_pshape[i];
}
}
input_layout.set_partial_shape(shape);
output_layout.set_partial_shape(shape);
}
return updated_impl_params;
}
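// A hedged worked example of the canonicalization above (the numbers are
// illustrative, not from the patch): a static input [2, 16, 8, 8] with
// reduction_axes = {3} requires alignment; min == 3 and new_rank == 4, so the
// canonical shape becomes [2*16*8, 1, 1, 8] = [256, 1, 1, 8]. All non-reduced
// dims are folded into dim 0 and all reduced dims into the last dim, which the
// existing WITHIN_CHANNELS kernels already handle.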
kernel_impl_params canonicalize_shapes(const kernel_impl_params& impl_params) const override {
return static_canonicalize_shapes(impl_params);
}
void update_dispatch_data(const kernel_impl_params& impl_param) override {
auto kernel_params = get_kernel_params(impl_param, true);
(_kernel_data.update_dispatch_data_func)(kernel_params.first, _kernel_data);
@@ -57,8 +98,7 @@ attach_mvn_impl::attach_mvn_impl() {
auto dyn_formats = {
format::bfyx,
format::bfzyx,
format::bfwzyx
format::bfzyx
};
implementation_map<mvn>::add(impl_types::ocl,

View File

@@ -169,6 +169,7 @@ attach_reduction_onednn::attach_reduction_onednn() {
std::vector<format::type> fmt = {
format::bfyx,
format::bfzyx,
format::bfwzyx,
format::b_fs_yx_fsv16,
format::b_fs_yx_fsv32,
format::b_fs_zyx_fsv32,

View File

@@ -27,7 +27,7 @@ std::string mvn_inst::to_string(mvn_node const& node) {
auto node_info = node.desc_to_json();
auto desc = node.get_primitive();
auto epsilon = desc->epsilon;
auto across_channels = desc->across_channels ? "true" : "false";
auto axes = desc->reduction_axes;
auto normalize_variance = desc->normalize_variance ? "true" : "false";
auto eps_inside_sqrt = desc->eps_inside_sqrt ? "true" : "false";
auto& input = node.input();
@@ -37,7 +37,7 @@ std::string mvn_inst::to_string(mvn_node const& node) {
json_composite mvn_info;
mvn_info.add("input id", input.id());
mvn_info.add("epsilon", epsilon);
mvn_info.add("across_channels region", across_channels);
mvn_info.add("reduction axes", axes);
mvn_info.add("normalize_variance region", normalize_variance);
mvn_info.add("eps_inside_sqrt region", eps_inside_sqrt);

View File

@@ -1427,7 +1427,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
(prim.type() != cldnn::mvn::type_id()
|| (prim.as<mvn>().input().get_output_layout().data_type != data_types::u8 &&
prim.as<mvn>().input().get_output_layout().data_type != data_types::i8)
|| prim.as<mvn>().get_primitive()->across_channels) &&
|| prim.as<mvn>().get_primitive()->across_channels()) &&
prim.type() != cldnn::arg_max_min::type_id() &&
prim.type() != cldnn::dft::type_id() &&
prim.type() != cldnn::grid_sample::type_id() &&

View File

@@ -16,7 +16,7 @@ namespace ov {
namespace intel_gpu {
static void CreateCommonMVNOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
bool across_channels, bool normalize_variance, float eps, bool eps_inside_sqrt = true) {
std::vector<int64_t> axes, bool normalize_variance, float eps, bool eps_inside_sqrt = true) {
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
@@ -25,7 +25,7 @@ static void CreateCommonMVNOp(Program& p, const std::shared_ptr<ngraph::Node>& o
normalize_variance,
eps,
eps_inside_sqrt,
across_channels);
axes);
p.add_primitive(*op, mvnPrim);
}
@@ -37,7 +37,15 @@ static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v0::MVN>&
bool normalize_variance = op->get_normalize_variance();
float eps = op->get_eps();
CreateCommonMVNOp(p, op, across_channels, normalize_variance, eps);
int64_t axes_count = std::max<int64_t>(static_cast<int64_t>(op->get_input_partial_shape(0).size()) - 2, 0);
std::vector<int64_t> axes(axes_count);
std::iota(axes.begin(), axes.end(), 2);
if (across_channels) {
axes.insert(axes.begin(), 1);
}
CreateCommonMVNOp(p, op, axes, normalize_variance, eps);
}
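// A hedged example of the v0 mapping above, assuming a rank-4 input: the
// default spatial axes are {2, 3}; with across_channels == true the channel
// axis is prepended, giving {1, 2, 3}. This reproduces the old boolean
// semantics exactly.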
static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>& op) {
@@ -52,13 +60,11 @@ static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>&
ov::normalize_axes(op.get(), op->get_output_partial_shape(0).size(), axes);
OPENVINO_SUPPRESS_DEPRECATED_END
const size_t chanelAxis = 1;
bool across_channels = std::find(axes.begin(), axes.end(), chanelAxis) != axes.end();
bool normalize_variance = op->get_normalize_variance();
float eps = op->get_eps();
bool eps_inside_sqrt = op->get_eps_mode() == ngraph::op::MVNEpsMode::INSIDE_SQRT;
CreateCommonMVNOp(p, op, across_channels, normalize_variance, eps, eps_inside_sqrt);
CreateCommonMVNOp(p, op, axes, normalize_variance, eps, eps_inside_sqrt);
}
REGISTER_FACTORY_IMPL(v0, MVN);

View File

@@ -355,24 +355,34 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
});
}
pass_config->set_callback<ov::pass::MVN6Decomposition>(
[](const_node_ptr &node) -> bool {
const auto mvn = std::dynamic_pointer_cast<const ngraph::op::v6::MVN>(node);
if (mvn != nullptr && node->get_input_size() == 2) {
if (auto axesNode = dynamic_cast<ngraph::op::v0::Constant*>(mvn->get_input_node_ptr(1))) {
auto axesVal = axesNode->cast_vector<int>();
auto& mvnShape = mvn->get_output_partial_shape(0);
for (int32_t& axis : axesVal)
axis = axis < 0 ? axis + static_cast<int>(mvnShape.size()) : axis;
std::sort(axesVal.begin(), axesVal.end());
if (mvnShape.size() == 1)
if (auto axes_node = dynamic_cast<ngraph::op::v0::Constant*>(mvn->get_input_node_ptr(1))) {
auto mvn_axes = axes_node->cast_vector<int64_t>();
auto out_rank = mvn->get_output_partial_shape(0).size();
ov::normalize_axes(mvn.get(), out_rank, mvn_axes);
std::sort(mvn_axes.begin(), mvn_axes.end());
// Supported cases:
// 2 <= out_rank <= 5
// axes set: [out_rank - 1, out_rank - 2, ..., r] where r > 1
// i.e. the impl supports cases where the tensor can be reshaped to [d1, d2]
// such that d2 is the product of the dimensions being normalized
// Skip unsupported ranks
if (out_rank == 1 || out_rank > 5)
return false;
if (mvnShape.size() > 5 || (mvnShape.size() != axesVal.size() + 1 && mvnShape.size() != axesVal.size() + 2))
return false;
int value = static_cast<int>(mvnShape.size()) - 1;
for (int i = static_cast<int>(axesVal.size()) - 1; i >= 0; i--, value--) {
if (axesVal[i] != value)
return false;
// check axes set
for (size_t i = 0; i < mvn_axes.size(); i++) {
auto axis = mvn_axes[mvn_axes.size() - i - 1];
if (axis != static_cast<int64_t>(out_rank - i - 1) || axis == 0) {
return false;
}
}
return true;
}
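
A hedged illustration of which axis sets pass the callback above (shown after normalization and sorting, for out_rank == 4; the examples are mine, not from the patch):

{3}          -> kept on GPU without decomposition (contiguous trailing axes, none equal to 0)
{2, 3}       -> kept on GPU
{1, 2, 3}    -> kept on GPU
{1, 2}       -> decomposed (the set does not end at out_rank - 1)
{0, 1, 2, 3} -> decomposed (includes batch axis 0)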

View File

@@ -23,7 +23,7 @@ struct mvn_test_params {
tensor elwise_size;
data_types input_type;
format input_format;
bool across_channels;
std::vector<int64_t> reduction_axes;
bool normalize_variance;
data_types default_type;
format default_format;
@@ -63,46 +63,47 @@ public:
/* --------------------------------------- MVN cases --------------------------------------------------- */
/* ----------------------------------------------------------------------------------------------------- */
#define CASE_MVN_F32_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::f32, format::bfyx, false, true, data_types::f32, format::bfyx
#define CASE_MVN_F32_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::f32, format::bfyx, true, true, data_types::f32, format::bfyx
#define CASE_MVN_3D_F32_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::f32, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_F32_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::f32, format::bfzyx, true, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_F32_3 { 2, 8, 4, 4, 4 }, { 2, 8, 1, 1, 1 }, data_types::f32, format::bfzyx, true, true, data_types::f32, format::bfzyx
#define CASE_MVN_F16_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::f16, format::bfyx, false, true, data_types::f16, format::bfyx
#define CASE_MVN_F16_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::f16, format::bfyx, true, true, data_types::f16, format::bfyx
#define CASE_MVN_3D_F16_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::f16, format::bfzyx, false, true, data_types::f16, format::bfzyx
#define CASE_MVN_3D_F16_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::f16, format::bfzyx, true, true, data_types::f16, format::bfzyx
#define CASE_MVN_I8_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::i8, format::bfyx, false, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::i8, format::bfyx, true, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_3 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::i8, format::b_fs_yx_fsv16, false, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_4 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::i8, format::b_fs_yx_fsv16, true, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_5 { 2, 16, 8, 8 }, { 1, 1, 1, 8 }, data_types::i8, format::b_fs_yx_fsv16, false, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_6 { 2, 16, 8, 8 }, { 1, 1, 1, 1 }, data_types::i8, format::b_fs_yx_fsv16, true, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_7 { 2, 16, 1, 8 }, { 1, 1, 8, 1 }, data_types::i8, format::b_fs_yx_fsv16, true, true, data_types::f32, format::bfyx
#define CASE_MVN_3D_I8_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::i8, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::i8, format::bfzyx, true, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_3 { 2, 16, 8, 8, 8 }, { 2, 1, 8, 8, 1 }, data_types::i8, format::bfzyx, true, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_4 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 1, 8 }, data_types::i8, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_5 { 2, 2, 1, 2, 1 }, { 2, 2, 2, 2, 2 }, data_types::i8, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_U8_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::u8, format::bfyx, false, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::u8, format::bfyx, true, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_3 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::u8, format::b_fs_yx_fsv16, false, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_4 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::u8, format::b_fs_yx_fsv16, true, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_5 { 2, 16, 8, 8 }, { 2, 1, 8, 8 }, data_types::u8, format::b_fs_yx_fsv16, false, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_6 { 2, 16, 8, 8 }, { 1, 1, 1, 8 }, data_types::u8, format::b_fs_yx_fsv16, true, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_7 { 1, 16, 16, 1 }, { 1, 16, 1, 16 }, data_types::u8, format::b_fs_yx_fsv16, true, true, data_types::f32, format::bfyx
#define CASE_MVN_3D_U8_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::u8, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::u8, format::bfzyx, true, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_3 { 2, 16, 8, 8, 8 }, { 2, 1, 1, 1, 1 }, data_types::u8, format::bfzyx, true, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_4 { 2, 16, 8, 8, 8 }, { 1, 1, 1, 1, 1 }, data_types::u8, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_5 { 2, 16, 1, 8, 8 }, { 1, 1, 8, 1, 1 }, data_types::u8, format::bfzyx, false, true, data_types::f32, format::bfzyx
#define CASE_MVN_F32_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::f32, format::bfyx, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_F32_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::f32, format::bfyx, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_3D_F32_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::f32, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_F32_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::f32, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_F32_3 { 2, 8, 4, 4, 4 }, { 2, 8, 1, 1, 1 }, data_types::f32, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_F16_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::f16, format::bfyx, {2, 3}, true, data_types::f16, format::bfyx
#define CASE_MVN_F16_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::f16, format::bfyx, {1, 2, 3}, true, data_types::f16, format::bfyx
#define CASE_MVN_3D_F16_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::f16, format::bfzyx, {2, 3, 4}, true, data_types::f16, format::bfzyx
#define CASE_MVN_3D_F16_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::f16, format::bfzyx, {1, 2, 3, 4}, true, data_types::f16, format::bfzyx
#define CASE_MVN_I8_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::i8, format::bfyx, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::i8, format::bfyx, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_3 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::i8, format::b_fs_yx_fsv16, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_4 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::i8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_5 { 2, 16, 8, 8 }, { 1, 1, 1, 8 }, data_types::i8, format::b_fs_yx_fsv16, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_6 { 2, 16, 8, 8 }, { 1, 1, 1, 1 }, data_types::i8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_7 { 2, 16, 1, 8 }, { 1, 1, 8, 1 }, data_types::i8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_I8_8 { 2, 16, 3, 8 }, { 1, 1, 3, 8 }, data_types::i8, format::b_fs_yx_fsv16, {3}, true, data_types::f32, format::bfyx
#define CASE_MVN_3D_I8_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::i8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::i8, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_3 { 2, 16, 8, 8, 8 }, { 2, 1, 8, 8, 1 }, data_types::i8, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_4 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 1, 8 }, data_types::i8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_I8_5 { 2, 2, 1, 2, 1 }, { 2, 2, 2, 2, 2 }, data_types::i8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_U8_1 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::u8, format::bfyx, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_2 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::u8, format::bfyx, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_3 { 1, 16, 8, 8 }, { 1, 16, 8, 8 }, data_types::u8, format::b_fs_yx_fsv16, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_4 { 2, 16, 8, 8 }, { 2, 16, 8, 8 }, data_types::u8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_5 { 2, 16, 8, 8 }, { 2, 1, 8, 8 }, data_types::u8, format::b_fs_yx_fsv16, {2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_6 { 2, 16, 8, 8 }, { 1, 1, 1, 8 }, data_types::u8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_U8_7 { 1, 16, 16, 1 }, { 1, 16, 1, 16 }, data_types::u8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx
#define CASE_MVN_3D_U8_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::u8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::u8, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_3 { 2, 16, 8, 8, 8 }, { 2, 1, 1, 1, 1 }, data_types::u8, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_4 { 2, 16, 8, 8, 8 }, { 1, 1, 1, 1, 1 }, data_types::u8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
#define CASE_MVN_3D_U8_5 { 2, 16, 1, 8, 8 }, { 1, 1, 8, 1, 1 }, data_types::u8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx
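// Note: the boolean across_channels field of mvn_test_params is replaced by an
// explicit reduction_axes vector above: {2, 3} ({2, 3, 4} for 3D cases)
// reproduces the old within-channels flag, {1, 2, 3} ({1, 2, 3, 4}) the
// across-channels one, and the new CASE_MVN_I8_8 exercises the axis set {3},
// i.e. the axis=-1 case this commit enables.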
class mvn_activation : public MVNFusingTest {};
TEST_P(mvn_activation, basic) {
auto p = GetParam();
create_topologies(
input_layout("input", get_input_layout(p)),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, false),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, p.reduction_axes),
activation("act", input_info("mvn"), activation_func::hyperbolic_tan),
reorder("reorder_bfyx", input_info("act"), format::bfyx, data_types::f32)
);
@@ -124,6 +125,7 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, mvn_activation, ::testing::ValuesIn(std::v
mvn_test_params{ CASE_MVN_I8_2, 2, 3, 3 },
mvn_test_params{ CASE_MVN_I8_3, 2, 3, 3 },
mvn_test_params{ CASE_MVN_I8_4, 2, 3, 3 },
mvn_test_params{ CASE_MVN_I8_8, 2, 3, 3 },
mvn_test_params{ CASE_MVN_3D_I8_1, 2, 3, 3 },
mvn_test_params{ CASE_MVN_3D_I8_2, 2, 3, 3 },
mvn_test_params{ CASE_MVN_U8_1, 2, 3, 3 },
@@ -139,7 +141,7 @@ TEST_P(mvn_scale_quantize_i8, basic) {
auto p = GetParam();
create_topologies(
input_layout("input", get_input_layout(p)),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, false),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, p.reduction_axes),
data("scale_data", get_mem(get_per_channel_layout(p))),
eltwise("scale", { input_info("mvn"), input_info("scale_data") }, eltwise_mode::prod, p.default_type),
data("in_low", get_mem(get_per_channel_layout(p), min_random, 0)),
@@ -169,6 +171,7 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, mvn_scale_quantize_i8, ::testing::ValuesIn
mvn_test_params{ CASE_MVN_I8_2, 2, 2, 4 },
mvn_test_params{ CASE_MVN_I8_3, 2, 2, 4 },
mvn_test_params{ CASE_MVN_I8_4, 2, 2, 4 },
mvn_test_params{ CASE_MVN_I8_8, 3, 3, 4 },
mvn_test_params{ CASE_MVN_3D_I8_1, 2, 2, 4 },
mvn_test_params{ CASE_MVN_3D_I8_2, 2, 2, 4 },
mvn_test_params{ CASE_MVN_U8_1, 2, 2, 4 },
@@ -184,7 +187,7 @@ TEST_P(mvn_scale_activation_eltwise_fp32_quantize_i8, basic) {
auto p = GetParam();
create_topologies(
input_layout("input", get_input_layout(p)),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, false),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, p.reduction_axes),
data("scale_data", get_mem(get_per_channel_layout(p))),
eltwise("scale", { input_info("mvn"), input_info("scale_data") }, eltwise_mode::prod, p.default_type),
activation("act", input_info("scale"), activation_func::hyperbolic_tan),
@@ -220,6 +223,7 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, mvn_scale_activation_eltwise_fp32_quantize
mvn_test_params{ CASE_MVN_I8_5, 2, 4, 6 },
mvn_test_params{ CASE_MVN_I8_6, 2, 4, 6 },
mvn_test_params{ CASE_MVN_I8_7, 3, 4, 6 },
mvn_test_params{ CASE_MVN_I8_8, 3, 5, 6 },
mvn_test_params{ CASE_MVN_3D_I8_1, 2, 4, 6 },
mvn_test_params{ CASE_MVN_3D_I8_2, 2, 4, 6 },
mvn_test_params{ CASE_MVN_3D_I8_3, 2, 4, 6 },
@@ -244,7 +248,7 @@ TEST_P(mvn_eltwise, basic) {
auto p = GetParam();
create_topologies(
input_layout("input", layout{ p.input_type, p.input_format, p.input_size }),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, false),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, p.reduction_axes),
data("eltw_data", get_mem(layout{ p.input_type, p.default_format, p.elwise_size })),
eltwise("eltw", { input_info("mvn"), input_info("eltw_data") }, eltwise_mode::sum, data_types::f32),
reorder("reorder_bfyx", input_info("eltw"), p.default_format, data_types::f32)
@@ -280,7 +284,7 @@ TEST_P(mvn_eltwise_f16, basic) {
auto p = GetParam();
create_topologies(
input_layout("input", layout{ p.input_type, p.input_format, p.input_size }),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, false),
mvn("mvn", input_info("input"), p.normalize_variance, 1e-10f, false, p.reduction_axes),
data("eltw_data", get_mem(layout{ p.input_type, p.default_format, p.elwise_size })),
eltwise("eltw", { input_info("mvn"), input_info("eltw_data") }, eltwise_mode::sum, data_types::f16),
reorder("reorder_bfyx", input_info("eltw"), p.default_format, data_types::f32)
@@ -292,5 +296,6 @@ TEST_P(mvn_eltwise_f16, basic) {
INSTANTIATE_TEST_SUITE_P(fusings_gpu, mvn_eltwise_f16, ::testing::ValuesIn(std::vector<mvn_test_params>{
mvn_test_params{ CASE_MVN_I8_6, 2, 2, 3 },
mvn_test_params{ CASE_MVN_I8_8, 3, 3, 3 },
mvn_test_params{ CASE_MVN_U8_2, 2, 2, 3 },
}));

View File

@@ -25,7 +25,7 @@ struct mvn_test_params {
bool normalize_variance;
float epsilon;
bool eps_inside_sqrt;
bool across_channels;
std::vector<int64_t> axes;
};
class mvn_test : public testing::TestWithParam<mvn_test_params> { };
@@ -36,7 +36,7 @@ TEST_P(mvn_test, shape_infer) {
auto& engine = get_test_engine();
auto input_layout_prim = std::make_shared<input_layout>("input", p.input_layout);
auto mvn_prim = std::make_shared<mvn>("output", input_info("input"), p.normalize_variance, p.epsilon, p.eps_inside_sqrt, p.across_channels);
auto mvn_prim = std::make_shared<mvn>("output", input_info("input"), p.normalize_variance, p.epsilon, p.eps_inside_sqrt, p.axes);
cldnn::program prog(engine);
@@ -53,11 +53,11 @@ INSTANTIATE_TEST_SUITE_P(smoke, mvn_test,
testing::ValuesIn(std::vector<mvn_test_params>{
{
layout{ov::PartialShape{1, 2, 3}, data_types::f32, format::bfyx},
true, 1e-9f, true, true
true, 1e-9f, true, {2, 3}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
true, 1e-9f, true, true
true, 1e-9f, true, {1, 2, 3}
}
}));

View File

@@ -120,7 +120,7 @@ void test_mvn_test_across_channels_outside_sqrt_bfyx(bool is_caching_test) {
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, false, true));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, false, {1, 2, 3}));
cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test);
@@ -154,7 +154,7 @@ void test_mvn_test_across_channels_inside_sqrt_bfyx(bool is_caching_test) {
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, true, true));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, true, {1, 2, 3}));
cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test);
@@ -193,7 +193,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, true));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, {1, 2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -220,7 +220,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance)
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, true));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, {1, 2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -247,7 +247,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, true));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, {1, 2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -274,7 +274,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, true));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, {1, 2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -303,7 +303,7 @@ TEST(mvn_gpu_test, dynamic_across_channels_inside_sqrt_bfyx_normalize_variance_f
topology topology;
topology.add(input_layout("input", in_layout));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, true));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, {1, 2, 3}));
ExecutionConfig config = get_test_default_config(engine);
config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
@@ -336,7 +336,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx) {
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, false, false));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, false, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -363,7 +363,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt__bfyx) {
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, true, false));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, true, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -390,7 +390,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_fp16) {
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, false, false));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, false, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -417,7 +417,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_fp16) {
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, true, false));
topology.add(mvn("mvn", input_info("input"), false, 1e-10f, true, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -444,7 +444,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_normalize_variance
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, false));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -471,7 +471,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_normalize_variance)
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, false));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -498,7 +498,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx_normalize_variance
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, false));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, false, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -525,7 +525,7 @@ TEST(mvn_gpu_test, mvn_test_within_channels_inside_sqrt_bfyx_normalize_variance_
topology topology;
topology.add(input_layout("input", input->get_layout()));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, false));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, {2, 3}));
network network(engine, topology, get_test_default_config(engine));
@@ -554,7 +554,7 @@ TEST(mvn_gpu_test, dynamic_within_channels_inside_sqrt_bfyx_normalize_variance_f
topology topology;
topology.add(input_layout("input", in_layout));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, false));
topology.add(mvn("mvn", input_info("input"), true, 1e-10f, true, {2, 3}));
ExecutionConfig config = get_test_default_config(engine);
config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
@@ -656,10 +656,10 @@ struct mvn_random_test : ::testing::TestWithParam<mvn_basic_test_params> {
default:
break;
}
auto axes = params.across_channels ? std::vector<int64_t>{1, 2, 3} : std::vector<int64_t>{2, 3};
topology topo;
topo.add(input_layout("input", input->get_layout()));
auto prim = mvn("mvn", input_info("input"), params.normalize_variance, 1e-10f, false, params.across_channels);
auto prim = mvn("mvn", input_info("input"), params.normalize_variance, 1e-10f, false, axes);
prim.output_paddings = {output_pad};
topo.add(prim);
@@ -847,9 +847,10 @@ struct mvn_random_test_bsv32 : ::testing::TestWithParam<mvn_basic_test_params> {
break;
}
auto axes = params.across_channels ? std::vector<int64_t>{1, 2, 3} : std::vector<int64_t>{2, 3};
topology topo;
topo.add(input_layout("input", input->get_layout()));
auto prim = mvn("mvn", input_info("input"), params.normalize_variance, 1e-10f, false, params.across_channels);
auto prim = mvn("mvn", input_info("input"), params.normalize_variance, 1e-10f, false, axes);
prim.output_paddings = {output_pad};
topo.add(prim);
ExecutionConfig config = get_test_default_config(engine);
@@ -866,7 +867,7 @@ struct mvn_random_test_bsv32 : ::testing::TestWithParam<mvn_basic_test_params> {
topology topo_opt;
topo_opt.add(input_layout("input", input->get_layout()));
topo_opt.add(reorder("input_to_target_layout", input_info("input"), {params.input_type, params.input_format, size}));
auto prim_opt = mvn("mvn_opt", input_info("input_to_target_layout"), params.normalize_variance, 1e-10f, false, params.across_channels);
auto prim_opt = mvn("mvn_opt", input_info("input_to_target_layout"), params.normalize_variance, 1e-10f, false, axes);
prim_opt.output_paddings = {output_pad};
topo_opt.add(prim_opt);
ExecutionConfig config_opt = get_test_default_config(engine);

View File

@@ -75,7 +75,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MVN_5D, Mvn6LayerTest,
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 10, 5, 7, 8}, {1, 3, 8, 9, 49}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2, 3, 4}, {2, 3, 4}, {-3, -2, -1}, {-1, -4, -2, -3}}),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2, 3, 4}, {2, 3, 4}, {-3, -2, -1}, {-1, -4, -2, -3}, {-1}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
@@ -87,7 +87,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MVN_4D, Mvn6LayerTest,
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 10, 5, 17}, {1, 3, 8, 9}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2, 3}, {2, 3}, {-2, -1}, {-2, -1, -3}}),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2, 3}, {2, 3}, {-2, -1}, {-2, -1, -3}, {-1}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),

View File

@@ -12,32 +12,30 @@ using namespace ov::test;
namespace GPULayerTestsDefinitions {
using basicGPUMvnParams = std::tuple<
InputShape, // Input shapes
ElementType, // Input precision
ngraph::AxisSet, // Reduction axes
bool, // Across channels
bool, // Normalize variance
double>; // Epsilon
InputShape, // Input shapes
ElementType, // Input precision
std::vector<int>, // Reduction axes
bool, // Normalize variance
double>; // Epsilon
using MvnLayerGPUTestParamSet = std::tuple<
basicGPUMvnParams,
ElementType, // CNNNetwork input precision
ElementType>; // CNNNetwork output precision
ElementType>; // CNNNetwork input precision
class MvnLayerGPUTest : public testing::WithParamInterface<MvnLayerGPUTestParamSet>,
virtual public SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<MvnLayerGPUTestParamSet> obj) {
basicGPUMvnParams basicParamsSet;
ElementType inputPrecision, outputPrecision;
std::tie(basicParamsSet, inputPrecision, outputPrecision) = obj.param;
ElementType inputPrecision;
std::tie(basicParamsSet, inputPrecision) = obj.param;
InputShape inputShapes;
ElementType netPrecision;
ngraph::AxisSet axes;
bool acrossChanels, normalizeVariance;
std::vector<int> axes;
bool normalizeVariance;
double eps;
std::tie(inputShapes, netPrecision, axes, acrossChanels, normalizeVariance, eps) = basicParamsSet;
std::tie(inputShapes, netPrecision, axes, normalizeVariance, eps) = basicParamsSet;
std::ostringstream result;
result << "IS=" << CommonTestUtils::partialShape2str({inputShapes.first}) << "_";
@@ -46,15 +44,10 @@ public:
result << "(" << CommonTestUtils::vec2str(shape) << ")_";
}
result << "Precision=" << netPrecision << "_";
if (!axes.empty()) {
result << "ReductionAccess=" << CommonTestUtils::vec2str(axes.to_vector()) << "_";
} else {
result << "AcrossChannels=" << (acrossChanels ? "TRUE" : "FALSE") << "_";
}
result << "ReductionAccess=" << CommonTestUtils::vec2str(axes) << "_";
result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_";
result << "Epsilon=" << eps;
result << "_" << "CNNInpPrc=" << inputPrecision;
result << "_" << "CNNOutPrc=" << outputPrecision;
return result.str();
}
@@ -64,24 +57,24 @@ protected:
basicGPUMvnParams basicParamsSet;
ElementType inPrc;
ElementType outPrc;
std::tie(basicParamsSet, inPrc, outPrc) = this->GetParam();
std::tie(basicParamsSet, inPrc) = this->GetParam();
InputShape inputShapes;
ElementType netPrecision;
ngraph::AxisSet axes;
bool acrossChanels, normalizeVariance;
std::vector<int> axes;
bool normalizeVariance;
double eps;
std::tie(inputShapes, netPrecision, axes, acrossChanels, normalizeVariance, eps) = basicParamsSet;
std::tie(inputShapes, netPrecision, axes, normalizeVariance, eps) = basicParamsSet;
init_input_shapes({inputShapes});
auto axesType = ov::element::i64;
std::string eps_mode = "inside_sqrt";
auto param = ngraph::builder::makeDynamicParams(netPrecision, inputDynamicShapes);
auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(param));
auto mvn = ngraph::builder::makeMVN(paramOuts[0], acrossChanels, normalizeVariance, eps);
if (!axes.empty()) {
mvn = ngraph::builder::makeMVN(paramOuts[0], axes, normalizeVariance, eps);
}
auto axesNode = ngraph::builder::makeConstant(axesType, ngraph::Shape{axes.size()}, axes);
auto mvn = ngraph::builder::makeMVN6(paramOuts[0], axesNode, normalizeVariance, eps, eps_mode);
rel_threshold = 0.015f;
@@ -89,7 +82,7 @@ protected:
for (int i = 0; i < mvn->get_output_size(); ++i) {
results.push_back(std::make_shared<ngraph::opset1::Result>(mvn->output(i)));
}
function = std::make_shared<ngraph::Function>(results, param, "Pad");
function = std::make_shared<ngraph::Function>(results, param, "MVN");
}
};
@@ -225,10 +218,6 @@ const std::vector<InputShape> inputShapes_5D = {
}
};
const std::vector<bool> acrossChannels = {
true,
false
};
const std::vector<bool> normalizeVariance = {
true,
@@ -239,21 +228,25 @@ const std::vector<double> epsilon = {
0.000000001
};
const std::vector<ngraph::AxisSet> emptyReductionAxes = {{}};
const std::vector<int> reduction_axes_1234 = {1, 2, 3, 4};
const std::vector<int> reduction_axes_234 = {2, 3, 4};
const std::vector<int> reduction_axes_123 = {1, 2, 3};
const std::vector<int> reduction_axes_23 = {2, 3};
const std::vector<int> reduction_axes_12 = {1, 2};
const std::vector<int> reduction_axes_3 = {3};
const std::vector<int> reduction_axes_2 = {2};
const std::vector<int> empty_reduction_axes = {};
std::vector<ElementType> inpPrc = {ElementType::i8, ElementType::f16, ElementType::f32};
std::vector<ElementType> outPrc = {ElementType::f16, ElementType::f32};
const auto Mvn3D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_3D),
::testing::Values(ElementType::f32),
::testing::ValuesIn(emptyReductionAxes),
::testing::ValuesIn(acrossChannels),
::testing::ValuesIn({reduction_axes_12, reduction_axes_2}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc),
::testing::ValuesIn(outPrc));
::testing::ValuesIn(inpPrc));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn3D, MvnLayerGPUTest, Mvn3D, MvnLayerGPUTest::getTestCaseName);
@@ -261,12 +254,10 @@ const auto Mvn4D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_4D),
::testing::Values(ElementType::f32),
::testing::ValuesIn(emptyReductionAxes),
::testing::ValuesIn(acrossChannels),
::testing::ValuesIn({reduction_axes_2, reduction_axes_3, reduction_axes_12, reduction_axes_23, reduction_axes_123}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc),
::testing::ValuesIn(outPrc));
::testing::ValuesIn(inpPrc));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn4D, MvnLayerGPUTest, Mvn4D, MvnLayerGPUTest::getTestCaseName);
@@ -274,12 +265,10 @@ const auto Mvn5D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_5D),
::testing::Values(ElementType::f32),
::testing::ValuesIn(emptyReductionAxes),
::testing::ValuesIn(acrossChannels),
::testing::ValuesIn({reduction_axes_3, reduction_axes_23, reduction_axes_123, reduction_axes_1234}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc),
::testing::ValuesIn(outPrc));
::testing::ValuesIn(inpPrc));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn5D, MvnLayerGPUTest, Mvn5D, MvnLayerGPUTest::getTestCaseName);
@@ -287,12 +276,10 @@ const auto Mvn1D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_1D),
::testing::Values(ElementType::f32),
::testing::ValuesIn(emptyReductionAxes),
::testing::ValuesIn(acrossChannels),
::testing::ValuesIn({empty_reduction_axes}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc),
::testing::ValuesIn(outPrc));
::testing::ValuesIn(inpPrc));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn1D, MvnLayerGPUTest, Mvn1D, MvnLayerGPUTest::getTestCaseName);
@@ -301,12 +288,10 @@ const auto Mvn2D = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_2D),
::testing::Values(ElementType::f32),
::testing::ValuesIn(emptyReductionAxes),
::testing::Values(false),
::testing::ValuesIn({empty_reduction_axes}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc),
::testing::ValuesIn(outPrc));
::testing::ValuesIn(inpPrc));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn2D, MvnLayerGPUTest, Mvn2D, MvnLayerGPUTest::getTestCaseName);
@@ -315,12 +300,10 @@ const auto Mvn2DTrans = ::testing::Combine(
::testing::Combine(
::testing::ValuesIn(inputShapes_2D),
::testing::Values(ElementType::f32),
::testing::ValuesIn(emptyReductionAxes),
::testing::Values(true),
::testing::ValuesIn({empty_reduction_axes}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(epsilon)),
::testing::ValuesIn(inpPrc),
::testing::ValuesIn(outPrc));
::testing::ValuesIn(inpPrc));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn2DTrans, MvnLayerGPUTest, Mvn2DTrans, MvnLayerGPUTest::getTestCaseName);