[GPU] PriorBox-8 (#12995)

* fix: add missing attribute

* add single-layer test for PriorBox8

* disable the PriorBox-8 to PriorBox-0 downgrade transformation for the GPU plugin

* add kernel stuff

* add PriorBox-8 primitive and OCL implementation

* add unit tests

* enable NoScale test
Author: Oleksii Khovan
Date: 2022-10-04 13:53:40 +02:00 (committed by GitHub)
Parent: 4946f6d87b
Commit: fed0d1cebf
21 changed files with 1148 additions and 96 deletions

View File

@@ -234,6 +234,7 @@ REGISTER_FACTORY(v8, MaxPool);
REGISTER_FACTORY(v8, AdaptiveAvgPool);
REGISTER_FACTORY(v8, AdaptiveMaxPool);
REGISTER_FACTORY(v8, Softmax);
REGISTER_FACTORY(v8, PriorBox);
// ------------------------------ Supported v9 ops ------------------------------ //
REGISTER_FACTORY(v9, SoftSign)

View File

@@ -70,35 +70,45 @@ struct prior_box : public primitive_base<prior_box> {
fixed_size(fixed_size),
density(density),
clustered(false) {
this->aspect_ratios.push_back(1.f);
for (auto new_aspect_ratio : aspect_ratios) {
bool already_exist = false;
for (auto aspect_ratio : this->aspect_ratios) {
if (std::fabs(new_aspect_ratio - aspect_ratio) < 1e-6) {
already_exist = true;
break;
}
}
if (!already_exist) {
if (std::fabs(new_aspect_ratio) < std::numeric_limits<float>::epsilon()) {
throw std::runtime_error("prior_box aspect ratio can't be zero!");
}
this->aspect_ratios.push_back(new_aspect_ratio);
if (flip) {
this->aspect_ratios.push_back(1.f / new_aspect_ratio);
}
}
}
if (variance.size() > 1) {
for (size_t i = 0; i < variance.size(); ++i) {
this->variance.push_back(variance[i]);
}
} else if (variance.size() == 1) {
this->variance.push_back(variance[0]);
} else {
// Set default to 0.1.
this->variance.push_back(0.1f);
}
init(aspect_ratios, variance);
}
/// @brief Constructs prior-box primitive, which supports v8 features.
prior_box(const primitive_id& id,
const std::vector<primitive_id>& inputs,
const tensor& output_size,
const tensor& img_size,
const std::vector<float>& min_sizes,
const std::vector<float>& max_sizes = {},
const std::vector<float>& aspect_ratios = {},
const bool flip = true,
const bool clip = false,
const std::vector<float>& variance = {},
const float offset = 0.5f,
const bool scale_all_sizes = true,
const std::vector<float>& fixed_ratio = {},
const std::vector<float>& fixed_size = {},
const std::vector<float>& density = {},
const float step = 0.0f,
const bool min_max_aspect_ratios_order = true
)
: primitive_base{id, inputs},
output_size(output_size),
img_size(img_size),
min_sizes(min_sizes),
max_sizes(max_sizes),
flip(flip),
clip(clip),
offset(offset),
scale_all_sizes(scale_all_sizes),
fixed_ratio(fixed_ratio),
fixed_size(fixed_size),
density(density),
support_opset8{true},
step{step},
min_max_aspect_ratios_order{min_max_aspect_ratios_order},
clustered(false) {
init(aspect_ratios, variance);
}
/// @brief Constructs prior-box primitive, which executes clustered version.
@@ -128,42 +138,84 @@ struct prior_box : public primitive_base<prior_box> {
clustered(true) {
}
/// @brief Spatial size of generated grid with boxes.
tensor output_size{};
/// @brief Image width and height.
tensor img_size;
tensor img_size{};
/// @brief Minimum box sizes in pixels.
std::vector<float> min_sizes;
std::vector<float> min_sizes{};
/// @brief Maximum box sizes in pixels.
std::vector<float> max_sizes;
std::vector<float> max_sizes{};
/// @brief Various of aspect ratios. Duplicate ratios will be ignored.
std::vector<float> aspect_ratios;
std::vector<float> aspect_ratios{};
/// @brief If true, will flip each aspect ratio. For example, if there is aspect ratio "r", aspect ratio "1.0/r" will be generated as well.
bool flip;
bool flip{false};
/// @brief If true, will clip the prior so that it is within [0, 1].
bool clip;
bool clip{false};
/// @brief Variance for adjusting the prior boxes.
std::vector<float> variance;
std::vector<float> variance{};
/// @brief Step width.
float step_width;
float step_width{0.0f};
/// @brief Step height.
float step_height;
float step_height{0.0f};
/// @brief Offset to the top left corner of each cell.
float offset;
float offset{0.0f};
/// @brief If false, only the first min_size is scaled by aspect_ratios.
bool scale_all_sizes;
bool scale_all_sizes{true};
std::vector<float> fixed_ratio;
std::vector<float> fixed_size;
std::vector<float> density;
std::vector<float> fixed_ratio{};
std::vector<float> fixed_size{};
std::vector<float> density{};
// required for v8
bool support_opset8{false};
float step{0.0f};
bool min_max_aspect_ratios_order{true};
/// @brief Required for clustered version.
std::vector<float> widths;
std::vector<float> widths{};
/// @brief Required for clustered version.
std::vector<float> heights;
std::vector<float> heights{};
bool is_clustered() const { return clustered; }
private:
bool clustered;
void init(const std::vector<float>& ratios, const std::vector<float>& variances) {
constexpr auto default_aspect_ratio = 1.0f;
aspect_ratios.push_back(default_aspect_ratio);
constexpr auto aspect_ratio_threshold = 1e-6;
for (auto new_aspect_ratio : ratios) {
bool already_exist = false;
for (auto aspect_ratio : aspect_ratios) {
if (std::fabs(new_aspect_ratio - aspect_ratio) < aspect_ratio_threshold) {
already_exist = true;
break;
}
}
if (!already_exist) {
if (std::fabs(new_aspect_ratio) < std::numeric_limits<float>::epsilon()) {
throw std::runtime_error("prior_box aspect ratio can't be zero!");
}
aspect_ratios.push_back(new_aspect_ratio);
if (flip) {
aspect_ratios.push_back(1.0f / new_aspect_ratio);
}
}
}
const auto variances_size = variances.size();
if (variances_size == 0) {
constexpr auto default_variance = 0.1f;
variance.push_back(default_variance);
} else if (variances_size == 1 || variances_size == 4) {
variance.resize(variances_size);
std::copy(variances.cbegin(), variances.cend(), variance.begin());
} else {
throw std::runtime_error("Variances size must be 0, 1, or 4");
}
}
};
/// @}
/// @}
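
For context, a minimal sketch of how the new v8-capable constructor might be invoked; the primitive and input ids and attribute values below are illustrative (they mirror the unit tests further down) and are not part of the commit.

// Sketch only: ids and attribute values are hypothetical.
const cldnn::prior_box prior_box_v8{"prior_box",
{"output_size", "image_size"}, // two constant inputs: grid size and image size
cldnn::tensor{cldnn::spatial(2, 2)}, // feature-map grid
cldnn::tensor{cldnn::spatial(10, 10)}, // image size
/*min_sizes*/ {2.0f},
/*max_sizes*/ {5.0f},
/*aspect_ratios*/ {1.5f},
/*flip*/ true,
/*clip*/ false,
/*variance*/ {},
/*offset*/ 0.5f,
/*scale_all_sizes*/ true,
/*fixed_ratio*/ {},
/*fixed_size*/ {},
/*density*/ {},
/*step*/ 0.0f,
/*min_max_aspect_ratios_order*/ true};
// This constructor sets support_opset8 = true, which routes the node to the new OCL kernel.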

View File

@@ -20,6 +20,9 @@ void calculate_prior_boxes::run(program& p) {
continue;
auto& pb_node = node->as<prior_box>();
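// PriorBox-8 nodes are executed by the new OCL kernel rather than pre-computed on the
// host, so the constant-calculation pass skips them.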
if (pb_node.get_primitive()->support_opset8) {
continue;
}
pb_node.calc_result();
p.remove_connection(pb_node.input(), pb_node);

View File

@@ -109,13 +109,6 @@ struct implementation_key<input_layout> {
typedef int32_t type;
type operator()(const layout&) { return -1; }
};
template <>
struct implementation_key<prior_box> {
typedef int32_t type;
type operator()(const layout&) { return -1; }
};
template <>
struct implementation_key<loop> {
typedef int32_t type;

View File

@@ -0,0 +1,102 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <prior_box/prior_box_kernel_ref.h>
#include <prior_box/prior_box_kernel_selector.h>
#include <prior_box_inst.h>
#include <impls/implementation_map.hpp>
#include <vector>
#include "intel_gpu/runtime/error_handler.hpp"
#include "primitive_base.hpp"
namespace cldnn {
namespace ocl {
struct prior_box_impl : typed_primitive_impl_ocl<prior_box> {
using parent = typed_primitive_impl_ocl<prior_box>;
using parent::parent;
std::unique_ptr<primitive_impl> clone() const override {
return make_unique<prior_box_impl>(*this);
}
static primitive_impl* create(const prior_box_node& arg, const kernel_impl_params& impl_param) {
auto params = get_default_params<kernel_selector::prior_box_params>(impl_param);
const auto& kernel_selector = kernel_selector::prior_box_kernel_selector::Instance();
const auto& primitive = arg.get_primitive();
const auto width = primitive->output_size.spatial[0];
const auto height = primitive->output_size.spatial[1];
const auto image_width = primitive->img_size.spatial[0];
const auto image_height = primitive->img_size.spatial[1];
params.min_size = primitive->min_sizes;
params.max_size = primitive->max_sizes;
params.density = primitive->density;
params.fixed_ratio = primitive->fixed_ratio;
params.fixed_size = primitive->fixed_size;
params.clip = primitive->clip;
params.flip = primitive->flip;
params.scale_all_sizes = primitive->scale_all_sizes;
params.step = primitive->step;
float step = params.step;
if (!params.scale_all_sizes) {
// mxnet-like PriorBox
if (step == -1) {
step = 1.f * image_height / height;
} else {
step *= image_height;
}
for (auto& size : params.min_size) {
size *= image_height;
}
}
params.offset = primitive->offset;
params.min_max_aspect_ratios_order = primitive->min_max_aspect_ratios_order;
params.aspect_ratio = primitive->aspect_ratios;
params.variance = primitive->variance;
params.reverse_image_width = 1.0f / image_width;
params.reverse_image_height = 1.0f / image_height;
params.width = width;
params.height = height;
if (step == 0) {
params.step_x = image_width / width;
params.step_y = image_height / height;
} else {
params.step_x = step;
params.step_y = step;
}
params.widths = primitive->widths;
params.heights = primitive->heights;
const auto output_shape = impl_param.output_layout.get_shape();
params.num_priors_4 = output_shape[1] / (params.width * params.height);
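// output_shape[1] equals 4 * height * width * num_priors (see get_output_shape), so
// num_priors_4 is the per-cell stride (4 * num_priors) the kernel uses to index its output.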
params.inputs.push_back(convert_data_tensor(impl_param.input_layouts[1]));
const auto best_kernels = kernel_selector.GetBestKernels(params, kernel_selector::prior_box_optional_params());
CLDNN_ERROR_BOOL(arg.id(),
"Best_kernel.empty()",
best_kernels.empty(),
"Cannot find a proper kernel with this arguments");
return new prior_box_impl(arg, best_kernels[0]);
}
};
namespace detail {
attach_prior_box_impl::attach_prior_box_impl() {
auto types = {data_types::i32, data_types::i64};
auto formats = {format::bfyx,
format::b_fs_yx_fsv16,
format::b_fs_yx_fsv32,
format::bs_fs_yx_bsv16_fsv16,
format::bs_fs_yx_bsv32_fsv16,
format::bs_fs_yx_bsv32_fsv32};
implementation_map<prior_box>::add(impl_types::ocl, prior_box_impl::create, types, formats);
}
} // namespace detail
} // namespace ocl
} // namespace cldnn

View File

@@ -52,6 +52,7 @@ void register_implementations() {
REGISTER_OCL(one_hot);
REGISTER_OCL(permute);
REGISTER_OCL(pooling);
REGISTER_OCL(prior_box);
REGISTER_OCL(pyramid_roi_align);
REGISTER_OCL(quantize);
REGISTER_OCL(random_uniform);

View File

@@ -128,6 +128,7 @@ REGISTER_OCL(normalize);
REGISTER_OCL(one_hot);
REGISTER_OCL(permute);
REGISTER_OCL(pooling);
REGISTER_OCL(prior_box);
REGISTER_OCL(pyramid_roi_align);
REGISTER_OCL(quantize);
REGISTER_OCL(random_uniform);

View File

@@ -25,6 +25,7 @@
#include "mvn_inst.h"
#include "depth_to_space_inst.h"
#include "region_yolo_inst.h"
#include "prior_box_inst.h"
#include <vector>
#include <memory>
#include <utility>
@@ -1586,6 +1587,10 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
}
preferred_impl = impl_candidate;
} else if (node.is_type<prior_box>()) {
if (node.as<prior_box>().get_primitive()->support_opset8) {
preferred_impl = impl_types::ocl;
}
}
return preferred_impl;

View File

@@ -217,10 +217,76 @@ void calculate_prior_box_output(memory::ptr output_mem, stream& stream, layout c
}
}
}
std::string vector_to_string(const std::vector<float>& vec) {
std::stringstream result;
for (size_t i = 0; i < vec.size(); i++)
result << vec.at(i) << ", ";
return result.str();
}
std::vector<float> normalized_aspect_ratio(const std::vector<float>& aspect_ratio, bool flip) {
std::set<float> unique_ratios;
for (auto ratio : aspect_ratio) {
unique_ratios.insert(std::round(ratio * 1e6) / 1e6);
if (flip)
unique_ratios.insert(std::round(1 / ratio * 1e6) / 1e6);
}
unique_ratios.insert(1);
return std::vector<float>(unique_ratios.begin(), unique_ratios.end());
}
int64_t number_of_priors(const std::vector<float>& aspect_ratio,
const std::vector<float>& min_size,
const std::vector<float>& max_size,
const std::vector<float>& fixed_size,
const std::vector<float>& fixed_ratio,
const std::vector<float>& densities,
bool scale_all_sizes,
bool flip) {
// Start with zero priors; the conditions on the attributes below then contribute the
// actual number of prior boxes, since PriorBox has several modes of operation that are
// checked in order in the following statements.
int64_t num_priors = 0;
// Total number of boxes around each point: depends on whether flipped aspect ratios are
// included, plus one 1x1 box.
int64_t total_aspect_ratios = normalized_aspect_ratio(aspect_ratio, flip).size();
if (scale_all_sizes) {
num_priors = total_aspect_ratios * min_size.size() + max_size.size();
} else {
num_priors = total_aspect_ratios + min_size.size() - 1;
}
if (!fixed_size.empty()) {
num_priors = total_aspect_ratios * fixed_size.size();
}
for (auto density : densities) {
auto rounded_density = static_cast<int64_t>(density);
auto density_2d = (rounded_density * rounded_density - 1);
if (!fixed_ratio.empty()) {
num_priors += fixed_ratio.size() * density_2d;
} else {
num_priors += total_aspect_ratios * density_2d;
}
}
return num_priors;
}
tensor get_output_shape(int32_t height, int32_t width, int32_t number_of_priors) {
return tensor{std::vector<int32_t>{2, 4 * height * width * number_of_priors}};
}
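// Worked example (for illustration): with min_size = {2}, max_size = {5},
// aspect_ratio = {1.5}, flip = false, no fixed sizes or densities and
// scale_all_sizes = true, normalized_aspect_ratio() yields {1.0, 1.5}, so
// number_of_priors() returns 2 * 1 + 1 = 3 and a 2x2 grid produces the output
// shape {2, 4 * 2 * 2 * 3} = {2, 48}.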
} // namespace
prior_box_node::typed_program_node(std::shared_ptr<prior_box> prim, program& prog) : parent(prim, prog) {
constant = true;
if (prim->support_opset8) {
impl_type = impl_types::ocl;
constant = false;
} else {
constant = true;
}
}
void prior_box_node::calc_result() {
@@ -356,47 +422,22 @@ void prior_box_node::calc_result() {
}
layout prior_box_inst::calc_output_layout(prior_box_node const& node, kernel_impl_params const& impl_param) {
auto desc = impl_param.typed_desc<prior_box>();
auto input_layout = impl_param.get_input_layout();
const int layer_width = input_layout.spatial(0);
const int layer_height = input_layout.spatial(1);
int num_priors = desc->is_clustered() ?
static_cast<int>(desc->widths.size()) :
desc->scale_all_sizes
? static_cast<int>(desc->aspect_ratios.size()) * static_cast<int>(desc->min_sizes.size()) + static_cast<int>(desc->max_sizes.size())
: static_cast<int>(desc->aspect_ratios.size()) + static_cast<int>(desc->min_sizes.size()) + static_cast<int>(desc->max_sizes.size()) - 1;
if (desc->fixed_size.size() > 0) {
num_priors = static_cast<int>(desc->aspect_ratios.size() * desc->fixed_size.size());
const auto primitive = impl_param.typed_desc<prior_box>();
auto number = number_of_priors(primitive->aspect_ratios,
primitive->min_sizes,
primitive->max_sizes,
primitive->fixed_size,
primitive->fixed_ratio,
primitive->density,
primitive->scale_all_sizes,
primitive->flip);
if (primitive->is_clustered()) {
number = primitive->widths.size();
}
const auto output_type = primitive->output_data_type ? *primitive->output_data_type : data_types::f32;
const auto output_shape = get_output_shape(primitive->output_size.spatial[1], primitive->output_size.spatial[0], number);
if (desc->density.size() > 0) {
for (size_t i = 0; i < desc->density.size(); ++i) {
if (desc->fixed_ratio.size() > 0) {
num_priors += static_cast<int>(desc->fixed_ratio.size()) * (static_cast<int>(pow(desc->density[i], 2)) - 1);
} else {
num_priors += static_cast<int>(desc->aspect_ratios.size()) * (static_cast<int>(pow(desc->density[i], 2)) - 1);
}
}
}
// Since all images in a batch has same height and width, we only need to
// generate one set of priors which can be shared across all images.
// 2 features. First feature stores the mean of each prior coordinate.
// Second feature stores the variance of each prior coordinate.
auto output_data_type = input_layout.data_type == data_types::f16 ? data_types::f16 : data_types::f32;
if (desc->output_data_type)
output_data_type = *desc->output_data_type;
return {output_data_type, cldnn::format::bfyx, cldnn::tensor(1, 2, 1, layer_width * layer_height * num_priors * 4)};
}
std::string vector_to_string(std::vector<float> vec) {
std::stringstream result;
for (size_t i = 0; i < vec.size(); i++) result << vec.at(i) << ", ";
return result.str();
return {output_type, impl_param.get_input_layout().format, output_shape};
}
std::string prior_box_inst::to_string(prior_box_node const& node) {
@@ -439,6 +480,7 @@ std::string prior_box_inst::to_string(prior_box_node const& node) {
step_info.add("step height", desc->step_height);
step_info.add("offset", desc->offset);
prior_info.add("step", step_info);
prior_info.add("min max aspect ratios order", desc->min_max_aspect_ratios_order);
if (node.is_clustered()) {
json_composite clustered_info;
@@ -453,7 +495,6 @@ std::string prior_box_inst::to_string(prior_box_node const& node) {
}
prior_box_inst::typed_primitive_inst(network& network, prior_box_node const& node) : parent(network, node) {
CLDNN_ERROR_MESSAGE(node.id(), "Prior box primitive instance should not be created!");
}
} // namespace cldnn

View File

@@ -1429,7 +1429,8 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
prim.type() != cldnn::roi_align::type_id() &&
prim.type() != cldnn::adaptive_pooling::type_id() &&
prim.type() != cldnn::bucketize::type_id() &&
prim.type() != cldnn::roll::type_id()) {
prim.type() != cldnn::roll::type_id() &&
prim.type() != cldnn::prior_box::type_id()) {
can_use_fsv16 = false;
}
@@ -1463,7 +1464,8 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
prim.type() != cldnn::adaptive_pooling::type_id() &&
prim.type() != cldnn::bucketize::type_id() &&
prim.type() != cldnn::roll::type_id() &&
prim.type() != cldnn::resample::type_id()) {
prim.type() != cldnn::resample::type_id() &&
prim.type() != cldnn::prior_box::type_id()) {
can_use_bs_fs_yx_bsv16_fsv16 = false;
}
}

View File

@@ -88,7 +88,8 @@ enum class KernelType {
CONVERT_COLOR,
RANDOM_UNIFORM,
ADAPTIVE_POOLING,
REVERSE
REVERSE,
PRIOR_BOX,
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@@ -0,0 +1,130 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "prior_box_kernel_ref.h"
#include <kernel_selector_utils.h>
#include <iostream>
namespace kernel_selector {
namespace {
CommonDispatchData SetDefault(const prior_box_params& params, const optional_params&) {
kernel_selector::CommonDispatchData dispatchData;
dispatchData.gws = {params.width, params.height, 1};
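// one work item per output grid cell (w, h)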
dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo);
return dispatchData;
}
} // namespace
KernelsData PriorBoxKernelRef::GetKernelsData(const Params& params, const optional_params& options) const {
if (!Validate(params, options)) {
return {};
}
KernelData kernel_data = KernelData::Default<prior_box_params>(params);
const prior_box_params& new_params = dynamic_cast<const prior_box_params&>(*kernel_data.params.get());
const auto dispatch_data = SetDefault(new_params, options);
const auto entry_point = GetEntryPoint(kernelName, new_params.layerID, params, options);
const auto specific_jit = GetJitConstants(new_params);
const auto jit = CreateJit(kernelName, specific_jit, entry_point);
FillCLKernelData(kernel_data.kernels[0],
dispatch_data,
params.engineInfo,
kernelName,
jit,
entry_point,
"",
false,
false,
2);
KernelsData kernelsData;
kernelsData.push_back(std::move(kernel_data));
return kernelsData;
}
KernelsPriority PriorBoxKernelRef::GetKernelsPriority(const Params& /*params*/,
const optional_params& /*options*/) const {
return DONT_USE_IF_HAVE_SOMETHING_ELSE;
}
ParamsKey PriorBoxKernelRef::GetSupportedKey() const {
ParamsKey k;
k.EnableInputDataType(Datatype::INT32);
k.EnableInputDataType(Datatype::INT64);
k.EnableOutputDataType(Datatype::F16);
k.EnableOutputDataType(Datatype::F32);
k.EnableDifferentTypes();
k.EnableInputLayout(DataLayout::bfyx);
k.EnableInputLayout(DataLayout::b_fs_yx_fsv16);
k.EnableInputLayout(DataLayout::b_fs_yx_fsv32);
k.EnableInputLayout(DataLayout::bs_fs_yx_bsv16_fsv16);
k.EnableInputLayout(DataLayout::bs_fs_yx_bsv32_fsv16);
k.EnableInputLayout(DataLayout::bs_fs_yx_bsv32_fsv32);
k.EnableOutputLayout(DataLayout::bfyx);
k.EnableOutputLayout(DataLayout::b_fs_yx_fsv16);
k.EnableOutputLayout(DataLayout::b_fs_yx_fsv32);
k.EnableOutputLayout(DataLayout::bs_fs_yx_bsv16_fsv16);
k.EnableOutputLayout(DataLayout::bs_fs_yx_bsv32_fsv16);
k.EnableOutputLayout(DataLayout::bs_fs_yx_bsv32_fsv32);
k.EnableBatching();
k.EnableTensorPitches();
return k;
}
bool PriorBoxKernelRef::Validate(const Params& params, const optional_params& optionalParams) const {
if (params.GetType() != KernelType::PRIOR_BOX || optionalParams.GetType() != KernelType::PRIOR_BOX) {
return false;
}
const auto& priorBoxParams = dynamic_cast<const prior_box_params&>(params);
if (priorBoxParams.inputs.size() != 2) {
return false;
}
return true;
}
JitConstants PriorBoxKernelRef::GetJitConstants(const prior_box_params& params) const {
JitConstants jit = MakeBaseParamsJitConstants(params);
jit.AddConstant(MakeJitConstant("MIN_SIZE", params.min_size));
jit.AddConstant(MakeJitConstant("MAX_SIZE", params.max_size));
jit.AddConstant(MakeJitConstant("DENSITY", params.density));
jit.AddConstant(MakeJitConstant("FIXED_RATIO", params.fixed_ratio));
jit.AddConstant(MakeJitConstant("FIXED_SIZE", params.fixed_size));
if (params.clip) {
jit.AddConstant(MakeJitConstant("CLIP", 1));
}
if (params.flip) {
jit.AddConstant(MakeJitConstant("FLIP", 1));
}
if (params.step != 0.0f) {
jit.AddConstant(MakeJitConstant("STEP", params.step));
}
jit.AddConstant(MakeJitConstant("OFFSET", params.offset));
jit.AddConstant(MakeJitConstant("SCALE_ALL_SIZES", params.scale_all_sizes));
if (params.min_max_aspect_ratios_order) {
jit.AddConstant(MakeJitConstant("MIN_MAX_ASPECT_RATIO_ORDER", 1));
}
jit.AddConstant(MakeJitConstant("ASPECT_RATIO", params.aspect_ratio));
jit.AddConstant(MakeJitConstant("VARIANCE", params.variance));
jit.AddConstant(MakeJitConstant("IWI", params.reverse_image_width));
jit.AddConstant(MakeJitConstant("IHI", params.reverse_image_height));
jit.AddConstant(MakeJitConstant("STEP_X", params.step_x));
jit.AddConstant(MakeJitConstant("STEP_Y", params.step_y));
jit.AddConstant(MakeJitConstant("WIDTH", params.width));
jit.AddConstant(MakeJitConstant("HEIGHT", params.height));
jit.AddConstant(MakeJitConstant("NUM_PRIORS_4", params.num_priors_4));
jit.AddConstant(MakeJitConstant("WIDTHS", params.widths));
jit.AddConstant(MakeJitConstant("HEIGHTS", params.heights));
return jit;
}
} // namespace kernel_selector

View File

@@ -0,0 +1,67 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include "kernel_base_opencl.h"
namespace kernel_selector {
/**
* Prior Box kernel params.
*/
struct prior_box_params : public base_params {
prior_box_params() : base_params{KernelType::PRIOR_BOX} {}
// operation attributes
std::vector<float> min_size;
std::vector<float> max_size;
std::vector<float> density;
std::vector<float> fixed_ratio;
std::vector<float> fixed_size;
bool clip;
bool flip;
float step;
float offset;
bool scale_all_sizes;
bool min_max_aspect_ratios_order;
std::vector<float> widths;
std::vector<float> heights;
std::vector<float> aspect_ratio;
std::vector<float> variance;
float reverse_image_width, reverse_image_height;
float step_x, step_y;
uint32_t width, height;
uint32_t num_priors_4;
};
/**
* Specific optional params are not defined for the PriorBox v8 operation.
*/
struct prior_box_optional_params : optional_params {
prior_box_optional_params() : optional_params{KernelType::PRIOR_BOX} {}
};
/**
* Reference GPU kernel for the PriorBox-8 operation.
*/
class PriorBoxKernelRef : public KernelBaseOpenCL {
public:
PriorBoxKernelRef() : KernelBaseOpenCL{"prior_box_ref"} {}
private:
KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
bool Validate(const Params& params, const optional_params& optionalParams) const override;
JitConstants GetJitConstants(const prior_box_params& params) const;
};
} /* namespace kernel_selector */

View File

@@ -0,0 +1,18 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "prior_box_kernel_selector.h"
#include "prior_box_kernel_ref.h"
namespace kernel_selector {
KernelsData kernel_selector::prior_box_kernel_selector::GetBestKernels(const Params& params,
const optional_params& options) const {
return GetNaiveBestKernel(params, options, KernelType::PRIOR_BOX);
}
prior_box_kernel_selector::prior_box_kernel_selector() {
Attach<PriorBoxKernelRef>();
}
} // namespace kernel_selector

View File

@@ -0,0 +1,27 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "kernel_selector.h"
#pragma once
namespace kernel_selector {
/**
* GPU kernel selector for the PriorBox operation
*/
class prior_box_kernel_selector : public kernel_selector_base {
public:
static prior_box_kernel_selector& Instance() {
static prior_box_kernel_selector instance_;
return instance_;
}
KernelsData GetBestKernels(const Params& params, const optional_params& options) const override;
private:
prior_box_kernel_selector();
};
} // namespace kernel_selector

View File

@@ -0,0 +1,194 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "include/batch_headers/data_types.cl"
#include "include/batch_headers/fetch_data.cl"
inline OUTPUT_TYPE FUNC(clip_great)(OUTPUT_TYPE x, OUTPUT_TYPE threshold) {
return x < threshold ? x : threshold;
}
inline OUTPUT_TYPE FUNC(clip_less)(OUTPUT_TYPE x, OUTPUT_TYPE threshold) {
return x > threshold ? x : threshold;
}
inline uint FUNC(get_index)(INPUT0_TYPE w, INPUT0_TYPE h) {
return (w + h * WIDTH) * NUM_PRIORS_4;
}
inline void FUNC(calculate_data)(OUTPUT_TYPE center_x,
OUTPUT_TYPE center_y,
OUTPUT_TYPE box_width,
OUTPUT_TYPE box_height,
bool clip,
uint* output_index,
__global OUTPUT_TYPE* dst_data) {
uint idx = *output_index;
if (clip) {
// order: xmin, ymin, xmax, ymax
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = FUNC_CALL(clip_less)((center_x - box_width) * IWI, 0);
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = FUNC_CALL(clip_less)((center_y - box_height) * IHI, 0);
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = FUNC_CALL(clip_great)((center_x + box_width) * IWI, 1);
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = FUNC_CALL(clip_great)((center_y + box_height) * IHI, 1);
} else {
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = (center_x - box_width) * IWI;
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = (center_y - box_height) * IHI;
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = (center_x + box_width) * IWI;
dst_data[OUTPUT_GET_INDEX(0, idx++, 0, 0)] = (center_y + box_height) * IHI;
}
*output_index = idx;
}
KERNEL(ref)
(const __global INPUT0_TYPE* output_size, const __global INPUT1_TYPE* image_size, __global OUTPUT_TYPE* output) {
const uint w = get_global_id(0);
const uint h = get_global_id(1);
uint out_index = FUNC_CALL(get_index)(w, h);
const uint start_out_index = out_index;
OUTPUT_TYPE center_x, center_y;
#ifdef STEP
center_x = (OFFSET + w) * STEP;
center_y = (OFFSET + h) * STEP;
#else
center_x = (w + 0.5f) * STEP_X;
center_y = (h + 0.5f) * STEP_Y;
#endif
OUTPUT_TYPE box_width, box_height;
for (uint s = 0; s < FIXED_SIZE_SIZE; ++s) {
#if FIXED_SIZE_SIZE > 0
OUTPUT_TYPE fixed_size_ = FIXED_SIZE[s];
#else
OUTPUT_TYPE fixed_size_ = 0;
#endif
box_height = box_width = fixed_size_ * 0.5f;
#if FIXED_RATIO_SIZE > 0
for (uint k = 0; k < FIXED_RATIO_SIZE; ++k) {
OUTPUT_TYPE ar = FIXED_RATIO[k];
uint density_ = DENSITY[s];
uint shift = FIXED_SIZE[s] / density_;
ar = sqrt(ar);
OUTPUT_TYPE box_width_ratio = FIXED_SIZE[s] * 0.5f * ar;
OUTPUT_TYPE box_height_ratio = FIXED_SIZE[s] * 0.5f / ar;
for (uint r = 0; r < density_; ++r) {
for (uint c = 0; c < density_; ++c) {
OUTPUT_TYPE center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
OUTPUT_TYPE center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
FUNC_CALL(calculate_data)
(center_x_temp, center_y_temp, box_width_ratio, box_height_ratio, true, &out_index, output);
}
}
}
#else
#if DENSITY_SIZE > 0 && FIXED_SIZE_SIZE > 0
uint density_ = DENSITY[s];
uint shift = FIXED_SIZE[s] / density_;
for (uint r = 0; r < density_; ++r) {
for (uint c = 0; c < density_; ++c) {
OUTPUT_TYPE center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
OUTPUT_TYPE center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
FUNC_CALL(calculate_data)
(center_x_temp, center_y_temp, box_width, box_height, true, &out_index, output);
}
}
#endif
// Rest of priors
for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
OUTPUT_TYPE ar = ASPECT_RATIO[k];
if (fabs(ar - 1.0f) < 1e-6) {
continue;
}
#if DENSITY_SIZE > 0 && FIXED_SIZE_SIZE > 0
uint density_ = DENSITY[s];
uint shift = FIXED_SIZE[s] / density_;
ar = sqrt(ar);
OUTPUT_TYPE box_width_ratio = FIXED_SIZE[s] * 0.5f * ar;
OUTPUT_TYPE box_height_ratio = FIXED_SIZE[s] * 0.5f / ar;
for (uint r = 0; r < density_; ++r) {
for (uint c = 0; c < density_; ++c) {
OUTPUT_TYPE center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
OUTPUT_TYPE center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
FUNC_CALL(calculate_data)
(center_x_temp, center_y_temp, box_width_ratio, box_height_ratio, true, &out_index, output);
}
}
#endif
}
#endif
}
for (uint ms_idx = 0; ms_idx < MIN_SIZE_SIZE; ++ms_idx) {
box_width = MIN_SIZE[ms_idx] * 0.5f;
box_height = MIN_SIZE[ms_idx] * 0.5f;
FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
#ifdef MIN_MAX_ASPECT_RATIO_ORDER
if (MAX_SIZE_SIZE > ms_idx) {
box_width = box_height = sqrt(MIN_SIZE[ms_idx] * MAX_SIZE[ms_idx]) * 0.5f;
FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
}
if (SCALE_ALL_SIZES || (!SCALE_ALL_SIZES && (ms_idx == MIN_SIZE_SIZE - 1))) {
uint s_idx = SCALE_ALL_SIZES ? ms_idx : 0;
for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
OUTPUT_TYPE ar = ASPECT_RATIO[k];
if (fabs(ar - 1.0f) < 1e-6) {
continue;
}
ar = sqrt(ar);
box_width = MIN_SIZE[s_idx] * 0.5f * ar;
box_height = MIN_SIZE[s_idx] * 0.5f / ar;
FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
}
}
#else
if (SCALE_ALL_SIZES || (!SCALE_ALL_SIZES && (ms_idx == MIN_SIZE_SIZE - 1))) {
uint s_idx = SCALE_ALL_SIZES ? ms_idx : 0;
for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
OUTPUT_TYPE ar = ASPECT_RATIO[k];
if (fabs(ar - 1.0f) < 1e-6) {
continue;
}
ar = sqrt(ar);
box_width = MIN_SIZE[s_idx] * 0.5f * ar;
box_height = MIN_SIZE[s_idx] * 0.5f / ar;
FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
}
}
if (MAX_SIZE_SIZE > ms_idx) {
box_width = box_height = sqrt(MIN_SIZE[ms_idx] * MAX_SIZE[ms_idx]) * 0.5f;
FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
}
#endif
}
#ifdef CLIP
for (uint i = start_out_index; i < out_index; ++i) {
const uint out_idx = OUTPUT_GET_INDEX(0, 0, 0, i);
output[out_idx] = (min)((max)(output[out_idx], 0.0f), 1.0f);
}
#endif
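// The output has two feature planes: plane 0 holds the box coordinates written above,
// plane 1 is filled with the corresponding variances below.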
const uint channel_size = OUTPUT_LENGTH / 2;
#if VARIANCE_SIZE == 1
for (uint i = start_out_index; i < out_index; ++i) {
output[OUTPUT_GET_INDEX(1, i, 0, 0)] = VARIANCE[0];
}
#elif VARIANCE_SIZE == 4
for (uint i = start_out_index; i < out_index; ++i) {
for (uint j = 0; j < 4; ++j) {
output[OUTPUT_GET_INDEX(1, i * 4 + j, 0, 0)] = VARIANCE[j];
}
}
#else
#error Invalid Variances size
#endif
}

View File

@@ -116,8 +116,53 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
p.add_primitive(*op, priorBoxPrim);
}
static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v8::PriorBox>& op) {
validate_inputs_count(op, {2});
const auto inputs = p.GetInputPrimitiveIDs(op);
std::string layer_name = layer_type_name_ID(op);
const auto& attrs = op->get_attrs();
const auto output_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(0));
const auto image_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!(output_size_constant && image_size_constant)) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
const auto output_size = output_size_constant->cast_vector<int64_t>();
const auto width = output_size[0];
const auto height = output_size[1];
const cldnn::tensor output_size_tensor{cldnn::spatial(width, height)};
const auto image_size = image_size_constant->cast_vector<int64_t>();
const auto image_width = image_size[0];
const auto image_height = image_size[1];
const cldnn::tensor img_size_tensor{cldnn::spatial(image_width, image_height)};
const cldnn::prior_box prior_box{layer_name,
inputs,
output_size_tensor,
img_size_tensor,
attrs.min_size,
attrs.max_size,
attrs.aspect_ratio,
attrs.flip,
attrs.clip,
attrs.variance,
attrs.offset,
attrs.scale_all_sizes,
attrs.fixed_ratio,
attrs.fixed_size,
attrs.density,
attrs.step,
attrs.min_max_aspect_ratios_order};
p.add_primitive(*op, prior_box);
}
REGISTER_FACTORY_IMPL(v0, PriorBoxClustered);
REGISTER_FACTORY_IMPL(v0, PriorBox);
REGISTER_FACTORY_IMPL(v8, PriorBox);
} // namespace intel_gpu
} // namespace ov

View File

@@ -77,6 +77,7 @@
#include "transformations/op_conversions/softmax_decomposition.hpp"
#include <transformations/op_conversions/gelu7_downgrade.hpp>
#include <transformations/op_conversions/convert_softmax_downgrade.hpp>
#include <transformations/op_conversions/convert_prior_box_v8_to_v0.hpp>
#include <transformations/convert_precision.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/rt_info/fused_names_attribute.hpp>
@@ -160,6 +161,7 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
manager.register_pass<ngraph::pass::ConvertNMS5ToNMS9>();
manager.register_pass<ngraph::pass::ConvertNMS9ToNMSIEInternal>();
manager.register_pass<ngraph::pass::ConvertGather0D>();
manager.register_pass<ngraph::pass::ConvertPriorBox8To0, false>();
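// Note: the second template argument registers the PriorBox-8 -> PriorBox-0 downgrade
// pass as disabled, so PriorBox-8 nodes stay in the graph and are handled by the
// plugin's own OCL implementation.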
precisions_array convert_precision_list {
{ngraph::element::f64, ngraph::element::f32},

View File

@@ -0,0 +1,264 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <algorithm>
#include <intel_gpu/primitives/data.hpp>
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/prior_box.hpp>
#include <random>
#include <vector>
#include "test_utils.h"
using namespace cldnn;
using namespace ::tests;
namespace {
struct prior_box_attributes {
std::vector<float> min_sizes; // Desired min_size of prior boxes
std::vector<float> max_sizes; // Desired max_size of prior boxes
std::vector<float> aspect_ratios; // Aspect ratios of prior boxes
std::vector<float> densities; // This is the square root of the number of boxes of each type
std::vector<float> fixed_ratios; // This is an aspect ratio of a box
std::vector<float> fixed_sizes; // This is an initial box size (in pixels)
bool clip; // Clip output to [0,1]
bool flip; // Flip aspect ratios
float step; // Distance between prior box centers
float offset; // Offset to the top left corner of each cell
std::vector<float> variances; // Values to adjust prior boxes with
bool scale_all_sizes; // Scale all sizes
bool min_max_aspect_ratios_order; // Order of output prior box
};
template <class InputType, class OutputType>
using prior_box_param = std::tuple<format, // Input and output format
std::vector<InputType>, // output_size
std::vector<InputType>, // image_size
prior_box_attributes, // attributes
std::vector<OutputType>>; // expected values
template <class InputType, class OutputType>
class PriorBoxGPUTest : public ::testing::TestWithParam<prior_box_param<InputType, OutputType>> {
public:
void SetUp() override {
const auto input_data_type = type_to_data_type<InputType>::value;
const auto output_data_type = type_to_data_type<OutputType>::value;
const auto plain_format = format::bfyx;
format::type target_format;
std::vector<InputType> output_size;
std::vector<InputType> image_size;
prior_box_attributes attrs;
std::vector<OutputType> expected_values;
auto &engine = get_test_engine();
std::tie(target_format, output_size, image_size, attrs, expected_values) = this->GetParam();
const auto output_size_input = engine.allocate_memory({input_data_type, plain_format, tensor{2}});
const auto image_size_input = engine.allocate_memory({input_data_type, plain_format, tensor{2}});
const cldnn::tensor output_size_tensor{cldnn::spatial(output_size[0], output_size[1])};
const cldnn::tensor img_size_tensor{cldnn::spatial(image_size[0], image_size[1])};
topology topo;
topo.add(data("output_size", output_size_input));
topo.add(reorder("reordered_output_size", "output_size", target_format, input_data_type));
topo.add(data("image_size", image_size_input));
topo.add(reorder("reordered_image_size", "image_size", target_format, input_data_type));
set_values<InputType>(output_size_input, output_size);
set_values<InputType>(image_size_input, image_size);
std::vector<primitive_id> inputs{"reordered_output_size", "reordered_image_size"};
const auto prior_box = cldnn::prior_box("blocked_prior_box",
inputs,
output_size_tensor,
img_size_tensor,
attrs.min_sizes,
attrs.max_sizes,
attrs.aspect_ratios,
attrs.flip,
attrs.clip,
attrs.variances,
attrs.offset,
attrs.scale_all_sizes,
attrs.fixed_ratios,
attrs.fixed_sizes,
attrs.densities,
attrs.step,
attrs.min_max_aspect_ratios_order);
topo.add(prior_box);
topo.add(reorder("prior_box", "blocked_prior_box", plain_format, output_data_type));
build_options bo;
bo.set_option(build_option::optimize_data(false));
network network(engine, topo, bo);
const auto outputs = network.execute();
const auto output = outputs.at("prior_box").get_memory();
cldnn::mem_lock<OutputType> output_ptr(output, get_test_stream());
ASSERT_EQ(output_ptr.size(), expected_values.size());
for (size_t i = 0; i < output_ptr.size(); ++i) {
EXPECT_NEAR(output_ptr[i], expected_values[i], 2e-3)
<< "target_format=" << fmt_to_str(target_format) << ", i=" << i;
}
}
};
using prior_box_test_i32_f32 = PriorBoxGPUTest<int32_t, float>;
TEST_P(prior_box_test_i32_f32, prior_box_test_i32_f32) {}
INSTANTIATE_TEST_SUITE_P(
prior_box_test_all_formats,
prior_box_test_i32_f32,
testing::Combine(
testing::ValuesIn(
std::vector<format>{format::bfyx,
format::b_fs_yx_fsv16,
format::b_fs_yx_fsv32,
format::bs_fs_yx_bsv16_fsv16,
format::bs_fs_yx_bsv32_fsv16,
format::bs_fs_yx_bsv32_fsv32}),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {}, {}, {}, false, false, 0.0f, 0.0f, {}, true, false}),
testing::Values(std::vector<float>{
0.15, 0.15, 0.35, 0.35, 0.127526, 0.16835, 0.372474, 0.33165, 0.0918861, 0.0918861, 0.408114, 0.408114,
0.65, 0.15, 0.85, 0.35, 0.627526, 0.16835, 0.872474, 0.33165, 0.591886, 0.0918861, 0.908114, 0.408114,
0.15, 0.65, 0.35, 0.85, 0.127526, 0.66835, 0.372474, 0.83165, 0.0918861, 0.591886, 0.408114, 0.908114,
0.65, 0.65, 0.85, 0.85, 0.627526, 0.66835, 0.872474, 0.83165, 0.591886, 0.591886, 0.908114, 0.908114,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
})));
INSTANTIATE_TEST_SUITE_P(
prior_box_test_clip_flip,
prior_box_test_i32_f32,
testing::Combine(
testing::Values(format::bfyx),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {}, {}, {}, true, true, 0.0f, 0.0f, {}, true, false}),
testing::Values(std::vector<float>{
0.15, 0.15, 0.35, 0.35, 0.127526, 0.16835, 0.372474, 0.33165, 0.16835, 0.127526, 0.33165, 0.372474,
0.0918861, 0.0918861, 0.408114, 0.408114,
0.65, 0.15, 0.85, 0.35, 0.627526, 0.16835, 0.872474, 0.33165, 0.66835, 0.127526, 0.83165, 0.372474,
0.591886, 0.0918861, 0.908114, 0.408114,
0.15, 0.65, 0.35, 0.85, 0.127526, 0.66835, 0.372474, 0.83165, 0.16835, 0.627526, 0.33165, 0.872474,
0.0918861, 0.591886, 0.408114, 0.908114,
0.65, 0.65, 0.85, 0.85, 0.627526, 0.66835, 0.872474, 0.83165, 0.66835, 0.627526, 0.83165, 0.872474,
0.591886, 0.591886, 0.908114, 0.908114,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
})));
INSTANTIATE_TEST_SUITE_P(
prior_box_test_minmax_aspect_ratio,
prior_box_test_i32_f32,
testing::Combine(
testing::Values(format::bfyx),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {}, {}, {}, true, true, 0.0f, 0.0f, {}, true, true}),
testing::Values(std::vector<float>{
0.15, 0.15, 0.35, 0.35, 0.0918861, 0.0918861, 0.408114, 0.408114, 0.127526, 0.16835, 0.372474, 0.33165,
0.16835, 0.127526, 0.33165, 0.372474,
0.65, 0.15, 0.85, 0.35, 0.591886, 0.0918861, 0.908114, 0.408114, 0.627526, 0.16835, 0.872474, 0.33165,
0.66835, 0.127526, 0.83165, 0.372474,
0.15, 0.65, 0.35, 0.85, 0.0918861, 0.591886, 0.408114, 0.908114, 0.127526, 0.66835, 0.372474, 0.83165,
0.16835, 0.627526, 0.33165, 0.872474,
0.65, 0.65, 0.85, 0.85, 0.591886, 0.591886, 0.908114, 0.908114, 0.627526, 0.66835, 0.872474, 0.83165,
0.66835, 0.627526, 0.83165, 0.872474,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
})));
INSTANTIATE_TEST_SUITE_P(
prior_box_test_four_variances,
prior_box_test_i32_f32,
testing::Combine(
testing::Values(format::bfyx),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {}, {}, {}, false, false, 0.0f, 0.0f, {0.1, 0.2, 0.3, 0.4}, true, true}),
testing::Values(std::vector<float>{
0.15, 0.15, 0.35, 0.35, 0.0918861, 0.0918861, 0.408114, 0.408114, 0.127526, 0.16835, 0.372474, 0.33165,
0.65, 0.15, 0.85, 0.35,
0.591886, 0.0918861, 0.908114, 0.408114, 0.627526, 0.16835, 0.872474, 0.33165, 0.15, 0.65, 0.35, 0.85,
0.0918861, 0.591886, 0.408114, 0.908114,
0.127526, 0.66835, 0.372474, 0.83165, 0.65, 0.65, 0.85, 0.85, 0.591886, 0.591886, 0.908114, 0.908114,
0.627526, 0.66835, 0.872474, 0.83165,
0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4,
0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4,
0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4,
0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4
})));
INSTANTIATE_TEST_SUITE_P(
DISABLED_prior_box_test_dont_scale,
prior_box_test_i32_f32,
testing::Combine(
testing::Values(format::bfyx),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {}, {}, {}, false, false, 0.0f, 0.0f, {}, false, true}),
testing::Values(std::vector<float>{
0.15, 0.15, 0.35, 0.35, 0.0918861, 0.0918861, 0.408114, 0.408114, 0.127526, 0.16835, 0.372474, 0.33165,
0.65, 0.15, 0.85, 0.35, 0.591886, 0.0918861, 0.908114, 0.408114, 0.627526, 0.16835, 0.872474, 0.33165,
0.15, 0.65, 0.35, 0.85, 0.0918861, 0.591886, 0.408114, 0.908114, 0.127526, 0.66835, 0.372474, 0.83165,
0.65, 0.65, 0.85, 0.85, 0.591886, 0.591886, 0.908114, 0.908114, 0.627526, 0.66835, 0.872474, 0.83165,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
})));
INSTANTIATE_TEST_SUITE_P(
DISABLED_prior_box_test_fixed_density,
prior_box_test_i32_f32,
testing::Combine(
testing::Values(format::bfyx),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {0.2, 0.5}, {2.0, 3.0}, {0.1, 0.5}, true, true, 0.0f, 0.0f, {}, true, false}),
testing::Values(std::vector<float>{
0.15, 0.15, 0.35, 0.35, 0.127526, 0.16835, 0.372474, 0.33165, 0.16835, 0.127526, 0.33165, 0.372474, 0.0918861,
0.0918861, 0.408114, 0.408114, 0.65, 0.15, 0.85, 0.35, 0.627526, 0.16835, 0.872474, 0.33165, 0.66835, 0.127526,
0.83165, 0.372474, 0.591886, 0.0918861, 0.908114, 0.408114,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
})));
INSTANTIATE_TEST_SUITE_P(
DISABLED_prior_box_test_step_offset,
prior_box_test_i32_f32,
testing::Combine(
testing::Values(format::bfyx),
testing::Values(std::vector<int32_t>{2, 2}),
testing::Values(std::vector<int32_t>{10, 10}),
testing::Values(
prior_box_attributes{{2.0f}, {5.0f}, {1.5f}, {}, {}, {}, false, false, 4.0f, 1.0f, {}, false, false}),
testing::Values(std::vector<float>{
0.3, 0.3, 0.5, 0.5, 0.277526, 0.31835, 0.522474, 0.48165, 0.241886, 0.241886, 0.558114, 0.558114,
0.7, 0.3, 0.9, 0.5, 0.677526, 0.31835, 0.922475, 0.48165, 0.641886, 0.241886, 0.958114, 0.558114,
0.3, 0.7, 0.5, 0.9, 0.277526, 0.71835, 0.522474, 0.88165, 0.241886, 0.641886, 0.558114, 0.958114,
0.7, 0.7, 0.9, 0.9, 0.677526, 0.71835, 0.922475, 0.88165, 0.641886, 0.641886, 0.958114, 0.958114,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
})));
} // namespace

View File

@@ -0,0 +1,99 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/prior_box.hpp"
#include <vector>
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::I32,
InferenceEngine::Precision::U16};
const std::vector<std::vector<float>> min_sizes = {{256.0f}};
const std::vector<std::vector<float>> max_sizes = {{315.0f}};
const std::vector<std::vector<float>> aspect_ratios = {{2.0f}};
const std::vector<std::vector<float>> densities = {{1.0f}};
const std::vector<std::vector<float>> fixed_ratios = {{}};
const std::vector<std::vector<float>> fixed_sizes = {{}};
const std::vector<bool> clips = {false, true};
const std::vector<bool> flips = {false, true};
const std::vector<float> steps = {
1.0f,
};
const std::vector<float> offsets = {
0.0f,
};
const std::vector<std::vector<float>> variances = {{}};
const std::vector<bool> min_max_aspect_ratios_order = {false, true};
const std::vector<size_t> inputShape = {300, 300};
const std::vector<size_t> imageShape = {32, 32};
const auto scaleSizesParams = ::testing::Combine(::testing::ValuesIn(min_sizes),
::testing::ValuesIn(max_sizes),
::testing::ValuesIn(aspect_ratios),
::testing::ValuesIn(densities),
::testing::ValuesIn(fixed_ratios),
::testing::ValuesIn(fixed_sizes),
::testing::ValuesIn(clips),
::testing::ValuesIn(flips),
::testing::ValuesIn(steps),
::testing::ValuesIn(offsets),
::testing::ValuesIn(variances),
::testing::Values(true),
::testing::ValuesIn(min_max_aspect_ratios_order));
INSTANTIATE_TEST_SUITE_P(smoke_PriorBox8_Scale,
PriorBoxLayerTest,
::testing::Combine(scaleSizesParams,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::I32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(inputShape),
::testing::Values(imageShape),
::testing::Values(CommonTestUtils::DEVICE_GPU)),
PriorBoxLayerTest::getTestCaseName);
const auto noScaleSizesParams = ::testing::Combine(::testing::ValuesIn(min_sizes),
::testing::ValuesIn(max_sizes),
::testing::ValuesIn(aspect_ratios),
::testing::ValuesIn(densities),
::testing::ValuesIn(fixed_ratios),
::testing::ValuesIn(fixed_sizes),
::testing::ValuesIn(clips),
::testing::ValuesIn(flips),
::testing::ValuesIn(steps),
::testing::ValuesIn(offsets),
::testing::ValuesIn(variances),
::testing::Values(false),
::testing::ValuesIn(min_max_aspect_ratios_order));
INSTANTIATE_TEST_SUITE_P(smoke_PriorBox8_NoScale,
PriorBoxLayerTest,
::testing::Combine(noScaleSizesParams,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::I32),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(inputShape),
::testing::Values(imageShape),
::testing::Values(CommonTestUtils::DEVICE_GPU)),
PriorBoxLayerTest::getTestCaseName);

View File

@@ -3,6 +3,7 @@
//
#include "shared_test_classes/single_layer/prior_box.hpp"
#include <openvino/pass/constant_folding.hpp>
namespace LayerTestsDefinitions {
std::string PriorBoxLayerTest::getTestCaseName(const testing::TestParamInfo<priorBoxLayerParams>& obj) {
@@ -79,6 +80,7 @@ void PriorBoxLayerTest::SetUp() {
attributes.offset = offset;
attributes.clip = clip;
attributes.flip = flip;
attributes.scale_all_sizes = scale_all_sizes;
attributes.min_max_aspect_ratios_order = min_max_aspect_ratios_order;
auto shape_of_1 = std::make_shared<ngraph::opset3::ShapeOf>(params[0]);
@@ -88,6 +90,8 @@ void PriorBoxLayerTest::SetUp() {
shape_of_2,
attributes);
ov::pass::disable_constant_folding(priorBox);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(priorBox)};
function = std::make_shared <ngraph::Function>(results, params, "PriorBoxFunction");
}