[GPU] Enable Deconv/GroupDeconv shape infer (#15005)
* Update deconv primitive APIs for dilations and output_padding Signed-off-by: Andrew Park <andrew.park@intel.com> * Add calc_output_layouts for deconv Signed-off-by: Andrew Park <andrew.park@intel.com> * Add deconv shape inference TCs for ov_gpu_unit_tests Signed-off-by: Andrew Park <andrew.park@intel.com> * Add deconv dynamic TCs for ov_gpu_func_tests - Disable pre_replace_deconv for dynamic shape - Update get_expected_layout for deconv_node during reorder_input - Add I/O swap of weights for shape infernece Signed-off-by: Andrew Park <andrew.park@intel.com> * Add group deconv dynamic TCs for ov_gpu_func_tests - Update op creation for group deconv Signed-off-by: Andrew Park <andrew.park@intel.com> * Fix clang-format Signed-off-by: Andrew Park <andrew.park@intel.com> * Update test params with swapped I/O dimensions for ov_gpu_unit_tests Signed-off-by: Andrew Park <andrew.park@intel.com> * Update for optional output_shape const/param input support Signed-off-by: Andrew Park <andrew.park@intel.com> * Update deconv/group deconv dynamic TCs w/ output_shape for ov_gpu_func_tests Signed-off-by: Andrew Park <andrew.park@intel.com> * Update deconv/group deconv shape inference TCs w/ output_shape for ov_gpu_unit_tests Signed-off-by: Andrew Park <andrew.park@intel.com> * Apply code review Signed-off-by: Andrew Park <andrew.park@intel.com> Signed-off-by: Andrew Park <andrew.park@intel.com>
This commit is contained in:
parent
35ed485d82
commit
e0359d3085
@ -433,7 +433,7 @@ int64_t calculate_num_spatial(const ConvType* op,
|
||||
num_non_spatial_filter_dims);
|
||||
if (const auto& size = op->m_output_padding.size()) {
|
||||
NODE_VALIDATION_CHECK(op,
|
||||
num_spatial == -1 || num_spatial == size,
|
||||
num_spatial == -1 || num_spatial == static_cast<int64_t>(size),
|
||||
"Output padding should be defined for all and only spatial dimensions.");
|
||||
num_spatial = static_cast<int64_t>(size);
|
||||
}
|
||||
@ -589,8 +589,9 @@ void shape_infer(const ConvolutionBackpropData* op,
|
||||
|
||||
NODE_VALIDATION_CHECK(
|
||||
op,
|
||||
(static_cast<int64_t>(input_shape.size()) == (num_spatial + num_non_spatial_data_dims)) &&
|
||||
(static_cast<int64_t>(filters_shape.size()) == (num_spatial + num_non_spatial_filter_dims)),
|
||||
(static_cast<int64_t>(input_shape.size()) == static_cast<int64_t>(num_spatial + num_non_spatial_data_dims)) &&
|
||||
(static_cast<int64_t>(filters_shape.size()) ==
|
||||
static_cast<int64_t>(num_spatial + num_non_spatial_filter_dims)),
|
||||
"Data and filters rank do not match (data batch shape: ",
|
||||
input_shape,
|
||||
", filters shape: ",
|
||||
@ -610,7 +611,7 @@ void shape_infer(const ConvolutionBackpropData* op,
|
||||
if (input_size == 3) {
|
||||
if (output_shape_from_input.rank().is_static()) {
|
||||
NODE_VALIDATION_CHECK(op,
|
||||
output_shape_from_input.size() == num_spatial,
|
||||
static_cast<int64_t>(output_shape_from_input.size()) == num_spatial,
|
||||
"Output shape should be specified only and for all spatial dimensions.");
|
||||
for (int64_t i = 0; i < num_spatial; ++i)
|
||||
output_shape[i + num_non_spatial_data_dims] = output_shape_from_input[i];
|
||||
@ -670,8 +671,9 @@ void shape_infer(const GroupConvolutionBackpropData* op,
|
||||
|
||||
NODE_VALIDATION_CHECK(
|
||||
op,
|
||||
(static_cast<int64_t>(input_shape.size()) == (num_spatial + num_non_spatial_data_dims)) &&
|
||||
(static_cast<int64_t>(filters_shape.size()) == (num_spatial + num_non_spatial_filter_dims)),
|
||||
(static_cast<int64_t>(input_shape.size()) == static_cast<int64_t>(num_spatial + num_non_spatial_data_dims)) &&
|
||||
(static_cast<int64_t>(filters_shape.size()) ==
|
||||
static_cast<int64_t>(num_spatial + num_non_spatial_filter_dims)),
|
||||
"Data and filters rank do not match (data batch shape: ",
|
||||
input_shape,
|
||||
", filters shape: ",
|
||||
@ -713,7 +715,7 @@ void shape_infer(const GroupConvolutionBackpropData* op,
|
||||
if (input_size == 3) {
|
||||
if (output_shape_from_input.rank().is_static()) {
|
||||
NODE_VALIDATION_CHECK(op,
|
||||
output_shape_from_input.size() == num_spatial,
|
||||
static_cast<int64_t>(output_shape_from_input.size()) == num_spatial,
|
||||
"Output shape should be specified only and for all spatial dimensions.");
|
||||
for (int64_t i = 0; i < num_spatial; ++i)
|
||||
output_shape[i + num_non_spatial_data_dims] = output_shape_from_input[i];
|
||||
|
@ -32,13 +32,20 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
const std::vector<primitive_id>& bias,
|
||||
ov::Strides stride = {1, 1},
|
||||
ov::CoordinateDiff pad = {0, 0},
|
||||
ov::Strides dilations = {1, 1},
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(false),
|
||||
groups(1),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(false),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(bias) {}
|
||||
/// @brief Constructs deconvolution primitive.
|
||||
@ -58,13 +65,20 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
uint32_t groups,
|
||||
ov::Strides stride = {1, 1},
|
||||
ov::CoordinateDiff pad = {0, 0},
|
||||
ov::Strides dilations = {1, 1},
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(false),
|
||||
groups(groups),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(false),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(bias) {}
|
||||
|
||||
@ -81,13 +95,20 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
const std::vector<primitive_id>& weights,
|
||||
ov::Strides stride = {1, 1},
|
||||
ov::CoordinateDiff pad = {0, 0},
|
||||
ov::Strides dilations = {1, 1},
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(false),
|
||||
groups(1),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(false),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(std::vector<primitive_id>(0)) {}
|
||||
|
||||
@ -106,13 +127,20 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
uint32_t groups,
|
||||
ov::Strides stride = {1, 1},
|
||||
ov::CoordinateDiff pad = {0, 0},
|
||||
ov::Strides dilations = {1, 1},
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(false),
|
||||
groups(groups),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(false),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(std::vector<primitive_id>(0)) {}
|
||||
|
||||
@ -132,15 +160,22 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
const std::vector<primitive_id>& bias,
|
||||
ov::Strides stride,
|
||||
ov::CoordinateDiff pad,
|
||||
ov::Strides dilations,
|
||||
tensor output_size,
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(true),
|
||||
output_size(output_size),
|
||||
groups(1),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(false),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(bias) {}
|
||||
|
||||
@ -162,16 +197,62 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
uint32_t groups,
|
||||
ov::Strides stride,
|
||||
ov::CoordinateDiff pad,
|
||||
ov::Strides dilations,
|
||||
tensor output_size,
|
||||
bool grouped_weights_shape,
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(true),
|
||||
output_size(output_size),
|
||||
groups(groups),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(grouped_weights_shape),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(bias) {}
|
||||
|
||||
/// @brief Constructs deconvolution primitive with dynamic shape.
|
||||
/// @param id This primitive id.
|
||||
/// @param input Input primitive id.
|
||||
/// @param weights List of primitive ids containing weights data.
|
||||
/// @param bias List of primitive ids containing bias data. Provide empty vector if using next parameters without bias.
|
||||
/// @param groups Number of filter groups.
|
||||
/// @param pad Defines logical pad value added to input tensor
|
||||
/// @param stride Defines shift in input buffer between adjacent calculations of output values.
|
||||
/// @param with_activation Enables Relu activation.
|
||||
/// @param activation_slp Relu activation slope.
|
||||
/// @param output_size User-defined output data size of the primitive (w/o padding).
|
||||
deconvolution(const primitive_id& id,
|
||||
const input_info& input,
|
||||
const std::vector<primitive_id>& weights,
|
||||
const std::vector<primitive_id>& bias,
|
||||
uint32_t groups,
|
||||
ov::Strides stride,
|
||||
ov::CoordinateDiff pad,
|
||||
ov::Strides dilations,
|
||||
ov::CoordinateDiff pads_begin,
|
||||
ov::CoordinateDiff pads_end,
|
||||
ov::CoordinateDiff out_padding,
|
||||
bool grouped_weights_shape,
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(false),
|
||||
groups(groups),
|
||||
pads_begin(pads_begin),
|
||||
pads_end(pads_end),
|
||||
out_padding(out_padding),
|
||||
grouped_weights_shape(grouped_weights_shape),
|
||||
output_partial_shape({}),
|
||||
output_shape_id(""),
|
||||
weights(weights),
|
||||
bias(bias) {}
|
||||
|
||||
@ -189,14 +270,19 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
const std::vector<primitive_id>& weights,
|
||||
ov::Strides stride,
|
||||
ov::CoordinateDiff pad,
|
||||
ov::Strides dilations,
|
||||
tensor output_size,
|
||||
const padding& output_padding = padding())
|
||||
: primitive_base(id, {input}, {output_padding}),
|
||||
pad(pad),
|
||||
stride(stride),
|
||||
dilations(dilations),
|
||||
with_output_size(true),
|
||||
output_size(output_size),
|
||||
groups(1),
|
||||
pads_begin(pad.size(), 0),
|
||||
pads_end(pad.size(), 0),
|
||||
out_padding(pad.size(), 0),
|
||||
grouped_weights_shape(false),
|
||||
weights(weights),
|
||||
bias(std::vector<primitive_id>(0)) {}
|
||||
@ -219,6 +305,7 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
tensor output_size,
|
||||
ov::Strides stride = {1, 1},
|
||||
ov::CoordinateDiff pad = {0, 0},
|
||||
ov::Strides dilations = {1, 1},
|
||||
const padding& output_padding = padding()) {
|
||||
return deconvolution(id,
|
||||
input,
|
||||
@ -226,6 +313,7 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
bias,
|
||||
stride,
|
||||
pad,
|
||||
dilations,
|
||||
output_size,
|
||||
output_padding);
|
||||
}
|
||||
@ -246,12 +334,14 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
tensor output_size,
|
||||
ov::Strides stride = {1, 1},
|
||||
ov::CoordinateDiff pad = {0, 0},
|
||||
ov::Strides dilations = {1, 1},
|
||||
const padding& output_padding = padding()) {
|
||||
return deconvolution(id,
|
||||
input,
|
||||
weights,
|
||||
stride,
|
||||
pad,
|
||||
dilations,
|
||||
output_size,
|
||||
output_padding);
|
||||
}
|
||||
@ -260,25 +350,39 @@ struct deconvolution : public primitive_base<deconvolution> {
|
||||
ov::CoordinateDiff pad;
|
||||
/// @brief Defines shift in input buffer between adjacent calculations of output values.
|
||||
ov::Strides stride;
|
||||
/// @brief Defines the distance in width and height between elements in the filter.
|
||||
ov::Strides dilations;
|
||||
/// @brief Indicates that the primitive has user-defined output size (non-zero value).
|
||||
bool with_output_size;
|
||||
/// @brief User-defined output data size of the primitive (w/o padding).
|
||||
tensor output_size;
|
||||
/// @brief Number of feature groups (grouped convolution). If more than 1 then weights/bias count needs to be 1.
|
||||
uint32_t groups;
|
||||
/// @brief Defines a padding added to input image on left (x axis) and top (y axis).
|
||||
ov::CoordinateDiff pads_begin;
|
||||
/// @brief Defines a padding added to input image on right (x axis) and bottom (y axis).
|
||||
ov::CoordinateDiff pads_end;
|
||||
/// @brief Defines additional amount of paddings per each spatial axis added to output tensor.
|
||||
ov::CoordinateDiff out_padding;
|
||||
/// @param grouped_weights_shape Defines if weights tensor has explicit group dimension.
|
||||
bool grouped_weights_shape;
|
||||
/// @brief Defines spatial shape of the output.
|
||||
ov::PartialShape output_partial_shape;
|
||||
/// @brief Data primitive id containing spatial shape of the output.
|
||||
primitive_id output_shape_id;
|
||||
/// @brief List of primitive ids containing weights data.
|
||||
const primitive_id_arr weights;
|
||||
/// @brief List of primitive ids containing bias data.
|
||||
const primitive_id_arr bias;
|
||||
|
||||
|
||||
protected:
|
||||
std::vector<std::reference_wrapper<const primitive_id>> get_dependencies() const override {
|
||||
std::vector<std::reference_wrapper<const primitive_id>> ret;
|
||||
ret.reserve(weights.size() + bias.size());
|
||||
ret.reserve(weights.size() + bias.size() + (output_shape_id.empty() ? 0 : 1));
|
||||
for (auto& w : weights) ret.push_back(std::ref(w));
|
||||
for (auto& b : bias) ret.push_back(std::ref(b));
|
||||
if (!output_shape_id.empty()) ret.push_back(output_shape_id);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -8,6 +8,8 @@
|
||||
#include "json_object.h"
|
||||
#include <string>
|
||||
|
||||
#include "convolution_shape_inference.hpp"
|
||||
|
||||
using namespace ov::intel_gpu;
|
||||
|
||||
namespace cldnn {
|
||||
@ -93,6 +95,131 @@ layout deconvolution_inst::calc_output_layout(deconvolution_node const& node, ke
|
||||
return {data_type, out_fmt, output_size};
|
||||
}
|
||||
|
||||
template<typename ShapeType>
|
||||
std::vector<layout> deconvolution_inst::calc_output_layouts(deconvolution_node const& node, const kernel_impl_params& impl_param) {
|
||||
auto desc = impl_param.typed_desc<deconvolution>();
|
||||
|
||||
auto input_layout = impl_param.get_input_layout(0);
|
||||
auto weights_layout = *impl_param.weights_layout;
|
||||
weights_layout = weights_layout.convert_to_weights_layout(desc->grouped_weights_shape);
|
||||
|
||||
if (input_layout.is_dynamic())
|
||||
return {layout{ShapeType::dynamic(input_layout.get<ShapeType>().rank()), input_layout.data_type, input_layout.format}};
|
||||
|
||||
auto input_type = input_layout.data_type;
|
||||
auto output_type = input_type;
|
||||
if ((input_type == data_types::i8 || input_type == data_types::u8) && !impl_param.has_fused_primitives()) {
|
||||
output_type = data_types::f32;
|
||||
}
|
||||
|
||||
if (impl_param.has_fused_primitives()) {
|
||||
output_type = impl_param.get_fused_output_layout().data_type;
|
||||
}
|
||||
|
||||
auto strides = desc->stride;
|
||||
auto dilations = desc->dilations;
|
||||
auto pads_begin = desc->pads_begin;
|
||||
auto pads_end = desc->pads_end;
|
||||
auto output_padding = desc->out_padding;
|
||||
auto output_partial_shape = desc->output_partial_shape;
|
||||
|
||||
int32_t number_of_features = weights_layout.group() * weights_layout.ofm();
|
||||
|
||||
format out_fmt = input_layout.format;
|
||||
if (node.get_preferred_impl_type() == impl_types::onednn && node.get_preferred_output_fmt() != format::any) {
|
||||
out_fmt = node.get_preferred_output_fmt();
|
||||
}
|
||||
|
||||
if (desc->with_output_size) {
|
||||
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
|
||||
"User-defined output spatial X",
|
||||
desc->output_size.spatial[0],
|
||||
"value 0",
|
||||
0,
|
||||
"User-defined size of output layout must be positive (>= 1)");
|
||||
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
|
||||
"User-defined output spatial Y",
|
||||
desc->output_size.spatial[1],
|
||||
"value 0",
|
||||
0,
|
||||
"User-defined size of output layout must be positive (>= 1)");
|
||||
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
|
||||
"User-defined output spatial Z",
|
||||
desc->output_size.spatial[2],
|
||||
"value 0",
|
||||
0,
|
||||
"User-defined size of output layout must be positive (>= 1)");
|
||||
|
||||
tensor output_size(input_layout.batch(),
|
||||
number_of_features,
|
||||
desc->output_size.spatial[0],
|
||||
desc->output_size.spatial[1],
|
||||
desc->output_size.spatial[2]);
|
||||
return {layout{output_type, out_fmt, output_size}};
|
||||
}
|
||||
|
||||
std::vector<ShapeType> input_shapes = {
|
||||
input_layout.get<ShapeType>()
|
||||
};
|
||||
std::vector<ShapeType> output_shapes = {ShapeType()};
|
||||
auto& memory_deps = impl_param.memory_deps;
|
||||
// Dimensions order of weights is IOYX, but the selected format is OIYX by default and I/O dimensions are
|
||||
// already swapped when creating constant op. So we need to swap I/O dimensions according to the original
|
||||
// dimension order for shape inference.
|
||||
auto weights_pshape = weights_layout.get_partial_shape();
|
||||
if (desc->groups > 1) {
|
||||
ov::op::v1::GroupConvolutionBackpropData op;
|
||||
op.set_strides(strides);
|
||||
op.set_dilations(dilations);
|
||||
op.set_output_padding(output_padding);
|
||||
op.set_auto_pad(ov::op::PadType::EXPLICIT);
|
||||
std::swap(weights_pshape[2], weights_pshape[1]);
|
||||
input_shapes.push_back(weights_pshape);
|
||||
if (output_partial_shape.size() != 0) {
|
||||
ShapeType output_shape = ov::Shape{ output_partial_shape.size() };
|
||||
input_shapes.push_back(output_shape);
|
||||
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_partial_shape, input_shapes, output_shapes);
|
||||
} else if (memory_deps.count(2)) {
|
||||
auto mem = memory_deps.at(2);
|
||||
std::vector<int64_t> dims = read_vector<int64_t>(mem, impl_param.prog->get_stream());
|
||||
ov::Shape shape(dims.begin(), dims.end());
|
||||
ov::PartialShape output_pshape(shape);
|
||||
ShapeType output_shape = ov::Shape{ output_pshape.size() };
|
||||
input_shapes.push_back(output_shape);
|
||||
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_pshape, input_shapes, output_shapes);
|
||||
} else {
|
||||
ov::op::v1::shape_infer(&op, pads_begin, pads_end, ov::PartialShape{}, input_shapes, output_shapes);
|
||||
}
|
||||
} else {
|
||||
ov::op::v1::ConvolutionBackpropData op;
|
||||
op.set_strides(strides);
|
||||
op.set_dilations(dilations);
|
||||
op.set_output_padding(output_padding);
|
||||
op.set_auto_pad(ov::op::PadType::EXPLICIT);
|
||||
std::swap(weights_pshape[1], weights_pshape[0]);
|
||||
input_shapes.push_back(weights_pshape);
|
||||
if (output_partial_shape.size() != 0) {
|
||||
ShapeType output_shape = ov::Shape{ output_partial_shape.size() };
|
||||
input_shapes.push_back(output_shape);
|
||||
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_partial_shape, input_shapes, output_shapes);
|
||||
} else if (memory_deps.count(2)) {
|
||||
auto mem = memory_deps.at(2);
|
||||
std::vector<int64_t> dims = read_vector<int64_t>(mem, impl_param.prog->get_stream());
|
||||
ov::Shape shape(dims.begin(), dims.end());
|
||||
ov::PartialShape output_pshape(shape);
|
||||
ShapeType output_shape = ov::Shape{ output_pshape.size() };
|
||||
input_shapes.push_back(output_shape);
|
||||
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_pshape, input_shapes, output_shapes);
|
||||
} else {
|
||||
ov::op::v1::shape_infer(&op, pads_begin, pads_end, ov::PartialShape{}, input_shapes, output_shapes);
|
||||
}
|
||||
}
|
||||
return {layout{output_shapes[0], output_type, out_fmt.value}};
|
||||
}
|
||||
|
||||
template std::vector<layout> deconvolution_inst::calc_output_layouts<ov::PartialShape>(deconvolution_node const& node,
|
||||
const kernel_impl_params& impl_param);
|
||||
|
||||
std::string deconvolution_inst::to_string(deconvolution_node const& node) {
|
||||
auto desc = node.get_primitive();
|
||||
auto strd = desc->stride;
|
||||
@ -122,6 +249,8 @@ std::string deconvolution_inst::to_string(deconvolution_node const& node) {
|
||||
|
||||
deconvolution_inst::typed_primitive_inst(network& network, deconvolution_node const& node)
|
||||
: parent(network, node) {
|
||||
if (node.is_dynamic())
|
||||
return;
|
||||
auto stride = argument->stride;
|
||||
auto pad = argument->pad;
|
||||
|
||||
|
@ -27,6 +27,8 @@ void pre_replace_deconv::run(program& p) {
|
||||
auto& node = (*node_itr).second;
|
||||
// find deconvolution primitives with stride 1 and change them to convolution with trasposed weights
|
||||
if (node->is_type<deconvolution>()) {
|
||||
if (node->is_dynamic())
|
||||
continue;
|
||||
if (!p.get_options().get<build_option_type::optimize_data>()->enabled())
|
||||
continue;
|
||||
|
||||
|
@ -526,6 +526,7 @@ void prepare_primitive_fusing::fuse_bias(program &p) {
|
||||
desc->groups,
|
||||
desc->stride,
|
||||
desc->pad,
|
||||
desc->dilations,
|
||||
deconv.get_output_layout().get_tensor(),
|
||||
desc->grouped_weights_shape);
|
||||
|
||||
|
@ -32,6 +32,8 @@ public:
|
||||
|
||||
bool bias_term() const { return !get_primitive()->bias.empty();}
|
||||
|
||||
std::vector<size_t> get_shape_infer_dependencies() const override { return {2}; }
|
||||
|
||||
using parent::get_kernel_impl_params;
|
||||
std::unique_ptr<kernel_impl_params> get_kernel_impl_params(const std::vector<layout>& in_layouts, const std::vector<layout>& out_layouts) const override {
|
||||
auto params = parent::get_kernel_impl_params(in_layouts, out_layouts);
|
||||
@ -41,7 +43,6 @@ public:
|
||||
return params;
|
||||
}
|
||||
|
||||
|
||||
private:
|
||||
uint32_t groups;
|
||||
};
|
||||
@ -54,6 +55,8 @@ class typed_primitive_inst<deconvolution> : public typed_primitive_inst_base<dec
|
||||
using parent::parent;
|
||||
|
||||
public:
|
||||
template<typename ShapeType>
|
||||
static std::vector<layout> calc_output_layouts(deconvolution_node const& node, const kernel_impl_params& impl_param);
|
||||
static layout calc_output_layout(deconvolution_node const& node, kernel_impl_params const& impl_param);
|
||||
static std::string to_string(deconvolution_node const& node);
|
||||
|
||||
|
@ -1171,9 +1171,20 @@ layout layout_optimizer::get_expected_layout(layout const& current_layout,
|
||||
deconvolution_node const& node,
|
||||
layout const& output_or_weights_layout) {
|
||||
auto prim = node.get_primitive();
|
||||
auto expected_tensor = current_layout.get_tensor();
|
||||
auto expected_data_type = current_layout.data_type;
|
||||
auto expected_format = current_layout.format;
|
||||
auto input_layout = node.get_dependency(0).get_output_layout();
|
||||
auto output_layout = node.calc_output_layout();
|
||||
|
||||
if (input_layout.is_dynamic() || output_layout.is_dynamic()) {
|
||||
if (input_layout.get_partial_shape().size() <= 4)
|
||||
expected_format = format::b_fs_yx_fsv16;
|
||||
else if (input_layout.get_partial_shape().size() == 5)
|
||||
expected_format = format::b_fs_zyx_fsv16;
|
||||
return layout(current_layout.get_partial_shape(), expected_data_type, expected_format);
|
||||
}
|
||||
|
||||
auto expected_tensor = current_layout.get_tensor();
|
||||
bool use_onednn_impls = _optimization_attributes.use_onednn_impls;
|
||||
|
||||
if (use_onednn_impls && is_node_for_onednn(node)) {
|
||||
|
@ -161,26 +161,56 @@ static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ng
|
||||
|
||||
auto strides = op->get_strides();
|
||||
auto pads_begin = op->get_pads_begin();
|
||||
auto pads_end = op->get_pads_end();
|
||||
auto output_padding = op->get_output_padding();
|
||||
|
||||
// Extend 1d vectors to 2d as 1d can't be handled properly by the graph optimizer for now
|
||||
strides.resize(std::max<size_t>(2, strides.size()), 1);
|
||||
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
|
||||
|
||||
auto deconvPrim = cldnn::deconvolution(layerName,
|
||||
inputs[0],
|
||||
weights,
|
||||
{},
|
||||
1,
|
||||
strides,
|
||||
pads_begin,
|
||||
tensor_from_dims(op->get_output_tensor(0).get_shape()),
|
||||
weights_have_group_dim);
|
||||
|
||||
p.add_primitive(*op, deconvPrim);
|
||||
if (!op->is_dynamic()) {
|
||||
// Extend 1d vectors to 2d as 1d can't be handled properly by the graph optimizer for now
|
||||
strides.resize(std::max<size_t>(2, strides.size()), 1);
|
||||
dilations.resize(std::max<size_t>(2, strides.size()), 1);
|
||||
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
|
||||
auto deconvPrim = cldnn::deconvolution(layerName,
|
||||
inputs[0],
|
||||
weights,
|
||||
{},
|
||||
1,
|
||||
strides,
|
||||
pads_begin,
|
||||
dilations,
|
||||
tensor_from_dims(op->get_output_tensor(0).get_shape()),
|
||||
weights_have_group_dim);
|
||||
p.add_primitive(*op, deconvPrim);
|
||||
} else {
|
||||
auto deconvPrim = cldnn::deconvolution(layerName,
|
||||
inputs[0],
|
||||
weights,
|
||||
{},
|
||||
1,
|
||||
strides,
|
||||
pads_begin,
|
||||
dilations,
|
||||
pads_begin,
|
||||
pads_end,
|
||||
output_padding,
|
||||
weights_have_group_dim);
|
||||
if (op->get_input_size() == 3) {
|
||||
auto output_shape_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
|
||||
if (output_shape_constant) {
|
||||
auto output_shape = output_shape_constant->cast_vector<int64_t>();
|
||||
ov::Shape shape(output_shape.begin(), output_shape.end());
|
||||
ov::PartialShape output_pshape(shape);
|
||||
deconvPrim.output_partial_shape = output_pshape;
|
||||
} else {
|
||||
deconvPrim.output_shape_id = inputs[2].pid;
|
||||
}
|
||||
}
|
||||
p.add_primitive(*op, deconvPrim);
|
||||
}
|
||||
}
|
||||
|
||||
static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolutionBackpropData>& op) {
|
||||
validate_inputs_count(op, {2});
|
||||
// 3rd input is an optional output shape
|
||||
validate_inputs_count(op, {2, 3});
|
||||
auto inputs = p.GetInputInfo(op);
|
||||
std::string layerName = layer_type_name_ID(op);
|
||||
|
||||
@ -221,22 +251,52 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
|
||||
|
||||
auto strides = op->get_strides();
|
||||
auto pads_begin = op->get_pads_begin();
|
||||
auto pads_end = op->get_pads_end();
|
||||
auto output_padding = op->get_output_padding();
|
||||
|
||||
// Extend 1d vectors to 2d as 1d can't be handled properly by the graph optimizer for now
|
||||
strides.resize(std::max<size_t>(2, strides.size()), 1);
|
||||
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
|
||||
if (!op->is_dynamic()) {
|
||||
// Extend 1d vectors to 2d as 1d can't be handled properly by the graph optimizer for now
|
||||
strides.resize(std::max<size_t>(2, strides.size()), 1);
|
||||
dilations.resize(std::max<size_t>(2, strides.size()), 1);
|
||||
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
|
||||
|
||||
auto deconvPrim = cldnn::deconvolution(layerName,
|
||||
inputs[0],
|
||||
weights,
|
||||
{},
|
||||
groups,
|
||||
strides,
|
||||
pads_begin,
|
||||
tensor_from_dims(op->get_output_tensor(0).get_shape()),
|
||||
weights_have_group_dim);
|
||||
|
||||
p.add_primitive(*op, deconvPrim);
|
||||
auto deconvPrim = cldnn::deconvolution(layerName,
|
||||
inputs[0],
|
||||
weights,
|
||||
{},
|
||||
groups,
|
||||
strides,
|
||||
pads_begin,
|
||||
dilations,
|
||||
tensor_from_dims(op->get_output_tensor(0).get_shape()),
|
||||
weights_have_group_dim);
|
||||
p.add_primitive(*op, deconvPrim);
|
||||
} else {
|
||||
auto deconvPrim = cldnn::deconvolution(layerName,
|
||||
inputs[0],
|
||||
weights,
|
||||
{},
|
||||
groups,
|
||||
strides,
|
||||
pads_begin,
|
||||
dilations,
|
||||
pads_begin,
|
||||
pads_end,
|
||||
output_padding,
|
||||
weights_have_group_dim);
|
||||
if (op->get_input_size() == 3) {
|
||||
auto output_shape_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
|
||||
if (output_shape_constant) {
|
||||
auto output_shape = output_shape_constant->cast_vector<int64_t>();
|
||||
ov::Shape shape(output_shape.begin(), output_shape.end());
|
||||
ov::PartialShape output_pshape(shape);
|
||||
deconvPrim.output_partial_shape = output_pshape;
|
||||
} else {
|
||||
deconvPrim.output_shape_id = inputs[2].pid;
|
||||
}
|
||||
}
|
||||
p.add_primitive(*op, deconvPrim);
|
||||
}
|
||||
}
|
||||
|
||||
static void DeformableConvolutionImpl(Program& p,
|
||||
|
@ -0,0 +1,152 @@
|
||||
// Copyright (C) 2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "test_utils.h"
|
||||
|
||||
#include <intel_gpu/primitives/input_layout.hpp>
|
||||
#include <intel_gpu/primitives/data.hpp>
|
||||
|
||||
#include "deconvolution_inst.h"
|
||||
|
||||
#include "program_wrapper.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <algorithm>
|
||||
|
||||
using namespace cldnn;
|
||||
using namespace ::tests;
|
||||
|
||||
namespace shape_infer_tests {
|
||||
|
||||
struct deconvolution_test_params {
|
||||
ov::PartialShape input_shape;
|
||||
ov::PartialShape weight_shape;
|
||||
uint32_t groups;
|
||||
ov::Strides stride;
|
||||
ov::Strides dilations;
|
||||
ov::CoordinateDiff pads_begin;
|
||||
ov::CoordinateDiff pads_end;
|
||||
ov::CoordinateDiff output_padding;
|
||||
bool with_output_shape;
|
||||
ov::PartialShape output_pshape;
|
||||
layout expected_layout;
|
||||
};
|
||||
|
||||
// Parameterized fixture for deconvolution shape-inference unit tests.
class deconvolution_si_test : public testing::TestWithParam<deconvolution_test_params> { };

// Builds a minimal cldnn program (input -> deconvolution <- weights) and checks
// that deconvolution_inst::calc_output_layouts produces the expected layout
// without executing the network.
TEST_P(deconvolution_si_test, shape_infer) {
    auto p = GetParam();

    auto& engine = get_test_engine();

    auto input_data_layout = layout{p.input_shape, data_types::f32, format::bfyx};
    auto weight_layout = layout{p.weight_shape, data_types::f32, format::bfyx};

    std::vector<cldnn::primitive_id> weights = {"weight"};
    std::vector<cldnn::primitive_id> bias = {};  // no bias input in these cases

    auto input_prim = std::make_shared<input_layout>("data", input_data_layout);
    auto weight_prim = std::make_shared<input_layout>("weight", weight_layout);
    // NOTE(review): p.pads_begin is passed twice — presumably the first
    // occurrence fills a legacy 'pad' argument and the later pair is
    // pads_begin/pads_end; confirm against the deconvolution primitive ctor.
    auto deconv_prim = std::make_shared<deconvolution>("deconv", input_info("data"), weights, bias, p.groups,
                                                       p.stride, p.pads_begin, p.dilations, p.pads_begin,
                                                       p.pads_end, p.output_padding, false);
    if (p.with_output_shape) {
        // Explicit spatial output shape overrides the computed one.
        deconv_prim->output_partial_shape = p.output_pshape;
    }

    cldnn::program prog(engine);

    auto& input_node = prog.get_or_create(input_prim);
    auto& weight_node = prog.get_or_create(weight_prim);
    auto& deconv_node = prog.get_or_create(deconv_prim);
    program_wrapper::add_connection(prog, input_node, deconv_node);
    program_wrapper::add_connection(prog, weight_node, deconv_node);

    auto params = deconv_node.get_kernel_impl_params();
    auto res = deconvolution_inst::calc_output_layouts<ov::PartialShape>(deconv_node, *params);

    ASSERT_EQ(res.size(), 1);
    ASSERT_EQ(res[0], p.expected_layout);
}
|
||||
|
||||
// Expected spatial size follows the ConvolutionBackpropData formula:
//   out = (in - 1) * stride - pads_begin - pads_end
//         + dilation * (kernel - 1) + 1 + output_padding
INSTANTIATE_TEST_SUITE_P(smoke, deconvolution_si_test,
    testing::ValuesIn(std::vector<deconvolution_test_params>{
        // 2d deconv: (224-1)*2 - 1 - 1 + 3 = 447
        {
            ov::PartialShape{1, 20, 224, 224}, ov::PartialShape{10, 20, 3, 3},
            1, {2, 2}, {1, 1},
            std::vector<ptrdiff_t>{1, 1}, std::vector<ptrdiff_t>{1, 1},
            std::vector<ptrdiff_t>{0, 0},
            false, {},
            layout{ov::PartialShape{1, 10, 447, 447}, data_types::f32, format::bfyx}
        },
        // 2d deconv with output padding: (2-1)*3 + 3 + 2 = 8
        {
            ov::PartialShape{1, 20, 2, 2}, ov::PartialShape{10, 20, 3, 3},
            1, {3, 3}, {1, 1},
            std::vector<ptrdiff_t>{0, 0}, std::vector<ptrdiff_t>{0, 0},
            std::vector<ptrdiff_t>{2, 2},
            false, {},
            layout{ov::PartialShape{1, 10, 8, 8}, data_types::f32, format::bfyx}
        },
        // 2d deconv with dynamic shape: rank is known, dims stay dynamic
        {
            ov::PartialShape::dynamic(4), ov::PartialShape{10, 20, 3, 3},
            1, {3, 3}, {1, 1},
            std::vector<ptrdiff_t>{0, 0}, std::vector<ptrdiff_t>{0, 0},
            std::vector<ptrdiff_t>{2, 2},
            false, {},
            layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}
        },
        // 1d groupdeconv: weights {G=4, O/G=2, I/G=5, k=3} -> 8 output channels
        {
            ov::PartialShape{1, 20, 224}, ov::PartialShape{4, 2, 5, 3},
            4, {2}, {1},
            std::vector<ptrdiff_t>{1}, std::vector<ptrdiff_t>{1},
            std::vector<ptrdiff_t>{0},
            false, {},
            layout{ov::PartialShape{1, 8, 447}, data_types::f32, format::bfyx}
        },
        // 2d groupdeconv: weights {G=4, O/G=2, I/G=5, 3, 3} -> 8 output channels
        {
            ov::PartialShape{1, 20, 224, 224}, ov::PartialShape{4, 2, 5, 3, 3},
            4, {2, 2}, {1, 1},
            std::vector<ptrdiff_t>{1, 1}, std::vector<ptrdiff_t>{1, 1},
            std::vector<ptrdiff_t>{0, 0},
            false, {},
            layout{ov::PartialShape{1, 8, 447, 447}, data_types::f32, format::bfyx}
        },
    }));
|
||||
|
||||
// Cases with an explicit spatial output shape: the expected layout's spatial
// dims must match output_pshape exactly, regardless of stride/padding.
INSTANTIATE_TEST_SUITE_P(smoke_with_output_shape, deconvolution_si_test,
    testing::ValuesIn(std::vector<deconvolution_test_params>{
        // 2d deconv with output shape
        {
            ov::PartialShape{1, 20, 224, 224}, ov::PartialShape{10, 20, 3, 3},
            1, {2, 2}, {1, 1},
            std::vector<ptrdiff_t>{1, 1}, std::vector<ptrdiff_t>{1, 1},
            std::vector<ptrdiff_t>{0, 0},
            true, ov::PartialShape{500, 500},
            layout{ov::PartialShape{1, 10, 500, 500}, data_types::f32, format::bfyx}
        },
        // 1d groupdeconv with output shape
        {
            ov::PartialShape{1, 20, 224}, ov::PartialShape{4, 2, 5, 3},
            4, {2}, {1},
            std::vector<ptrdiff_t>{1}, std::vector<ptrdiff_t>{1},
            std::vector<ptrdiff_t>{0},
            true, ov::PartialShape{500},
            layout{ov::PartialShape{1, 8, 500}, data_types::f32, format::bfyx}
        },
        // 2d groupdeconv with output shape
        {
            ov::PartialShape{1, 20, 224, 224}, ov::PartialShape{4, 2, 5, 3, 3},
            4, {2, 2}, {1, 1},
            std::vector<ptrdiff_t>{1, 1}, std::vector<ptrdiff_t>{1, 1},
            std::vector<ptrdiff_t>{0, 0},
            true, ov::PartialShape{500, 500},
            layout{ov::PartialShape{1, 8, 500, 500}, data_types::f32, format::bfyx}
        },
    }));
|
||||
|
||||
} // shape_infer_tests
|
@ -1715,7 +1715,7 @@ TYPED_TEST(deconvolution_basic, basic_f16_k9x9_s2x2_pad4x4) {
|
||||
reorder("reordered_input", input_info("input"), this->input_layout_format, data_types::f16),
|
||||
data("weights", weights),
|
||||
data("biases", biases),
|
||||
deconvolution("deconv", input_info("reordered_input"), { "weights" }, { "biases" }, { 2, 2 }, { 4, 4 }, tensor{ 1, 1, 32, 32 }),
|
||||
deconvolution("deconv", input_info("reordered_input"), { "weights" }, { "biases" }, { 2, 2 }, { 4, 4 }, { 1, 1 }, tensor{ 1, 1, 32, 32 }),
|
||||
reorder("plane_output", input_info("deconv"), format::bfyx, cldnn::data_types::f16)
|
||||
);
|
||||
|
||||
|
@ -0,0 +1,340 @@
|
||||
// Copyright (C) 2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <tuple>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "ngraph_functions/utils/ngraph_helpers.hpp"
|
||||
#include "ngraph_functions/builders.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_layer/convolution_backprop_data.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/core/preprocess/pre_post_process.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
|
||||
// Kernel/stride/pads/dilation/out-channels/pad-type/output-padding tuple
// shared with the static ConvolutionBackpropData layer tests.
using DeconvSpecParams = LayerTestsDefinitions::convBackpropDataSpecificParams;

// Per-case input description: the data shape plus how the optional
// 'output_shape' input is supplied and which values it takes per inference.
using DeconvInputData = std::tuple<InputShape,                          // data shape
                                   ngraph::helpers::InputLayerType,     // 'output_shape' input type
                                   std::vector<std::vector<int32_t>>>;  // values for 'output_shape'

// Full test parameter set: op params, input data, precision, device, config.
using DeconvLayerTestParamsSet = std::tuple<DeconvSpecParams,
                                            DeconvInputData,
                                            ElementType,
                                            LayerTestsUtils::TargetDevice,
                                            std::map<std::string, std::string>>;
|
||||
|
||||
// Dynamic-shape functional test for ConvolutionBackpropData (deconvolution)
// on GPU. Covers both the 2-input form (data + weights) and the 3-input form
// with an explicit 'output_shape' input fed either as a Constant or as a
// runtime Parameter.
class DeconvolutionLayerGPUTest : public testing::WithParamInterface<DeconvLayerTestParamsSet>,
                                  virtual public SubgraphBaseTest {
public:
    // Builds a human-readable, unique test name from the parameter tuple.
    static std::string getTestCaseName(testing::TestParamInfo<DeconvLayerTestParamsSet> obj) {
        DeconvSpecParams basicParamsSet;
        DeconvInputData inputData;
        ElementType prec;
        std::string targetDevice;
        std::map<std::string, std::string> additionalConfig;
        std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param;

        ngraph::op::PadType padType;
        InferenceEngine::SizeVector kernel, stride, dilation;
        std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
        size_t convOutChannels;
        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet;

        InputShape inputShape;
        ngraph::helpers::InputLayerType outShapeType;
        std::vector<std::vector<int32_t>> outShapeData;
        std::tie(inputShape, outShapeType, outShapeData) = inputData;

        std::ostringstream result;
        result << "IS=";
        result << CommonTestUtils::partialShape2str({inputShape.first}) << "_";
        result << "TS=";
        for (const auto& shape : inputShape.second) {
            result << "(";
            result << CommonTestUtils::vec2str(shape);
            result << ")_";
        }
        result << "PRC=" << prec << "_";
        result << "K=" << CommonTestUtils::vec2str(kernel) << "_";
        result << "S=" << CommonTestUtils::vec2str(stride) << "_";
        result << "PB=" << CommonTestUtils::vec2str(padBegin) << "_";
        result << "PE=" << CommonTestUtils::vec2str(padEnd) << "_";
        result << "D=" << CommonTestUtils::vec2str(dilation) << "_";
        result << "OP=" << CommonTestUtils::vec2str(outPadding) << "_";
        result << "O=" << convOutChannels << "_";
        result << "AP=" << padType << "_";
        result << "OUT_SH=" << outShapeType << "_";
        result << "OUT_D=";
        for (const auto& data : outShapeData) {
            result << "(";
            result << CommonTestUtils::vec2str(data);
            result << ")_";
        }
        result << "config=(";
        // Iterate by const reference: a by-value loop variable would copy
        // every std::pair<const std::string, std::string> of the map.
        for (const auto& configEntry : additionalConfig) {
            result << configEntry.first << ", " << configEntry.second << ":";
        }
        result << ")_";
        result << "trgDev=" << targetDevice;

        return result.str();
    }

    // Fills the inference inputs for one target-shape iteration.
    // Input 1 (when present) is the 'output_shape' tensor, taken verbatim
    // from outShapeData; input 0 is random data.
    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
        inputs.clear();
        const auto& funcInputs = function->inputs();
        // size_t index avoids a signed/unsigned comparison with .size().
        for (size_t i = 0; i < funcInputs.size(); ++i) {
            const auto& funcInput = funcInputs[i];
            ov::Tensor tensor;

            if (i == 1) {
                tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i], outShapeData[inferRequestNum].data());
            } else {
                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2560, 0, 256);
            }

            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
        }
        inferRequestNum++;
    }

    // Rebuilds the reference model for the current static shapes.
    void init_ref_function(std::shared_ptr<ov::Model> &funcRef, const std::vector<ov::Shape>& targetInputStaticShapes) override {
        if (function->get_parameters().size() == 1) {
            ngraph::helpers::resize_function(funcRef, targetInputStaticShapes);
        } else {
            // WA: output_shape depends on 3rd deconvolution input data
            // but the reference implementation doesn't implement shape inference
            // so we need to build a new ngraph function and replace the 3rd input parameter with a constant
            // to get valid output shapes
            funcRef = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT);
        }
    }

    // Compares plugin outputs against the reference, dropping the
    // 'output_shape' parameter ("param_1") first because the reference graph
    // replaces it with a constant (see init_ref_function).
    void validate() override {
        auto actualOutputs = get_plugin_outputs();
        if (function->get_parameters().size() == 2) {
            auto pos = std::find_if(inputs.begin(), inputs.end(),
                [](const std::pair<std::shared_ptr<ov::Node>, ov::Tensor> &params) {
                    return params.first->get_friendly_name() == "param_1";
                });
            IE_ASSERT(pos != inputs.end());
            inputs.erase(pos);
        }
        auto expectedOutputs = calculate_refs();
        if (expectedOutputs.empty()) {
            return;
        }
        ASSERT_EQ(actualOutputs.size(), expectedOutputs.size())
            << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();

        abs_threshold = 1e-2f;
        compare(expectedOutputs, actualOutputs);
    }

    // Applies the requested input/output element types. Only the data input
    // (index 0) is converted; the 'output_shape' input must stay i32.
    void configure_model() override {
        ov::preprocess::PrePostProcessor p(function);
        {
            // Only parameter 0 (the data input) gets the precision override.
            const auto& params = function->get_parameters();
            if (!params.empty() && inType != ov::element::Type_t::undefined) {
                p.input(0).tensor().set_element_type(inType);
            }
        }
        {
            auto results = function->get_results();
            for (size_t i = 0; i < results.size(); i++) {
                if (outType != ov::element::Type_t::undefined) {
                    p.output(i).tensor().set_element_type(outType);
                }
            }
        }
        function = p.build();
    }

    // Builds the tested ngraph function. When outShapeData is non-empty the
    // deconvolution gets a 3rd 'output_shape' input: a Parameter (dynamic
    // case) or a Constant (reference case).
    std::shared_ptr<ov::Model> createGraph(const std::vector<ov::PartialShape>& inShapes, ngraph::helpers::InputLayerType outShapeType) {
        auto params = ngraph::builder::makeDynamicParams(prec, {inShapes.front()});
        std::shared_ptr<ov::Node> outShapeNode;
        if (!outShapeData.empty()) {
            if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
                IE_ASSERT(inputDynamicShapes.size() == 2);
                auto outShapeParam = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i32, inputDynamicShapes.back());
                params.push_back(outShapeParam);
                outShapeNode = outShapeParam;
            } else {
                outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
            }
        }

        // Names are relied upon by validate() ("param_1" is the output_shape input).
        for (size_t i = 0; i < params.size(); i++) {
            params[i]->set_friendly_name(std::string("param_") + std::to_string(i));
        }

        std::shared_ptr<ov::Node> deconv;
        if (!outShapeData.empty()) {
            IE_ASSERT(outShapeNode != nullptr);
            deconv = ngraph::builder::makeConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin,
                                                                  padEnd, dilation, padType, convOutChannels);
        } else {
            deconv = ngraph::builder::makeConvolutionBackpropData(params[0], prec, kernel, stride, padBegin,
                                                                  padEnd, dilation, padType, convOutChannels, false, outPadding);
        }

        ngraph::ResultVector results;
        // size_t index: get_output_size() is unsigned.
        for (size_t i = 0; i < deconv->get_output_size(); i++)
            results.push_back(std::make_shared<ngraph::opset1::Result>(deconv->output(i)));

        return std::make_shared<ngraph::Function>(results, params, "Deconv");
    }

protected:
    void SetUp() override {
        DeconvSpecParams basicParamsSet;
        DeconvInputData inputData;
        std::map<std::string, std::string> additionalConfig;
        std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam();

        InputShape inputShape;
        ngraph::helpers::InputLayerType outShapeType;
        std::tie(inputShape, outShapeType, outShapeData) = inputData;

        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet;

        std::vector<InputShape> paramsShapes;
        paramsShapes.push_back(inputShape);
        if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
            // The output_shape input is a 1D tensor with one entry per spatial dim.
            const auto outShapeDims = ov::Shape{outShapeData.front().size()};
            paramsShapes.push_back(InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
        }

        init_input_shapes(paramsShapes);

        function = createGraph(inputDynamicShapes, outShapeType);
    }

private:
    ElementType prec;                               // tested data precision
    ngraph::op::PadType padType;
    InferenceEngine::SizeVector kernel, stride, dilation;
    std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
    size_t convOutChannels;
    ngraph::helpers::InputLayerType outShapeType;
    std::vector<std::vector<int32_t>> outShapeData;  // 'output_shape' values per request
    size_t inferRequestNum = 0;                      // index into outShapeData
};
|
||||
|
||||
// Runs the parameterized deconvolution case end-to-end on GPU and compares
// against the ngraph reference (see validate()).
TEST_P(DeconvolutionLayerGPUTest, CompareWithRefs) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    run();
}
|
||||
|
||||
namespace {

std::map<std::string, std::string> emptyAdditionalConfig;

// Empty output padding: the builder falls back to its default (zeros).
const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = { {} };

/* ============= Deconvolution params ============= */
const InferenceEngine::SizeVector numOutChannels = { 6 };

/* ============= Deconvolution params (2D) ============= */
const std::vector<InferenceEngine::SizeVector> kernels2d = { {3, 3}, {1, 1} };
const std::vector<InferenceEngine::SizeVector> strides2d = { {1, 1}, {2, 2} };
const std::vector<std::vector<ptrdiff_t>> padBegins2d = { {0, 0} };
const std::vector<std::vector<ptrdiff_t>> padEnds2d = { {0, 0} };
const std::vector<InferenceEngine::SizeVector> dilations2d = { {1, 1} };

/* ============= Deconvolution (2D) ============= */
const auto convParams_ExplicitPadding_2D = ::testing::Combine(
        ::testing::ValuesIn(kernels2d),
        ::testing::ValuesIn(strides2d),
        ::testing::ValuesIn(padBegins2d),
        ::testing::ValuesIn(padEnds2d),
        ::testing::ValuesIn(dilations2d),
        ::testing::ValuesIn(numOutChannels),
        ::testing::Values(ngraph::op::PadType::EXPLICIT),
        ::testing::ValuesIn(emptyOutputPadding)
);

// Dynamic-shape inputs without an explicit output_shape input.
const std::vector<DeconvInputData> dyn_2D_inputs_smoke = {
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
    DeconvInputData{
        InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
    DeconvInputData{
        InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
};

INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest,
    ::testing::Combine(
        convParams_ExplicitPadding_2D,
        ::testing::ValuesIn(dyn_2D_inputs_smoke),
        ::testing::Values(ElementType::f32),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::Values(emptyAdditionalConfig)),
    DeconvolutionLayerGPUTest::getTestCaseName);

// Dynamic-shape inputs WITH an explicit output_shape input (Parameter case
// exercises per-request shapes; Constant case uses a single fixed shape).
const std::vector<DeconvInputData> dyn_2D_inputs_with_output_shape = {
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::PARAMETER,
        {{15, 15}, {9, 10}, {15, 15}}
    },
    DeconvInputData{
        InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {{15, 15}}
    },
    DeconvInputData{
        InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {{15, 15}}
    },
};

INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_OutputShape_FP32, DeconvolutionLayerGPUTest,
    ::testing::Combine(
        ::testing::Combine(
            ::testing::Values(SizeVector{3, 3}),
            ::testing::ValuesIn(strides2d),
            ::testing::ValuesIn(padBegins2d),
            ::testing::ValuesIn(padEnds2d),
            ::testing::ValuesIn(dilations2d),
            ::testing::ValuesIn(numOutChannels),
            ::testing::Values(ngraph::op::PadType::EXPLICIT),
            ::testing::ValuesIn(emptyOutputPadding)),
        ::testing::ValuesIn(dyn_2D_inputs_with_output_shape),
        ::testing::Values(ElementType::f32),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::Values(emptyAdditionalConfig)),
    DeconvolutionLayerGPUTest::getTestCaseName);

} // namespace
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
@ -0,0 +1,345 @@
|
||||
// Copyright (C) 2023 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <tuple>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include "ngraph_functions/utils/ngraph_helpers.hpp"
|
||||
#include "ngraph_functions/builders.hpp"
|
||||
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||
#include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp"
|
||||
#include "common_test_utils/test_constants.hpp"
|
||||
#include "common_test_utils/ov_tensor_utils.hpp"
|
||||
#include "openvino/core/preprocess/pre_post_process.hpp"
|
||||
|
||||
using namespace InferenceEngine;
|
||||
using namespace ov::test;
|
||||
|
||||
namespace GPULayerTestsDefinitions {
|
||||
|
||||
// Kernel/stride/pads/dilation/out-channels/groups/pad-type/output-padding
// tuple shared with the static GroupConvolutionBackpropData layer tests.
using GroupDeconvSpecParams = LayerTestsDefinitions::groupConvBackpropSpecificParams;

// Per-case input description: the data shape plus how the optional
// 'output_shape' input is supplied and which values it takes per inference.
using DeconvInputData = std::tuple<InputShape,                          // data shape
                                   ngraph::helpers::InputLayerType,     // 'output_shape' input type
                                   std::vector<std::vector<int32_t>>>;  // values for 'output_shape'

// Full test parameter set: op params, input data, precision, device, config.
using GroupDeconvLayerTestParamsSet = std::tuple<GroupDeconvSpecParams,
                                                 DeconvInputData,
                                                 ElementType,
                                                 LayerTestsUtils::TargetDevice,
                                                 std::map<std::string, std::string>>;
|
||||
|
||||
// Dynamic-shape functional test for GroupConvolutionBackpropData (grouped
// deconvolution) on GPU. Covers both the 2-input form (data + weights) and
// the 3-input form with an explicit 'output_shape' input fed either as a
// Constant or as a runtime Parameter.
class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface<GroupDeconvLayerTestParamsSet>,
                                       virtual public SubgraphBaseTest {
public:
    // Builds a human-readable, unique test name from the parameter tuple.
    static std::string getTestCaseName(testing::TestParamInfo<GroupDeconvLayerTestParamsSet> obj) {
        GroupDeconvSpecParams basicParamsSet;
        DeconvInputData inputData;
        ElementType prec;
        std::string targetDevice;
        std::map<std::string, std::string> additionalConfig;
        std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param;

        ngraph::op::PadType padType;
        InferenceEngine::SizeVector kernel, stride, dilation;
        std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
        size_t convOutChannels, groupNum;
        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet;

        InputShape inputShape;
        ngraph::helpers::InputLayerType outShapeType;
        std::vector<std::vector<int32_t>> outShapeData;
        std::tie(inputShape, outShapeType, outShapeData) = inputData;

        std::ostringstream result;
        result << "IS=";
        result << CommonTestUtils::partialShape2str({inputShape.first}) << "_";
        result << "TS=";
        for (const auto& shape : inputShape.second) {
            result << "(";
            result << CommonTestUtils::vec2str(shape);
            result << ")_";
        }
        result << "PRC=" << prec << "_";
        result << "K=" << CommonTestUtils::vec2str(kernel) << "_";
        result << "S=" << CommonTestUtils::vec2str(stride) << "_";
        result << "PB=" << CommonTestUtils::vec2str(padBegin) << "_";
        result << "PE=" << CommonTestUtils::vec2str(padEnd) << "_";
        result << "D=" << CommonTestUtils::vec2str(dilation) << "_";
        result << "OP=" << CommonTestUtils::vec2str(outPadding) << "_";
        result << "O=" << convOutChannels << "_";
        result << "G=" << groupNum << "_";
        result << "AP=" << padType << "_";
        result << "OUT_SH=" << outShapeType << "_";
        result << "OUT_D=";
        for (const auto& data : outShapeData) {
            result << "(";
            result << CommonTestUtils::vec2str(data);
            result << ")_";
        }
        result << "config=(";
        // Iterate by const reference: a by-value loop variable would copy
        // every std::pair<const std::string, std::string> of the map.
        for (const auto& configEntry : additionalConfig) {
            result << configEntry.first << ", " << configEntry.second << ":";
        }
        result << ")_";
        result << "trgDev=" << targetDevice;

        return result.str();
    }

    // Fills the inference inputs for one target-shape iteration.
    // Input 1 (when present) is the 'output_shape' tensor, taken verbatim
    // from outShapeData; input 0 is random data.
    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
        inputs.clear();
        const auto& funcInputs = function->inputs();
        // size_t index avoids a signed/unsigned comparison with .size().
        for (size_t i = 0; i < funcInputs.size(); ++i) {
            const auto& funcInput = funcInputs[i];
            ov::Tensor tensor;

            if (i == 1) {
                tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i], outShapeData[inferRequestNum].data());
            } else {
                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2560, 0, 256);
            }

            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
        }
        inferRequestNum++;
    }

    // Rebuilds the reference model for the current static shapes.
    void init_ref_function(std::shared_ptr<ov::Model> &funcRef, const std::vector<ov::Shape>& targetInputStaticShapes) override {
        if (function->get_parameters().size() == 1) {
            ngraph::helpers::resize_function(funcRef, targetInputStaticShapes);
        } else {
            // WA: output_shape depends on 3rd deconvolution input data
            // but the reference implementation doesn't implement shape inference
            // so we need to build a new ngraph function and replace the 3rd input parameter with a constant
            // to get valid output shapes
            funcRef = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT);
        }
    }

    // Compares plugin outputs against the reference, dropping the
    // 'output_shape' parameter ("param_1") first because the reference graph
    // replaces it with a constant (see init_ref_function).
    void validate() override {
        auto actualOutputs = get_plugin_outputs();
        if (function->get_parameters().size() == 2) {
            auto pos = std::find_if(inputs.begin(), inputs.end(),
                [](const std::pair<std::shared_ptr<ov::Node>, ov::Tensor> &params) {
                    return params.first->get_friendly_name() == "param_1";
                });
            IE_ASSERT(pos != inputs.end());
            inputs.erase(pos);
        }
        auto expectedOutputs = calculate_refs();
        if (expectedOutputs.empty()) {
            return;
        }
        ASSERT_EQ(actualOutputs.size(), expectedOutputs.size())
            << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();

        abs_threshold = 1e-2f;
        compare(expectedOutputs, actualOutputs);
    }

    // Applies the requested input/output element types. Only the data input
    // (index 0) is converted; the 'output_shape' input must stay i32.
    void configure_model() override {
        ov::preprocess::PrePostProcessor p(function);
        {
            // Only parameter 0 (the data input) gets the precision override.
            const auto& params = function->get_parameters();
            if (!params.empty() && inType != ov::element::Type_t::undefined) {
                p.input(0).tensor().set_element_type(inType);
            }
        }
        {
            auto results = function->get_results();
            for (size_t i = 0; i < results.size(); i++) {
                if (outType != ov::element::Type_t::undefined) {
                    p.output(i).tensor().set_element_type(outType);
                }
            }
        }
        function = p.build();
    }

    // Builds the tested ngraph function. When outShapeData is non-empty the
    // grouped deconvolution gets a 3rd 'output_shape' input: a Parameter
    // (dynamic case) or a Constant (reference case).
    std::shared_ptr<ov::Model> createGraph(const std::vector<ov::PartialShape>& inShapes, ngraph::helpers::InputLayerType outShapeType) {
        auto params = ngraph::builder::makeDynamicParams(prec, {inShapes.front()});
        std::shared_ptr<ov::Node> outShapeNode;
        if (!outShapeData.empty()) {
            if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
                IE_ASSERT(inputDynamicShapes.size() == 2);
                auto outShapeParam = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i32, inputDynamicShapes.back());
                params.push_back(outShapeParam);
                outShapeNode = outShapeParam;
            } else {
                outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
            }
        }

        // Names are relied upon by validate() ("param_1" is the output_shape input).
        for (size_t i = 0; i < params.size(); i++) {
            params[i]->set_friendly_name(std::string("param_") + std::to_string(i));
        }

        std::shared_ptr<ov::Node> deconv;
        if (!outShapeData.empty()) {
            IE_ASSERT(outShapeNode != nullptr);
            deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin,
                                                                       padEnd, dilation, padType, convOutChannels, groupNum);
        } else {
            deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], prec, kernel, stride, padBegin,
                                                                       padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding);
        }

        ngraph::ResultVector results;
        // size_t index: get_output_size() is unsigned.
        for (size_t i = 0; i < deconv->get_output_size(); i++)
            results.push_back(std::make_shared<ngraph::opset1::Result>(deconv->output(i)));

        return std::make_shared<ngraph::Function>(results, params, "GroupDeconv");
    }

protected:
    void SetUp() override {
        GroupDeconvSpecParams basicParamsSet;
        DeconvInputData inputData;
        std::map<std::string, std::string> additionalConfig;
        std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam();

        InputShape inputShape;
        ngraph::helpers::InputLayerType outShapeType;
        std::tie(inputShape, outShapeType, outShapeData) = inputData;

        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet;

        std::vector<InputShape> paramsShapes;
        paramsShapes.push_back(inputShape);
        if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
            // The output_shape input is a 1D tensor with one entry per spatial dim.
            const auto outShapeDims = ov::Shape{outShapeData.front().size()};
            paramsShapes.push_back(InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
        }

        init_input_shapes(paramsShapes);

        function = createGraph(inputDynamicShapes, outShapeType);
    }

private:
    ElementType prec;                               // tested data precision
    ngraph::op::PadType padType;
    InferenceEngine::SizeVector kernel, stride, dilation;
    std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
    size_t convOutChannels, groupNum;
    ngraph::helpers::InputLayerType outShapeType;
    std::vector<std::vector<int32_t>> outShapeData;  // 'output_shape' values per request
    size_t inferRequestNum = 0;                      // index into outShapeData
};
|
||||
|
||||
// Runs the parameterized grouped deconvolution case end-to-end on GPU and
// compares against the ngraph reference (see validate()).
TEST_P(GroupDeconvolutionLayerGPUTest, CompareWithRefs) {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    run();
}
|
||||
|
||||
namespace {

std::map<std::string, std::string> emptyAdditionalConfig;

// NOTE(review): emptyOutputShape is not referenced anywhere in this file —
// candidate for removal.
const std::vector<std::vector<size_t >> emptyOutputShape = {{}};
// Empty output padding: the builder falls back to its default (zeros).
const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = {{}};

/* ============= GroupConvolution params ============= */
const InferenceEngine::SizeVector numOutChannels = {6};
const InferenceEngine::SizeVector numGroups = {2, 3};

/* ============= GroupConvolution params (2D) ============= */
const std::vector<InferenceEngine::SizeVector> kernels2d = {{3, 3}, {1, 1}};
const std::vector<InferenceEngine::SizeVector> strides2d = {{1, 1}, {2, 2}};
const std::vector<std::vector<ptrdiff_t>> padBegins2d = {{0, 0}};
const std::vector<std::vector<ptrdiff_t>> padEnds2d = {{0, 0}};
const std::vector<InferenceEngine::SizeVector> dilations2d = {{1, 1}};

/* ============= GroupConvolution (2D) ============= */
const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine(
        ::testing::ValuesIn(kernels2d),
        ::testing::ValuesIn(strides2d),
        ::testing::ValuesIn(padBegins2d),
        ::testing::ValuesIn(padEnds2d),
        ::testing::ValuesIn(dilations2d),
        ::testing::ValuesIn(numOutChannels),
        ::testing::ValuesIn(numGroups),
        ::testing::Values(ngraph::op::PadType::EXPLICIT),
        ::testing::ValuesIn(emptyOutputPadding)
);

// Dynamic-shape inputs without an explicit output_shape input.
const std::vector<DeconvInputData> dyn_2D_inputs_smoke = {
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    },
    DeconvInputData{
        InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {}
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLayerGPUTest,
    ::testing::Combine(
        groupConvParams_ExplicitPadding_2D,
        ::testing::ValuesIn(dyn_2D_inputs_smoke),
        ::testing::Values(ElementType::f32),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::Values(emptyAdditionalConfig)),
    GroupDeconvolutionLayerGPUTest::getTestCaseName);

// Dynamic-shape inputs WITH an explicit output_shape input (Parameter case
// exercises per-request shapes; Constant case uses a single fixed shape).
const std::vector<DeconvInputData> dyn_2D_inputs_with_output_shape = {
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::PARAMETER,
        {{15, 15}, {9, 10}, {15, 15}}
    },
    DeconvInputData{
        InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {{15, 15}}
    },
    DeconvInputData{
        InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}},
        ngraph::helpers::InputLayerType::CONSTANT,
        {{15, 15}}
    }
};

INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_OutputShape_FP32, GroupDeconvolutionLayerGPUTest,
    ::testing::Combine(
        ::testing::Combine(
            ::testing::Values(SizeVector{3, 3}),
            ::testing::ValuesIn(strides2d),
            ::testing::ValuesIn(padBegins2d),
            ::testing::ValuesIn(padEnds2d),
            ::testing::ValuesIn(dilations2d),
            ::testing::ValuesIn(numOutChannels),
            ::testing::ValuesIn(numGroups),
            ::testing::Values(ngraph::op::PadType::EXPLICIT),
            ::testing::ValuesIn(emptyOutputPadding)),
        ::testing::ValuesIn(dyn_2D_inputs_with_output_shape),
        ::testing::Values(ElementType::f32),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::Values(emptyAdditionalConfig)),
    GroupDeconvolutionLayerGPUTest::getTestCaseName);

} // namespace
|
||||
|
||||
} // namespace GPULayerTestsDefinitions
|
Loading…
Reference in New Issue
Block a user