Review convolution classes for shape inference aspects (#16375)

* Review adaptive max pool shape inference

* Review AvgPool and MaxPool

* Review convolution operator

* Review GroupConvolution shape inference

* Review ConvolutionBackpropData operator

* Review GroupConvolutionBackpropData op

* Review BinaryConvolution operator
- add common bases for convolution ops
- refactor convolution ops

* Review DeformableConvolution operator

* Use new convolution shape_infer in GPU

* Fix build and test issues

* Correct set output spatial shape
in default constructed back prop convolutions

* The convolution shape_infer uses pads as parameters
The external padding can come from operators or from other classes' padding properties; shape_infer should not modify the operator's padding when
called from a plugin

* Apply code formatting

* Fix padding validation and update

* Use shape inference with padding instead of the fallback
for DeformableConvolution from opset1

* Update convertPadding function to be template
This commit is contained in:
Pawel Raasz 2023-03-28 21:10:08 +02:00 committed by GitHub
parent 8d90c11a35
commit 796bd98913
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
40 changed files with 3359 additions and 2274 deletions

View File

@ -7,6 +7,7 @@
#include "openvino/core/coordinate_diff.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/convolution_base.hpp"
namespace ov {
namespace op {
@ -14,9 +15,9 @@ namespace v1 {
/// \brief BinaryConvolution operation.
///
/// \ingroup ov_ops_cpp_api
class OPENVINO_API BinaryConvolution : public Op {
class OPENVINO_API BinaryConvolution : public util::ConvolutionFwdPropBase {
public:
OPENVINO_OP("BinaryConvolution", "opset1", op::Op);
OPENVINO_OP("BinaryConvolution", "opset1", op::util::ConvolutionFwdPropBase);
enum class BinaryConvolutionMode {
// Interpret input data and kernel values: 0 as -1, 1 as 1
@ -63,46 +64,6 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The strides.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use set_pads_end instead.")
void set_adding_above(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The pad type for convolution.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The mode of convolution.
const BinaryConvolutionMode& get_mode() const {
return m_mode;
@ -120,13 +81,9 @@ public:
protected:
BinaryConvolutionMode mode_from_string(const std::string& mode) const;
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
BinaryConvolutionMode m_mode;
float m_pad_value;
PadType m_auto_pad;
};
} // namespace v1
} // namespace op

View File

@ -7,6 +7,8 @@
#include "openvino/core/coordinate_diff.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/convolution_backprop_base.hpp"
#include "openvino/op/util/convolution_base.hpp"
namespace ov {
namespace op {
@ -14,9 +16,9 @@ namespace v1 {
/// \brief Batched convolution operation, with optional window dilation and stride.
///
/// \ingroup ov_ops_cpp_api
class OPENVINO_API Convolution : public Op {
class OPENVINO_API Convolution : public util::ConvolutionFwdPropBase {
public:
OPENVINO_OP("Convolution", "opset1", op::Op);
OPENVINO_OP("Convolution", "opset1", op::util::ConvolutionFwdPropBase);
/// \brief Constructs a batched convolution operation.
Convolution() = default;
@ -51,85 +53,13 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The strides.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use set_pads_end instead.")
void set_adding_above(const CoordinateDiff& pads_end) {
set_pads_end(pads_end);
}
/// \return The pad type for convolution.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
int64_t m_num_spatial = -1;
private:
template <class ConvType>
friend int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class ConvType>
friend void update_and_validate_attributes(ConvType* op, int64_t num_spatial);
template <class ConvType, class ShapeType>
friend bool resolve_auto_pad_for_shape(const ConvType* op,
CoordinateDiff& pads_begin,
CoordinateDiff& pads_end,
const std::vector<ShapeType>& input_shapes,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class T>
friend void shape_infer(const Convolution* op,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes);
};
/// \brief Data batch backprop for batched convolution operation.
/// \ingroup ov_ops_cpp_api
class OPENVINO_API ConvolutionBackpropData : public Op {
class OPENVINO_API ConvolutionBackpropData : public util::ConvolutionBackPropBase {
public:
OPENVINO_OP("ConvolutionBackpropData", "opset1", op::Op);
OPENVINO_OP("ConvolutionBackpropData", "opset1", op::util::ConvolutionBackPropBase);
/// \brief Constructs a batched-convolution data batch-backprop operation.
ConvolutionBackpropData() = default;
@ -197,48 +127,7 @@ public:
/// \return The output spatial dimensions shape.
const PartialShape get_output_shape() const;
void set_output_shape(const Shape& output_shape);
/// \return The strides from the forward prop.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations from the forward prop.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The auto pad.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The output padding.
const CoordinateDiff& get_output_padding() const {
return m_output_padding;
}
void set_output_padding(const CoordinateDiff& output_padding) {
m_output_padding = output_padding;
}
/// \brief Calculates output spatial features size.
///
/// \param[in] input_data_shape The input data partial shape
@ -251,6 +140,7 @@ public:
/// \param output_spatial_shape The placeholder for computed output spatial partial
/// shape.
///
OPENVINO_DEPRECATED("This member function is deprecated and will be removed soon.")
void infer_conv_backprop_output_spatial_shape(const std::vector<Dimension>& input_data_shape,
const std::vector<Dimension>& filters_shape,
const Strides& strides,
@ -259,54 +149,6 @@ public:
const CoordinateDiff& pads_end,
const CoordinateDiff& output_padding,
std::vector<Dimension>& output_spatial_shape);
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
CoordinateDiff m_output_padding;
int64_t m_num_spatial = -1;
private:
template <class ConvType>
friend int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const PartialShape& output_shapes_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class ConvType>
friend int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class ConvType>
friend void update_and_validate_attributes(ConvType* op, int64_t num_spatial);
template <class ConvType>
friend void update_and_validate_attributes_back_prop(ConvType* op, int64_t num_spatial);
template <class ConvType, class ShapeType>
friend bool resolve_auto_pad_for_shape_back_prop(const ConvType* op,
CoordinateDiff& pads_begin,
CoordinateDiff& pads_end,
const std::vector<ShapeType>& input_shapes,
ShapeType& output_spatial_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class T>
friend void shape_infer(const ConvolutionBackpropData* op,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const T& output_shape_from_input,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes);
};
} // namespace v1
} // namespace op

View File

@ -53,6 +53,7 @@ public:
const int64_t deformable_group = 1);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
};
} // namespace v1

View File

@ -7,14 +7,15 @@
#include "openvino/op/convolution.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/convolution_base.hpp"
namespace ov {
namespace op {
namespace v1 {
/// \brief Batched convolution operation, with optional window dilation and stride.
class OPENVINO_API GroupConvolution : public Op {
class OPENVINO_API GroupConvolution : public util::ConvolutionFwdPropBase {
public:
OPENVINO_OP("GroupConvolution", "opset1", op::Op);
OPENVINO_OP("GroupConvolution", "opset1", op::util::ConvolutionFwdPropBase);
/// \brief Constructs a batched convolution operation.
GroupConvolution() = default;
@ -49,84 +50,12 @@ public:
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The strides.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use set_pads_end instead.")
void set_adding_above(const CoordinateDiff& pads_end) {
set_pads_end(pads_end);
}
/// \return The pad type for convolution.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
int64_t m_num_spatial = -1;
private:
template <class ConvType>
friend int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class ConvType>
friend void update_and_validate_attributes(ConvType* op, int64_t num_spatial);
template <class ConvType, class ShapeType>
friend bool resolve_auto_pad_for_shape(const ConvType* op,
CoordinateDiff& pads_begin,
CoordinateDiff& pads_end,
const std::vector<ShapeType>& input_shapes,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class T>
friend void shape_infer(const GroupConvolution* op,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes);
};
/// \brief Data batch backprop for batched convolution operation.
class OPENVINO_API GroupConvolutionBackpropData : public Op {
class OPENVINO_API GroupConvolutionBackpropData : public util::ConvolutionBackPropBase {
public:
OPENVINO_OP("GroupConvolutionBackpropData", "opset1", op::Op);
OPENVINO_OP("GroupConvolutionBackpropData", "opset1", op::util::ConvolutionBackPropBase);
/// \brief Constructs a batched-convolution data batch-backprop operation.
GroupConvolutionBackpropData();
@ -243,97 +172,6 @@ public:
/// \return The spatial shape of the output.
const PartialShape get_convolution_output_shape() const;
void set_output_shape(const Shape& output_shape);
/// \return The strides from the forward prop.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations from the forward prop.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The number of pixels to add to the beginning along each axis.
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The number of pixels to add to the ending along each axis.
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The auto pad.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The output padding.
const CoordinateDiff& get_output_padding() const {
return m_output_padding;
}
void set_output_padding(const CoordinateDiff& output_padding) {
m_output_padding = output_padding;
}
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
CoordinateDiff m_output_padding;
int64_t m_num_spatial = -1;
private:
template <class ConvType>
friend int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const PartialShape& output_shapes_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class ConvType>
friend int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class ConvType>
friend void update_and_validate_attributes(ConvType* op, int64_t num_spatial);
template <class ConvType>
friend void update_and_validate_attributes_back_prop(ConvType* op, int64_t num_spatial);
template <class ConvType, class ShapeType>
friend bool resolve_auto_pad_for_shape_back_prop(const ConvType* op,
CoordinateDiff& pads_begin,
CoordinateDiff& pads_end,
const std::vector<ShapeType>& input_shapes,
ShapeType& output_spatial_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims);
template <class T>
friend void shape_infer(const GroupConvolutionBackpropData* op,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const T& output_shape_from_input,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes);
};
} // namespace v1
} // namespace op

View File

@ -0,0 +1,64 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "convolution_base.hpp"
namespace ov {
namespace op {
namespace util {
/// \brief Base class for back propagation convolution operations.
class OPENVINO_API ConvolutionBackPropBase : public ConvolutionBase {
public:
    OPENVINO_OP("ConvolutionBackPropBase", "util");

    /// \brief Constructs a back propagation convolution operation with empty attributes.
    ConvolutionBackPropBase() = default;

    /// \brief Constructs a back propagation convolution operation.
    ///
    /// \param arguments      Operation input nodes.
    /// \param strides        Convolution strides.
    /// \param pads_begin     Amount of padding to be added to the beginning along
    ///                       each axis. For example in case of a 2D input the value
    ///                       of (1, 2) means that 1 element will be added to the
    ///                       top and 2 elements to the left.
    /// \param pads_end       Amount of padding to be added to the end along each
    ///                       axis.
    /// \param dilations      The distance in width and height between the weights
    ///                       in the filters tensor.
    /// \param auto_pad       Specifies how the automatic calculation of padding
    ///                       should be done.
    /// \param output_padding The output padding adds additional amount of paddings per
    ///                       each spatial axis in the output tensor.
    ConvolutionBackPropBase(const OutputVector& arguments,
                            const Strides& strides,
                            const CoordinateDiff& pads_begin,
                            const CoordinateDiff& pads_end,
                            const Strides& dilations,
                            const PadType& auto_pad = PadType::EXPLICIT,
                            const CoordinateDiff& output_padding = {})
        : ConvolutionBase(arguments, strides, pads_begin, pads_end, dilations, auto_pad),
          m_output_padding{output_padding} {}

    /// \return The output padding.
    const CoordinateDiff& get_output_padding() const {
        return m_output_padding;
    }
    void set_output_padding(const CoordinateDiff& output_padding) {
        m_output_padding = output_padding;
    }

protected:
    CoordinateDiff m_output_padding;

    // Resizes base attributes (strides/dilations) and additionally zero-fills the
    // output padding when it was left empty, so every spatial dimension has an entry.
    void resize_attributes(size_t num_spatial) {
        ConvolutionBase::resize_attributes(num_spatial);

        if (m_output_padding.empty()) {
            m_output_padding.resize(num_spatial, 0);
        }
    }
};
} // namespace util
} // namespace op
} // namespace ov

View File

@ -0,0 +1,144 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstddef>
#include <limits>

#include "openvino/core/coordinate_diff.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
namespace ov {
namespace op {
namespace util {
/// \brief Base class for operations like convolutions
class OPENVINO_API ConvolutionBase : public Op {
public:
    OPENVINO_OP("ConvolutionBase", "util");

    /// \brief Constructs a convolution operation with empty attributes.
    ConvolutionBase() = default;

    /// \brief Constructs a convolution operation.
    ///
    /// \param arguments  Operation input nodes.
    /// \param strides    Convolution strides.
    /// \param pads_begin Amount of padding to be added to the beginning along
    ///                   each axis. For example in case of a 2D input the value
    ///                   of (1, 2) means that 1 element will be added to the
    ///                   top and 2 elements to the left.
    /// \param pads_end   Amount of padding to be added to the end along each
    ///                   axis.
    /// \param dilations  The distance in width and height between the weights
    ///                   in the filters tensor.
    /// \param auto_pad   Specifies how the automatic calculation of padding
    ///                   should be done.
    ConvolutionBase(const OutputVector& arguments,
                    const Strides& strides,
                    const CoordinateDiff& pads_begin,
                    const CoordinateDiff& pads_end,
                    const Strides& dilations,
                    const PadType& auto_pad = PadType::EXPLICIT)
        : Op(arguments),
          m_strides(strides),
          m_dilations(dilations),
          m_pads_begin(pads_begin),
          m_pads_end(pads_end),
          m_auto_pad(auto_pad) {}

    /// \return The strides.
    const Strides& get_strides() const {
        return m_strides;
    }
    void set_strides(const Strides& strides) {
        m_strides = strides;
    }
    /// \return The dilations.
    const Strides& get_dilations() const {
        return m_dilations;
    }
    void set_dilations(const Strides& dilations) {
        m_dilations = dilations;
    }
    /// \return The padding-below sizes (possibly negative).
    const CoordinateDiff& get_pads_begin() const {
        return m_pads_begin;
    }
    void set_pads_begin(const CoordinateDiff& pads_begin) {
        m_pads_begin = pads_begin;
    }
    /// \return The padding-above sizes (possibly negative).
    const CoordinateDiff& get_pads_end() const {
        return m_pads_end;
    }
    OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use set_pads_end instead.")
    void set_adding_above(const CoordinateDiff& pads_end) {
        set_pads_end(pads_end);
    }
    void set_pads_end(const CoordinateDiff& pads_end) {
        m_pads_end = pads_end;
    }
    /// \return The pad type for convolution.
    const PadType& get_auto_pad() const {
        return m_auto_pad;
    }
    void set_auto_pad(const PadType& auto_pad) {
        m_auto_pad = auto_pad;
    }

protected:
    Strides m_strides;
    Strides m_dilations;
    CoordinateDiff m_pads_begin;
    CoordinateDiff m_pads_end;
    PadType m_auto_pad;
    // Number of spatial dimensions; max() is the "undefined" sentinel until it is
    // resolved (see set_num_spatial).
    size_t m_num_spatial = std::numeric_limits<size_t>::max();

    // Fills strides/dilations with 1 for each spatial dimension when they were
    // left empty, so their sizes match num_spatial.
    void resize_attributes(size_t num_spatial) {
        if (m_strides.empty()) {
            m_strides.resize(num_spatial, 1);
        }
        if (m_dilations.empty()) {
            m_dilations.resize(num_spatial, 1);
        }
    }

    // Caches the spatial count only when both the data (input 0) and filters
    // (input 1) ranks are static; otherwise it stays at the undefined sentinel.
    void set_num_spatial(size_t num_spatial, const std::vector<PartialShape>& input_shapes) {
        if (input_shapes[0].rank().is_static() && input_shapes[1].rank().is_static()) {
            m_num_spatial = num_spatial;
        }
    }

private:
    friend bool is_attr_validation_required(const ConvolutionBase* op);
    friend size_t get_num_spatial(const ConvolutionBase* op);
};
/// \brief Base class for forward propagation convolution operations.
class OPENVINO_API ConvolutionFwdPropBase : public ConvolutionBase {
public:
    OPENVINO_OP("ConvolutionFwdPropBase", "util");

    /// \brief Constructs a forward propagation convolution operation with empty attributes.
    ConvolutionFwdPropBase() = default;

    /// \brief Constructs a forward propagation convolution operation.
    ///
    /// \param arguments  Operation input nodes.
    /// \param strides    Convolution strides.
    /// \param pads_begin Amount of padding to be added to the beginning along
    ///                   each axis. For example in case of a 2D input the value
    ///                   of (1, 2) means that 1 element will be added to the
    ///                   top and 2 elements to the left.
    /// \param pads_end   Amount of padding to be added to the end along each
    ///                   axis.
    /// \param dilations  The distance in width and height between the weights
    ///                   in the filters tensor.
    /// \param auto_pad   Specifies how the automatic calculation of padding
    ///                   should be done.
    ConvolutionFwdPropBase(const OutputVector& arguments,
                           const Strides& strides,
                           const CoordinateDiff& pads_begin,
                           const CoordinateDiff& pads_end,
                           const Strides& dilations,
                           const PadType& auto_pad = PadType::EXPLICIT)
        : ConvolutionBase(arguments, strides, pads_begin, pads_end, dilations, auto_pad) {}

private:
    friend bool is_attr_validation_required(const ConvolutionBase* op);
};
} // namespace util
} // namespace op
} // namespace ov

View File

@ -7,13 +7,14 @@
#include "openvino/core/coordinate_diff.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/op/util/convolution_base.hpp"
namespace ov {
namespace op {
namespace util {
/// \brief Base class for operations DeformableConvolution v1 and DeformableConvolution
/// v8.
class OPENVINO_API DeformableConvolutionBase : public Op {
class OPENVINO_API DeformableConvolutionBase : public util::ConvolutionBase {
public:
OPENVINO_OP("DeformableConvolutionBase", "util");
@ -46,38 +47,7 @@ public:
int64_t deformable_group = 1);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
int64_t get_group() const {
return m_group;
}
@ -92,11 +62,6 @@ public:
}
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
int64_t m_group;
int64_t m_deformable_group;
};

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/binary_convolution.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace convolution {
namespace validate {
/**
 * @brief Validates the data input shape of BinaryConvolution, which must be rank 4.
 *
 * Apart from this check the shape_infer is identical to the Convolution
 * operator's. @see convolution_shape_inference.hpp
 *
 * @param op         Pointer to BinaryConvolution operator.
 * @param data_shape Shape of the data input to validate.
 */
template <class TShape>
void data_shape(const v1::BinaryConvolution* op, const TShape& data_shape) {
    const auto& data_rank = data_shape.rank();
    NODE_VALIDATION_CHECK(op, data_rank.compatible(4), "Expected 4D for the input. Got: ", data_shape);
}
} // namespace validate
} // namespace convolution
} // namespace op
} // namespace ov

View File

@ -0,0 +1,80 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "convolution_backprop_shape_inference_util.hpp"
#include "convolution_shape_inference_util.hpp"
#include "openvino/op/convolution.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v1 {
/**
 * @brief Shape inference for v1::ConvolutionBackpropData.
 *
 * @param op            Pointer to the operator.
 * @param input_shapes  Shapes of data, filters and (optionally) the output spatial shape input.
 * @param pads_begin    Padding begin, updated in place (operator attributes are not modified).
 * @param pads_end      Padding end, updated in place (operator attributes are not modified).
 * @param constant_data Map of constant tensors used to read the output spatial shape input.
 * @return Vector with the single inferred output shape.
 */
template <class TShape>
std::vector<TShape> shape_infer(const ConvolutionBackpropData* op,
                                const std::vector<TShape>& input_shapes,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
    const auto inputs_count = input_shapes.size();
    // Third input (output spatial shape) is optional.
    const auto has_spatial_shape = inputs_count == 3;
    NODE_VALIDATION_CHECK(op, inputs_count == 2 || has_spatial_shape);
    using namespace ov::util;

    TShape out_spatial_shape;
    if (has_spatial_shape) {
        const auto& spatial_shape = input_shapes[2];
        NODE_VALIDATION_CHECK(op,
                              spatial_shape.rank().compatible(1),
                              "Input delivering output shape must have rank 1.");
        // Read the requested output spatial shape as a constant; otherwise derive
        // at least its length (or leave it fully dynamic) from the input's shape.
        if (!get_data_as_shape(2, op, out_spatial_shape, constant_data)) {
            if (spatial_shape.is_static()) {
                out_spatial_shape.resize(spatial_shape[0].get_length());
            } else {
                out_spatial_shape = PartialShape::dynamic();
            }
        }
    }

    const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, out_spatial_shape);

    TShape output_shape;
    if (num_spatial != convolution::num_spatial_undefined) {
        const auto& data_shape = input_shapes[0];
        const auto& filters_shape = input_shapes[1];

        NODE_VALIDATION_CHECK(
            op,
            !has_spatial_shape || out_spatial_shape.rank().is_dynamic() || out_spatial_shape.size() == num_spatial,
            "Output shape should be defined for all and only spatial dimensions.");

        // Size pads to num_spatial before validating/applying them.
        convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);
        convolution::validate::filter_shape(op, filters_shape, data_shape);
        if (is_attr_validation_required(op)) {
            convolution::validate::data_shape(op, data_shape);
            convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);
        }
        convolution::apply_padding(op, input_shapes, out_spatial_shape, pads_begin, pads_end);

        // Output layout: [batch, output channels, spatial...]; output channel count
        // comes from filters_shape[1], unknown dims become inf_bound.
        output_shape.reserve(util::spatial_dim_offset + num_spatial);
        output_shape.emplace_back(data_shape.rank().is_static() ? data_shape[0] : dim::inf_bound);
        output_shape.emplace_back(filters_shape.rank().is_static() ? filters_shape[1] : dim::inf_bound);

        // Spatial dims: either taken verbatim from the third input or computed
        // from data/filters shapes and the resolved paddings.
        if (has_spatial_shape) {
            output_shape.insert(output_shape.end(),
                                std::make_move_iterator(out_spatial_shape.begin()),
                                std::make_move_iterator(out_spatial_shape.end()));
        } else {
            convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape);
        }
    } else {
        // Spatial dimension count unknown: nothing can be inferred.
        output_shape = PartialShape::dynamic();
    }

    return {output_shape};
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,191 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "convolution_shape_inference_util.hpp"
#include "openvino/op/util/convolution_backprop_base.hpp"
namespace ov {
namespace op {
namespace convolution {
namespace validate {
/**
 * @brief Validates the filters shape of a back propagation convolution against the data shape.
 *
 * Checks that the data and filters ranks are compatible and, when both ranks are
 * static, that the data channel dimension (data_shape[1]) is compatible with the
 * filters input channel dimension (filters_shape[0]).
 *
 * NOTE(review): the second check indexes data_shape[1]/filters_shape[0] whenever
 * both ranks are static — presumably ranks >= 2 are guaranteed by earlier
 * validation; confirm against callers.
 */
template <class TShape>
void filter_shape(const ov::op::util::ConvolutionBackPropBase* op,
                  const TShape& filters_shape,
                  const TShape& data_shape) {
    const auto& data_rank = data_shape.rank();
    const auto& filters_rank = filters_shape.rank();

    NODE_VALIDATION_CHECK(op,
                          data_rank.compatible(filters_rank),
                          "Data batch and filters rank do not match (data batch shape: ",
                          data_shape,
                          ", filters shape: ",
                          filters_shape,
                          ").");

    NODE_VALIDATION_CHECK(
        op,
        data_rank.is_dynamic() || filters_rank.is_dynamic() || data_shape[1].compatible(filters_shape[0]),
        "Data batch channel count (",
        data_shape[1],
        ") does not match filter input channel count (",
        filters_shape[0],
        ").");
}
} // namespace validate
/**
 * @brief Resolves the number of spatial dimensions for a back propagation convolution.
 *
 * Tries, in order: the value already cached on the operator, the data/filters
 * shapes, the rank of the output spatial shape, and finally the operator
 * attributes. May still return num_spatial_undefined when none of them
 * determine it.
 */
template <class TOp,
          class TShape,
          typename std::enable_if<std::is_base_of<util::ConvolutionBackPropBase, TOp>::value>::type* = nullptr>
size_t calculate_num_spatial(const TOp* op, const std::vector<TShape>& input_shapes, const TShape& out_spatial_shape) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() > 1);

    auto num_spatial = util::get_num_spatial(op);
    if (num_spatial == num_spatial_undefined) {
        // Try to derive it from the data and filters shapes first.
        num_spatial =
            util::num_spatial_from_shapes(input_shapes[0], input_shapes[1], filter_non_spatial_dims_count<TOp>());

        // Fall back to the explicit output spatial shape when it has a usable static rank.
        if (num_spatial == num_spatial_undefined && out_spatial_shape.rank().is_static() &&
            out_spatial_shape.size() > 0) {
            num_spatial = out_spatial_shape.size();
        }

        // Last resort: derive it from the operator attributes.
        if (num_spatial == num_spatial_undefined) {
            num_spatial = num_spatial_from_attr(op);
        }
    }
    return num_spatial;
}
/**
 * @brief Apply auto padding for backward convolution.
 *
 * The auto padding can be applied only if inputs and attributes of operator are validated.
 * The input shapes must have got static ranks.
 *
 * @param op                Pointer to convolution operator.
 * @param data_shape        Input data shape (must be static rank).
 * @param filters_shape     Input filter shape (must be static rank).
 * @param out_spatial_shape Reference to input with out spatial shape.
 * @param pads_begin        Iterator to begin of pads begin.
 * @param pads_end          Iterator to begin of pads end.
 */
template <class TOp, class TShape, class TIter>
void apply_auto_pad(const TOp* op,
                    const TShape& data_shape,
                    const TShape& filters_shape,
                    const TShape& out_spatial_shape,
                    TIter pads_begin,
                    TIter pads_end) {
    const auto& strides = op->get_strides();
    const auto& dilations = op->get_dilations();
    const auto& out_padding = op->get_output_padding();

    // Number of spatial dimensions; strides are assumed already sized to match.
    const auto num_spatial = strides.size();
    // Point at the first spatial dimension of each input shape.
    auto data_dim = data_shape.cend() - num_spatial;
    auto filter_dim = filters_shape.cend() - num_spatial;

    // For SAME_UPPER the begin/end iterators are swapped, so the larger half of
    // the total padding (padding - padding / 2) lands in the other vector.
    const auto padding_swap = op->get_auto_pad() == PadType::SAME_UPPER;
    auto& pad_b = padding_swap ? pads_end : pads_begin;
    auto& pad_e = padding_swap ? pads_begin : pads_end;

    for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim, ++filter_dim) {
        using namespace ov::util;
        if (data_dim->is_static() && filter_dim->is_static() && out_spatial_shape[i].is_static()) {
            // Total padding needed so the backward convolution yields the
            // requested output spatial length; clamped to be non-negative.
            const auto dilated_filter = dim::dilated(*filter_dim, dilations[i]);
            const auto dim_len = static_cast<int64_t>(data_dim->get_length() - 1);
            const auto padding = std::max<int64_t>(
                dim_len * strides[i] + dilated_filter.get_length() - out_spatial_shape[i].get_length() + out_padding[i],
                0);

            *pad_b = padding / 2;
            *pad_e = padding - *pad_b;
        } else {
            // Any dynamic dimension: padding cannot be computed, use zero.
            *pad_b = 0;
            *pad_e = 0;
        }
    }
}
/**
 * @brief Apply auto padding for back propagation convolutions.
 *
 * @tparam TShape           Shape type.
 * @param op                Pointer to back propagation convolution operator.
 * @param input_shapes      Input shapes of operator (data, filters, optional output shape input).
 * @param out_spatial_shape Input with the requested output spatial shape.
 * @param pads_begin        Begin padding container to update.
 * @param pads_end          End padding container to update.
 */
template <class TShape>
void apply_padding(const util::ConvolutionBackPropBase* op,
                   const std::vector<TShape>& input_shapes,
                   const TShape& out_spatial_shape,
                   CoordinateDiff& pads_begin,
                   CoordinateDiff& pads_end) {
    const auto& data_shape = input_shapes[0];
    const auto& filters_shape = input_shapes[1];

    const auto auto_padding = convolution::is_auto_pad(op);
    if (auto_padding && (input_shapes.size() == 3) && data_shape.rank().is_static() &&
        filters_shape.rank().is_static()) {
        // Deduce pads from the requested output spatial shape.
        convolution::apply_auto_pad(op,
                                    data_shape,
                                    filters_shape,
                                    out_spatial_shape,
                                    pads_begin.begin(),
                                    pads_end.begin());
    } else if (auto_padding || (op->get_auto_pad() == op::PadType::VALID)) {
        // Auto pad without enough information, or VALID: zero padding.
        std::fill(pads_begin.begin(), pads_begin.end(), 0);
        std::fill(pads_end.begin(), pads_end.end(), 0);
    } else if (op->get_auto_pad() == op::PadType::EXPLICIT) {
        // Copy pads from operator attributes without modifying them.
        const auto& op_pads_begin = op->get_pads_begin();
        const auto& op_pads_end = op->get_pads_end();
        std::copy(op_pads_begin.cbegin(), op_pads_begin.cend(), pads_begin.begin());
        std::copy(op_pads_end.cbegin(), op_pads_end.cend(), pads_end.begin());
    }
}
/**
 * @brief Append spatial dimension at end of output shape of back propagation convolution.
 *
 * Each spatial dimension is (data - 1) * stride + dilated(filter) + output_padding - pads_begin - pads_end.
 *
 * @tparam TOp        Back propagation convolution operator type.
 * @tparam TShape     Type of shape.
 * @tparam TContainer Type of pads container.
 * @param op            Pointer to operator.
 * @param data_shape    Input data shape.
 * @param filters_shape Input filter shape.
 * @param pads_begin    Begin padding per spatial dimension.
 * @param pads_end      End padding per spatial dimension.
 * @param out_shape     Output shape to append spatial dimensions.
 */
template <class TOp,
          class TShape,
          class TContainer,
          typename std::enable_if<std::is_base_of<ov::op::util::ConvolutionBackPropBase, TOp>::value>::type* = nullptr>
void append_spatial_shape(const TOp* op,
                          const TShape& data_shape,
                          const TShape& filters_shape,
                          const TContainer& pads_begin,
                          const TContainer& pads_end,
                          TShape& out_shape) {
    using namespace ov::util;

    const auto& strides = op->get_strides();
    const auto& dilations = op->get_dilations();
    const auto& output_padding = op->get_output_padding();
    const auto spatial_num = strides.size();

    // Substitute a fully-dynamic shape when the rank is unknown so the trailing
    // spatial iterators below are always valid.
    const auto& d_shape = data_shape.rank().is_static() ? data_shape : PartialShape::dynamic(spatial_num);
    auto data_dim = d_shape.cend() - spatial_num;

    const auto& f_shape = filters_shape.rank().is_static() ? filters_shape : PartialShape::dynamic(spatial_num);
    auto filters_dim = f_shape.cend() - spatial_num;

    for (size_t i = 0; i < spatial_num; ++i, ++data_dim, ++filters_dim) {
        auto dim = (*data_dim - 1) * strides[i];
        dim += dim::dilated(*filters_dim, dilations[i]);
        // Padding is subtracted here (passed negated into dim::padded).
        out_shape.push_back(dim::padded(dim, output_padding[i] - pads_begin[i] - pads_end[i]));
    }
}
} // namespace convolution
} // namespace op
} // namespace ov

View File

@ -3,736 +3,49 @@
//
#pragma once
#include <openvino/op/convolution.hpp>
#include <openvino/op/group_conv.hpp>
#include "convolution_shape_inference_util.hpp"
#include "openvino/op/convolution.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v1 {
template <class TFrowardConv, class TShape, class TContainer>
std::vector<TShape> shape_infer(const TFrowardConv* op,
const std::vector<TShape>& input_shapes,
TContainer& pads_begin,
TContainer& pads_end,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
using namespace ov::util;
template <class ConvType>
int64_t calculate_num_spatial(const ConvType* op,
const PartialShape& input_shape,
const PartialShape& filters_shape,
const int64_t& num_non_spatial_data_dims,
const int64_t& num_non_spatial_filter_dims) {
int64_t num_spatial = op->m_num_spatial;
if (num_spatial == -1) {
const auto& input_rank = input_shape.rank();
const auto& filters_rank = filters_shape.rank();
const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes);
if (input_rank.is_static())
num_spatial = input_rank.get_length() - num_non_spatial_data_dims;
if (filters_rank.is_static())
num_spatial = filters_rank.get_length() - num_non_spatial_filter_dims;
TShape output_shape;
if (num_spatial != util::num_spatial_undefined) {
const auto& data_shape = input_shapes[0];
const auto& filters_shape = input_shapes[1];
const auto data_rank = data_shape.rank();
const auto filters_rank = filters_shape.rank();
if (const auto& size = op->m_dilations.size()) {
NODE_VALIDATION_CHECK(op,
num_spatial == -1 || num_spatial == static_cast<int64_t>(size),
"Dilations should be defined for all and only spatial dimensions.");
num_spatial = static_cast<int64_t>(size);
convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);
convolution::validate::filter_shape(op, filters_shape, data_shape);
if (is_attr_validation_required(op)) {
convolution::validate::data_shape(op, data_shape);
convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);
}
if (const auto& size = op->m_strides.size()) {
NODE_VALIDATION_CHECK(op,
num_spatial == -1 || num_spatial == static_cast<int64_t>(size),
"Strides should be defined for all and only spatial dimensions.");
num_spatial = static_cast<int64_t>(size);
}
if (const auto& size = op->m_pads_begin.size()) {
NODE_VALIDATION_CHECK(op,
num_spatial == -1 || num_spatial == static_cast<int64_t>(size),
"Pads begin should be defined for all and only spatial dimensions.");
num_spatial = static_cast<int64_t>(size);
}
if (const auto& size = op->m_pads_end.size()) {
NODE_VALIDATION_CHECK(op,
num_spatial == -1 || num_spatial == static_cast<int64_t>(size),
"Pads end should be defined for all and only spatial dimensions.");
num_spatial = static_cast<int64_t>(size);
}
}
return num_spatial;
}
convolution::apply_padding(op, data_shape, filters_shape, pads_begin, pads_end);
/**
 * @brief Calculate number of spatial dimensions for any shape type.
 *
 * Delegates to the PartialShape overload by converting both shapes, so a single
 * implementation serves static and partial shape types.
 */
template <class ConvType, class ShapeType>
int64_t calculate_num_spatial(const ConvType* op,
                              const ShapeType& input_shape,
                              const ShapeType& filters_shape,
                              const int64_t& num_non_spatial_data_dims,
                              const int64_t& num_non_spatial_filter_dims) {
    return calculate_num_spatial(op,
                                 input_shape.to_partial_shape(),
                                 filters_shape.to_partial_shape(),
                                 num_non_spatial_data_dims,
                                 num_non_spatial_filter_dims);
}
/**
 * @brief Normalize and validate convolution attributes for a known spatial count.
 *
 * Mutates the operator: empty strides/dilations become 1s, empty (or VALID auto-pad)
 * paddings become 0s; then checks each attribute has exactly `num_spatial` elements
 * and that strides/dilations contain no zeros.
 */
template <class ConvType>
void update_and_validate_attributes(ConvType* op, int64_t num_spatial) {
    if (num_spatial != -1) {
        auto& strides = op->m_strides;
        auto& dilations = op->m_dilations;
        auto& pad_begin = op->m_pads_begin;
        auto& pad_end = op->m_pads_end;
        auto& auto_pad = op->m_auto_pad;

        // Fill defaults for attributes the user left empty.
        if (strides.empty())
            strides = Strides(num_spatial, 1);
        if (dilations.empty())
            dilations = Strides(num_spatial, 1);
        // VALID auto-pad always forces zero padding.
        if (pad_begin.empty() || auto_pad == op::PadType::VALID)
            pad_begin = CoordinateDiff(num_spatial, 0);
        if (pad_end.empty() || auto_pad == op::PadType::VALID)
            pad_end = CoordinateDiff(num_spatial, 0);

        NODE_VALIDATION_CHECK(op,
                              static_cast<int64_t>(strides.size()) == num_spatial,
                              "Strides should be defined for all and only spatial dimensions..");
        NODE_VALIDATION_CHECK(op,
                              static_cast<int64_t>(dilations.size()) == num_spatial,
                              "Dilations should be defined for all and only spatial dimensions..");
        NODE_VALIDATION_CHECK(op,
                              static_cast<int64_t>(pad_begin.size()) == num_spatial &&
                                  static_cast<int64_t>(pad_end.size()) == num_spatial,
                              "Pads should be defined for all and only spatial dimensions..");
        NODE_VALIDATION_CHECK(op,
                              std::all_of(dilations.begin(),
                                          dilations.end(),
                                          [](const size_t& i) {
                                              return i > 0;
                                          }),
                              "Filter dilation (",
                              dilations,
                              ") has zero dimension.");
        NODE_VALIDATION_CHECK(op,
                              std::all_of(strides.begin(),
                                          strides.end(),
                                          [](const size_t& i) {
                                              return i > 0;
                                          }),
                              "Filter strides (",
                              strides,
                              ") has zero dimension.");
    } else if (op->m_num_spatial != -1) {
        // Caller did not know the spatial count but the operator does: retry with it.
        update_and_validate_attributes(op, op->m_num_spatial);
    }
}
/**
 * @brief Check whether inference may proceed with an undeduced spatial count.
 *
 * Primary template (static shape types): asserts, since a static shape cannot be
 * computed without the spatial count.
 */
template <class T>
inline bool dynamic_check(const int64_t& num_spatial) {
    OPENVINO_ASSERT(num_spatial != -1,
                    "Convolution shape inference doesn't have enough information for static shape calculation");
    return true;
}

/// @brief PartialShape specialization: an undeduced spatial count is allowed, just report it.
template <>
inline bool dynamic_check<PartialShape>(const int64_t& num_spatial) {
    return num_spatial != -1;
}
/**
 * @brief Resolve pads_begin/pads_end for a forward convolution.
 *
 * Non-auto pad modes copy the operator's paddings. For SAME_UPPER/SAME_LOWER the
 * padding needed to produce ceil(input / stride) output is computed per axis when
 * the involved dimensions are static.
 *
 * @return True when all paddings were resolved, false if a dynamic dimension
 *         prevented computing some axis.
 */
template <class ConvType, class ShapeType>
bool resolve_auto_pad_for_shape(const ConvType* op,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::vector<ShapeType>& input_shapes,
                                const int64_t& num_non_spatial_data_dims,
                                const int64_t& num_non_spatial_filter_dims) {
    const auto& auto_pad = op->m_auto_pad;
    if (auto_pad != op::PadType::SAME_UPPER && auto_pad != op::PadType::SAME_LOWER) {
        // EXPLICIT/VALID: take paddings from operator attributes as-is.
        pads_begin = op->m_pads_begin;
        pads_end = op->m_pads_end;
        return true;
    }

    auto input_shape = input_shapes[0];
    auto filters_shape = input_shapes[1];
    const auto num_spatial = op->m_num_spatial != -1 ? op->m_num_spatial
                                                     : calculate_num_spatial(op,
                                                                             input_shape,
                                                                             filters_shape,
                                                                             num_non_spatial_data_dims,
                                                                             num_non_spatial_filter_dims);
    if (!dynamic_check<ShapeType>(num_spatial))
        return false;

    // Align dynamic-rank shapes so spatial indexing below is valid.
    if (input_shape.rank().is_dynamic())
        input_shape.resize(num_spatial + num_non_spatial_data_dims);
    if (filters_shape.rank().is_dynamic())
        filters_shape.resize(num_spatial + num_non_spatial_filter_dims);

    const auto& strides = op->m_strides;
    const auto& dilations = op->m_dilations;
    pads_begin.resize(num_spatial);
    pads_end.resize(num_spatial);

    bool status = true;
    for (int64_t i = 0; i < num_spatial; ++i) {
        const auto& input_dim = input_shape[i + num_non_spatial_data_dims];
        const auto& filters_dim = filters_shape[i + num_non_spatial_filter_dims];
        if (input_dim.is_static() && filters_dim.is_static()) {
            const int64_t& window_dilated_dim = (filters_dim.get_length() - 1) * dilations[i] + 1;
            NODE_VALIDATION_CHECK(op,
                                  window_dilated_dim > 0,
                                  "Window after dilation has dimension less than 1 (dim: ",
                                  window_dilated_dim,
                                  ") at axis ",
                                  i,
                                  ".");

            // SAME padding: output = ceil(input / stride); pad the difference.
            const int64_t& image_size = input_dim.get_length();
            const int64_t& filter_stride = strides[i];
            const int64_t& output_size = (image_size + filter_stride - 1) / filter_stride;

            const int64_t& tmp = (output_size - 1) * filter_stride + window_dilated_dim;
            const int64_t& padding_needed = tmp > image_size ? tmp - image_size : 0;

            // SAME_UPPER puts the extra unit of odd padding at the end.
            const size_t& padding_lhs = static_cast<size_t>(padding_needed / 2);
            const size_t& padding_rhs = static_cast<size_t>(padding_needed - padding_lhs);
            pads_begin[i] = auto_pad == op::PadType::SAME_UPPER ? padding_lhs : padding_rhs;
            pads_end[i] = auto_pad == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs;
        } else {
            status = false;
        }
    }
    return status;
}
/**
 * @brief Dimension division rounded up: quotient = ceil(dividend / divisor).
 *
 * NOTE(review): the source body was diff-corrupted (lines of an unrelated shape_infer
 * spliced in); reconstructed to mirror divide_floor below. An unbounded dividend
 * (max length -1) yields an unbounded quotient.
 * Fix: the assert condition now matches its message — the previous `>= 0` admitted
 * a division by zero for divisor == 0.
 */
template <class DimType>
void divide_ceil(const DimType& dividend, const typename DimType::value_type& divisor, DimType& quotient) {
    OPENVINO_ASSERT(divisor > 0, "divisor must be greater than 0");
    if (dividend.get_max_length() == -1) {
        quotient = -1;
    } else {
        auto lb = static_cast<int64_t>(ceil(1. * dividend.get_min_length() / divisor));
        auto ub = static_cast<int64_t>(ceil(1. * dividend.get_max_length() / divisor));
        quotient = DimType(lb, ub);
    }
}
/**
 * @brief Dimension division rounded down: quotient = floor(dividend / divisor).
 *
 * An unbounded dividend (max length -1) yields an unbounded quotient.
 * Fix: the assert condition now matches its message — the previous `>= 0` admitted
 * a division by zero for divisor == 0.
 */
template <class DimType>
void divide_floor(const DimType& dividend, const typename DimType::value_type& divisor, DimType& quotient) {
    OPENVINO_ASSERT(divisor > 0, "divisor must be greater than 0");
    if (dividend.get_max_length() == -1) {
        quotient = -1;
    } else {
        auto lb = static_cast<size_t>(floor(1. * dividend.get_min_length() / divisor));
        auto ub = static_cast<size_t>(floor(1. * dividend.get_max_length() / divisor));
        quotient = DimType(lb, ub);
    }
}
/**
 * @brief Compute output spatial dimensions of a (group) convolution into output_shape.
 *
 * With SAME_UPPER/SAME_LOWER auto padding each output dim is ceil(input / stride);
 * otherwise floor((padded input - dilated window) / stride) + 1, validating for
 * static dims that the dilated window is positive and fits into the padded input.
 */
template <class ConvType, class ShapeType>
void calculate_output_spatial_dims_for_convolution(const ConvType* op,
                                                   const ShapeType& input_shape,
                                                   const ShapeType& filters_shape,
                                                   ShapeType& output_shape,
                                                   const int64_t& num_spatial,
                                                   const Strides& strides,
                                                   const Strides& dilations,
                                                   const CoordinateDiff& pads_begin,
                                                   const CoordinateDiff& pads_end,
                                                   const int64_t& num_non_spatial_data_dims,
                                                   const int64_t& num_non_spatial_filter_dims) {
    bool auto_pad = op->get_auto_pad() == op::PadType::SAME_UPPER || op->get_auto_pad() == op::PadType::SAME_LOWER;
    for (int64_t i = 0; i < num_spatial; ++i) {
        auto input_dim = input_shape[i + num_non_spatial_data_dims];
        if (auto_pad) {
            // SAME padding guarantees output = ceil(input / stride); pads are irrelevant here.
            divide_ceil(input_dim, strides[i], output_shape[i + num_non_spatial_data_dims]);
            continue;
        }
        const auto& filters_dim = filters_shape[i + num_non_spatial_filter_dims];
        const auto& window_dilated_dim = (filters_dim - 1) * dilations[i] + 1;
        const auto& data_padded_dilated_dim = input_dim + pads_begin[i] + pads_end[i];
        if (input_dim.is_static() && filters_dim.is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  window_dilated_dim.get_length() > 0,
                                  "Window after dilation has dimension less than 1 (dim: ",
                                  window_dilated_dim,
                                  ") at axis ",
                                  i,
                                  ".");
            NODE_VALIDATION_CHECK(op,
                                  window_dilated_dim.get_length() <= data_padded_dilated_dim.get_length(),
                                  "Window after dilation has dimension (dim: ",
                                  window_dilated_dim,
                                  ") larger than the data shape after padding (dim: ",
                                  data_padded_dilated_dim,
                                  ") at axis ",
                                  i,
                                  ".");
        }
        divide_floor(data_padded_dilated_dim - window_dilated_dim,
                     strides[i],
                     output_shape[i + num_non_spatial_data_dims]);
        output_shape[i + num_non_spatial_data_dims] += 1;
    }
}
/**
 * @brief Shape inference for v1::Convolution.
 *
 * Output is [N, C_OUT, spatial...]: batch from data, channels from filters_shape[0],
 * spatial dims via calculate_output_spatial_dims_for_convolution. Validates matching
 * ranks and that data channels are compatible with filter input channels.
 */
template <class T>
void shape_infer(const Convolution* op,
                 const CoordinateDiff& pads_begin,
                 const CoordinateDiff& pads_end,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);
    constexpr size_t num_non_spatial_data_dims = 2, num_non_spatial_filter_dims = 2;

    auto input_shape = input_shapes[0], filters_shape = input_shapes[1];
    // Prefer the count already deduced on the operator; derive from shapes otherwise.
    const auto num_spatial = op->m_num_spatial != -1 ? op->m_num_spatial
                                                     : calculate_num_spatial(op,
                                                                             input_shape,
                                                                             filters_shape,
                                                                             num_non_spatial_data_dims,
                                                                             num_non_spatial_filter_dims);
    NODE_VALIDATION_CHECK(op,
                          num_spatial != -1,
                          "Convolution shape_infer should be provided with correct num_spatial attribute");

    // Align dynamic-rank shapes to the expected rank so indexing below is valid.
    if (input_shape.rank().is_dynamic())
        input_shape.resize(num_spatial + 2);
    if (filters_shape.rank().is_dynamic())
        filters_shape.resize(num_spatial + 2);

    NODE_VALIDATION_CHECK(
        op,
        (static_cast<int64_t>(input_shape.size()) == static_cast<int64_t>(num_spatial + num_non_spatial_data_dims)) &&
            (static_cast<int64_t>(filters_shape.size()) ==
             static_cast<int64_t>(num_spatial + num_non_spatial_filter_dims)),
        "Data batch and filters rank do not match (data batch shape: ",
        input_shape,
        ", filters shape: ",
        filters_shape,
        ").");

    // ranks are originally static or aligned with num_spatial, attributes assumed to be valid
    auto& output_shape = output_shapes[0];
    output_shape.resize(num_spatial + num_non_spatial_data_dims);
    output_shape[0] = input_shape[0];
    output_shape[1] = filters_shape[0];

    NODE_VALIDATION_CHECK(op,
                          input_shape[1].compatible(filters_shape[1]),
                          "Data batch channel count (",
                          input_shape[1],
                          ") does not match filter input ",
                          "channel count (",
                          filters_shape[1],
                          ").");

    calculate_output_spatial_dims_for_convolution(op,
                                                  input_shape,
                                                  filters_shape,
                                                  output_shape,
                                                  num_spatial,
                                                  op->m_strides,
                                                  op->m_dilations,
                                                  pads_begin,
                                                  pads_end,
                                                  num_non_spatial_data_dims,
                                                  num_non_spatial_filter_dims);
}
/**
 * @brief Shape inference for v1::GroupConvolution.
 *
 * Filters are [GROUPS, C_OUT, C_IN, spatial...]; output channels are GROUPS * C_OUT.
 * A dynamic GROUPS is recovered from data channels / filter input channels when both
 * are static; GROUPS/C_IN consistency with the data channel count is validated.
 */
template <class T>
void shape_infer(const GroupConvolution* op,
                 const CoordinateDiff& pads_begin,
                 const CoordinateDiff& pads_end,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);
    auto input_shape = input_shapes[0], filters_shape = input_shapes[1];
    constexpr size_t num_non_spatial_data_dims = 2, num_non_spatial_filter_dims = 3;

    const auto num_spatial = op->m_num_spatial != -1 ? op->m_num_spatial
                                                     : calculate_num_spatial(op,
                                                                             input_shape,
                                                                             filters_shape,
                                                                             num_non_spatial_data_dims,
                                                                             num_non_spatial_filter_dims);
    NODE_VALIDATION_CHECK(op,
                          num_spatial != -1,
                          "GroupConvolution shape_infer should be provided with correct num_spatial attribute");

    // Align dynamic-rank shapes to the expected rank so indexing below is valid.
    if (input_shape.rank().is_dynamic())
        input_shape.resize(num_spatial + num_non_spatial_data_dims);
    if (filters_shape.rank().is_dynamic())
        filters_shape.resize(num_spatial + num_non_spatial_filter_dims);

    NODE_VALIDATION_CHECK(
        op,
        (static_cast<int64_t>(input_shape.size()) == static_cast<int64_t>((num_spatial + num_non_spatial_data_dims))) &&
            (static_cast<int64_t>(filters_shape.size()) ==
             static_cast<int64_t>((num_spatial + num_non_spatial_filter_dims))),
        "Data batch and filters rank do not match (data batch shape: ",
        input_shape,
        ", filters shape: ",
        filters_shape,
        ").");

    // ranks are originally static or aligned with num_spatial, attributes assumed to be valid
    auto& output_shape = output_shapes[0];
    output_shape.resize(num_spatial + num_non_spatial_data_dims);
    output_shape[0] = input_shape[0];

    auto groups = filters_shape[0];
    if (groups.is_dynamic()) {
        // [N, GROUPS * C_IN, ...] x [GROUPS, C_OUT, C_IN, ...] = [N, GROUPS * C_OUT, ...]
        if (input_shape[1].is_static() && filters_shape[2].is_static()) {
            using DimensionType = typename std::iterator_traits<typename T::iterator>::value_type;
            auto n_data_channels = input_shape[1].get_length();
            auto input_channels = filters_shape[2].get_length();
            NODE_VALIDATION_CHECK(op, (n_data_channels % input_channels) == 0);
            groups = DimensionType(n_data_channels / input_channels);
        }
    }
    if (input_shape[1].is_static()) {
        // GROUPS and C_IN consistency checks
        if (groups.is_static() && filters_shape[2].is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  input_shape[1].get_length() / groups.get_length() == filters_shape[2].get_length(),
                                  "Input channels dimension of data batch has incompatible value "
                                  "with filter shape.");
        } else if (groups.is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  input_shape[1].get_length() % groups.get_length() == 0,
                                  "Input channels dimension of data batch not a multiple of group size.");
        }
    }
    output_shape[1] = groups * filters_shape[1];

    calculate_output_spatial_dims_for_convolution(op,
                                                  input_shape,
                                                  filters_shape,
                                                  output_shape,
                                                  num_spatial,
                                                  op->m_strides,
                                                  op->m_dilations,
                                                  pads_begin,
                                                  pads_end,
                                                  num_non_spatial_data_dims,
                                                  num_non_spatial_filter_dims);
}
/**
 * @brief Calculate number of spatial dimensions for back propagation convolutions.
 *
 * Extends the forward deduction with two extra sources: the output_padding attribute
 * size and, when present and static, the rank-1 "output shape" input whose element
 * count equals the spatial count. Cross-checks consistency between the sources.
 */
template <class ConvType>
int64_t calculate_num_spatial(const ConvType* op,
                              const PartialShape& input_shape,
                              const PartialShape& filters_shape,
                              const PartialShape& output_shapes_shape,
                              const int64_t& num_non_spatial_data_dims,
                              const int64_t& num_non_spatial_filter_dims) {
    auto num_spatial = op->m_num_spatial;
    if (num_spatial == -1) {
        num_spatial = calculate_num_spatial(op,
                                            input_shape,
                                            filters_shape,
                                            num_non_spatial_data_dims,
                                            num_non_spatial_filter_dims);

        if (const auto& size = op->m_output_padding.size()) {
            NODE_VALIDATION_CHECK(op,
                                  num_spatial == -1 || num_spatial == static_cast<int64_t>(size),
                                  "Output padding should be defined for all and only spatial dimensions.");
            num_spatial = static_cast<int64_t>(size);
        }

        if (output_shapes_shape.is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  output_shapes_shape.size() == 1,
                                  "Input delivering output shape must have rank 1");
            NODE_VALIDATION_CHECK(op,
                                  num_spatial == -1 || num_spatial == output_shapes_shape[0].get_length(),
                                  "Output shape should be specified only and for all spatial dimensions.");
            num_spatial = static_cast<int64_t>(output_shapes_shape[0].get_length());
        }
    }
    return num_spatial;
}
/**
 * @brief Back propagation variant of calculate_num_spatial for any shape type.
 *
 * Delegates to the PartialShape overload by converting all three shapes.
 */
template <class ConvType, class ShapeType>
int64_t calculate_num_spatial(const ConvType* op,
                              const ShapeType& input_shape,
                              const ShapeType& filters_shape,
                              const ShapeType& output_shapes_shape,
                              const int64_t& num_non_spatial_data_dims,
                              const int64_t& num_non_spatial_filter_dims) {
    return calculate_num_spatial(op,
                                 input_shape.to_partial_shape(),
                                 filters_shape.to_partial_shape(),
                                 output_shapes_shape.to_partial_shape(),
                                 num_non_spatial_data_dims,
                                 num_non_spatial_filter_dims);
}
/**
 * @brief Normalize and validate back propagation convolution attributes.
 *
 * Runs the common attribute normalization, then defaults an empty output_padding
 * to zeros and checks it has exactly `num_spatial` elements. Mutates the operator.
 */
template <class ConvType>
void update_and_validate_attributes_back_prop(ConvType* op, int64_t num_spatial) {
    if (num_spatial != -1) {
        update_and_validate_attributes(op, num_spatial);
        auto& output_padding = op->m_output_padding;
        if (output_padding.empty())
            output_padding = CoordinateDiff(num_spatial, 0);
        NODE_VALIDATION_CHECK(op,
                              static_cast<int64_t>(output_padding.size()) == num_spatial,
                              "Output padding should be defined for all and only "
                              "spatial dimensions..");
    } else if (op->m_num_spatial != -1) {
        // Caller did not know the spatial count but the operator does: retry with it.
        update_and_validate_attributes_back_prop(op, op->m_num_spatial);
    }
}
/**
 * @brief Resolve pads_begin/pads_end for a back propagation convolution.
 *
 * Non-auto pad modes copy the operator's paddings. Without the third (output shape)
 * input, SAME padding degenerates to zeros. Otherwise the total padding per axis is
 * stride*(data-1) + dilation*(filter-1) + 1 - out + output_padding, split so that
 * SAME_UPPER puts the larger half at begin.
 *
 * @return True when all paddings were resolved, false if a dynamic dimension
 *         prevented computing some axis.
 */
template <class ConvType, class ShapeType>
bool resolve_auto_pad_for_shape_back_prop(const ConvType* op,
                                          CoordinateDiff& pads_begin,
                                          CoordinateDiff& pads_end,
                                          const std::vector<ShapeType>& input_shapes,
                                          ShapeType& output_spatial_shape,
                                          const int64_t& num_non_spatial_data_dims,
                                          const int64_t& num_non_spatial_filter_dims) {
    const auto& auto_pad = op->m_auto_pad;
    if (auto_pad != PadType::SAME_UPPER && auto_pad != PadType::SAME_LOWER) {
        pads_begin = op->m_pads_begin;
        pads_end = op->m_pads_end;
        return true;
    }

    const auto& num_spatial = op->m_num_spatial;
    if (!dynamic_check<ShapeType>(num_spatial))
        return false;

    if (input_shapes.size() != 3) {
        // No explicit output shape input: SAME padding collapses to zeros.
        pads_begin = CoordinateDiff(num_spatial, 0);
        pads_end = CoordinateDiff(num_spatial, 0);
        return true;
    }
    OPENVINO_ASSERT(input_shapes.size() == 3 && (auto_pad == PadType::SAME_UPPER || auto_pad == PadType::SAME_LOWER));

    pads_begin = CoordinateDiff(num_spatial, 0);
    pads_end = CoordinateDiff(num_spatial, 0);
    if (output_spatial_shape.rank().is_dynamic())
        output_spatial_shape.resize(num_spatial);

    auto input_shape = input_shapes[0];
    auto filters_shape = input_shapes[1];

    // Align dynamic-rank shapes so spatial indexing below is valid.
    if (input_shape.rank().is_dynamic())
        input_shape.resize(num_spatial + num_non_spatial_data_dims);
    if (filters_shape.rank().is_dynamic())
        filters_shape.resize(num_spatial + num_non_spatial_filter_dims);

    bool status = true;
    for (auto i = 0; i < num_spatial; ++i) {
        const auto& data_dim = input_shape[i + num_non_spatial_data_dims];
        const auto& filter_dim = filters_shape[i + num_non_spatial_filter_dims];
        const auto& output_dim = output_spatial_shape[i];
        const auto& output_padding = op->m_output_padding[i];

        if (data_dim.is_static() && filter_dim.is_static() && output_dim.is_static()) {
            const auto& strides = op->m_strides[i];
            const auto& dilations = op->m_dilations[i];
            int total_padding = std::max<int>(
                static_cast<int>(strides * (data_dim.get_length() - 1) + dilations * (filter_dim.get_length() - 1) + 1 -
                                 output_dim.get_length() + output_padding),
                0);
            if (auto_pad != op::PadType::SAME_UPPER) {
                pads_begin[i] = total_padding / 2;
                pads_end[i] = total_padding - pads_begin[i];
            } else {
                pads_end[i] = total_padding / 2;
                pads_begin[i] = total_padding - pads_end[i];
            }
        } else {
            status = false;
        }
    }
    return status;
}
/**
 * @brief Shape inference for v1::ConvolutionBackpropData.
 *
 * Output is [N, C_OUT, spatial...] where C_OUT = filters_shape[1]. Spatial dims come
 * from the optional third input when provided, otherwise from the back-prop formula
 * (in - 1)*stride + (filter - 1)*dilation + 1 - pads + output_padding.
 */
template <class T>
void shape_infer(const ConvolutionBackpropData* op,
                 const CoordinateDiff& pads_begin,
                 const CoordinateDiff& pads_end,
                 const T& output_shape_from_input,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes) {
    constexpr size_t num_non_spatial_data_dims = 2, num_non_spatial_filter_dims = 2;
    size_t input_size = input_shapes.size();
    NODE_VALIDATION_CHECK(op, (input_size == 2 || input_size == 3) && output_shapes.size() == 1);
    auto input_shape = input_shapes[0], filters_shape = input_shapes[1];
    // With the third input present, its shape also constrains the spatial count.
    const auto num_spatial = op->m_num_spatial != -1
                                 ? op->m_num_spatial
                             : input_size == 3 ? calculate_num_spatial(op,
                                                                       input_shape,
                                                                       filters_shape,
                                                                       input_shapes[2],
                                                                       num_non_spatial_data_dims,
                                                                       num_non_spatial_filter_dims)
                                               : calculate_num_spatial(op,
                                                                       input_shape,
                                                                       filters_shape,
                                                                       num_non_spatial_data_dims,
                                                                       num_non_spatial_filter_dims);

    NODE_VALIDATION_CHECK(op,
                          num_spatial != -1,
                          "ConvolutionBackpropData shape_infer should be provided with correct num_spatial attribute");

    NODE_VALIDATION_CHECK(op,
                          num_spatial == 1 || num_spatial == 2 || num_spatial == 3,
                          "Data and filters inputs must have rank 3, 4 or 5");

    // Align dynamic-rank shapes to the expected rank so indexing below is valid.
    if (input_shape.rank().is_dynamic())
        input_shape.resize(num_spatial + num_non_spatial_data_dims);
    if (filters_shape.rank().is_dynamic())
        filters_shape.resize(num_spatial + num_non_spatial_filter_dims);

    NODE_VALIDATION_CHECK(
        op,
        (static_cast<int64_t>(input_shape.size()) == static_cast<int64_t>(num_spatial + num_non_spatial_data_dims)) &&
            (static_cast<int64_t>(filters_shape.size()) ==
             static_cast<int64_t>(num_spatial + num_non_spatial_filter_dims)),
        "Data and filters rank do not match (data batch shape: ",
        input_shape,
        ", filters shape: ",
        filters_shape,
        ").");

    // ranks are originally static or aligned with num_spatial, attributes assumed to be valid
    auto& output_shape = output_shapes[0];
    output_shape.resize(num_spatial + num_non_spatial_data_dims);
    output_shape[0] = input_shape[0];
    output_shape[1] = filters_shape[1];

    NODE_VALIDATION_CHECK(op,
                          input_shape[1].compatible(filters_shape[0]),
                          "Input channels dimension of data and filters inputs must be equal");

    if (input_size == 3) {
        // Spatial dims are delivered directly by the output-shape input.
        if (output_shape_from_input.rank().is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  static_cast<int64_t>(output_shape_from_input.size()) == num_spatial,
                                  "Output shape should be specified only and for all spatial dimensions.");
            for (int64_t i = 0; i < num_spatial; ++i)
                output_shape[i + num_non_spatial_data_dims] = output_shape_from_input[i];
        }
    } else {
        const auto& strides = op->m_strides;
        const auto& dilations = op->m_dilations;
        const auto& output_padding = op->m_output_padding;
        for (int64_t i = 0; i < num_spatial; ++i) {
            const auto &data_idx = i + num_non_spatial_data_dims, filter_idx = i + num_non_spatial_filter_dims;
            output_shape[data_idx] = (input_shape[data_idx] - 1) * strides[i] +
                                     (filters_shape[filter_idx] - 1) * dilations[i] + 1 - pads_begin[i] - pads_end[i] +
                                     output_padding[i];
        }
    }
}
/**
 * @brief Shape inference for v1::GroupConvolutionBackpropData.
 *
 * Filters are [GROUPS, C_IN, C_OUT, spatial...]; output channels are
 * filters_shape[2] * GROUPS. A dynamic GROUPS is recovered from data channels /
 * filter C_IN when both are static. Spatial dims come from the optional third
 * input or from the back-prop formula, as in ConvolutionBackpropData.
 */
template <class T>
void shape_infer(const GroupConvolutionBackpropData* op,
                 const CoordinateDiff& pads_begin,
                 const CoordinateDiff& pads_end,
                 const T& output_shape_from_input,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes) {
    constexpr size_t num_non_spatial_data_dims = 2, num_non_spatial_filter_dims = 3;
    size_t input_size = input_shapes.size();
    NODE_VALIDATION_CHECK(op, (input_size == 2 || input_size == 3) && output_shapes.size() == 1);
    auto input_shape = input_shapes[0], filters_shape = input_shapes[1];
    // With the third input present, its shape also constrains the spatial count.
    const auto num_spatial = op->m_num_spatial != -1
                                 ? op->m_num_spatial
                             : input_size == 3 ? calculate_num_spatial(op,
                                                                       input_shape,
                                                                       filters_shape,
                                                                       input_shapes[2],
                                                                       num_non_spatial_data_dims,
                                                                       num_non_spatial_filter_dims)
                                               : calculate_num_spatial(op,
                                                                       input_shape,
                                                                       filters_shape,
                                                                       num_non_spatial_data_dims,
                                                                       num_non_spatial_filter_dims);

    NODE_VALIDATION_CHECK(
        op,
        num_spatial != -1,
        "GroupConvolutionBackpropData shape_infer should be provided with correct num_spatial attribute");

    NODE_VALIDATION_CHECK(op,
                          num_spatial == 1 || num_spatial == 2 || num_spatial == 3,
                          "Data and filters inputs must have rank 3, 4 or 5");

    // Align dynamic-rank shapes to the expected rank so indexing below is valid.
    if (input_shape.rank().is_dynamic())
        input_shape.resize(num_spatial + num_non_spatial_data_dims);
    if (filters_shape.rank().is_dynamic())
        filters_shape.resize(num_spatial + num_non_spatial_filter_dims);

    NODE_VALIDATION_CHECK(
        op,
        (static_cast<int64_t>(input_shape.size()) == static_cast<int64_t>(num_spatial + num_non_spatial_data_dims)) &&
            (static_cast<int64_t>(filters_shape.size()) ==
             static_cast<int64_t>(num_spatial + num_non_spatial_filter_dims)),
        "Data and filters rank do not match (data batch shape: ",
        input_shape,
        ", filters shape: ",
        filters_shape,
        ").");

    // ranks are originally static or aligned with num_spatial, attributes assumed to be valid
    auto& output_shape = output_shapes[0];
    output_shape.resize(num_spatial + num_non_spatial_data_dims);
    output_shape[0] = input_shape[0];

    auto groups = filters_shape[0];
    if (groups.is_dynamic()) {
        // [N, GROUPS * C_IN, ...] x [GROUPS, C_IN, C_OUT, ...] = [N, GROUPS * C_OUT, ...]
        if (input_shape[1].is_static() && filters_shape[1].is_static()) {
            using DimensionType = typename std::iterator_traits<typename T::iterator>::value_type;
            auto n_data_channels = input_shape[1].get_length();
            auto input_channels = filters_shape[1].get_length();
            NODE_VALIDATION_CHECK(op, (n_data_channels % input_channels) == 0);
            groups = DimensionType(n_data_channels / input_channels);
        }
    }
    if (input_shape[1].is_static()) {
        // GROUPS and C_IN consistency checks
        if (groups.is_static() && filters_shape[1].is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  input_shape[1].get_length() / groups.get_length() == filters_shape[1].get_length(),
                                  "Input channels dimension of data batch has incompatible value "
                                  "with filter shape.");
        } else if (groups.is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  input_shape[1].get_length() % groups.get_length() == 0,
                                  "Input channels dimension of data batch not a multiple of group size.");
        }
    }
    output_shape[1] = filters_shape[2] * groups;

    if (input_size == 3) {
        // Spatial dims are delivered directly by the output-shape input.
        if (output_shape_from_input.rank().is_static()) {
            NODE_VALIDATION_CHECK(op,
                                  static_cast<int64_t>(output_shape_from_input.size()) == num_spatial,
                                  "Output shape should be specified only and for all spatial dimensions.");
            for (int64_t i = 0; i < num_spatial; ++i)
                output_shape[i + num_non_spatial_data_dims] = output_shape_from_input[i];
        }
    } else {
        const auto& strides = op->m_strides;
        const auto& dilations = op->m_dilations;
        const auto& output_padding = op->m_output_padding;
        for (int64_t i = 0; i < num_spatial; ++i) {
            const auto &data_idx = i + num_non_spatial_data_dims, filter_idx = i + num_non_spatial_filter_dims;
            output_shape[data_idx] = (input_shape[data_idx] - 1) * strides[i] +
                                     (filters_shape[filter_idx] - 1) * dilations[i] + 1 - pads_begin[i] - pads_end[i] +
                                     output_padding[i];
        }
    }
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,357 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "dimension_util.hpp"
#include "openvino/op/util/convolution_backprop_base.hpp"
#include "openvino/op/util/convolution_base.hpp"
#include "pooling_shape_inference_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace util {
// Sentinel meaning the number of spatial dimensions has not been deduced yet.
constexpr size_t num_spatial_undefined = std::numeric_limits<size_t>::max();
// Data layout is [N, C, spatial...], so spatial dimensions start at index 2.
constexpr size_t spatial_dim_offset = 2;
/**
 * @brief Get number of spatial dimensions from input shapes.
 *
 * Deduces the count from the data rank when static, otherwise from the filters rank;
 * `num_spatial_undefined` when both ranks are dynamic.
 *
 * @tparam TShape Shape type.
 * @param data_shape                    Input data shape.
 * @param filter_shape                  Input filter shape.
 * @param filter_non_spatial_dims_count Number of non-spatial dimensions in filter shape.
 * @return Number of spatial dimensions or `num_spatial_undefined` if it cannot be deduced.
 */
template <class TShape>
size_t num_spatial_from_shapes(const TShape& data_shape,
                               const TShape& filter_shape,
                               const size_t filter_non_spatial_dims_count) {
    const auto& data_rank = data_shape.rank();
    const auto& filters_rank = filter_shape.rank();

    size_t num_spatial;

    if (data_rank.is_static()) {
        // NOTE(review): assumes rank >= spatial_dim_offset; a smaller rank would wrap
        // the unsigned subtraction — presumably validated by callers, confirm.
        num_spatial = data_rank.get_length() - spatial_dim_offset;
    } else if (filters_rank.is_static()) {
        num_spatial = filters_rank.get_length() - filter_non_spatial_dims_count;
    } else {
        num_spatial = num_spatial_undefined;
    }

    return num_spatial;
}
/**
 * @brief Checks if validation of attributes is required.
 *
 * @param op Pointer to convolution base operator.
 * @return True if internal number of spatial dimensions is not defined, otherwise false.
 */
inline bool is_attr_validation_required(const ConvolutionBase* op) {
    return op->m_num_spatial == num_spatial_undefined;
}
/**
 * @brief Get the number of spatial dimensions stored on the operator.
 *
 * @param op Pointer to convolution base operator.
 * @return Number of spatial dimensions, or `num_spatial_undefined` if not deduced yet.
 */
inline size_t get_num_spatial(const ConvolutionBase* op) {
    return op->m_num_spatial;
}
} // namespace util
namespace convolution {
// Aliases of the util-level constants for use within the convolution namespace.
constexpr auto num_spatial_undefined = util::num_spatial_undefined;
constexpr size_t spatial_dim_offset = 2;
/**
 * @brief Provides convolution filter non spatial dimension count.
 *
 * @note If a specific convolution operator requires a different value, provide a
 *       specialization of this function for that operator type.
 * @tparam TConv Type of convolution operator.
 * @return Default non-spatial filter dimension count for convolution operators (2).
 */
template <class TConv>
constexpr size_t filter_non_spatial_dims_count() {
    return 2;
}
/**
 * @brief Checks if operator's auto_pad attribute is SAME_LOWER or SAME_UPPER.
 *
 * @tparam TOp Type of operator (must have get_auto_pad member function).
 * @param op Pointer to operator.
 * @return True if automatic "same" padding is enabled.
 */
template <class TOp>
bool is_auto_pad(const TOp* op) {
    const auto pad_type = op->get_auto_pad();
    return pad_type == PadType::SAME_LOWER || pad_type == PadType::SAME_UPPER;
}
/**
* @brief Resize paddings if empty to number of spatial dimensions.
*
* @param num_spatial Number of spatial dimensions.
* @param pads_begin Begin padding to resize.
* @param pads_end End padding to resize.
*/
inline void resize_empty_padding(const size_t num_spatial, CoordinateDiff& pads_begin, CoordinateDiff& pads_end) {
if (pads_begin.empty()) {
pads_begin.resize(num_spatial);
}
if (pads_end.empty()) {
pads_end.resize(num_spatial);
}
}
/**
 * @brief Deduce number of spatial dimensions from operator attributes.
 *
 * Checks strides, dilations, pads begin and pads end (in that order) and returns the
 * size of the first non-empty attribute; num_spatial_undefined when all are empty.
 *
 * @param op Pointer to convolution base operator.
 * @return Number of spatial dimensions or num_spatial_undefined.
 */
inline size_t num_spatial_from_attr(const util::ConvolutionBase* op) {
    for (const auto attr_size : {op->get_strides().size(),
                                 op->get_dilations().size(),
                                 op->get_pads_begin().size(),
                                 op->get_pads_end().size()}) {
        if (attr_size != 0) {
            return attr_size;
        }
    }
    return num_spatial_undefined;
}
/**
 * @brief Calculate number of spatial dimensions for a forward propagation convolution.
 *
 * Tries, in order: the value cached on the operator, the input shapes, and the
 * operator attributes.
 *
 * @tparam TOp    Forward propagation convolution operator type.
 * @tparam TShape Shape type.
 * @param op           Pointer to operator.
 * @param input_shapes Input shapes; data and filters shapes are required.
 * @return Number of spatial dimensions or num_spatial_undefined if it cannot be deduced.
 */
template <class TOp,
          class TShape,
          typename std::enable_if<std::is_base_of<util::ConvolutionFwdPropBase, TOp>::value>::type* = nullptr>
size_t calculate_num_spatial(const TOp* op, const std::vector<TShape>& input_shapes) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() > 1);

    auto num_spatial = get_num_spatial(op);
    if (num_spatial == num_spatial_undefined) {
        num_spatial =
            util::num_spatial_from_shapes(input_shapes[0], input_shapes[1], filter_non_spatial_dims_count<TOp>());
        if (num_spatial == num_spatial_undefined) {
            num_spatial = num_spatial_from_attr(op);
        }
    }
    return num_spatial;
}
/**
 * @brief Apply auto padding for forward convolution.
 *
 * The auto padding can be applied only if inputs and attributes of operator are validated.
 * The input shapes must have got static ranks.
 *
 * @param op            Pointer to convolution operator.
 * @param data_shape    Input data shape (must be static rank).
 * @param filters_shape Input filter shape (must be static rank).
 * @param pads_begin    Iterator to begin of pads begin.
 * @param pads_end      Iterator to begin of pads end.
 */
template <class TOp,
          class TShape,
          class TIter,
          typename std::enable_if<std::is_base_of<util::ConvolutionFwdPropBase, TOp>::value ||
                                  std::is_base_of<util::DeformableConvolutionBase, TOp>::value>::type* = nullptr>
void apply_auto_pad(const TOp* op,
                    const TShape& data_shape,
                    const TShape& filters_shape,
                    TIter pads_begin,
                    TIter pads_end) {
    const auto& dilations = op->get_dilations();
    const auto& strides = op->get_strides();
    const auto num_spatial = strides.size();
    // Walk only the trailing (spatial) dimensions of data and filters.
    auto data_dim = data_shape.cend() - num_spatial;
    auto kernel_dim = filters_shape.cend() - num_spatial;

    // dim::padding returns {smaller_half, larger_half}; swap the targets so the larger
    // half goes to pads_end for SAME_UPPER and to pads_begin for SAME_LOWER.
    const auto padding_swap = op->get_auto_pad() == PadType::SAME_UPPER;
    auto& pad_b = padding_swap ? pads_begin : pads_end;
    auto& pad_e = padding_swap ? pads_end : pads_begin;

    for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim, ++kernel_dim) {
        using namespace ov::util;
        if (kernel_dim->is_static()) {
            std::tie(*pad_b, *pad_e) = dim::padding(*data_dim, kernel_dim->get_length(), dilations[i], strides[i]);
        } else {
            // Padding cannot be computed for a dynamic kernel dimension.
            *pad_b = 0;
            *pad_e = 0;
        }
    }
}
/**
 * @brief Apply padding to a forward propagation convolution based on its auto_pad attribute.
 *
 * @tparam TOp    Convolution operator type.
 * @tparam TShape Shape type.
 *
 * @param op            Pointer to convolution operator.
 * @param data_shape    Input data shape for shape inference.
 * @param filters_shape Input filters shape for shape inference.
 * @param pads_begin    Begin padding to update.
 * @param pads_end      End padding to update.
 */
template <class TOp,
          class TShape,
          typename std::enable_if<std::is_base_of<util::ConvolutionFwdPropBase, TOp>::value ||
                                  std::is_base_of<util::DeformableConvolutionBase, TOp>::value>::type* = nullptr>
void apply_padding(const TOp* op,
                   const TShape& data_shape,
                   const TShape& filters_shape,
                   CoordinateDiff& pads_begin,
                   CoordinateDiff& pads_end) {
    // SAME_* padding can be computed only when both ranks are static.
    if (convolution::is_auto_pad(op) && data_shape.rank().is_static() && filters_shape.rank().is_static()) {
        convolution::apply_auto_pad(op, data_shape, filters_shape, pads_begin.begin(), pads_end.begin());
    } else if (op->get_auto_pad() == op::PadType::VALID) {
        // VALID padding is always zero.
        std::fill(pads_begin.begin(), pads_begin.end(), 0);
        std::fill(pads_end.begin(), pads_end.end(), 0);
    } else if (op->get_auto_pad() == op::PadType::EXPLICIT) {
        // Take the user-provided paddings from the operator attributes.
        std::copy(op->get_pads_begin().begin(), op->get_pads_begin().end(), pads_begin.begin());
        std::copy(op->get_pads_end().begin(), op->get_pads_end().end(), pads_end.begin());
    }
    // NOTE(review): for SAME_* with dynamic ranks the paddings are left as passed in
    // (presumably zero-filled by resize_empty_padding) — confirm with callers.
}
/**
 * @brief Append spatial dimensions at end of output shape of forward propagation convolution.
 *
 * @tparam TOp    Forward propagation convolution operator type.
 * @tparam TShape Type of shape.
 * @param op            Pointer to operator.
 * @param data_shape    Input data shape.
 * @param filters_shape Input filter shape.
 * @param pads_begin    Begin paddings applied to the data spatial dimensions.
 * @param pads_end      End paddings applied to the data spatial dimensions.
 * @param out_shape     Output shape to append spatial dimensions.
 */
template <class TOp,
          class TShape,
          typename std::enable_if<std::is_base_of<util::ConvolutionFwdPropBase, TOp>::value ||
                                  std::is_base_of<util::DeformableConvolutionBase, TOp>::value>::type* = nullptr>
void append_spatial_shape(const TOp* op,
                          const TShape& data_shape,
                          const TShape& filters_shape,
                          CoordinateDiff& pads_begin,
                          CoordinateDiff& pads_end,
                          TShape& out_shape) {
    using namespace ov::util;
    using TDim = typename TShape::value_type;

    const auto& strides = op->get_strides();
    const auto spatial_num = strides.size();

    // Substitute a fully dynamic shape of matching spatial rank when the data rank is dynamic.
    const auto& d_shape = data_shape.rank().is_static() ? data_shape : PartialShape::dynamic(spatial_num);
    auto data_dim = d_shape.cend() - spatial_num;

    if (is_auto_pad(op)) {
        // For SAME_* padding each output spatial dimension is ceil(data_dim / stride).
        std::transform(data_dim, d_shape.cend(), strides.cbegin(), std::back_inserter(out_shape), &dim::ceil_div<TDim>);
    } else {
        const auto& f_shape = filters_shape.rank().is_static() ? filters_shape : PartialShape::dynamic(spatial_num);
        auto filters_dim = f_shape.cend() - spatial_num;
        const auto& dilations = op->get_dilations();

        for (size_t i = 0; i < spatial_num; ++i, ++data_dim, ++filters_dim) {
            auto dim = *data_dim + (pads_begin[i] + pads_end[i]);
            const auto filter_dilated = dim::dilated(*filters_dim, dilations[i]);

            if (dim.is_static() && filter_dilated.is_static()) {
                // Use check from pooling op as it is same.
                pooling::valid_dilated_kernel_with_dim(op, filter_dilated.get_length(), dim, i);
            }

            // out = floor((data + pads - dilated_kernel) / stride) + 1
            dim = dim::floor_div(dim - filter_dilated, strides[i]);
            dim += 1;
            out_shape.push_back(std::move(dim));
        }
    }
}
namespace validate {
/** @brief Validates that data shape rank is compatible with 3, 4 or 5 (1D, 2D or 3D convolution). */
template <class TShape>
void data_shape(const ov::op::util::ConvolutionBase* op, const TShape& data_shape) {
    NODE_VALIDATION_CHECK(op,
                          is_rank_compatible_any_of(data_shape.rank(), {3, 4, 5}),
                          "Expected a 3D, 4D or 5D tensor for the input. Got: ",
                          data_shape);
}
/**
 * @brief Validates filters shape against data shape.
 *
 * Checks that data and filters ranks match and that the data channel dimension
 * is compatible with the filters input channel dimension.
 */
template <class TShape>
void filter_shape(const ov::op::util::ConvolutionBase* op, const TShape& filters_shape, const TShape& data_shape) {
    const auto& data_rank = data_shape.rank();
    const auto& filters_rank = filters_shape.rank();

    NODE_VALIDATION_CHECK(op,
                          data_rank.compatible(filters_rank),
                          "Data batch and filters rank do not match (data batch shape: ",
                          data_shape,
                          ", filters shape: ",
                          filters_shape,
                          ").");

    // Channel comparison requires static ranks of both inputs.
    NODE_VALIDATION_CHECK(
        op,
        data_rank.is_dynamic() || filters_rank.is_dynamic() || data_shape[1].compatible(filters_shape[1]),
        "Data batch channel count (",
        data_shape[1],
        ") does not match filter input channel count (",
        filters_shape[1],
        ").");
}
/**
 * @brief Validates common convolution attributes against the number of spatial dimensions.
 *
 * Checks that strides, dilations and paddings have exactly num_spatial elements and
 * that strides and dilations contain no zeros.
 */
inline void common_attributes(const util::ConvolutionBase* op,
                              const size_t num_spatial,
                              const CoordinateDiff& pads_begin,
                              const CoordinateDiff& pads_end) {
    auto& strides = op->get_strides();
    auto& dilations = op->get_dilations();

    NODE_VALIDATION_CHECK(op,
                          strides.size() == num_spatial,
                          "Strides should be defined for all and only spatial dimensions.");
    NODE_VALIDATION_CHECK(op,
                          dilations.size() == num_spatial,
                          "Dilations should be defined for all and only spatial dimensions.");
    NODE_VALIDATION_CHECK(op,
                          pads_begin.size() == num_spatial && pads_end.size() == pads_begin.size(),
                          "Pads begin and end should be defined for all and only spatial dimensions.");

    // Zero strides or dilations would make the output size computation degenerate.
    constexpr auto is_zero = cmp::Equal<size_t>(0);
    NODE_VALIDATION_CHECK(op,
                          std::none_of(strides.cbegin(), strides.cend(), is_zero),
                          "Strides has zero dimension(s). ",
                          strides);
    NODE_VALIDATION_CHECK(op,
                          std::none_of(dilations.cbegin(), dilations.cend(), is_zero),
                          "Filter dilations has zero dimension(s). ",
                          dilations);
}
/**
 * @brief Validates common attributes of a back propagation convolution.
 *
 * Performs the base convolution checks plus validation of the output padding size.
 */
inline void common_attributes(const util::ConvolutionBackPropBase* op,
                              const size_t num_spatial,
                              const CoordinateDiff& pads_begin,
                              const CoordinateDiff& pads_end) {
    // Reuse base class validation of strides/dilations/pads.
    common_attributes(static_cast<const util::ConvolutionBase*>(op), num_spatial, pads_begin, pads_end);
    NODE_VALIDATION_CHECK(op,
                          op->get_output_padding().size() == num_spatial,
                          "Output padding should be defined for all and only spatial dimensions.");
}
} // namespace validate
} // namespace convolution
} // namespace op
} // namespace ov

View File

@ -0,0 +1,246 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <array>
#include "convolution_shape_inference_util.hpp"
#include "openvino/op/util/deformable_convolution_base.hpp"
namespace ov {
namespace op {
namespace deformable_conv {
/**
 * @brief Deduce number of spatial dimensions for deformable convolution.
 *
 * Uses the data and filters shapes first, then falls back to the offsets shape rank.
 *
 * @tparam TShape Shape type.
 * @param op           Pointer to deformable convolution base operator (unused, kept for API uniformity).
 * @param input_shapes Input shapes: data, offsets, filters, ...
 * @return Number of spatial dimensions or num_spatial_undefined.
 */
template <class TShape>
size_t calculate_num_spatial(const util::DeformableConvolutionBase* op, const std::vector<TShape>& input_shapes) {
    constexpr auto filter_non_spatial = convolution::filter_non_spatial_dims_count<util::DeformableConvolutionBase>();
    const auto from_shapes = util::num_spatial_from_shapes(input_shapes[0], input_shapes[2], filter_non_spatial);
    if (from_shapes != convolution::num_spatial_undefined) {
        return from_shapes;
    } else if (input_shapes[1].rank().is_static()) {
        return input_shapes[1].size() - filter_non_spatial;
    } else {
        return convolution::num_spatial_undefined;
    }
}
namespace validate {
/** @brief Validates that a deformable convolution input has rank compatible with 4. */
template <class TDeformableConv, class TShape>
void input_shape(const TDeformableConv* op, const TShape& shape, const std::string& name) {
    NODE_VALIDATION_CHECK(op, shape.rank().compatible(4), name, " must be of rank 4. Got: ", shape.rank());
}
/** @brief Validates that a group-like attribute ('group' or 'deformable group') is positive. */
template <class TDeformableConv>
void group_attribute(const TDeformableConv* op, int64_t group, const std::string& name) {
    NODE_VALIDATION_CHECK(op, group > 0, "Attribute '", name, "' must be any value starting from 1. Got: ", group);
}
/**
 * @brief Validates that a channels dimension is evenly divisible by the 'group' attribute.
 *
 * @param op   Pointer to deformable convolution operator.
 * @param dim  Channels dimension to check.
 * @param name Input name used in the error message.
 */
template <class TDeformableConv, class TDim>
void group_divisible_dimension(const TDeformableConv* op, const TDim& dim, const std::string& name) {
    // Pass name by const reference (was by value) to avoid a std::string copy per check;
    // consistent with validate::input_shape above.
    const auto group = op->get_group();
    NODE_VALIDATION_CHECK(op,
                          ov::util::dim::is_divisible(dim, group),
                          name,
                          " channels dimension (",
                          dim,
                          ") must be evenly divisible by the 'group': ",
                          group);
}
/**
 * @brief Validates that a channels dimension is evenly divisible by the 'deformable group' attribute.
 *
 * @param op   Pointer to deformable convolution operator.
 * @param dim  Channels dimension to check.
 * @param name Input name used in the error message.
 */
template <class TDeformableConv, class TDim>
void deformable_group_divisible_dimension(const TDeformableConv* op, const TDim& dim, const std::string& name) {
    // Pass name by const reference (was by value) to avoid a std::string copy per check;
    // consistent with validate::input_shape above.
    const auto group = op->get_deformable_group();
    NODE_VALIDATION_CHECK(op,
                          ov::util::dim::is_divisible(dim, group),
                          name,
                          " channels dimension (",
                          dim,
                          ") must be evenly divisible by the 'deformable group': ",
                          group);
}
} // namespace validate
} // namespace deformable_conv
namespace util {
/**
 * @brief Shape inference common to deformable convolution operators.
 *
 * @tparam TShape Shape type.
 * @param op            Pointer to deformable convolution base operator.
 * @param input_shapes  Input shapes: data, offsets, filters (and optionally mask).
 * @param pads_begin    Begin paddings, updated according to the auto_pad attribute.
 * @param pads_end      End paddings, updated according to the auto_pad attribute.
 * @param constant_data Map of constant input data (kept for shape_infer API uniformity).
 * @return Vector with a single output shape.
 */
template <class TShape>
std::vector<TShape> shape_infer(const DeformableConvolutionBase* op,
                                const std::vector<TShape>& input_shapes,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
    static constexpr std::array<const char*, 4> names{"Input", "Offsets", "Filters", "Mask"};
    using namespace ov::util;
    using TDim = typename TShape::value_type;

    const auto num_spatial = deformable_conv::calculate_num_spatial(op, input_shapes);

    TShape output_shape;
    if (num_spatial != convolution::num_spatial_undefined) {
        const auto& data_shape = input_shapes[0];
        const auto& offsets_shape = input_shapes[1];
        const auto& filters_shape = input_shapes[2];

        const auto data_rank = data_shape.rank();
        const auto filters_rank = filters_shape.rank();
        const auto offsets_rank = offsets_shape.rank();

        output_shape.reserve(num_spatial + util::spatial_dim_offset);
        convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);

        // All deformable convolution inputs must be of rank 4.
        for (size_t i = 0; i < input_shapes.size(); ++i) {
            deformable_conv::validate::input_shape(op, input_shapes[i], names[i]);
        }

        deformable_conv::validate::group_attribute(op, op->get_group(), "group");
        deformable_conv::validate::group_attribute(op, op->get_deformable_group(), "deformable group");
        convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);

        convolution::apply_padding(op, data_shape, filters_shape, pads_begin, pads_end);

        // add to output shape number of batches
        if (data_rank.is_static()) {
            deformable_conv::validate::group_divisible_dimension(op, data_shape[1], names[0]);

            output_shape.push_back(data_shape[0]);
        } else {
            output_shape.emplace_back(dim::inf_bound);
        }

        if (offsets_rank.is_static()) {
            if (filters_rank.is_static()) {
                // Offsets channels = filters spatial dims product * 2 * deformable_group
                // (an x and a y offset per kernel point per deformable group).
                auto offsets_channels = filters_shape[2] * filters_shape[3] * 2 * op->get_deformable_group();

                NODE_VALIDATION_CHECK(op,
                                      offsets_shape[1].compatible(offsets_channels),
                                      "The channels dimension of offsets input is not compatible with filters and "
                                      "'deformable group' attribute. Offsets input shape: ",
                                      offsets_shape,
                                      ", deformable 'group' attribute value: ",
                                      op->get_deformable_group(),
                                      ", filters shape: ",
                                      filters_shape);
            }
            deformable_conv::validate::deformable_group_divisible_dimension(op, offsets_shape[1], names[1]);

            // Merge offsets batch into the output batch dimension.
            NODE_VALIDATION_CHECK(op,
                                  TDim::merge(output_shape[0], offsets_shape[0], output_shape[0]),
                                  "Data batch and offsets batch dimension must be same value. Got: ",
                                  output_shape[0],
                                  " and ",
                                  data_shape[0]);
        }

        // add to output shape number output channels
        if (filters_rank.is_static()) {
            deformable_conv::validate::group_divisible_dimension(op, filters_shape[0], names[2]);

            NODE_VALIDATION_CHECK(
                op,
                data_rank.is_dynamic() || data_shape[1].compatible(filters_shape[1] * op->get_group()),
                "Data batch channel count (",
                data_shape[1],
                ") does not match filter input channel count (",
                filters_shape[1] * op->get_group(),
                ")");

            output_shape.push_back(filters_shape[0]);
        } else {
            output_shape.emplace_back(dim::inf_bound);
        }

        convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape);

        // post infer check: offsets spatial dimensions must be compatible with output spatial dimensions.
        if (offsets_rank.is_static()) {
            auto offset_dim = offsets_shape.begin() + util::spatial_dim_offset;
            NODE_VALIDATION_CHECK(op,
                                  std::all_of(output_shape.begin() + util::spatial_dim_offset,
                                              output_shape.end(),
                                              [&offset_dim](const TDim& d) {
                                                  return d.compatible(*offset_dim++);
                                              }),
                                  "Spatial dimensions of offsets and output must be compatible.",
                                  output_shape);
        }
    } else {
        output_shape = PartialShape::dynamic();
    }
    return {output_shape};
}
} // namespace util
namespace v1 {
/** @brief Shape inference for v1 DeformableConvolution (data, offsets and filters inputs; no mask). */
template <class TShape>
std::vector<TShape> shape_infer(const DeformableConvolution* op,
                                const std::vector<TShape>& input_shapes,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
    // v1 has no mask input, so exactly three inputs are expected.
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
    return util::shape_infer(op, input_shapes, pads_begin, pads_end, constant_data);
}
} // namespace v1
namespace v8 {
/**
 * @brief Shape inference for v8 DeformableConvolution (data, offsets, filters and optional mask inputs).
 *
 * Validates the optional mask input (channels, deformable group divisibility, batch)
 * and delegates the rest to the common deformable convolution shape inference.
 *
 * @tparam TShape Shape type.
 * @param op            Pointer to v8 DeformableConvolution operator.
 * @param input_shapes  Input shapes: data, offsets, filters and optionally mask.
 * @param pads_begin    Begin paddings, updated according to the auto_pad attribute.
 * @param pads_end      End paddings, updated according to the auto_pad attribute.
 * @param constant_data Map of constant input data.
 * @return Vector with a single output shape.
 */
template <class TShape>
std::vector<TShape> shape_infer(const DeformableConvolution* op,
                                const std::vector<TShape>& input_shapes,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
    const auto has_mask_shape = input_shapes.size() == 4;
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || has_mask_shape);

    using TDim = typename TShape::value_type;
    // Note: offsets shape is validated by util::shape_infer; only names used here are kept
    // (removed unused offsets locals).
    const auto& data_shape = input_shapes[0];
    const auto& filters_shape = input_shapes[2];

    const auto data_rank = data_shape.rank();
    const auto filters_rank = filters_shape.rank();

    if (has_mask_shape) {
        // Bind by reference — previously the shape was copied by value.
        const auto& mask_shape = input_shapes[3];
        if (mask_shape.rank().is_static()) {
            if (filters_rank.is_static()) {
                // Mask channels = filters spatial dims product * deformable_group.
                auto mask_channels = filters_shape[2] * filters_shape[3] * op->get_deformable_group();

                NODE_VALIDATION_CHECK(op,
                                      mask_shape[1].compatible(mask_channels),
                                      "The channels dimension of mask input is not "
                                      "compatible with filters and 'deformable group' attribute. "
                                      "Mask input shape: ",
                                      mask_shape,
                                      ", deformable 'group' attribute value: ",
                                      op->get_deformable_group(),
                                      ", filters shape: ",
                                      filters_shape);
            }

            deformable_conv::validate::deformable_group_divisible_dimension(op, mask_shape[1], "Mask");

            NODE_VALIDATION_CHECK(op,
                                  data_rank.is_dynamic() || mask_shape[0].compatible(data_shape[0]),
                                  "Data batch and mask batch dimension must be same value. Got: ",
                                  mask_shape[0],
                                  " and ",
                                  data_shape[0]);
        }
    }

    auto output_shapes = util::shape_infer(op, input_shapes, pads_begin, pads_end, constant_data);

    // post infer check: mask spatial dimensions must be compatible with output spatial dimensions.
    if (has_mask_shape && input_shapes[3].rank().is_static() && output_shapes[0].rank().is_static()) {
        auto mask_dim = input_shapes[3].begin() + util::spatial_dim_offset;
        NODE_VALIDATION_CHECK(op,
                              std::all_of(output_shapes[0].begin() + util::spatial_dim_offset,
                                          output_shapes[0].end(),
                                          [&mask_dim](const TDim& d) {
                                              return d.compatible(*mask_dim++);
                                          }),
                              "Spatial dimensions of mask and output must be compatible.");
    }
    return output_shapes;
}
} // namespace v8
} // namespace op
} // namespace ov

View File

@ -5,6 +5,7 @@
#include <cstdint>
#include "openvino/core/dimension.hpp"
#include "openvino/util/common_util.hpp"
namespace ov {
@ -25,17 +26,82 @@ constexpr auto dilated(const T dim, const T dilation) -> T {
return (dim < 1) ? inf_bound : dilation * (dim - 1) + 1;
}
/**
 * @brief Calculate dilated dimension.
 *
 * @tparam TDim Dimension type.
 * @param dim      Dimension.
 * @param dilation Dilation value.
 * @return Dimension after dilation: (dim - 1) * dilation + 1.
 */
template <class TDim>
constexpr auto dilated(const TDim& dim, const typename TDim::value_type dilation) -> TDim {
    return (dim - 1) * dilation + 1;
}
/**
* @brief Calculate padded dimension size as dim size + padding size
*
* @tparam TDim Dimension type as dimension class value type or any arithmetic value.
* @param dim Dimension size value.
* @param pad_num Number of padded dimension.
* @param pad_num Number of padding to add.
* @return Padded dimension value or infinite bound.
*/
constexpr auto padded(const int64_t dim, const int64_t pad_num) -> int64_t {
template <class TDim>
constexpr typename std::enable_if<std::is_arithmetic<TDim>::value, TDim>::type padded(const TDim dim,
const int64_t pad_num) {
return ((dim == inf_bound) || (dim + pad_num < 0)) ? inf_bound : dim + pad_num;
}
/**
 * @brief Calculate padded dimension size as dim + padding size.
 *
 * @note The Dimension + operator cannot be used if padding is '-1' which would
 *       result in a dynamic dimension.
 *
 * @tparam TDim Dimension type as dimension class.
 * @param dim     Dimension.
 * @param pad_num Number of padding to add.
 * @return Padded dimension.
 */
template <class TDim>
typename std::enable_if<std::is_class<TDim>::value, TDim>::type padded(const TDim& dim, const int64_t pad_num) {
    const auto upper_bound = padded(dim.get_max_length(), pad_num);
    return dim.is_static() ? TDim{upper_bound} : TDim{padded(dim.get_min_length(), pad_num), upper_bound};
}
/**
 * @brief Calculate dimension padding required by filter/kernel properties.
 *
 * Provides pair of padding values as left padding is total value of required padding divided by 2 and right as
 * total required padding minus left padding.
 *
 * @param dim         Input dimension to calculate its padding.
 * @param kernel_size Kernel size for input dimension.
 * @param dilation    Kernel dilation.
 * @param stride      Kernel stride.
 * @return Pair of left, right padding values for input dimension.
 */
template <class TDim, class T = typename TDim::value_type>
inline std::pair<T, T> padding(const TDim& dim, const int64_t kernel_size, const int64_t dilation, int64_t stride) {
    if (dim.is_static()) {
        const auto dim_size = static_cast<int64_t>(dim.get_length());
        const auto dilated_kernel = dilated(kernel_size, dilation);
        // Number of kernel applications needed to cover the dimension (ceil division).
        const int64_t tmp = (dim_size + stride - 1) / stride;
        // Total padding so all applications fit; clamped to be non-negative.
        const auto padding = std::max<int64_t>(0, (tmp - 1) * stride + dilated_kernel - dim_size);
        const auto left_padding = padding / 2;
        return {left_padding, padding - left_padding};
    } else {
        // If input dimension is infinite or interval the padding will be set to 0
        // as operator cannot store paddings for both bounds.
        return {0, 0};
    }
}
/**
* @brief Divide dimension using ceil rounding.
*
@ -46,8 +112,9 @@ constexpr auto padded(const int64_t dim, const int64_t pad_num) -> int64_t {
* @param divisor Dimension division.
* @return Divided dimension with bounds round up.
*/
template <class TDim, class T = typename TDim::value_type>
auto ceil_div(const TDim& dim, const T divisor) -> TDim {
template <class TDim>
auto ceil_div(const TDim& dim, const typename TDim::value_type divisor) -> TDim {
using T = decltype(divisor);
if (dim.is_static()) {
return {util::ceil_div<T>(dim.get_length(), divisor)};
} else if (dim.get_max_length() == static_cast<T>(dim::inf_bound)) {
@ -67,8 +134,9 @@ auto ceil_div(const TDim& dim, const T divisor) -> TDim {
* @param divisor Dimension division.
* @return Divided dimension with bound round down.
*/
template <class TDim, class T = typename TDim::value_type>
auto floor_div(const TDim& dim, const T divisor) -> TDim {
template <class TDim>
auto floor_div(const TDim& dim, const typename TDim::value_type divisor) -> TDim {
using T = decltype(divisor);
if (dim.is_static()) {
return {dim.get_length() / divisor};
} else if (dim.get_max_length() == static_cast<T>(dim::inf_bound)) {
@ -78,6 +146,24 @@ auto floor_div(const TDim& dim, const T divisor) -> TDim {
}
}
/**
* @brief Check if dimension is evenly divisible.
*
* @tparam TDim Dimension type.
* @param quotient Dimension to check.
* @param dividend Dividend to check.
* @return true if dimension is divisible other wise false.
*/
template <class TDim>
bool is_divisible(const TDim& quotient, const typename TDim::value_type dividend) {
return quotient / dividend != TDim{};
}
template <>
inline bool is_divisible<Dimension>(const Dimension& quotient, const typename Dimension::value_type dividend) {
return !(quotient / dividend).get_interval().empty();
}
} // namespace dim
} // namespace util
} // namespace ov

View File

@ -0,0 +1,118 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "convolution_backprop_shape_inference.hpp"
#include "openvino/op/group_conv.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace convolution {
/**
 * @brief Defines non-spatial dimension for filters for group convolution back propagation operator.
 *
 * Grouped filters carry an extra leading groups dimension in addition to the two
 * channel dimensions, hence three non-spatial dimensions.
 * @return Value of non-spatial filter dimensions (3).
 */
template <>
constexpr size_t filter_non_spatial_dims_count<v1::GroupConvolutionBackpropData>() {
    return 3;
}
} // namespace convolution
namespace v1 {
/**
 * @brief Shape inference for v1 GroupConvolutionBackpropData.
 *
 * @tparam TShape Shape type.
 * @param op            Pointer to operator.
 * @param input_shapes  Input shapes: data, filters and optionally the requested output spatial shape.
 * @param pads_begin    Begin paddings, updated according to the auto_pad attribute.
 * @param pads_end      End paddings, updated according to the auto_pad attribute.
 * @param constant_data Map of constant input data used to read the output spatial shape.
 * @return Vector with a single output shape.
 */
template <class TShape>
std::vector<TShape> shape_infer(const GroupConvolutionBackpropData* op,
                                const std::vector<TShape>& input_shapes,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
    const auto inputs_count = input_shapes.size();
    const auto has_spatial_shape = inputs_count == 3;
    NODE_VALIDATION_CHECK(op, inputs_count == 2 || has_spatial_shape);
    using namespace ov::util;

    TShape out_spatial_shape;
    if (has_spatial_shape) {
        const auto& spatial_shape = input_shapes[2];
        NODE_VALIDATION_CHECK(op,
                              spatial_shape.rank().compatible(1),
                              "Input delivering output shape must have rank 1.");
        // Read the requested output spatial dimensions from constant data when available,
        // otherwise derive only their count (or leave fully dynamic).
        if (!get_data_as_shape(2, op, out_spatial_shape, constant_data)) {
            if (spatial_shape.is_static()) {
                out_spatial_shape.resize(spatial_shape[0].get_length());
            } else {
                out_spatial_shape = PartialShape::dynamic();
            }
        }
    }

    const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, out_spatial_shape);

    TShape output_shape;
    if (num_spatial != util::num_spatial_undefined) {
        const auto& data_shape = input_shapes[0];
        const auto& filters_shape = input_shapes[1];
        const auto data_rank = data_shape.rank();
        const auto filters_rank = filters_shape.rank();

        NODE_VALIDATION_CHECK(
            op,
            !has_spatial_shape || out_spatial_shape.rank().is_dynamic() || out_spatial_shape.size() == num_spatial,
            "Output shape should be defined for all and only spatial dimensions.");

        convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);
        if (is_attr_validation_required(op)) {
            convolution::validate::data_shape(op, data_shape);

            // Grouped filters carry one more dimension (groups) than data.
            NODE_VALIDATION_CHECK(op,
                                  data_rank.compatible(filters_rank - 1),
                                  "Data and filters rank do not match (data batch shape: ",
                                  data_shape,
                                  ", filters shape: ",
                                  filters_shape,
                                  ").");

            convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);
        }
        convolution::apply_padding(op, input_shapes, out_spatial_shape, pads_begin, pads_end);

        output_shape.reserve(util::spatial_dim_offset + num_spatial);
        output_shape.emplace_back(data_rank.is_static() ? data_shape[0] : dim::inf_bound);

        // add groups dimension
        if (filters_rank.is_static()) {
            auto groups = filters_shape[0];

            if (data_rank.is_static() && filters_shape[1].is_static()) {
                // Groups deduced from data channels / filters input channels must agree with filters[0].
                NODE_VALIDATION_CHECK(
                    op,
                    groups.merge(groups, groups, (data_shape[1] / filters_shape[1].get_length())),
                    "Input channels dimension of data batch is incompatible with filter groups or input channels.");
            }

            // Output channels = groups * per-group output channels (filters index 2).
            groups *= filters_shape[2];
            output_shape.push_back(std::move(groups));
        } else {
            output_shape.emplace_back(dim::inf_bound);
        }

        // add spatial dimensions
        if (has_spatial_shape) {
            output_shape.insert(output_shape.end(),
                                std::make_move_iterator(out_spatial_shape.begin()),
                                std::make_move_iterator(out_spatial_shape.end()));
        } else {
            convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape);
        }
    } else {
        output_shape = PartialShape::dynamic();
    }

    return {output_shape};
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,87 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "convolution_shape_inference_util.hpp"
#include "openvino/op/group_conv.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace convolution {
/**
 * @brief Defines non-spatial dimension for filters for group convolution operator.
 *
 * Grouped filters carry an extra leading groups dimension in addition to the two
 * channel dimensions, hence three non-spatial dimensions.
 * @return Value of non-spatial filter dimensions (3).
 */
template <>
constexpr size_t filter_non_spatial_dims_count<v1::GroupConvolution>() {
    return 3;
}
} // namespace convolution
namespace v1 {
/**
 * @brief Shape inference for v1 GroupConvolution.
 *
 * @tparam TShape Shape type.
 * @param op            Pointer to operator.
 * @param input_shapes  Input shapes: data and filters.
 * @param pads_begin    Begin paddings, updated according to the auto_pad attribute.
 * @param pads_end      End paddings, updated according to the auto_pad attribute.
 * @param constant_data Map of constant input data (unused here, kept for API uniformity).
 * @return Vector with a single output shape.
 */
template <class TShape>
std::vector<TShape> shape_infer(const GroupConvolution* op,
                                const std::vector<TShape>& input_shapes,
                                CoordinateDiff& pads_begin,
                                CoordinateDiff& pads_end,
                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
    using namespace ov::util;

    const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes);

    TShape output_shape;
    if (num_spatial != convolution::num_spatial_undefined) {
        const auto& data_shape = input_shapes[0];
        const auto& filters_shape = input_shapes[1];
        const auto data_rank = data_shape.rank();
        const auto filters_rank = filters_shape.rank();

        convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);
        if (is_attr_validation_required(op)) {
            convolution::validate::data_shape(op, data_shape);

            // Grouped filters carry one more dimension (groups) than data.
            NODE_VALIDATION_CHECK(op,
                                  data_rank.compatible(filters_rank - 1),
                                  "Data batch and filters rank do not match (data batch shape: ",
                                  data_shape,
                                  ", filters shape: ",
                                  filters_shape,
                                  ").");

            convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);
        }
        convolution::apply_padding(op, data_shape, filters_shape, pads_begin, pads_end);

        output_shape.reserve(util::spatial_dim_offset + num_spatial);
        output_shape.emplace_back(data_rank.is_static() ? data_shape[0] : dim::inf_bound);

        if (filters_rank.is_static()) {
            auto groups = filters_shape[0];

            if (data_rank.is_static() && filters_shape[2].is_static()) {
                // Groups deduced from data channels / filters input channels must agree with filters[0].
                NODE_VALIDATION_CHECK(
                    op,
                    groups.merge(groups, groups, (data_shape[1] / filters_shape[2].get_length())),
                    "Input channels dimension of data batch is incompatible with filter groups or input channels.");
            }

            // Output channels = groups * per-group output channels (filters index 1).
            groups *= filters_shape[1];
            output_shape.push_back(std::move(groups));
        } else {
            output_shape.emplace_back(dim::inf_bound);
        }

        convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape);
    } else {
        output_shape = PartialShape::dynamic();
    }

    return {output_shape};
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -43,8 +43,7 @@ std::vector<TShape> shape_infer(const MaxPool* op, const std::vector<TShape>& in
pooling::update_and_validate_attributes(const_cast<MaxPool*>(op), data_shape, dilations);
auto output_shape = pooling::out_shape_infer(op, data_shape, dilations);
return {2, output_shape};
return {2, pooling::out_shape_infer(op, data_shape, dilations)};
}
template <class TShape>

View File

@ -12,35 +12,6 @@ namespace op {
namespace pooling {
constexpr size_t spatial_dim_offset = 2;
/**
* @brief Calculate dimension padding required by filter/kernel properties.
*
* Provides pair of padding values as left padding is total value of required padding divided by 2 and right as
* total required padding minus left padding.
*
* @param dim input dimension to calculate its padding.
* @param filter_size Kernel size for input dimension.
* @param dilation Kernel dilation.
* @param stride Kernel stride.
* @return Pair of left, right padding values for input dimension.
*/
template <class TDim, class T = typename TDim::value_type>
inline std::pair<T, T> dim_padding(const TDim& dim, const int64_t kernel_size, const int64_t dilation, int64_t stride) {
if (dim.is_static()) {
const auto dim_size = static_cast<int64_t>(dim.get_length());
const auto dilated_kernel = ov::util::dim::dilated(kernel_size, dilation);
const int64_t tmp = (dim_size + stride - 1) / stride;
const auto padding = std::max<int64_t>(0, (tmp - 1) * stride + dilated_kernel - dim_size);
const auto left_padding = padding / 2;
return {left_padding, padding - left_padding};
} else {
// If input dimension is infinite or interval the padding will be set to 0
// as operator cannot store paddings for both bounds.
return {0, 0};
}
}
template <class TOp, class TShape>
void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Strides& dilations) {
const auto& data_rank = data_shape.rank();
@ -97,7 +68,8 @@ void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Str
auto& pad_right = auto_pad == PadType::SAME_UPPER ? pad_end_ins : pad_begin_ins;
for (size_t i = 0; i < num_spatial; ++i, ++pad_left, ++pad_right, ++data_dim) {
std::tie(*pad_left, *pad_right) = dim_padding(*data_dim, kernel[i], dilations[i], strides[i]);
using namespace ov::util;
std::tie(*pad_left, *pad_right) = dim::padding(*data_dim, kernel[i], dilations[i], strides[i]);
}
op->set_pads_begin(pads_begin);

View File

@ -2,15 +2,14 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/binary_convolution.hpp"
#include "openvino/op/binary_convolution.hpp"
#include "binary_convolution_shape_inference.hpp"
#include "convolution_shape_inference.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/core/attribute_visitor.hpp"
#include "openvino/core/axis_vector.hpp"
#include "openvino/core/coordinate_diff.hpp"
using namespace std;
@ -23,14 +22,9 @@ ov::op::v1::BinaryConvolution::BinaryConvolution(const Output<Node>& data,
BinaryConvolutionMode mode,
float pad_value,
const PadType& auto_pad)
: Op({data, kernel}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
: ConvolutionFwdPropBase({data, kernel}, strides, pads_begin, pads_end, dilations, auto_pad),
m_mode(mode),
m_pad_value(pad_value),
m_auto_pad(auto_pad) {
m_pad_value(pad_value) {
constructor_validate_and_infer_types();
}
@ -43,23 +37,16 @@ ov::op::v1::BinaryConvolution::BinaryConvolution(const Output<Node>& data,
const std::string& mode,
float pad_value,
const PadType& auto_pad)
: Op({data, kernel}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
: ConvolutionFwdPropBase({data, kernel}, strides, pads_begin, pads_end, dilations, auto_pad),
m_mode(mode_from_string(mode)),
m_pad_value(pad_value),
m_auto_pad(auto_pad) {
m_pad_value(pad_value) {
constructor_validate_and_infer_types();
}
void ov::op::v1::BinaryConvolution::validate_and_infer_types() {
OV_OP_SCOPE(v1_BinaryConvolution_validate_and_infer_types);
const ov::PartialShape& data_batch_pshape = get_input_partial_shape(0);
element::Type data_batch_et = get_input_element_type(0);
const ov::PartialShape& filters_pshape = get_input_partial_shape(1);
const auto& data_batch_et = get_input_element_type(0);
NODE_VALIDATION_CHECK(this,
data_batch_et.is_real() || data_batch_et.is_integral_number(),
"Data batch element type must be numeric. Got: ",
@ -67,25 +54,15 @@ void ov::op::v1::BinaryConvolution::validate_and_infer_types() {
// TODO: Add NodeValidationCheck to filters et once u1 is supported in nGraph Python API
// (#52715)
const auto input_shapes = get_node_input_partial_shapes(*this);
Rank result_ps_rank;
NODE_VALIDATION_CHECK(this,
Rank::merge(result_ps_rank, data_batch_pshape.rank(), filters_pshape.rank()),
"Data batch and filters inputs must have same rank. Got: ",
data_batch_pshape,
" and ",
filters_pshape);
auto num_spatial = convolution::calculate_num_spatial(this, input_shapes);
if (num_spatial != util::num_spatial_undefined) {
resize_attributes(num_spatial);
}
ov::PartialShape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this,
result_ps_rank,
data_batch_pshape,
filters_pshape,
m_auto_pad,
m_strides,
m_dilations,
m_pads_begin,
m_pads_end);
set_output_type(0, data_batch_et, result_shape);
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, data_batch_et, output_shapes[0]);
}
shared_ptr<ov::Node> ov::op::v1::BinaryConvolution::clone_with_new_inputs(const OutputVector& new_args) const {
@ -116,11 +93,11 @@ bool ov::op::v1::BinaryConvolution::visit_attributes(AttributeVisitor& visitor)
namespace ov {
template <>
NGRAPH_API EnumNames<ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode>&
EnumNames<ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode>::get() {
static auto enum_names = EnumNames<ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode>(
OPENVINO_API EnumNames<ov::op::v1::BinaryConvolution::BinaryConvolutionMode>&
EnumNames<op::v1::BinaryConvolution::BinaryConvolutionMode>::get() {
static auto enum_names = EnumNames<op::v1::BinaryConvolution::BinaryConvolutionMode>(
"op::v1::BinaryConvolution::BinaryConvolutionMode",
{{"xnor-popcount", ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT}});
{{"xnor-popcount", op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT}});
return enum_names;
}
} // namespace ov

View File

@ -2,21 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/convolution.hpp"
#include <convolution_shape_inference.hpp>
#include "openvino/op/convolution.hpp"
#include "bound_evaluate.hpp"
#include "convolution_backprop_shape_inference.hpp"
#include "convolution_shape_inference.hpp"
#include "itt.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
// *** Convolution OP SET 1 ***
op::v1::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
@ -25,12 +21,7 @@ op::v1::Convolution::Convolution(const Output<Node>& data_batch,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad)
: Op({data_batch, filters}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad) {
: ConvolutionFwdPropBase({data_batch, filters}, strides, pads_begin, pads_end, dilations, auto_pad) {
constructor_validate_and_infer_types();
}
@ -46,8 +37,8 @@ bool op::v1::Convolution::visit_attributes(AttributeVisitor& visitor) {
void op::v1::Convolution::validate_and_infer_types() {
OV_OP_SCOPE(v1_Convolution_validate_and_infer_types);
element::Type data_batch_et = get_input_element_type(0);
element::Type filters_et = get_input_element_type(1);
const auto& data_batch_et = get_input_element_type(0);
const auto& filters_et = get_input_element_type(1);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
@ -62,21 +53,17 @@ void op::v1::Convolution::validate_and_infer_types() {
result_et.is_real() || result_et.is_integral_number(),
"Element types must be numeric. Got: ",
result_et);
auto& data_shape = get_input_partial_shape(0);
auto& filter_shape = get_input_partial_shape(1);
m_num_spatial = calculate_num_spatial(this, data_shape, filter_shape, 2, 2);
update_and_validate_attributes(this, m_num_spatial);
const auto input_shapes = get_node_input_partial_shapes(*this);
std::vector<ov::PartialShape> input_shapes = {data_shape, filter_shape};
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic()};
if (m_num_spatial != -1) {
resolve_auto_pad_for_shape(this, m_pads_begin, m_pads_end, input_shapes, 2, 2);
shape_infer(this, m_pads_begin, m_pads_end, input_shapes, output_shapes);
auto num_spatial = convolution::calculate_num_spatial(this, input_shapes);
if (num_spatial != util::num_spatial_undefined) {
resize_attributes(num_spatial);
}
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, result_et, output_shapes[0]);
set_num_spatial(num_spatial, input_shapes);
}
shared_ptr<Node> op::v1::Convolution::clone_with_new_inputs(const OutputVector& new_args) const {
@ -101,13 +88,13 @@ op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& dat
const Strides& dilations,
const PadType& auto_pad,
const CoordinateDiff& output_padding)
: Op({data, filters, output_shape}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad),
m_output_padding(output_padding) {
: ConvolutionBackPropBase({data, filters, output_shape},
strides,
pads_begin,
pads_end,
dilations,
auto_pad,
output_padding) {
ov::mark_as_precision_sensitive(input(2));
constructor_validate_and_infer_types();
}
@ -131,44 +118,45 @@ op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& dat
const Strides& dilations,
const PadType& auto_pad,
const CoordinateDiff& output_padding)
: Op({data, filters}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad),
m_output_padding(output_padding) {
: ConvolutionBackPropBase({data, filters}, strides, pads_begin, pads_end, dilations, auto_pad, output_padding) {
constructor_validate_and_infer_types();
}
bool op::v1::ConvolutionBackpropData::is_dynamic() const {
bool is_dynamic = Node::is_dynamic();
if (inputs().size() == 3 && !is_dynamic) {
return !has_and_set_equal_bounds(input_value(2));
}
return is_dynamic;
return Node::is_dynamic() || (get_input_size() == 3 && !has_and_set_equal_bounds(input_value(2)));
}
const ov::PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const {
ov::PartialShape shape;
if (get_input_size() == 3 && evaluate_as_partial_shape(input_value(2), shape))
return shape;
auto shape = PartialShape::dynamic();
auto data_pshape = get_input_partial_shape(0);
auto filter_pshape = get_input_partial_shape(1);
if (get_input_size() < 3 || !evaluate_as_partial_shape(input_value(2), shape)) {
const auto& data_rank = get_input_partial_shape(0).rank();
const auto& filter_rank = get_input_partial_shape(1).rank();
if (data_rank.is_static()) {
shape.resize(data_rank.get_length() - convolution::spatial_dim_offset);
} else if (filter_rank.is_static()) {
shape.resize(filter_rank.get_length() - convolution::spatial_dim_offset);
} else if (get_input_size() == 3) {
const auto& out_spatial_shape = get_input_partial_shape(2);
if (out_spatial_shape.is_static()) {
shape.resize(out_spatial_shape[0].get_length());
}
}
}
if (data_pshape.rank().is_static())
shape = ov::PartialShape::dynamic(data_pshape.rank().get_length() - 2);
else if (filter_pshape.rank().is_static())
shape = ov::PartialShape::dynamic(filter_pshape.rank().get_length() - 2);
else
shape = ov::PartialShape::dynamic();
return shape;
}
void op::v1::ConvolutionBackpropData::set_output_shape(const ov::Shape& shape) {
this->input(2).replace_source_output(
op::v0::Constant::create(this->get_input_element_type(2), ov::Shape{shape.size()}, shape)->output(0));
element::Type_t et = (get_input_size() == 3) ? get_input_element_type(2) : element::i64;
if (get_input_size() == 0) {
// Add dummy inputs when adding output shape and op has no inputs at all.
auto dummy = std::make_shared<v0::Constant>(et, ov::Shape{0});
set_argument(0, dummy);
set_argument(1, dummy);
}
set_argument(2, v0::Constant::create(et, Shape{shape.size()}, shape));
}
void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape(
@ -200,8 +188,8 @@ void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape(
void op::v1::ConvolutionBackpropData::validate_and_infer_types() {
OV_OP_SCOPE(v1_ConvolutionBackpropData_validate_and_infer_types);
element::Type delta_et = get_input_element_type(0);
element::Type filters_et = get_input_element_type(1);
const auto& delta_et = get_input_element_type(0);
const auto& filters_et = get_input_element_type(1);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
@ -217,9 +205,8 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() {
"Element type of inputs must be numeric. Got: ",
result_et);
bool is_output_shape_present = inputs().size() == 3;
if (is_output_shape_present) {
const element::Type output_shape_et = get_input_element_type(2);
if (get_input_size() == 3) {
const auto& output_shape_et = get_input_element_type(2);
NODE_VALIDATION_CHECK(this,
output_shape_et.is_integral_number(),
"Element type for output shape should be of integer type ",
@ -228,26 +215,17 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() {
").");
}
bool output_shape_input_present = get_input_size() == 3;
const auto input_shapes = get_node_input_partial_shapes(*this);
const auto out_spatial_shape = get_output_shape();
auto num_spatial = convolution::calculate_num_spatial(this, input_shapes, out_spatial_shape);
const auto& data_shape = get_input_partial_shape(0);
const auto& filter_shape = get_input_partial_shape(1);
auto& output_shapes_shape = output_shape_input_present ? get_input_partial_shape(2) : PartialShape::dynamic();
m_num_spatial = calculate_num_spatial(this, data_shape, filter_shape, output_shapes_shape, 2, 2);
update_and_validate_attributes_back_prop(this, m_num_spatial);
std::vector<ov::PartialShape> input_shapes = {data_shape, filter_shape};
if (output_shape_input_present)
input_shapes.push_back(get_input_partial_shape(2));
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic()};
if (m_num_spatial != -1) {
ov::PartialShape output_spatial_shape = get_output_shape();
resolve_auto_pad_for_shape_back_prop(this, m_pads_begin, m_pads_end, input_shapes, output_spatial_shape, 2, 2);
shape_infer(this, m_pads_begin, m_pads_end, output_spatial_shape, input_shapes, output_shapes);
if (num_spatial != util::num_spatial_undefined) {
resize_attributes(num_spatial);
}
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, result_et, output_shapes[0]);
set_num_spatial(num_spatial, input_shapes);
set_input_is_relevant_to_shape(0);
set_input_is_relevant_to_shape(1);
@ -277,3 +255,4 @@ shared_ptr<Node> op::v1::ConvolutionBackpropData::clone_with_new_inputs(const Ou
m_output_padding);
}
}
} // namespace ov

View File

@ -2,18 +2,13 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/deformable_convolution.hpp"
#include "openvino/op/deformable_convolution.hpp"
#include "deformable_convolution_shape_inference.hpp"
#include "itt.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/runtime/reference/deformable_convolution.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
op::v8::DeformableConvolution::DeformableConvolution(const Output<Node>& arg,
const Output<Node>& offsets,
const Output<Node>& filters,
@ -70,74 +65,45 @@ bool op::v8::DeformableConvolution::visit_attributes(AttributeVisitor& visitor)
void op::v8::DeformableConvolution::validate_and_infer_types() {
OV_OP_SCOPE(DeformableConvolution_v8_validate_and_infer_types);
DeformableConvolutionBase::validate_and_infer_types();
if (inputs().size() == 4) {
const ov::PartialShape& data_pshape = get_input_partial_shape(0);
const ov::PartialShape& filters_pshape = get_input_partial_shape(2);
const ov::PartialShape& mask_pshape = get_input_partial_shape(3);
const auto& data_batch_et = get_input_element_type(0);
const auto& offsets_et = get_input_element_type(1);
const auto& filters_et = get_input_element_type(2);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
element::Type::merge(result_et, data_batch_et, offsets_et) &&
element::Type::merge(result_et, result_et, filters_et),
"Element types of inputs do not match. Got: data batch (",
data_batch_et,
"), offsets (",
offsets_et,
") and filters (",
filters_et,
")");
NODE_VALIDATION_CHECK(this,
result_et.is_real() || result_et.is_integral_number(),
"Element type of inputs must be numeric. Got: ",
result_et);
if (get_input_size() == 4) {
element::Type mask_et = get_input_element_type(3);
NODE_VALIDATION_CHECK(this,
mask_et.is_real() || mask_et.is_integral_number(),
"Element type of Mask input must be numeric. Got: ",
mask_et);
NODE_VALIDATION_CHECK(this,
mask_pshape.rank().compatible(4),
"Mask input must be of rank 4. Got: ",
mask_pshape.rank());
if (mask_pshape.rank().is_static() && mask_pshape[1].is_static()) {
if (filters_pshape.rank().is_static() && filters_pshape[2].is_static() && filters_pshape[3].is_static()) {
auto offsets_channels =
m_deformable_group * filters_pshape[2].get_length() * filters_pshape[3].get_length();
NODE_VALIDATION_CHECK(this,
mask_pshape[1].get_length() == offsets_channels,
"The channels dimension of mask input is not "
"compatible with filters and 'deformable group' attribute. "
"Mask input shape: ",
mask_pshape,
", deformable 'group' attribute value: ",
m_deformable_group,
", filters shape: ",
filters_pshape);
}
// At least we can check if mask channels is evenly divisible by deformable
// group attribute
NODE_VALIDATION_CHECK(this,
mask_pshape[1].get_length() % m_deformable_group == 0,
"The channels dimension of mask input must be "
"evenly divisible by the 'deformable group' value along the "
"channels axis. Offsets input shape: ",
mask_pshape,
", 'deformable group' attribute value: ",
m_deformable_group);
if (data_pshape.rank().is_static()) {
NODE_VALIDATION_CHECK(this,
mask_pshape[0].compatible(data_pshape[0]),
"Data batch and mask batch dimension must be same value. Got: ",
mask_pshape[0],
" and ",
data_pshape[0]);
}
}
ov::PartialShape result_pshape = get_output_partial_shape(0);
if (result_pshape.rank().is_static() && mask_pshape.rank().is_static()) {
NODE_VALIDATION_CHECK(
this,
result_pshape[2].compatible(mask_pshape[2]) && result_pshape[3].compatible(mask_pshape[3]),
"Spatial dimensions of mask and output must be equal. Got: ",
mask_pshape[2],
", ",
mask_pshape[3],
" and ",
result_pshape[2],
", ",
result_pshape[3]);
}
}
const auto input_shapes = get_node_input_partial_shapes(*this);
auto num_spatial = deformable_conv::calculate_num_spatial(this, input_shapes);
if (num_spatial != convolution::num_spatial_undefined) {
resize_attributes(num_spatial);
}
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, result_et, output_shapes[0]);
}
std::shared_ptr<Node> op::v8::DeformableConvolution::clone_with_new_inputs(const OutputVector& new_args) const {
@ -208,3 +174,39 @@ std::shared_ptr<Node> op::v1::DeformableConvolution::clone_with_new_inputs(const
m_group,
m_deformable_group);
}
void op::v1::DeformableConvolution::validate_and_infer_types() {
OV_OP_SCOPE(DeformableConvolution_v1_validate_and_infer_types);
const auto& data_batch_et = get_input_element_type(0);
const auto& offsets_et = get_input_element_type(1);
const auto& filters_et = get_input_element_type(2);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
element::Type::merge(result_et, data_batch_et, offsets_et) &&
element::Type::merge(result_et, result_et, filters_et),
"Element types of inputs do not match. Got: data batch (",
data_batch_et,
"), offsets (",
offsets_et,
") and filters (",
filters_et,
")");
NODE_VALIDATION_CHECK(this,
result_et.is_real() || result_et.is_integral_number(),
"Element type of inputs must be numeric. Got: ",
result_et);
const auto input_shapes = get_node_input_partial_shapes(*this);
auto num_spatial = deformable_conv::calculate_num_spatial(this, input_shapes);
if (num_spatial != convolution::num_spatial_undefined) {
resize_attributes(num_spatial);
}
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, result_et, output_shapes[0]);
}
} // namespace ov

View File

@ -2,22 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/group_conv.hpp"
#include <convolution_shape_inference.hpp>
#include "openvino/op/group_conv.hpp"
#include "bound_evaluate.hpp"
#include "group_convolution_backprop_shape_inference.hpp"
#include "group_convolution_shape_inference.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
using namespace std;
using namespace ngraph;
//------------------------------------------------------------------------------
// v1::GroupConvolution
//------------------------------------------------------------------------------
namespace ov {
op::v1::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& strides,
@ -25,16 +23,11 @@ op::v1::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad)
: Op({data_batch, filters}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad) {
: ConvolutionFwdPropBase({data_batch, filters}, strides, pads_begin, pads_end, dilations, auto_pad) {
constructor_validate_and_infer_types();
}
bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visitor) {
bool op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v1_GroupConvolution_visit_attributes);
visitor.on_attribute("strides", m_strides);
visitor.on_attribute("pads_begin", m_pads_begin);
@ -46,8 +39,8 @@ bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visito
void op::v1::GroupConvolution::validate_and_infer_types() {
OV_OP_SCOPE(v1_GroupConvolution_validate_and_infer_types);
element::Type data_batch_et = get_input_element_type(0);
element::Type filters_et = get_input_element_type(1);
const auto& data_batch_et = get_input_element_type(0);
const auto& filters_et = get_input_element_type(1);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
@ -63,21 +56,16 @@ void op::v1::GroupConvolution::validate_and_infer_types() {
"Element type of inputs must be numeric. Got: ",
result_et);
auto& data_shape = get_input_partial_shape(0);
auto& filter_shape = get_input_partial_shape(1);
const auto input_shapes = get_node_input_partial_shapes(*this);
m_num_spatial = calculate_num_spatial(this, data_shape, filter_shape, 2, 3);
update_and_validate_attributes(this, m_num_spatial);
std::vector<ov::PartialShape> input_shapes = {data_shape, filter_shape};
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic()};
if (m_num_spatial != -1) {
resolve_auto_pad_for_shape(this, m_pads_begin, m_pads_end, input_shapes, 2, 3);
shape_infer(this, m_pads_begin, m_pads_end, input_shapes, output_shapes);
auto num_spatial = convolution::calculate_num_spatial(this, input_shapes);
if (num_spatial != convolution::num_spatial_undefined) {
resize_attributes(num_spatial);
}
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, result_et, output_shapes[0]);
set_num_spatial(num_spatial, input_shapes);
}
shared_ptr<Node> op::v1::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const {
@ -96,14 +84,7 @@ shared_ptr<Node> op::v1::GroupConvolution::clone_with_new_inputs(const OutputVec
// v1::GroupConvolutionBackpropData
//------------------------------------------------------------------------------
op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData()
: Op(),
m_strides(),
m_dilations(),
m_pads_begin(),
m_pads_end(),
m_auto_pad(),
m_output_padding() {}
op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData() : ConvolutionBackPropBase() {}
op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filters,
@ -114,13 +95,13 @@ op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData(const Output<
const Strides& dilations,
const PadType& auto_pad,
const CoordinateDiff& output_padding)
: Op({data, filters, output_shape}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad),
m_output_padding(output_padding) {
: ConvolutionBackPropBase({data, filters, output_shape},
strides,
pads_begin,
pads_end,
dilations,
auto_pad,
output_padding) {
ov::mark_as_precision_sensitive(input(2));
constructor_validate_and_infer_types();
}
@ -150,17 +131,11 @@ op::v1::GroupConvolutionBackpropData::GroupConvolutionBackpropData(const Output<
const Strides& dilations,
const PadType& auto_pad,
const CoordinateDiff& output_padding)
: Op({data, filters}),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad),
m_output_padding(output_padding) {
: ConvolutionBackPropBase({data, filters}, strides, pads_begin, pads_end, dilations, auto_pad, output_padding) {
constructor_validate_and_infer_types();
}
bool ngraph::op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) {
bool op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) {
OV_OP_SCOPE(v1_GroupConvolutionBackpropData_visit_attributes);
visitor.on_attribute("strides", m_strides);
visitor.on_attribute("pads_begin", m_pads_begin);
@ -172,33 +147,40 @@ bool ngraph::op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVis
}
bool op::v1::GroupConvolutionBackpropData::is_dynamic() const {
bool is_dynamic = Node::is_dynamic();
if (inputs().size() == 3 && !is_dynamic) {
return !has_and_set_equal_bounds(input_value(2));
}
return is_dynamic;
return Node::is_dynamic() || (get_input_size() == 3 && !has_and_set_equal_bounds(input_value(2)));
}
const ov::PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_shape() const {
ov::PartialShape shape;
if (get_input_size() == 3 && evaluate_as_partial_shape(input_value(2), shape))
return shape;
auto shape = PartialShape::dynamic();
auto data_pshape = get_input_partial_shape(0);
auto filter_pshape = get_input_partial_shape(1);
if (get_input_size() < 3 || !evaluate_as_partial_shape(input_value(2), shape)) {
const auto& data_rank = get_input_partial_shape(0).rank();
const auto& filter_rank = get_input_partial_shape(1).rank();
if (data_rank.is_static()) {
shape.resize(data_rank.get_length() - util::spatial_dim_offset);
} else if (filter_rank.is_static()) {
shape.resize(filter_rank.get_length() - util::spatial_dim_offset);
} else if (get_input_size() == 3) {
const auto& out_spatial_shape = get_input_partial_shape(2);
if (out_spatial_shape.is_static()) {
shape.resize(out_spatial_shape[0].get_length());
}
}
}
if (data_pshape.rank().is_static())
shape = ov::PartialShape::dynamic(data_pshape.rank().get_length() - 2);
else if (filter_pshape.rank().is_static())
shape = ov::PartialShape::dynamic(filter_pshape.rank().get_length() - 2);
else
shape = ov::PartialShape::dynamic();
return shape;
}
void op::v1::GroupConvolutionBackpropData::set_output_shape(const ov::Shape& shape) {
this->input(2).replace_source_output(
op::v0::Constant::create(this->get_input_element_type(2), ov::Shape{shape.size()}, shape)->output(0));
element::Type_t et = (get_input_size() == 3) ? get_input_element_type(2) : element::i64;
if (get_input_size() == 0) {
// Add dummy inputs when adding output shape and op has no inputs at all.
auto dummy = std::make_shared<v0::Constant>(et, ov::Shape{0});
set_argument(0, dummy);
set_argument(1, dummy);
}
set_argument(2, v0::Constant::create(et, Shape{shape.size()}, shape));
}
void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape(
@ -211,9 +193,9 @@ void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_sh
const CoordinateDiff& output_padding,
vector<Dimension>& output_spatial_shape) {
size_t num_spatial_dims = input_data_shape.size();
NGRAPH_CHECK(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims &&
dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims &&
pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims);
OPENVINO_ASSERT(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims &&
dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims &&
pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims);
for (size_t i = 0; i < num_spatial_dims; ++i) {
if (input_data_shape[i].is_static() && filters_shape[i].is_static()) {
@ -229,8 +211,8 @@ void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_sh
void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() {
OV_OP_SCOPE(v1_GroupConvolutionBackpropData_validate_and_infer_types);
element::Type data_et = get_input_element_type(0);
element::Type filters_et = get_input_element_type(1);
const auto& data_et = get_input_element_type(0);
const auto& filters_et = get_input_element_type(1);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
@ -257,25 +239,17 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() {
").");
}
const auto& data_shape = get_input_partial_shape(0);
const auto& filter_shape = get_input_partial_shape(1);
const auto input_shapes = get_node_input_partial_shapes(*this);
const auto out_spatial_shape = get_convolution_output_shape();
auto num_spatial = convolution::calculate_num_spatial(this, input_shapes, out_spatial_shape);
auto& output_shapes_shape = output_shape_input_present ? get_input_partial_shape(2) : PartialShape::dynamic();
m_num_spatial = calculate_num_spatial(this, data_shape, filter_shape, output_shapes_shape, 2, 3);
update_and_validate_attributes_back_prop(this, m_num_spatial);
std::vector<ov::PartialShape> input_shapes = {data_shape, filter_shape};
if (output_shape_input_present)
input_shapes.push_back(get_input_partial_shape(2));
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic()};
if (m_num_spatial != -1) {
ov::PartialShape output_spatial_shape = get_convolution_output_shape();
resolve_auto_pad_for_shape_back_prop(this, m_pads_begin, m_pads_end, input_shapes, output_spatial_shape, 2, 3);
shape_infer(this, m_pads_begin, m_pads_end, output_spatial_shape, input_shapes, output_shapes);
if (num_spatial != util::num_spatial_undefined) {
resize_attributes(num_spatial);
}
const auto output_shapes = shape_infer(this, input_shapes, m_pads_begin, m_pads_end);
set_output_type(0, result_et, output_shapes[0]);
set_num_spatial(num_spatial, input_shapes);
set_input_is_relevant_to_shape(0);
set_input_is_relevant_to_shape(1);
@ -305,3 +279,4 @@ shared_ptr<Node> op::v1::GroupConvolutionBackpropData::clone_with_new_inputs(con
m_output_padding);
}
}
} // namespace ov

View File

@ -2,14 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/util/deformable_convolution_base.hpp"
#include "openvino/op/util/deformable_convolution_base.hpp"
#include "itt.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
@ -21,12 +16,7 @@ ov::op::util::DeformableConvolutionBase::DeformableConvolutionBase(const OutputV
const PadType& auto_pad,
const int64_t group,
const int64_t deformable_group)
: Op(arguments),
m_strides(strides),
m_dilations(dilations),
m_pads_begin(pads_begin),
m_pads_end(pads_end),
m_auto_pad(auto_pad),
: ConvolutionBase(arguments, strides, pads_begin, pads_end, dilations, auto_pad),
m_group(group),
m_deformable_group(deformable_group) {}
@ -41,154 +31,3 @@ bool ov::op::util::DeformableConvolutionBase::visit_attributes(AttributeVisitor&
visitor.on_attribute("deformable_group", m_deformable_group);
return true;
}
void ov::op::util::DeformableConvolutionBase::validate_and_infer_types() {
OV_OP_SCOPE(util_DeformableConvolutionBase_validate_and_infer_types);
const PartialShape& data_batch_pshape = get_input_partial_shape(0);
const PartialShape& offsets_pshape = get_input_partial_shape(1);
const PartialShape& filters_pshape = get_input_partial_shape(2);
element::Type data_batch_et = get_input_element_type(0);
element::Type offsets_et = get_input_element_type(1);
element::Type filters_et = get_input_element_type(2);
element::Type result_et;
NODE_VALIDATION_CHECK(this,
element::Type::merge(result_et, data_batch_et, offsets_et) &&
element::Type::merge(result_et, result_et, filters_et),
"Element types of inputs do not match. Got: data batch (",
data_batch_et,
"), offsets (",
offsets_et,
") and filters (",
filters_et,
")");
NODE_VALIDATION_CHECK(this,
result_et.is_real() || result_et.is_integral_number(),
"Element type of inputs must be numeric. Got: ",
result_et);
Rank result_ps_rank{};
NODE_VALIDATION_CHECK(this,
Rank::merge(result_ps_rank, data_batch_pshape.rank(), offsets_pshape.rank()) &&
Rank::merge(result_ps_rank, result_ps_rank, filters_pshape.rank()),
"Ranks of inputs do not match. Got: data batch shape ",
data_batch_pshape,
", offsets shape ",
offsets_pshape,
", filters shape ",
filters_pshape);
NODE_VALIDATION_CHECK(this, result_ps_rank.compatible(4), "Inputs must be of rank 4. Got: ", result_ps_rank);
NODE_VALIDATION_CHECK(this, m_group > 0, "Attribute 'group' must be any value starting from 1. Got: ", m_group);
NODE_VALIDATION_CHECK(this,
m_deformable_group > 0,
"Attribute 'deformable group' must be any value starting from 1. Got: ",
m_deformable_group);
if (offsets_pshape.rank().is_static()) {
if (offsets_pshape[1].is_static()) {
if (filters_pshape.rank().is_static() && filters_pshape[2].is_static() && filters_pshape[3].is_static()) {
auto offsets_channels =
m_deformable_group * filters_pshape[2].get_length() * filters_pshape[3].get_length() * 2;
NODE_VALIDATION_CHECK(this,
offsets_pshape[1].get_length() == offsets_channels,
"The channels dimension of offsets input is not "
"compatible with filters and 'deformable group' attribute. "
"Offsets input shape: ",
offsets_pshape,
", deformable 'group' attribute value: ",
m_deformable_group,
", filters shape: ",
filters_pshape);
} else {
// At least we can check if offsets channels is evenly divisible by deformable
// group attribute
NODE_VALIDATION_CHECK(this,
offsets_pshape[1].get_length() % m_deformable_group == 0,
"The channels dimension of offsets input must be "
"evenly divisible by the 'deformable group' value along the "
"channels axis. Offsets input shape: ",
offsets_pshape,
", 'deformable group' attribute value: ",
m_deformable_group);
}
}
if (data_batch_pshape.rank().is_static()) {
NODE_VALIDATION_CHECK(this,
offsets_pshape[0].compatible(data_batch_pshape[0]),
"Data batch and offsets batch dimension must be same value. Got: ",
offsets_pshape[0],
" and ",
data_batch_pshape[0]);
}
}
if (data_batch_pshape.rank().is_static() && data_batch_pshape[1].is_static()) {
NODE_VALIDATION_CHECK(this,
data_batch_pshape[1].get_length() % m_group == 0,
"The input data shape must be evenly divisible by the 'group' value "
"along the channels axis. Current input shape: ",
data_batch_pshape,
", 'group' attribute value: ",
m_group);
}
if (filters_pshape.rank().is_static() && filters_pshape[0].is_static()) {
NODE_VALIDATION_CHECK(this,
filters_pshape[0].get_length() % m_group == 0,
"The filters shape must be evenly divisible by the 'group' value along "
"the channels axis. Current filters shape: ",
filters_pshape,
", 'group' attribute value: ",
m_group);
}
// adjust filter shape to reuse regular infer_convolution_forward()
const auto new_filters_pshape = [&](int64_t groups) {
auto new_shape(filters_pshape);
if (new_shape.rank().is_static()) {
new_shape[1] *= groups;
}
return new_shape;
}(m_group);
PartialShape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this,
result_ps_rank,
data_batch_pshape,
new_filters_pshape,
m_auto_pad,
m_strides,
m_dilations,
m_pads_begin,
m_pads_end);
if (result_shape.rank().is_static() && offsets_pshape.rank().is_static()) {
PartialShape result_spatial_shape = [&result_shape]() {
vector<Dimension> result_spatial_dims{result_shape};
result_spatial_dims.erase(result_spatial_dims.begin(), result_spatial_dims.begin() + 2);
return PartialShape{result_spatial_dims};
}();
PartialShape offsets_spatial_shape = [&offsets_pshape]() {
vector<Dimension> offsets_spatial_dims{offsets_pshape};
offsets_spatial_dims.erase(offsets_spatial_dims.begin(), offsets_spatial_dims.begin() + 2);
return PartialShape{offsets_spatial_dims};
}();
NODE_VALIDATION_CHECK(this,
offsets_spatial_shape.compatible(result_spatial_shape),
"Spatial dimensions of offsets and output must be equal. Got: ",
offsets_spatial_shape,
" and ",
result_spatial_shape);
if (result_shape[0].is_dynamic()) {
result_shape[0] = offsets_pshape[0]; // batch size
}
}
set_output_type(0, result_et, result_shape);
}

View File

@ -2,16 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/test_assertions.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
using namespace testing;
TEST(type_prop, bin_convolution_auto_padding_same) {
const PartialShape data_batch_shape{1, 1, 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
PartialShape data_batch_shape{1, 1, 5, 5};
PartialShape filters_shape{1, 1, 3, 3};
set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20);
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
@ -33,14 +37,17 @@ TEST(type_prop, bin_convolution_auto_padding_same) {
pad_value,
auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, 1, 5, 5}));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, bin_convolution_auto_padding_same_lower_spatial_dims_static) {
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{Dimension::dynamic(), Dimension::dynamic(), 3, 3};
PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
PartialShape filters_shape{Dimension::dynamic(), Dimension::dynamic(), 3, 3};
set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20);
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
@ -62,9 +69,10 @@ TEST(type_prop, bin_convolution_auto_padding_same_lower_spatial_dims_static) {
pad_value,
auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), Dimension::dynamic(), 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 5}));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, bin_convolution_auto_padding_same_upper_spatial_dims_static) {
@ -91,14 +99,16 @@ TEST(type_prop, bin_convolution_auto_padding_same_upper_spatial_dims_static) {
pad_value,
auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), Dimension::dynamic(), 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 5}));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, bin_convolution_auto_padding_same_data_batch_spatial_dims_dynamic) {
const PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5};
const PartialShape filters_shape{Dimension::dynamic(), 1, 3, 3};
PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5};
PartialShape filters_shape{Dimension::dynamic(), 1, 3, 3};
set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20);
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
@ -120,9 +130,10 @@ TEST(type_prop, bin_convolution_auto_padding_same_data_batch_spatial_dims_dynami
pad_value,
auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({1, Dimension::dynamic(), Dimension::dynamic(), 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 1}));
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, 12, ov::no_label));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 5}));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 1}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 1}));
}
TEST(type_prop, bin_convolution_dyn_data_batch) {
@ -131,7 +142,7 @@ TEST(type_prop, bin_convolution_dyn_data_batch) {
const auto auto_pad = op::PadType::EXPLICIT;
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch,
filters,
Strides{},
@ -141,9 +152,8 @@ TEST(type_prop, bin_convolution_dyn_data_batch) {
mode,
pad_value,
auto_pad);
ASSERT_TRUE(bin_conv->get_output_partial_shape(0).rank().same_scheme(Rank{3}));
ASSERT_TRUE(
bin_conv->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()}));
EXPECT_EQ(bin_conv->get_output_partial_shape(0), (PartialShape{-1, 1, {1, -1}, {1, -1}}));
}
TEST(type_prop, bin_convolution_dyn_filters) {
@ -162,9 +172,8 @@ TEST(type_prop, bin_convolution_dyn_filters) {
mode,
pad_value,
auto_pad);
ASSERT_TRUE(bin_conv->get_output_partial_shape(0).rank().same_scheme(Rank{4}));
ASSERT_TRUE(bin_conv->get_output_partial_shape(0).same_scheme(
PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
EXPECT_EQ(bin_conv->get_output_partial_shape(0), (PartialShape{1, -1, {1, 5}, {1, 5}}));
}
TEST(type_prop, bin_convolution_dyn_data_batch_and_filters) {
@ -183,8 +192,8 @@ TEST(type_prop, bin_convolution_dyn_data_batch_and_filters) {
mode,
pad_value,
auto_pad);
ASSERT_TRUE(bin_conv->get_output_partial_shape(0).rank().is_dynamic());
ASSERT_TRUE(bin_conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
EXPECT_EQ(bin_conv->get_output_partial_shape(0), PartialShape::dynamic());
}
TEST(type_prop, bin_convolution_invalid_inputs_et) {
@ -263,7 +272,7 @@ TEST(type_prop, bin_convolution_invalid_input_ranks) {
// data batch and filters have incompatible ranks
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters inputs must have same rank");
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match");
} catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason";
}
@ -285,7 +294,7 @@ TEST(type_prop, bin_convolution_invalid_input_ranks) {
// data batch and filters have incompatible ranks
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters inputs must have same rank");
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match");
} catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason";
}
@ -320,20 +329,21 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) {
// Strides have incompatible number of spatial dimensions
FAIL() << "Incompatible stride number of spatial dimensions not detected.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Strides should be defined for all and only spatial features."));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Strides should be defined for all and only spatial dimensions."));
} catch (...) {
FAIL() << "Strides validation check failed for unexpected reason.";
}
try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3});
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch,
filters,
strides_1d,
Strides{1, 1},
CoordinateDiff{},
CoordinateDiff{},
dilations_2d,
dilations_3d,
mode,
pad_value,
auto_pad);
@ -341,28 +351,87 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) {
FAIL() << "Incompatible dilations number of spatial dimensions not detected.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Dilations should be defined for all and only spatial features."));
std::string("Dilations should be defined for all and only spatial dimensions."));
} catch (...) {
FAIL() << "Dilations validation check failed for unexpected reason.";
}
try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3, 3});
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch,
filters,
strides_3d,
Strides{1, 1},
pads_begin_3d,
pads_end_2d,
dilations_3d,
dilations_2d,
mode,
pad_value,
auto_pad);
// Pads have incompatible number of spatial dimensions
FAIL() << "Incompatible pads number of spatial dimensions not detected.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Pads should be defined for all and only spatial features."));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Pads begin and end should be defined for all and only spatial dimensions."));
} catch (...) {
FAIL() << "Pads validation check failed for unexpected reason.";
}
}
class TypePropBinaryConvolutionV1Test : public TypePropOpTest<op::v1::BinaryConvolution> {
protected:
CoordinateDiff empty_pad{};
};
TEST_F(TypePropBinaryConvolutionV1Test, default_ctor) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto op = make_op();
op->set_arguments(OutputVector{data, filters});
op->set_strides({1, 3});
op->set_dilations({1, 2});
op->set_pads_begin({2, 2});
op->set_pads_end({2, 2});
op->set_auto_pad(op::PadType::EXPLICIT);
op->set_mode(op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT);
op->set_pad_value(1.0f);
op->validate_and_infer_types();
EXPECT_EQ(op->get_input_size(), 2);
EXPECT_EQ(op->get_output_size(), 1);
EXPECT_EQ(op->get_strides(), Strides({1, 3}));
EXPECT_EQ(op->get_dilations(), Strides({1, 2}));
EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2}));
EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2}));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 2, 6, 1}));
}
TEST_F(TypePropBinaryConvolutionV1Test, interval_shapes) {
PartialShape data_batch_pshape{{1, 3}, 1, {1, 5}, {3, 10}};
PartialShape filters_pshape{2, {1, 3}, 3, 3};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(filters_pshape, 20);
constexpr auto et = element::f32;
constexpr auto auto_pad = op::PadType::EXPLICIT;
constexpr auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
constexpr auto pad_value = 1.0f;
const auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape);
const auto filters = make_shared<op::Parameter>(et, filters_pshape);
const auto op = make_op(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{},
mode,
pad_value,
auto_pad);
EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{1, 3}, 2, {1, 3}, {1, 8}}));
EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0}));
}

View File

@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/test_assertions.hpp"
#include "convolution_shape_inference.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
@ -9,8 +10,9 @@
using namespace std;
using namespace ngraph;
using namespace testing;
TEST(type_prop, conv_v1_partial_rank) {
TEST(type_prop, convolution_v1_partial_rank) {
PartialShape data_batch_shape{PartialShape::dynamic()};
PartialShape filters_shape{PartialShape::dynamic()};
Strides window_movement_strides{1, 1};
@ -28,12 +30,14 @@ TEST(type_prop, conv_v1_partial_rank) {
padding_above,
window_dilation_strides);
ASSERT_TRUE(conv->get_output_partial_shape(0).is_dynamic());
EXPECT_EQ(conv->get_output_partial_shape(0), PartialShape({-1, -1, {1, -1}, {1, -1}}));
}
TEST(type_prop, conv_v1_partial_auto_padding_same) {
const PartialShape data_batch_shape{1, 1, 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
TEST(type_prop, convolution_v1_partial_auto_padding_same) {
PartialShape data_batch_shape{1, 1, 5, 5};
PartialShape filters_shape{1, 1, 3, 3};
set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20);
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
@ -46,14 +50,17 @@ TEST(type_prop, conv_v1_partial_auto_padding_same) {
auto conv =
make_shared<op::v1::Convolution>(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, 1, 5, 5}));
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower) {
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
TEST(type_prop, convolution_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower) {
PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
PartialShape filters_shape{1, 1, 3, 3};
set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20);
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
@ -66,12 +73,13 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower) {
auto conv =
make_shared<op::v1::Convolution>(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_EQ(conv->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 1, 5, 5}));
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper) {
TEST(type_prop, convolution_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper) {
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{1, 1, 2, 2};
Strides strides{1, 1};
@ -91,10 +99,12 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper) {
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, conv_v1_partial_auto_padding_same_spatial_dims_dynamic) {
const PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
TEST(type_prop, convolution_v1_partial_auto_padding_same_spatial_dims_dynamic) {
PartialShape data_batch_shape{1, 1, Dimension::dynamic(), {3, 5}};
PartialShape filters_shape{1, 1, 3, 3};
set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20);
Strides strides{2, 2};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
@ -106,12 +116,13 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_spatial_dims_dynamic) {
auto conv =
make_shared<op::v1::Convolution>(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_EQ(conv->get_output_partial_shape(0), PartialShape({1, 1, Dimension::dynamic(), 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 1}));
EXPECT_EQ(conv->get_output_partial_shape(0), PartialShape({1, 1, Dimension::dynamic(), {2, 3}}));
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, 12, ov::no_label));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 0}));
}
TEST(type_prop, conv_v1_partial_data_shape_dynamic) {
TEST(type_prop, convolution_v1_partial_data_shape_dynamic) {
const PartialShape data_batch_shape{PartialShape::dynamic()};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
@ -126,22 +137,149 @@ TEST(type_prop, conv_v1_partial_data_shape_dynamic) {
auto conv =
make_shared<op::v1::Convolution>(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_EQ(conv->get_output_partial_shape(0),
EXPECT_EQ(conv->get_output_partial_shape(0),
PartialShape({Dimension::dynamic(), 1, Dimension::dynamic(), Dimension::dynamic()}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 0}));
}
TEST(type_prop, convolution_default_constructed) {
auto conv = make_shared<op::v1::Convolution>();
conv->set_auto_pad(op::PadType::SAME_LOWER);
class TypePropConvolutionV1Test : public TypePropOpTest<op::v1::Convolution> {
protected:
CoordinateDiff empty_pad{};
};
const auto &input_shape = ov::PartialShape::dynamic(), filters_shape = ov::PartialShape{1, 1, 3, 3};
const auto& input_shapes = std::vector<ov::PartialShape>{input_shape, filters_shape};
std::vector<ov::PartialShape> output_shapes(1);
auto pad_begin = CoordinateDiff{}, pad_end = CoordinateDiff{};
TEST_F(TypePropConvolutionV1Test, default_ctor) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
int64_t num_spatial = calculate_num_spatial(conv.get(), input_shape, filters_shape, 2, 2);
update_and_validate_attributes(conv.get(), num_spatial);
EXPECT_NO_THROW(shape_infer(conv.get(), pad_begin, pad_end, input_shapes, output_shapes));
const auto op = make_op();
op->set_arguments(OutputVector{data, filters});
op->set_strides({1, 3});
op->set_dilations({1, 2});
op->set_pads_begin({2, 2});
op->set_pads_end({2, 2});
op->set_auto_pad(op::PadType::EXPLICIT);
op->validate_and_infer_types();
EXPECT_EQ(op->get_input_size(), 2);
EXPECT_EQ(op->get_output_size(), 1);
EXPECT_EQ(op->get_strides(), Strides({1, 3}));
EXPECT_EQ(op->get_dilations(), Strides({1, 2}));
EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2}));
EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2}));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 2, 6, 1}));
}
TEST_F(TypePropConvolutionV1Test, data_dynamic_rank_filters_2d) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1, 1};
auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_UPPER);
EXPECT_THAT(op->get_pads_begin(), ElementsAre(0, 0));
EXPECT_THAT(op->get_pads_end(), ElementsAre(0, 0));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({-1, 2, -1, -1}));
}
TEST_F(TypePropConvolutionV1Test, data_rank_to_low) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Expected a 3D, 4D or 5D tensor for the input"));
}
TEST_F(TypePropConvolutionV1Test, data_rank_to_high) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4, 4, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Expected a 3D, 4D or 5D tensor for the input"));
}
TEST_F(TypePropConvolutionV1Test, data_and_filters_rank_not_compatible) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Data batch and filters rank do not match"));
}
TEST_F(TypePropConvolutionV1Test, data_and_filters_channel_number_not_compatible) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 2, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Data batch channel count (2) does not match filter input channel count (3)"));
}
TEST_F(TypePropConvolutionV1Test, strides_not_defined_only_for_spatial_dims) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 1, 1};
const auto dilations = Strides{1, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Strides should be defined for all and only spatial dimensions."));
}
TEST_F(TypePropConvolutionV1Test, dilations_not_defined_only_for_spatial_dims) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Dilations should be defined for all and only spatial dimensions."));
}
TEST_F(TypePropConvolutionV1Test, strides_has_zeros) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 0};
const auto dilations = Strides{1, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Strides has zero dimension"));
}
TEST_F(TypePropConvolutionV1Test, dilations_has_zeros) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{0, 1};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, empty_pad, empty_pad, dilations, op::PadType::SAME_LOWER),
NodeValidationFailure,
HasSubstr("Filter dilations has zero dimension"));
}
TEST_F(TypePropConvolutionV1Test, pads_not_defined_for_spatial_only) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4});
const auto strides = Strides{1, 1};
const auto dilations = Strides{1, 1};
const auto pads_begin = CoordinateDiff{2, 2};
const auto pads_end = CoordinateDiff{2, 2, 2};
OV_EXPECT_THROW(auto op = make_op(data, filters, strides, pads_begin, pads_end, dilations),
NodeValidationFailure,
HasSubstr("Pads begin and end should be defined for all and only spatial dimensions."));
}

View File

@ -21,6 +21,7 @@
using namespace std;
using namespace ngraph;
using namespace testing;
// ---------------------------- v1 ----------------------------
TEST(type_prop, convolution_backprop_data_partial_auto_padding_upper) {
@ -72,9 +73,10 @@ TEST(type_prop, convolution_backprop_data_partial_auto_padding_lower) {
}
TEST(type_prop, convolution_backprop_data_auto_pad_explicit_with_output_padding) {
const PartialShape data_pshape{1, 16, 2, 2};
const PartialShape filters_pshape{16, 6, 3, 3};
PartialShape data_pshape{1, 16, 2, 2};
PartialShape filters_pshape{16, 6, 3, 3};
set_shape_labels(data_pshape, 10);
set_shape_labels(filters_pshape, 20);
const Strides strides{2, 2};
const Strides dilations{1, 1};
const CoordinateDiff padding_begin{1, 1};
@ -94,6 +96,8 @@ TEST(type_prop, convolution_backprop_data_auto_pad_explicit_with_output_padding)
auto_pad,
output_padding);
EXPECT_THAT(get_shape_labels(conv_backprop->get_output_partial_shape(0)),
ElementsAre(10, 21, ov::no_label, ov::no_label));
ASSERT_EQ(conv_backprop->get_output_partial_shape(0), PartialShape(PartialShape{1, 6, 4, 4}));
ASSERT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{1, 1}));
@ -125,10 +129,10 @@ TEST(type_prop, convolution_backprop_data_auto_pad_same_with_output_padding_and_
auto_pad,
output_padding);
ASSERT_EQ(conv_backprop->get_output_partial_shape(0), PartialShape(PartialShape{1, 6, 3, 3}));
ASSERT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{2, 2}));
ASSERT_EQ(conv_backprop->get_output_padding(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv_backprop->get_output_partial_shape(0), PartialShape(PartialShape{1, 6, 3, 3}));
EXPECT_EQ(conv_backprop->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(conv_backprop->get_pads_end(), (CoordinateDiff{2, 2}));
EXPECT_EQ(conv_backprop->get_output_padding(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, convolution_backprop_data_output_shape_as_const) {
@ -222,8 +226,10 @@ TEST(type_prop, convolution_backprop_data_with_output_shape_dyn_static_ranks_fil
}
TEST(type_prop, convolution_backprop_data_with_output_shape_dyn_static_ranks_filters_cin_cout_dyn) {
const PartialShape data_pshape{Dimension::dynamic(), 16, 5, 5};
const PartialShape filters_pshape{Dimension::dynamic(), Dimension::dynamic(), 3, 3};
PartialShape data_pshape{Dimension::dynamic(), 16, 5, 5};
PartialShape filters_pshape{Dimension::dynamic(), Dimension::dynamic(), 3, 3};
set_shape_labels(data_pshape, 10);
set_shape_labels(filters_pshape, 20);
const element::Type_t et = element::f32;
auto data = make_shared<op::Parameter>(et, data_pshape);
@ -238,6 +244,8 @@ TEST(type_prop, convolution_backprop_data_with_output_shape_dyn_static_ranks_fil
Strides{},
op::PadType::SAME_UPPER);
EXPECT_THAT(get_shape_labels(conv_backprop->get_output_partial_shape(0)),
ElementsAre(10, 21, ov::no_label, ov::no_label));
ASSERT_EQ(conv_backprop->get_output_partial_shape(0),
PartialShape(PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, 3}));
}
@ -548,7 +556,7 @@ TEST(type_prop, convolution_backprop_data_invalid_input_ranks) {
Strides{});
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data and filters rank do not match");
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match");
} catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason";
}
@ -568,7 +576,7 @@ TEST(type_prop, convolution_backprop_data_invalid_input_ranks) {
Strides{});
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data and filters inputs must have rank 3, 4 or 5");
EXPECT_HAS_SUBSTRING(error.what(), "Expected a 3D, 4D or 5D tensor for the input. Got:");
} catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason";
}
@ -588,7 +596,7 @@ TEST(type_prop, convolution_backprop_data_invalid_input_ranks) {
Strides{});
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data and filters inputs must have rank 3, 4 or 5");
EXPECT_HAS_SUBSTRING(error.what(), "Expected a 3D, 4D or 5D tensor for the input. Got:");
} catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason";
}
@ -633,8 +641,9 @@ TEST(type_prop, convolution_backprop_data_invalid_input_channel_dims) {
// data input shape does not have correct dimension C_IN
FAIL() << "Incompatibile input shapes not detected.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Input channels dimension of data and filters inputs must be equal"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Data batch channel count (32) does not match filter input channel count (16)"));
} catch (...) {
FAIL() << "Input shapes validation check failed for unexpected reason.";
}
@ -656,11 +665,11 @@ TEST(type_prop, convolution_backprop_data_invalid_output_shape_spatial_dims) {
CoordinateDiff{},
CoordinateDiff{},
Strides{});
// output_shape has invalid spatials dimensions (should be 2)
// output_shape has invalid spatial dimensions (should be 2)
FAIL() << "Incompatible output shape optional input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Output shape should be specified only and for all spatial dimensions."));
std::string("Output shape should be defined for all and only spatial dimensions."));
} catch (...) {
FAIL() << "Output shape validation check failed for unexpected reason.";
}
@ -752,7 +761,7 @@ TEST(type_prop, convolution_backprop_data_invalid_conv_param_spatial_dims) {
make_shared<op::v1::ConvolutionBackpropData>(data, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin should be defined for all and only spatial dimensions.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -768,7 +777,7 @@ TEST(type_prop, convolution_backprop_data_invalid_conv_param_spatial_dims) {
make_shared<op::v1::ConvolutionBackpropData>(data, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads end should be defined for all and only spatial dimensions.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -825,16 +834,59 @@ TEST(type_prop, convolution_backprop_data_invalid_conv_param_spatial_dims) {
}
TEST(type_prop, convolution_back_prop_data_default_constructed) {
auto conv = make_shared<op::v1::ConvolutionBackpropData>();
const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 1, 3, 3});
const auto out_spatial = op::Constant::create(element::i32, Shape{3}, {5, 4, 10});
const auto &input_shape = ov::PartialShape::dynamic(), filters_shape = ov::PartialShape{1, 1, 3, 3},
output_spatial_shape_shape = ov::PartialShape({2});
const auto& input_shapes = std::vector<ov::PartialShape>{input_shape, filters_shape, output_spatial_shape_shape};
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic()};
auto pad_begin = CoordinateDiff{}, pad_end = CoordinateDiff{};
const auto& output_spatial_shape = ov::PartialShape{3, 3};
int64_t num_spatial =
calculate_num_spatial(conv.get(), input_shape, filters_shape, output_spatial_shape_shape, 2, 2);
update_and_validate_attributes_back_prop(conv.get(), num_spatial);
EXPECT_NO_THROW(shape_infer(conv.get(), pad_begin, pad_end, output_spatial_shape, input_shapes, output_shapes));
const auto op = make_shared<op::v1::ConvolutionBackpropData>();
op->set_arguments(OutputVector{data, filters, out_spatial});
op->set_strides({1, 1, 1});
op->set_dilations({1, 1, 1});
op->set_pads_begin({2, 2, 2});
op->set_pads_end({2, 2, 2});
op->set_auto_pad(op::PadType::EXPLICIT);
op->validate_and_infer_types();
EXPECT_EQ(op->get_input_size(), 3);
EXPECT_EQ(op->get_output_size(), 1);
EXPECT_EQ(op->get_strides(), Strides({1, 1, 1}));
EXPECT_EQ(op->get_dilations(), Strides({1, 1, 1}));
EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2, 2}));
EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2, 2}));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({-1, 1, 5, 4, 10}));
}
TEST(type_prop, convolution_back_prop_data_interval_shapes_output_shape_as_shape_of) {
    // Interval (bounded-dynamic) input shapes with labels; the requested output
    // spatial shape is delivered dynamically through a ShapeOf node.
    PartialShape data_shape{{1, 3}, {2, 6}, {1, 5}, {3, 10}, {20, 100}};
    PartialShape filter_shape{{2, 3}, {1, 3}, 3, 3, 3};
    PartialShape spatial_shape{3, {2, 4}, 3};
    set_shape_labels(data_shape, 10);
    set_shape_labels(filter_shape, 20);
    set_shape_labels(spatial_shape, 30);

    const element::Type_t et = element::f32;
    const Strides strides{1, 2, 1};
    const Strides dilations{1, 1, 1};
    const CoordinateDiff pads_begin{0, 2, 1};
    const CoordinateDiff pads_end{0, 0, 0};
    const auto auto_pad = op::PadType::SAME_LOWER;

    const auto data_batch = make_shared<op::Parameter>(et, data_shape);
    const auto filters = make_shared<op::Parameter>(et, filter_shape);
    const auto out_spatial = make_shared<op::Parameter>(element::i32, spatial_shape);
    const auto spatial_shape_of = std::make_shared<op::v0::ShapeOf>(out_spatial);

    const auto op = make_shared<op::v1::ConvolutionBackpropData>(data_batch,
                                                                 filters,
                                                                 spatial_shape_of,
                                                                 strides,
                                                                 pads_begin,
                                                                 pads_end,
                                                                 dilations,
                                                                 auto_pad);

    // Batch label comes from the data input, channel label from the filters input,
    // and spatial labels are propagated from the ShapeOf source.
    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 21, 30, 31, 32));
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{1, 3}, {1, 3}, 3, {2, 4}, 3}));
    // With an explicit output spatial shape and SAME_LOWER auto-pad the test
    // expects the operator's pads to be reset to zero.
    EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0, 0}));
    EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0, 0}));
}

View File

@ -8,11 +8,15 @@
using namespace std;
using namespace ngraph;
using namespace testing;
TEST(type_prop, deformable_convolution_partial_auto_padding_same) {
const PartialShape data_batch_pshape{1, 4, 5, 5};
const PartialShape offsets_pshape{1, 36, 5, 5};
const PartialShape filters_pshape{4, 1, 3, 3};
PartialShape data_batch_pshape{1, 4, 5, 5};
PartialShape offsets_pshape{1, 36, 5, 5};
PartialShape filters_pshape{4, 1, 3, 3};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(offsets_pshape, 20);
set_shape_labels(filters_pshape, 30);
const element::Type_t et = element::f32;
Strides strides{1, 1};
@ -37,9 +41,11 @@ TEST(type_prop, deformable_convolution_partial_auto_padding_same) {
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_THAT(get_shape_labels(deformable_conv->get_output_partial_shape(0)),
ElementsAre(10, 30, ov::no_label, ov::no_label));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_convolution_partial_auto_padding_same_lower_data_batch_nc_dims_dynamic) {
@ -70,9 +76,9 @@ TEST(type_prop, deformable_convolution_partial_auto_padding_same_lower_data_batc
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_convolution_partial_auto_padding_same_upper_data_batch_nc_dims_dynamic) {
@ -103,9 +109,9 @@ TEST(type_prop, deformable_convolution_partial_auto_padding_same_upper_data_batc
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_convolution_partial_auto_padding_same_spatial_dims_dynamic) {
@ -136,15 +142,17 @@ TEST(type_prop, deformable_convolution_partial_auto_padding_same_spatial_dims_dy
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme({1, 4, Dimension::dynamic(), 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, Dimension::dynamic(), 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 1}));
}
TEST(type_prop, deformable_convolution_data_batch_dynamic) {
const PartialShape data_batch_pshape{PartialShape::dynamic()};
const PartialShape offsets_pshape{2, 36, 5, 5};
const PartialShape filters_pshape{4, 4, 3, 3};
PartialShape data_batch_pshape{PartialShape::dynamic()};
PartialShape offsets_pshape{2, 36, 5, 5};
PartialShape filters_pshape{4, 4, 3, 3};
set_shape_labels(offsets_pshape, 20);
set_shape_labels(filters_pshape, 30);
const element::Type_t et = element::f32;
const auto auto_pad = op::PadType::EXPLICIT;
@ -165,13 +173,14 @@ TEST(type_prop, deformable_convolution_data_batch_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{2, 4, Dimension::dynamic(), Dimension::dynamic()}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{2, 4, {1, -1}, {1, -1}}));
EXPECT_THAT(get_shape_labels(deformable_conv->get_output_partial_shape(0)),
ElementsAre(20, 30, ov::no_label, ov::no_label));
}
TEST(type_prop, deformable_convolution_offsets_dynamic) {
@ -198,17 +207,17 @@ TEST(type_prop, deformable_convolution_offsets_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
}
TEST(type_prop, deformable_convolution_auto_pad_same_filters_dynamic) {
const PartialShape data_batch_pshape{1, 4, 5, 5};
const PartialShape offsets_pshape{1, 36, 3, 3};
const PartialShape offsets_pshape{1, 36, 5, 5};
const PartialShape filters_pshape{PartialShape::dynamic()};
const element::Type_t et = element::f32;
@ -230,13 +239,12 @@ TEST(type_prop, deformable_convolution_auto_pad_same_filters_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_UPPER);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_UPPER);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 5, 5}));
}
TEST(type_prop, deformable_convolution_deformable_data_batch_and_filters_dynamic) {
@ -263,13 +271,12 @@ TEST(type_prop, deformable_convolution_deformable_data_batch_and_filters_dynamic
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, -1, {1, -1}, {1, -1}}));
}
TEST(type_prop, deformable_convolution_deformable_all_inputs_dynamic) {
@ -294,12 +301,12 @@ TEST(type_prop, deformable_convolution_deformable_all_inputs_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape::dynamic()));
}
TEST(type_prop, deformable_convolution_invalid_et_inputs) {
@ -324,9 +331,9 @@ TEST(type_prop, deformable_convolution_invalid_et_inputs) {
// data batch input must be of same element type as filters and deformable values
FAIL() << "Invalid element type of inputs not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Element types of inputs do not match. Got: data batch (f16), "
"offsets (f32) and filters (f32)");
EXPECT_HAS_SUBSTRING(
error.what(),
"Element types of inputs do not match. Got: data batch (f16), offsets (f32) and filters (f32)");
} catch (...) {
FAIL() << "Element types of inputs validation check failed for unexpected reason.";
}
@ -416,10 +423,7 @@ TEST(type_prop, deformable_convolution_invalid_input_ranks) {
// data batch has invalid rank 5, should be 4
FAIL() << "Incompatible data batch input rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Ranks of inputs do not match. Got: data batch "
"shape [1,4,5,5,5], offsets shape [1,4,4,4], filters "
"shape [4,4,3,3]");
EXPECT_HAS_SUBSTRING(error.what(), "Input must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check of data batch input failed for unexpected reason";
}
@ -443,10 +447,7 @@ TEST(type_prop, deformable_convolution_invalid_input_ranks) {
// deformable values has invalid rank 5, should be 4
FAIL() << "Incompatible offsets input rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Ranks of inputs do not match. Got: data batch shape "
"[1,4,5,5], offsets shape [1,4,4,4,4], filters shape "
"[4,4,3,3]");
EXPECT_HAS_SUBSTRING(error.what(), "Offsets must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check of offsets input failed for unexpected reason";
}
@ -470,10 +471,7 @@ TEST(type_prop, deformable_convolution_invalid_input_ranks) {
// filters has invalid rank 5, should be 4
FAIL() << "Incompatible filter input rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Ranks of inputs do not match. Got: data batch shape "
"[1,4,5,5], offsets shape [1,4,4,4], filters shape "
"[4,4,3,3,3]");
EXPECT_HAS_SUBSTRING(error.what(), "Filters must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check of filter input failed for unexpected reason";
}
@ -497,7 +495,7 @@ TEST(type_prop, deformable_convolution_invalid_input_ranks) {
// inputs have rank 5, should be 4
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Inputs must be of rank 4");
EXPECT_HAS_SUBSTRING(error.what(), "Input must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check for 2 spatial dimension inputs failed for unexpected reason";
}
@ -521,7 +519,7 @@ TEST(type_prop, deformable_convolution_invalid_input_ranks) {
// inputs have rank 3, should be 4
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Inputs must be of rank 4");
EXPECT_HAS_SUBSTRING(error.what(), "Input must be of rank 4. Got: 3");
} catch (...) {
FAIL() << "Rank validation check for 2 spatial dimension inputs failed for unexpected reason";
}
@ -679,9 +677,7 @@ TEST(type_prop, deformable_convolution_invalid_offsets_channels_dim) {
FAIL() << "Invalid channels dimension of offsets input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The channels dimension of offsets input must be "
"evenly divisible by the 'deformable group' value along the "
"channels axis.");
"Offsets channels dimension (35) must be evenly divisible by the 'deformable group': 2");
} catch (...) {
FAIL() << "Channels dimension of offsets input validation check failed for "
"unexpected reason.";
@ -759,9 +755,7 @@ TEST(type_prop, deformable_convolution_invalid_data_batch_channels_dim_with_grou
// data batch channels is not evenly divisible by the attribute group value
FAIL() << "Invalid channels dimension of data batch input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The input data shape must be evenly divisible by the 'group' value "
"along the channels axis.");
EXPECT_HAS_SUBSTRING(error.what(), "Input channels dimension (5) must be evenly divisible by the 'group': 4");
} catch (...) {
FAIL() << "Data batch channel dimension validation check failed for unexpected "
"reason.";
@ -800,9 +794,7 @@ TEST(type_prop, deformable_convolution_invalid_filters_channels_dim_with_group)
// filters channels output is not evenly divisible by the attribute group value
FAIL() << "Invalid channels output dimension of filters input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The filters shape must be evenly divisible by the 'group' value along "
"the channels axis");
EXPECT_HAS_SUBSTRING(error.what(), "Filters channels dimension (5) must be evenly divisible by the 'group'");
} catch (...) {
FAIL() << "Filters channels output dimension validation check failed for unexpected "
"reason.";
@ -882,7 +874,7 @@ TEST(type_prop, deformable_convolution_invalid_offsets_spatial_dims) {
// deformable values has incorrect spatial dimensions
FAIL() << "Invalid spatial dimensions of offsets not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Spatial dimensions of offsets and output must be equal");
EXPECT_HAS_SUBSTRING(error.what(), "Spatial dimensions of offsets and output must be compatible");
} catch (...) {
FAIL() << "Spatial dimension of offsets validation check failed for unexpected reason";
}
@ -913,7 +905,7 @@ TEST(type_prop, deformable_convolution_invalid_conv_param_spatial_dims) {
dilations);
FAIL() << "Invalid strides spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial dimension");
} catch (...) {
FAIL() << "Strides spatial dimensions validation check failed for unexpected reason";
}
@ -935,7 +927,7 @@ TEST(type_prop, deformable_convolution_invalid_conv_param_spatial_dims) {
dilations);
FAIL() << "Invalid strides spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial dimension");
} catch (...) {
FAIL() << "Strides spatial dimensions validation check failed for unexpected reason";
}
@ -959,7 +951,7 @@ TEST(type_prop, deformable_convolution_invalid_conv_param_spatial_dims) {
dilations);
FAIL() << "Invalid dilations spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial dimensions");
} catch (...) {
FAIL() << "Dilations spatial dimensions validation check failed for unexpected reason";
}
@ -981,7 +973,7 @@ TEST(type_prop, deformable_convolution_invalid_conv_param_spatial_dims) {
dilations);
FAIL() << "Invalid dilations spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial dimensions");
} catch (...) {
FAIL() << "Dilations spatial dimensions validation check failed for unexpected reason";
}
@ -1005,7 +997,7 @@ TEST(type_prop, deformable_convolution_invalid_conv_param_spatial_dims) {
dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -1027,8 +1019,61 @@ TEST(type_prop, deformable_convolution_invalid_conv_param_spatial_dims) {
dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
}
// Fixture for op::v1::DeformableConvolution type_prop tests.
class TypePropDeformableConvolutionV1Test : public TypePropOpTest<op::v1::DeformableConvolution> {
protected:
    // Reusable empty padding argument for constructors taking pads_begin/pads_end.
    CoordinateDiff empty_pad{};
};
TEST_F(TypePropDeformableConvolutionV1Test, default_ctor) {
    // Build the op through the default constructor, then supply inputs and
    // attributes via setters before running shape/type inference.
    const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 4, 5, 5});
    const auto offset_values = make_shared<op::Parameter>(element::f32, PartialShape{1, 36, 7, 2});
    const auto kernel = make_shared<op::Parameter>(element::f32, PartialShape{4, 1, 3, 3});

    const auto op = make_op();
    op->set_arguments(OutputVector{data_batch, offset_values, kernel});
    op->set_auto_pad(op::PadType::EXPLICIT);
    op->set_group(4);
    op->set_deformable_group(2);
    op->set_strides({1, 3});
    op->set_dilations({1, 2});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 2});
    op->validate_and_infer_types();

    EXPECT_EQ(op->get_input_size(), 3);
    EXPECT_EQ(op->get_output_size(), 1);
    EXPECT_EQ(op->get_strides(), Strides({1, 3}));
    EXPECT_EQ(op->get_dilations(), Strides({1, 2}));
    EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2}));
    EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2}));
    // Output spatial dims follow the offsets input's spatial dims.
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 4, 7, 2}));
}
TEST_F(TypePropDeformableConvolutionV1Test, interval_shapes) {
    // Interval (bounded-dynamic) input shapes with labels; default attributes
    // (empty strides/dilations/pads) are resolved by shape inference.
    PartialShape data_shape{{1, 3}, {2, 6}, {1, 5}, {3, 10}};
    PartialShape offsets_pshape{1, 36, 4, 5};
    PartialShape filters_shape{{2, 5}, {1, 3}, {2, 3}, 3};
    set_shape_labels(data_shape, 10);
    set_shape_labels(offsets_pshape, 20);
    set_shape_labels(filters_shape, 30);

    const element::Type_t et = element::f32;
    const auto auto_pad = op::PadType::EXPLICIT;
    const auto data_batch = make_shared<op::Parameter>(et, data_shape);
    const auto offsets = make_shared<op::Parameter>(et, offsets_pshape);
    const auto filters = make_shared<op::Parameter>(et, filters_shape);

    const auto op = make_op(data_batch, offsets, filters, Strides{}, empty_pad, empty_pad, Strides{}, auto_pad, 4, 2);

    // Batch/channel labels propagate from data and filters; spatial labels are dropped.
    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 30, ov::no_label, ov::no_label));
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, {2, 5}, {1, 4}, {1, 8}}));
    // EXPLICIT auto-pad with empty pads resolves to zero padding.
    EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0}));
    EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0}));
}

View File

@ -4,17 +4,21 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset8.hpp"
#include "openvino/opsets/opset8.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
using namespace ngraph::opset8;
using namespace ov::opset8;
using namespace testing;
TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same) {
const PartialShape data_batch_pshape{1, 4, 5, 5};
const PartialShape offsets_pshape{1, 36, 5, 5};
const PartialShape filters_pshape{4, 1, 3, 3};
PartialShape data_batch_pshape{1, 4, 5, 5};
PartialShape offsets_pshape{1, 36, 5, 5};
PartialShape filters_pshape{4, 1, 3, 3};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(offsets_pshape, 20);
set_shape_labels(filters_pshape, 30);
const element::Type_t et = element::f32;
Strides strides{1, 1};
@ -39,9 +43,11 @@ TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same) {
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_THAT(get_shape_labels(deformable_conv->get_output_partial_shape(0)),
ElementsAre(10, 30, ov::no_label, ov::no_label));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same_lower_data_batch_nc_dims_dynamic) {
@ -72,9 +78,9 @@ TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same_lower_da
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same_upper_data_batch_nc_dims_dynamic) {
@ -105,9 +111,9 @@ TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same_upper_da
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same_spatial_dims_dynamic) {
@ -138,9 +144,9 @@ TEST(type_prop, deformable_convolution_opset8_partial_auto_padding_same_spatial_
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme({1, 4, Dimension::dynamic(), 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, Dimension::dynamic(), 5}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 1}));
}
TEST(type_prop, deformable_convolution_opset8_data_batch_dynamic) {
@ -167,13 +173,12 @@ TEST(type_prop, deformable_convolution_opset8_data_batch_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{2, 4, Dimension::dynamic(), Dimension::dynamic()}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{2, 4, {1, -1}, {1, -1}}));
}
TEST(type_prop, deformable_convolution_opset8_offsets_dynamic) {
@ -200,17 +205,17 @@ TEST(type_prop, deformable_convolution_opset8_offsets_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
}
TEST(type_prop, deformable_convolution_opset8_auto_pad_same_filters_dynamic) {
const PartialShape data_batch_pshape{1, 4, 5, 5};
const PartialShape offsets_pshape{1, 36, 3, 3};
const PartialShape offsets_pshape{1, 36, 5, 5};
const PartialShape filters_pshape{PartialShape::dynamic()};
const element::Type_t et = element::f32;
@ -232,13 +237,12 @@ TEST(type_prop, deformable_convolution_opset8_auto_pad_same_filters_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_UPPER);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_UPPER);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 5, 5}));
}
TEST(type_prop, deformable_convolution_opset8_deformable_data_batch_and_filters_dynamic) {
@ -265,11 +269,11 @@ TEST(type_prop, deformable_convolution_opset8_deformable_data_batch_and_filters_
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{0, 0}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
}
@ -296,11 +300,11 @@ TEST(type_prop, deformable_convolution_opset8_deformable_all_inputs_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::EXPLICIT);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
@ -418,10 +422,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_input_ranks) {
// data batch has invalid rank 5, should be 4
FAIL() << "Incompatible data batch input rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Ranks of inputs do not match. Got: data batch "
"shape [1,4,5,5,5], offsets shape [1,4,4,4], filters "
"shape [4,4,3,3]");
EXPECT_HAS_SUBSTRING(error.what(), "Input must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check of data batch input failed for unexpected reason";
}
@ -445,10 +446,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_input_ranks) {
// deformable values has invalid rank 5, should be 4
FAIL() << "Incompatible offsets input rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Ranks of inputs do not match. Got: data batch shape "
"[1,4,5,5], offsets shape [1,4,4,4,4], filters shape "
"[4,4,3,3]");
EXPECT_HAS_SUBSTRING(error.what(), "Offsets must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check of offsets input failed for unexpected reason";
}
@ -472,10 +470,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_input_ranks) {
// filters has invalid rank 5, should be 4
FAIL() << "Incompatible filter input rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Ranks of inputs do not match. Got: data batch shape "
"[1,4,5,5], offsets shape [1,4,4,4], filters shape "
"[4,4,3,3,3]");
EXPECT_HAS_SUBSTRING(error.what(), "Filters must be of rank 4. Got: 5");
} catch (...) {
FAIL() << "Rank validation check of filter input failed for unexpected reason";
}
@ -499,7 +494,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_input_ranks) {
// inputs have rank 5, should be 4
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Inputs must be of rank 4");
EXPECT_HAS_SUBSTRING(error.what(), "Input must be of rank 4");
} catch (...) {
FAIL() << "Rank validation check for 2 spatial dimension inputs failed for unexpected reason";
}
@ -523,7 +518,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_input_ranks) {
// inputs have rank 3, should be 4
FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Inputs must be of rank 4");
EXPECT_HAS_SUBSTRING(error.what(), "Input must be of rank 4");
} catch (...) {
FAIL() << "Rank validation check for 2 spatial dimension inputs failed for unexpected reason";
}
@ -681,9 +676,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_offsets_channels_dim) {
FAIL() << "Invalid channels dimension of offsets input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The channels dimension of offsets input must be "
"evenly divisible by the 'deformable group' value along the "
"channels axis.");
"Offsets channels dimension (35) must be evenly divisible by the 'deformable group'");
} catch (...) {
FAIL() << "Channels dimension of offsets input validation check failed for "
"unexpected reason.";
@ -761,9 +754,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_data_batch_channels_dim_wi
// data batch channels is not evenly divisible by the attribute group value
FAIL() << "Invalid channels dimension of data batch input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The input data shape must be evenly divisible by the 'group' value "
"along the channels axis.");
EXPECT_HAS_SUBSTRING(error.what(), "Input channels dimension (5) must be evenly divisible by the 'group'");
} catch (...) {
FAIL() << "Data batch channel dimension validation check failed for unexpected "
"reason.";
@ -802,9 +793,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_filters_channels_dim_with_
// filters channels output is not evenly divisible by the attribute group value
FAIL() << "Invalid channels output dimension of filters input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The filters shape must be evenly divisible by the 'group' value along "
"the channels axis");
EXPECT_HAS_SUBSTRING(error.what(), "Filters channels dimension (5) must be evenly divisible by the 'group'");
} catch (...) {
FAIL() << "Filters channels output dimension validation check failed for unexpected "
"reason.";
@ -884,7 +873,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_offsets_spatial_dims) {
// deformable values has incorrect spatial dimensions
FAIL() << "Invalid spatial dimensions of offsets not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Spatial dimensions of offsets and output must be equal");
EXPECT_HAS_SUBSTRING(error.what(), "Spatial dimensions of offsets and output must be compatible");
} catch (...) {
FAIL() << "Spatial dimension of offsets validation check failed for unexpected reason";
}
@ -910,7 +899,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_conv_param_spatial_dims) {
make_shared<DeformableConvolution>(data_batch, offsets, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid strides spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Strides spatial dimensions validation check failed for unexpected reason";
}
@ -927,7 +916,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_conv_param_spatial_dims) {
make_shared<DeformableConvolution>(data_batch, offsets, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid strides spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Strides should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Strides spatial dimensions validation check failed for unexpected reason";
}
@ -946,7 +935,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_conv_param_spatial_dims) {
make_shared<DeformableConvolution>(data_batch, offsets, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid dilations spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Dilations spatial dimensions validation check failed for unexpected reason";
}
@ -963,7 +952,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_conv_param_spatial_dims) {
make_shared<DeformableConvolution>(data_batch, offsets, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid dilations spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Dilations should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Dilations spatial dimensions validation check failed for unexpected reason";
}
@ -982,7 +971,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_conv_param_spatial_dims) {
make_shared<DeformableConvolution>(data_batch, offsets, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -999,7 +988,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_conv_param_spatial_dims) {
make_shared<DeformableConvolution>(data_batch, offsets, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads should be defined for all and only spatial features.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -1040,7 +1029,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_mask_spatial_dims) {
// deformable values has incorrect spatial dimensions
FAIL() << "Invalid spatial dimensions of mask not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Spatial dimensions of mask and output must be equal");
EXPECT_HAS_SUBSTRING(error.what(), "Spatial dimensions of mask and output must be compatible");
} catch (...) {
FAIL() << "Spatial dimension of mask validation check failed for unexpected reason";
}
@ -1073,12 +1062,12 @@ TEST(type_prop, deformable_convolution_opset8_mask_dynamic) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_output_partial_shape(0), (PartialShape{1, 4, 5, 5}));
}
TEST(type_prop, deformable_convolution_opset8_invalid_mask_channels_dim) {
@ -1161,9 +1150,7 @@ TEST(type_prop, deformable_convolution_opset8_invalid_mask_channels_dim) {
FAIL() << "Invalid channels dimension of mask input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The channels dimension of mask input must be "
"evenly divisible by the 'deformable group' value along the "
"channels axis.");
"Mask channels dimension (9) must be evenly divisible by the 'deformable group'");
} catch (...) {
FAIL() << "Channels dimension of mask input validation check failed for "
"unexpected reason.";
@ -1239,10 +1226,66 @@ TEST(type_prop, deformable_convolution_opset8_mask) {
group,
deformable_group);
ASSERT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
ASSERT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_auto_pad(), op::PadType::SAME_LOWER);
EXPECT_EQ(deformable_conv->get_strides(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_dilations(), (Strides{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
EXPECT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
}
// Test fixture for type_prop (shape-inference) tests of opset8 DeformableConvolution.
// Provides a shared default-constructed-op factory via TypePropOpTest::make_op().
class TypePropDeformableConvolutionV8Test : public TypePropOpTest<op::v8::DeformableConvolution> {
protected:
// Reusable empty pads; with PadType other than EXPLICIT the pads are resolved by shape inference.
CoordinateDiff empty_pad{};
};
// A default-constructed DeformableConvolution configured only through setters
// must validate and infer the output shape from the offsets' spatial dimensions.
TEST_F(TypePropDeformableConvolutionV8Test, default_ctor) {
    const auto in_data = make_shared<Parameter>(element::f32, PartialShape{1, 4, 5, 5});
    const auto in_offsets = make_shared<Parameter>(element::f32, PartialShape{1, 36, 7, 2});
    const auto in_filters = make_shared<Parameter>(element::f32, PartialShape{4, 1, 3, 3});
    // Last spatial dim of the mask is dynamic; shape inference must still succeed.
    const auto in_masks = make_shared<Parameter>(element::f32, PartialShape{1, 18, 7, -1});

    const auto op = make_op();
    op->set_arguments(OutputVector{in_data, in_offsets, in_filters, in_masks});
    op->set_strides({1, 3});
    op->set_dilations({1, 2});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 2});
    op->set_auto_pad(op::PadType::EXPLICIT);
    op->set_group(4);
    op->set_deformable_group(2);
    op->validate_and_infer_types();

    // Attributes must round-trip through the setters unchanged.
    EXPECT_EQ(op->get_strides(), Strides({1, 3}));
    EXPECT_EQ(op->get_dilations(), Strides({1, 2}));
    EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2}));
    EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2}));
    EXPECT_EQ(op->get_input_size(), 4);
    EXPECT_EQ(op->get_output_size(), 1);
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 4, 7, 2}));
}
// Interval (bounded-dynamic) input shapes: output dims must be the intersection of
// the bounds implied by data/offsets/filters, and dimension labels must propagate
// from the batch (data) and the filters' output-channels dimension only.
TEST_F(TypePropDeformableConvolutionV8Test, interval_shapes) {
    PartialShape data_shape{{1, 3}, {2, 6}, {1, 5}, {3, 10}};
    PartialShape offsets_shape{1, 36, 4, 5};
    PartialShape filters_shape{{2, 5}, {1, 3}, {2, 3}, 3};
    set_shape_labels(data_shape, 10);
    set_shape_labels(offsets_shape, 20);
    set_shape_labels(filters_shape, 30);

    const auto data_batch = make_shared<Parameter>(element::f32, data_shape);
    const auto offsets = make_shared<Parameter>(element::f32, offsets_shape);
    const auto filters = make_shared<Parameter>(element::f32, filters_shape);
    const auto masks = make_shared<Parameter>(element::f32, PartialShape{-1, 18, {1, 10}, 3});

    const auto op = make_op(data_batch,
                            offsets,
                            filters,
                            masks,
                            Strides{},
                            empty_pad,
                            empty_pad,
                            Strides{},
                            op::PadType::EXPLICIT,
                            4,
                            2);

    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 30, ov::no_label, ov::no_label));
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, {2, 5}, {1, 4}, {1, 8}}));
    // EXPLICIT auto_pad with empty pads resolves to zero padding for both spatial dims.
    EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0}));
    EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0}));
}

View File

@ -2,17 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "convolution_shape_inference.hpp"
#include "common_test_utils/test_assertions.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
using namespace testing;
TEST(type_prop, group_convolution_auto_padding_same_lower) {
const PartialShape data_batch_pshape{1, 4, 5, 5};
const PartialShape filters_pshape{2, 1, 2, 3, 3};
PartialShape data_batch_pshape{1, 4, 5, 5};
PartialShape filters_pshape{2, 1, 2, 3, 3};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(filters_pshape, 20);
element::Type_t et = element::f32;
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
@ -26,6 +29,8 @@ TEST(type_prop, group_convolution_auto_padding_same_lower) {
auto groupConv =
make_shared<op::v1::GroupConvolution>(data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
EXPECT_THAT(get_shape_labels(groupConv->get_output_partial_shape(0)),
ElementsAre(10, 20, ov::no_label, ov::no_label));
ASSERT_EQ(groupConv->get_output_partial_shape(0), PartialShape({1, 2, 5, 5}));
ASSERT_EQ(groupConv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(groupConv->get_pads_end(), (CoordinateDiff{1, 1}));
@ -74,8 +79,10 @@ TEST(type_prop, group_convolution_auto_padding_same_lower_spatial_dims_static) {
}
TEST(type_prop, group_convolution_auto_padding_same_upper_spatial_dims_static) {
const PartialShape data_batch_pshape{1, Dimension::dynamic(), 5, 5};
const PartialShape filters_pshape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 2, 2};
PartialShape data_batch_pshape{1, Dimension::dynamic(), 5, 5};
PartialShape filters_pshape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 2, 2};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(filters_pshape, 20);
const element::Type_t et = element::f32;
const auto auto_pad = op::PadType::SAME_UPPER;
@ -89,14 +96,19 @@ TEST(type_prop, group_convolution_auto_padding_same_upper_spatial_dims_static) {
Strides{},
auto_pad);
EXPECT_THAT(get_shape_labels(groupConv->get_output_partial_shape(0)),
ElementsAre(10, ov::no_label, ov::no_label, ov::no_label));
ASSERT_EQ(groupConv->get_output_partial_shape(0), PartialShape({1, Dimension::dynamic(), 5, 5}));
ASSERT_EQ(groupConv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(groupConv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, group_convolution_static_ranks_filters_groups_dyn) {
const PartialShape data_batch_pshape{Dimension::dynamic(), 4, 5, 5};
const PartialShape filters_pshape{Dimension::dynamic(), 1, 2, 3, 3};
PartialShape data_batch_pshape{Dimension::dynamic(), 4, 5, 5};
PartialShape filters_pshape{Dimension::dynamic(), 1, 2, 3, 3};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(filters_pshape, 20);
const element::Type_t et = element::f32;
const auto auto_pad = op::PadType::SAME_LOWER;
@ -109,7 +121,8 @@ TEST(type_prop, group_convolution_static_ranks_filters_groups_dyn) {
CoordinateDiff{},
Strides{},
auto_pad);
EXPECT_THAT(get_shape_labels(groupConv->get_output_partial_shape(0)),
ElementsAre(10, 20, ov::no_label, ov::no_label));
ASSERT_EQ(groupConv->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 2, 5, 5}));
ASSERT_EQ(groupConv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(groupConv->get_pads_end(), (CoordinateDiff{1, 1}));
@ -348,50 +361,43 @@ TEST(type_prop, group_convolution_invalid_input_ranks) {
}
TEST(type_prop, group_convolution_invalid_input_channel_dims) {
try {
constexpr auto et = element::f32;
// data batch shape does not have correct dimension C_IN * GROUPS
{
const PartialShape data_batch_pshape{1, 6, 5, 5};
const PartialShape filters_pshape{2, 1, 2, 3, 3};
element::Type_t et = element::f32;
const PartialShape filters_pshape{1, 1, 3, 3, 3};
auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape);
auto filters = make_shared<op::Parameter>(et, filters_pshape);
auto groupConv = make_shared<op::v1::GroupConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{});
// data batch shape does not have correct dimension C_IN * GROUPS
FAIL() << "Invalid input channels dimension of data batch not detected.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"Input channels dimension of data batch has incompatible value "
"with filter shape.");
} catch (...) {
FAIL() << "Input channels dimension of data batch validation check failed for unexpected "
"reason.";
OV_EXPECT_THROW(
const auto op = make_shared<op::v1::GroupConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{}),
NodeValidationFailure,
HasSubstr("Input channels dimension of data batch is incompatible with filter groups or input channels."));
}
try {
// data batch shape does not have correct dimension C_IN * GROUPS
{
const PartialShape data_batch_pshape{1, 3, 5, 5};
const PartialShape filters_pshape{2, 1, Dimension::dynamic(), 3, 3};
element::Type_t et = element::f32;
const PartialShape filters_pshape{-1, 1, 2, 3, 3};
auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape);
auto filters = make_shared<op::Parameter>(et, filters_pshape);
auto groupConv = make_shared<op::v1::GroupConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{});
// data batch shape does not have correct dimension C_IN * GROUPS
FAIL() << "Invalid input channels dimension of data batch not detected.";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Input channels dimension of data batch not a multiple of group size");
} catch (...) {
FAIL() << "Input channels dimension of data batch validation check failed for unexpected "
"reason.";
OV_EXPECT_THROW(
const auto op = make_shared<op::v1::GroupConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{}),
NodeValidationFailure,
HasSubstr("Input channels dimension of data batch is incompatible with filter groups or input channels."));
}
}
@ -469,23 +475,23 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) {
}
// invalid padding spatial dimensions
try {
{
Strides strides{1, 1};
Strides dilations{1, 1};
CoordinateDiff pads_begin{0, 0, 0};
CoordinateDiff pads_end{0, 0};
auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape);
auto filters = make_shared<op::Parameter>(et, PartialShape::dynamic());
auto groupConv =
make_shared<op::v1::GroupConvolution>(data_batch, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
auto data_batch = make_shared<op::Parameter>(et, PartialShape::dynamic());
auto filters = make_shared<op::Parameter>(et, filters_pshape);
OV_EXPECT_THROW(
auto op =
make_shared<op::v1::GroupConvolution>(data_batch, filters, strides, pads_begin, pads_end, dilations),
NodeValidationFailure,
HasSubstr("Pads begin and end should be defined for all and only spatial dimensions."));
}
try {
{
Strides strides{1, 1};
Strides dilations{1, 1};
CoordinateDiff pads_begin{0, 0};
@ -493,26 +499,58 @@ TEST(type_prop, group_convolution_invalid_conv_param_spatial_dims) {
auto data_batch = make_shared<op::Parameter>(et, PartialShape::dynamic());
auto filters = make_shared<op::Parameter>(et, filters_pshape);
auto groupConv =
make_shared<op::v1::GroupConvolution>(data_batch, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
OV_EXPECT_THROW(
auto op =
make_shared<op::v1::GroupConvolution>(data_batch, filters, strides, pads_begin, pads_end, dilations),
NodeValidationFailure,
HasSubstr("Pads begin and end should be defined for all and only spatial dimensions."));
}
}
TEST(type_prop, group_convolution_default_constructed) {
auto conv = make_shared<op::v1::GroupConvolution>();
conv->set_auto_pad(op::PadType::SAME_LOWER);
TEST(type_prop, group_convolution_interval_shapes) {
PartialShape data_batch_pshape{{1, 3}, {2, 6}, {1, 5}, {3, 10}, {20, 100}};
PartialShape filters_pshape{{2, 3}, {1, 3}, {2, 3}, 3, 3, 3};
set_shape_labels(data_batch_pshape, 10);
set_shape_labels(filters_pshape, 20);
const auto &input_shape = ov::PartialShape::dynamic(), filters_shape = ov::PartialShape{1, 1, 1, 3, 3};
const auto& input_shapes = std::vector<ov::PartialShape>{input_shape, filters_shape};
std::vector<ov::PartialShape> output_shapes(1);
auto pad_begin = CoordinateDiff{}, pad_end = CoordinateDiff{};
const element::Type_t et = element::f32;
const auto auto_pad = op::PadType::EXPLICIT;
int64_t num_spatial = calculate_num_spatial(conv.get(), input_shape, filters_shape, 2, 3);
update_and_validate_attributes(conv.get(), num_spatial);
EXPECT_NO_THROW(shape_infer(conv.get(), pad_begin, pad_end, input_shapes, output_shapes));
auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape);
auto filters = make_shared<op::Parameter>(et, filters_pshape);
auto groupConv = make_shared<op::v1::GroupConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{},
auto_pad);
EXPECT_THAT(get_shape_labels(groupConv->get_output_partial_shape(0)),
ElementsAre(10, ov::no_label, ov::no_label, ov::no_label, ov::no_label));
EXPECT_EQ(groupConv->get_output_partial_shape(0), PartialShape({{1, 3}, {2, 9}, {1, 3}, {1, 8}, {18, 98}}));
EXPECT_EQ(groupConv->get_pads_begin(), (CoordinateDiff{0, 0, 0}));
EXPECT_EQ(groupConv->get_pads_end(), (CoordinateDiff{0, 0, 0}));
}
// A default-constructed GroupConvolution configured only through setters must
// validate and infer an output shape even with a fully dynamic data input,
// using the filters' rank/channels and explicit padding.
TEST(type_prop, group_convolution_default_constructed) {
    const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 1, 3, 3});

    const auto gconv = make_shared<op::v1::GroupConvolution>();
    gconv->set_arguments(OutputVector{data, filters});
    gconv->set_strides({1, 1});
    gconv->set_dilations({1, 1});
    gconv->set_pads_begin({2, 2});
    gconv->set_pads_end({2, 2});
    gconv->set_auto_pad(op::PadType::EXPLICIT);
    gconv->validate_and_infer_types();

    // Attributes must round-trip through the setters unchanged.
    EXPECT_EQ(gconv->get_strides(), Strides({1, 1}));
    EXPECT_EQ(gconv->get_dilations(), Strides({1, 1}));
    EXPECT_EQ(gconv->get_pads_begin(), CoordinateDiff({2, 2}));
    EXPECT_EQ(gconv->get_pads_end(), CoordinateDiff({2, 2}));
    EXPECT_EQ(gconv->get_input_size(), 2);
    EXPECT_EQ(gconv->get_output_size(), 1);
    // Batch stays dynamic; channels come from filters; spatial lower bounds come from padding.
    EXPECT_EQ(gconv->get_output_partial_shape(0), PartialShape({-1, 1, {2, -1}, {2, -1}}));
}

View File

@ -9,6 +9,7 @@
using namespace std;
using namespace ngraph;
using namespace testing;
TEST(type_prop, group_convolution_backprop_data_shape_infer) {
const PartialShape data_pshape{1, 16, 6, 6}; // [N, C_IN * GROUPS, H, W]
@ -156,12 +157,10 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st
}
TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_static_ranks_filters_group_cout_dyn) {
const PartialShape data_pshape{Dimension::dynamic(), 16, 5, 5}; // [N, C_IN * GROUPS, H, W]
const PartialShape filters_pshape{Dimension::dynamic(),
16,
Dimension::dynamic(),
3,
3}; // [GROUPS, C_IN, C_OUT, kH, kW]
PartialShape data_pshape{Dimension::dynamic(), 16, 5, 5}; // [N, C_IN * GROUPS, H, W]
PartialShape filters_pshape{Dimension::dynamic(), 16, Dimension::dynamic(), 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW]
set_shape_labels(data_pshape, 10);
set_shape_labels(filters_pshape, 20);
const element::Type_t et = element::f32;
auto data = make_shared<op::Parameter>(et, data_pshape);
@ -174,6 +173,7 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_with_output_shape_st
Strides{},
op::PadType::SAME_UPPER);
EXPECT_THAT(get_shape_labels(gcbd->get_output_partial_shape(0)), ElementsAre(10, 22, ov::no_label, ov::no_label));
ASSERT_EQ(gcbd->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, 3}));
}
@ -270,12 +270,10 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_data_ci
}
TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters_group_cout_dyn) {
const PartialShape data_pshape{1, 20, 224, 224}; // [N, C_IN * GROUPS, H, W]
const PartialShape filters_pshape{Dimension::dynamic(),
Dimension::dynamic(),
2,
3,
3}; // [GROUPS, C_IN, C_OUT, kH, kW]
PartialShape data_pshape{1, 20, 224, 224}; // [N, C_IN * GROUPS, H, W]
PartialShape filters_pshape{Dimension::dynamic(), Dimension::dynamic(), 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW]
set_shape_labels(data_pshape, 10);
set_shape_labels(filters_pshape, 20);
const element::Type_t et = element::f32;
const Strides strides{2, 2};
@ -291,7 +289,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters
padding_begin,
padding_end,
dilations);
EXPECT_THAT(get_shape_labels(gcbd->get_output_partial_shape(0)),
ElementsAre(10, ov::no_label, ov::no_label, ov::no_label));
ASSERT_EQ(gcbd->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 447, 447}));
}
@ -318,8 +317,10 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_data_sp
}
TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters_spatial_dim_dyn) {
const PartialShape data_pshape{Dimension::dynamic(), 20, 224, Dimension::dynamic()}; // [N, C_IN * GROUPS, H, W]
const PartialShape filters_pshape{4, 5, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW]
PartialShape data_pshape{Dimension::dynamic(), 20, 224, Dimension::dynamic()}; // [N, C_IN * GROUPS, H, W]
PartialShape filters_pshape{4, 5, 2, 3, 3}; // [GROUPS, C_IN, C_OUT, kH, kW]
set_shape_labels(data_pshape, 10);
set_shape_labels(filters_pshape, 20);
const element::Type_t et = element::f32;
const Strides strides{2, 2};
@ -336,6 +337,8 @@ TEST(type_prop, group_convolution_backprop_data_shape_infer_static_ranks_filters
padding_end,
dilations);
EXPECT_THAT(get_shape_labels(gcbd->get_output_partial_shape(0)),
ElementsAre(10, ov::no_label, ov::no_label, ov::no_label));
ASSERT_EQ(gcbd->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 8, 447, Dimension(1, -1)}));
}
@ -627,7 +630,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_channel_dims) {
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input channels dimension of data batch has incompatible value with filter shape."));
std::string(
"Input channels dimension of data batch is incompatible with filter groups or input channels."));
} catch (...) {
FAIL() << "Input shapes validation check failed for unexpected reason.";
}
@ -646,7 +650,8 @@ TEST(type_prop, group_convolution_backprop_data_invalid_input_channel_dims) {
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input channels dimension of data batch has incompatible value with filter shape."));
std::string(
"Input channels dimension of data batch is incompatible with filter groups or input channels."));
} catch (...) {
FAIL() << "Input shapes validation check failed for unexpected reason.";
}
@ -671,7 +676,7 @@ TEST(type_prop, group_convolution_backprop_data_invalid_output_shape_spatial_dim
FAIL() << "Incompatible output shape optional input not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Output shape should be specified only and for all spatial dimensions."));
std::string("Output shape should be defined for all and only spatial dimensions."));
} catch (...) {
FAIL() << "Output shape validation check failed for unexpected reason.";
}
@ -763,7 +768,7 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims)
make_shared<op::v1::GroupConvolutionBackpropData>(data, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin should be defined for all and only spatial dimensions.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -779,7 +784,7 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims)
make_shared<op::v1::GroupConvolutionBackpropData>(data, filters, strides, pads_begin, pads_end, dilations);
FAIL() << "Invalid padding spatial dimensions not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Pads end should be defined for all and only spatial dimensions.");
EXPECT_HAS_SUBSTRING(error.what(), "Pads begin and end should be defined for all and only spatial dimensions.");
} catch (...) {
FAIL() << "Padding spatial dimensions validation check failed for unexpected reason";
}
@ -835,17 +840,54 @@ TEST(type_prop, group_convolution_backprop_data_invalid_conv_param_spatial_dims)
}
}
TEST(type_prop, group_convolution_back_prop_data_default_constructed) {
auto conv = make_shared<op::v1::GroupConvolutionBackpropData>();
TEST(type_prop, group_convolution_backprop_data_default_constructed) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 1, 3, 3, 3});
const auto out_spatial = op::Constant::create(element::i32, Shape{3}, {5, 4, 10});
const auto &input_shape = ov::PartialShape::dynamic(), filters_shape = ov::PartialShape{1, 1, 1, 3, 3},
output_spatial_shape_shape = ov::PartialShape({2});
const auto& input_shapes = std::vector<ov::PartialShape>{input_shape, filters_shape, output_spatial_shape_shape};
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic()};
auto pad_begin = CoordinateDiff{}, pad_end = CoordinateDiff{};
const auto& output_spatial_shape = ov::PartialShape{3, 3};
int64_t num_spatial =
calculate_num_spatial(conv.get(), input_shape, filters_shape, output_spatial_shape_shape, 2, 3);
update_and_validate_attributes_back_prop(conv.get(), num_spatial);
EXPECT_NO_THROW(shape_infer(conv.get(), pad_begin, pad_end, output_spatial_shape, input_shapes, output_shapes));
const auto op = make_shared<op::v1::GroupConvolutionBackpropData>();
op->set_arguments(OutputVector{data, filters, out_spatial});
op->set_strides({1, 1, 1});
op->set_dilations({1, 1, 1});
op->set_pads_begin({2, 2, 2});
op->set_pads_end({2, 2, 2});
op->set_auto_pad(op::PadType::EXPLICIT);
op->validate_and_infer_types();
EXPECT_EQ(op->get_input_size(), 3);
EXPECT_EQ(op->get_output_size(), 1);
EXPECT_EQ(op->get_strides(), Strides({1, 1, 1}));
EXPECT_EQ(op->get_dilations(), Strides({1, 1, 1}));
EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2, 2}));
EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2, 2}));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({-1, 1, 5, 4, 10}));
}
// Interval (bounded-dynamic) shapes with labels on all three inputs:
// batch label comes from data, channel label from filters, spatial labels from
// the output-spatial-shape input; SAME_LOWER auto-pad resolves pads to zeros.
TEST(type_prop, group_convolution_backprop_data_interval_shapes) {
    PartialShape data_batch_pshape{{1, 3}, {2, 6}, {1, 5}, {3, 10}, {20, 100}};  // [N, C_IN * GROUPS, D, H, W]
    PartialShape filters_pshape{{2, 3}, {1, 3}, 1, 3, 3, 3};                     // [GROUPS, C_IN, C_OUT, kD, kH, kW]
    PartialShape out_spatial_pshape{{2, 3}, -1, 10};
    set_shape_labels(data_batch_pshape, 10);
    set_shape_labels(filters_pshape, 20);
    set_shape_labels(out_spatial_pshape, 30);
    const element::Type_t et = element::f32;
    const auto auto_pad = op::PadType::SAME_LOWER;
    const auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape);
    const auto filters = make_shared<op::Parameter>(et, filters_pshape);
    // The output spatial shape is delivered through ShapeOf, so its values are
    // only known as the labeled bounds of out_spatial_pshape.
    const auto out_spatial_shape_of = make_shared<op::v0::ShapeOf>(make_shared<op::Parameter>(et, out_spatial_pshape));
    const auto op = make_shared<op::v1::GroupConvolutionBackpropData>(data_batch,
                                                                      filters,
                                                                      out_spatial_shape_of,
                                                                      Strides{},
                                                                      CoordinateDiff{},
                                                                      CoordinateDiff{},
                                                                      Strides{},
                                                                      auto_pad);
    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 20, 30, 31, 32));
    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{1, 3}, {2, 3}, {2, 3}, -1, 10}));
    EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0, 0}));
    EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0, 0}));
}

View File

@ -15,13 +15,16 @@
#include "augru_sequence_shape_inference.hpp"
#include "avg_pool_shape_inference.hpp"
#include "batch_to_space_shape_inference.hpp"
#include "binary_convolution_shape_inference.hpp"
#include "broadcast_shape_inference.hpp"
#include "bucketize_shape_inference.hpp"
#include "concat_shape_inference.hpp"
#include "convolution_backprop_shape_inference.hpp"
#include "convolution_shape_inference.hpp"
#include "ctc_greedy_decoder_seq_len_shape_inference.hpp"
#include "ctc_greedy_decoder_shape_inference.hpp"
#include "ctc_loss_shape_inference.hpp"
#include "deformable_convolution_shape_inference.hpp"
#include "deformable_psroi_pooling_shape_inference.hpp"
#include "depth_to_space_shape_inference.hpp"
#include "detection_output_shape_inference.hpp"
@ -43,6 +46,8 @@
#include "gather_shape_inference.hpp"
#include "gather_tree_shape_inference.hpp"
#include "grid_sample_shape_inference.hpp"
#include "group_convolution_backprop_shape_inference.hpp"
#include "group_convolution_shape_inference.hpp"
#include "gru_cell_shape_inference.hpp"
#include "gru_sequence_shape_inference.hpp"
#include "interpolate_shape_inference.hpp"
@ -256,11 +261,8 @@ public:
}
};
static inline ov::CoordinateDiff convertPadding(const ov::CoordinateDiff& newPads) {
return newPads;
}
static inline ov::CoordinateDiff convertPadding(const ov::Shape& newPads) {
/// \brief Copies any begin()/end()-iterable padding container (e.g. ov::Shape
///        or ov::CoordinateDiff) into an ov::CoordinateDiff.
/// \tparam TContainer container type exposing begin() and end().
/// \param newPads     source padding values.
/// \return ov::CoordinateDiff holding the same values in the same order.
template <class TContainer>
ov::CoordinateDiff convertPadding(const TContainer& newPads) {
    ov::CoordinateDiff converted(newPads.begin(), newPads.end());
    return converted;
}
@ -302,71 +304,13 @@ public:
}
};
// Shape-inference entry for forward convolution operators.
// Auto padding is resolved into the local pads_begin/pads_end members, which
// are then exposed through the get_pads_* overrides.
// \tparam OP         concrete convolution operator type.
// \tparam is_grouped true when filters carry an extra leading GROUPS dimension
//                    (changes the filter non-spatial dimension count: 3 vs 2).
template <typename OP, bool is_grouped>
class entryConv : public entryBase {
public:
    entryConv(std::shared_ptr<Node> node) : entryBase(std::move(node)) {}

    // Pads as resolved by the most recent infer() call.
    const ov::CoordinateDiff& get_pads_begin() override {
        return pads_begin;
    }

    const ov::CoordinateDiff& get_pads_end() override {
        return pads_end;
    }

    IShapeInferCommon::Result
    infer(const std::vector<StaticShape>& input_shapes, const std::map<size_t, HostTensorPtr>& constant_data) override {
        auto op = static_cast<OP*>(node.get());
        std::vector<StaticShape> output_shapes(op->get_output_size());
        // Resolve auto_pad into pads_begin/pads_end; fails when the inputs do
        // not carry enough information to compute the padding statically.
        bool status = resolve_auto_pad_for_shape(op, pads_begin, pads_end, input_shapes, 2, is_grouped ? 3 : 2);
        OPENVINO_ASSERT(status,
                        "Convolution shape inference doesn't have enough information to calculate static shapes");
        shape_infer(op, pads_begin, pads_end, input_shapes, output_shapes);
        return {std::move(output_shapes), ShapeInferStatus::success};
    }

protected:
    ov::CoordinateDiff pads_begin, pads_end;
};
// Shape-inference entry for backward-data (transposed) convolution operators.
// Reads the optional output-spatial-shape from input 2 (when present) before
// resolving auto padding into the local pads_begin/pads_end members.
// \tparam OP         concrete backprop-data convolution operator type.
// \tparam is_grouped true when filters carry an extra leading GROUPS dimension.
template <typename OP, bool is_grouped>
class entryConvBackprop : public entryBase {
public:
    entryConvBackprop(std::shared_ptr<Node> node) : entryBase{std::move(node)} {}

    // Pads as resolved by the most recent infer() call.
    const ov::CoordinateDiff& get_pads_begin() override {
        return pads_begin;
    }

    const ov::CoordinateDiff& get_pads_end() override {
        return pads_end;
    }

    IShapeInferCommon::Result
    infer(const std::vector<StaticShape>& input_shapes, const std::map<size_t, HostTensorPtr>& constant_data) override {
        StaticShape output_shape_input;
        auto op = static_cast<OP*>(node.get());
        std::vector<StaticShape> output_shapes(op->get_output_size());
        // Third input (index 2), when connected, holds the requested output
        // spatial shape; fetch it from constant data if available.
        if (op->get_input_size() == 3)
            get_data_as_shape<StaticShape>(2, op, output_shape_input, constant_data);
        bool status = resolve_auto_pad_for_shape_back_prop(op,
                                                           pads_begin,
                                                           pads_end,
                                                           input_shapes,
                                                           output_shape_input,
                                                           2,
                                                           is_grouped ? 3 : 2);
        OPENVINO_ASSERT(
            status,
            "ConvolutionBackpropData shape inference doesn't have enough information to calculate static shapes");
        shape_infer(op, pads_begin, pads_end, output_shape_input, input_shapes, output_shapes);
        return {std::move(output_shapes), ShapeInferStatus::success};
    }

protected:
    ov::CoordinateDiff pads_begin, pads_end;
};
template <class TOp>
class ShapeInferBaseWithPadding : public entryBase {
class ShapeInferWithPaddingConvert : public entryBase {
public:
ShapeInferBaseWithPadding(std::shared_ptr<Node> node) : entryBase{std::move(node)}, m_pads_begin{}, m_pads_end{} {}
ShapeInferWithPaddingConvert(std::shared_ptr<Node> node)
: entryBase{std::move(node)},
m_pads_begin{},
m_pads_end{} {}
IShapeInferCommon::Result infer(const std::vector<StaticShape>& input_shapes,
const std::map<size_t, ov::HostTensorPtr>& constant_data) override {
@ -393,6 +337,30 @@ protected:
ov::CoordinateDiff m_pads_begin, m_pads_end;
};
// Shape-inference entry for operators whose shape_infer overload takes pads
// as in/out parameters. The resolved pads are kept in local members (not
// written back to the operator) and exposed via the get_pads_* overrides.
// \tparam TOp concrete operator type with a matching shape_infer overload.
template <class TOp>
class ShapeInferWithPadding : public entryBase {
public:
    ShapeInferWithPadding(std::shared_ptr<Node> node) : entryBase{std::move(node)}, m_pads_begin{}, m_pads_end{} {}

    IShapeInferCommon::Result infer(const std::vector<StaticShape>& input_shapes,
                                    const std::map<size_t, ov::HostTensorPtr>& constant_data) override {
        auto op = static_cast<TOp*>(node.get());
        // shape_infer fills m_pads_begin/m_pads_end as a side effect.
        auto out_shapes = shape_infer(op, input_shapes, m_pads_begin, m_pads_end, constant_data);
        return {std::move(out_shapes), ShapeInferStatus::success};
    }

    // Pads as resolved by the most recent infer() call.
    const ov::CoordinateDiff& get_pads_begin() override {
        return m_pads_begin;
    }

    const ov::CoordinateDiff& get_pads_end() override {
        return m_pads_end;
    }

protected:
    ov::CoordinateDiff m_pads_begin, m_pads_end;
};
/**
* @brief Base shape inference object implementing the IStaticShapeInfer without padding support
*
@ -548,15 +516,18 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
_OV_OP_SHAPE_INFER_REG(AdaptiveAvgPool, entryIOC),
_OV_OP_SHAPE_INFER_REG(AdaptiveMaxPool, entryIOC),
_OV_OP_SHAPE_INFER_REG(Assign, entryIO),
_OV_OP_SHAPE_INFER_REG(AvgPool, ShapeInferBaseWithPadding),
_OV_OP_SHAPE_INFER_REG(AvgPool, ShapeInferWithPaddingConvert),
_OV_OP_SHAPE_INFER_REG(BatchToSpace, entryIOC),
_OV_OP_SHAPE_INFER_REG(BinaryConvolution, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(Broadcast, entryIOC),
_OV_OP_SHAPE_INFER_REG(Bucketize, entryIO),
_OV_OP_SHAPE_INFER_REG(Concat, entryIO),
_OV_OP_SHAPE_INFER_REG(Convolution, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(ConvolutionBackpropData, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(CTCGreedyDecoder, entryIO),
_OV_OP_SHAPE_INFER_REG(CTCGreedyDecoderSeqLen, entryIO),
_OV_OP_SHAPE_INFER_REG(CTCLoss, entryIO),
_OV_OP_SHAPE_INFER_REG(DeformableConvolution, entryFallbackWithPadding),
_OV_OP_SHAPE_INFER_REG(DeformableConvolution, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(DeformablePSROIPooling, entryIO),
_OV_OP_SHAPE_INFER_REG(DepthToSpace, entryIO),
_OV_OP_SHAPE_INFER_REG(DetectionOutput, entryIO),
@ -577,6 +548,8 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
_OV_OP_SHAPE_INFER_REG(GatherND, entryIO),
_OV_OP_SHAPE_INFER_REG(GatherTree, entryIO),
_OV_OP_SHAPE_INFER_REG(GridSample, entryIO),
_OV_OP_SHAPE_INFER_REG(GroupConvolution, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(GroupConvolutionBackpropData, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(GRUCell, entryIO),
_OV_OP_SHAPE_INFER_REG(GRUSequence, entryIO),
_OV_OP_SHAPE_INFER_REG(IDFT, entryIOC),
@ -584,7 +557,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
_OV_OP_SHAPE_INFER_REG(IRDFT, entryIOC),
_OV_OP_SHAPE_INFER_REG(LSTMCell, entryIO),
_OV_OP_SHAPE_INFER_REG(MatMul, entryIO),
_OV_OP_SHAPE_INFER_REG(MaxPool, ShapeInferBaseWithPadding),
_OV_OP_SHAPE_INFER_REG(MaxPool, ShapeInferWithPaddingConvert),
_OV_OP_SHAPE_INFER_REG(OneHot, entryIOC),
_OV_OP_SHAPE_INFER_REG(ov::op::internal::AUGRUCell, entryIO),
_OV_OP_SHAPE_INFER_REG(ov::op::internal::AUGRUSequence, entryIO),
@ -616,12 +589,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
_OV_OP_SHAPE_INFER_REG(Transpose, entryIOC),
_OV_OP_SHAPE_INFER_REG(Unsqueeze, entryIOC),
_OV_OP_SHAPE_INFER_REG(VariadicSplit, entryIOC),
_OV_OP_SHAPE_INFER_VA_REG(Convolution, entryConv, Convolution, false),
_OV_OP_SHAPE_INFER_VA_REG(ConvolutionBackpropData, entryConvBackprop, ConvolutionBackpropData, false),
_OV_OP_SHAPE_INFER_VA_REG(ConvolutionBackpropData, entryConvBackprop, ConvolutionBackpropData, false),
_OV_OP_SHAPE_INFER_VA_REG(Gather, entryIOC, ov::op::util::GatherBase),
_OV_OP_SHAPE_INFER_VA_REG(GroupConvolution, entryConv, GroupConvolution, true),
_OV_OP_SHAPE_INFER_VA_REG(GroupConvolutionBackpropData, entryConvBackprop, GroupConvolutionBackpropData, true),
_OV_OP_SHAPE_INFER_VA_REG(ReduceL1, entryIOC, op::util::ArithmeticReductionKeepDims),
_OV_OP_SHAPE_INFER_VA_REG(ReduceL2, entryIOC, op::util::ArithmeticReductionKeepDims),
_OV_OP_SHAPE_INFER_VA_REG(ReduceLogicalAnd, entryIOC, op::util::LogicalReductionKeepDims),
@ -645,11 +613,11 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
_OV_OP_NON_TEMPLATE_SHAPE_INFER_REG(opset1::BatchNormInference, entryFirstPassthrough),
_OV_OP_NON_TEMPLATE_SHAPE_INFER_REG(opset1::Softmax, entryCopy),
_OV_OP_SHAPE_INFER_REG(opset1::Broadcast, entryIOC),
_OV_OP_SHAPE_INFER_REG(opset1::DeformableConvolution, entryFallbackWithPadding),
_OV_OP_SHAPE_INFER_REG(opset1::DeformableConvolution, ShapeInferWithPadding),
_OV_OP_SHAPE_INFER_REG(opset1::DetectionOutput, entryIO),
_OV_OP_SHAPE_INFER_REG(opset1::Interpolate, entryIOC),
_OV_OP_SHAPE_INFER_REG(opset1::LSTMCell, entryIO),
_OV_OP_SHAPE_INFER_REG(opset1::MaxPool, ShapeInferBaseWithPadding),
_OV_OP_SHAPE_INFER_REG(opset1::MaxPool, ShapeInferWithPaddingConvert),
_OV_OP_SHAPE_INFER_REG(opset1::Proposal, entryIO),
_OV_OP_SHAPE_INFER_REG(opset1::Range, entryIOC),
_OV_OP_SHAPE_INFER_REG(opset1::ShapeOf, entryIO),

View File

@ -0,0 +1,117 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset11.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for op::v1::BinaryConvolution static shape inference tests.
// Pre-sizes the shared output_shapes vector and provides a common mode and
// pad value reused when constructing the operator in each test.
class BinaryConvolutionV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v1::BinaryConvolution> {
protected:
    void SetUp() override {
        output_shapes.resize(1);
    }

    // Common BinaryConvolution attributes for the tests below.
    const op_type::BinaryConvolutionMode mode = op_type::BinaryConvolutionMode::XNOR_POPCOUNT;
    const float pad_value = 1.0f;
};
// Default-constructed op configured only through setters: shape inference must
// still work, and with VALID auto-pad the pads reported by the shape-infer
// object are zeros regardless of the pads set on the operator.
TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, default_ctor) {
    op = make_op();
    op->set_strides({1, 1});
    op->set_dilations({1, 1});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 1});
    op->set_auto_pad(op::PadType::VALID);

    input_shapes = ShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}};
    auto shape_infer = make_shape_inference(op);
    output_shapes = shape_infer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 2, 6, 8}));
    EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
    EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
// BinaryConvolution with fully dynamic data/filters ranks and SAME_LOWER
// auto-pad: static inputs {3,6,5,5} x {7,6,3,3} must infer {3,7,5,5}.
TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, auto_pads_same_lower_inputs_dynamic_rank) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    op = make_op(data,
                 filters,
                 Strides{1, 1},
                 CoordinateDiff{0, 0},
                 CoordinateDiff{0, 0},
                 Strides{1, 1},
                 mode,
                 pad_value,
                 op::PadType::SAME_LOWER);

    input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5}));
}
// BinaryConvolution with static 4D ranks and auto padding: static inputs
// {3,6,5,5} x {7,6,3,3} must infer {3,7,5,5}.
// NOTE(review): the test name says 'same_lower' but the op uses SAME_UPPER —
// consider renaming in a follow-up (kept as-is to preserve the reported name).
TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, auto_pad_same_lower_inputs_static_ranks) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    op = make_op(data,
                 filters,
                 Strides{1, 1},
                 CoordinateDiff{0, 0},
                 CoordinateDiff{0, 0},
                 Strides{1, 1},
                 mode,
                 pad_value,
                 op::PadType::SAME_UPPER);

    input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5}));
}
// Negative case: data channel count (5) differs from filters channel count (6);
// shape inference must throw NodeValidationFailure with the mismatch message.
TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, data_and_filters_num_channels_not_same) {
    const auto strides = Strides{1, 1};
    const auto dilations = Strides{1, 1};
    const auto pads_begin = CoordinateDiff{0, 0};
    const auto pads_end = CoordinateDiff{0, 0};
    const auto auto_pad = op::PadType::SAME_UPPER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);

    // Channel dim of data (5) vs filters input-channel dim (6).
    input_shapes = ShapeVector{{3, 5, 5, 5}, {7, 6, 3, 3}};
    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
                    NodeValidationFailure,
                    HasSubstr("Data batch channel count (5) does not match filter"));
}
// Negative case: BinaryConvolution expects 4D input; a 3D data/filters pair
// must raise NodeValidationFailure mentioning the expected rank.
TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, data_rank_not_4) {
    const auto strides = Strides{1, 1};
    const auto dilations = Strides{1, 1};
    const auto pads_begin = CoordinateDiff{0, 0};
    const auto pads_end = CoordinateDiff{0, 0};
    const auto auto_pad = op::PadType::SAME_LOWER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);

    // 3D static shapes where 4D is required.
    input_shapes = ShapeVector{{3, 6, 5}, {7, 6, 3}};
    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
                    NodeValidationFailure,
                    HasSubstr("Expected 4D for the input. Got:"));
}

View File

@ -0,0 +1,147 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "convolution_backprop_shape_inference.hpp"
#include "openvino/opsets/opset11.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for op::v1::ConvolutionBackpropData static shape inference tests;
// pre-sizes the shared output_shapes vector to one element.
class ConvolutionBackpropDataV1StaticShapeInferenceTest
    : public OpStaticShapeInferenceTest<op::v1::ConvolutionBackpropData> {
protected:
    void SetUp() override {
        output_shapes.resize(1);
    }
};
// Calls the free ov::op::v1::shape_infer directly with caller-owned pads.
// With EXPLICIT auto-pad the supplied pads must be used as-is and must not be
// modified by the call (checked by the trailing pad expectations).
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor_direct_infer_call) {
    const auto spatial_shape = PartialShape{500, 500};
    op = make_op();
    op->set_strides({2, 2});
    op->set_dilations({1, 1});
    op->set_output_padding({0, 0});
    op->set_auto_pad(op::PadType::EXPLICIT);
    op->set_output_shape(spatial_shape.to_shape());

    // Pads owned by the caller, passed by reference into shape_infer.
    auto pads_begin = CoordinateDiff{1, 1};
    auto pads_end = CoordinateDiff{1, 1};

    input_shapes = ShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}};
    output_shapes = ov::op::v1::shape_infer(op.get(), input_shapes, pads_begin, pads_end);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 10, 500, 500}));
    EXPECT_EQ(pads_begin, CoordinateDiff({1, 1}));
    EXPECT_EQ(pads_end, CoordinateDiff({1, 1}));
}
// Same configuration as the direct-call test above, but going through the
// make_shape_inference wrapper: the wrapper must report the operator's
// explicit pads ({1,1}/{1,1}) unchanged.
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor_with_output_shape) {
    const auto spatial_shape = PartialShape{500, 500};
    op = make_op();
    op->set_strides({2, 2});
    op->set_dilations({1, 1});
    op->set_pads_begin({1, 1});
    op->set_pads_end({1, 1});
    op->set_output_padding({0, 0});
    op->set_auto_pad(op::PadType::EXPLICIT);
    op->set_output_shape(spatial_shape.to_shape());

    input_shapes = ShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}};
    auto shape_infer = make_shape_inference(op);
    output_shapes = shape_infer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 10, 500, 500}));
    EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({1, 1}));
    EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({1, 1}));
}
// Default-constructed op without the output-shape input: VALID auto-pad must
// resolve the reported pads to zeros even though non-zero pads were set, and
// output_padding {1,1} contributes to the inferred spatial dims.
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor) {
    op = make_op();
    op->set_strides({1, 1});
    op->set_dilations({1, 1});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 1});
    op->set_output_padding({1, 1});
    op->set_auto_pad(op::PadType::VALID);

    input_shapes = ShapeVector{{1, 3, 10, 12}, {3, 3, 5, 5}};
    auto shape_infer = make_shape_inference(op);
    output_shapes = shape_infer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 15, 17}));
    EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
    EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
// ConvolutionBackpropData without the output-spatial-shape input, fully
// dynamic input ranks, SAME_LOWER auto-pad: static {3,6,5,5} x {6,1,3,3}
// must infer {3,1,7,7}.
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 2d_inputs_dynamic_rank_no_spatial_shape) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    op = make_op(data,
                 filters,
                 Strides{1, 1},
                 CoordinateDiff{0, 0},
                 CoordinateDiff{0, 0},
                 Strides{1, 1},
                 op::PadType::SAME_LOWER);

    input_shapes = ShapeVector{{3, 6, 5, 5}, {6, 1, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 1, 7, 7}));
}
// Output spatial shape supplied as a Constant third input: the inferred
// spatial dims must equal the constant values {2,1,3}.
// NOTE(review): name says 'same_lower' but the op uses SAME_UPPER — consider
// renaming in a follow-up.
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_out_spatial_as_const) {
    const auto strides = Strides{1, 1, 1};
    const auto dilations = Strides{1, 1, 1};
    const auto pads_begin = CoordinateDiff{0, 0, 0};
    const auto pads_end = CoordinateDiff{0, 0, 0};
    const auto auto_pad = op::PadType::SAME_UPPER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto out_spatial = op::v0::Constant::create(element::i64, Shape{3}, {2, 1, 3});
    op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);

    input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {6, 2, 3, 3, 3}, {3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 2, 2, 1, 3}));
}
// Output spatial shape supplied at infer time through the constant-data map
// (input 2 is a dynamic Parameter): inferred spatial dims must equal {2,6,1}.
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 3d_auto_pad_same_upper_out_spatial_in_map) {
    const auto strides = Strides{1, 1, 1};
    const auto dilations = Strides{1, 1, 1};
    const auto pads_begin = CoordinateDiff{0, 0, 0};
    const auto pads_end = CoordinateDiff{0, 0, 0};
    const auto auto_pad = op::PadType::SAME_UPPER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto out_spatial = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic(1));
    op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);

    // Values for input 2 delivered via the constant-data map instead of a Constant node.
    int32_t spatial_dims[] = {2, 6, 1};
    const auto const_map =
        std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, spatial_dims)}};

    input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {5, 7, 3, 3, 3}, {3}};
    shape_inference(op.get(), input_shapes, output_shapes, const_map);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 2, 6, 1}));
}

View File

@ -1,156 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
// Convolution over fully dynamic 4D data/filters with SAME_LOWER auto-pad:
// static inputs {3,6,5,5} x {7,6,3,3} must infer an output of {3,7,5,5}.
TEST(StaticShapeInferenceTest, ConvolutionTest) {
    const auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    const auto filters = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    auto conv = std::make_shared<op::v1::Convolution>(data,
                                                      filters,
                                                      Strides{1, 1},
                                                      CoordinateDiff{0, 0},
                                                      CoordinateDiff{0, 0},
                                                      Strides{1, 1},
                                                      op::PadType::SAME_LOWER);

    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}, StaticShape{7, 6, 3, 3}};
    std::vector<StaticShape> static_output_shapes = {StaticShape{}};
    shape_inference(conv.get(), static_input_shapes, static_output_shapes);

    ASSERT_EQ(static_output_shapes[0], StaticShape({3, 7, 5, 5}));
}
// GroupConvolution over fully dynamic data (4D) and filters (5D) with
// SAME_LOWER auto-pad: static inputs {1,4,5,5} x {2,1,2,3,3} infer {1,2,5,5}.
TEST(StaticShapeInferenceTest, GroupConvolutionTest) {
    const auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    const auto filters = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
    auto conv = std::make_shared<op::v1::GroupConvolution>(data,
                                                           filters,
                                                           Strides{1, 1},
                                                           CoordinateDiff{0, 0},
                                                           CoordinateDiff{0, 0},
                                                           Strides{1, 1},
                                                           op::PadType::SAME_LOWER);

    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 4, 5, 5}, StaticShape{2, 1, 2, 3, 3}};
    std::vector<StaticShape> static_output_shapes = {StaticShape{}};
    shape_inference(conv.get(), static_input_shapes, static_output_shapes);

    ASSERT_EQ(static_output_shapes[0], StaticShape({1, 2, 5, 5}));
}
// ConvolutionBackpropData with an explicit output spatial shape constant
// {3,3}: the inferred output must take its spatial dims from that constant.
TEST(StaticShapeInferenceTest, ConvolutionBackPropDataTest) {
    auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    auto filters = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    const Strides strides{2, 2};
    const Strides dilations{1, 1};
    const CoordinateDiff padding_begin{1, 1};
    const CoordinateDiff padding_end{1, 1};
    const CoordinateDiff output_padding{1, 1};
    const op::PadType auto_pad = op::PadType::SAME_LOWER;
    // Requested output spatial shape for the two spatial dims.
    auto output_shape = std::make_shared<op::v0::Constant>(
        ov::element::i64, ov::Shape{2}, std::vector<int64_t>({3, 3}));

    auto conv = std::make_shared<op::v1::ConvolutionBackpropData>(data,
                                                                  filters,
                                                                  output_shape,
                                                                  strides,
                                                                  padding_begin,
                                                                  padding_end,
                                                                  dilations,
                                                                  auto_pad,
                                                                  output_padding);

    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 16, 2, 2}, StaticShape{16, 6, 3, 3}, StaticShape{2}},
                             static_output_shapes = {StaticShape{}};
    shape_inference(conv.get(), static_input_shapes, static_output_shapes);

    ASSERT_EQ(static_output_shapes[0], StaticShape({1, 6, 3, 3}));
}
// GroupConvolutionBackpropData (filters [GROUPS, C_IN, C_OUT, kH, kW]) with an
// explicit output spatial constant {3,3}: output channels = GROUPS * C_OUT
// (4 * 6 = 24) and spatial dims come from the constant.
TEST(StaticShapeInferenceTest, GroupConvolutionBackPropDataTest) {
    auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    auto filters = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
    const Strides strides{2, 2};
    const Strides dilations{1, 1};
    const CoordinateDiff padding_begin{1, 1};
    const CoordinateDiff padding_end{1, 1};
    const CoordinateDiff output_padding{1, 1};
    const op::PadType auto_pad = op::PadType::SAME_LOWER;
    // Requested output spatial shape for the two spatial dims.
    auto output_shape = std::make_shared<op::v0::Constant>(
        ov::element::i64, ov::Shape{2}, std::vector<int64_t>({3, 3}));

    auto conv = std::make_shared<op::v1::GroupConvolutionBackpropData>(data,
                                                                       filters,
                                                                       output_shape,
                                                                       strides,
                                                                       padding_begin,
                                                                       padding_end,
                                                                       dilations,
                                                                       auto_pad,
                                                                       output_padding);

    std::vector<StaticShape> static_input_shapes = {StaticShape{1, 16, 2, 2}, StaticShape{4, 4, 6, 3, 3}, StaticShape{2}},
                             static_output_shapes = {StaticShape{}};
    shape_inference(conv.get(), static_input_shapes, static_output_shapes);

    ASSERT_EQ(static_output_shapes[0], StaticShape({1, 24, 3, 3}));
}
// Disabled ad-hoc benchmark: compares the wall-clock time of Convolution
// shape inference against ReLU validate_and_infer_types over 10 iterations
// and prints per-iteration timings. Kept under '#if 0' so it is not compiled.
#if 0
TEST(StaticShapeInferenceTest, ConvolutionTimeTest) {
    Strides strides{1, 1};
    CoordinateDiff pads_begin{0, 0};
    CoordinateDiff pads_end{0, 0};
    Strides dilations{1, 1};
    const auto auto_pad = op::PadType::SAME_LOWER;

    auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{3, 6, 5, 5});
    auto filters = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{7, 6, 3, 3});
    auto conv =
        std::make_shared<op::v1::Convolution>(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);

    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}, StaticShape{7, 6, 3, 3}}, static_output_shapes = {StaticShape{}};
    auto before = std::chrono::high_resolution_clock::now();
    auto after = std::chrono::high_resolution_clock::now();
    std::cout << conv << std::endl;

    // Accumulate 10 timed runs of convolution shape inference.
    auto convolution_time_sum = 0;
    for (size_t i = 0; i < 10; ++i) {
        before = std::chrono::high_resolution_clock::now();
        shape_inference(conv.get(), static_input_shapes, static_output_shapes);
        after = std::chrono::high_resolution_clock::now();
        auto diff = std::chrono::duration_cast<std::chrono::nanoseconds>(after - before).count();
        std::cout << diff << " ns" << std::endl;
        convolution_time_sum += diff;
    }

    // other operation creation and time measurements: ReLU is an example
    auto relu = std::make_shared<op::v0::Relu>(data);
    std::cout << relu << std::endl;
    auto other_op_time_sum = 0;
    for (size_t i = 0; i < 10; ++i) {
        before = std::chrono::high_resolution_clock::now();
        relu->validate_and_infer_types();
        after = std::chrono::high_resolution_clock::now();
        auto diff = std::chrono::duration_cast<std::chrono::nanoseconds>(after - before).count();
        std::cout << diff << " ns" << std::endl;
        other_op_time_sum += diff;
    }
    std::cout << (convolution_time_sum >= other_op_time_sum ? "ON PAR WITH CONVOLUTION: " : "LONGER THAN CONVOLUTION ")
              << 1. * other_op_time_sum / convolution_time_sum << std::endl;
}
#endif

View File

@ -0,0 +1,114 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset11.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for op::v1::Convolution static shape inference tests;
// pre-sizes the shared output_shapes vector to one element.
class ConvolutionV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v1::Convolution> {
protected:
    void SetUp() override {
        output_shapes.resize(1);
    }
};
// Default-constructed Convolution configured via setters: VALID auto-pad must
// resolve the pads reported by the shape-infer object to zeros regardless of
// the pads set on the operator.
TEST_F(ConvolutionV1StaticShapeInferenceTest, default_ctor) {
    op = make_op();
    op->set_strides({1, 1});
    op->set_dilations({1, 1});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 1});
    op->set_auto_pad(op::PadType::VALID);

    input_shapes = ShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}};
    auto shape_infer = make_shape_inference(op);
    output_shapes = shape_infer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 2, 6, 8}));
    EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
    EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
TEST_F(ConvolutionV1StaticShapeInferenceTest, 2d_auto_pads_same_lower_inputs_dynamic_rank) {
    // Both parameters have fully dynamic rank; concrete shapes arrive at inference time.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());

    op = make_op(data,
                 filters,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_LOWER);

    input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    // SAME_* padding preserves spatial dims; channels come from filters' first dim.
    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5}));
}
TEST_F(ConvolutionV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_inputs_static_ranks) {
    const auto strides = Strides{1, 1, 1};
    const auto dilations = Strides{1, 1, 1};
    const auto pads_begin = CoordinateDiff{0, 0, 0};
    const auto pads_end = CoordinateDiff{0, 0, 0};
    // Fix: test name says SAME_LOWER but the op was configured with SAME_UPPER.
    // The asserted output shape is identical for both modes here (5x5x5 input,
    // 3x3x3 kernel, stride 1), so only the pad mode is aligned with the name.
    const auto auto_pad = op::PadType::SAME_LOWER;

    // Static rank (5D), all dimensions dynamic.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
    op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);

    input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 7, 5, 5, 5}));
}
TEST_F(ConvolutionV1StaticShapeInferenceTest, data_and_filters_num_channels_not_same) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1, -1});

    op = make_op(data,
                 filters,
                 Strides{1, 1, 1},          // strides
                 CoordinateDiff{0, 0, 0},   // pads_begin
                 CoordinateDiff{0, 0, 0},   // pads_end
                 Strides{1, 1, 1},          // dilations
                 op::PadType::SAME_UPPER);

    // Data has 5 channels but the filters expect 6 -> validation must fail.
    input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {7, 6, 3, 3, 3}};

    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
                    NodeValidationFailure,
                    HasSubstr("Data batch channel count (5) does not match filter"));
}
TEST_F(ConvolutionV1StaticShapeInferenceTest, data_rank_not_compatible_with_filters_rank) {
    // Data rank is dynamic at graph construction, filters are 4D.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});

    op = make_op(data,
                 filters,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_LOWER);

    // 5D data vs 4D filters -> rank mismatch must be reported.
    input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3}};

    OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes),
                    NodeValidationFailure,
                    HasSubstr("Data batch and filters rank do not match"));
}

View File

@ -0,0 +1,146 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset11.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for static shape inference tests of op::v8::DeformableConvolution.
class DeformableConvolutionV8StaticShapeInferenceTest
    : public OpStaticShapeInferenceTest<op::v8::DeformableConvolution> {
protected:
    void SetUp() override {
        // DeformableConvolution shape inference always yields exactly one output shape.
        output_shapes.resize(1);
    }
};
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, default_ctor) {
    // Default-constructed op configured through setters only; no inputs connected.
    op = make_op();
    op->set_strides({1, 2});
    op->set_dilations({1, 2});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 1});
    op->set_auto_pad(op::PadType::VALID);
    op->set_group(4);
    op->set_deformable_group(2);

    // Order: data, offsets, filters, masks.
    input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 3, 1}, {4, 1, 3, 3}, {1, 18, 3, 1}};

    const auto inferencer = make_shape_inference(op);
    output_shapes = inferencer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 4, 3, 1}));
    // VALID auto-pad replaces the explicit pads with zeros.
    EXPECT_EQ(inferencer->get_pads_begin(), CoordinateDiff({0, 0}));
    EXPECT_EQ(inferencer->get_pads_end(), CoordinateDiff({0, 0}));
}
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_lower_inputs_dynamic_rank_no_masks) {
    // Three-input variant (no mask), all ranks dynamic at graph construction.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto offsets = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());

    op = make_op(data,
                 offsets,
                 filters,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_LOWER,
                 4,                        // group
                 2);                       // deformable_group

    input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
}
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_lower_inputs_dynamic_rank) {
    // Four-input variant (with mask), all ranks dynamic at graph construction.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto offsets = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto masks = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());

    op = make_op(data,
                 offsets,
                 filters,
                 masks,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_LOWER,
                 4,                        // group
                 2);                       // deformable_group

    input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
}
// NOTE(review): "uper" in the test name is a typo for "upper"; kept as-is to
// preserve the registered test identifier.
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_uper_inputs_static_rank_no_masks) {
    // Three-input variant (no mask), static 4D rank with dynamic dimensions.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto offsets = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));

    op = make_op(data,
                 offsets,
                 filters,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_UPPER,
                 4,                        // group
                 2);                       // deformable_group

    input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
}
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_upper_inputs_static_rank) {
    // Four-input variant (with mask), static 4D rank with dynamic dimensions.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto offsets = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto masks = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));

    op = make_op(data,
                 offsets,
                 filters,
                 masks,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_UPPER,
                 4,                        // group
                 2);                       // deformable_group

    input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 5, 5}));
}
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, mask_channel_dimension_not_divisible_by_deformable_group) {
    const auto strides = Strides{1, 1};
    const auto dilations = Strides{1, 1};
    const auto pads_begin = CoordinateDiff{0, 0};
    const auto pads_end = CoordinateDiff{0, 0};
    const auto auto_pad = op::PadType::SAME_UPPER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto offsets = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto masks = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));

    // Fix: `masks` was created but never connected — the op had only three inputs
    // while four input shapes were supplied below. The mask input must be part of
    // the operator for its channel validation (17 % deformable_group != 0) to trigger.
    op = make_op(data, offsets, filters, masks, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2);

    input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 17, 5, 5}};

    OV_EXPECT_THROW(
        shape_inference(op.get(), input_shapes, output_shapes),
        NodeValidationFailure,
        HasSubstr(
            "The channels dimension of mask input is not compatible with filters and 'deformable group' attribute"));
}

View File

@ -0,0 +1,128 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset11.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for static shape inference tests of op::v1::GroupConvolutionBackpropData.
class GroupConvolutionBackpropDataStaticShapeInferenceTest
    : public OpStaticShapeInferenceTest<op::v1::GroupConvolutionBackpropData> {
protected:
    void SetUp() override {
        // Back-prop data shape inference always yields exactly one output shape.
        output_shapes.resize(1);
    }
};
TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor_with_output_shape) {
    // Default-constructed op configured through setters; output spatial shape
    // is set explicitly so inference must honor it regardless of pads/strides.
    op = make_op();
    op->set_strides({2, 2});
    op->set_dilations({1, 1});
    op->set_pads_begin({1, 1});
    op->set_pads_end({1, 1});
    op->set_output_padding({0, 0});
    op->set_auto_pad(op::PadType::EXPLICIT);
    op->set_output_shape(PartialShape{500, 500}.to_shape());

    // Inputs: data, filters (G,I,O,Y,X), output-shape tensor of rank 1.
    input_shapes = ShapeVector{{1, 20, 224, 224}, {2, 10, 10, 3, 3}, {2}};

    const auto inferencer = make_shape_inference(op);
    output_shapes = inferencer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 20, 500, 500}));
    // EXPLICIT mode keeps the pads exactly as set.
    EXPECT_EQ(inferencer->get_pads_begin(), CoordinateDiff({1, 1}));
    EXPECT_EQ(inferencer->get_pads_end(), CoordinateDiff({1, 1}));
}
TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor) {
    // Default-constructed op; the target spatial shape comes from constant data
    // on input 2 instead of the op attribute.
    op = make_op();
    op->set_strides({1, 1, 1});
    op->set_dilations({1, 1, 1});
    op->set_pads_begin({2, 2, 2});
    op->set_pads_end({2, 1, 3});
    op->set_output_padding({1, 1, 1});
    op->set_auto_pad(op::PadType::EXPLICIT);

    int32_t out_spatial_dims[] = {5, 10, 15};
    const auto const_data =
        std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, out_spatial_dims)}};

    input_shapes = ShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}};

    const auto inferencer = make_shape_inference(op);
    output_shapes = inferencer->infer(input_shapes, const_data).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 6, 5, 10, 15}));
    // EXPLICIT mode keeps the pads exactly as set.
    EXPECT_EQ(inferencer->get_pads_begin(), CoordinateDiff({2, 2, 2}));
    EXPECT_EQ(inferencer->get_pads_end(), CoordinateDiff({2, 1, 3}));
}
TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 2d_inputs_dynamic_rank_no_spatial_shape) {
    // Two-input variant: output spatial shape is computed, not provided.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());

    op = make_op(data,
                 filters,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_LOWER);

    // Filters are (G, I, O, Y, X) = {2, 1, 2, 3, 3} -> output channels G * O = 4.
    input_shapes = ShapeVector{{1, 2, 5, 5}, {2, 1, 2, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 4, 7, 7}));
}
TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 3d_auto_pad_same_lower_out_spatial_as_const) {
    const auto strides = Strides{1, 1, 1};
    const auto dilations = Strides{1, 1, 1};
    const auto pads_begin = CoordinateDiff{0, 0, 0};
    const auto pads_end = CoordinateDiff{0, 0, 0};
    // Fix: test name says SAME_LOWER but the op was configured with SAME_UPPER.
    // The output spatial shape is taken from the constant third input, so the
    // asserted result is identical for both modes; only the pad mode is aligned.
    const auto auto_pad = op::PadType::SAME_LOWER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(6));
    // Target output spatial dimensions supplied as a Constant node.
    const auto out_spatial = op::v0::Constant::create(element::i64, Shape{3}, {2, 1, 3});
    op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);

    input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}, {3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 6, 2, 1, 3}));
}
TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 3d_auto_pad_same_upper_out_spatial_in_map) {
    const auto strides = Strides{1, 1, 1};
    // NOTE(review): dilations left empty — presumably relies on the op filling
    // per-spatial-dim defaults during construction; confirm this is intentional.
    const auto dilations = Strides{};
    const auto pads_begin = CoordinateDiff{0, 0, 0};
    const auto pads_end = CoordinateDiff{0, 0, 0};
    // Fix: test name says SAME_UPPER but the op was configured with SAME_LOWER.
    // The output spatial shape comes from the const-data map, so the asserted
    // result is identical for both modes; only the pad mode is aligned.
    const auto auto_pad = op::PadType::SAME_UPPER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(6));
    const auto out_spatial = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic(1));
    op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad);

    // Target output spatial dimensions supplied via the constant-data map.
    int32_t spatial_dims[] = {2, 6, 1};
    const auto const_data =
        std::map<size_t, HostTensorPtr>{{2, std::make_shared<HostTensor>(element::i32, Shape{3}, spatial_dims)}};

    input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {1, 5, 1, 3, 3, 3}, {3}};
    shape_inference(op.get(), input_shapes, output_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 1, 2, 6, 1}));
}

View File

@ -0,0 +1,130 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "group_convolution_shape_inference.hpp"
#include "openvino/opsets/opset11.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for static shape inference tests of op::v1::GroupConvolution.
class GroupConvolutionV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v1::GroupConvolution> {
protected:
    void SetUp() override {
        // GroupConvolution shape inference always yields exactly one output shape.
        output_shapes.resize(1);
    }
};
TEST_F(GroupConvolutionV1StaticShapeInferenceTest, default_ctor_direct_infer_call) {
    op = make_op();
    op->set_strides({1, 1});
    op->set_dilations({1, 1});
    op->set_auto_pad(op::PadType::EXPLICIT);

    // Padding is owned by the caller here and passed into shape_infer directly.
    auto begin_pads = CoordinateDiff{2, 2};
    auto end_pads = CoordinateDiff{2, 1};
    input_shapes = ShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}};

    output_shapes = ov::op::v1::shape_infer(op.get(), input_shapes, begin_pads, end_pads);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 6, 10, 11}));
    // EXPLICIT mode must leave the caller-provided pads untouched.
    EXPECT_EQ(begin_pads, CoordinateDiff({2, 2}));
    EXPECT_EQ(end_pads, CoordinateDiff({2, 1}));
}
TEST_F(GroupConvolutionV1StaticShapeInferenceTest, default_ctor) {
    // Default-constructed op configured through setters only; no inputs connected.
    op = make_op();
    op->set_strides({1, 1});
    op->set_dilations({1, 1});
    op->set_pads_begin({2, 2});
    op->set_pads_end({2, 1});
    op->set_auto_pad(op::PadType::EXPLICIT);

    // Filters are (G, O, I, Y, X) = {3, 2, 2, 5, 5} -> output channels G * O = 6.
    input_shapes = ShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}};

    const auto inferencer = make_shape_inference(op);
    output_shapes = inferencer->infer(input_shapes, {}).shapes;

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({1, 6, 10, 11}));
    // EXPLICIT mode keeps the pads exactly as set.
    EXPECT_EQ(inferencer->get_pads_begin(), CoordinateDiff({2, 2}));
    EXPECT_EQ(inferencer->get_pads_end(), CoordinateDiff({2, 1}));
}
TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 1d_explicit_pads_inputs_static_rank) {
    // 1D case: 3D data and 4D (grouped) filters, ranks static at construction.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(3));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));

    op = make_op(data,
                 filters,
                 Strides{1},               // strides
                 CoordinateDiff{0},        // pads_begin
                 CoordinateDiff{0},        // pads_end
                 Strides{1},               // dilations
                 op::PadType::EXPLICIT);

    input_shapes = ShapeVector{{1, 12, 20}, {12, 1, 1, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    // No padding: 20 - (3 - 1) = 18 spatial elements.
    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 12, 18}));
}
TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 2d_auto_pads_same_lower_inputs_dynamic_rank) {
    // Both parameters have fully dynamic rank; concrete shapes arrive at inference time.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());

    op = make_op(data,
                 filters,
                 Strides{1, 1},            // strides
                 CoordinateDiff{0, 0},     // pads_begin
                 CoordinateDiff{0, 0},     // pads_end
                 Strides{1, 1},            // dilations
                 op::PadType::SAME_LOWER);

    // Filters are (G, O, I, Y, X) = {2, 1, 2, 3, 3} -> output channels G * O = 2.
    input_shapes = ShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({1, 2, 5, 5}));
}
TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_inputs_static_ranks) {
    const auto strides = Strides{1, 1, 1};
    const auto dilations = Strides{1, 1, 1};
    const auto pads_begin = CoordinateDiff{0, 0, 0};
    const auto pads_end = CoordinateDiff{0, 0, 0};
    // Fix: test name says SAME_LOWER but the op was configured with SAME_UPPER.
    // The asserted output shape is identical for both modes here (5x5x5 input,
    // 3x3x3 kernel, stride 1), so only the pad mode is aligned with the name.
    const auto auto_pad = op::PadType::SAME_LOWER;

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(5));
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(6));
    op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad);

    input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}};
    shape_inference(op.get(), input_shapes, output_shapes);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes[0], StaticShape({3, 6, 5, 5, 5}));
}
TEST_F(GroupConvolutionV1StaticShapeInferenceTest, dilations_not_defined_for_spatial_shape) {
    // One dilation value for a 2D spatial configuration -> construction must fail.
    const auto bad_dilations = Strides{1};
    const auto conv_strides = Strides{1, 1};
    const auto begin_pads = CoordinateDiff{0, 0};
    const auto end_pads = CoordinateDiff{0, 0};

    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto filters = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    input_shapes = ShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}};

    OV_EXPECT_THROW(
        op = make_op(data, filters, conv_strides, begin_pads, end_pads, bad_dilations, op::PadType::SAME_LOWER),
        NodeValidationFailure,
        HasSubstr("Dilations should be defined for all and only spatial dimensions"));
}

View File

@ -1,14 +1,16 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "pass_manager.h"
#include <string>
#include "convolution_inst.h"
#include "primitive_type_base.h"
#include "convolution_shape_inference.hpp"
#include "sliding_window_utils.hpp"
#include "group_convolution_shape_inference.hpp"
#include "intel_gpu/runtime/error_handler.hpp"
#include "json_object.h"
#include <string>
#include "pass_manager.h"
#include "primitive_type_base.h"
#include "sliding_window_utils.hpp"
using namespace ov::intel_gpu;
@ -413,29 +415,27 @@ std::vector<layout> convolution_inst::calc_output_layouts(convolution_node const
input_layout.get<ShapeType>(),
weights_layout.get<ShapeType>()
};
std::vector<ShapeType> output_shapes = {ShapeType()};
std::vector<ShapeType> output_shapes;
auto pads_begin = desc->padding_above;
auto pads_end = desc->padding_below;
if (desc->groups > 1) {
ov::op::v1::GroupConvolution op;
op.set_dilations(desc->dilation);
op.set_strides(desc->stride);
op.set_auto_pad(ov::op::PadType::EXPLICIT);
auto pad_begin = desc->padding_above;
auto pad_end = desc->padding_below;
if (input_shapes[1].size() == 4 && input_shapes[0].size() == 3) {
// 3D
input_shapes[1][3] = input_shapes[1][2];
input_shapes[1][2] = input_shapes[0][1].get_length()/input_shapes[1][0].get_length();
}
ov::op::v1::shape_infer(&op, pad_begin, pad_end, input_shapes, output_shapes);
output_shapes = ov::op::v1::shape_infer(&op, input_shapes, pads_begin, pads_end);
} else {
ov::op::v1::Convolution op;
op.set_dilations(desc->dilation);
op.set_strides(desc->stride);
op.set_auto_pad(ov::op::PadType::EXPLICIT);
auto pad_begin = desc->padding_above;
auto pad_end = desc->padding_below;
ov::op::v1::shape_infer(&op, pad_begin, pad_end, input_shapes, output_shapes);
output_shapes = ov::op::v1::shape_infer(&op, input_shapes, pads_begin, pads_end);
}
format::type output_format = input_layout.format.value;
return {layout{output_shapes[0], output_type, output_format}};

View File

@ -1,14 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "deconvolution_inst.h"
#include "primitive_type_base.h"
#include "sliding_window_utils.hpp"
#include "intel_gpu/runtime/error_handler.hpp"
#include "json_object.h"
#include <string>
#include "convolution_shape_inference.hpp"
#include "convolution_backprop_shape_inference.hpp"
#include "deconvolution_inst.h"
#include "group_convolution_backprop_shape_inference.hpp"
#include "intel_gpu/runtime/error_handler.hpp"
#include "json_object.h"
#include "primitive_type_base.h"
#include "sliding_window_utils.hpp"
using namespace ov::intel_gpu;
@ -164,7 +165,7 @@ std::vector<layout> deconvolution_inst::calc_output_layouts(deconvolution_node c
std::vector<ShapeType> input_shapes = {
input_layout.get<ShapeType>()
};
std::vector<ShapeType> output_shapes = {ShapeType()};
std::vector<ShapeType> output_shapes;
auto& memory_deps = impl_param.memory_deps;
// Dimensions order of weights is IOYX, but the selected format is OIYX by default and I/O dimensions are
// already swapped when creating constant op. So we need to swap I/O dimensions according to the original
@ -179,19 +180,22 @@ std::vector<layout> deconvolution_inst::calc_output_layouts(deconvolution_node c
std::swap(weights_pshape[2], weights_pshape[1]);
input_shapes.push_back(weights_pshape);
if (output_partial_shape.size() != 0) {
ShapeType output_shape = ov::Shape{ output_partial_shape.size() };
input_shapes.push_back(output_shape);
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_partial_shape, input_shapes, output_shapes);
op.set_output_shape(output_partial_shape.to_shape());
input_shapes.push_back(ov::Shape{output_partial_shape.size()});
output_shapes = ov::op::v1::shape_infer(&op, input_shapes, pads_begin, pads_end);
} else if (memory_deps.count(2)) {
auto mem = memory_deps.at(2);
std::vector<int64_t> dims = read_vector<int64_t>(mem, impl_param.prog->get_stream());
ov::Shape shape(dims.begin(), dims.end());
ov::PartialShape output_pshape(shape);
ShapeType output_shape = ov::Shape{ output_pshape.size() };
input_shapes.push_back(output_shape);
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_pshape, input_shapes, output_shapes);
auto dims = read_vector<int64_t>(mem, impl_param.prog->get_stream());
auto dims_shape = ov::Shape{dims.size()};
input_shapes.push_back(dims_shape);
output_shapes = ov::op::v1::shape_infer(
&op,
input_shapes,
pads_begin,
pads_end,
{{2, std::make_shared<ov::HostTensor>(ov::element::i64, dims_shape, dims.data())}});
} else {
ov::op::v1::shape_infer(&op, pads_begin, pads_end, ov::PartialShape{}, input_shapes, output_shapes);
output_shapes = ov::op::v1::shape_infer(&op, input_shapes, pads_begin, pads_end);
}
} else {
ov::op::v1::ConvolutionBackpropData op;
@ -202,19 +206,22 @@ std::vector<layout> deconvolution_inst::calc_output_layouts(deconvolution_node c
std::swap(weights_pshape[1], weights_pshape[0]);
input_shapes.push_back(weights_pshape);
if (output_partial_shape.size() != 0) {
ShapeType output_shape = ov::Shape{ output_partial_shape.size() };
input_shapes.push_back(output_shape);
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_partial_shape, input_shapes, output_shapes);
op.set_output_shape(output_partial_shape.to_shape());
input_shapes.push_back(ov::Shape{output_partial_shape.size()});
output_shapes = ov::op::v1::shape_infer(&op, input_shapes, pads_begin, pads_end);
} else if (memory_deps.count(2)) {
auto mem = memory_deps.at(2);
std::vector<int64_t> dims = read_vector<int64_t>(mem, impl_param.prog->get_stream());
ov::Shape shape(dims.begin(), dims.end());
ov::PartialShape output_pshape(shape);
ShapeType output_shape = ov::Shape{ output_pshape.size() };
input_shapes.push_back(output_shape);
ov::op::v1::shape_infer(&op, pads_begin, pads_end, output_pshape, input_shapes, output_shapes);
auto dims = read_vector<int64_t>(mem, impl_param.prog->get_stream());
auto dims_shape = ov::Shape{dims.size()};
input_shapes.push_back(dims_shape);
output_shapes = ov::op::v1::shape_infer(
&op,
input_shapes,
pads_begin,
pads_end,
{{2, std::make_shared<ov::HostTensor>(ov::element::i64, dims_shape, dims.data())}});
} else {
ov::op::v1::shape_infer(&op, pads_begin, pads_end, ov::PartialShape{}, input_shapes, output_shapes);
output_shapes = ov::op::v1::shape_infer(&op, input_shapes, pads_begin, pads_end);
}
}
return {layout{output_shapes[0], output_type, out_fmt.value}};