Removed v0 convolution and group convolution (#1512)

This commit is contained in:
Ilya Churaev 2020-07-31 13:00:28 +03:00 committed by GitHub
parent 601f66e1ec
commit 41da44ec07
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 1703 additions and 2278 deletions

View File

@@ -49,6 +49,7 @@ namespace ngraph
///
/// \return The vector containing multiple outputs we split input node into.
///
NGRAPH_API
OutputVector split(const Output<Node>& value, size_t split_parts, int axis = 0);
namespace opset1

View File

@@ -458,642 +458,3 @@ shared_ptr<Node>
m_output_padding);
}
}
// *** Convolution OP SET 0 ***
constexpr NodeTypeInfo op::v0::Convolution::type_info;
// Full constructor: every convolution attribute is supplied explicitly.
// Shape/type inference runs immediately via constructor_validate_and_infer_types().
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type)
: Op({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_pad_type(pad_type)
{
constructor_validate_and_infer_types();
}
// Serializes/deserializes all v0::Convolution attributes.
// NOTE(review): attributes are visited in a fixed order (movement, window
// dilation, data dilation, padding below/above, pad_type); confirm the
// serialized form before reordering these calls.
bool op::v0::Convolution::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("window_movement_strides", m_window_movement_strides);
visitor.on_attribute("window_dilation_strides", m_window_dilation_strides);
visitor.on_attribute("data_dilation_strides", m_data_dilation_strides);
visitor.on_attribute("padding_below", m_padding_below);
visitor.on_attribute("padding_above", m_padding_above);
visitor.on_attribute("pad_type", m_pad_type);
return true;
}
// Shape/type inference for v0::Convolution.
// Empty attribute vectors (from the abbreviated constructors) are treated as
// "unspecified" and replaced with rank-appropriate defaults; for SAME_UPPER /
// SAME_LOWER the padding attributes are recomputed from the static shapes.
void op::v0::Convolution::validate_and_infer_types()
{
const PartialShape& data_batch_shape = get_input_partial_shape(0);
element::Type data_batch_et = get_input_element_type(0);
const PartialShape& filters_shape = get_input_partial_shape(1);
element::Type filters_et = get_input_element_type(1);
// An empty Strides/CoordinateDiff marks a defaulted attribute.
if (m_data_dilation_strides.size() == 0)
{
m_data_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_window_movement_strides.size() == 0)
{
m_window_movement_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_window_dilation_strides.size() == 0)
{
m_window_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_padding_below.size() == 0)
{
m_padding_below = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_padding_above.size() == 0)
{
m_padding_above = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
{
// Auto-padding needs concrete extents, so it only runs on static shapes.
if (data_batch_shape.is_static() && filters_shape.is_static())
{
// TODO: data dilation
m_padding_below.clear();
m_padding_above.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
// NOTE(review): m_padding_above is passed before m_padding_below here —
// presumably matching infer_auto_padding's parameter order; confirm
// against that function's declaration.
infer_auto_padding(data_batch_shape.to_shape(),
filter_shape,
m_window_movement_strides,
m_window_dilation_strides,
m_pad_type,
m_padding_above,
m_padding_below);
}
}
element::Type result_et;
PartialShape result_shape;
// The two input element types must merge (identical, or one dynamic).
NODE_VALIDATION_CHECK(
this,
element::Type::merge(result_et, data_batch_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
data_batch_et,
", filters element type: ",
filters_et,
").");
result_shape = infer_convolution_forward(this,
data_batch_shape,
m_data_dilation_strides,
m_padding_below,
m_padding_above,
filters_shape,
m_window_movement_strides,
m_window_dilation_strides);
set_output_type(0, result_et, result_shape);
}
// Convenience constructor: no data dilation. The empty Strides is later
// replaced with defaults in validate_and_infer_types().
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above)
: Convolution(data_batch,
filters,
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
Strides())
{
}
// Convenience constructor: no padding and no data dilation.
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides)
: Convolution(data_batch,
filters,
window_movement_strides,
window_dilation_strides,
CoordinateDiff(),
CoordinateDiff())
{
}
// Convenience constructor: no window dilation, padding, or data dilation.
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides)
: Convolution(data_batch,
filters,
window_movement_strides,
Strides(),
CoordinateDiff(),
CoordinateDiff())
{
}
// Convenience constructor: everything defaulted.
op::v0::Convolution::Convolution(const Output<Node>& data_batch, const Output<Node>& filters)
: Convolution(data_batch, filters, Strides(), Strides(), CoordinateDiff(), CoordinateDiff())
{
}
// Clone this node over replacement inputs, carrying every attribute of the
// original node across unchanged.
shared_ptr<Node> op::v0::Convolution::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
const auto& new_data_batch = new_args.at(0);
const auto& new_filters = new_args.at(1);
return make_shared<v0::Convolution>(new_data_batch,
new_filters,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides,
m_pad_type);
}
// NOTE(review): this type_info definition belongs to ConvolutionBackpropData
// but sits between two v0::Convolution member definitions — consider moving
// it next to the other ConvolutionBackpropData members.
constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info;
// The default (padding) value for a Convolution output is the constant zero.
shared_ptr<Node> op::v0::Convolution::get_default_value() const
{
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}
// Data-batch backprop for convolution. Node inputs are {filters, output_delta};
// the *_forward attributes record the attributes of the forward convolution
// this node is the backprop of.
op::v0::ConvolutionBackpropData::ConvolutionBackpropData(
const Shape& data_batch_shape,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward)
: Op({filters, output_delta})
, m_data_batch_shape(data_batch_shape)
, m_window_movement_strides_forward(window_movement_strides_forward)
, m_window_dilation_strides_forward(window_dilation_strides_forward)
, m_padding_below_forward(padding_below_forward)
, m_padding_above_forward(padding_above_forward)
, m_data_dilation_strides_forward(data_dilation_strides_forward)
{
constructor_validate_and_infer_types();
}
// Serializes/deserializes all ConvolutionBackpropData attributes.
bool op::v0::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("data_batch_shape", m_data_batch_shape);
visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward);
visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward);
visitor.on_attribute("padding_below_forward", m_padding_below_forward);
visitor.on_attribute("padding_above_forward", m_padding_above_forward);
visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward);
return true;
}
void op::v0::ConvolutionBackpropData::validate_and_infer_types()
{
// Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as
// follows.
//
// Forward Backward
// "N" axis for data batch 0 0
// "C" axis for data batch 1 1
// "Co" axis for filters 0 0
// "Ci" axis for filters 1 1
// "N" axis for output 0 0
// "C" axis for output 1 1
// Data batch x delta
// Data batch shape S_x S_o
// Filters f reverse(f) [on spatial axes]
// Filters shape S_f S_f
// Window movement strides q_x p_x
// Window dilation strides p_f p_f
// Padding below a_x (S_f - 1)p_f - a_x
// Padding above b_x (S_f - 1)p_f +
// + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f)
// % q_x)
// - b_x
// Data dilation strides p_x q_x
// Output shape S_o S_x
//
// To _validate_, we simply need to check/infer the output shape of the forward convolution,
// then check to make sure that the incoming delta has the same shape as the forward output.
const PartialShape& filters_shape = get_input_partial_shape(0);
element::Type filters_et = get_input_element_type(0);
const PartialShape& delta_shape = get_input_partial_shape(1);
element::Type delta_et = get_input_element_type(1);
element::Type forward_result_et;
PartialShape forward_result_shape;
NODE_VALIDATION_CHECK(
this,
element::Type::merge(forward_result_et, delta_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
delta_et,
", filters element type: ",
filters_et,
").");
// Re-run forward shape inference purely for validation; the result is only
// compared against the incoming delta shape, not used as the output shape.
forward_result_shape = infer_convolution_forward(this,
m_data_batch_shape,
m_data_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
filters_shape,
m_window_movement_strides_forward,
m_window_dilation_strides_forward);
NODE_VALIDATION_CHECK(this,
forward_result_shape.compatible(delta_shape),
"Inferred forward output shape (",
forward_result_shape,
") does not match shape of ",
"delta (",
delta_shape,
").");
// The output is the reconstructed data batch, so its shape is the recorded
// forward input shape.
set_output_type(0, forward_result_et, m_data_batch_shape);
}
// Clone over replacement inputs {filters, output_delta}; all recorded
// forward-prop attributes are copied verbatim onto the clone.
shared_ptr<Node>
op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
const auto& new_filters = new_args.at(0);
const auto& new_delta = new_args.at(1);
return make_shared<v0::ConvolutionBackpropData>(m_data_batch_shape,
new_filters,
new_delta,
m_window_movement_strides_forward,
m_window_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
m_data_dilation_strides_forward);
}
// Backward "padding below" for the delta, per spatial axis i:
//   (S_f - 1) * p_f - a_x
// where S_f is the filter extent, p_f the window dilation and a_x the forward
// padding-below (see the table in validate_and_infer_types).
CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const
{
const auto& data_batch_shape = get_data_batch_shape();
const auto& window_dilation = get_window_dilation_strides_forward();
const auto& filters_shape = get_input_shape(0);
const auto& forward_pad_below = get_padding_below_forward();
const size_t spatial_dim_count = static_cast<size_t>(data_batch_shape.size()) - 2;
CoordinateDiff pad_below;
pad_below.reserve(spatial_dim_count);
for (size_t dim = 0; dim < spatial_dim_count; ++dim)
{
// Spatial filter extents start at axis 2 (after the {O,I} axes).
const ptrdiff_t filter_extent = static_cast<ptrdiff_t>(filters_shape[dim + 2]);
pad_below.push_back((filter_extent - 1) * window_dilation[dim] - forward_pad_below[dim]);
}
return pad_below;
}
// Backward "padding above" for the delta, per spatial axis i:
//   (S_f - 1)p_f + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x) - b_x
// matching the "Padding above" row of the table in validate_and_infer_types.
CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const
{
auto& in_shape = get_data_batch_shape();
auto& filter_dilation = get_window_dilation_strides_forward();
auto& filter_shape = get_input_shape(0);
auto& in_pad_below = get_padding_below_forward();
auto& in_pad_above = get_padding_above_forward();
auto& in_dilation = get_data_dilation_strides_forward();
auto& stride = get_window_movement_strides_forward();
size_t spatial_dim_count = static_cast<size_t>(in_shape.size()) - 2;
CoordinateDiff backward_delta_out_pad_above;
backward_delta_out_pad_above.resize(spatial_dim_count);
for (size_t i = 0; i < spatial_dim_count; i++)
{
// Spatial extents of data batch and filters both start at axis 2.
backward_delta_out_pad_above[i] =
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] +
((in_pad_below[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + in_pad_above[i] -
(static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i]) %
stride[i]) -
in_pad_above[i];
}
return backward_delta_out_pad_above;
}
//
// This is a legacy function, retained because the CPU backend uses it for now.
// TODO(amprocte): Update CPU backend to use the new stuff in validation_util.hpp, and remove this
// function.
//
// Validates every convolution parameter against the data batch / filters
// shapes and returns the output shape (NCoDo or CoNDo, depending on
// batch_axis_result / output_channel_axis_result). The *_axis_* arguments
// select which of the first two axes holds batch vs. channels; all of them
// must be 0 or 1.
//
// Fixes vs. the previous revision: the "window larger than input" validation
// message now states the failure direction correctly and is properly
// terminated with ").".
Shape op::util::infer_convolution_output_shape(const Node* node,
const Shape& data_batch_shape,
const Shape& filters_shape,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
size_t batch_axis_data,
size_t input_channel_axis_data,
size_t input_channel_axis_filters,
size_t output_channel_axis_filters,
size_t batch_axis_result,
size_t output_channel_axis_result)
{
NODE_VALIDATION_CHECK(node, batch_axis_data <= 1, "(This is an internal nGraph error)");
NODE_VALIDATION_CHECK(node, input_channel_axis_data <= 1, "(This is an internal nGraph error)");
NODE_VALIDATION_CHECK(
node, input_channel_axis_filters <= 1, "(This is an internal nGraph error)");
NODE_VALIDATION_CHECK(
node, output_channel_axis_filters <= 1, "(This is an internal nGraph error)");
NODE_VALIDATION_CHECK(node, batch_axis_result <= 1, "(This is an internal nGraph error)");
NODE_VALIDATION_CHECK(
node, output_channel_axis_result <= 1, "(This is an internal nGraph error)");
//
// Make sure data_batch: NCiDi for some Di of rank>0, N != 0, Ci != 0.
//
NODE_VALIDATION_CHECK(node,
data_batch_shape.size() >= 3,
"Data batch input must have rank of at least 3 (one batch axis, ",
"one input-channel axis, and at least one spatial dimension) ",
"(data batch shape: ",
data_batch_shape,
").");
size_t batch_size = data_batch_shape[batch_axis_data];
NODE_VALIDATION_CHECK(node,
batch_size != 0,
"Data batch size is zero (data batch shape: ",
data_batch_shape,
", ",
"batch axis is axis ",
batch_axis_data,
").");
size_t input_channel_count = data_batch_shape[input_channel_axis_data];
NODE_VALIDATION_CHECK(node,
input_channel_count != 0,
"Input channel count is zero (data batch shape: ",
data_batch_shape,
", ",
"channel axis is axis ",
input_channel_axis_data,
").");
size_t spatial_dimension_count = data_batch_shape.size() - 2;
//
// Make sure filters: CoCiWv for some Co>0, rank of W = rank of Di.
//
NODE_VALIDATION_CHECK(
node,
filters_shape.size() == 2 + spatial_dimension_count,
"Filter input must have rank equal to the data batch (one axis for output ",
"channels, one axis for input channels, and the same number of spatial ",
"dimensions as the data batch (filter input shape: ",
filters_shape,
", ",
"data batch shape: ",
data_batch_shape,
").");
size_t output_channel_count = filters_shape[output_channel_axis_filters];
NODE_VALIDATION_CHECK(node,
output_channel_count != 0,
"Output channel count for filters is zero (filters shape: ",
filters_shape,
", ",
"output channels on axis ",
output_channel_axis_filters,
").");
NODE_VALIDATION_CHECK(node,
filters_shape[input_channel_axis_filters] == input_channel_count,
"Input channel count for filters (",
filters_shape[input_channel_axis_filters],
") ",
"does not match the number of channels in the data batch (",
input_channel_count,
") ",
"(filter input shape: ",
filters_shape,
", filter input channels on axis ",
input_channel_axis_filters,
"; data batch shape: ",
data_batch_shape,
", data batch channels on axis ",
batch_axis_data,
").");
//
// Make sure window movement strides, window dilation strides, and data dilation strides
// have same rank as Di.
//
NODE_VALIDATION_CHECK(
node,
window_movement_strides.size() == spatial_dimension_count,
"Rank of window movement strides does not match the number of spatial dimensions (",
spatial_dimension_count,
") in the data batch (window movement strides: ",
window_movement_strides,
", data batch shape: ",
data_batch_shape,
").");
NODE_VALIDATION_CHECK(
node,
window_dilation_strides.size() == spatial_dimension_count,
"Rank of window dilation strides does not match the number of spatial dimensions (",
spatial_dimension_count,
") in the data batch (window dilation strides: ",
window_dilation_strides,
", data batch shape: ",
data_batch_shape,
").");
NODE_VALIDATION_CHECK(
node,
data_dilation_strides.size() == spatial_dimension_count,
"Rank of data dilation strides does not match the number of spatial dimensions (",
spatial_dimension_count,
") in the data batch (data dilation strides: ",
data_dilation_strides,
", data batch shape: ",
data_batch_shape,
").");
//
// Make sure padding-below and padding-above shapes have same rank as Di.
//
NODE_VALIDATION_CHECK(
node,
padding_below.size() == spatial_dimension_count,
"Rank of the padding below does not match the number of spatial dimensions (",
spatial_dimension_count,
") in the data batch (padding below: ",
padding_below,
", data batch shape: ",
data_batch_shape,
").");
NODE_VALIDATION_CHECK(
node,
padding_above.size() == spatial_dimension_count,
"Rank of the padding above does not match the number of spatial dimensions (",
spatial_dimension_count,
") in the data batch (padding above: ",
padding_above,
", data batch shape: ",
data_batch_shape,
").");
//
// Extract input item shape Di and make sure all dimensions are larger than 0 after padding and
// dilation.
//
std::vector<ptrdiff_t> input_item_virtual_shape_signed;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_CHECK(node,
data_dilation_strides[i] != 0,
"Data dilation stride at spatial dimension ",
i,
" is zero ",
"(data dilation strides: ",
data_dilation_strides,
").");
// NOTE(review): if a spatial dim_size is 0, (dim_size - 1) underflows
// size_t here; nothing above validates the spatial extents — confirm
// whether a zero spatial dimension can reach this point.
size_t dim_size = data_batch_shape[1 + 1 + i];
size_t dilated_dim_size = (dim_size - 1) * data_dilation_strides[i] + 1;
ptrdiff_t padded_dilated_dim_size = padding_below[i] + dilated_dim_size + padding_above[i];
input_item_virtual_shape_signed.push_back(padded_dilated_dim_size);
}
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_CHECK(node,
input_item_virtual_shape_signed[i] > 0,
"Input dimension after padding and dilation is non-positive ",
"at spatial axis ",
i,
" (post-padding/dilation input item shape: ",
input_item_virtual_shape,
", data batch shape: ",
data_batch_shape,
", data dilation strides: ",
data_dilation_strides,
", padding below: ",
padding_below,
", padding above: ",
padding_above,
").");
input_item_virtual_shape.push_back(size_t(input_item_virtual_shape_signed[i]));
}
//
// Extract the physical shape Wp of the convolution window, *not* including dilation, from the
// filter dimensions. At the same time, make sure window shape dimensions are all larger than
// 0.
//
Shape window_physical_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
window_physical_shape.push_back(filters_shape[1 + 1 + i]);
NODE_VALIDATION_CHECK(node,
window_physical_shape[i] != 0,
"Filters shape at spatial dimension ",
i,
" is zero ",
"(filters shape: ",
filters_shape,
").");
}
//
// Compute virtual shape Wp of the convolution window, *including* dilation. At the same time,
// make sure all window dilation strides are larger than 0, and that the dilated filter fits
// within the spatial dimensions.
//
Shape window_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_CHECK(node,
window_dilation_strides[i] != 0,
"Window dilation stride at spatial dimension ",
i,
" is zero ",
"(window dilation strides: ",
window_dilation_strides,
").");
window_virtual_shape.push_back((window_physical_shape[i] - 1) * window_dilation_strides[i] +
1);
// The check fails when the dilated window exceeds the padded/dilated
// input extent, so the message reports "larger" (was previously
// "smaller", and the message lacked its closing ").").
NODE_VALIDATION_CHECK(
node,
window_virtual_shape[i] <= input_item_virtual_shape[i],
"Post-dilation window shape is larger than the post-padding/dilation ",
"input item shape at spatial dimension ",
i,
" (post-padding/dilation ",
"input item shape: ",
input_item_virtual_shape,
", data batch shape: ",
data_batch_shape,
", data dilation strides: ",
data_dilation_strides,
", padding below: ",
padding_below,
", padding above: ",
padding_above,
", post-dilation window shape: ",
window_virtual_shape,
", filters shape: ",
filters_shape,
", window dilation strides: ",
window_dilation_strides,
").");
}
//
// Construct result shape: NCoDo or CoNDo (depending on *_axis_result), checking at the same
// time that all window movement strides are larger than 0.
//
Shape result_shape(spatial_dimension_count + 2);
result_shape[batch_axis_result] = batch_size;
result_shape[output_channel_axis_result] = output_channel_count;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_CHECK(node,
window_movement_strides[i] != 0,
"Window movement stride at spatial dimension ",
i,
" is zero ",
"(window movement strides: ",
window_movement_strides,
").");
result_shape[i + 2] = ceil_div(input_item_virtual_shape[i] - window_virtual_shape[i] + 1,
window_movement_strides[i]);
}
return result_shape;
}

View File

@@ -217,316 +217,5 @@ namespace ngraph
CoordinateDiff m_output_padding;
};
} // namespace v1
namespace v0
{
/// \brief Batched convolution operation, with optional window dilation and stride.
///
class NGRAPH_API Convolution : public Op
{
public:
    static constexpr NodeTypeInfo type_info{"Convolution", 0};
    const NodeTypeInfo& get_type_info() const override { return type_info; }
    /// \brief Constructs a batched convolution operation.
    Convolution() = default;
    /// \brief Constructs a batched convolution operation.
    ///
    /// \param data_batch The node producing the input data batch tensor.<br>
    /// `[N, C_IN, D1, ... Df]`
    /// \param filters The node producing the filters tensor.<br>
    /// `[C_OUT, C_IN, F1, ... Ff]`
    /// \param window_movement_strides The window movement strides.<br>
    /// `[f]`
    /// \param window_dilation_strides The window dilation strides.<br>
    /// `[f]`
    /// \param padding_below The padding-below sizes.<br>
    /// `[f]`
    /// \param padding_above The padding-above sizes.<br>
    /// `[f]`
    /// \param data_dilation_strides The data dilation strides.<br>
    /// `[f]`
    /// \param pad_type The pad type for automatically computing padding sizes.<br>
    /// `[f]`
    ///
    /// Output `[N, C_OUT, R1, ... Rf]`
    ///
    Convolution(const Output<Node>& data_batch,
                const Output<Node>& filters,
                const Strides& window_movement_strides,
                const Strides& window_dilation_strides,
                const CoordinateDiff& padding_below,
                const CoordinateDiff& padding_above,
                const Strides& data_dilation_strides,
                const PadType& pad_type = PadType::EXPLICIT);
    /// \brief Constructs a batched convolution operation with no data dilation (i.e.,
    /// all
    /// data dilation strides are 1).
    ///
    /// \param data_batch The node producing the input data batch tensor.<br>
    /// `[N, C_IN, D1, ... Df]`
    /// \param filters The node producing the filters tensor.<br>
    /// `[C_OUT, C_IN, F1, ... Ff]`
    /// \param window_movement_strides The window movement strides.<br>
    /// `[f]`
    /// \param window_dilation_strides The window dilation strides.<br>
    /// `[f]`
    /// \param padding_below The padding-below sizes.<br>
    /// `[f]`
    /// \param padding_above The padding-above sizes.<br>
    /// `[f]`
    ///
    /// Output `[N, C_OUT, R1, ... Rf]`
    ///
    Convolution(const Output<Node>& data_batch,
                const Output<Node>& filters,
                const Strides& window_movement_strides,
                const Strides& window_dilation_strides,
                const CoordinateDiff& padding_below,
                const CoordinateDiff& padding_above);
    /// \brief Constructs a batched convolution operation with no padding or data
    /// dilation
    /// (i.e., padding above and below are 0 everywhere, and all data dilation
    /// strides are 1).
    ///
    /// \param data_batch The node producing the input data batch tensor.<br>
    /// `[N, C_IN, D1, ... Df]`
    /// \param filters The node producing the filters tensor.<br>
    /// `[C_OUT, C_IN, F1, ... Ff]`
    /// \param window_movement_strides The window movement strides.<br>
    /// `[f]`
    /// \param window_dilation_strides The window dilation strides.<br>
    /// `[f]`
    ///
    /// Output `[N, C_OUT, R1, ... Rf]`
    ///
    Convolution(const Output<Node>& data_batch,
                const Output<Node>& filters,
                const Strides& window_movement_strides,
                const Strides& window_dilation_strides);
    /// \brief Constructs a batched convolution operation with no window dilation,
    /// padding,
    /// or data dilation (i.e., padding above and below are 0 everywhere, and all
    /// window/data dilation strides are 1).
    ///
    /// \param data_batch The node producing the input data batch tensor.<br>
    /// `[N, C_IN, D1, ... Df]`
    /// \param filters The node producing the filters tensor.<br>
    /// `[C_OUT, C_IN, F1, ... Ff]`
    /// \param window_movement_strides The window movement strides.<br>
    /// `[f]`
    ///
    /// Output `[N, C_OUT, R1, ... Rf]`
    ///
    Convolution(const Output<Node>& data_batch,
                const Output<Node>& filters,
                const Strides& window_movement_strides);
    /// \brief Constructs a batched convolution operation with no window dilation or
    /// movement stride (i.e., padding above and below are 0 everywhere, and all
    /// window/data dilation strides and window movement strides are 1).
    ///
    /// \param data_batch The node producing the input data batch tensor.<br>
    /// `[N, C_IN, D1, ... Df]`
    /// \param filters The node producing the filters tensor.<br>
    /// `[C_OUT, C_IN, F1, ... Ff]`
    ///
    /// Output `[N, C_OUT, R1, ... Rf]`
    ///
    Convolution(const Output<Node>& data_batch, const Output<Node>& filters);
    void validate_and_infer_types() override;
    bool visit_attributes(AttributeVisitor& visitor) override;
    virtual std::shared_ptr<Node>
        clone_with_new_inputs(const OutputVector& new_args) const override;
    /// \return The window movement strides.
    const Strides& get_window_movement_strides() const
    {
        return m_window_movement_strides;
    }
    void set_window_movement_strides(const Strides& window_movement_strides)
    {
        m_window_movement_strides = window_movement_strides;
    }
    /// \return The window dilation strides.
    const Strides& get_window_dilation_strides() const
    {
        return m_window_dilation_strides;
    }
    void set_window_dilation_strides(const Strides& window_dilation_strides)
    {
        m_window_dilation_strides = window_dilation_strides;
    }
    /// \return The padding-below sizes (possibly negative).
    const CoordinateDiff& get_padding_below() const { return m_padding_below; }
    void set_padding_below(const CoordinateDiff& padding_below)
    {
        m_padding_below = padding_below;
    }
    /// \return The padding-above sizes (possibly negative).
    const CoordinateDiff& get_padding_above() const { return m_padding_above; }
    void set_padding_above(const CoordinateDiff& padding_above)
    {
        m_padding_above = padding_above;
    }
    /// \deprecated Misspelled legacy name, retained so existing callers keep
    /// compiling; use set_padding_above instead.
    void set_adding_above(const CoordinateDiff& padding_above)
    {
        set_padding_above(padding_above);
    }
    /// \return The input data dilation strides.
    const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
    void set_data_dilation_strides(const Strides& data_dilation_strides)
    {
        m_data_dilation_strides = data_dilation_strides;
    }
    /// \return The pad type for convolution.
    const PadType& get_pad_type() const { return m_pad_type; }
    void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; }
    /// \return The default value for Convolution.
    virtual std::shared_ptr<Node> get_default_value() const override;
protected:
    Strides m_window_movement_strides;
    Strides m_window_dilation_strides;
    CoordinateDiff m_padding_below;
    CoordinateDiff m_padding_above;
    Strides m_data_dilation_strides;
    PadType m_pad_type;
};
/// \brief Data batch backprop for batched convolution operation.
class NGRAPH_API ConvolutionBackpropData : public Op
{
public:
static constexpr NodeTypeInfo type_info{"ConvolutionBackpropData", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a batched-convolution data batch-backprop operation.
ConvolutionBackpropData() = default;
///
/// \brief Constructs a batched-convolution data batch-backprop operation.
///
/// \param data_batch_shape The shape of the data batch from
/// forward-prop.
/// \param filters The node producing the filters from
/// forward-prop.
/// \param data The node producing output delta.
/// \param window_movement_strides_forward The window movement strides from
/// forward-prop.
/// \param window_dilation_strides_forward The window dilation strides from
/// forward-prop.
/// \param padding_below_forward The padding-below sizes from
/// forward-prop.
/// \param padding_above_forward The padding-above sizes from
/// forward-prop.
/// \param data_dilation_strides_forward The data dilation strides from
/// forward-prop.
///
ConvolutionBackpropData(const Shape& data_batch_shape,
const Output<Node>& filters,
const Output<Node>& data,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The data batch shape.
const Shape& get_data_batch_shape() const { return m_data_batch_shape; }
void set_data_batch_shape(const Shape& data_batch_shape)
{
m_data_batch_shape = data_batch_shape;
}
/// \return The window movement strides from the forward prop.
const Strides& get_window_movement_strides_forward() const
{
return m_window_movement_strides_forward;
}
void set_window_movement_strides_forward(
const Strides& window_movement_strides_forward)
{
m_window_movement_strides_forward = window_movement_strides_forward;
}
/// \return The window dilation strides from the forward prop.
const Strides& get_window_dilation_strides_forward() const
{
return m_window_dilation_strides_forward;
}
void set_window_dilation_strides_forward(
const Strides& window_dilation_strides_forward)
{
m_window_dilation_strides_forward = window_dilation_strides_forward;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_below_forward() const
{
return m_padding_below_forward;
}
void set_padding_below_forward(const CoordinateDiff& padding_below_forward)
{
m_padding_below_forward = padding_below_forward;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_above_forward() const
{
return m_padding_above_forward;
}
void set_padding_above_forward(const CoordinateDiff& padding_above_forward)
{
m_padding_above_forward = padding_above_forward;
}
/// \return The input data dilation strides from the forward prop.
const Strides& get_data_dilation_strides_forward() const
{
return m_data_dilation_strides_forward;
}
void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward)
{
m_data_dilation_strides_forward = data_dilation_strides_forward;
}
// Compute the padding to apply to the delta for the equivalent backward
// convolution (per-spatial-axis; see the table in
// validate_and_infer_types in the .cpp file).
// Compute the pad_above values to be used if in a convolution
CoordinateDiff compute_backward_delta_out_pad_above() const;
CoordinateDiff compute_backward_delta_out_pad_below() const;
protected:
Shape m_data_batch_shape;
Strides m_window_movement_strides_forward;
Strides m_window_dilation_strides_forward;
CoordinateDiff m_padding_below_forward;
CoordinateDiff m_padding_above_forward;
Strides m_data_dilation_strides_forward;
};
} // namespace v0
namespace util
{
// This is a legacy function, retained because the CPU backend uses it for now.
// TODO: Update CPU backend to use the new stuff in validation_util.hpp, and remove this
// function.
// The *_axis_* arguments select which of the first two axes holds
// batch vs. channels; the implementation requires each to be 0 or 1.
NGRAPH_API
Shape infer_convolution_output_shape(const Node* node,
const Shape& data_batch_shape,
const Shape& filters_shape,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
size_t batch_axis_data,
size_t input_channel_axis_data,
size_t input_channel_axis_filters,
size_t output_channel_axis_filters,
size_t batch_axis_result,
size_t output_channel_axis_result);
} // namespace util
using v0::Convolution;
using v0::ConvolutionBackpropData;
} // namespace op
} // namespace ngraph

View File

@@ -576,305 +576,3 @@ shared_ptr<Node>
m_output_padding);
}
}
//------------------------------------------------------------------------------
//                        v0::GroupConvolution
//------------------------------------------------------------------------------
constexpr NodeTypeInfo op::v0::GroupConvolution::type_info;
// Constructor with an explicit group count; m_groups_in_filters is false, so
// m_groups is taken as given rather than deduced from the filters shape.
op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type)
: FusedOp({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_groups(groups)
, m_pad_type(pad_type)
, m_groups_in_filters(false)
{
constructor_validate_and_infer_types();
}
// Constructor without an explicit group count; m_groups_in_filters is true
// and m_groups starts at 0 as a placeholder — pre_validate_and_infer_types
// deduces the real group count from dimension 0 of the filters shape.
op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type)
: FusedOp({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_groups(0)
, m_pad_type(pad_type)
, m_groups_in_filters(true)
{
constructor_validate_and_infer_types();
}
// Validates the group-convolution inputs once both shapes are static, and
// derives the group count from the filters' leading dimension when the
// groups-in-filters constructor was used. With any dynamic shape the checks
// are deferred and the output is marked fully dynamic.
void op::v0::GroupConvolution::pre_validate_and_infer_types()
{
    auto data_shape = get_input_partial_shape(0);
    auto filters_shape = get_input_partial_shape(1);
    if (data_shape.is_static() && filters_shape.is_static())
    {
        // Update groups from the filters' leading (group) dimension.
        if (m_groups_in_filters)
        {
            m_groups = filters_shape[0].get_length();
        }
        // Guard the modulo checks below: a zero group count (possible when the
        // explicit-groups constructor was called with 0) would be undefined
        // behaviour instead of a diagnosable error.
        NODE_VALIDATION_CHECK(this, get_groups() != 0, "Group count must be non-zero");
        // Data channels must divide evenly among the groups.
        NODE_VALIDATION_CHECK(this,
                              data_shape.to_shape()[1] % get_groups() == 0,
                              "Data channels not a multiple of group size");
        // Output channels must divide evenly among the groups.
        NODE_VALIDATION_CHECK(this,
                              filters_shape.to_shape()[0] % get_groups() == 0,
                              "# Filters not a multiple of group size");
        // Per-group filter input channels times group count must equal the
        // data channel count (input-channel axis is 2 when the filters carry
        // an explicit group dimension, 1 otherwise).
        NODE_VALIDATION_CHECK(this,
                              (filters_shape.to_shape()[m_groups_in_filters ? 2 : 1] *
                               get_groups()) == data_shape.to_shape()[1],
                              "Incorrect number of channels per filter");
    }
    else
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}
// For SAME_UPPER/SAME_LOWER auto-padding modes, recomputes the padding
// vectors from the (now static) data and filter shapes; the paddings stored
// at construction time are placeholders in those modes. No-op for dynamic
// shapes or explicit padding.
void op::v0::GroupConvolution::post_validate_and_infer_types()
{
    const auto data_shape = get_input_partial_shape(0);
    const auto filters_shape = get_input_partial_shape(1);

    if (data_shape.is_dynamic() || filters_shape.is_dynamic())
    {
        return;
    }
    if (m_pad_type != PadType::SAME_UPPER && m_pad_type != PadType::SAME_LOWER)
    {
        return;
    }

    m_padding_below.clear();
    m_padding_above.clear();

    // Keep only the spatial dimensions of the filter shape ({O, I} removed).
    auto spatial_filter_shape = filters_shape.to_shape();
    spatial_filter_shape.erase(spatial_filter_shape.begin(), spatial_filter_shape.begin() + 2);

    infer_auto_padding(data_shape.to_shape(),
                       spatial_filter_shape,
                       m_window_movement_strides,
                       m_window_dilation_strides,
                       m_pad_type,
                       m_padding_above,
                       m_padding_below);
}
// Returns the filter shape in the 5D groups-first layout
// {groups, O/groups, I/groups, spatial...}. When the filters input already
// carries the group dimension, its shape is returned unchanged.
Shape op::v0::GroupConvolution::get_weights_dimensions() const
{
    const auto data_shape = get_input_shape(0);
    const auto weights_shape = get_input_shape(1);

    // Weights already include the group dimension: nothing to reshape.
    if (m_groups_in_filters)
    {
        return weights_shape;
    }

    const size_t groups = get_groups();
    Shape grouped_weights_shape{weights_shape};
    // Per-group output channels: this op's result carries output channels on
    // axis 1, so divide that extent by the group count.
    grouped_weights_shape.at(0) = get_shape().at(1) / groups;
    // Per-group input channels: data carries channels on axis 1.
    grouped_weights_shape.at(1) = data_shape.at(1) / groups;
    // Prepend the group count to form the 5D layout.
    grouped_weights_shape.insert(grouped_weights_shape.begin(), groups);
    return grouped_weights_shape;
}
// Creates a copy of this op wired to new inputs, preserving whichever
// constructor flavour (explicit groups vs. groups-in-filters) was used
// originally so the clone re-derives its group count the same way.
shared_ptr<Node> op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);

    if (!m_groups_in_filters)
    {
        // Explicit-groups flavour: forward the stored group count.
        return make_shared<op::v0::GroupConvolution>(new_args.at(0),
                                                     new_args.at(1),
                                                     get_window_movement_strides(),
                                                     get_window_dilation_strides(),
                                                     get_padding_below(),
                                                     get_padding_above(),
                                                     get_data_dilation_strides(),
                                                     get_groups(),
                                                     get_pad_type());
    }

    // Groups-in-filters flavour: the clone reads groups from the filter shape.
    return make_shared<op::v0::GroupConvolution>(new_args.at(0),
                                                 new_args.at(1),
                                                 get_window_movement_strides(),
                                                 get_window_dilation_strides(),
                                                 get_padding_below(),
                                                 get_padding_above(),
                                                 get_data_dilation_strides(),
                                                 get_pad_type());
}
// Decomposes the grouped convolution into get_groups() ordinary v0
// convolutions: the data is split along its channel axis, the filters along
// their first axis, and the per-group results are concatenated back along
// the channel axis.
OutputVector op::v0::GroupConvolution::decompose_op() const
{
auto data = input_value(0);
auto filters = input_value(1);
auto filters_shape = get_input_shape(1);
// Split one convolution op to N ops where N is the number of groups
// and concat results after computation.
NodeVector convolution_nodes;
// slice data along the channel axis (axis 1)
auto sliced_data = builder::split(data, get_groups(), 1);
// slice filters along axis 0 (output channels, or the group dimension when
// the filters carry one)
auto sliced_filters = builder::split(filters, get_groups(), 0);
for (std::size_t group{0}; group < get_groups(); ++group)
{
auto sliced_filter = sliced_filters[group];
if (m_groups_in_filters)
{
// Remove group dimension after slicing: reshape the slice to the
// filter shape minus its leading (group) dimension.
sliced_filter = make_shared<op::Reshape>(
sliced_filters[group],
get_default_order(sliced_filters[group].get_shape().size()),
Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape)));
}
// Each group becomes an independent v0::Convolution with this op's
// strides/dilations/padding.
convolution_nodes.push_back(
std::make_shared<ngraph::op::Convolution>(sliced_data[group],
sliced_filter,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides,
m_pad_type));
}
// Re-assemble the per-group outputs along the channel axis.
std::size_t concatenation_axis = 1;
return {std::make_shared<ngraph::op::Concat>(convolution_nodes, concatenation_axis)};
}
//------------------------------------------------------------------------------
// v0::GroupConvolutionBackpropData
//------------------------------------------------------------------------------
constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info;
// Constructs the data-batch backprop of a grouped convolution. Inputs are the
// forward data batch (used only for its shape), the forward filters, and the
// output delta; `groups` is the forward op's group count.
op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData(
const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups)
: FusedOp({data_batch, filters, output_delta})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_groups(groups)
{
constructor_validate_and_infer_types();
}
// Validates that the output-delta and filter element types are floating point
// (or dynamic), and marks the output fully dynamic when any input shape is
// not static. When all shapes are static no output type is set here —
// presumably the FusedOp decomposition path supplies it; verify against the
// FusedOp base class.
void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types()
{
element::Type data_element_type = get_input_element_type(2);
element::Type filters_elem_type = get_input_element_type(1);
NODE_VALIDATION_CHECK(this,
data_element_type.is_dynamic() || data_element_type.is_real(),
"Output delta element type must be f16, bf16, f32, f64 or dynamic (got ",
data_element_type,
").");
NODE_VALIDATION_CHECK(this,
filters_elem_type.is_dynamic() || filters_elem_type.is_real(),
"Filters element type must be f16, bf16, f32, f64 or dynamic (got ",
filters_elem_type,
").");
PartialShape data_pshape = get_input_partial_shape(0);
PartialShape filters_pshape = get_input_partial_shape(1);
PartialShape delta_pshape = get_input_partial_shape(2);
// Any dynamic input shape defers shape inference: output becomes dynamic
// with the delta's element type.
if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic())
{
set_output_type(0, data_element_type, PartialShape::dynamic());
}
}
// Creates a copy of this op wired to new inputs (data batch, filters, delta),
// preserving all stored attributes.
shared_ptr<Node>
    op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
    // Use the shared helper (as the other v0 clone_with_new_inputs overloads
    // in this file do) instead of a hand-rolled size check with a generic
    // ngraph_error; the helper reports the expected vs. actual input counts.
    check_new_args_count(this, new_args);
    return make_shared<op::v0::GroupConvolutionBackpropData>(new_args.at(0),
                                                             new_args.at(1),
                                                             new_args.at(2),
                                                             get_window_movement_strides(),
                                                             get_window_dilation_strides(),
                                                             get_padding_below(),
                                                             get_padding_above(),
                                                             get_groups());
}
// Decomposes the grouped backprop into get_groups() per-group
// v0::ConvolutionBackpropData ops: deltas are split along the channel axis,
// filters along axis 0, the per-group data shape gets its channel extent
// divided by the group count, and the per-group gradients are concatenated
// back along the channel axis.
OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const
{
auto filters = input_value(1);
auto output_delta = input_value(2);
auto data_shape = get_input_shape(0);
NodeVector sliced_inputs;
auto groups = get_groups();
// slice data shape: each per-group backprop sees 1/groups of the channels
data_shape[1] /= groups;
// slice delta along the channel axis
auto sliced_delta = builder::split(output_delta, groups, 1);
// slice filters along axis 0
auto sliced_filters = builder::split(filters, groups, 0);
auto num_spatials = get_window_movement_strides().size();
for (size_t i = 0; i < groups; ++i)
{
auto sliced_conv = std::make_shared<op::ConvolutionBackpropData>(
data_shape,
sliced_filters[i],
sliced_delta[i],
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
Strides(num_spatials, 1)); // default data dilation strides
sliced_inputs.push_back(sliced_conv);
}
// Re-assemble per-group gradients along the channel axis.
size_t concatenation_axis = 1;
return {std::make_shared<ngraph::op::Concat>(sliced_inputs, concatenation_axis)};
}

View File

@ -248,117 +248,5 @@ namespace ngraph
CoordinateDiff m_output_padding;
};
} // namespace v1
namespace v0
{
/// \brief Group Convolution (v0, fused op decomposed into per-group
///        v0::Convolution ops).
class NGRAPH_API GroupConvolution : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolution() = default;
/// \brief Constructs a grouped convolution with an explicit group count;
///        filters have no leading group dimension.
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type = PadType::EXPLICIT);
// constructor which accept groups included in filters shape.
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
/// \brief Filter shape in groups-first layout {groups, O/g, I/g, spatial...}.
Shape get_weights_dimensions() const;
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
size_t get_groups() const { return m_groups; };
const PadType& get_pad_type() const { return m_pad_type; }
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
/// \brief True when the filters input carries the group count as its
///        leading dimension (second constructor flavour).
bool has_groups_in_filters() const { return m_groups_in_filters; }
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
// Group count; 0 until derived from the filter shape when
// m_groups_in_filters is set.
size_t m_groups;
PadType m_pad_type{PadType::NOTSET};
private:
// Whether groups are encoded as the filters' leading dimension.
bool m_groups_in_filters;
};
/// \brief Group Convolution data batch backprop (v0, fused op decomposed
///        into per-group v0::ConvolutionBackpropData ops).
class NGRAPH_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolutionBackpropData() = default;
/// \brief Constructs the backprop op from the forward data batch (shape
///        only), forward filters, output delta, and the forward attributes.
GroupConvolutionBackpropData(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups);
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
size_t get_groups() const { return m_groups; };
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
size_t m_groups;
};
}
using v0::GroupConvolution;
using v0::GroupConvolutionBackpropData;
} // namespace op
} // namespace ngraph

View File

@ -52,9 +52,7 @@ NGRAPH_OP(Concat, ngraph::op::v0, 0)
NGRAPH_OP(Constant, ngraph::op::v0, 0)
NGRAPH_OP(Convert, ngraph::op::v0, 0)
NGRAPH_OP(ConvertLike, ngraph::op::v1, 1)
NGRAPH_OP(Convolution, ngraph::op::v0, 0)
NGRAPH_OP(Convolution, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1, 1)
NGRAPH_OP(Cos, ngraph::op::v0, 0)
NGRAPH_OP(Cosh, ngraph::op::v0, 0)
@ -91,9 +89,7 @@ NGRAPH_OP(Greater, ngraph::op::v0, 0)
NGRAPH_OP(Greater, ngraph::op::v1, 1)
NGRAPH_OP(GreaterEq, ngraph::op::v0, 0)
NGRAPH_OP(GreaterEqual, ngraph::op::v1, 1)
NGRAPH_OP(GroupConvolution, ngraph::op::v0, 0)
NGRAPH_OP(GroupConvolution, ngraph::op::v1, 1)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0, 0)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1, 1)
NGRAPH_OP(HardSigmoid, ngraph::op::v0, 0)
NGRAPH_OP(Interpolate, ngraph::op::v0, 0)

View File

@ -1030,75 +1030,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::Convert>(args[0], target_type);
break;
}
case OP_TYPEID::Convolution:
{
auto window_movement_strides =
node_js.at("window_movement_strides").get<vector<size_t>>();
auto window_dilation_strides =
node_js.at("window_dilation_strides").get<vector<size_t>>();
auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
// For backwards compatibility, we accept "image_dilation_strides" in place of
// "data_dilation_strides", and we also allow it to be omitted altogether.
json data_dilation_strides;
if (has_key(node_js, "data_dilation_strides"))
{
data_dilation_strides = node_js["data_dilation_strides"];
}
else if (has_key(node_js, "image_dilation_strides"))
{
data_dilation_strides = node_js["image_dilation_strides"];
}
op::PadType pad_type = read_pad_type(node_js);
if (data_dilation_strides.empty())
{
node = make_shared<op::v0::Convolution>(args[0],
args[1],
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above);
}
else
{
node = make_shared<op::v0::Convolution>(
args[0],
args[1],
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
data_dilation_strides.get<std::vector<size_t>>(),
pad_type);
}
break;
}
case OP_TYPEID::ConvolutionBackpropData:
{
auto data_batch_shape = node_js.at("data_batch_shape").get<vector<size_t>>();
auto window_movement_strides_forward =
node_js.at("window_movement_strides_forward").get<vector<size_t>>();
auto window_dilation_strides_forward =
node_js.at("window_dilation_strides_forward").get<vector<size_t>>();
auto padding_below_forward =
node_js.at("padding_below_forward").get<vector<std::ptrdiff_t>>();
auto padding_above_forward =
node_js.at("padding_above_forward").get<vector<std::ptrdiff_t>>();
auto data_dilation_strides_forward =
node_js.at("data_dilation_strides_forward").get<vector<size_t>>();
node = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
args[0],
args[1],
window_movement_strides_forward,
window_dilation_strides_forward,
padding_below_forward,
padding_above_forward,
data_dilation_strides_forward);
break;
}
case OP_TYPEID::Cos:
{
node = make_shared<op::Cos>(args[0]);
@ -1253,62 +1184,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::GRN>(args[0], bias);
break;
}
case OP_TYPEID::GroupConvolution:
{
auto window_movement_strides =
node_js.at("window_movement_strides").get<vector<size_t>>();
auto window_dilation_strides =
node_js.at("window_dilation_strides").get<vector<size_t>>();
auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
auto data_dilation_strides = node_js.at("data_dilation_strides").get<vector<size_t>>();
op::PadType pad_type = read_pad_type(node_js);
if (has_key(node_js, "groups"))
{
auto groups = node_js.at("groups").get<size_t>();
node = make_shared<op::GroupConvolution>(args[0],
args[1],
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
data_dilation_strides,
groups,
pad_type);
}
else
{
node = make_shared<op::GroupConvolution>(args[0],
args[1],
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
data_dilation_strides,
pad_type);
}
break;
}
case OP_TYPEID::GroupConvolutionBackpropData:
{
auto window_movement_strides =
node_js.at("window_movement_strides").get<vector<size_t>>();
auto window_dilation_strides =
node_js.at("window_dilation_strides").get<vector<size_t>>();
auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
auto groups = node_js.at("groups").get<size_t>();
node = make_shared<op::GroupConvolutionBackpropData>(args[0],
args[1],
args[2],
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
groups);
break;
}
case OP_TYPEID::HardSigmoid:
{
node = make_shared<op::HardSigmoid>(args[0], args[1], args[2]);
@ -2252,28 +2127,6 @@ json JSONSerializer::serialize_node(const Node& n)
node["target_type"] = write_element_type(tmp->get_convert_element_type());
break;
}
case OP_TYPEID::Convolution:
{
auto tmp = static_cast<const op::v0::Convolution*>(&n);
node["window_movement_strides"] = tmp->get_window_movement_strides();
node["window_dilation_strides"] = tmp->get_window_dilation_strides();
node["padding_below"] = tmp->get_padding_below();
node["padding_above"] = tmp->get_padding_above();
node["data_dilation_strides"] = tmp->get_data_dilation_strides();
node["pad_type"] = tmp->get_pad_type();
break;
}
case OP_TYPEID::ConvolutionBackpropData:
{
auto tmp = static_cast<const op::v0::ConvolutionBackpropData*>(&n);
node["data_batch_shape"] = tmp->get_data_batch_shape();
node["window_movement_strides_forward"] = tmp->get_window_movement_strides_forward();
node["window_dilation_strides_forward"] = tmp->get_window_dilation_strides_forward();
node["padding_below_forward"] = tmp->get_padding_below_forward();
node["padding_above_forward"] = tmp->get_padding_above_forward();
node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward();
break;
}
case OP_TYPEID::Cos: { break;
}
case OP_TYPEID::Cosh: { break;
@ -2412,31 +2265,6 @@ json JSONSerializer::serialize_node(const Node& n)
node["bias"] = tmp->get_bias();
break;
}
case OP_TYPEID::GroupConvolution:
{
auto tmp = static_cast<const op::GroupConvolution*>(&n);
node["window_movement_strides"] = tmp->get_window_movement_strides();
node["window_dilation_strides"] = tmp->get_window_dilation_strides();
node["padding_below"] = tmp->get_padding_below();
node["padding_above"] = tmp->get_padding_above();
node["data_dilation_strides"] = tmp->get_data_dilation_strides();
if (!tmp->has_groups_in_filters())
{
node["groups"] = tmp->get_groups();
}
node["pad_type"] = tmp->get_pad_type();
break;
}
case OP_TYPEID::GroupConvolutionBackpropData:
{
auto tmp = static_cast<const op::GroupConvolutionBackpropData*>(&n);
node["window_movement_strides"] = tmp->get_window_movement_strides();
node["window_dilation_strides"] = tmp->get_window_dilation_strides();
node["padding_below"] = tmp->get_padding_below();
node["padding_above"] = tmp->get_padding_above();
node["groups"] = tmp->get_groups();
break;
}
case OP_TYPEID::HardSigmoid: { break;
}
case OP_TYPEID::Less:

View File

@ -25,10 +25,12 @@
namespace ngraph
{
NGRAPH_API
Strides conv_default_strides(const Node* node,
const PartialShape& data_batch_shape,
const PartialShape& filters_shape);
NGRAPH_API
CoordinateDiff conv_default_padding(const Node* node,
const PartialShape& data_batch_shape,
const PartialShape& filters_shape);

View File

@ -185,7 +185,6 @@ set(SRC
type_prop/unary_elementwise.cpp
type_prop/unsqueeze.cpp
type_prop/variadic_split.cpp
type_prop_benchmark.cpp
type_prop_layers.cpp
util.cpp
)

View File

@ -17,6 +17,7 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "op/convolution.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
@ -37,14 +38,14 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining)
Shape shape_b{2, 2, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 2, 2, 2};
auto conv1 = make_shared<op::Convolution>(A,
auto conv1 = make_shared<op::v0::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
auto conv2 = make_shared<op::Convolution>(conv1,
auto conv2 = make_shared<op::v0::Convolution>(conv1,
B,
Strides{1, 1},
Strides{1, 1},
@ -76,7 +77,7 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple)
Shape shape_b{2, 2, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 2, 2, 2};
auto conv1 = make_shared<op::Convolution>(A,
auto conv1 = make_shared<op::v0::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
@ -109,7 +110,7 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding)
Shape shape_b{1, 1, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{1, 1, 5, 5};
auto conv1 = make_shared<op::Convolution>(A,
auto conv1 = make_shared<op::v0::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},

View File

@ -34,6 +34,7 @@
#include "ngraph/check.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "op/group_conv.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/engine/test_engines.hpp"
@ -194,7 +195,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{1, 1},
@ -222,7 +223,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_striding)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{2, 2},
Strides{1, 1},
@ -250,7 +251,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_window_dilation)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{2, 2},
@ -278,7 +279,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_data_dilation)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{1, 1},
@ -306,7 +307,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{1, 1},
@ -334,7 +335,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding_and_window_dilation)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{2, 2},
@ -362,7 +363,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_shape_variation)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 4, 1});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{2, 2},
@ -390,7 +391,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_data_variation)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 3, 3});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{2, 2},
@ -421,7 +422,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 1, 2, 1, 1});
auto group_conv = make_shared<op::GroupConvolution>(data,
auto group_conv = make_shared<op::v0::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{1, 1},

View File

@ -17,6 +17,7 @@
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "op/group_conv.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
@ -48,7 +49,7 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
auto padding_end = CoordinateDiff{0, 0};
size_t groups = 3;
auto conv_bprop_data = make_shared<op::GroupConvolutionBackpropData>(
auto conv_bprop_data = make_shared<op::v0::GroupConvolutionBackpropData>(
data_batch, filters, deltas, strides, dilations, padding_begin, padding_end, groups);
auto f = make_shared<Function>(conv_bprop_data, ParameterVector{data_batch, filters, deltas});

View File

@ -151,23 +151,6 @@ TEST(build_graph, no_arg_construction)
ASSERT_EQ(add1->get_output_shape(0), Shape{7});
}
TEST(build_graph, multi_output_split)
{
const auto data = make_shared<op::Parameter>(element::f32, Shape{64, 8, 100, 150});
auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 2, 10, 20});
const auto axis = op::Constant::create(element::i64, Shape{}, {1});
const auto split = make_shared<op::Split>(data, axis, 2);
auto conv = make_shared<op::GroupConvolution>(split->output(1),
filters,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1},
2);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131}));
}
TEST(build_graph, multi_output_split_dynamic)
{
const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());

View File

@ -19,6 +19,8 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/validation_util.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
@ -162,7 +164,7 @@ namespace
void op_is_Convolution()
{
op::Convolution node;
op::v0::Convolution node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
@ -171,7 +173,7 @@ namespace
void op_is_ConvolutionBackpropData()
{
op::ConvolutionBackpropData node;
op::v0::ConvolutionBackpropData node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
@ -405,7 +407,7 @@ namespace
void op_is_GroupConvolution()
{
op::GroupConvolution node;
op::v0::GroupConvolution node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
@ -414,7 +416,7 @@ namespace
void op_is_GroupConvolutionBackpropData()
{
op::GroupConvolutionBackpropData node;
op::v0::GroupConvolutionBackpropData node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));

View File

@ -3,6 +3,8 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
#include "opset0_downgrade.hpp"
#include "opset1_upgrade.hpp"
#include "util/test_control.hpp"

View File

@ -33,6 +33,10 @@ set (SRC
dynamic/dynamic_backend.hpp
op/avg_pool.cpp
op/avg_pool.hpp
op/convolution.cpp
op/convolution.hpp
op/group_conv.cpp
op/group_conv.hpp
pass/dyn_elimination.cpp
pass/dyn_elimination.hpp
pass/fused_op_decomposition.cpp

View File

@ -90,6 +90,8 @@
#include "ngraph/runtime/reference/topk.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "op/avg_pool.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
namespace ngraph
{
@ -348,7 +350,7 @@ protected:
}
case OP_TYPEID::Convolution:
{
const op::Convolution* c = static_cast<const op::Convolution*>(&node);
const op::v0::Convolution* c = static_cast<const op::v0::Convolution*>(&node);
reference::convolution<T>(args[0]->get_data_ptr<const T>(),
args[1]->get_data_ptr<const T>(),
out[0]->get_data_ptr<T>(),
@ -366,8 +368,8 @@ protected:
case OP_TYPEID::ConvolutionBackpropData:
{
// Note that args[1] and args[0] are switched here from the usual order.
const op::ConvolutionBackpropData* c =
static_cast<const op::ConvolutionBackpropData*>(&node);
const op::v0::ConvolutionBackpropData* c =
static_cast<const op::v0::ConvolutionBackpropData*>(&node);
reference::convolution_backprop_in<T>(args[1]->get_data_ptr<const T>(),
args[0]->get_data_ptr<const T>(),
out[0]->get_data_ptr<T>(),

View File

@ -0,0 +1,356 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "convolution.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
// *** Convolution OP SET 0 ***
constexpr NodeTypeInfo op::v0::Convolution::type_info;
// Full-attribute constructor. Empty strides/dilations/paddings are allowed;
// validate_and_infer_types() fills them with defaults once shapes are known.
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type)
: Op({data_batch, filters})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_data_dilation_strides(data_dilation_strides)
, m_pad_type(pad_type)
{
constructor_validate_and_infer_types();
}
// Exposes all serializable attributes to the visitor (used by
// (de)serialization and attribute comparison). Keep names and order stable:
// they form part of the serialized format.
bool op::v0::Convolution::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("window_movement_strides", m_window_movement_strides);
visitor.on_attribute("window_dilation_strides", m_window_dilation_strides);
visitor.on_attribute("data_dilation_strides", m_data_dilation_strides);
visitor.on_attribute("padding_below", m_padding_below);
visitor.on_attribute("padding_above", m_padding_above);
visitor.on_attribute("pad_type", m_pad_type);
return true;
}
// Fills in defaulted (empty) strides/dilations/paddings, resolves SAME_UPPER/
// SAME_LOWER auto-padding from static shapes, merges the element types of
// data and filters, and infers the output shape. Note this MUTATES the
// stored attribute members (m_*), so re-validation sees the resolved values.
void op::v0::Convolution::validate_and_infer_types()
{
const PartialShape& data_batch_shape = get_input_partial_shape(0);
element::Type data_batch_et = get_input_element_type(0);
const PartialShape& filters_shape = get_input_partial_shape(1);
element::Type filters_et = get_input_element_type(1);
// Empty attribute vectors mean "use defaults" — one entry per spatial dim.
if (m_data_dilation_strides.size() == 0)
{
m_data_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_window_movement_strides.size() == 0)
{
m_window_movement_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_window_dilation_strides.size() == 0)
{
m_window_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_padding_below.size() == 0)
{
m_padding_below = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_padding_above.size() == 0)
{
m_padding_above = conv_default_padding(this, data_batch_shape, filters_shape);
}
// Auto-padding modes: recompute paddings from static shapes.
if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
{
if (data_batch_shape.is_static() && filters_shape.is_static())
{
// TODO: data dilation
m_padding_below.clear();
m_padding_above.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
infer_auto_padding(data_batch_shape.to_shape(),
filter_shape,
m_window_movement_strides,
m_window_dilation_strides,
m_pad_type,
m_padding_above,
m_padding_below);
}
}
element::Type result_et;
PartialShape result_shape;
// Data and filter element types must be mergeable (identical or one dynamic).
NODE_VALIDATION_CHECK(
this,
element::Type::merge(result_et, data_batch_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
data_batch_et,
", filters element type: ",
filters_et,
").");
result_shape = infer_convolution_forward(this,
data_batch_shape,
m_data_dilation_strides,
m_padding_below,
m_padding_above,
filters_shape,
m_window_movement_strides,
m_window_dilation_strides);
set_output_type(0, result_et, result_shape);
}
// Convenience constructor: data dilation strides are left empty and filled in
// with defaults by validate_and_infer_types().
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
                                 const Output<Node>& filters,
                                 const Strides& window_movement_strides,
                                 const Strides& window_dilation_strides,
                                 const CoordinateDiff& padding_below,
                                 const CoordinateDiff& padding_above)
    : Convolution(data_batch,
                  filters,
                  window_movement_strides,
                  window_dilation_strides,
                  padding_below,
                  padding_above,
                  Strides())
{
}
// Convenience constructor: padding and data dilation strides are left empty
// and filled in with defaults by validate_and_infer_types().
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
                                 const Output<Node>& filters,
                                 const Strides& window_movement_strides,
                                 const Strides& window_dilation_strides)
    : Convolution(data_batch,
                  filters,
                  window_movement_strides,
                  window_dilation_strides,
                  CoordinateDiff(),
                  CoordinateDiff())
{
}
// Convenience constructor: window dilation, padding and data dilation strides
// are left empty and filled in with defaults by validate_and_infer_types().
op::v0::Convolution::Convolution(const Output<Node>& data_batch,
                                 const Output<Node>& filters,
                                 const Strides& window_movement_strides)
    : Convolution(data_batch,
                  filters,
                  window_movement_strides,
                  Strides(),
                  CoordinateDiff(),
                  CoordinateDiff())
{
}
// Convenience constructor: every attribute is left empty and filled in with
// defaults by validate_and_infer_types().
op::v0::Convolution::Convolution(const Output<Node>& data_batch, const Output<Node>& filters)
    : Convolution(data_batch, filters, Strides(), Strides(), CoordinateDiff(), CoordinateDiff())
{
}
// Creates a fresh Convolution node carrying identical attributes over the
// replacement inputs.
shared_ptr<Node> op::v0::Convolution::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    const auto& data = new_args.at(0);
    const auto& filters = new_args.at(1);
    return make_shared<v0::Convolution>(data,
                                        filters,
                                        m_window_movement_strides,
                                        m_window_dilation_strides,
                                        m_padding_below,
                                        m_padding_above,
                                        m_data_dilation_strides,
                                        m_pad_type);
}
// NOTE(review): this type_info definition belongs to ConvolutionBackpropData
// (defined further below) even though it sits between Convolution methods.
constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info;
// Default value for a Convolution output when one must be materialized: a zero
// constant matching this node's element type and shape.
shared_ptr<Node> op::v0::Convolution::get_default_value() const
{
    return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}
// Constructs the data-batch backprop of a forward convolution. All "forward"
// attributes describe the original forward op; inputs are {filters, delta}.
op::v0::ConvolutionBackpropData::ConvolutionBackpropData(
    const Shape& data_batch_shape,
    const Output<Node>& filters,
    const Output<Node>& output_delta,
    const Strides& window_movement_strides_forward,
    const Strides& window_dilation_strides_forward,
    const CoordinateDiff& padding_below_forward,
    const CoordinateDiff& padding_above_forward,
    const Strides& data_dilation_strides_forward)
    : Op({filters, output_delta})
    , m_data_batch_shape(data_batch_shape)
    , m_window_movement_strides_forward(window_movement_strides_forward)
    , m_window_dilation_strides_forward(window_dilation_strides_forward)
    , m_padding_below_forward(padding_below_forward)
    , m_padding_above_forward(padding_above_forward)
    , m_data_dilation_strides_forward(data_dilation_strides_forward)
{
    constructor_validate_and_infer_types();
}
// Serializes/deserializes all ConvolutionBackpropData attributes.
// NOTE(review): keep the visitation order stable — serializers may depend on it.
bool op::v0::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("data_batch_shape", m_data_batch_shape);
    visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward);
    visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward);
    visitor.on_attribute("padding_below_forward", m_padding_below_forward);
    visitor.on_attribute("padding_above_forward", m_padding_above_forward);
    visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward);
    return true;
}
// Validation strategy: re-run the *forward* convolution's shape inference from
// the stored forward attributes, then require that the incoming delta matches
// the inferred forward output shape. The op's own output shape is simply the
// stored forward data batch shape.
void op::v0::ConvolutionBackpropData::validate_and_infer_types()
{
    // Backprop to data is itself convolution, with inputs/outputs/attributes
    // transmogrified as follows.
    //
    //                          Forward          Backward
    // "N" axis for data batch  0                0
    // "C" axis for data batch  1                1
    // "Co" axis for filters    0                0
    // "Ci" axis for filters    1                1
    // "N" axis for output      0                0
    // "C" axis for output      1                1
    // Data batch               x                delta
    // Data batch shape         S_x              S_o
    // Filters                  f                reverse(f) [on spatial axes]
    // Filters shape            S_f              S_f
    // Window movement strides  q_x              p_x
    // Window dilation strides  p_f              p_f
    // Padding below            a_x              (S_f - 1)p_f - a_x
    // Padding above            b_x              (S_f - 1)p_f +
    //                                             + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f)
    //                                                % q_x)
    //                                             - b_x
    // Data dilation strides    p_x              q_x
    // Output shape             S_o              S_x
    //
    // To _validate_, we simply need to check/infer the output shape of the forward convolution,
    // then check to make sure that the incoming delta has the same shape as the forward output.
    const PartialShape& filters_shape = get_input_partial_shape(0);
    element::Type filters_et = get_input_element_type(0);
    const PartialShape& delta_shape = get_input_partial_shape(1);
    element::Type delta_et = get_input_element_type(1);
    element::Type forward_result_et;
    PartialShape forward_result_shape;
    NODE_VALIDATION_CHECK(
        this,
        element::Type::merge(forward_result_et, delta_et, filters_et),
        "Element types for data batch and filters do not match (data batch element type: ",
        delta_et,
        ", filters element type: ",
        filters_et,
        ").");
    forward_result_shape = infer_convolution_forward(this,
                                                     m_data_batch_shape,
                                                     m_data_dilation_strides_forward,
                                                     m_padding_below_forward,
                                                     m_padding_above_forward,
                                                     filters_shape,
                                                     m_window_movement_strides_forward,
                                                     m_window_dilation_strides_forward);
    NODE_VALIDATION_CHECK(this,
                          forward_result_shape.compatible(delta_shape),
                          "Inferred forward output shape (",
                          forward_result_shape,
                          ") does not match shape of ",
                          "delta (",
                          delta_shape,
                          ").");
    set_output_type(0, forward_result_et, m_data_batch_shape);
}
// Recreates the node with identical forward-prop attributes over the
// replacement {filters, delta} inputs.
shared_ptr<Node>
    op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    const auto& filters = new_args.at(0);
    const auto& delta = new_args.at(1);
    return make_shared<v0::ConvolutionBackpropData>(m_data_batch_shape,
                                                    filters,
                                                    delta,
                                                    m_window_movement_strides_forward,
                                                    m_window_dilation_strides_forward,
                                                    m_padding_below_forward,
                                                    m_padding_above_forward,
                                                    m_data_dilation_strides_forward);
}
// Padding-below applied to the delta when the backprop is expressed as a
// forward convolution: (S_f - 1) * p_f - a_x for each spatial axis.
CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const
{
    const auto& data_shape = get_data_batch_shape();
    const auto& dilation = get_window_dilation_strides_forward();
    const auto& filter_shape = get_input_shape(0);
    const auto& pad_below = get_padding_below_forward();
    // Spatial axes start after the leading {N, C} pair.
    const size_t num_spatial = static_cast<size_t>(data_shape.size()) - 2;
    CoordinateDiff result;
    result.reserve(num_spatial);
    for (size_t dim = 0; dim < num_spatial; dim++)
    {
        result.push_back((static_cast<ptrdiff_t>(filter_shape[dim + 2]) - 1) * dilation[dim] -
                         pad_below[dim]);
    }
    return result;
}
// Padding-above applied to the delta when the backprop is expressed as a
// forward convolution (see the table in validate_and_infer_types):
//   (S_f - 1)p_f + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x) - b_x
// per spatial axis. The arithmetic deliberately mixes signed/unsigned values
// exactly as written — do not "simplify" without checking the conversions.
CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const
{
    auto& in_shape = get_data_batch_shape();
    auto& filter_dilation = get_window_dilation_strides_forward();
    auto& filter_shape = get_input_shape(0);
    auto& in_pad_below = get_padding_below_forward();
    auto& in_pad_above = get_padding_above_forward();
    auto& in_dilation = get_data_dilation_strides_forward();
    auto& stride = get_window_movement_strides_forward();
    // Spatial axes start after the leading {N, C} pair.
    size_t spatial_dim_count = static_cast<size_t>(in_shape.size()) - 2;
    CoordinateDiff backward_delta_out_pad_above;
    backward_delta_out_pad_above.resize(spatial_dim_count);
    for (size_t i = 0; i < spatial_dim_count; i++)
    {
        backward_delta_out_pad_above[i] =
            (static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i] +
            ((in_pad_below[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + in_pad_above[i] -
              (static_cast<ptrdiff_t>(filter_shape[i + 2]) - 1) * filter_dilation[i]) %
             stride[i]) -
            in_pad_above[i];
    }
    return backward_delta_out_pad_above;
}

View File

@ -0,0 +1,314 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Batched convolution operation, with optional window dilation and stride.
///
class BACKEND_API Convolution : public Op
{
public:
static constexpr NodeTypeInfo type_info{"Convolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a batched convolution operation.
Convolution() = default;
/// \brief Constructs a batched convolution operation.
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
/// \param window_dilation_strides The window dilation strides.<br>
/// `[f]`
/// \param padding_below The padding-below sizes.<br>
/// `[f]`
/// \param padding_above The padding-above sizes.<br>
/// `[f]`
/// \param data_dilation_strides The data dilation strides.<br>
/// `[f]`
/// \param pad_type The pad type for automatically computing padding sizes.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
/// \brief Constructs a batched convolution operation with no data dilation (i.e.,
/// all
/// data dilation strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
/// \param window_dilation_strides The window dilation strides.<br>
/// `[f]`
/// \param padding_below The padding-below sizes.<br>
/// `[f]`
/// \param padding_above The padding-above sizes.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above);
/// \brief Constructs a batched convolution operation with no padding or data
/// dilation
/// (i.e., padding above and below are 0 everywhere, and all data dilation
/// strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
/// \param window_dilation_strides The window dilation strides.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides);
/// \brief Constructs a batched convolution operation with no window dilation,
/// padding,
/// or data dilation (i.e., padding above and below are 0 everywhere, and all
/// window/data dilation strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param window_movement_strides The window movement strides.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides);
/// \brief Constructs a batched convolution operation with no window dilation or
/// movement stride (i.e., padding above and below are 0 everywhere, and all
/// window/data dilation strides and window movement strides are 1).
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
Convolution(const Output<Node>& data_batch, const Output<Node>& filters);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The window movement strides.
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
void set_window_movement_strides(const Strides& window_movement_strides)
{
m_window_movement_strides = window_movement_strides;
}
/// \return The window dilation strides.
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
void set_window_dilation_strides(const Strides& window_dilation_strides)
{
m_window_dilation_strides = window_dilation_strides;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
void set_padding_below(const CoordinateDiff& padding_below)
{
m_padding_below = padding_below;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
void set_adding_above(const CoordinateDiff& padding_above)
{
m_padding_above = padding_above;
}
/// \return The input data dilation strides.
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
void set_data_dilation_strides(const Strides& data_dilation_strides)
{
m_data_dilation_strides = data_dilation_strides;
}
/// \return The pad type for convolution.
const PadType& get_pad_type() const { return m_pad_type; }
void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; }
/// \return The default value for Convolution.
virtual std::shared_ptr<Node> get_default_value() const override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
PadType m_pad_type;
};
            /// \brief Data batch backprop for batched convolution operation.
            ///
            /// Inputs are {filters, output_delta}; the single output has the stored
            /// forward data batch shape. All attributes describe the *forward* op.
            class BACKEND_API ConvolutionBackpropData : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"ConvolutionBackpropData", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a batched-convolution data batch-backprop operation.
                ConvolutionBackpropData() = default;
                ///
                /// \brief Constructs a batched-convolution data batch-backprop operation.
                ///
                /// \param data_batch_shape The shape of the data batch from
                ///                         forward-prop.
                /// \param filters The node producing the filters from
                ///                forward-prop.
                /// \param data The node producing output delta.
                /// \param window_movement_strides_forward The window movement strides from
                ///                                        forward-prop.
                /// \param window_dilation_strides_forward The window dilation strides from
                ///                                        forward-prop.
                /// \param padding_below_forward The padding-below sizes from
                ///                              forward-prop.
                /// \param padding_above_forward The padding-above sizes from
                ///                              forward-prop.
                /// \param data_dilation_strides_forward The data dilation strides from
                ///                                      forward-prop.
                ///
                ConvolutionBackpropData(const Shape& data_batch_shape,
                                        const Output<Node>& filters,
                                        const Output<Node>& data,
                                        const Strides& window_movement_strides_forward,
                                        const Strides& window_dilation_strides_forward,
                                        const CoordinateDiff& padding_below_forward,
                                        const CoordinateDiff& padding_above_forward,
                                        const Strides& data_dilation_strides_forward);
                void validate_and_infer_types() override;
                bool visit_attributes(AttributeVisitor& visitor) override;
                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;
                /// \return The data batch shape.
                const Shape& get_data_batch_shape() const { return m_data_batch_shape; }
                /// \brief Replaces the stored forward data batch shape.
                void set_data_batch_shape(const Shape& data_batch_shape)
                {
                    m_data_batch_shape = data_batch_shape;
                }
                /// \return The window movement strides from the forward prop.
                const Strides& get_window_movement_strides_forward() const
                {
                    return m_window_movement_strides_forward;
                }
                /// \brief Replaces the stored forward window movement strides.
                void set_window_movement_strides_forward(
                    const Strides& window_movement_strides_forward)
                {
                    m_window_movement_strides_forward = window_movement_strides_forward;
                }
                /// \return The window dilation strides from the forward prop.
                const Strides& get_window_dilation_strides_forward() const
                {
                    return m_window_dilation_strides_forward;
                }
                /// \brief Replaces the stored forward window dilation strides.
                void set_window_dilation_strides_forward(
                    const Strides& window_dilation_strides_forward)
                {
                    m_window_dilation_strides_forward = window_dilation_strides_forward;
                }
                /// \return The padding-below sizes (possibly negative) from the forward prop.
                const CoordinateDiff& get_padding_below_forward() const
                {
                    return m_padding_below_forward;
                }
                /// \brief Replaces the stored forward padding-below sizes.
                void set_padding_below_forward(const CoordinateDiff& padding_below_forward)
                {
                    m_padding_below_forward = padding_below_forward;
                }
                /// \return The padding-above sizes (possibly negative) from the forward prop.
                const CoordinateDiff& get_padding_above_forward() const
                {
                    return m_padding_above_forward;
                }
                /// \brief Replaces the stored forward padding-above sizes.
                void set_padding_above_forward(const CoordinateDiff& padding_above_forward)
                {
                    m_padding_above_forward = padding_above_forward;
                }
                /// \return The input data dilation strides from the forward prop.
                const Strides& get_data_dilation_strides_forward() const
                {
                    return m_data_dilation_strides_forward;
                }
                /// \brief Replaces the stored forward data dilation strides.
                void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward)
                {
                    m_data_dilation_strides_forward = data_dilation_strides_forward;
                }
                /// \return Per-spatial-axis padding-above to apply to the delta when the
                ///         backprop is expressed as a forward convolution.
                CoordinateDiff compute_backward_delta_out_pad_above() const;
                /// \return Per-spatial-axis padding-below to apply to the delta when the
                ///         backprop is expressed as a forward convolution.
                CoordinateDiff compute_backward_delta_out_pad_below() const;

            protected:
                Shape m_data_batch_shape;
                Strides m_window_movement_strides_forward;
                Strides m_window_dilation_strides_forward;
                CoordinateDiff m_padding_below_forward;
                CoordinateDiff m_padding_above_forward;
                Strides m_data_dilation_strides_forward;
            };
} // namespace v0
} // namespace op
} // namespace ngraph

View File

@ -0,0 +1,333 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <numeric>
#include "convolution.hpp"
#include "group_conv.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/builder/split.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
//------------------------------------------------------------------------------
// v0::GroupConvolution
//------------------------------------------------------------------------------
constexpr NodeTypeInfo op::v0::GroupConvolution::type_info;
// Constructor taking an explicit group count; m_groups_in_filters is false,
// i.e. the filters input does not carry a leading group dimension.
op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
                                           const Output<Node>& filters,
                                           const Strides& window_movement_strides,
                                           const Strides& window_dilation_strides,
                                           const CoordinateDiff& padding_below,
                                           const CoordinateDiff& padding_above,
                                           const Strides& data_dilation_strides,
                                           const size_t groups,
                                           const PadType& pad_type)
    : FusedOp({data_batch, filters})
    , m_window_movement_strides(window_movement_strides)
    , m_window_dilation_strides(window_dilation_strides)
    , m_padding_below(padding_below)
    , m_padding_above(padding_above)
    , m_data_dilation_strides(data_dilation_strides)
    , m_groups(groups)
    , m_pad_type(pad_type)
    , m_groups_in_filters(false)
{
    constructor_validate_and_infer_types();
}
// Constructor for filters that already carry the group dimension: m_groups is
// initialized to 0 and recovered from filters dimension 0 during
// pre_validate_and_infer_types().
op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
                                           const Output<Node>& filters,
                                           const Strides& window_movement_strides,
                                           const Strides& window_dilation_strides,
                                           const CoordinateDiff& padding_below,
                                           const CoordinateDiff& padding_above,
                                           const Strides& data_dilation_strides,
                                           const PadType& pad_type)
    : FusedOp({data_batch, filters})
    , m_window_movement_strides(window_movement_strides)
    , m_window_dilation_strides(window_dilation_strides)
    , m_padding_below(padding_below)
    , m_padding_above(padding_above)
    , m_data_dilation_strides(data_dilation_strides)
    , m_groups(0)
    , m_pad_type(pad_type)
    , m_groups_in_filters(true)
{
    constructor_validate_and_infer_types();
}
// Runs before FusedOp decomposition: checks group/channel divisibility when
// both input shapes are static, otherwise marks the output fully dynamic.
void op::v0::GroupConvolution::pre_validate_and_infer_types()
{
    auto data_shape = get_input_partial_shape(0);
    auto filters_shape = get_input_partial_shape(1);
    if (data_shape.is_static() && filters_shape.is_static())
    {
        // Update groups
        if (m_groups_in_filters)
        {
            // Group count lives in filters dimension 0 for this constructor form.
            m_groups = get_input_partial_shape(1)[0].get_length();
        }
        // Data channels
        NODE_VALIDATION_CHECK(this,
                              data_shape.to_shape()[1] % get_groups() == 0,
                              "Data channels not a multiple of group size");
        // Output channels
        NODE_VALIDATION_CHECK(this,
                              filters_shape.to_shape()[0] % get_groups() == 0,
                              "# Filters not a multiple of group size");
        // Input Filters: per-group filter channels times groups must equal the
        // data channel count (channel axis index shifts by one when the group
        // dimension is part of the filters shape).
        NODE_VALIDATION_CHECK(this,
                              (filters_shape.to_shape()[m_groups_in_filters ? 2 : 1] *
                               get_groups()) == data_shape.to_shape()[1],
                              "Incorrect number of channels per filter");
    }
    else
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}
// Runs after FusedOp decomposition-based inference: resolves SAME_UPPER /
// SAME_LOWER auto-padding once both input shapes are static.
void op::v0::GroupConvolution::post_validate_and_infer_types()
{
    auto data_shape = get_input_partial_shape(0);
    auto filters_shape = get_input_partial_shape(1);
    if (data_shape.is_static() && filters_shape.is_static())
    {
        if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
        {
            // Auto-padding overwrites any explicit padding.
            m_padding_below.clear();
            m_padding_above.clear();
            auto filter_shape = filters_shape.to_shape();
            filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
            // NOTE: infer_auto_padding takes padding_above before padding_below.
            infer_auto_padding(data_shape.to_shape(),
                               filter_shape,
                               m_window_movement_strides,
                               m_window_dilation_strides,
                               m_pad_type,
                               m_padding_above,
                               m_padding_below);
        }
    }
}
// Returns the filter dimensions with an explicit leading group axis:
// {G, OC/G, IC/G, F1, ... Ff}.
Shape op::v0::GroupConvolution::get_weights_dimensions() const
{
    // Filters that already carry the group dimension need no reshaping.
    if (m_groups_in_filters)
    {
        return get_input_shape(1);
    }
    const size_t groups = get_groups();
    Shape grouped_shape{get_input_shape(1)};
    // Per-group output channels, taken from the channel axis (1) of this
    // node's output shape.
    grouped_shape.at(0) = get_shape().at(1) / groups;
    // Per-group input channels, from the data channel axis (1).
    grouped_shape.at(1) = get_input_shape(0).at(1) / groups;
    // Prepend the group count.
    grouped_shape.insert(grouped_shape.begin(), groups);
    return grouped_shape;
}
// Clones the node over new inputs. Nodes whose filters carry the group
// dimension are rebuilt through the groupless constructor; otherwise the
// explicit group count is forwarded.
shared_ptr<Node> op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    if (m_groups_in_filters)
    {
        return make_shared<op::v0::GroupConvolution>(new_args.at(0),
                                                     new_args.at(1),
                                                     get_window_movement_strides(),
                                                     get_window_dilation_strides(),
                                                     get_padding_below(),
                                                     get_padding_above(),
                                                     get_data_dilation_strides(),
                                                     get_pad_type());
    }
    return make_shared<op::v0::GroupConvolution>(new_args.at(0),
                                                 new_args.at(1),
                                                 get_window_movement_strides(),
                                                 get_window_dilation_strides(),
                                                 get_padding_below(),
                                                 get_padding_above(),
                                                 get_data_dilation_strides(),
                                                 get_groups(),
                                                 get_pad_type());
}
// Lowers the grouped convolution into get_groups() independent
// v0::Convolution ops: data is sliced along the channel axis (1), filters
// along axis 0, and the per-group results are concatenated along channels.
OutputVector op::v0::GroupConvolution::decompose_op() const
{
    auto data = input_value(0);
    auto filters = input_value(1);
    auto filters_shape = get_input_shape(1);
    // Split one convolution op to N ops where N is the number of groups
    // and concat results after computation.
    NodeVector convolution_nodes;
    // slice data
    auto sliced_data = builder::split(data, get_groups(), 1);
    // slice filters
    auto sliced_filters = builder::split(filters, get_groups(), 0);
    for (std::size_t group{0}; group < get_groups(); ++group)
    {
        auto sliced_filter = sliced_filters[group];
        if (m_groups_in_filters)
        {
            // Remove group dimmension after slicing: reshape the slice down to
            // the plain {OC/G, IC/G, spatials...} layout v0::Convolution expects.
            sliced_filter = make_shared<op::Reshape>(
                sliced_filters[group],
                get_default_order(sliced_filters[group].get_shape().size()),
                Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape)));
        }
        convolution_nodes.push_back(
            std::make_shared<ngraph::op::v0::Convolution>(sliced_data[group],
                                                          sliced_filter,
                                                          m_window_movement_strides,
                                                          m_window_dilation_strides,
                                                          m_padding_below,
                                                          m_padding_above,
                                                          m_data_dilation_strides,
                                                          m_pad_type));
    }
    // Stitch the per-group outputs back together along the channel axis.
    std::size_t concatenation_axis = 1;
    return {std::make_shared<ngraph::op::Concat>(convolution_nodes, concatenation_axis)};
}
//------------------------------------------------------------------------------
// v0::GroupConvolutionBackpropData
//------------------------------------------------------------------------------
constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info;
// Data-batch backprop of a grouped convolution. Inputs are
// {data_batch, filters, output_delta}; attributes mirror the forward op.
op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData(
    const Output<Node>& data_batch,
    const Output<Node>& filters,
    const Output<Node>& output_delta,
    const Strides& window_movement_strides,
    const Strides& window_dilation_strides,
    const CoordinateDiff& padding_below,
    const CoordinateDiff& padding_above,
    const size_t groups)
    : FusedOp({data_batch, filters, output_delta})
    , m_window_movement_strides(window_movement_strides)
    , m_window_dilation_strides(window_dilation_strides)
    , m_padding_below(padding_below)
    , m_padding_above(padding_above)
    , m_groups(groups)
{
    constructor_validate_and_infer_types();
}
// Runs before FusedOp decomposition: checks that delta and filters have
// floating-point (or dynamic) element types, and marks the output dynamic
// when any input shape is dynamic. When all shapes are static no output type
// is set here — presumably the FusedOp decomposition path infers it; confirm.
void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types()
{
    element::Type data_element_type = get_input_element_type(2);
    element::Type filters_elem_type = get_input_element_type(1);
    NODE_VALIDATION_CHECK(this,
                          data_element_type.is_dynamic() || data_element_type.is_real(),
                          "Output delta element type must be f16, bf16, f32, f64 or dynamic (got ",
                          data_element_type,
                          ").");
    NODE_VALIDATION_CHECK(this,
                          filters_elem_type.is_dynamic() || filters_elem_type.is_real(),
                          "Filters element type must be f16, bf16, f32, f64 or dynamic (got ",
                          filters_elem_type,
                          ").");
    PartialShape data_pshape = get_input_partial_shape(0);
    PartialShape filters_pshape = get_input_partial_shape(1);
    PartialShape delta_pshape = get_input_partial_shape(2);
    if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic())
    {
        set_output_type(0, data_element_type, PartialShape::dynamic());
    }
}
// Clones the node over the replacement {data_batch, filters, delta} inputs,
// preserving all attributes.
shared_ptr<Node>
    op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
{
    // Use the common helper — as every other clone_with_new_inputs in this
    // file does — instead of a hand-rolled size check with a bare
    // ngraph_error; it validates new_args against the node's input count and
    // reports a consistent, node-attributed error.
    check_new_args_count(this, new_args);
    return make_shared<op::v0::GroupConvolutionBackpropData>(new_args.at(0),
                                                             new_args.at(1),
                                                             new_args.at(2),
                                                             get_window_movement_strides(),
                                                             get_window_dilation_strides(),
                                                             get_padding_below(),
                                                             get_padding_above(),
                                                             get_groups());
}
// Lowers the grouped backprop into get_groups() independent
// v0::ConvolutionBackpropData ops over per-group slices of delta (channel
// axis 1) and filters (axis 0), concatenated back along channels.
OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const
{
    auto filters = input_value(1);
    auto output_delta = input_value(2);
    auto data_shape = get_input_shape(0);
    NodeVector sliced_inputs;
    auto groups = get_groups();
    // slice data shape: each per-group backprop reconstructs 1/groups of the
    // data channels.
    data_shape[1] /= groups;
    // slice delta
    auto sliced_delta = builder::split(output_delta, groups, 1);
    // slice filters
    auto sliced_filters = builder::split(filters, groups, 0);
    auto num_spatials = get_window_movement_strides().size();
    for (size_t i = 0; i < groups; ++i)
    {
        auto sliced_conv = std::make_shared<op::v0::ConvolutionBackpropData>(
            data_shape,
            sliced_filters[i],
            sliced_delta[i],
            get_window_movement_strides(),
            get_window_dilation_strides(),
            get_padding_below(),
            get_padding_above(),
            Strides(num_spatials, 1)); // default data dilation strides
        sliced_inputs.push_back(sliced_conv);
    }
    // Stitch the per-group data gradients together along the channel axis.
    size_t concatenation_axis = 1;
    return {std::make_shared<ngraph::op::Concat>(sliced_inputs, concatenation_axis)};
}

View File

@ -0,0 +1,138 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "backend_visibility.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/fused_op.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Group Convolution
class BACKEND_API GroupConvolution : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolution", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolution() = default;
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const size_t groups,
const PadType& pad_type = PadType::EXPLICIT);
// constructor which accept groups included in filters shape.
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
const PadType& pad_type = PadType::EXPLICIT);
Shape get_weights_dimensions() const;
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
Output<Node> get_filters() { return input_value(1); }
Output<Node> get_data_batch() { return input_value(0); }
size_t get_groups() const { return m_groups; };
const PadType& get_pad_type() const { return m_pad_type; }
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
bool has_groups_in_filters() const { return m_groups_in_filters; }
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
Strides m_data_dilation_strides;
size_t m_groups;
PadType m_pad_type{PadType::NOTSET};
private:
bool m_groups_in_filters;
};
/// \brief Group Convolution data batch backprop
class BACKEND_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolutionBackpropData() = default;
GroupConvolutionBackpropData(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups);
const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
size_t get_groups() const { return m_groups; };
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
size_t m_groups;
};
}
} // namespace op
} // namespace ngraph

View File

@ -18,6 +18,8 @@
#include "ngraph/ops.hpp"
#include "op/avg_pool.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
namespace ngraph
{

View File

@ -31,6 +31,8 @@
#include "ngraph/type.hpp"
#include "ngraph/validation_util.hpp"
#include "op/avg_pool.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
#include "opset0_downgrade.hpp"
#include "pass/implicit_broadcast_elimination.hpp"
@ -309,7 +311,7 @@ namespace
const auto filters_arg = node->input_value(1);
const auto strides = node->get_strides();
const size_t num_spatial_dims = strides.size();
auto replacement_node = make_shared<op::GroupConvolution>(data_arg,
auto replacement_node = make_shared<op::v0::GroupConvolution>(data_arg,
filters_arg,
node->get_strides(),
node->get_dilations(),

View File

@ -65,8 +65,8 @@ NGRAPH_OP(Clamp, ngraph::op)
NGRAPH_OP(Concat, ngraph::op)
NGRAPH_OP(Constant, ngraph::op)
NGRAPH_OP(Convert, ngraph::op)
NGRAPH_OP(Convolution, ngraph::op)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op)
NGRAPH_OP(Convolution, ngraph::op::v0)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0)
NGRAPH_OP(Cos, ngraph::op)
NGRAPH_OP(Cosh, ngraph::op)
NGRAPH_OP(CumSum, ngraph::op::v0)
@ -88,8 +88,8 @@ NGRAPH_OP(Gelu, ngraph::op)
NGRAPH_OP(GetOutputElement, ngraph::op)
NGRAPH_OP(Greater, ngraph::op)
NGRAPH_OP(GreaterEq, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op::v0)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0)
NGRAPH_OP(HardSigmoid, ngraph::op)
NGRAPH_OP(Interpolate, ngraph::op)
NGRAPH_OP(Less, ngraph::op)

View File

@ -27,6 +27,8 @@
#include "ngraph/ops.hpp"
#include "ngraph/provenance.hpp"
#include "op/avg_pool.hpp"
#include "op/convolution.hpp"
#include "op/group_conv.hpp"
using namespace std;
using namespace ngraph;
@ -59,7 +61,7 @@ namespace
}
shared_ptr<Node> op_cast(shared_ptr<op::BroadcastLike> node) { return nullptr; }
shared_ptr<Node> op_cast(shared_ptr<op::Convolution> node)
shared_ptr<Node> op_cast(shared_ptr<op::v0::Convolution> node)
{
auto strides = node->get_window_movement_strides();
auto dilations = node->get_window_dilation_strides();
@ -88,7 +90,7 @@ namespace
return replacement_node;
}
shared_ptr<Node> op_cast(shared_ptr<op::ConvolutionBackpropData> node)
shared_ptr<Node> op_cast(shared_ptr<op::v0::ConvolutionBackpropData> node)
{
auto data_batch_shape = node->get_data_batch_shape();
auto strides = node->get_window_movement_strides_forward();

View File

@ -14,6 +14,7 @@
// limitations under the License.
//*****************************************************************************
#include "op/convolution.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@ -26,7 +27,7 @@ TEST(type_prop, conv_1d_deduce)
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91}));
@ -44,7 +45,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce)
Shape data_batch_shape{64, 3, 100};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 91}); // output delta
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
Strides{1},
@ -72,7 +73,7 @@ TEST(type_prop, conv_1d_deduce_padded)
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::Convolution>(
auto conv = make_shared<op::v0::Convolution>(
param0, param1, move_strides, dilation_strides, padding_below, padding_above);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96}));
@ -95,7 +96,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded)
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -120,7 +121,7 @@ TEST(type_prop, conv_1d_deduce_strided)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
auto move_strides = Strides{2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46}));
@ -139,7 +140,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 46}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -167,7 +168,7 @@ TEST(type_prop, conv_1d_deduce_strided_padded)
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::Convolution>(
auto conv = make_shared<op::v0::Convolution>(
param0, param1, move_strides, dilation_strides, padding_below, padding_above);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 48}));
@ -190,7 +191,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded)
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -215,7 +216,7 @@ TEST(type_prop, conv_1d_deduce_strided_small_uneven)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 5});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2});
auto move_strides = Strides{2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2}));
@ -234,7 +235,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 2}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -259,7 +260,7 @@ TEST(type_prop, conv_1d_deduce_strided_small_even)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 6});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2});
auto move_strides = Strides{2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 3}));
@ -278,7 +279,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2}); // filters
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 3}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -304,7 +305,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 82}));
@ -324,7 +325,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 82}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -352,7 +353,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated_padded)
auto dilate_strides = Strides{2};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::Convolution>(
auto conv = make_shared<op::v0::Convolution>(
param0, param1, move_strides, dilate_strides, padding_below, padding_above);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 87}));
@ -375,7 +376,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded)
auto dilate_strides = Strides{2};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -404,7 +405,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded)
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto data_dilate_strides = Strides{3};
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
move_strides,
dilate_strides,
@ -433,7 +434,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto data_dilate_strides = Strides{3};
auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
param0,
param1,
move_strides,
@ -457,7 +458,7 @@ TEST(type_prop, conv_2d_deduce)
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131}));
@ -478,7 +479,7 @@ TEST(type_prop, conv_2d_deduce_padded)
auto dilate_strides = Strides{1, 1};
auto padding_below = CoordinateDiff{2, 3};
auto padding_above = CoordinateDiff{3, 4};
auto conv = make_shared<op::Convolution>(
auto conv = make_shared<op::v0::Convolution>(
param0, param1, move_strides, dilate_strides, padding_below, padding_above);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 138}));
@ -500,7 +501,7 @@ TEST(type_prop, conv_2d_deduce_padded_neg)
auto dilate_strides = Strides{1, 1};
auto padding_below = CoordinateDiff{2, -3};
auto padding_above = CoordinateDiff{3, -4};
auto conv = make_shared<op::Convolution>(
auto conv = make_shared<op::v0::Convolution>(
param0, param1, move_strides, dilate_strides, padding_below, padding_above);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 124}));
@ -528,7 +529,7 @@ TEST_P(DeduceAutoPadTest, same_lower)
auto param0 = make_shared<op::Parameter>(element::f32, image_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filter_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
std::get<2>(GetParam()),
std::get<3>(GetParam()),
@ -591,7 +592,7 @@ TEST(type_prop, conv_2d_deduce_strided)
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
auto move_strides = Strides{2, 3};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46, 44}));
@ -610,7 +611,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
auto move_strides = Strides{2, 3};
auto dilate_strides = Strides{3, 2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 37, 38}));
@ -632,7 +633,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated)
auto padding_below = CoordinateDiff{0, 0};
auto padding_above = CoordinateDiff{0, 0};
auto data_dilate_strides = Strides{2, 3};
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
move_strides,
dilate_strides,
@ -657,7 +658,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_small)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2, 3});
auto move_strides = Strides{2, 3};
auto dilate_strides = Strides{3, 2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2}));
@ -676,7 +677,7 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_small)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2, 3, 2});
auto move_strides = Strides{2, 3, 4};
auto dilate_strides = Strides{3, 2, 2};
auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2, 2}));
@ -698,7 +699,7 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small)
auto padding_below = CoordinateDiff{0, 0, 0};
auto padding_above = CoordinateDiff{0, 0, 0};
auto data_dilate_strides = Strides{2, 3, 2};
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
move_strides,
dilate_strides,
@ -723,7 +724,7 @@ TEST(type_prop, conv_invalid_element_type_mismatch)
auto param1 = make_shared<op::Parameter>(element::i32, Shape{3, 3, 2, 2});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with element type mismatch not detected";
@ -746,7 +747,7 @@ TEST(type_prop, conv_invalid_0d_input)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid 0D input not detected";
@ -771,7 +772,7 @@ TEST(type_prop, conv_invalid_1d_input)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{2});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid 1D input not detected";
@ -796,7 +797,7 @@ TEST(type_prop, conv_invalid_2d_input)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{2, 6});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid 2D input not detected";
@ -821,7 +822,7 @@ TEST(type_prop, conv_invalid_0_batch_size)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{0, 6, 1});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with 0 batch size not detected";
@ -843,7 +844,7 @@ TEST(type_prop, conv_invalid_0_input_channels)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{5, 0, 1});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with 0 input channels not detected";
@ -867,7 +868,7 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{5, 2, 3, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with too many filter dimensions not detected";
@ -889,7 +890,7 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{5, 2, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with too few filter dimensions not detected";
@ -911,7 +912,7 @@ TEST(type_prop, conv_invalid_0_output_channels)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{0, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with 0 output channels not detected";
@ -933,7 +934,7 @@ TEST(type_prop, conv_invalid_input_channel_mismatch)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 3, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with channel count mismatch not detected";
@ -958,7 +959,7 @@ TEST(type_prop, conv_invalid_movement_stride_rank)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1, Strides{2, 3, 8});
auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{2, 3, 8});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with wrong movement stride rank not detected";
@ -987,7 +988,8 @@ TEST(type_prop, conv_invalid_window_dilation_stride_rank)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 3, 8});
auto conv =
make_shared<op::v0::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 3, 8});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with wrong window dilation stride rank not detected";
@ -1016,7 +1018,7 @@ TEST(type_prop, conv_invalid_data_dilation_stride_rank)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
Strides{2, 3},
Strides{2, 3},
@ -1051,7 +1053,7 @@ TEST(type_prop, conv_invalid_padding_below_rank)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
Strides{2, 3},
Strides{1, 1},
@ -1085,7 +1087,7 @@ TEST(type_prop, conv_invalid_padding_above_rank)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
Strides{2, 3},
Strides{2, 3},
@ -1119,7 +1121,7 @@ TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
Strides{1, 1},
Strides{1, 1},
@ -1148,7 +1150,7 @@ TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
Strides{1, 1},
Strides{1, 1},
@ -1177,7 +1179,7 @@ TEST(type_prop, conv_invalid_input_spatial_size_0)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with zero-length spatial axis not detected";
@ -1201,7 +1203,7 @@ TEST(type_prop, conv_invalid_window_size_0)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 0});
try
{
auto conv = make_shared<op::Convolution>(param0, param1);
auto conv = make_shared<op::v0::Convolution>(param0, param1);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with zero-length window axis not detected";
@ -1225,7 +1227,7 @@ TEST(type_prop, conv_invalid_window_dilation_stride_0)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 0});
auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 0});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with wrong 0-length window dilation stride axis not detected";
@ -1249,7 +1251,7 @@ TEST(type_prop, conv_invalid_data_dilation_stride_0)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
Strides{2, 3},
Strides{2, 3},
@ -1279,7 +1281,7 @@ TEST(type_prop, conv_invalid_dilated_window_too_large)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1, Strides{1, 1}, Strides{4, 4});
auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{1, 1}, Strides{4, 4});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with oversized dilated window not detected";
@ -1303,7 +1305,7 @@ TEST(type_prop, conv_invalid_movement_stride_0)
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
try
{
auto conv = make_shared<op::Convolution>(param0, param1, Strides{0, 1});
auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{0, 1});
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input with wrong 0-length movement stride axis not detected";
@ -1333,7 +1335,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok)
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1360,7 +1362,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_rank_wrong
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1401,7 +1403,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_dim_zero)
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1438,7 +1440,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_rank_wron
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1479,7 +1481,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_dim_zero)
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1516,7 +1518,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_below_rank_wrong)
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1557,7 +1559,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_above_rank_wrong)
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1598,7 +1600,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_rank_wrong)
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1639,7 +1641,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_dim_zero)
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1674,7 +1676,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok)
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1701,7 +1703,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_data_batch_rank_wr
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1742,7 +1744,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_o
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1771,7 +1773,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_z
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1805,7 +1807,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1833,7 +1835,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1869,7 +1871,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1897,7 +1899,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1930,7 +1932,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1957,7 +1959,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -1992,7 +1994,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok)
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2019,7 +2021,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_arg_ranks_m
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2055,7 +2057,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2084,7 +2086,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2120,7 +2122,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspat
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2147,7 +2149,7 @@ TEST(type_prop,
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2177,7 +2179,7 @@ TEST(
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2214,7 +2216,7 @@ TEST(
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2242,7 +2244,7 @@ TEST(
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2270,7 +2272,7 @@ TEST(
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2300,7 +2302,7 @@ TEST(
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2339,7 +2341,7 @@ TEST(
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2376,7 +2378,7 @@ TEST(
auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2406,7 +2408,7 @@ TEST(
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2445,7 +2447,7 @@ TEST(
try
{
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,
@ -2482,7 +2484,7 @@ TEST(type_prop, conv_partial_dynamic_et)
auto param0 = make_shared<op::Parameter>(element::dynamic, data_batch_shape);
auto param1 = make_shared<op::Parameter>(element::dynamic, filters_shape);
auto conv = make_shared<op::Convolution>(param0,
auto conv = make_shared<op::v0::Convolution>(param0,
param1,
window_movement_strides,
window_dilation_strides,

View File

@ -21,113 +21,6 @@
using namespace std;
using namespace ngraph;
TEST(type_prop, group_conv)
{
// Deduce type
auto data = make_shared<op::Parameter>(element::f32, Shape{64, 4, 100, 150});
auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 2, 10, 20});
auto conv = make_shared<op::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1},
2);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131}));
}
TEST(type_prop, group_conv_auto)
{
// Deduce type
auto data = make_shared<op::Parameter>(element::f32, Shape{64, 4, 100, 150});
auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 2, 10, 20});
auto conv = make_shared<op::GroupConvolution>(data,
filters,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1},
2,
op::PadType::AUTO);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 100, 150}));
EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{4, 9}));
EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{5, 10}));
}
TEST(type_prop, group_conv_invalid_groups)
{
// Deduce type
try
{
auto conv = make_shared<op::GroupConvolution>(
make_shared<op::Parameter>(element::f32, Shape{64, 20, 100, 150}),
make_shared<op::Parameter>(element::f32, Shape{30, 10, 10, 20}),
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1},
3);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid group conv";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data channels not a multiple of group size"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
try
{
auto conv = make_shared<op::GroupConvolution>(
make_shared<op::Parameter>(element::f32, Shape{64, 30, 100, 150}),
make_shared<op::Parameter>(element::f32, Shape{20, 10, 10, 20}),
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1},
3);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid group conv";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("# Filters not a multiple of group size"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
try
{
auto conv = make_shared<op::GroupConvolution>(
make_shared<op::Parameter>(element::f32, Shape{64, 30, 100, 150}),
make_shared<op::Parameter>(element::f32, Shape{30, 20, 10, 20}),
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1},
3);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid group conv";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Incorrect number of channels per filter"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower)
{
const PartialShape data_batch_shape{1, 4, 5, 5};

View File

@ -1,75 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, DISABLED_benchmark_type_prop_add)
{
auto p1 = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto p2 = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
constexpr size_t num_iterations = 1000000;
size_t total_nanosec = 0;
stopwatch sw;
for (size_t i = 0; i < num_iterations; i++)
{
sw.start();
auto n = make_shared<op::Add>(p1, p2);
sw.stop();
total_nanosec += sw.get_nanoseconds();
}
std::cout.imbue(std::locale(""));
std::cout << "Constructed " << std::fixed << num_iterations << " Add ops in " << std::fixed
<< total_nanosec << " ns" << std::endl;
}
TEST(type_prop, DISABLED_benchmark_type_prop_convolution)
{
auto d = make_shared<op::Parameter>(element::f32, Shape{64, 3, 224, 224});
auto f = make_shared<op::Parameter>(element::f32, Shape{64, 3, 7, 7});
auto strides = Strides{1, 1};
auto dilation = Strides{1, 1};
auto padding_below = CoordinateDiff{1, 1};
auto padding_above = CoordinateDiff{1, 1};
constexpr size_t num_iterations = 1000000;
size_t total_nanosec = 0;
stopwatch sw;
for (size_t i = 0; i < num_iterations; i++)
{
sw.start();
auto n =
make_shared<op::Convolution>(d, f, strides, dilation, padding_below, padding_above);
sw.stop();
total_nanosec += sw.get_nanoseconds();
}
std::cout.imbue(std::locale(""));
std::cout << "Constructed " << std::fixed << num_iterations << " Convolution ops in "
<< std::fixed << total_nanosec << " ns" << std::endl;
}