Removed backprop operations (#1091)
* Removed backprop operations
* Fixed build
* Removed AvgPool
* Removed tests for v0 AvgPool
* Fixed code style
* Fixed export
parent 08cd0f7779
commit 182499c006
@@ -589,8 +589,6 @@ set (SRC
    pass/nop_elimination.hpp
    pass/pass.cpp
    pass/pass.hpp
    pass/opset0_downgrade.cpp
    pass/opset0_downgrade.hpp
    pass/opset1_downgrade.cpp
    pass/opset1_downgrade.hpp
    pass/opset1_upgrade.cpp
@@ -78,6 +78,7 @@ namespace ngraph
        ///
        /// \return The squeezed node.
        ///
        NGRAPH_API
        std::shared_ptr<Node> squeeze(const Output<Node>& value,
                                      std::vector<std::size_t> axes = {0});
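For context, the squeeze helper declared above removes the listed size-1 axes from a tensor. A minimal usage sketch, assuming this is the builder::squeeze helper from the reshape builder header (hypothetical values):

    // Squeeze axes 0 and 2 of a {1, 3, 1, 2} tensor; the result has shape {3, 2}.
    auto param = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 1, 2});
    auto squeezed = ngraph::builder::squeeze(param, {0, 2});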
@@ -22,389 +22,6 @@
using namespace std;
using namespace ngraph;

// *** AvgPool OP SET 0 ***
constexpr NodeTypeInfo op::v0::AvgPool::type_info;

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides,
                         const Shape& padding_below,
                         const Shape& padding_above,
                         bool include_padding_in_avg_computation,
                         const PadType& pad_type,
                         bool ceil_mode)
    : Op({arg})
    , m_window_shape(window_shape)
    , m_window_movement_strides(window_movement_strides)
    , m_padding_below(padding_below)
    , m_padding_above(padding_above)
    , m_include_padding_in_avg_computation(include_padding_in_avg_computation)
    , m_pad_type(pad_type)
    , m_ceil_mode(ceil_mode)
{
    constructor_validate_and_infer_types();
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides,
                         const Shape& padding_below,
                         const Shape& padding_above,
                         bool include_padding_in_avg_computation,
                         const PadType& pad_type)
    : AvgPool(arg,
              window_shape,
              window_movement_strides,
              padding_below,
              padding_above,
              include_padding_in_avg_computation,
              pad_type,
              false)
{
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides,
                         const Shape& padding_below,
                         const Shape& padding_above,
                         bool include_padding_in_avg_computation)
    : AvgPool(arg,
              window_shape,
              window_movement_strides,
              padding_below,
              padding_above,
              include_padding_in_avg_computation,
              PadType::EXPLICIT)
{
}

bool op::v0::AvgPool::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("window_shape", m_window_shape);
    visitor.on_attribute("window_movement_strides", m_window_movement_strides);
    visitor.on_attribute("padding_below", m_padding_below);
    visitor.on_attribute("padding_above", m_padding_above);
    visitor.on_attribute("include_padding_in_avg_computation",
                         m_include_padding_in_avg_computation);
    visitor.on_attribute("pad_type", m_pad_type);
    visitor.on_attribute("ceil_mode", m_ceil_mode);
    return true;
}

void op::v0::AvgPool::validate_and_infer_types()
{
    if (0 == m_window_movement_strides.size())
    {
        m_window_movement_strides = Strides(m_window_shape.size(), 1);
    }

    if (0 == m_padding_below.size())
    {
        m_padding_below = Shape(m_window_shape.size(), 0);
    }

    if (0 == m_padding_above.size())
    {
        m_padding_above = Shape(m_window_shape.size(), 0);
    }

    const PartialShape& arg_shape = get_input_partial_shape(0);

    if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
    {
        if (arg_shape.is_static())
        {
            CoordinateDiff padding_above, padding_below;
            infer_auto_padding(arg_shape.to_shape(),
                               m_window_shape,
                               m_window_movement_strides,
                               Strides(m_window_shape.size(), 1), // No dilation
                               m_pad_type,
                               padding_above,
                               padding_below);
            m_padding_above = Shape(padding_above.begin(), padding_above.end());
            m_padding_below = Shape(padding_below.begin(), padding_below.end());
        }
    }

    // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
    // now still take Shape (no negative padding).
    CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
    CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());

    set_output_type(0,
                    get_input_element_type(0),
                    infer_batched_pooling_forward(this,
                                                  arg_shape,
                                                  padding_below,
                                                  padding_above,
                                                  m_window_shape,
                                                  m_window_movement_strides,
                                                  m_include_padding_in_avg_computation,
                                                  m_ceil_mode));
}
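// Worked example (hypothetical sizes) of the pooling shape inference performed above:
//   out_dim = floor((in_dim + pad_below + pad_above - window) / stride) + 1
// For in_dim = 7, window = 3, stride = 2, pad_below = pad_above = 1:
//   out_dim = floor((7 + 1 + 1 - 3) / 2) + 1 = 3 + 1 = 4
// With ceil_mode set, floor becomes ceil, so an inexact division (e.g. in_dim = 8,
// giving 7 / 2) yields 5 instead of 4.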
op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides)
    : AvgPool(arg, window_shape, window_movement_strides, Shape(), Shape(), false)
{
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg, const Shape& window_shape)
    : AvgPool(arg, window_shape, Strides(), Shape(), Shape(), false)
{
}

const Shape& op::v0::AvgPool::get_window_shape() const
{
    return m_window_shape;
}

void op::v0::AvgPool::set_window_shape(const Shape& window_shape)
{
    m_window_shape = window_shape;
}

const Strides& op::v0::AvgPool::get_window_movement_strides() const
{
    return m_window_movement_strides;
}

void op::v0::AvgPool::set_window_movement_strides(const Strides& window_movement_strides)
{
    m_window_movement_strides = window_movement_strides;
}

const Shape& op::v0::AvgPool::get_padding_below() const
{
    return m_padding_below;
}

void op::v0::AvgPool::set_padding_below(const Shape& padding_below)
{
    m_padding_below = padding_below;
}

const Shape& op::v0::AvgPool::get_padding_above() const
{
    return m_padding_above;
}

void op::v0::AvgPool::set_padding_above(const Shape& padding_above)
{
    m_padding_above = padding_above;
}

bool op::v0::AvgPool::get_include_padding_in_avg_computation() const
{
    return m_include_padding_in_avg_computation;
}

void op::v0::AvgPool::set_include_padding_in_avg_computation(
    bool include_padding_in_avg_computation)
{
    m_include_padding_in_avg_computation = include_padding_in_avg_computation;
}

const op::PadType& op::v0::AvgPool::get_pad_type() const
{
    return m_pad_type;
}

void op::v0::AvgPool::set_pad_type(const op::PadType& pad_type)
{
    m_pad_type = pad_type;
}

bool op::v0::AvgPool::get_ceil_mode() const
{
    return m_ceil_mode;
}

void op::v0::AvgPool::set_ceil_mode(bool ceil_mode)
{
    m_ceil_mode = ceil_mode;
}

shared_ptr<Node> op::v0::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<v0::AvgPool>(new_args.at(0),
                                    m_window_shape,
                                    m_window_movement_strides,
                                    m_padding_below,
                                    m_padding_above,
                                    m_include_padding_in_avg_computation,
                                    m_pad_type,
                                    m_ceil_mode);
}

constexpr NodeTypeInfo op::v0::AvgPoolBackprop::type_info;
shared_ptr<Node> op::v0::AvgPool::get_default_value() const
{
    return Constant::create(get_element_type(), get_shape(), {0});
}
op::v0::AvgPoolBackprop::AvgPoolBackprop(const Shape& forward_arg_shape,
                                         const Output<Node>& delta,
                                         const Shape& window_shape,
                                         const Strides& window_movement_strides,
                                         const Shape& padding_below,
                                         const Shape& padding_above,
                                         bool include_padding_in_avg_computation)
    : Op({delta})
    , m_forward_arg_shape(forward_arg_shape)
    , m_window_shape(window_shape)
    , m_window_movement_strides(window_movement_strides)
    , m_padding_below(padding_below)
    , m_padding_above(padding_above)
    , m_include_padding_in_avg_computation(include_padding_in_avg_computation)
{
    constructor_validate_and_infer_types();
}

bool op::v0::AvgPoolBackprop::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("forward_arg_shape", m_forward_arg_shape);
    visitor.on_attribute("window_shape", m_window_shape);
    visitor.on_attribute("window_movement_strides", m_window_movement_strides);
    visitor.on_attribute("padding_below", m_padding_below);
    visitor.on_attribute("padding_above", m_padding_above);
    visitor.on_attribute("include_padding_in_avg_computation",
                         m_include_padding_in_avg_computation);
    return true;
}

void op::v0::AvgPoolBackprop::validate_and_infer_types()
{
    // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
    // now still take Shape (no negative padding).
    CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
    CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());

    PartialShape forward_result_shape =
        infer_batched_pooling_forward(this,
                                      m_forward_arg_shape,
                                      padding_below,
                                      padding_above,
                                      m_window_shape,
                                      m_window_movement_strides,
                                      m_include_padding_in_avg_computation);

    const PartialShape& delta_shape = get_input_partial_shape(0);

    NODE_VALIDATION_CHECK(
        this,
        forward_result_shape.compatible(delta_shape),
        "Inferred forward output shape does not match delta shape (inferred forward output ",
        "shape: ",
        forward_result_shape,
        ", delta shape: ",
        delta_shape,
        ").");

    // TODO(amprocte): Once m_forward_arg_shape is allowed to be dynamic, we may technically be
    // able to infer some extra information from forward_result_shape that was not present in the
    // forward arg shape---namely batch size and channel count. Merge that info in.
    set_output_type(0, get_input_element_type(0), m_forward_arg_shape);
}

const Shape& op::v0::AvgPoolBackprop::get_forward_arg_shape() const
{
    return m_forward_arg_shape;
}

void op::v0::AvgPoolBackprop::set_forward_arg_shape(const Shape& forward_arg_shape)
{
    m_forward_arg_shape = forward_arg_shape;
}

const Shape& op::v0::AvgPoolBackprop::get_window_shape() const
{
    return m_window_shape;
}

void op::v0::AvgPoolBackprop::set_window_shape(const Shape& window_shape)
{
    m_window_shape = window_shape;
}

const Strides& op::v0::AvgPoolBackprop::get_window_movement_strides() const
{
    return m_window_movement_strides;
}

void op::v0::AvgPoolBackprop::set_window_movement_strides(const Strides& window_movement_strides)
{
    m_window_movement_strides = window_movement_strides;
}

const Shape& op::v0::AvgPoolBackprop::get_padding_below() const
{
    return m_padding_below;
}

void op::v0::AvgPoolBackprop::set_padding_below(const Shape& padding_below)
{
    m_padding_below = padding_below;
}

const Shape& op::v0::AvgPoolBackprop::get_padding_above() const
{
    return m_padding_above;
}

void op::v0::AvgPoolBackprop::set_padding_above(const Shape& padding_above)
{
    m_padding_above = padding_above;
}

bool op::v0::AvgPoolBackprop::get_include_padding_in_avg_computation() const
{
    return m_include_padding_in_avg_computation;
}

void op::v0::AvgPoolBackprop::set_include_padding_in_avg_computation(
    bool include_padding_in_avg_computation)
{
    m_include_padding_in_avg_computation = include_padding_in_avg_computation;
}

shared_ptr<Node> op::v0::AvgPoolBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<v0::AvgPoolBackprop>(m_forward_arg_shape,
                                            new_args.at(0),
                                            m_window_shape,
                                            m_window_movement_strides,
                                            m_padding_below,
                                            m_padding_above,
                                            m_include_padding_in_avg_computation);
}

void op::v0::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    if (m_ceil_mode)
    {
        throw ngraph_error("Autodiff not supported on AvgPool with ceil_mode set");
    }

    auto delta = deltas.at(0);

    auto operand = input_value(0);
    auto& operand_shape = get_input_shape(0);
    auto backprop = make_shared<op::v0::AvgPoolBackprop>(operand_shape,
                                                         delta,
                                                         m_window_shape,
                                                         m_window_movement_strides,
                                                         m_padding_below,
                                                         m_padding_above,
                                                         m_include_padding_in_avg_computation);
    adjoints.add_delta(operand, backprop);
}

// *** AvgPool OP SET 1 ***
constexpr NodeTypeInfo op::v1::AvgPool::type_info;
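// Sketch (not part of this file): how the generate_adjoints hook above is used.
// For y = AvgPool(x), nGraph's autodiff calls generate_adjoints with dL/dy as
// deltas.at(0); the hook then registers
//   dL/dx = AvgPoolBackprop(x_shape, dL/dy, window, strides, pads, ...)
// which spreads each delta element back over its pooling window, divided by the
// window size (or by the count of non-padding elements when
// include_padding_in_avg_computation is false).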
@@ -594,155 +211,6 @@ shared_ptr<Node> op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_
                                    m_auto_pad);
}

constexpr NodeTypeInfo op::v1::AvgPoolBackprop::type_info;

op::v1::AvgPoolBackprop::AvgPoolBackprop(const Output<Node>& delta,
                                         const Output<Node>& forward_arg_shape,
                                         const Strides& strides,
                                         const Shape& pads_begin,
                                         const Shape& pads_end,
                                         const Shape& kernel,
                                         bool exclude_pad)
    : Op({delta, forward_arg_shape})
    , m_kernel(kernel)
    , m_strides(strides)
    , m_pads_begin(pads_begin)
    , m_pads_end(pads_end)
    , m_exclude_pad(exclude_pad)
{
    constructor_validate_and_infer_types();
}

bool op::v1::AvgPoolBackprop::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("kernel", m_kernel);
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("pads_begin", m_pads_begin);
    visitor.on_attribute("pads_end", m_pads_end);
    visitor.on_attribute("exclude_pad", m_exclude_pad);
    return true;
}

const Shape op::v1::AvgPoolBackprop::get_forward_arg_shape() const
{
    Shape shape;
    if (auto const_op = as_type<op::Constant>(input_value(1).get_node()))
    {
        shape = const_op->get_shape_val();
    }
    return shape;
}

void op::v1::AvgPoolBackprop::validate_and_infer_types()
{
    // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
    // now still take Shape (no negative padding).
    CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
    CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());

    PartialShape forward_arg_shape{PartialShape::dynamic()};

    if (input_value(1).get_node_shared_ptr()->is_constant())
    {
        forward_arg_shape = get_forward_arg_shape();
    }

    PartialShape forward_result_shape = infer_batched_pooling_forward(
        this, forward_arg_shape, pads_begin, pads_end, m_kernel, m_strides, m_exclude_pad);

    const PartialShape& delta_shape = get_input_partial_shape(0);

    NODE_VALIDATION_CHECK(
        this,
        forward_result_shape.compatible(delta_shape),
        "Inferred forward output shape does not match delta shape (inferred forward output ",
        "shape: ",
        forward_result_shape,
        ", delta shape: ",
        delta_shape,
        ").");

    set_input_is_relevant_to_shape(1);
    set_output_type(0, get_input_element_type(0), forward_arg_shape);
}

const Shape& op::v1::AvgPoolBackprop::get_kernel() const
{
    return m_kernel;
}

void op::v1::AvgPoolBackprop::set_kernel(const Shape& kernel)
{
    m_kernel = kernel;
}

const Strides& op::v1::AvgPoolBackprop::get_strides() const
{
    return m_strides;
}

void op::v1::AvgPoolBackprop::set_strides(const Strides& strides)
{
    m_strides = strides;
}

const Shape& op::v1::AvgPoolBackprop::get_pads_begin() const
{
    return m_pads_begin;
}

void op::v1::AvgPoolBackprop::set_pads_begin(const Shape& pads_begin)
{
    m_pads_begin = pads_begin;
}

const Shape& op::v1::AvgPoolBackprop::get_pads_end() const
{
    return m_pads_end;
}

void op::v1::AvgPoolBackprop::set_pads_end(const Shape& pads_end)
{
    m_pads_end = pads_end;
}

bool op::v1::AvgPoolBackprop::get_exclude_pad() const
{
    return m_exclude_pad;
}

void op::v1::AvgPoolBackprop::set_exclude_pad(bool exclude_pad)
{
    m_exclude_pad = exclude_pad;
}

shared_ptr<Node> op::v1::AvgPoolBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<v1::AvgPoolBackprop>(new_args.at(0),
                                            new_args.at(1),
                                            m_strides,
                                            m_pads_begin,
                                            m_pads_end,
                                            m_kernel,
                                            m_exclude_pad);
}

void op::v1::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    if (m_rounding_type == op::RoundingType::CEIL)
    {
        throw ngraph_error("Autodiff not supported on AvgPool with ceil_mode set");
    }

    auto delta = deltas.at(0);

    auto operand = input_value(0);
    auto backprop = make_shared<op::v1::AvgPoolBackprop>(
        delta, input_value(1), m_strides, m_pads_begin, m_pads_end, m_kernel, m_exclude_pad);
    adjoints.add_delta(operand, backprop);
}

shared_ptr<Node> op::v1::AvgPool::get_default_value() const
{
    return op::Constant::create(get_element_type(), get_shape(), {0});
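As a construction sketch of the (now removed) v1 backprop op above, with the forward shape passed as a Constant input so validate_and_infer_types can recover a static output shape (hypothetical sizes):

    auto delta = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 2, 2});
    auto fwd_shape = op::Constant::create(element::i64, Shape{4}, {1, 3, 4, 4});
    auto bprop = std::make_shared<op::v1::AvgPoolBackprop>(
        delta, fwd_shape, Strides{2, 2}, Shape{0, 0}, Shape{0, 0}, Shape{2, 2}, true);
    // bprop's output 0 has shape {1, 3, 4, 4}, matching the forward input.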
@@ -23,199 +23,6 @@ namespace ngraph
{
    namespace op
    {
        namespace v0
        {
            /// \brief Batched average pooling operation, with optional padding and window stride.
            ///
            class NGRAPH_API AvgPool : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"AvgPool", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a batched average pooling operation.
                AvgPool() = default;

                /// \brief Constructs a batched average pooling operation.
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                /// \param padding_below The below-padding shape.<br>
                /// `[n]`
                /// \param padding_above The above-padding shape.<br>
                /// `[n]`
                /// \param include_padding_in_avg_computation If true then averages include padding
                ///        elements, each treated as the number zero. If false, padding elements are
                ///        entirely ignored when computing averages.
                /// \param pad_type Padding type to use for additional padded dimensions.
                /// \param ceil_mode Whether to use ceiling while computing output shape.
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides,
                        const Shape& padding_below,
                        const Shape& padding_above,
                        bool include_padding_in_avg_computation,
                        const PadType& pad_type,
                        bool ceil_mode);

                /// \brief Constructs a batched average pooling operation.
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                /// \param padding_below The below-padding shape.<br>
                /// `[n]`
                /// \param padding_above The above-padding shape.<br>
                /// `[n]`
                /// \param include_padding_in_avg_computation If true then averages include padding
                ///        elements, each treated as the number zero. If false, padding elements are
                ///        entirely ignored when computing averages.
                /// \param pad_type Padding type to use for additional padded dimensions.
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides,
                        const Shape& padding_below,
                        const Shape& padding_above,
                        bool include_padding_in_avg_computation,
                        const PadType& pad_type);

                /// \brief Constructs a batched average pooling operation.
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                /// \param padding_below The below-padding shape.<br>
                /// `[n]`
                /// \param padding_above The above-padding shape.<br>
                /// `[n]`
                /// \param include_padding_in_avg_computation If true then averages include padding
                ///        elements, each treated as the number zero. If false, padding elements are
                ///        entirely ignored when computing averages.
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides,
                        const Shape& padding_below,
                        const Shape& padding_above,
                        bool include_padding_in_avg_computation = false);

                /// \brief Constructs a batched, unpadded average pooling operation (i.e., all
                ///        padding shapes are set to 0).
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, ..., dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides);

                /// \brief Constructs an unstrided batched average pooling operation (i.e., all
                ///        window movement strides are 1 and all padding shapes are set to 0).
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, ..., dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                AvgPool(const Output<Node>& arg, const Shape& window_shape);
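                // Usage sketch (not part of this header): the convenience overloads above
                // all funnel into the full constructor. E.g. a 2x2, stride-2, unpadded
                // average pool over an NCHW input:
                //   auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 4, 4});
                //   auto pool = std::make_shared<op::v0::AvgPool>(data, Shape{2, 2}, Strides{2, 2});
                //   // pool->get_shape() == Shape{1, 3, 2, 2}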
                bool visit_attributes(AttributeVisitor& visitor) override;

                void validate_and_infer_types() override;

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;

                /// \return The window shape.
                const Shape& get_window_shape() const;
                void set_window_shape(const Shape& window_shape);
                /// \return The window movement strides.
                const Strides& get_window_movement_strides() const;
                void set_window_movement_strides(const Strides& window_movement_strides);
                /// \return The below-padding shape.
                const Shape& get_padding_below() const;
                void set_padding_below(const Shape& padding_below);
                /// \return The above-padding shape.
                const Shape& get_padding_above() const;
                void set_padding_above(const Shape& padding_above);
                bool get_include_padding_in_avg_computation() const;
                void
                    set_include_padding_in_avg_computation(bool include_padding_in_avg_computation);
                /// \return The pad type for pooling.
                const PadType& get_pad_type() const;
                void set_pad_type(const PadType& pad_type);
                bool get_ceil_mode() const;
                void set_ceil_mode(bool ceil_mode);
                /// \return The default value for AvgPool.
                virtual std::shared_ptr<Node> get_default_value() const override;

            protected:
                Shape m_window_shape;
                Strides m_window_movement_strides;
                Shape m_padding_below;
                Shape m_padding_above;
                bool m_include_padding_in_avg_computation{false};
                PadType m_pad_type{PadType::EXPLICIT};
                bool m_ceil_mode{false};
            };

            class NGRAPH_API AvgPoolBackprop : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"AvgPoolBackprop", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                AvgPoolBackprop() = default;
                AvgPoolBackprop(const Shape& forward_arg_shape,
                                const Output<Node>& delta,
                                const Shape& window_shape,
                                const Strides& window_movement_strides,
                                const Shape& padding_below,
                                const Shape& padding_above,
                                bool include_padding_in_avg_computation);

                void validate_and_infer_types() override;
                bool visit_attributes(AttributeVisitor& visitor) override;

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                const Shape& get_forward_arg_shape() const;
                void set_forward_arg_shape(const Shape& forward_arg_shape);
                const Shape& get_window_shape() const;
                void set_window_shape(const Shape& window_shape);
                const Strides& get_window_movement_strides() const;
                void set_window_movement_strides(const Strides& window_movement_strides);
                const Shape& get_padding_below() const;
                void set_padding_below(const Shape& padding_below);
                const Shape& get_padding_above() const;
                void set_padding_above(const Shape& padding_above);
                bool get_include_padding_in_avg_computation() const;
                void
                    set_include_padding_in_avg_computation(bool include_padding_in_avg_computation);

            protected:
                Shape m_forward_arg_shape;
                Shape m_window_shape;
                Strides m_window_movement_strides;
                Shape m_padding_below;
                Shape m_padding_above;
                bool m_include_padding_in_avg_computation{false};
            };
        } // namespace v0

        namespace v1
        {
            /// \brief Batched average pooling operation.
@@ -285,9 +92,6 @@ namespace ngraph
                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;

                /// \return The kernel shape.
                const Shape& get_kernel() const;
                void set_kernel(const Shape& kernel);
@@ -319,50 +123,8 @@ namespace ngraph
                PadType m_auto_pad{PadType::EXPLICIT};
                op::RoundingType m_rounding_type{op::RoundingType::FLOOR};
            };

            class NGRAPH_API AvgPoolBackprop : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"AvgPoolBackprop", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                AvgPoolBackprop() = default;
                AvgPoolBackprop(const Output<Node>& delta,
                                const Output<Node>& forward_arg_shape,
                                const Strides& strides,
                                const Shape& pads_begin,
                                const Shape& pads_end,
                                const Shape& kernel,
                                bool exclude_pad);

                size_t get_version() const override { return 1; }
                void validate_and_infer_types() override;
                bool visit_attributes(AttributeVisitor& visitor) override;

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                const Shape get_forward_arg_shape() const;
                const Shape& get_kernel() const;
                void set_kernel(const Shape& kernel);
                const Strides& get_strides() const;
                void set_strides(const Strides& strides);
                const Shape& get_pads_begin() const;
                void set_pads_begin(const Shape& pads_begin);
                const Shape& get_pads_end() const;
                void set_pads_end(const Shape& pads_end);
                bool get_exclude_pad() const;
                void set_exclude_pad(bool exclude_pad);

            protected:
                Shape m_kernel;
                Strides m_strides;
                Shape m_pads_begin;
                Shape m_pads_end;
                bool m_exclude_pad{false};
            };
        } // namespace v1

        using v0::AvgPool;
        using v0::AvgPoolBackprop;
        using v1::AvgPool;
    } // namespace op
} // namespace ngraph
@@ -82,34 +82,6 @@ std::shared_ptr<Node>
        new_args.at(2), new_args.at(0), new_args.at(1), m_epsilon);
}

void op::BatchNormTraining::generate_adjoints(autodiff::Adjoints& adjoints,
                                              const OutputVector& deltas)
{
    auto gamma = input_value(0);
    auto beta = input_value(1);
    auto data = input_value(2);

    // Extract mean and variance outputs from BatchNormBase
    // as these are used by BatchNormTrainingBackprop.
    // The users of the outputs (GetOutputElements' Inputs) aren't sorted
    // and get_n() is used to sort the inputs in the same order as Batchnorm's outputs
    // Next, Mean and Variance (`at(1)` and `at(2)`) are extracted
    // Please see `add_output` in `BatchNormBase::BatchNormBase` for more details

    auto mean = output(1);
    auto var = output(2);

    auto bbn = std::make_shared<op::BatchNormTrainingBackprop>(
        data, gamma, beta, mean, var, deltas.at(0), get_eps_value());
    auto dinput = Output<Node>(bbn, 0);
    auto dgamma = Output<Node>(bbn, 1);
    auto dbeta = Output<Node>(bbn, 2);

    adjoints.add_delta(data, dinput);
    adjoints.add_delta(gamma, dgamma);
    adjoints.add_delta(beta, dbeta);
}
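// Sketch of the gradient flow wired up above (standard batch-norm backprop):
// BatchNormTrainingBackprop consumes (data, gamma, beta, mean, variance, delta)
// and exposes three outputs, consumed as
//   output 0 -> d(loss)/d(data)
//   output 1 -> d(loss)/d(gamma)
//   output 2 -> d(loss)/d(beta)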
constexpr NodeTypeInfo op::BatchNormInference::type_info;

op::BatchNormInference::BatchNormInference(const Output<Node>& input,
@@ -173,101 +145,3 @@ std::shared_ptr<Node>
    return std::make_shared<BatchNormInference>(
        new_args.at(2), new_args.at(0), new_args.at(1), new_args.at(3), new_args.at(4), m_epsilon);
}

constexpr NodeTypeInfo op::BatchNormTrainingBackprop::type_info;

op::BatchNormTrainingBackprop::BatchNormTrainingBackprop(const Output<Node>& input,
                                                         const Output<Node>& gamma,
                                                         const Output<Node>& beta,
                                                         const Output<Node>& mean,
                                                         const Output<Node>& variance,
                                                         const Output<Node>& delta,
                                                         double epsilon)
    : Op({gamma, beta, input, mean, variance, delta})
    , m_epsilon(epsilon)
{
    set_output_size(3);
    constructor_validate_and_infer_types();
}

op::BatchNormTrainingBackprop::BatchNormTrainingBackprop(double epsilon,
                                                         const Output<Node>& gamma,
                                                         const Output<Node>& beta,
                                                         const Output<Node>& input,
                                                         const Output<Node>& mean,
                                                         const Output<Node>& variance,
                                                         const Output<Node>& delta)
    : Op({gamma, beta, input, mean, variance, delta})
    , m_epsilon(epsilon)
{
    set_output_size(3);
    constructor_validate_and_infer_types();
}

bool op::BatchNormTrainingBackprop::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("epsilon", m_epsilon);
    return true;
}

void op::BatchNormTrainingBackprop::validate_and_infer_types()
{
    PartialShape input_and_delta_shape{get_input_partial_shape(INPUT_DATA)};

    NODE_VALIDATION_CHECK(
        this,
        PartialShape::merge_into(input_and_delta_shape, get_input_partial_shape(INPUT_DELTA)),
        "Shape of delta does not match the shape of the input data (input data shape: ",
        get_input_partial_shape(INPUT_DATA),
        ", delta shape: ",
        get_input_partial_shape(INPUT_DELTA),
        ").");

    element::Type input_and_delta_et;

    NODE_VALIDATION_CHECK(this,
                          element::Type::merge(input_and_delta_et,
                                               get_input_element_type(INPUT_DATA),
                                               get_input_element_type(INPUT_DELTA)),
                          "Element type for input (",
                          get_input_element_type(INPUT_DATA),
                          ") does not match element type for delta (",
                          get_input_element_type(INPUT_DELTA),
                          ").");

    element::Type result_et;
    PartialShape result_batch_shape;
    PartialShape result_channel_shape;

    std::tie(result_et, result_batch_shape, result_channel_shape) =
        infer_batch_norm_forward(this,
                                 input_and_delta_et,
                                 get_input_element_type(INPUT_GAMMA),
                                 get_input_element_type(INPUT_BETA),
                                 get_input_element_type(INPUT_MEAN),
                                 get_input_element_type(INPUT_VARIANCE),
                                 input_and_delta_shape,
                                 get_input_partial_shape(INPUT_GAMMA),
                                 get_input_partial_shape(INPUT_BETA),
                                 get_input_partial_shape(INPUT_MEAN),
                                 get_input_partial_shape(INPUT_VARIANCE));

    set_output_type(0, result_et, result_batch_shape);
    set_output_type(1, result_et, result_channel_shape);
    set_output_type(2, result_et, result_channel_shape);
}

std::shared_ptr<Node>
    op::BatchNormTrainingBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return std::make_shared<op::BatchNormTrainingBackprop>(new_args.at(2),
                                                           new_args.at(0),
                                                           new_args.at(1),
                                                           new_args.at(3),
                                                           new_args.at(4),
                                                           new_args.at(5),
                                                           m_epsilon);
}
@@ -81,9 +81,6 @@ namespace ngraph
                    clone_with_new_inputs(const OutputVector& new_args) const override;

            protected:
                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;

                static constexpr size_t INPUT_GAMMA = 0;
                static constexpr size_t INPUT_BETA = 1;
                static constexpr size_t INPUT_DATA = 2;
@@ -147,13 +144,6 @@ namespace ngraph
                std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

            protected:
                virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */,
                                               const OutputVector& /* deltas */) override
                {
                    throw ngraph_error("Invalid operation");
                }

            private:
                static constexpr size_t INPUT_GAMMA = 0;
                static constexpr size_t INPUT_BETA = 1;
@@ -163,53 +153,9 @@ namespace ngraph

            double m_epsilon;
        };
        } // namespace v0

        class NGRAPH_API BatchNormTrainingBackprop : public Op
        {
        public:
            static constexpr NodeTypeInfo type_info{"BatchNormTrainingBackprop", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            BatchNormTrainingBackprop() = default;
            BatchNormTrainingBackprop(const Output<Node>& input,
                                      const Output<Node>& gamma,
                                      const Output<Node>& beta,
                                      const Output<Node>& mean,
                                      const Output<Node>& variance,
                                      const Output<Node>& delta,
                                      double epsilon);

            bool visit_attributes(AttributeVisitor& visitor) override;

            NGRAPH_DEPRECATED_DOC
            NGRAPH_DEPRECATED("Use another constructor")
            BatchNormTrainingBackprop(double epsilon,
                                      const Output<Node>& gamma,
                                      const Output<Node>& beta,
                                      const Output<Node>& input,
                                      const Output<Node>& mean,
                                      const Output<Node>& variance,
                                      const Output<Node>& delta);

            void validate_and_infer_types() override;

            double get_eps_value() const { return m_epsilon; }
            void set_eps_value(double epsilon) { m_epsilon = epsilon; }
            std::shared_ptr<Node>
                clone_with_new_inputs(const OutputVector& new_args) const override;

        private:
            static constexpr size_t INPUT_GAMMA = 0;
            static constexpr size_t INPUT_BETA = 1;
            static constexpr size_t INPUT_DATA = 2;
            static constexpr size_t INPUT_MEAN = 3;
            static constexpr size_t INPUT_VARIANCE = 4;
            static constexpr size_t INPUT_DELTA = 5;

            double m_epsilon;
        };
        }
        using v0::BatchNormInference;
        using v0::BatchNormTraining;
        using v0::BatchNormTrainingBackprop;
    }
}
@@ -157,16 +157,6 @@ void op::v1::Convolution::generate_adjoints(autodiff::Adjoints& adjoints,
                                       m_pads_end,
                                       m_dilations,
                                       m_auto_pad));

    adjoints.add_delta(f,
                       make_shared<op::v1::ConvolutionBackpropFilters>(
                           x,
                           delta,
                           op::Constant::create(element::i64, Shape{x_shape.size()}, f_shape),
                           m_strides,
                           m_dilations,
                           m_pads_begin,
                           m_pads_end));
}

constexpr NodeTypeInfo op::v1::ConvolutionBackpropData::type_info;
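// Gradient wiring sketch for the hunk above: for y = Convolution(x, f), autodiff registers
//   dL/dx via ConvolutionBackpropData   (delta run back through the filters)
//   dL/df via ConvolutionBackpropFilters (data correlated against the delta)
// with the forward strides/dilations/pads carried over from the forward node, and the
// filter shape supplied as an i64 Constant input as shown above.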
@@ -537,160 +527,6 @@ shared_ptr<Node>
    }
}

constexpr NodeTypeInfo op::v1::ConvolutionBackpropFilters::type_info;

op::v1::ConvolutionBackpropFilters::ConvolutionBackpropFilters(const Output<Node>& data_batch,
                                                               const Output<Node>& output_delta,
                                                               const Output<Node>& filters_shape,
                                                               const Strides& strides,
                                                               const Strides& dilations,
                                                               const CoordinateDiff& pads_begin,
                                                               const CoordinateDiff& pads_end)
    : Op({data_batch, output_delta, filters_shape})
    , m_strides(strides)
    , m_dilations(dilations)
    , m_pads_begin(pads_begin)
    , m_pads_end(pads_end)
{
    constructor_validate_and_infer_types();
}

bool op::v1::ConvolutionBackpropFilters::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("strides", m_strides);
    visitor.on_attribute("dilations", m_dilations);
    visitor.on_attribute("pads_begin", m_pads_begin);
    visitor.on_attribute("pads_end", m_pads_end);
    return true;
}

const Shape op::v1::ConvolutionBackpropFilters::get_filters_shape() const
{
    Shape shape;
    if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
    {
        shape = const_op->get_shape_val();
    }
    return shape;
}

void op::v1::ConvolutionBackpropFilters::validate_and_infer_types()
{
    // Backprop to filters is itself convolution, with inputs/outputs/attributes transmogrified as
    // follows.
    //
    //                            Forward   Backward
    // "N" axis for data batch    0         1
    // "C" axis for data batch    1         0
    // "Co" axis for filters      0         0
    // "Ci" axis for filters      1         1
    // "N" axis for output        0         1
    // "C" axis for output        1         0
    // Data batch                 x         x
    // Data batch shape           S_x       S_x
    // Filters                    f         delta
    // Filters shape              S_f       S_f
    // Window movement strides    q_x       p_f
    // Window dilation strides    p_f       q_x
    // Padding below              a_x       a_x
    // Padding above              b_x       b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x
    // Output shape               S_o       S_f
    //
    // To _validate_, we simply need to check/infer the output shape of the forward convolution,
    // then check to make sure that the incoming delta has the same shape as the forward output.
    //
    // We will also compute and store the various parameters in the "backward" column above, since
    // some backends need them. (TODO(amprocte): Is it just because of the way the reference works
    // that this stuff is needed? If so, we can probably get rid of it and have conv_backprop
    // reference kernels that do the calculations of the backward parameters internally, or supply
    // utility functions to do it.)

    const PartialShape& data_batch_shape = get_input_partial_shape(0);
    element::Type data_batch_et = get_input_element_type(0);
    const PartialShape& delta_shape = get_input_partial_shape(1);
    element::Type delta_et = get_input_element_type(1);

    element::Type forward_result_et;
    PartialShape forward_result_shape;
    PartialShape filters_shape{PartialShape::dynamic()};

    if (input_value(2).get_node_shared_ptr()->is_constant())
    {
        filters_shape = get_filters_shape();
    }

    NODE_VALIDATION_CHECK(
        this,
        element::Type::merge(forward_result_et, data_batch_et, delta_et),
        "Element types for data batch and filters do not match (data batch element type: ",
        data_batch_et,
        ", filters element type: ",
        delta_et,
        ").");

    if (input_value(1).get_node_shared_ptr()->is_constant())
    {
        forward_result_shape =
            infer_convolution_forward(this,
                                      data_batch_shape,
                                      Strides(data_batch_shape.rank().get_length() - 2, 1),
                                      m_pads_begin,
                                      m_pads_end,
                                      filters_shape,
                                      m_strides,
                                      m_dilations);

        NODE_VALIDATION_CHECK(this,
                              forward_result_shape.compatible(delta_shape),
                              "Inferred forward output shape (",
                              forward_result_shape,
                              ") does not match shape of ",
                              "delta (",
                              delta_shape,
                              ").");
    }

    set_input_is_relevant_to_shape(2);
    set_output_type(0, forward_result_et, filters_shape);
}

shared_ptr<Node>
    op::v1::ConvolutionBackpropFilters::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<v1::ConvolutionBackpropFilters>(new_args.at(0),
                                                       new_args.at(1),
                                                       new_args.at(2),
                                                       m_strides,
                                                       m_dilations,
                                                       m_pads_begin,
                                                       m_pads_end);
}

CoordinateDiff op::v1::ConvolutionBackpropFilters::compute_backward_in_pad_above() const
{
    const auto& in_shape = get_input_shape(0);
    const auto& out_shape = get_input_shape(1);
    const auto& filter_shape = get_filters_shape();
    const auto& in_pad_above = get_pads_end();
    const auto& in_pad_below = get_pads_begin();
    const auto& filter_dilation = get_dilations();
    const auto& stride = get_strides();
    size_t spatial_dim_count = static_cast<size_t>(out_shape.size()) - 2;
    CoordinateDiff backward_in_pad_above;
    backward_in_pad_above.resize(spatial_dim_count);

    for (size_t i = 0; i < spatial_dim_count; i++)
    {
        backward_in_pad_above[i] =
            in_pad_above[i] -
            (in_pad_below[i] + (static_cast<ptrdiff_t>(in_shape[i + 2]) - 1) + in_pad_above[i] -
             (filter_shape[i + 2] - 1) * filter_dilation[i]) %
                stride[i];
    }
    return backward_in_pad_above;
}
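// Worked example (hypothetical sizes) of the backward pad-above formula implemented
// above, with no filter dilation (p_f = 1):
//   b_x' = b_x - (a_x + (S_x - 1) + b_x - (S_f - 1) * p_f) % q_x
// For S_x = 7, S_f = 3, a_x = b_x = 1, q_x = 2:
//   b_x' = 1 - (1 + 6 + 1 - 2) % 2 = 1 - 0 = 1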
// *** Convolution OP SET 0 ***
constexpr NodeTypeInfo op::v0::Convolution::type_info;
@@ -878,16 +714,6 @@ void op::v0::Convolution::generate_adjoints(autodiff::Adjoints& adjoints,
                                                     m_padding_below,
                                                     m_padding_above,
                                                     m_data_dilation_strides));

    adjoints.add_delta(f,
                       make_shared<op::v0::ConvolutionBackpropFilters>(x,
                                                                       f_shape,
                                                                       delta,
                                                                       m_window_movement_strides,
                                                                       m_window_dilation_strides,
                                                                       m_padding_below,
                                                                       m_padding_above,
                                                                       m_data_dilation_strides));
}

constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info;
@@ -1136,148 +962,6 @@ CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_a
    return backward_delta_out_pad_above;
}

constexpr NodeTypeInfo op::v0::ConvolutionBackpropFilters::type_info;

op::v0::ConvolutionBackpropFilters::ConvolutionBackpropFilters(
    const Output<Node>& data_batch,
    const Shape& filters_shape,
    const Output<Node>& output_delta,
    const Strides& window_movement_strides_forward,
    const Strides& window_dilation_strides_forward,
    const CoordinateDiff& padding_below_forward,
    const CoordinateDiff& padding_above_forward,
    const Strides& data_dilation_strides_forward)
    : Op({data_batch, output_delta})
    , m_filters_shape(filters_shape)
    , m_window_movement_strides_forward(window_movement_strides_forward)
    , m_window_dilation_strides_forward(window_dilation_strides_forward)
    , m_padding_below_forward(padding_below_forward)
    , m_padding_above_forward(padding_above_forward)
    , m_data_dilation_strides_forward(data_dilation_strides_forward)
{
    constructor_validate_and_infer_types();
}

bool op::v0::ConvolutionBackpropFilters::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("m_filters_shape", m_filters_shape);
    visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward);
    visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward);
    visitor.on_attribute("padding_below_forward", m_padding_below_forward);
    visitor.on_attribute("padding_above_forward", m_padding_above_forward);
    visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward);
    return true;
}

void op::v0::ConvolutionBackpropFilters::validate_and_infer_types()
{
    // Backprop to filters is itself convolution, with inputs/outputs/attributes transmogrified as
    // follows.
    //
    //                            Forward   Backward
    // "N" axis for data batch    0         1
    // "C" axis for data batch    1         0
    // "Co" axis for filters      0         0
    // "Ci" axis for filters      1         1
    // "N" axis for output        0         1
    // "C" axis for output        1         0
    // Data batch                 x         x
    // Data batch shape           S_x       S_x
    // Filters                    f         delta
    // Filters shape              S_f       S_f
    // Window movement strides    q_x       p_f
    // Window dilation strides    p_f       q_x
    // Padding below              a_x       a_x
    // Padding above              b_x       b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x
    // Data dilation strides      p_x       p_x
    // Output shape               S_o       S_f
    //
    // To _validate_, we simply need to check/infer the output shape of the forward convolution,
    // then check to make sure that the incoming delta has the same shape as the forward output.
    //
    // We will also compute and store the various parameters in the "backward" column above, since
    // some backends need them. (TODO(amprocte): Is it just because of the way the reference works
    // that this stuff is needed? If so, we can probably get rid of it and have conv_backprop
    // reference kernels that do the calculations of the backward parameters internally, or supply
    // utility functions to do it.)

    const PartialShape& data_batch_shape = get_input_partial_shape(0);
    element::Type data_batch_et = get_input_element_type(0);
    const PartialShape& delta_shape = get_input_partial_shape(1);
    element::Type delta_et = get_input_element_type(1);

    element::Type forward_result_et;
    PartialShape forward_result_shape;

    NODE_VALIDATION_CHECK(
        this,
        element::Type::merge(forward_result_et, data_batch_et, delta_et),
        "Element types for data batch and filters do not match (data batch element type: ",
        data_batch_et,
        ", filters element type: ",
        delta_et,
        ").");

    forward_result_shape = infer_convolution_forward(this,
                                                     data_batch_shape,
                                                     m_data_dilation_strides_forward,
                                                     m_padding_below_forward,
                                                     m_padding_above_forward,
                                                     m_filters_shape,
                                                     m_window_movement_strides_forward,
                                                     m_window_dilation_strides_forward);

    NODE_VALIDATION_CHECK(this,
                          forward_result_shape.compatible(delta_shape),
                          "Inferred forward output shape (",
                          forward_result_shape,
                          ") does not match shape of ",
                          "delta (",
                          delta_shape,
                          ").");

    set_output_type(0, forward_result_et, m_filters_shape);
}

shared_ptr<Node>
    op::v0::ConvolutionBackpropFilters::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<v0::ConvolutionBackpropFilters>(new_args.at(0),
                                                       m_filters_shape,
                                                       new_args.at(1),
                                                       m_window_movement_strides_forward,
                                                       m_window_dilation_strides_forward,
                                                       m_padding_below_forward,
                                                       m_padding_above_forward,
                                                       m_data_dilation_strides_forward);
}

CoordinateDiff op::v0::ConvolutionBackpropFilters::compute_backward_in_pad_above() const
{
    const auto& in_shape = get_input_shape(0);
    const auto& out_shape = get_input_shape(1);
    const auto& filter_shape = get_filters_shape();
    const auto& in_pad_above = get_padding_above_forward();
    const auto& in_pad_below = get_padding_below_forward();
    const auto& in_dilation = get_data_dilation_strides_forward();
    const auto& filter_dilation = get_window_dilation_strides_forward();
    const auto& stride = get_window_movement_strides_forward();
    size_t spatial_dim_count = static_cast<size_t>(out_shape.size()) - 2;
    CoordinateDiff backward_in_pad_above;
    backward_in_pad_above.resize(spatial_dim_count);

    for (size_t i = 0; i < spatial_dim_count; i++)
    {
        backward_in_pad_above[i] =
            in_pad_above[i] -
            (in_pad_below[i] + (static_cast<ptrdiff_t>(in_shape[i + 2]) - 1) * in_dilation[i] +
             in_pad_above[i] - (filter_shape[i + 2] - 1) * filter_dilation[i]) %
                stride[i];
    }
    return backward_in_pad_above;
}

//
// This is a legacy function, retained because the CPU backend uses it for now.
// TODO(amprocte): Update CPU backend to use the new stuff in validation_util.hpp, and remove this
@@ -220,62 +220,6 @@ namespace ngraph
                PadType m_auto_pad;
                CoordinateDiff m_output_padding;
            };

            /// \brief Filters backprop for batched convolution operation.
            class NGRAPH_API ConvolutionBackpropFilters : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"ConvolutionBackpropFilters", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a batched-convolution filter-backprop operation.
                ConvolutionBackpropFilters() = default;
                /// \brief Constructs a batched-convolution filter-backprop operation.
                ///
                /// \param data_batch The tensor producing the data batch from forward-prop.
                /// \param output_delta The node producing output delta.
                /// \param filters_shape The shape of the filters from forward-prop.
                /// \param strides The strides from forward-prop.
                /// \param dilations The dilations from forward-prop.
                /// \param pads_begin The padding-below sizes from forward-prop.
                /// \param pads_end The padding-above sizes from forward-prop.
                ConvolutionBackpropFilters(const Output<Node>& data_batch,
                                           const Output<Node>& output_delta,
                                           const Output<Node>& filters_shape,
                                           const Strides& strides,
                                           const Strides& dilations,
                                           const CoordinateDiff& pads_begin,
                                           const CoordinateDiff& pads_end);

                void validate_and_infer_types() override;
                bool visit_attributes(AttributeVisitor& visitor) override;

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                /// \return The filters tensor shape.
                const Shape get_filters_shape() const;
                /// \return The strides from the forward prop.
                const Strides& get_strides() const { return m_strides; }
                void set_strides(const Strides& strides) { m_strides = strides; }
                /// \return The dilations from the forward prop.
                const Strides& get_dilations() const { return m_dilations; }
                void set_dilations(const Strides& dilations) { m_dilations = dilations; }
                /// \return The padding-below sizes (possibly negative) from the forward prop.
                const CoordinateDiff& get_pads_begin() const { return m_pads_begin; }
                void set_pads_begin(const CoordinateDiff& pads_begin) { m_pads_begin = pads_begin; }
                /// \return The padding-above sizes (possibly negative) from the forward prop.
                const CoordinateDiff& get_pads_end() const { return m_pads_end; }
                void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; }
                // Compute the pad_above value to be used in a convolution
                CoordinateDiff compute_backward_in_pad_above() const;

            protected:
                Strides m_strides;
                Strides m_dilations;
                CoordinateDiff m_pads_begin;
                CoordinateDiff m_pads_end;
            };

        } // namespace v1

        namespace v0
@ -566,103 +510,6 @@ namespace ngraph
|
||||
CoordinateDiff m_padding_above_forward;
|
||||
Strides m_data_dilation_strides_forward;
|
||||
};
|
||||
|
||||
/// \brief Filters backprop for batched convolution operation.
|
||||
class NGRAPH_API ConvolutionBackpropFilters : public Op
|
||||
{
|
||||
public:
|
||||
static constexpr NodeTypeInfo type_info{"ConvolutionBackpropFilters", 0};
|
||||
const NodeTypeInfo& get_type_info() const override { return type_info; }
|
||||
/// \brief Constructs a batched-convolution filter-backprop operation.
|
||||
ConvolutionBackpropFilters() = default;
|
||||
/// \brief Constructs a batched-convolution filter-backprop operation.
|
||||
///
|
||||
/// \param data_batch The tensor producing the data batch from forward-prop.
|
||||
/// \param filters_shape The shape of the filters from forward-prop.
|
||||
/// \param output_delta The node producing output delta.
|
||||
/// \param window_movement_strides_forward The window movement strides from
|
||||
/// forward-prop. \param window_dilation_strides_forward The window dilation strides
|
||||
/// from forward-prop. \param padding_below_forward The padding-below sizes from
|
||||
/// forward-prop. \param padding_above_forward The padding-above sizes from
|
||||
/// forward-prop. \param data_dilation_strides_forward The data dilation strides
|
||||
/// from forward-prop.
|
||||
ConvolutionBackpropFilters(const Output<Node>& data_batch,
|
||||
const Shape& filters_shape,
|
||||
const Output<Node>& output_delta,
|
||||
const Strides& window_movement_strides_forward,
|
||||
const Strides& window_dilation_strides_forward,
|
||||
const CoordinateDiff& padding_below_forward,
|
||||
const CoordinateDiff& padding_above_forward,
|
||||
const Strides& data_dilation_strides_forward);
|
||||
|
||||
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

/// \return The filters tensor shape.
const Shape& get_filters_shape() const { return m_filters_shape; }
/// \return The window movement strides from the forward prop.
const Strides& get_window_movement_strides_forward() const
{
return m_window_movement_strides_forward;
}
void set_window_movement_strides_forward(
const Strides& window_movement_strides_forward)
{
m_window_movement_strides_forward = window_movement_strides_forward;
}
/// \return The window dilation strides from the forward prop.
const Strides& get_window_dilation_strides_forward() const
{
return m_window_dilation_strides_forward;
}
void set_window_dilation_strides_forward(
const Strides& window_dilation_strides_forward)
{
m_window_dilation_strides_forward = window_dilation_strides_forward;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_below_forward() const
{
return m_padding_below_forward;
}
void set_padding_below_forward(const CoordinateDiff& padding_below_forward)
{
m_padding_below_forward = padding_below_forward;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_above_forward() const
{
return m_padding_above_forward;
}
void set_padding_above_forward(const CoordinateDiff& padding_above_forward)
{
m_padding_above_forward = padding_above_forward;
}
/// \return The data dilation strides from the forward prop.
const Strides& get_data_dilation_strides_forward() const
{
return m_data_dilation_strides_forward;
}
void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward)
{
m_data_dilation_strides_forward = data_dilation_strides_forward;
}

// Compute the pad_above value to be used in a convolution
CoordinateDiff compute_backward_in_pad_above() const;

protected:
Shape m_filters_shape;
Strides m_window_movement_strides_forward;
Strides m_window_dilation_strides_forward;
CoordinateDiff m_padding_below_forward;
CoordinateDiff m_padding_above_forward;
Strides m_data_dilation_strides_forward;
};

} // namespace v0

namespace util
@ -689,6 +536,5 @@ namespace ngraph

using v0::Convolution;
using v0::ConvolutionBackpropData;
using v0::ConvolutionBackpropFilters;
} // namespace op
} // namespace ngraph

@ -28,7 +28,6 @@ using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::ConvolutionBias::type_info;
constexpr NodeTypeInfo op::ConvolutionBiasBackpropFiltersBias::type_info;
constexpr NodeTypeInfo op::ConvolutionBiasAdd::type_info;

static void validate_convbias_shapes(const Node* node,
@ -225,142 +224,6 @@ NodeVector op::ConvolutionBias::decompose_op() const
}
}

void op::ConvolutionBias::generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas)
{
auto delta = deltas.at(0);
if (m_with_relu)
{
delta = make_shared<op::ReluBackprop>(shared_from_this(), delta);
}

auto data = input_value(0);
const auto data_shape = data.get_shape();

auto filter = input_value(1);
const auto filter_shape = filter.get_shape();

auto bias = input_value(2);
const auto bias_shape = bias.get_shape();

// using regular convolution backprop for data
adjoints.add_delta(data,
make_shared<op::ConvolutionBackpropData>(data_shape,
filter,
delta,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides));

auto filter_bias_backprop =
make_shared<op::ConvolutionBiasBackpropFiltersBias>(data,
filter_shape,
bias_shape,
delta,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides);
auto filter_delta = Output<Node>(filter_bias_backprop, 0);
auto bias_delta = Output<Node>(filter_bias_backprop, 1);

adjoints.add_delta(filter, filter_delta);
adjoints.add_delta(bias, bias_delta);
}

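For reference, the three adjoints registered above are the standard gradients of y = conv(x, W) + b (optionally followed by ReLU), assuming the usual NCHW layout:

$$\frac{\partial L}{\partial x} = \mathrm{ConvBackpropData}(W,\ \delta), \qquad \frac{\partial L}{\partial W} = \mathrm{ConvBackpropFilters}(x,\ \delta), \qquad \frac{\partial L}{\partial b_c} = \sum_{n,h,w} \delta_{n,c,h,w}.$$
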
op::ConvolutionBiasBackpropFiltersBias::ConvolutionBiasBackpropFiltersBias(
const Output<Node>& data_batch,
const Shape& filters_shape,
const Shape& bias_shape,
const Output<Node>& output_delta,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward)
: FusedOp({data_batch, output_delta})
, m_filters_shape(filters_shape)
, m_bias_shape(bias_shape)
, m_window_movement_strides_forward(window_movement_strides_forward)
, m_window_dilation_strides_forward(window_dilation_strides_forward)
, m_padding_below_forward(padding_below_forward)
, m_padding_above_forward(padding_above_forward)
, m_data_dilation_strides_forward(data_dilation_strides_forward)
{
auto& data_batch_shape = get_input_shape(0);

//                          Forward   Backward
// Window movement strides  q         p_f
// Window dilation strides  p_f       q
// Padding below            a_x       a_x
// Padding above            b_x       b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q
// Data dilation strides    p_x       p_x

for (size_t i = 0; i < filters_shape.size() - 2; i++)
{
m_window_movement_strides_backward.push_back(window_dilation_strides_forward[i]);
m_window_dilation_strides_backward.push_back(window_movement_strides_forward[i]);
m_padding_below_backward.push_back(padding_below_forward[i]);
m_padding_above_backward.push_back(
padding_above_forward[i] -
(padding_below_forward[i] +
(data_batch_shape[i + 2] - 1) * data_dilation_strides_forward[i] +
padding_above_forward[i] -
(filters_shape[i + 2] - 1) * window_dilation_strides_forward[i]) %
window_movement_strides_forward[i]);
m_data_dilation_strides_backward.push_back(data_dilation_strides_forward[i]);
}

constructor_validate_and_infer_types();
}

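In symbols (S_x: data spatial size, S_f: filter spatial size, p_x: data dilation, p_f: window dilation, q: window movement stride, a_x/b_x: padding below/above), the backward padding-above computed in the loop above is

$$b_x^{\text{bwd}} = b_x - \bigl(a_x + (S_x - 1)\,p_x + b_x - (S_f - 1)\,p_f\bigr) \bmod q,$$

i.e. the forward pad-above trimmed by the remainder left over when the forward window strides across the dilated, padded input.
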
shared_ptr<Node> op::ConvolutionBiasBackpropFiltersBias::clone_with_new_inputs(
const OutputVector& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<ConvolutionBiasBackpropFiltersBias>(new_args.at(0),
m_filters_shape,
m_bias_shape,
new_args.at(1),
m_window_movement_strides_forward,
m_window_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
m_data_dilation_strides_forward);
}

NodeVector op::ConvolutionBiasBackpropFiltersBias::decompose_op() const
{
auto conv_bprop = make_shared<op::ConvolutionBackpropFilters>(input_value(0),
m_filters_shape,
input_value(1),
m_window_movement_strides_forward,
m_window_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
m_data_dilation_strides_forward);

AxisSet reduce_axes;
reduce_axes.insert(0);
for (size_t i = 2; i < conv_bprop->get_shape().size(); i++)
{
reduce_axes.insert(i);
}

auto bias_bprop = make_shared<op::Sum>(input_value(1), reduce_axes);

return {conv_bprop, bias_bprop};
}

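Note the axis set built here: for the 4-D case, reduce_axes is {0, 2, 3}, every axis of the delta except axis 1 (the channel axis), so bias_bprop reduces input_value(1), the output delta, down to a per-channel vector of shape [C].
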
op::ConvolutionBiasAdd::ConvolutionBiasAdd(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& bias,

@ -73,9 +73,6 @@ namespace ngraph

virtual void validate_and_infer_types() override;

virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;

protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
@ -85,99 +82,6 @@ namespace ngraph
bool m_with_relu;
};

/// \brief Filters and bias backprop for batched convolution operation. Data backprop is
/// the same as regular convolution backprop for data.
class NGRAPH_API ConvolutionBiasBackpropFiltersBias : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"ConvolutionBiasBackpropFiltersBias", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ConvolutionBiasBackpropFiltersBias() = default;
ConvolutionBiasBackpropFiltersBias(const Output<Node>& data_batch,
const Shape& filters_shape,
const Shape& bias_shape,
const Output<Node>& output_delta,
const Strides& window_movement_strides_forward,
const Strides& window_dilation_strides_forward,
const CoordinateDiff& padding_below_forward,
const CoordinateDiff& padding_above_forward,
const Strides& data_dilation_strides_forward);

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

/// \return The filters tensor shape.
const Shape& get_filters_shape() const { return m_filters_shape; }
/// \return The bias tensor shape.
const Shape& get_bias_shape() const { return m_bias_shape; }
/// \return The window movement strides from the forward prop.
const Strides& get_window_movement_strides_forward() const
{
return m_window_movement_strides_forward;
}
/// \return The window dilation strides from the forward prop.
const Strides& get_window_dilation_strides_forward() const
{
return m_window_dilation_strides_forward;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_below_forward() const
{
return m_padding_below_forward;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_padding_above_forward() const
{
return m_padding_above_forward;
}
/// \return The data dilation strides from the forward prop.
const Strides& get_data_dilation_strides_forward() const
{
return m_data_dilation_strides_forward;
}
/// \return The window movement strides for the backward prop.
const Strides& get_window_movement_strides_backward() const
{
return m_window_movement_strides_backward;
}
/// \return The window dilation strides for the backward prop.
const Strides& get_window_dilation_strides_backward() const
{
return m_window_dilation_strides_backward;
}
/// \return The padding-below sizes (possibly negative) for the backward prop.
const CoordinateDiff& get_padding_below_backward() const
{
return m_padding_below_backward;
}
/// \return The padding-above sizes (possibly negative) for the backward prop.
const CoordinateDiff& get_padding_above_backward() const
{
return m_padding_above_backward;
}
/// \return The data dilation strides for the backward prop.
const Strides& get_data_dilation_strides_backward() const
{
return m_data_dilation_strides_backward;
}
virtual NodeVector decompose_op() const override;

protected:
Shape m_filters_shape;
Shape m_bias_shape;
Strides m_window_movement_strides_forward;
Strides m_window_dilation_strides_forward;
CoordinateDiff m_padding_below_forward;
CoordinateDiff m_padding_above_forward;
Strides m_data_dilation_strides_forward;

Strides m_window_movement_strides_backward;
Strides m_window_dilation_strides_backward;
CoordinateDiff m_padding_below_backward;
CoordinateDiff m_padding_above_backward;
Strides m_data_dilation_strides_backward;
};

class NGRAPH_API ConvolutionBiasAdd : public ngraph::op::util::FusedOp
{
public:
@ -230,7 +134,6 @@ namespace ngraph
};
}
using v0::ConvolutionBias;
using v0::ConvolutionBiasBackpropFiltersBias;
using v0::ConvolutionBiasAdd;
}
}

@ -84,61 +84,3 @@ void op::Gelu::pre_validate_and_infer_types()
set_output_type(0, input_element_type, input_pshape);
}
}

void op::Gelu::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);

auto x = input_value(0);

adjoints.add_delta(x, delta * (make_shared<op::GeluBackpropFactor>(x)));
}

constexpr NodeTypeInfo op::GeluBackpropFactor::type_info;

op::GeluBackpropFactor::GeluBackpropFactor(const Output<Node>& x)
: FusedOp({x})
{
constructor_validate_and_infer_types();
}

void op::GeluBackpropFactor::pre_validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
PartialShape input_pshape = get_input_partial_shape(0);

NODE_VALIDATION_CHECK(this,
input_element_type.is_dynamic() || input_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
input_element_type,
").");

if (input_pshape.is_dynamic())
{
set_output_type(0, input_element_type, input_pshape);
}
}

shared_ptr<Node> op::GeluBackpropFactor::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<GeluBackpropFactor>(new_args.at(0));
}

NodeVector op::GeluBackpropFactor::decompose_op() const
{
auto x = input_value(0);

// 0.5 * (1 + erf(x * sqrt(1/2)))
// + [x * exp(-x^2/2)] / sqrt(2 * pi)
auto half = builder::make_constant(x.get_element_type(), x.get_shape(), 0.5);
auto one = builder::make_constant(x.get_element_type(), x.get_shape(), 1.0);
auto pi = 4.0 * std::atan(1);
auto inv_sqrt_two_pi =
builder::make_constant(x.get_element_type(), x.get_shape(), 1.0 / std::sqrt(2.0 * pi));
auto sqrt_half = builder::make_constant(x.get_element_type(), x.get_shape(), std::sqrt(0.5));

auto e1 = half * (one + make_shared<op::Erf>(x * sqrt_half));
auto e2 = x * make_shared<op::Exp>(x * x * (-half)) * inv_sqrt_two_pi;
return {e1 + e2};
}

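For reference, decompose_op above implements the derivative of Gelu(x) = x Φ(x), where Φ is the standard normal CDF and φ its density:

$$\frac{d}{dx}\,\mathrm{Gelu}(x) = \Phi(x) + x\,\phi(x) = \tfrac{1}{2}\Bigl(1 + \operatorname{erf}\bigl(x/\sqrt{2}\bigr)\Bigr) + \frac{x\,e^{-x^{2}/2}}{\sqrt{2\pi}},$$

with e1 computing the first term and e2 the second.
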
@ -44,33 +44,10 @@ namespace ngraph

void pre_validate_and_infer_types() override;

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;
};

/// \brief Backprop for Gelu(x) is GeluBackpropFactor(x) * delta
class NGRAPH_API GeluBackpropFactor : public util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GeluBackpropFactor", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GeluBackpropFactor() = default;

GeluBackpropFactor(const Output<Node>& x);

virtual NodeVector decompose_op() const override;

void pre_validate_and_infer_types() override;

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};
}
using v0::Gelu;
using v0::GeluBackpropFactor;
}
}

@ -34,7 +34,6 @@ using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::LayerNorm::type_info;
constexpr NodeTypeInfo op::LayerNormBackprop::type_info;

op::LayerNorm::LayerNorm(const Output<Node>& data,
const Output<Node>& scale,
@ -232,359 +231,3 @@ void op::LayerNorm::pre_validate_and_infer_types()
PartialShape norm_shape{data_shape};
set_output_type(0, input_element_type, norm_shape);
}

void op::LayerNorm::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
auto delta = deltas.at(0);
auto data = input_value(0);
if (m_use_affine)
{
auto scale = input_value(1);
auto bias = input_value(2);
if (m_keep_stats)
{
auto mean = outputs()[1];
auto variance = outputs()[2];
auto bprop = make_shared<op::LayerNormBackprop>(
data, delta, mean, variance, scale, m_begin_norm_axis, m_epsilon);
adjoints.add_delta(data, bprop->outputs()[0]);
adjoints.add_delta(scale, bprop->outputs()[1]);
adjoints.add_delta(bias, bprop->outputs()[2]);
}
else
{
auto bprop = make_shared<op::LayerNormBackprop>(
data, delta, scale, m_begin_norm_axis, m_epsilon);
adjoints.add_delta(data, bprop->outputs()[0]);
adjoints.add_delta(scale, bprop->outputs()[1]);
adjoints.add_delta(bias, bprop->outputs()[2]);
}
}
else
{
if (m_keep_stats)
{
auto mean = outputs()[1];
auto variance = outputs()[2];
auto bprop = make_shared<op::LayerNormBackprop>(
data, delta, mean, variance, m_begin_norm_axis, m_epsilon);
adjoints.add_delta(data, bprop->outputs()[0]);
}
else
{
auto bprop =
make_shared<op::LayerNormBackprop>(data, delta, m_begin_norm_axis, m_epsilon);
adjoints.add_delta(data, bprop->outputs()[0]);
}
}
}

op::LayerNormBackprop::LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
const Output<Node>& mean,
const Output<Node>& variance,
const Output<Node>& scale,
int64_t begin_norm_axis,
double epsilon)
: FusedOp({data, delta, mean, variance, scale})
, m_use_stats(true)
, m_use_affine(true)
, m_begin_norm_axis(begin_norm_axis)
, m_epsilon(epsilon)
{
constructor_validate_and_infer_types();
}

op::LayerNormBackprop::LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
const Output<Node>& mean,
const Output<Node>& variance,
int64_t begin_norm_axis,
double epsilon)
: FusedOp({data, delta, mean, variance})
, m_use_stats(true)
, m_use_affine(false)
, m_begin_norm_axis(begin_norm_axis)
, m_epsilon(epsilon)
{
constructor_validate_and_infer_types();
}

op::LayerNormBackprop::LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
const Output<Node>& scale,
int64_t begin_norm_axis,
double epsilon)
: FusedOp({data, delta, scale})
, m_use_stats(false)
, m_use_affine(true)
, m_begin_norm_axis(begin_norm_axis)
, m_epsilon(epsilon)
{
constructor_validate_and_infer_types();
}

op::LayerNormBackprop::LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
int64_t begin_norm_axis,
double epsilon)
: FusedOp({data, delta})
, m_use_stats(false)
, m_use_affine(false)
, m_begin_norm_axis(begin_norm_axis)
, m_epsilon(epsilon)
{
constructor_validate_and_infer_types();
}

// All input shapes should be static by this point
NodeVector op::LayerNormBackprop::decompose_op() const
{
const PartialShape& data_shape = get_input_partial_shape(0);
if (data_shape.is_dynamic())
{
throw ngraph_error("Data needs to have static shape to decompose");
}
const PartialShape& delta_shape = get_input_partial_shape(1);
if (delta_shape.is_dynamic())
{
throw ngraph_error("Delta needs to have static shape to decompose");
}
if (m_use_stats)
{
const PartialShape& mean_shape = get_input_partial_shape(2);
const PartialShape& var_shape = get_input_partial_shape(3);
if (mean_shape.is_dynamic())
{
throw ngraph_error("Mean needs to have static shape to decompose");
}
if (var_shape.is_dynamic())
{
throw ngraph_error("Variance needs to have static shape to decompose");
}
}
if (m_use_affine)
{
const PartialShape& scale_shape = get_input_partial_shape(m_use_stats ? 4 : 2);
if (scale_shape.is_dynamic())
{
throw ngraph_error("Scale needs to have static shape to decompose");
}
}

// Compute real axis
auto shape = data_shape.to_shape();
int64_t n_axis = m_begin_norm_axis >= 0 ? m_begin_norm_axis : shape.size() + m_begin_norm_axis;

// Get input data
auto data = input_value(0);

// Get delta
auto delta = input_value(1);

// Get mean
std::vector<size_t> post_reduction_axes(shape.size() - n_axis);
std::iota(post_reduction_axes.begin(), post_reduction_axes.end(), n_axis);
auto mean =
m_use_stats ? input_value(2) : builder::mean(data, post_reduction_axes)->outputs()[0];

AxisSet post_axis_set;
for (size_t i = static_cast<size_t>(n_axis); i < shape.size(); i++)
{
post_axis_set.insert(i);
}
auto b_mean = make_shared<ngraph::op::Broadcast>(mean, shape, post_axis_set);

// Get variance
auto var =
m_use_stats ? input_value(3) : builder::variance(data, post_reduction_axes)->outputs()[0];

// Compute standard deviation with epsilon
auto epsilon = builder::make_constant(var.get_element_type(), var.get_shape(), m_epsilon);
auto stddev = make_shared<op::Sqrt>(var + epsilon);
auto b_stddev = make_shared<op::Broadcast>(stddev, shape, post_axis_set);

// Get normalized input
auto norm = (data - b_mean) / b_stddev;

// Get gradient for data
auto d_data = delta / b_stddev;

bool scale_flattened = false;
if (m_use_affine)
{
AxisSet pre_axis_set;
for (size_t i = 0; i < static_cast<size_t>(n_axis); i++)
{
pre_axis_set.insert(i);
}

size_t scale_idx = m_use_stats ? 4 : 2;
auto scale = input_value(scale_idx);
auto scale_shape = get_input_partial_shape(scale_idx).to_shape();
if (shape.size() - n_axis != scale_shape.size())
{
scale_flattened = true;
Shape reshape_shape(shape.begin() + m_begin_norm_axis, shape.end());
scale = make_shared<op::Reshape>(scale, AxisVector{0}, reshape_shape);
}
auto b_scale = make_shared<op::Broadcast>(scale, shape, pre_axis_set);
d_data = d_data * b_scale;
}
auto d_mean = make_shared<op::Broadcast>(
builder::mean(-d_data, post_reduction_axes), shape, post_axis_set);
auto d_stddev =
norm * make_shared<op::Broadcast>(
builder::mean(-d_data * norm, post_reduction_axes), shape, post_axis_set);
d_data = d_data + d_mean + d_stddev;

NodeVector retval;
retval.emplace_back(d_data);

// Get gradients for affine
if (m_use_affine)
{
std::vector<size_t> pre_reduction_axes(n_axis);
std::iota(pre_reduction_axes.begin(), pre_reduction_axes.end(), 0);
auto d_bias = make_shared<op::Sum>(delta, pre_reduction_axes);
auto d_scale = make_shared<op::Sum>(delta * norm, pre_reduction_axes);
if (scale_flattened)
{
std::vector<size_t> flatten_axes_vector(shape.size() - n_axis);
std::iota(flatten_axes_vector.begin(), flatten_axes_vector.end(), 0);
AxisVector flatten_axes = AxisVector(flatten_axes_vector);
Shape reshape_shape(shape.begin() + m_begin_norm_axis, shape.end());
size_t reshape_size = shape_size(reshape_shape);
auto flatten_d_scale =
make_shared<op::Reshape>(d_scale, flatten_axes, Shape{reshape_size});
auto flatten_d_bias =
make_shared<op::Reshape>(d_bias, flatten_axes, Shape{reshape_size});
retval.emplace_back(flatten_d_scale);
retval.emplace_back(flatten_d_bias);
}
else
{
retval.emplace_back(d_scale);
retval.emplace_back(d_bias);
}
}
return retval;
}

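For reference, with $\hat{x} = (x - \mu)/\sigma$, $\sigma = \sqrt{\mathrm{var} + \epsilon}$, and writing $\delta' = \delta \odot \gamma$ for the scaled delta (or $\delta' = \delta$ without affine), the three terms summed into d_data above amount to

$$\frac{\partial L}{\partial x} = \frac{1}{\sigma}\Bigl(\delta' - \overline{\delta'} - \hat{x}\,\overline{\delta'\hat{x}}\Bigr), \qquad \frac{\partial L}{\partial \gamma} = \sum \delta\,\hat{x}, \qquad \frac{\partial L}{\partial \beta} = \sum \delta,$$

where the bar denotes the mean over the normalized axes and the sums run over the leading (pre-norm) axes.
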
shared_ptr<Node> op::LayerNormBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
if (new_args.size() < 2 || new_args.size() > 5)
{
throw ngraph_error("Incorrect number of new arguments");
}
if (new_args.size() == 5)
{
return make_shared<LayerNormBackprop>(new_args.at(0),
new_args.at(1),
new_args.at(2),
new_args.at(3),
new_args.at(4),
m_begin_norm_axis,
m_epsilon);
}
else if (new_args.size() == 4)
{
return make_shared<LayerNormBackprop>(new_args.at(0),
new_args.at(1),
new_args.at(2),
new_args.at(3),
m_begin_norm_axis,
m_epsilon);
}
else if (new_args.size() == 3)
{
return make_shared<LayerNormBackprop>(
new_args.at(0), new_args.at(1), new_args.at(2), m_begin_norm_axis, m_epsilon);
}
else
{
return make_shared<LayerNormBackprop>(
new_args.at(0), new_args.at(1), m_begin_norm_axis, m_epsilon);
}
}

void op::LayerNormBackprop::pre_validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);

NODE_VALIDATION_CHECK(this,
input_element_type.is_dynamic() || input_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
input_element_type,
").");

const PartialShape& data_shape = get_input_partial_shape(0);
Rank data_rank = data_shape.rank();
int64_t d_rank = -1;
int64_t n_axis = -1;
if (data_rank.is_static())
{
d_rank = data_rank.get_length();
n_axis = m_begin_norm_axis >= 0 ? m_begin_norm_axis : d_rank + m_begin_norm_axis;
NODE_VALIDATION_CHECK(
this, n_axis >= 0 && n_axis < d_rank, "begin_norm_axis is out of range");

const PartialShape& delta_shape = get_input_partial_shape(1);
Rank delta_rank = delta_shape.rank();
NODE_VALIDATION_CHECK(this,
delta_rank.is_dynamic() || delta_rank.get_length() == d_rank,
"Delta rank is incorrect");

if (m_use_stats)
{
const PartialShape& mean_shape = get_input_partial_shape(2);
const PartialShape& var_shape = get_input_partial_shape(3);
Rank mean_rank = mean_shape.rank();
Rank var_rank = var_shape.rank();
if (mean_rank.is_static() && var_rank.is_static())
{
int64_t m_rank = mean_rank.get_length();
int64_t v_rank = var_rank.get_length();
NODE_VALIDATION_CHECK(this,
m_rank == v_rank && m_rank == n_axis,
"Mean and/or variance rank is incorrect");
}
}

if (m_use_affine)
{
const PartialShape& scale_shape = get_input_partial_shape(m_use_stats ? 4 : 2);
Rank scale_rank = scale_shape.rank();
if (scale_rank.is_static())
{
int64_t s_rank = scale_rank.get_length();
NODE_VALIDATION_CHECK(
this, (s_rank == (d_rank - n_axis)) || s_rank == 1, "Scale rank is incorrect");
}
}
}

if (m_use_affine)
{
set_output_size(3);
// output shape: data_shape[begin_norm_axis:]
if (d_rank > 0)
{
std::vector<Dimension> affine_dim;
for (int64_t i = n_axis; i < d_rank; i++)
{
affine_dim.emplace_back(data_shape[i]);
}
PartialShape affine_shape(affine_dim);
set_output_type(1, input_element_type, affine_shape);
set_output_type(2, input_element_type, affine_shape);
}
else // set shape to dynamic
{
set_output_type(1, input_element_type, PartialShape::dynamic());
set_output_type(2, input_element_type, PartialShape::dynamic());
}
}
PartialShape norm_shape{data_shape};
set_output_type(0, input_element_type, norm_shape);
}

@ -65,79 +65,13 @@ namespace ngraph
bool get_use_affine() const { return m_use_affine; }
double get_epsilon() const { return m_epsilon; }
int64_t get_begin_norm_axis() const { return m_begin_norm_axis; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;

private:
bool m_keep_stats{true};
bool m_use_affine{true};
int64_t m_begin_norm_axis{1};
double m_epsilon{1e-5};
};

/// \brief Layer Normalization Backprop
///
class NGRAPH_API LayerNormBackprop : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"LayerNormBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
LayerNormBackprop() = default;
/// \brief Constructs a LayerNormBackprop operation.
///
/// \param data Input tensor
/// \param delta Delta tensor
/// \param mean Mean tensor from fprop
/// \param variance Variance tensor from fprop
/// \param scale Scale tensor
/// \param begin_norm_axis Axis where normalization starts, default 1
/// \param epsilon Small number to add for stability of rsqrt, default 1e-5
LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
const Output<Node>& mean,
const Output<Node>& variance,
const Output<Node>& scale,
int64_t begin_norm_axis = 1,
double epsilon = 1e-5);

LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
const Output<Node>& mean,
const Output<Node>& variance,
int64_t begin_norm_axis = 1,
double epsilon = 1e-5);

LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
const Output<Node>& scale,
int64_t begin_norm_axis = 1,
double epsilon = 1e-5);

LayerNormBackprop(const Output<Node>& data,
const Output<Node>& delta,
int64_t begin_norm_axis = 1,
double epsilon = 1e-5);

virtual NodeVector decompose_op() const override;

void pre_validate_and_infer_types() override;

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

bool get_use_stats() const { return m_use_stats; }
bool get_use_affine() const { return m_use_affine; }
double get_epsilon() const { return m_epsilon; }
int64_t get_begin_norm_axis() const { return m_begin_norm_axis; }
private:
bool m_use_stats{true};
bool m_use_affine{true};
int64_t m_begin_norm_axis{1};
double m_epsilon{1e-5};
};
}
using v0::LayerNorm;
using v0::LayerNormBackprop;
}
}

@ -28,7 +28,6 @@ using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::PartialSlice::type_info;
constexpr NodeTypeInfo op::PartialSliceBackprop::type_info;

op::PartialSlice::PartialSlice(const Output<Node>& data,
const AxisVector& axes,
@ -145,99 +144,3 @@ void op::PartialSlice::pre_validate_and_infer_types()
set_output_type(0, input_element_type, PartialShape::dynamic());
}
}

void op::PartialSlice::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
throw ngraph_error("op::PartialSlice::generate_adjoints function is not implemented yet");
}

op::PartialSliceBackprop::PartialSliceBackprop(const Output<Node>& data,
const Output<Node>& dout,
const AxisVector& axes,
const std::vector<int64_t>& lower_bounds,
const std::vector<int64_t>& upper_bounds)
: FusedOp({data, dout})
, m_axes(axes)
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
{
constructor_validate_and_infer_types();
}

// All input shapes should be static by this point
NodeVector op::PartialSliceBackprop::decompose_op() const
{
const PartialShape& data_pshape = get_input_partial_shape(0);
if (data_pshape.is_dynamic())
{
throw ngraph_error("Data needs to have static shape to decompose");
}

auto data = input_value(0);
auto dout = input_value(1);
auto data_shape = data.get_shape();
auto axes = get_axes();
auto starts = get_lower_bounds();
auto ends = get_upper_bounds();

Coordinate ng_start, ng_end;
int axis_length, start, end;

auto reshape = data_shape;
for (size_t i = 0; i < data_shape.size(); ++i)
{
ng_start.push_back(0);
ng_end.push_back(data_shape[i]);
}
for (size_t i = 0; i < axes.size(); ++i)
{
axis_length = data_shape[axes[i]];
start = starts[i] < 0 ? (starts[i] + axis_length) : starts[i];
end = ends[i] < 0 ? (ends[i] + axis_length) : ends[i];
start = max(start, 0);
end = max(end, 0);
start = min(start, axis_length);
end = min(end, axis_length);
start = min(start, end);
ng_start[axes[i]] = start;
ng_end[axes[i]] = end;
reshape[axes[i]] = end - start;
}

auto dout_reshape =
std::make_shared<op::Reshape>(dout, get_default_order(dout.get_shape()), reshape);

std::shared_ptr<ngraph::Node> mask =
op::Constant::create(dout.get_element_type(), data_shape, {0});

auto din = std::make_shared<op::ReplaceSlice>(mask, dout_reshape, ng_start, ng_end);
return {din};
}

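The bound normalization in the loop above follows Python slicing rules: negative indices are shifted by the axis length, both bounds are clamped into [0, axis_length], and start is never allowed past end. A minimal standalone sketch with hypothetical values (not part of the diff):

#include <algorithm>
#include <cstdio>

int main()
{
    int axis_length = 10;
    int start = -3, end = 100; // e.g. data[-3:100] on an axis of length 10
    start = start < 0 ? start + axis_length : start; // -3 -> 7
    end = end < 0 ? end + axis_length : end;
    start = std::max(start, 0);
    end = std::max(end, 0);
    start = std::min(start, axis_length);
    end = std::min(end, axis_length);                // 100 -> 10
    start = std::min(start, end);
    std::printf("start=%d end=%d\n", start, end);    // prints start=7 end=10
}
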
shared_ptr<Node> op::PartialSliceBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("Incorrect number of new arguments");
}

return make_shared<PartialSliceBackprop>(
new_args.at(0), new_args.at(1), m_axes, m_lower_bounds, m_upper_bounds);
}

void op::PartialSliceBackprop::pre_validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
PartialShape data_pshape = get_input_partial_shape(0);
PartialShape delta_pshape = get_input_partial_shape(1);

NODE_VALIDATION_CHECK(this,
input_element_type.is_dynamic() || input_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
input_element_type,
").");
if (data_pshape.is_dynamic() || delta_pshape.is_dynamic())
{
set_output_type(0, input_element_type, PartialShape::dynamic());
}
}

@ -59,55 +59,13 @@ namespace ngraph
const std::vector<int64_t>& get_lower_bounds() const { return m_lower_bounds; }
const std::vector<int64_t>& get_upper_bounds() const { return m_upper_bounds; }
const AxisVector& get_decrease_axes() const { return m_decrease_axes; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const OutputVector& deltas) override;

private:
AxisVector m_axes;
std::vector<int64_t> m_lower_bounds;
std::vector<int64_t> m_upper_bounds;
AxisVector m_decrease_axes;
};

/// \brief pdpd slice backprop
///
class NGRAPH_API PartialSliceBackprop : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"PartialSliceBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
PartialSliceBackprop() = default;
/// \brief Constructs a PartialSliceBackprop operation.
///
/// \param data Input tensor
/// \param dout Output tensor from fprop
/// \param axes Axes that lower and upper bounds apply to
/// \param lower_bounds Starting indices of corresponding axis in `axes`
/// \param upper_bounds Ending indices of corresponding axis in `axes`
PartialSliceBackprop(const Output<Node>& data,
const Output<Node>& dout,
const AxisVector& axes,
const std::vector<int64_t>& lower_bounds,
const std::vector<int64_t>& upper_bounds);

virtual NodeVector decompose_op() const override;

void pre_validate_and_infer_types() override;

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

const AxisVector& get_axes() const { return m_axes; }
const std::vector<int64_t>& get_lower_bounds() const { return m_lower_bounds; }
const std::vector<int64_t>& get_upper_bounds() const { return m_upper_bounds; }
private:
AxisVector m_axes;
std::vector<int64_t> m_lower_bounds;
std::vector<int64_t> m_upper_bounds;
};
}
using v0::PartialSlice;
using v0::PartialSliceBackprop;
}
}

@ -156,114 +156,3 @@ shared_ptr<Node> op::SoftmaxCrossEntropy::clone_with_new_inputs(const OutputVect
return make_shared<SoftmaxCrossEntropy>(
new_args.at(0), new_args.at(1), m_soft_label, m_ignore_index);
}

constexpr NodeTypeInfo op::SoftmaxCrossEntropyBackprop::type_info;

op::SoftmaxCrossEntropyBackprop::SoftmaxCrossEntropyBackprop(const Output<Node>& delta,
const Output<Node>& softmax,
const Output<Node>& labels,
bool soft_label,
int64_t ignore_index)
: FusedOp({delta, softmax, labels})
, m_soft_label(soft_label)
, m_ignore_index(ignore_index)
{
constructor_validate_and_infer_types();
}

void op::SoftmaxCrossEntropyBackprop::pre_validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
PartialShape delta_pshape = get_input_partial_shape(0);
PartialShape softmax_pshape = get_input_partial_shape(1);
PartialShape labels_pshape = get_input_partial_shape(2);

NODE_VALIDATION_CHECK(this,
input_element_type.is_dynamic() || input_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
input_element_type,
").");

if (delta_pshape.is_dynamic() || softmax_pshape.is_dynamic() || labels_pshape.is_dynamic())
{
set_output_type(0, input_element_type, PartialShape::dynamic());
}
}

shared_ptr<Node>
op::SoftmaxCrossEntropyBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<SoftmaxCrossEntropyBackprop>(
new_args.at(0), new_args.at(1), new_args.at(2), m_soft_label, m_ignore_index);
}

NodeVector op::SoftmaxCrossEntropyBackprop::decompose_op() const
{
auto delta = input_value(0);
auto softmax = input_value(1);
auto labels = input_value(2);
size_t one_hot_axis = delta.get_shape().size() - 1;
// always reduces the sum on the last axis
auto reduction_axis = delta.get_shape().size() - 1;

if (m_soft_label)
{
if (delta.get_shape() != labels.get_shape())
{
auto reshape = std::make_shared<ngraph::op::Reshape>(
delta, AxisVector{0, 1}, Shape{delta.get_shape().at(0)});
delta =
std::make_shared<ngraph::op::Broadcast>(reshape, labels.get_shape(), AxisSet{1});
}
auto delta_mul_labels = std::make_shared<ngraph::op::Multiply>(delta, labels);
auto summation_delta_mul_labels = std::make_shared<ngraph::op::Sum>(
delta_mul_labels, AxisSet{static_cast<size_t>(reduction_axis)});
auto broadcast_sum = std::make_shared<ngraph::op::Broadcast>(
summation_delta_mul_labels, softmax.get_shape(), AxisSet{1});
auto multiply_sm = broadcast_sum * softmax;
return {multiply_sm - delta_mul_labels};
}
else
{
// ignore mask
auto mask_constant =
ngraph::op::Constant::create(element::i64, labels.get_shape(), {m_ignore_index});
auto not_equal = std::make_shared<ngraph::op::NotEqual>(labels, mask_constant);
auto convert = std::make_shared<ngraph::op::Convert>(not_equal, delta.get_element_type());
auto reshape = std::make_shared<ngraph::op::Reshape>(
convert, AxisVector{0, 1}, Shape{convert->get_shape().at(0)});
auto broadcast_mask =
std::make_shared<ngraph::op::Broadcast>(reshape, softmax.get_shape(), AxisSet{1});

// one hot encoding of labels
auto reshape_labels =
make_shared<op::Reshape>(labels, AxisVector{0, 1}, Shape{labels.get_shape().at(0)});
auto one_hot =
std::make_shared<ngraph::op::OneHot>(reshape_labels, softmax.get_shape(), one_hot_axis);
auto convert_one_hot =
std::make_shared<ngraph::op::Convert>(one_hot, delta.get_element_type());

if (delta.get_shape() != convert_one_hot->get_shape())
{
auto reshape = std::make_shared<ngraph::op::Reshape>(
delta, AxisVector{0, 1}, Shape{delta.get_shape().at(0)});
delta = std::make_shared<ngraph::op::Broadcast>(
reshape, convert_one_hot->get_shape(), AxisSet{1});
}

// (cross_entr * delta * mask)
auto delta_mul_labels = std::make_shared<ngraph::op::Multiply>(delta, convert_one_hot);
auto multiply_mask =
std::make_shared<ngraph::op::Multiply>(delta_mul_labels, broadcast_mask);

// sum (cross_entr * delta * mask)
auto summation_delta_mul_labels = std::make_shared<ngraph::op::Sum>(
multiply_mask, AxisSet{static_cast<size_t>(reduction_axis)});

auto broadcast_sum = std::make_shared<ngraph::op::Broadcast>(
summation_delta_mul_labels, softmax.get_shape(), AxisSet{1});
auto multiply_sm_with_summation = broadcast_sum * softmax;
return {multiply_sm_with_summation - multiply_mask};
}
}

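For reference, the soft-label branch above computes the standard softmax/cross-entropy vector-Jacobian product: with softmax outputs $s$, labels $y$, and incoming delta $\delta$,

$$\frac{\partial L}{\partial z_i} = s_i \sum_j \delta_j\,y_j - \delta_i\,y_i.$$

The hard-label branch is the same expression with one-hot $y$ and the ignore-index mask folded into the product.
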
@ -59,44 +59,7 @@ namespace ngraph
bool m_soft_label;
int64_t m_ignore_index;
};

class NGRAPH_API SoftmaxCrossEntropyBackprop : public util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"SoftmaxCrossEntropyBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
SoftmaxCrossEntropyBackprop() = default;

/// \brief Backprop for SoftmaxCrossEntropy
/// \param delta Node that produces the delta during bprop
/// \param softmax Node that produces softmax from fprop
/// \param labels Node that produces ground truth labels for input
/// \param soft_label Flag indicating whether to interpret the given labels as soft labels
/// \param ignore_index Specifies a target value that is ignored and does not contribute
/// to the input gradient. Only valid if soft_label is set to false.
SoftmaxCrossEntropyBackprop(const Output<Node>& delta,
const Output<Node>& softmax,
const Output<Node>& labels,
bool soft_label = false,
int64_t ignore_index = -100);

virtual NodeVector decompose_op() const override;

void pre_validate_and_infer_types() override;

virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool get_soft_label() const { return m_soft_label; }
int64_t get_ignore_index() const { return m_ignore_index; }
private:
bool m_soft_label;
int64_t m_ignore_index;
};
}
using v0::SoftmaxCrossEntropy;
using v0::SoftmaxCrossEntropyBackprop;
} // namespace op
} // namespace ngraph

@ -869,118 +869,3 @@ NodeVector op::v0::GroupConvolutionBackpropData::decompose_op() const
size_t concatenation_axis = 1;
return {std::make_shared<ngraph::op::Concat>(sliced_inputs, concatenation_axis)};
}

//------------------------------------------------------------------------------
// v0::GroupConvolutionBackpropFilters
//------------------------------------------------------------------------------

constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropFilters::type_info;

op::v0::GroupConvolutionBackpropFilters::GroupConvolutionBackpropFilters(
const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups)
: FusedOp({data_batch, filters, output_delta})
, m_window_movement_strides(window_movement_strides)
, m_window_dilation_strides(window_dilation_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_groups(groups)
{
constructor_validate_and_infer_types();
}

void op::v0::GroupConvolutionBackpropFilters::pre_validate_and_infer_types()
{
element::Type filters_element_type = get_input_element_type(1);
PartialShape data_pshape = get_input_partial_shape(0);
PartialShape filters_pshape = get_input_partial_shape(1);
PartialShape delta_pshape = get_input_partial_shape(2);

NODE_VALIDATION_CHECK(this,
filters_element_type.is_dynamic() || filters_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
filters_element_type,
").");

if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic())
{
set_output_type(0, filters_element_type, PartialShape::dynamic());
}
}

shared_ptr<Node> op::v0::GroupConvolutionBackpropFilters::clone_with_new_inputs(
const OutputVector& new_args) const
{
if (new_args.size() != 3)
{
throw ngraph_error("Incorrect number of new arguments");
}

return make_shared<op::v0::GroupConvolutionBackpropFilters>(new_args.at(0),
new_args.at(1),
new_args.at(2),
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
get_groups());
}

NodeVector op::v0::GroupConvolutionBackpropFilters::decompose_op() const
{
auto data_batch = input_value(0);
auto filters = input_value(1);
auto output_delta = input_value(2);

auto data_shape = get_input_shape(0);
auto filters_shape = get_input_shape(1);
auto delta_shape = get_input_shape(2);

NodeVector sliced_inputs;

for (size_t i = 0; i < get_groups(); ++i)
{
size_t channel_step = filters_shape.at(1);

const Coordinate data_lower_bound{0, i * channel_step, 0, 0};
const Coordinate data_upper_bound{
data_shape.at(0), (i + 1) * channel_step, data_shape.at(2), data_shape.at(3)};
auto sliced_data =
std::make_shared<op::Slice>(data_batch, data_lower_bound, data_upper_bound);

size_t filters_step = filters_shape.at(0) / get_groups();

const Coordinate filters_lower_bound{i * filters_step, 0, 0, 0};
const Coordinate filters_upper_bound{
(i + 1) * filters_step, filters_shape.at(1), filters_shape.at(2), filters_shape.at(3)};
auto sliced_filters =
std::make_shared<op::Slice>(filters, filters_lower_bound, filters_upper_bound);

const Coordinate delta_lower_bound{0, i * filters_step, 0, 0};
const Coordinate delta_upper_bound{
delta_shape.at(0), (i + 1) * filters_step, delta_shape.at(2), delta_shape.at(3)};
auto sliced_delta =
std::make_shared<op::Slice>(output_delta, delta_lower_bound, delta_upper_bound);

auto sliced_conv =
std::make_shared<op::ConvolutionBackpropFilters>(sliced_data,
sliced_filters->get_shape(),
sliced_delta,
get_window_movement_strides(),
get_window_dilation_strides(),
get_padding_below(),
get_padding_above(),
Strides{1, 1});

sliced_inputs.push_back(sliced_conv);
}

size_t concatenation_axis = 0;
return {std::make_shared<ngraph::op::Concat>(sliced_inputs, concatenation_axis)};
}

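For a concrete shape walk-through (hypothetical numbers, assuming the 4-D NCHW layout this decomposition requires): with data [1, 8, H, W], filters [16, 2, kh, kw] and groups = 4, channel_step is 2 and filters_step is 4, so each iteration pairs a 2-channel data slice with a 4-channel delta slice and produces a [4, 2, kh, kw] filter gradient; the four per-group gradients concatenate along axis 0 back to [16, 2, kh, kw].
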
@ -364,52 +364,9 @@ namespace ngraph
CoordinateDiff m_padding_above;
size_t m_groups;
};

/// \brief Group Convolution filters backprop
class NGRAPH_API GroupConvolutionBackpropFilters : public ngraph::op::util::FusedOp
{
public:
static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropFilters", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
GroupConvolutionBackpropFilters() = default;
GroupConvolutionBackpropFilters(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& output_delta,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const size_t groups);

const Strides& get_window_movement_strides() const
{
return m_window_movement_strides;
}
const Strides& get_window_dilation_strides() const
{
return m_window_dilation_strides;
}
const CoordinateDiff& get_padding_below() const { return m_padding_below; }
const CoordinateDiff& get_padding_above() const { return m_padding_above; }
size_t get_groups() const { return m_groups; }
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

virtual NodeVector decompose_op() const override;

virtual void pre_validate_and_infer_types() override;

protected:
Strides m_window_movement_strides;
Strides m_window_dilation_strides;
CoordinateDiff m_padding_below;
CoordinateDiff m_padding_above;
size_t m_groups;
};
}

using v0::GroupConvolution;
using v0::GroupConvolutionBackpropData;
using v0::GroupConvolutionBackpropFilters;
} // namespace op
} // namespace ngraph

@ -176,135 +176,11 @@ shared_ptr<Node> op::v0::MaxPool::clone_with_new_inputs(const OutputVector& new_
m_ceil_mode);
}

constexpr NodeTypeInfo op::v0::MaxPoolBackprop::type_info;
shared_ptr<Node> op::v0::MaxPool::get_default_value() const
{
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
}

op::v0::MaxPoolBackprop::MaxPoolBackprop(const Output<Node>& arg_forward,
const Output<Node>& delta,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above)
: Op({arg_forward, delta})
, m_window_shape(window_shape)
, m_window_movement_strides(window_movement_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
{
constructor_validate_and_infer_types();
}

op::v0::MaxPoolBackprop::MaxPoolBackprop(const Output<Node>& arg_forward,
const Output<Node>& delta,
const Output<Node>& result_forward,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above)
: Op({arg_forward, delta, result_forward})
, m_window_shape(window_shape)
, m_window_movement_strides(window_movement_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
{
constructor_validate_and_infer_types();
}

void op::v0::MaxPoolBackprop::validate_and_infer_types()
{
element::Type forward_arg_et = get_input_element_type(0);
element::Type delta_et = get_input_element_type(1);

element::Type result_et;

NODE_VALIDATION_CHECK(this,
element::Type::merge(result_et, forward_arg_et, delta_et),
"Element types for forward argument (",
forward_arg_et,
") and delta (",
delta_et,
") do not match.");

// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());

const PartialShape& forward_arg_shape = get_input_partial_shape(0);

PartialShape forward_result_shape = infer_batched_pooling_forward(this,
forward_arg_shape,
padding_below,
padding_above,
m_window_shape,
m_window_movement_strides,
true);

const PartialShape& delta_shape = get_input_partial_shape(1);

NODE_VALIDATION_CHECK(
this,
forward_result_shape.compatible(delta_shape),
"Inferred forward output shape does not match delta shape (inferred forward output ",
"shape: ",
forward_result_shape,
", delta shape: ",
delta_shape,
").");

// TODO(amprocte): We may technically be able to infer some extra information from
// forward_result_shape that was not present in the forward arg shape---namely batch size and
// channel count. Merge that info in.
set_output_type(0, get_input_element_type(0), forward_arg_shape);
}

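For reference, the forward shape re-inferred here follows the usual floor-mode pooling formula per spatial dimension (k: window size, s: stride, p_b/p_a: padding below/above):

$$S_{\text{out}} = \left\lfloor \frac{S_{\text{in}} + p_b + p_a - k}{s} \right\rfloor + 1,$$

which is the shape the delta input is then checked against.
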
shared_ptr<Node> op::v0::MaxPoolBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
if (this->get_input_size() == 3)
{
return make_shared<op::v0::MaxPoolBackprop>(new_args.at(0),
new_args.at(1),
new_args.at(2),
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above);
}

return make_shared<op::v0::MaxPoolBackprop>(new_args.at(0),
new_args.at(1),
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above);
}

void op::v0::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
if (m_ceil_mode)
{
throw ngraph_error("Autodiff not supported on MaxPool with ceil_mode set");
}

auto delta = deltas.at(0);

auto operand = input_value(0);
auto backprop =
make_shared<op::v0::MaxPoolBackprop>(operand,
delta,
static_pointer_cast<op::MaxPool>(shared_from_this()),
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above);

adjoints.add_delta(operand, backprop);
}

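For reference, the max-pool backprop routes each output delta to the input position that won its window (tie-breaking is implementation-defined):

$$\frac{\partial L}{\partial x_i} = \sum_{w\,:\, i = \operatorname{arg\,max}_{j \in w} x_j} \delta_w,$$

which is why the op takes the forward input (and optionally the forward result) alongside the delta.
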
constexpr NodeTypeInfo op::v1::MaxPool::type_info;

op::v1::MaxPool::MaxPool(const Output<Node>& arg,
@ -396,119 +272,6 @@ shared_ptr<Node> op::v1::MaxPool::get_default_value() const
return op::Constant::create(get_element_type(), get_shape(), {0});
}

constexpr NodeTypeInfo op::v1::MaxPoolBackprop::type_info;

op::v1::MaxPoolBackprop::MaxPoolBackprop(const Output<Node>& arg_forward,
const Output<Node>& delta,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel)
: Op({arg_forward, delta})
, m_kernel(kernel)
, m_strides(strides)
, m_pads_begin(pads_begin)
, m_pads_end(pads_end)
{
constructor_validate_and_infer_types();
}

op::v1::MaxPoolBackprop::MaxPoolBackprop(const Output<Node>& arg_forward,
const Output<Node>& delta,
const Output<Node>& result_forward,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel)
: Op({arg_forward, delta, result_forward})
, m_kernel(kernel)
, m_strides(strides)
, m_pads_begin(pads_begin)
, m_pads_end(pads_end)
{
constructor_validate_and_infer_types();
}

void op::v1::MaxPoolBackprop::validate_and_infer_types()
|
||||
{
|
||||
element::Type forward_arg_et = get_input_element_type(0);
|
||||
element::Type delta_et = get_input_element_type(1);
|
||||
|
||||
element::Type result_et;
|
||||
|
||||
NODE_VALIDATION_CHECK(this,
|
||||
element::Type::merge(result_et, forward_arg_et, delta_et),
|
||||
"Element types for forward argument (",
|
||||
forward_arg_et,
|
||||
") and delta (",
|
||||
delta_et,
|
||||
") do not match.");
|
||||
|
||||
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
|
||||
// now still take Shape (no negative padding).
|
||||
CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
|
||||
CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());
|
||||
|
||||
const PartialShape& forward_arg_shape = get_input_partial_shape(0);
|
||||
|
||||
PartialShape forward_result_shape = infer_batched_pooling_forward(
|
||||
this, forward_arg_shape, pads_begin, pads_end, m_kernel, m_strides, true);
|
||||
|
||||
const PartialShape& delta_shape = get_input_partial_shape(1);
|
||||
|
||||
NODE_VALIDATION_CHECK(
|
||||
this,
|
||||
forward_result_shape.compatible(delta_shape),
|
||||
"Inferred forward output shape does not match delta shape (inferred forward output ",
|
||||
"shape: ",
|
||||
forward_result_shape,
|
||||
", delta shape: ",
|
||||
delta_shape,
|
||||
").");
|
||||
|
||||
set_output_type(0, get_input_element_type(0), forward_arg_shape);
|
||||
}
|
||||
|
||||
shared_ptr<Node> op::v1::MaxPoolBackprop::clone_with_new_inputs(const OutputVector& new_args) const
|
||||
{
|
||||
check_new_args_count(this, new_args);
|
||||
if (this->get_input_size() == 3)
|
||||
{
|
||||
return make_shared<op::v1::MaxPoolBackprop>(new_args.at(0),
|
||||
new_args.at(1),
|
||||
new_args.at(2),
|
||||
m_strides,
|
||||
m_pads_begin,
|
||||
m_pads_end,
|
||||
m_kernel);
|
||||
}
|
||||
|
||||
return make_shared<op::v1::MaxPoolBackprop>(
|
||||
new_args.at(0), new_args.at(1), m_strides, m_pads_begin, m_pads_end, m_kernel);
|
||||
}
|
||||
|
||||
void op::v1::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
|
||||
{
|
||||
if (m_rounding_type == op::RoundingType::CEIL)
|
||||
{
|
||||
throw ngraph_error("Autodiff not supported on MaxPool with rounding_type set");
|
||||
}
|
||||
|
||||
auto delta = deltas.at(0);
|
||||
|
||||
auto operand = input_value(0);
|
||||
auto backprop =
|
||||
make_shared<op::v1::MaxPoolBackprop>(operand,
|
||||
delta,
|
||||
static_pointer_cast<op::MaxPool>(shared_from_this()),
|
||||
m_strides,
|
||||
m_pads_begin,
|
||||
m_pads_end,
|
||||
m_kernel);
|
||||
|
||||
adjoints.add_delta(operand, backprop);
|
||||
}
|
||||
|
||||
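For reference, the adjoint that both generate_adjoints overloads above construct is the standard max-pooling gradient: each element of the incoming delta is routed unchanged to the input position that produced its window maximum, and every other position receives zero. As a sketch of the usual identity (standard calculus, not text from this commit):

\[
\frac{\partial}{\partial x_j}\,\max_{i \in W} x_i =
\begin{cases}
1, & j = \operatorname{arg\,max}_{i \in W} x_i \\
0, & \text{otherwise,}
\end{cases}
\qquad
\bar{x}_j = \sum_{W \ni j} \bar{y}_W \cdot \big[\, j = \operatorname{arg\,max}_{i \in W} x_i \,\big].
\]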
namespace
{
    template <element::Type_t ET>
@ -138,9 +138,6 @@ namespace ngraph
                bool evaluate(const HostTensorVector& outputs,
                              const HostTensorVector& inputs) override;

            protected:
                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;

                Shape m_window_shape;
                Strides m_window_movement_strides;
                Shape m_padding_below;
@ -153,61 +150,6 @@ namespace ngraph
                                          Shape& new_padding_above,
                                          Shape& new_padding_below);
            };

            class NGRAPH_API MaxPoolBackprop : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"MaxPoolBackprop", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                MaxPoolBackprop() = default;

                MaxPoolBackprop(const Output<Node>& arg_forward,
                                const Output<Node>& delta,
                                const Shape& window_shape,
                                const Strides& window_movement_strides,
                                const Shape& padding_below,
                                const Shape& padding_above);

                MaxPoolBackprop(const Output<Node>& arg_forward,
                                const Output<Node>& delta,
                                const Output<Node>& result_forward,
                                const Shape& window_shape,
                                const Strides& window_movement_strides,
                                const Shape& padding_below,
                                const Shape& padding_above);

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                void validate_and_infer_types() override;

                const Shape& get_window_shape() const { return m_window_shape; }
                void set_window_shape(const Shape& window_shape) { m_window_shape = window_shape; }
                const Strides& get_window_movement_strides() const
                {
                    return m_window_movement_strides;
                }
                void set_window_movement_strides(const Strides& window_movement_strides)
                {
                    m_window_movement_strides = window_movement_strides;
                }
                const Shape& get_padding_below() const { return m_padding_below; }
                void set_padding_below(const Shape& padding_below)
                {
                    m_padding_below = padding_below;
                }
                const Shape& get_padding_above() const { return m_padding_above; }
                void set_padding_above(const Shape& padding_above)
                {
                    m_padding_above = padding_above;
                }

            protected:
                Shape m_window_shape;
                Strides m_window_movement_strides;
                Shape m_padding_below;
                Shape m_padding_above;
            };
        } // namespace v0

        namespace v1
@ -290,9 +232,6 @@ namespace ngraph
                const HostTensorVector& inputs) override;

            protected:
                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;

                Shape m_kernel;
                Strides m_strides;
                Shape m_pads_begin;
@ -305,52 +244,8 @@ namespace ngraph
                                     Shape& new_pads_end,
                                     Shape& new_pads_begin);
            };

            class NGRAPH_API MaxPoolBackprop : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"MaxPoolBackprop", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                MaxPoolBackprop() = default;

                MaxPoolBackprop(const Output<Node>& arg_forward,
                                const Output<Node>& delta,
                                const Strides& strides,
                                const Shape& pads_begin,
                                const Shape& pads_end,
                                const Shape& kernel);

                MaxPoolBackprop(const Output<Node>& arg_forward,
                                const Output<Node>& delta,
                                const Output<Node>& result_forward,
                                const Strides& strides,
                                const Shape& pads_begin,
                                const Shape& pads_end,
                                const Shape& kernel);

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                size_t get_version() const override { return 1; }
                void validate_and_infer_types() override;

                const Shape& get_kernel() const { return m_kernel; }
                void set_kernel(const Shape& kernel) { m_kernel = kernel; }
                const Strides& get_strides() const { return m_strides; }
                void set_strides(const Strides& strides) { m_strides = strides; }
                const Shape& get_pads_begin() const { return m_pads_begin; }
                void set_pads_begin(const Shape& pads_begin) { m_pads_begin = pads_begin; }
                const Shape& get_pads_end() const { return m_pads_end; }
                void set_pads_end(const Shape& pads_end) { m_pads_end = pads_end; }
            protected:
                Shape m_kernel;
                Strides m_strides;
                Shape m_pads_begin;
                Shape m_pads_end;
            };
        } // namespace v1

        using v0::MaxPool;
        using v0::MaxPoolBackprop;
    } // namespace op
} // namespace ngraph
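At this point in history the header above still declares both v1::MaxPoolBackprop constructors. A minimal usage sketch against a tree that predates this commit; the shapes, stride, padding, and kernel values below are hypothetical, chosen only to exercise the two-input overload:

#include "ngraph/op/max_pool.hpp"
#include "ngraph/op/parameter.hpp"

using namespace ngraph;

std::shared_ptr<Node> make_max_pool_bprop_sketch()
{
    // Forward input and the delta produced by a 2x2, stride-2 forward pool.
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 1, 4, 4});
    auto delta = std::make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
    // Argument order follows the v1 declaration: strides, pads_begin, pads_end, kernel.
    return std::make_shared<op::v1::MaxPoolBackprop>(
        data, delta, Strides{2, 2}, Shape{0, 0}, Shape{0, 0}, Shape{2, 2});
}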
@ -42,15 +42,11 @@ NGRAPH_OP(Asinh, ngraph::op::v3, 3)
NGRAPH_OP(Atan, ngraph::op::v0, 0)
NGRAPH_OP(Atanh, ngraph::op::v3, 3)
NGRAPH_OP(Atan2, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v0, 0)
NGRAPH_OP(AvgPool, ngraph::op::v1, 1)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v0, 0)
NGRAPH_OP(AvgPoolBackprop, ngraph::op::v1, 1)
NGRAPH_OP(BatchMatMul, ngraph::op::v0, 0)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormInference, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormTraining, ngraph::op::v0, 0)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op::v0, 0)
NGRAPH_OP(BatchToSpace, ngraph::op::v1, 1)
NGRAPH_OP(BinaryConvolution, ngraph::op::v1, 1)
NGRAPH_OP(Broadcast, ngraph::op::v0, 0)
@ -71,16 +67,12 @@ NGRAPH_OP(Convolution, ngraph::op::v0, 0)
NGRAPH_OP(Convolution, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op::v1, 1)
NGRAPH_OP(ConvolutionBias, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasAdd, ngraph::op::v0, 0)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op::v0, 0)
NGRAPH_OP(Cos, ngraph::op::v0, 0)
NGRAPH_OP(Cosh, ngraph::op::v0, 0)
NGRAPH_OP(CropAndResize, ngraph::op::v0, 0)
NGRAPH_OP(CrossEntropy, ngraph::op::v0, 0)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op::v0, 0)
NGRAPH_OP(CumSum, ngraph::op::v0, 0)
NGRAPH_OP(DeformableConvolution, ngraph::op::v1, 1)
NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1, 1)
@ -114,7 +106,6 @@ NGRAPH_OP(Gather, ngraph::op::v1, 1)
NGRAPH_OP(GatherND, ngraph::op::v0, 0)
NGRAPH_OP(GatherTree, ngraph::op::v1, 1)
NGRAPH_OP(Gelu, ngraph::op::v0, 0)
NGRAPH_OP(GeluBackpropFactor, ngraph::op::v0, 0)
NGRAPH_OP(Gemm, ngraph::op::v0, 0)
NGRAPH_OP(GenerateMask, ngraph::op::v0, 0)
NGRAPH_OP(GenerateMask, ngraph::op::v1, 1)
@ -127,7 +118,6 @@ NGRAPH_OP(GroupConvolution, ngraph::op::v0, 0)
NGRAPH_OP(GroupConvolution, ngraph::op::v1, 1)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0, 0)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1, 1)
NGRAPH_OP(GroupConvolutionBackpropFilters, ngraph::op::v0, 0)
NGRAPH_OP(HardSigmoid, ngraph::op::v0, 0)
NGRAPH_OP(Interpolate, ngraph::op::v0, 0)
NGRAPH_OP(Interpolate, ngraph::op::v3, 3)
@ -135,7 +125,6 @@ NGRAPH_OP(LRN, ngraph::op::v0, 0)
NGRAPH_OP(LSTMCell, ngraph::op::v0, 0)
NGRAPH_OP(LSTMSequence, ngraph::op::v0, 0)
NGRAPH_OP(LayerNorm, ngraph::op::v0, 0)
NGRAPH_OP(LayerNormBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Less, ngraph::op::v0, 0)
NGRAPH_OP(Less, ngraph::op::v1, 1)
NGRAPH_OP(LessEq, ngraph::op::v0, 0)
@ -150,8 +139,6 @@ NGRAPH_OP(MatMul, ngraph::op::v0, 0)
NGRAPH_OP(Max, ngraph::op::v0, 0)
NGRAPH_OP(MaxPool, ngraph::op::v0, 0)
NGRAPH_OP(MaxPool, ngraph::op::v1, 1)
NGRAPH_OP(MaxPoolBackprop, ngraph::op::v0, 0)
NGRAPH_OP(MaxPoolBackprop, ngraph::op::v1, 1)
NGRAPH_OP(Maximum, ngraph::op::v0, 0)
NGRAPH_OP(Maximum, ngraph::op::v1, 1)
NGRAPH_OP(Min, ngraph::op::v0, 0)
@ -176,7 +163,6 @@ NGRAPH_OP(Pad, ngraph::op::v0, 0)
NGRAPH_OP(Pad, ngraph::op::v1, 1)
NGRAPH_OP(Parameter, ngraph::op::v0, 0)
NGRAPH_OP(PartialSlice, ngraph::op::v0, 0)
NGRAPH_OP(PartialSliceBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Passthrough, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v0, 0)
NGRAPH_OP(Power, ngraph::op::v1, 1)
@ -206,7 +192,6 @@ NGRAPH_OP(ReduceProd, ngraph::op::v1, 1)
NGRAPH_OP(ReduceSum, ngraph::op::v1, 1)
NGRAPH_OP(RegionYolo, ngraph::op::v0, 0)
NGRAPH_OP(Relu, ngraph::op::v0, 0)
NGRAPH_OP(ReluBackprop, ngraph::op::v0, 0)
NGRAPH_OP(ReorgYolo, ngraph::op::v0, 0)
NGRAPH_OP(ReplaceSlice, ngraph::op::v0, 0)
NGRAPH_OP(Reshape, ngraph::op::v0, 0)
@ -233,7 +218,6 @@ NGRAPH_OP(ShapeOf, ngraph::op::v0, 0)
NGRAPH_OP(ShapeOf, ngraph::op::v3, 3)
NGRAPH_OP(ShuffleChannels, ngraph::op::v0, 0)
NGRAPH_OP(Sigmoid, ngraph::op::v0, 0)
NGRAPH_OP(SigmoidBackprop, ngraph::op::v0, 0)
NGRAPH_OP(Sign, ngraph::op::v0, 0)
NGRAPH_OP(Sin, ngraph::op::v0, 0)
NGRAPH_OP(Sinh, ngraph::op::v0, 0)
@ -241,7 +225,6 @@ NGRAPH_OP(Slice, ngraph::op::v0, 0)
NGRAPH_OP(Softmax, ngraph::op::v0, 0)
NGRAPH_OP(Softmax, ngraph::op::v1, 1)
NGRAPH_OP(SoftmaxCrossEntropy, ngraph::op::v0, 0)
NGRAPH_OP(SoftmaxCrossEntropyBackprop, ngraph::op::v0, 0)
NGRAPH_OP(SpaceToBatch, ngraph::op::v1, 1)
NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
NGRAPH_OP(Split, ngraph::op::v1, 1)
@ -24,7 +24,6 @@ using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::Relu::type_info;
constexpr NodeTypeInfo op::ReluBackprop::type_info;

op::Relu::Relu(const Output<Node>& arg)
    : UnaryElementwiseArithmetic(arg)
@ -79,23 +78,3 @@ bool op::Relu::evaluate(const HostTensorVector& outputs, const HostTensorVector&
{
    return evaluate_relu(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

op::ReluBackprop::ReluBackprop(const Output<Node>& arg, const Output<Node>& delta)
    : BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
{
    constructor_validate_and_infer_types();
}

shared_ptr<Node> op::ReluBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<ReluBackprop>(new_args.at(0), new_args.at(1));
}

void op::Relu::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);

    auto backprop = make_shared<op::ReluBackprop>(output(0), delta);
    adjoints.add_delta(input_value(0), backprop);
}
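ReluBackprop, deleted by this commit, computes the usual ReLU adjoint. As a reference identity (standard calculus, not taken from the diff):

\[
\mathrm{ReluBackprop}(x, \delta)_i = \delta_i \cdot \mathbf{1}[x_i > 0].
\]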
@ -47,32 +47,8 @@ namespace ngraph

                bool evaluate(const HostTensorVector& outputs,
                              const HostTensorVector& inputs) override;

                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;
            };

            /// \brief Elementwise ReluBackprop operation.
            ///
            class NGRAPH_API ReluBackprop : public ngraph::op::util::BinaryElementwiseArithmetic
            {
            public:
                static constexpr NodeTypeInfo type_info{"ReluBackprop", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                ReluBackprop()
                    : BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
                {
                }
                /// \brief Constructs a ReluBackprop operation.
                ///
                /// \param arg Node that produces the relu forward input tensor.
                ReluBackprop(const Output<ngraph::Node>& arg, const Output<ngraph::Node>& delta);

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;
            };
        }
        using v0::Relu;
        using v0::ReluBackprop;
    }
}
@ -25,7 +25,6 @@ using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::Sigmoid::type_info;
constexpr NodeTypeInfo op::SigmoidBackprop::type_info;

shared_ptr<Node> op::Sigmoid::clone_with_new_inputs(const OutputVector& new_args) const
{
@ -80,23 +79,3 @@ bool op::Sigmoid::evaluate(const HostTensorVector& outputs, const HostTensorVect
{
    return evaluate_sigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
}

op::SigmoidBackprop::SigmoidBackprop(const Output<Node>& arg, const Output<Node>& delta)
    : BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
{
    constructor_validate_and_infer_types();
}

shared_ptr<Node> op::SigmoidBackprop::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<SigmoidBackprop>(new_args.at(0), new_args.at(1));
}

void op::Sigmoid::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas)
{
    auto delta = deltas.at(0);

    auto backprop = make_shared<op::SigmoidBackprop>(input_value(0), delta);
    adjoints.add_delta(input_value(0), backprop);
}
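SigmoidBackprop rests on the standard logistic-derivative identity, which is what lets the adjoint be computed from the forward input alone:

\[
\sigma(x) = \frac{1}{1 + e^{-x}}, \qquad
\sigma'(x) = \sigma(x)\,(1 - \sigma(x)), \qquad
\mathrm{SigmoidBackprop}(x, \delta) = \delta \cdot \sigma(x)\,(1 - \sigma(x)).
\]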
@ -36,34 +36,10 @@ namespace ngraph
                Sigmoid() = default;
                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;
                virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                               const OutputVector& deltas) override;
                bool evaluate(const HostTensorVector& outputs,
                              const HostTensorVector& inputs) override;
            };

            /// \brief Elementwise SigmoidBackprop operation.
            ///
            class NGRAPH_API SigmoidBackprop : public util::BinaryElementwiseArithmetic
            {
            public:
                static constexpr NodeTypeInfo type_info{"SigmoidBackprop", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                SigmoidBackprop()
                    : util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
                {
                }

                /// \brief Constructs a SigmoidBackprop operation.
                ///
                /// \param arg Node that produces the Sigmoid forward input tensor.
                SigmoidBackprop(const Output<Node>& arg, const Output<Node>& delta);

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;
            };
        }
        using v0::Sigmoid;
        using v0::SigmoidBackprop;
    }
}
@ -62,13 +62,10 @@ NGRAPH_OP(ArgMin, ngraph::op)
NGRAPH_OP(Asin, ngraph::op)
NGRAPH_OP(Atan, ngraph::op)
NGRAPH_OP(Atan2, ngraph::op)
NGRAPH_OP(AvgPool, ngraph::op)
NGRAPH_OP(AvgPoolBackprop, ngraph::op)
NGRAPH_OP(BatchMatMul, ngraph::op)
NGRAPH_OP(BatchMatMulTranspose, ngraph::op)
NGRAPH_OP(BatchNormInference, ngraph::op)
NGRAPH_OP(BatchNormTraining, ngraph::op)
NGRAPH_OP(BatchNormTrainingBackprop, ngraph::op)
NGRAPH_OP(Broadcast, ngraph::op)
NGRAPH_OP(BroadcastDistributed, ngraph::op)
NGRAPH_OP(BroadcastLike, ngraph::op)
@ -79,14 +76,11 @@ NGRAPH_OP(Constant, ngraph::op)
NGRAPH_OP(Convert, ngraph::op)
NGRAPH_OP(Convolution, ngraph::op)
NGRAPH_OP(ConvolutionBackpropData, ngraph::op)
NGRAPH_OP(ConvolutionBackpropFilters, ngraph::op)
NGRAPH_OP(ConvolutionBias, ngraph::op)
NGRAPH_OP(ConvolutionBiasAdd, ngraph::op)
NGRAPH_OP(ConvolutionBiasBackpropFiltersBias, ngraph::op)
NGRAPH_OP(Cos, ngraph::op)
NGRAPH_OP(Cosh, ngraph::op)
NGRAPH_OP(CrossEntropy, ngraph::op)
NGRAPH_OP(CrossEntropyBackprop, ngraph::op)
NGRAPH_OP(CropAndResize, ngraph::op)
NGRAPH_OP(CumSum, ngraph::op::v0)
NGRAPH_OP(DepthToSpace, ngraph::op)
@ -109,7 +103,6 @@ NGRAPH_OP(GRUCell, ngraph::op)
NGRAPH_OP(Gather, ngraph::op)
NGRAPH_OP(GatherND, ngraph::op)
NGRAPH_OP(Gelu, ngraph::op)
NGRAPH_OP(GeluBackpropFactor, ngraph::op)
NGRAPH_OP(Gemm, ngraph::op)
NGRAPH_OP(GenerateMask, ngraph::op)
NGRAPH_OP(GetOutputElement, ngraph::op)
@ -117,11 +110,9 @@ NGRAPH_OP(Greater, ngraph::op)
NGRAPH_OP(GreaterEq, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op)
NGRAPH_OP(GroupConvolutionBackpropFilters, ngraph::op)
NGRAPH_OP(HardSigmoid, ngraph::op)
NGRAPH_OP(Interpolate, ngraph::op)
NGRAPH_OP(LayerNorm, ngraph::op)
NGRAPH_OP(LayerNormBackprop, ngraph::op)
NGRAPH_OP(Less, ngraph::op)
NGRAPH_OP(LessEq, ngraph::op)
NGRAPH_OP(Log, ngraph::op)
@ -133,7 +124,6 @@ NGRAPH_OP(NormalizeL2, ngraph::op)
NGRAPH_OP(Max, ngraph::op)
NGRAPH_OP(Maximum, ngraph::op)
NGRAPH_OP(MaxPool, ngraph::op)
NGRAPH_OP(MaxPoolBackprop, ngraph::op)
NGRAPH_OP(Min, ngraph::op)
NGRAPH_OP(Minimum, ngraph::op)
NGRAPH_OP(Multiply, ngraph::op)
@ -146,7 +136,6 @@ NGRAPH_OP(Or, ngraph::op)
NGRAPH_OP(Pad, ngraph::op)
NGRAPH_OP(Parameter, ngraph::op)
NGRAPH_OP(PartialSlice, ngraph::op)
NGRAPH_OP(PartialSliceBackprop, ngraph::op)
NGRAPH_OP(Passthrough, ngraph::op)
NGRAPH_OP(Power, ngraph::op)
NGRAPH_OP(PRelu, ngraph::op)
@ -163,7 +152,6 @@ NGRAPH_OP(RandomUniform, ngraph::op)
NGRAPH_OP(Recv, ngraph::op)
NGRAPH_OP(Range, ngraph::op)
NGRAPH_OP(Relu, ngraph::op)
NGRAPH_OP(ReluBackprop, ngraph::op)
NGRAPH_OP(ReplaceSlice, ngraph::op)
NGRAPH_OP(Reshape, ngraph::op)
NGRAPH_OP(Result, ngraph::op)
@ -182,14 +170,12 @@ NGRAPH_OP(Send, ngraph::op)
NGRAPH_OP(ShapeOf, ngraph::op)
NGRAPH_OP(ShuffleChannels, ngraph::op)
NGRAPH_OP(Sigmoid, ngraph::op)
NGRAPH_OP(SigmoidBackprop, ngraph::op)
NGRAPH_OP(Sign, ngraph::op)
NGRAPH_OP(Sin, ngraph::op)
NGRAPH_OP(Sinh, ngraph::op)
NGRAPH_OP(Slice, ngraph::op)
NGRAPH_OP(Softmax, ngraph::op)
NGRAPH_OP(SoftmaxCrossEntropy, ngraph::op)
NGRAPH_OP(SoftmaxCrossEntropyBackprop, ngraph::op)
NGRAPH_OP(SpaceToDepth, ngraph::op)
NGRAPH_OP(Split, ngraph::op)
NGRAPH_OP(Sqrt, ngraph::op)
@ -95,140 +95,6 @@ void pass::CoreFusion::construct_softmax_cross_entropy_fprop()
    this->add_matcher(m, callback);
}

void pass::CoreFusion::construct_softmax_cross_entropy_bprop_with_soft_labels()
{
    // Softmax bprop
    auto input_x = std::make_shared<pattern::op::Label>(element::f32, Shape{41, 37});
    auto constant_1 = ngraph::op::Constant::create(element::i64, Shape{1}, {1});
    auto max_x = std::make_shared<ngraph::op::Max>(input_x, constant_1);
    auto broadcast_max_x =
        std::make_shared<ngraph::op::Broadcast>(max_x, Shape{41, 37}, AxisSet{1});
    auto subtract_input_x = std::make_shared<ngraph::op::Subtract>(input_x, broadcast_max_x);
    auto constant_2 = ngraph::op::Constant::create(element::f32, Shape{41, 37}, {1});
    auto maximum = std::make_shared<ngraph::op::Maximum>(constant_2, subtract_input_x);
    auto softmax_axes = ngraph::op::Constant::create(element::i64, Shape{1}, {1});
    auto softmax = std::make_shared<ngraph::op::Softmax>(maximum, softmax_axes);
    auto softmax_label =
        std::make_shared<pattern::op::Label>(softmax, nullptr, NodeVector{softmax});

    // Cross Entropy Bprop
    auto delta_label = std::make_shared<pattern::op::Label>(element::f32, Shape{41, 37});
    // if soft_label = true, we will not have one hot encoding on the labels;
    // instead we will get the labels as a 2d floating point tensor
    auto labels_y = std::make_shared<pattern::op::Label>(
        element::f32, Shape{41, 37}, pattern::has_class<op::Parameter>());
    auto negative_y = std::make_shared<ngraph::op::Negative>(labels_y);
    auto multiply_ce = std::make_shared<ngraph::op::Multiply>(negative_y, delta_label);

    // summation
    auto divide_sm_ce = std::make_shared<ngraph::op::Divide>(multiply_ce, softmax_label);
    auto multiply_sm_ce = std::make_shared<ngraph::op::Multiply>(softmax_label, divide_sm_ce);
    auto reduction_axes_label = std::make_shared<pattern::op::Label>(element::i64, Shape{1});
    auto summation = std::make_shared<ngraph::op::Sum>(multiply_sm_ce, reduction_axes_label);
    auto broadcast_summation =
        std::make_shared<ngraph::op::Broadcast>(summation, Shape{41, 37}, AxisSet{1});

    auto subtract = std::make_shared<ngraph::op::Subtract>(divide_sm_ce, broadcast_summation);
    auto multiply = std::make_shared<ngraph::op::Multiply>(softmax_label, subtract);

    auto callback = [input_x, delta_label, labels_y, reduction_axes_label, softmax_label](
        pattern::Matcher& m) {
        NGRAPH_DEBUG << "In a callback for construct_softmax_cross_entropy_bprop against "
                     << m.get_match_root()->get_name();

        auto pattern_map = m.get_pattern_map();
        auto input = pattern_map[input_x];
        auto labels = pattern_map[labels_y];
        auto delta = pattern_map[delta_label];
        auto softmax = pattern_map[softmax_label];

        auto sm_ce_bprop =
            std::make_shared<ngraph::op::SoftmaxCrossEntropyBackprop>(delta, softmax, labels, true);
        ngraph::replace_node(m.get_match_root(), sm_ce_bprop);
        return true;
    };
    auto m = std::make_shared<pattern::Matcher>(multiply, "CoreFusion.SoftmaxCrossEntropyBprop");
    this->add_matcher(m, callback);
}
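The subgraph removed by this matcher is the autodiff expansion of the soft-label softmax cross-entropy gradient. For reference, with p = softmax(z) taken along the class axis and soft labels y that sum to one per row, the standard identity being fused is:

\[
L = -\sum_{i} y_i \log p_i, \qquad \frac{\partial L}{\partial z_i} = p_i - y_i,
\]

so, when the incoming delta is uniform across the class axis, a single SoftmaxCrossEntropyBackprop node can compute \(\delta\,(p_i - y_i)\) in place of the matched Divide/Sum/Broadcast chain. (A sketch of the textbook derivation, stated here for orientation; it is not text from the commit.)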

void pass::CoreFusion::construct_softmax_cross_entropy_bprop_with_ignore_mask()
{
    // Softmax bprop
    auto input_x = std::make_shared<pattern::op::Label>(element::f64, Shape{41, 37});
    auto constant_1 = ngraph::op::Constant::create(element::i64, Shape{1}, {1});
    auto max_x = std::make_shared<ngraph::op::Max>(input_x, constant_1);
    auto broadcast_max_x =
        std::make_shared<ngraph::op::Broadcast>(max_x, Shape{41, 37}, AxisSet{1});
    auto subtract_input_x = std::make_shared<ngraph::op::Subtract>(input_x, broadcast_max_x);
    auto constant_2 = ngraph::op::Constant::create(element::f64, Shape{41, 37}, {1});
    auto maximum = std::make_shared<ngraph::op::Maximum>(constant_2, subtract_input_x);
    auto softmax_axes = ngraph::op::Constant::create(element::i64, Shape{1}, {1});
    auto softmax = std::make_shared<ngraph::op::Softmax>(maximum, softmax_axes);
    auto softmax_label =
        std::make_shared<pattern::op::Label>(softmax, nullptr, NodeVector{softmax});

    // labels
    auto labels_y = std::make_shared<pattern::op::Label>(
        element::i64, Shape{41, 1}, pattern::has_class<op::Parameter>());
    // ignore_mask
    auto mask_constant = ngraph::op::Constant::create(element::i64, Shape{41, 1}, {1});
    auto mask_label = std::make_shared<pattern::op::Label>(mask_constant);
    auto not_equal = std::make_shared<ngraph::op::NotEqual>(labels_y, mask_label);
    auto convert = std::make_shared<ngraph::op::Convert>(not_equal, element::f64);
    auto reshape = std::make_shared<ngraph::op::Reshape>(
        convert, AxisVector{0, 1}, Shape{convert->get_shape().at(0)});
    auto broadcast_mask =
        std::make_shared<ngraph::op::Broadcast>(reshape, Shape{41, 37}, AxisSet{1});

    // Cross Entropy Bprop
    auto delta_label = std::make_shared<pattern::op::Label>(element::f64, Shape{41, 37});
    // if ignore_mask is enabled, we will have one hot encoding on the labels,
    auto reshape_labels = make_shared<op::Reshape>(labels_y, AxisVector{0, 1}, Shape{41});
    auto one_hot = std::make_shared<ngraph::op::OneHot>(reshape_labels, Shape{41, 37}, size_t(1));
    auto convert_one_hot = std::make_shared<ngraph::op::Convert>(one_hot, element::f64);
    auto negative_y = std::make_shared<ngraph::op::Negative>(convert_one_hot);
    auto multiply_ce = std::make_shared<ngraph::op::Multiply>(negative_y, delta_label);

    // summation
    auto divide_sm_ce = std::make_shared<ngraph::op::Divide>(multiply_ce, softmax_label);
    auto multiply_mask = std::make_shared<ngraph::op::Multiply>(divide_sm_ce, broadcast_mask);
    auto multiply_sm_ce = std::make_shared<ngraph::op::Multiply>(softmax_label, multiply_mask);
    auto reduction_axes_label = std::make_shared<pattern::op::Label>(element::i64, Shape{1});
    auto summation = std::make_shared<ngraph::op::Sum>(multiply_sm_ce, reduction_axes_label);
    auto broadcast_summation =
        std::make_shared<ngraph::op::Broadcast>(summation, Shape{41, 37}, AxisSet{1});

    auto subtract = std::make_shared<ngraph::op::Subtract>(multiply_mask, broadcast_summation);
    auto multiply = std::make_shared<ngraph::op::Multiply>(softmax_label, subtract);

    auto callback = [input_x,
                     delta_label,
                     labels_y,
                     reduction_axes_label,
                     softmax_label,
                     mask_label](pattern::Matcher& m) {
        NGRAPH_DEBUG
            << "In a callback for construct_softmax_cross_entropy_bprop_with_ignore_mask against "
            << m.get_match_root()->get_name();

        auto pattern_map = m.get_pattern_map();
        auto input = pattern_map[input_x];
        auto labels = pattern_map[labels_y];
        auto delta = pattern_map[delta_label];
        auto softmax = pattern_map[softmax_label];

        auto mask_constant_op =
            std::static_pointer_cast<ngraph::op::Constant>(pattern_map[mask_label]);
        auto ignore_index = *(static_cast<size_t const*>(mask_constant_op->get_data_ptr()));
        auto sm_ce_bprop = std::make_shared<ngraph::op::SoftmaxCrossEntropyBackprop>(
            delta, softmax, labels, false, ignore_index);
        ngraph::replace_node(m.get_match_root(), sm_ce_bprop);
        return true;
    };
    auto m = std::make_shared<pattern::Matcher>(multiply, "CoreFusion.SoftmaxCrossEntropyBprop");
    this->add_matcher(m, callback);
}

void pass::CoreFusion::construct_relu()
{
    auto iconst0 = construct_constant_node(0);
@ -308,57 +174,6 @@ void pass::CoreFusion::construct_sigmoid()
    this->add_matcher(m, callback, all_pass_property_off);
}

void pass::CoreFusion::construct_sigmoid_bprop()
{
    // construct variance
    auto input = make_shared<pattern::op::Label>(element::f32, Shape{3, 4});
    auto neg_input = make_shared<op::Negative>(input);
    auto exp_neg_input = make_shared<op::Exp>(neg_input);

    // broadcast input
    auto constant = make_shared<pattern::op::Label>(element::f32, Shape{});
    auto broadcast_constant = make_shared<op::Broadcast>(constant, Shape{3, 4}, AxisSet{0, 1});

    auto add_exp = make_shared<op::Add>(exp_neg_input, broadcast_constant);
    // auto divide_1_over_exp = make_shared<op::Divide>(broadcast_constant, add_exp);
    auto sigmoid_fwd = make_shared<pattern::op::Label>(element::f32, Shape{3, 4});

    auto delta = make_shared<pattern::op::Label>(element::f32, Shape{3, 4});
    auto neg_delta = make_shared<op::Negative>(delta);

    auto multiply_sigmoid_delta = make_shared<op::Multiply>(sigmoid_fwd, neg_delta);
    auto divide_2 = make_shared<op::Divide>(multiply_sigmoid_delta, add_exp);

    auto multiply_2 = make_shared<op::Multiply>(divide_2, exp_neg_input);
    auto negative_2 = make_shared<op::Negative>(multiply_2);

    // Define a callback that needs to be called once the DFG matches the pattern
    auto callback = [input, delta](pattern::Matcher& m) {
        NGRAPH_DEBUG << "In a callback for construct_bprop_sigmoid pattern against "
                     << m.get_match_root()->get_name();
        auto pattern_map = m.get_pattern_map();
        if (m.get_match_root()->get_element_type() != element::f32)
        {
            NGRAPH_DEBUG << "mpattern = " << m.get_match_root()->get_name()
                         << " type is not float!";
            return false;
        }

        if (m.get_match_root()->get_shape().size() != pattern_map[input]->get_shape().size())
        {
            NGRAPH_DEBUG << "mpattern = " << m.get_match_root()->get_name()
                         << " input = " << pattern_map[input]->get_name()
                         << " sizes don't match!";
            return false;
        }
        auto dsigmoid = make_shared<op::SigmoidBackprop>(pattern_map[input], pattern_map[delta]);
        replace_node(m.get_match_root(), dsigmoid);
        return true;
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(negative_2, "CoreFusion.SigmoidBprop");
    this->add_matcher(m, callback, PassProperty::REQUIRE_STATIC_SHAPE);
}
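Reading the matched expression bottom-up shows why substituting SigmoidBackprop is sound. With s bound to the sigmoid_fwd label and x to input, the root negative_2 computes

\[
-\left(\frac{s \cdot (-\delta)}{1 + e^{-x}}\right) e^{-x}
\;=\; \delta\, s\, \frac{e^{-x}}{1 + e^{-x}}
\;=\; \delta\, s\,\big(1 - \sigma(x)\big),
\]

which equals the sigmoid adjoint \(\delta\,\sigma(x)(1 - \sigma(x))\) whenever the matcher binds sigmoid_fwd to \(\sigma(x)\). (A sketch of the algebra under that binding assumption, not a statement from the commit.)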

void pass::CoreFusion::construct_folded_batch_norm()
{
    Shape shape{2, 2, 1, 1};
@ -1125,80 +940,6 @@ void pass::CoreFusion::construct_zero_padded_conv()
    this->add_matcher(m, callback);
}

void pass::CoreFusion::construct_zero_padded_conv_backprop_filters()
{
    auto pad_input = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
    auto pad_value = std::make_shared<pattern::op::Label>(element::f32, Shape{});
    auto pad = std::make_shared<ngraph::op::Pad>(
        pad_input, pad_value, CoordinateDiff{0, 0, 0, 0}, CoordinateDiff{0, 0, 0, 0});
    auto pad_label = std::make_shared<pattern::op::Label>(pad, nullptr, NodeVector{pad});

    auto output_delta = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});

    auto conv = std::make_shared<ngraph::op::ConvolutionBackpropFilters>(pad_label,
                                                                         Shape{1, 1, 3, 3},
                                                                         output_delta,
                                                                         Strides{1, 1},
                                                                         Strides{1, 1},
                                                                         CoordinateDiff{1, 1},
                                                                         CoordinateDiff{1, 1},
                                                                         Strides{1, 1});
    auto conv_label = std::make_shared<pattern::op::Label>(conv, nullptr, NodeVector{conv});

    auto callback = [pad_input, pad_value, pad_label, output_delta, conv_label](
        pattern::Matcher& m) {
        auto pattern_map = m.get_pattern_map();

        auto pad_value_op = as_type_ptr<ngraph::op::Constant>(pattern_map[pad_value]);
        if (!pad_value_op)
        {
            NGRAPH_DEBUG << "Pad value must be a constant";
            return false;
        }

        const auto& matched_conv = std::static_pointer_cast<ngraph::op::ConvolutionBackpropFilters>(
            pattern_map[conv_label]);
        const auto& matched_pad = std::static_pointer_cast<ngraph::op::Pad>(pattern_map[pad_label]);

        if (!zero_padded_conv_consistency_check(m.get_match_root(),
                                                pad_value_op,
                                                pattern_map[pad_input],
                                                matched_pad,
                                                matched_conv->get_padding_below_forward(),
                                                matched_conv->get_padding_above_forward(),
                                                0,
                                                1))
        {
            return false;
        }

        CoordinateDiff padding_below{
            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_below().at(2)),
            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_below().at(3))};
        CoordinateDiff padding_above{
            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_above().at(2)),
            static_cast<CoordinateDiff::value_type>(matched_pad->get_padding_above().at(3))};

        auto zero_padded_conv_backprop_filters =
            std::make_shared<ngraph::op::ConvolutionBackpropFilters>(
                pattern_map[pad_input],
                matched_conv->get_filters_shape(),
                pattern_map[output_delta],
                matched_conv->get_window_movement_strides_forward(),
                matched_conv->get_window_dilation_strides_forward(),
                padding_below,
                padding_above,
                matched_conv->get_data_dilation_strides_forward());

        ngraph::replace_node(m.get_match_root(), zero_padded_conv_backprop_filters);
        return true;
    };

    auto m = std::make_shared<ngraph::pattern::Matcher>(conv_label,
                                                        "CoreFusion.ZeroPaddedConvBackpropFilters");
    this->add_matcher(m, callback);
}

void pass::CoreFusion::construct_conv_bias()
{
    Shape shape{2, 2, 1, 1};
@ -38,16 +38,12 @@ public:
        construct_folded_batch_norm();
        construct_conv_affine_folding();
        construct_sigmoid();
        construct_sigmoid_bprop();
        construct_optimized_strided_conv();
        construct_reshape_broadcast();
        construct_reshape_softmax_reshape();
        construct_zero_padded_reshaped_conv();
        construct_zero_padded_conv();
        construct_zero_padded_conv_backprop_filters();
        construct_softmax_cross_entropy_fprop();
        construct_softmax_cross_entropy_bprop_with_soft_labels();
        construct_softmax_cross_entropy_bprop_with_ignore_mask();
    }
    // Patterns under FOP_FUSIONS create ops (FusedOps) that might not
    // be all supported by certain backends. In such a case, backends
@ -63,16 +59,12 @@ public:
    void construct_folded_batch_norm();
    void construct_conv_affine_folding();
    void construct_sigmoid();
    void construct_sigmoid_bprop();
    void construct_optimized_strided_conv();
    void construct_reshape_broadcast();
    void construct_reshape_softmax_reshape();
    void construct_zero_padded_reshaped_conv();
    void construct_zero_padded_conv();
    void construct_zero_padded_conv_backprop_filters();
    void construct_conv_bias();
    void construct_conv_bias_add();
    void construct_softmax_cross_entropy_fprop();
    void construct_softmax_cross_entropy_bprop_with_soft_labels();
    void construct_softmax_cross_entropy_bprop_with_ignore_mask();
};
@ -53,59 +53,6 @@ namespace
        return op_cast_binary_elementwise_node<op::v0::And, op::v1::LogicalAnd>(node);
    }

    shared_ptr<Node> op_cast(shared_ptr<op::AvgPool> node)
    {
        auto rounding_mode =
            node->get_ceil_mode() ? op::RoundingType::CEIL : op::RoundingType::FLOOR;
        auto exclude_pad = !node->get_include_padding_in_avg_computation();
        auto auto_pad = node->get_pad_type();
        auto pads_begin = node->get_padding_below();
        auto pads_end = node->get_padding_above();
        auto strides = node->get_window_movement_strides();
        auto kernel = node->get_window_shape();

        auto replacement_node = make_shared<op::v1::AvgPool>(node->input_value(0),
                                                             strides,
                                                             pads_begin,
                                                             pads_end,
                                                             kernel,
                                                             exclude_pad,
                                                             rounding_mode,
                                                             auto_pad);
#if defined(__clang__) && __clang_major__ == 3
        // There are some really bad clang 3.9 bugs
        if (node->get_ceil_mode())
        {
            replacement_node->set_rounding_type(op::RoundingType::CEIL);
        }
        else
        {
            replacement_node->set_rounding_type(op::RoundingType::FLOOR);
        }
#endif
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::AvgPoolBackprop> node)
    {
        auto exclude_pad = !node->get_include_padding_in_avg_computation();
        auto pads_begin = node->get_padding_below();
        auto pads_end = node->get_padding_above();
        auto strides = node->get_window_movement_strides();
        auto kernel = node->get_window_shape();

        auto replacement_node = make_shared<op::v1::AvgPoolBackprop>(node->input_value(0),
                                                                     node->input_value(1),
                                                                     strides,
                                                                     pads_begin,
                                                                     pads_end,
                                                                     kernel,
                                                                     exclude_pad);
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::Broadcast> node)
    {
        auto replacement_node = ngraph::builder::opset1::make_broadcast(
@ -178,38 +125,6 @@ namespace
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::ConvolutionBackpropFilters> node)
    {
        auto filters_shape = node->get_filters_shape();
        auto strides = node->get_window_movement_strides_forward();
        auto dilations = node->get_window_dilation_strides_forward();
        auto pads_begin = node->get_padding_below_forward();
        auto pads_end = node->get_padding_above_forward();
        auto data_dilation_strides = node->get_data_dilation_strides_forward();

        bool is_dds_valid = all_of(data_dilation_strides.begin(),
                                   data_dilation_strides.end(),
                                   [](size_t value) { return value == 1; });

        NGRAPH_CHECK(
            is_dds_valid,
            "Unable to convert ConvolutionBackpropFilters:0 to ConvolutionBackpropFilters:1 "
            "with data dilation strides "
            "other than `1`. Node: ",
            *node);

        auto replacement_node =
            make_shared<op::v1::ConvolutionBackpropFilters>(node->input_value(0),
                                                            node->input_value(1),
                                                            node->input_value(2),
                                                            strides,
                                                            dilations,
                                                            pads_begin,
                                                            pads_end);
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::Divide> node)
    {
        const auto autob = node->get_autob();
@ -402,33 +317,6 @@ namespace
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::MaxPoolBackprop> node)
    {
        auto pads_begin = node->get_padding_below();
        auto pads_end = node->get_padding_above();
        auto strides = node->get_window_movement_strides();
        auto kernel = node->get_window_shape();

        shared_ptr<Node> replacement_node;
        if (node->get_input_size() == 3)
        {
            replacement_node = make_shared<op::v1::MaxPoolBackprop>(node->input_value(0),
                                                                    node->input_value(1),
                                                                    node->input_value(2),
                                                                    strides,
                                                                    pads_begin,
                                                                    pads_end,
                                                                    kernel);
        }
        else
        {
            replacement_node = make_shared<op::v1::MaxPoolBackprop>(
                node->input_value(0), node->input_value(1), strides, pads_begin, pads_end, kernel);
        }
        replace_node(node, replacement_node);
        return replacement_node;
    }
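The op_cast overloads above spell out the v0 to v1 attribute renaming for pooling: window_shape becomes kernel, window_movement_strides becomes strides, and padding_below/padding_above become pads_begin/pads_end, with the v1 constructors taking them in a different order. Summarized as a comment sketch (the getter names are the ones used in this file):

    // v0 getter                        -> v1 constructor argument
    // get_window_movement_strides()    -> strides     (first attribute)
    // get_padding_below()              -> pads_begin
    // get_padding_above()              -> pads_end
    // get_window_shape()               -> kernel      (last attribute)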

    shared_ptr<Node> op_cast(shared_ptr<op::Min> node)
    {
        bool keep_dims = false;
@ -1011,46 +1011,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            break;
        }

        case OP_TYPEID::AvgPool:
        {
            auto window_shape = node_js.at("window_shape").get<vector<size_t>>();
            auto window_movement_strides =
                node_js.at("window_movement_strides").get<vector<size_t>>();
            auto padding_below = node_js.at("padding_below").get<vector<size_t>>();
            auto padding_above = node_js.at("padding_above").get<vector<size_t>>();
            auto include_padding_in_avg_computation =
                node_js.at("include_padding_in_avg_computation").get<bool>();
            op::PadType pad_type = read_pad_type(node_js);
            bool ceil_mode = get_or_default<bool>(node_js, "ceil_mode", false);
            node = make_shared<op::v0::AvgPool>(args[0],
                                                window_shape,
                                                window_movement_strides,
                                                padding_below,
                                                padding_above,
                                                include_padding_in_avg_computation,
                                                pad_type,
                                                ceil_mode);
            break;
        }
        case OP_TYPEID::AvgPoolBackprop:
        {
            auto forward_arg_shape = node_js.at("forward_arg_shape").get<vector<size_t>>();
            auto window_shape = node_js.at("window_shape").get<vector<size_t>>();
            auto window_movement_strides =
                node_js.at("window_movement_strides").get<vector<size_t>>();
            auto padding_below = node_js.at("padding_below").get<vector<size_t>>();
            auto padding_above = node_js.at("padding_above").get<vector<size_t>>();
            auto include_padding_in_avg_computation =
                get_or_default<bool>(node_js, "include_padding_in_avg_computation", false);
            node = make_shared<op::v0::AvgPoolBackprop>(forward_arg_shape,
                                                        args[0],
                                                        window_shape,
                                                        window_movement_strides,
                                                        padding_below,
                                                        padding_above,
                                                        include_padding_in_avg_computation);
            break;
        }
        case OP_TYPEID::BatchMatMul:
        {
            node = make_shared<op::BatchMatMul>(args[0], args[1]);
@ -1079,14 +1039,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                args[2], args[0], args[1], args[3], args[4], epsilon);
            break;
        }
        case OP_TYPEID::BatchNormTrainingBackprop:
        {
            auto epsilon = node_js.at("eps").get<double>();
            // Odd order for back-compatibility
            node = make_shared<op::BatchNormTrainingBackprop>(
                args[2], args[0], args[1], args[3], args[4], args[5], epsilon);
            break;
        }
        case OP_TYPEID::Broadcast:
        {
            auto shape = node_js.at("shape").get<vector<size_t>>();
@ -1208,29 +1160,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                data_dilation_strides_forward);
            break;
        }
        case OP_TYPEID::ConvolutionBackpropFilters:
        {
            auto filters_shape = node_js.at("filters_shape").get<vector<size_t>>();
            auto window_movement_strides_forward =
                node_js.at("window_movement_strides_forward").get<vector<size_t>>();
            auto window_dilation_strides_forward =
                node_js.at("window_dilation_strides_forward").get<vector<size_t>>();
            auto padding_below_forward =
                node_js.at("padding_below_forward").get<vector<std::ptrdiff_t>>();
            auto padding_above_forward =
                node_js.at("padding_above_forward").get<vector<std::ptrdiff_t>>();
            auto data_dilation_strides_forward =
                node_js.at("data_dilation_strides_forward").get<vector<size_t>>();
            node = make_shared<op::v0::ConvolutionBackpropFilters>(args[0],
                                                                   filters_shape,
                                                                   args[1],
                                                                   window_movement_strides_forward,
                                                                   window_dilation_strides_forward,
                                                                   padding_below_forward,
                                                                   padding_above_forward,
                                                                   data_dilation_strides_forward);
            break;
        }
        case OP_TYPEID::ConvolutionBias:
        {
            auto window_movement_strides =
@ -1272,32 +1201,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                data_dilation_strides);
            break;
        }
        case OP_TYPEID::ConvolutionBiasBackpropFiltersBias:
        {
            auto filters_shape = node_js.at("filters_shape").get<vector<size_t>>();
            auto bias_shape = node_js.at("bias_shape").get<vector<size_t>>();
            auto window_movement_strides_forward =
                node_js.at("window_movement_strides_forward").get<vector<size_t>>();
            auto window_dilation_strides_forward =
                node_js.at("window_dilation_strides_forward").get<vector<size_t>>();
            auto padding_below_forward =
                node_js.at("padding_below_forward").get<vector<std::ptrdiff_t>>();
            auto padding_above_forward =
                node_js.at("padding_above_forward").get<vector<std::ptrdiff_t>>();
            auto data_dilation_strides_forward =
                node_js.at("data_dilation_strides_forward").get<vector<size_t>>();
            node =
                make_shared<op::ConvolutionBiasBackpropFiltersBias>(args[0],
                                                                    filters_shape,
                                                                    bias_shape,
                                                                    args[1],
                                                                    window_movement_strides_forward,
                                                                    window_dilation_strides_forward,
                                                                    padding_below_forward,
                                                                    padding_above_forward,
                                                                    data_dilation_strides_forward);
            break;
        }
        case OP_TYPEID::Cos:
        {
            node = make_shared<op::Cos>(args[0]);
@ -1322,14 +1225,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            node = make_shared<op::CrossEntropy>(args[0], args[1], soft_label, ignore_index);
            break;
        }
        case OP_TYPEID::CrossEntropyBackprop:
        {
            auto soft_label = node_js.at("soft_label");
            auto ignore_index = node_js.at("ignore_index");
            node = make_shared<op::CrossEntropyBackprop>(
                args[0], args[1], args[2], soft_label, ignore_index);
            break;
        }
        case OP_TYPEID::CropAndResize:
        {
            auto resize_method =
@ -1505,11 +1400,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            node = make_shared<op::Gelu>(args[0]);
            break;
        }
        case OP_TYPEID::GeluBackpropFactor:
        {
            node = make_shared<op::GeluBackpropFactor>(args[0]);
            break;
        }
        case OP_TYPEID::Gemm:
        {
            auto alpha = node_js.at("alpha").get<double>();
@ -1614,26 +1504,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                groups);
            break;
        }
        case OP_TYPEID::GroupConvolutionBackpropFilters:
        {
            auto window_movement_strides =
                node_js.at("window_movement_strides").get<vector<size_t>>();
            auto window_dilation_strides =
                node_js.at("window_dilation_strides").get<vector<size_t>>();
            auto padding_below = node_js.at("padding_below").get<vector<std::ptrdiff_t>>();
            auto padding_above = node_js.at("padding_above").get<vector<std::ptrdiff_t>>();
            auto groups = node_js.at("groups").get<size_t>();

            node = make_shared<op::GroupConvolutionBackpropFilters>(args[0],
                                                                    args[1],
                                                                    args[2],
                                                                    window_movement_strides,
                                                                    window_dilation_strides,
                                                                    padding_below,
                                                                    padding_above,
                                                                    groups);
            break;
        }
        case OP_TYPEID::HardSigmoid:
        {
            node = make_shared<op::HardSigmoid>(args[0], args[1], args[2]);
@ -1656,34 +1526,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            }
            break;
        }
        case OP_TYPEID::LayerNormBackprop:
        {
            auto use_stats = node_js.at("use_stats").get<bool>();
            auto use_affine = node_js.at("use_affine").get<bool>();
            auto epsilon = node_js.at("epsilon").get<double>();
            auto begin_norm_axis = node_js.at("begin_norm_axis").get<int64_t>();
            if (use_stats && use_affine)
            {
                node = make_shared<op::LayerNormBackprop>(
                    args[0], args[1], args[2], args[3], args[4], begin_norm_axis, epsilon);
            }
            else if (use_stats)
            {
                node = make_shared<op::LayerNormBackprop>(
                    args[0], args[1], args[2], args[3], begin_norm_axis, epsilon);
            }
            else if (use_affine)
            {
                node = make_shared<op::LayerNormBackprop>(
                    args[0], args[1], args[2], begin_norm_axis, epsilon);
            }
            else
            {
                node =
                    make_shared<op::LayerNormBackprop>(args[0], args[1], begin_norm_axis, epsilon);
            }
            break;
        }
        case OP_TYPEID::Less:
        {
            node = make_shared<op::v0::Less>(
@ -1872,34 +1714,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)

            break;
        }
        case OP_TYPEID::MaxPoolBackprop:
        {
            auto window_shape = node_js.at("window_shape").get<vector<size_t>>();
            auto window_movement_strides =
                node_js.at("window_movement_strides").get<vector<size_t>>();
            auto padding_below = node_js.at("padding_below").get<vector<size_t>>();
            auto padding_above = node_js.at("padding_above").get<vector<size_t>>();
            if (args.size() == 3)
            {
                node = make_shared<op::v0::MaxPoolBackprop>(args[0],
                                                            args[1],
                                                            args[2],
                                                            window_shape,
                                                            window_movement_strides,
                                                            padding_below,
                                                            padding_above);
            }
            else
            {
                node = make_shared<op::v0::MaxPoolBackprop>(args[0],
                                                            args[1],
                                                            window_shape,
                                                            window_movement_strides,
                                                            padding_below,
                                                            padding_above);
            }
            break;
        }
        case OP_TYPEID::Maximum:
        {
            node = make_shared<op::v0::Maximum>(
@ -2031,15 +1845,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
                args[0], axes, lower_bounds, upper_bounds, decrease_axes);
            break;
        }
        case OP_TYPEID::PartialSliceBackprop:
        {
            auto axes = node_js.at("axes").get<vector<size_t>>();
            auto lower_bounds = node_js.at("lower_bounds").get<vector<int64_t>>();
            auto upper_bounds = node_js.at("upper_bounds").get<vector<int64_t>>();
            node = make_shared<op::PartialSliceBackprop>(
                args[0], args[1], axes, lower_bounds, upper_bounds);
            break;
        }
        case OP_TYPEID::Passthrough:
        {
            std::vector<json> outputs_js = node_js.at("output_shapes");
@ -2188,11 +1993,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            node = make_shared<op::Relu>(args[0]);
            break;
        }
        case OP_TYPEID::ReluBackprop:
        {
            node = make_shared<op::ReluBackprop>(args[0], args[1]);
            break;
        }
        case OP_TYPEID::ReplaceSlice:
        {
            auto lower_bounds = node_js.at("lower_bounds").get<vector<size_t>>();
@ -2346,11 +2146,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            node = make_shared<op::Sigmoid>(args[0]);
            break;
        }
        case OP_TYPEID::SigmoidBackprop:
        {
            node = make_shared<op::SigmoidBackprop>(args[0], args[1]);
            break;
        }
        case OP_TYPEID::Sign:
        {
            node = make_shared<op::Sign>(args[0]);
@ -2395,14 +2190,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
            node = make_shared<op::SoftmaxCrossEntropy>(args[0], args[1], soft_label, ignore_index);
            break;
        }
        case OP_TYPEID::SoftmaxCrossEntropyBackprop:
        {
            auto soft_label = node_js.at("soft_label");
            auto ignore_index = node_js.at("ignore_index");
            node = make_shared<op::SoftmaxCrossEntropyBackprop>(
                args[0], args[1], args[2], soft_label, ignore_index);
            break;
        }
        case OP_TYPEID::SpaceToDepth:
        {
            auto block_size = node_js.at("block_size").get<size_t>();
@ -2775,32 +2562,6 @@ json JSONSerializer::serialize_node(const Node& n)
        }
        break;
    }
    case OP_TYPEID::AvgPool:
    {
        auto tmp = static_cast<const op::v0::AvgPool*>(&n);
        node["window_shape"] = tmp->get_window_shape();
        node["window_movement_strides"] = tmp->get_window_movement_strides();
        node["padding_below"] = tmp->get_padding_below();
        node["padding_above"] = tmp->get_padding_above();
        node["include_padding_in_avg_computation"] = tmp->get_include_padding_in_avg_computation();
        node["pad_type"] = tmp->get_pad_type();
        if (tmp->get_ceil_mode())
        {
            node["ceil_mode"] = tmp->get_ceil_mode();
        }
        break;
    }
    case OP_TYPEID::AvgPoolBackprop:
    {
        auto tmp = static_cast<const op::v0::AvgPoolBackprop*>(&n);
        node["forward_arg_shape"] = tmp->get_forward_arg_shape();
        node["window_shape"] = tmp->get_window_shape();
        node["window_movement_strides"] = tmp->get_window_movement_strides();
        node["padding_below"] = tmp->get_padding_below();
        node["padding_above"] = tmp->get_padding_above();
        node["include_padding_in_avg_computation"] = tmp->get_include_padding_in_avg_computation();
        break;
    }
    case OP_TYPEID::BatchMatMul: { break;
    }
    case OP_TYPEID::BatchMatMulTranspose:
@ -2822,12 +2583,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["eps"] = tmp->get_eps_value();
        break;
    }
    case OP_TYPEID::BatchNormTrainingBackprop:
    {
        auto tmp = static_cast<const op::BatchNormTrainingBackprop*>(&n);
        node["eps"] = tmp->get_eps_value();
        break;
    }
    case OP_TYPEID::Broadcast:
    {
        auto tmp = dynamic_cast<const op::v0::Broadcast*>(&n);
@ -2903,17 +2658,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward();
        break;
    }
    case OP_TYPEID::ConvolutionBackpropFilters:
    {
        auto tmp = static_cast<const op::v0::ConvolutionBackpropFilters*>(&n);
        node["filters_shape"] = tmp->get_filters_shape();
        node["window_movement_strides_forward"] = tmp->get_window_movement_strides_forward();
        node["window_dilation_strides_forward"] = tmp->get_window_dilation_strides_forward();
        node["padding_below_forward"] = tmp->get_padding_below_forward();
        node["padding_above_forward"] = tmp->get_padding_above_forward();
        node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward();
        break;
    }
    case OP_TYPEID::ConvolutionBias:
    {
        auto tmp = static_cast<const op::ConvolutionBias*>(&n);
@ -2934,18 +2678,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["data_dilation_strides"] = tmp->get_data_dilation_strides();
        break;
    }
    case OP_TYPEID::ConvolutionBiasBackpropFiltersBias:
    {
        auto tmp = static_cast<const op::ConvolutionBiasBackpropFiltersBias*>(&n);
        node["filters_shape"] = tmp->get_filters_shape();
        node["bias_shape"] = tmp->get_bias_shape();
        node["window_movement_strides_forward"] = tmp->get_window_movement_strides_forward();
        node["window_dilation_strides_forward"] = tmp->get_window_dilation_strides_forward();
        node["padding_below_forward"] = tmp->get_padding_below_forward();
        node["padding_above_forward"] = tmp->get_padding_above_forward();
        node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward();
        break;
    }
    case OP_TYPEID::Cos: { break;
    }
    case OP_TYPEID::Cosh: { break;
@ -2964,13 +2696,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["ignore_index"] = tmp->get_ignore_index();
        break;
    }
    case OP_TYPEID::CrossEntropyBackprop:
    {
        auto tmp = static_cast<const op::CrossEntropyBackprop*>(&n);
        node["soft_label"] = tmp->get_soft_label();
        node["ignore_index"] = tmp->get_ignore_index();
        break;
    }
    case OP_TYPEID::CropAndResize:
    {
        auto tmp = static_cast<const op::CropAndResize*>(&n);
@ -3107,8 +2832,6 @@ json JSONSerializer::serialize_node(const Node& n)
    }
    case OP_TYPEID::Gelu: { break;
    }
    case OP_TYPEID::GeluBackpropFactor: { break;
    }
    case OP_TYPEID::Gemm:
    {
        auto tmp = static_cast<const op::Gemm*>(&n);
@ -3179,16 +2902,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["groups"] = tmp->get_groups();
        break;
    }
    case OP_TYPEID::GroupConvolutionBackpropFilters:
    {
        auto tmp = static_cast<const op::GroupConvolutionBackpropFilters*>(&n);
        node["window_movement_strides"] = tmp->get_window_movement_strides();
        node["window_dilation_strides"] = tmp->get_window_dilation_strides();
        node["padding_below"] = tmp->get_padding_below();
        node["padding_above"] = tmp->get_padding_above();
        node["groups"] = tmp->get_groups();
        break;
    }
    case OP_TYPEID::HardSigmoid: { break;
    }
    case OP_TYPEID::LayerNorm:
@ -3200,15 +2913,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["begin_norm_axis"] = tmp->get_begin_norm_axis();
        break;
    }
    case OP_TYPEID::LayerNormBackprop:
    {
        auto tmp = static_cast<const op::LayerNormBackprop*>(&n);
        node["use_stats"] = tmp->get_use_stats();
        node["use_affine"] = tmp->get_use_affine();
        node["epsilon"] = tmp->get_epsilon();
        node["begin_norm_axis"] = tmp->get_begin_norm_axis();
        break;
    }
    case OP_TYPEID::Less:
    {
        const op::util::BinaryElementwiseComparison* tmp = nullptr;
@ -3287,15 +2991,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["pad_type"] = tmp->get_pad_type();
        break;
    }
    case OP_TYPEID::MaxPoolBackprop:
    {
        auto tmp = static_cast<const op::v0::MaxPoolBackprop*>(&n);
        node["window_shape"] = tmp->get_window_shape();
        node["window_movement_strides"] = tmp->get_window_movement_strides();
        node["padding_below"] = tmp->get_padding_below();
        node["padding_above"] = tmp->get_padding_above();
        break;
    }
    case OP_TYPEID::Maximum:
    {
        const op::util::BinaryElementwiseArithmetic* tmp = nullptr;
@ -3410,14 +3105,6 @@ json JSONSerializer::serialize_node(const Node& n)
        node["decrease_axes"] = tmp->get_decrease_axes();
        break;
    }
    case OP_TYPEID::PartialSliceBackprop:
    {
        auto tmp = dynamic_cast<const op::PartialSliceBackprop*>(&n);
        node["axes"] = tmp->get_axes();
        node["lower_bounds"] = tmp->get_lower_bounds();
        node["upper_bounds"] = tmp->get_upper_bounds();
        break;
    }
    case OP_TYPEID::Passthrough:
    {
        auto tmp = static_cast<const op::Passthrough*>(&n);
@ -3511,8 +3198,6 @@ json JSONSerializer::serialize_node(const Node& n)
    }
    case OP_TYPEID::Relu: { break;
    }
    case OP_TYPEID::ReluBackprop: { break;
|
||||
}
|
||||
case OP_TYPEID::ReplaceSlice:
|
||||
{
|
||||
auto tmp = static_cast<const op::ReplaceSlice*>(&n);
|
||||
@ -3594,8 +3279,6 @@ json JSONSerializer::serialize_node(const Node& n)
|
||||
}
|
||||
case OP_TYPEID::Sigmoid: { break;
|
||||
}
|
||||
case OP_TYPEID::SigmoidBackprop: { break;
|
||||
}
|
||||
case OP_TYPEID::Sign: { break;
|
||||
}
|
||||
case OP_TYPEID::Sin: { break;
|
||||
@ -3669,13 +3352,6 @@ json JSONSerializer::serialize_node(const Node& n)
|
||||
node["ignore_index"] = tmp->get_ignore_index();
|
||||
break;
|
||||
}
|
||||
case OP_TYPEID::SoftmaxCrossEntropyBackprop:
|
||||
{
|
||||
auto tmp = static_cast<const op::SoftmaxCrossEntropyBackprop*>(&n);
|
||||
node["soft_label"] = tmp->get_soft_label();
|
||||
node["ignore_index"] = tmp->get_ignore_index();
|
||||
break;
|
||||
}
|
||||
case OP_TYPEID::Tan: { break;
|
||||
}
|
||||
case OP_TYPEID::Tanh: { break;
|
||||
|
@ -87,7 +87,6 @@ set(SRC
opset_pass/logical_xor_opset_pass.cpp
opset_pass/one_hot_opset_pass.cpp
opset_pass/gather_opset_pass.cpp
opset_pass/generate_mask_opset_pass.cpp
opset_pass/pad_opset_pass.cpp
opset_pass/poolings_opset_pass.cpp
opset_pass/reduction_opset_pass.cpp
@ -114,7 +113,6 @@ set(SRC
type_prop/all.cpp
type_prop/any.cpp
type_prop/assign.cpp
type_prop/avg_pool.cpp
type_prop/batch_mat_mul.cpp
type_prop/batch_mat_mul_transpose.cpp
type_prop/batch_norm.cpp
@ -311,7 +309,6 @@ set(MULTI_TEST_SRC
backend/builder_reduce_ops_opset1.in.cpp
backend/ceiling.in.cpp
backend/comparison.in.cpp
backend/computation_reuse.in.cpp
backend/concat.in.cpp
backend/constant.in.cpp
backend/convert.in.cpp

@ -39,373 +39,6 @@ using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, backwards_maxpool_n4_c1_hw4_2x2_max)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape_a{1, 4, 4, 4}; // in CHWN
Shape maxpool_shape{1, 4, 3, 3};

auto A = make_shared<op::Parameter>(element::i32, shape_a);
auto reshape = make_shared<op::Reshape>(
A, AxisVector{0, 3, 1, 2}, Shape{1, 4, 4, 4}); // convert CHWN to CNHW
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
auto maxpool = make_shared<op::MaxPool>(reshape, window_shape, window_movement_strides);
auto f = make_shared<Function>(maxpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::i32, maxpool_shape);
vector<int> dataEp(shape_size(maxpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::i32, shape_a);
shared_ptr<runtime::Tensor> output = backend->create_tensor(element::i32, shape_a);

vector<int> dataInput{11, 65, 44, 28, 31, 33, 21, 66, 40, 49, 69, 57, 47, 30, 24, 27,
13, 56, 46, 60, 61, 41, 25, 42, 48, 53, 51, 43, 59, 58, 29, 71,
17, 22, 72, 18, 39, 35, 15, 38, 64, 52, 73, 67, 62, 50, 10, 68,
45, 63, 16, 14, 55, 54, 37, 20, 36, 12, 70, 34, 19, 26, 32, 23};

vector<int> expected{// delta
0, 4, 0, 0, 0, 0, 0, 8, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 4, 4, 12, 0,
0, 0, 0, 8, 0, 0, 4, 8, 0, 8, 0, 0, 8, 0, 0, 0, 0, 4, 16, 4, 16, 8,
0, 0, 0, 4, 0, 4, 0, 0, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::i32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<int>(output) == expected);
}
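
Note: the op::Reshape calls in these tests double as transposes. Under the v0 Reshape semantics, the AxisVector lists the input axes in the order they should appear before the data is laid out in the target shape, which is how CHWN test data is fed to an NCHW-style MaxPool. A minimal standalone sketch of that behaviour (illustrative only, not part of this commit):

auto X = make_shared<op::Parameter>(element::f32, Shape{2, 3});
// input_order {1, 0} transposes the 2x3 input into a 3x2 output:
// [[1, 2, 3], [4, 5, 6]] -> [[1, 4], [2, 5], [3, 6]]
auto Xt = make_shared<op::Reshape>(X, AxisVector{1, 0}, Shape{3, 2});
// likewise, {0, 3, 1, 2} above maps axes (C, H, W, N) -> (C, N, H, W)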

NGRAPH_TEST(${BACKEND_NAME}, backwards_maxpool_n2_c1_hw5_3x3_str2_max)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape_a{1, 5, 5, 2}; // in CHWN
Shape maxpool_shape{1, 2, 2, 2};

auto A = make_shared<op::Parameter>(element::i32, shape_a);
auto reshape = make_shared<op::Reshape>(
A, AxisVector{0, 3, 1, 2}, Shape{1, 2, 5, 5}); // convert CHWN to CNHW
Shape window_shape{3, 3};
auto window_movement_strides = Strides{2, 2};
auto maxpool = make_shared<op::MaxPool>(reshape, window_shape, window_movement_strides);
auto f = make_shared<Function>(maxpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::i32, maxpool_shape);
vector<int> dataEp(shape_size(maxpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::i32, shape_a);
shared_ptr<runtime::Tensor> output = backend->create_tensor(element::i32, shape_a);

vector<int> dataInput{58, 15, 51, 35, 18, 47, 31, 32, 52, 21, 36, 38, 57, 54, 25, 45, 23,
30, 16, 27, 48, 20, 41, 37, 43, 39, 22, 28, 33, 29, 12, 17, 44, 42,
19, 40, 10, 46, 34, 53, 26, 55, 50, 13, 24, 14, 49, 56, 59, 11};

vector<int> expected{// delta
4, 0, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 4, 4, 0};

copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::i32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<int>(output) == expected);
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape_a{1, 5, 5, 2}; // in CHWN
Shape maxpool_shape{1, 2, 4, 5};

auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto reshape = make_shared<op::Reshape>(
A, AxisVector{0, 3, 1, 2}, Shape{1, 2, 5, 5}); // convert CHWN to CNHW
Shape window_shape{3, 3};
auto window_movement_strides = Strides{2, 2};
Shape pad_below{1, 2};
Shape pad_above{3, 4};
auto maxpool = make_shared<op::MaxPool>(
reshape, window_shape, window_movement_strides, pad_below, pad_above);
auto f = make_shared<Function>(maxpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::f32, maxpool_shape);
vector<float> dataEp(shape_size(maxpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::f32, shape_a);
shared_ptr<runtime::Tensor> output = backend->create_tensor(element::f32, shape_a);

vector<float> dataInput{58, 15, 51, 35, 18, 47, 31, 32, 52, 21, 36, 38, 57, 54, 25, 45, 23,
30, 16, 27, 48, 20, 41, 37, 43, 39, 22, 28, 33, 29, 12, 17, 44, 42,
19, 40, 10, 46, 34, 53, 26, 55, 50, 13, 24, 14, 49, 56, 59, 11};

vector<float> expected{// delta
8, 0, 0, 0, 0, 4, 0, 0, 8, 0, 0, 8, 4, 8, 0, 0, 0,
0, 0, 4, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 4, 12, 4, 8, 4, 0, 0, 0, 0, 4, 8, 0};
copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::f32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
EXPECT_EQ(expected, read_vector<float>(output));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_avgpool_n1_c1_hw2x2)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape padding{1, 1};

Shape shape_a{1, 1, 2, 2};
Shape avgpool_shape{1, 1, 2, 2};

auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{2, 2};
auto avgpool =
make_shared<op::AvgPool>(A, window_shape, window_movement_strides, padding, padding, false);
auto f = make_shared<Function>(avgpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::i32, avgpool_shape);
vector<int> dataEp(shape_size(avgpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::i32, shape_a);

shared_ptr<runtime::Tensor> output = backend->create_tensor(element::i32, shape_a);

vector<int> dataInput{4, 8, 12, 16};

vector<int> expected{1, 2, 3, 4};

copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::i32, avgpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<int>(output) == dataEp);
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_avgpool_n1_c1_hw4x4)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape_a{1, 1, 4, 4};
Shape avgpool_shape{1, 1, 3, 3};

auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
auto avgpool = make_shared<op::AvgPool>(A, window_shape, window_movement_strides);
auto f = make_shared<Function>(avgpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::i32, avgpool_shape);
vector<int> dataEp(shape_size(avgpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::i32, shape_a);

shared_ptr<runtime::Tensor> output = backend->create_tensor(element::i32, shape_a);

vector<int> dataInput{1, 3, 1, 3, 1, 3, 1, 3, 3, 5, 3, 5, 3, 5, 3, 5};

vector<int> expected{1, 2, 2, 1, 2, 4, 4, 2, 2, 4, 4, 2, 1, 2, 2, 1};

copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::i32, avgpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<int>(output) == expected);
}
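
A quick arithmetic check on the AvgPool expectations above (an informal derivation, not taken from the commit): backprop through AvgPool spreads each output delta uniformly over its window, so an input cell receives delta / divisor for every window that covers it. In backwards_avgpool_n1_c1_hw4x4 the 2x2 window has divisor 4 and every delta is 4, so each covering window contributes 4 / 4 = 1 and the gradient equals the cover count: 1 at the corners, 2 along the edges, 4 in the interior, matching expected{1, 2, 2, 1, 2, 4, 4, 2, ...}. In the padded backwards_avgpool_n1_c1_hw2x2 test, include_padding_in_avg_computation is false and each stride-2 window covers exactly one real cell, so the divisor is 1 and every input gradient equals the delta of 4, which is why that test asserts against dataEp rather than the unused expected vector.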

NGRAPH_TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw4x4)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape_a{2, 2, 4, 4};
Shape avgpool_shape{2, 2, 2, 2};

auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{2, 2};
auto avgpool = make_shared<op::AvgPool>(A, window_shape, window_movement_strides);
auto f = make_shared<Function>(avgpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::i32, avgpool_shape);
vector<int> dataEp(shape_size(avgpool_shape), 12);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::i32, shape_a);

shared_ptr<runtime::Tensor> output = backend->create_tensor(element::i32, shape_a);

vector<int> dataInput{// i1c1
                      1, 2, 6, 7, 3, 4, 4, 3, 19, 1, 2, 3, 18, 2, 3, 2,
                      // i1c2
                      4, 1, 5, 5, 1, 4, 5, 5, 12, 8, 2, 3, 15, 5, 3, 2,
                      // i2c1
                      2, 3, 7, 7, 3, 2, 3, 3, 13, 7, 1, 2, 7, 13, 3, 4,
                      // i2c2
                      1, 1, 2, 2, 7, 1, 2, 14, 6, 16, 4, 1, 14, 4, 4, 1};

vector<int> expected(shape_size(shape_a), 3);
copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::i32, avgpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<int>(output) == expected);
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw4x4_numeric)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
Shape shape_a{2, 2, 4, 4};
test::Uniform<float> rng(1.0f, 10.0f);

auto make_graph = [shape_a]() {
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{2, 2};
auto avgpool = make_shared<op::AvgPool>(A, window_shape, window_movement_strides);
return make_shared<Function>(avgpool, ParameterVector{A});

};

auto f = make_graph();
auto g = make_graph();
for (auto i = 0; i < ${TEST_LOOPS}; i++)
{
auto x = rng.initialize(backend->create_tensor(element::f32, shape_a));
EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x}, .01f, .01f));
}
}
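
For reference, the numeric tests in this file delegate to the autodiff_numeric_compare helper; our reading (an assumption inferred from the helper's name and usage, not spelled out in this commit) is that it checks the symbolic backprop of one graph against a finite-difference estimate on the other, element-wise within the given relative and absolute tolerances:

\frac{\partial f}{\partial x_i} \approx \frac{f(x + \delta e_i) - f(x)}{\delta}

where e_i is the i-th unit vector and \delta is a small step.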

NGRAPH_TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw4x4_win_2x2_str_1x1_numeric)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
Shape shape_a{2, 2, 4, 4};
test::Uniform<float> rng(1.0f, 10.0f);

auto make_graph = [shape_a]() {
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
auto avgpool = make_shared<op::AvgPool>(A, window_shape, window_movement_strides);
return make_shared<Function>(avgpool, ParameterVector{A});

};

auto f = make_graph();
auto g = make_graph();
for (auto i = 0; i < ${TEST_LOOPS}; i++)
{
auto x = rng.initialize(backend->create_tensor(element::f32, shape_a));
EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x}, .01f, .01f));
}
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw2x2_win_2x2_str_1x1_padding_numeric)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
Shape shape_a{2, 2, 4, 4};
test::Uniform<float> rng(1.0f, 10.0f);

auto make_graph = [shape_a]() {
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{2, 2};
Shape padding{1, 1};
auto window_movement_strides = Strides{2, 2};
auto avgpool = make_shared<op::AvgPool>(
A, window_shape, window_movement_strides, padding, padding, false);
return make_shared<Function>(avgpool, ParameterVector{A});

};

auto f = make_graph();
auto g = make_graph();
for (auto i = 0; i < ${TEST_LOOPS}; i++)
{
auto x = rng.initialize(backend->create_tensor(element::f32, shape_a));
EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x}, .01f, .01f));
}
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_abs)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
@ -1064,36 +697,6 @@ NGRAPH_TEST(${BACKEND_NAME}, backwards_power)
EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), make_graph, {x0, x1}, .01f, .01f));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_relu)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

test::Uniform<float> rng_neg(-1.0f, -0.01f);
test::Uniform<float> rng_pos(0.01f, 1.0f);
Shape shape{2, 3};
auto x0 = rng_neg.initialize(backend->create_tensor<float>(shape));
auto x1 = rng_pos.initialize(backend->create_tensor<float>(shape));

auto make_graph = [shape]() {
auto X = make_shared<op::Parameter>(element::f32, shape);
return make_shared<Function>(make_shared<op::Relu>(X),
std::vector<std::shared_ptr<op::Parameter>>{X});
};

auto f = make_graph();
auto g = make_graph();
for (auto i = 0; i < ${TEST_LOOPS}; i++)
{
auto x_neg = rng_neg.initialize(backend->create_tensor<float>(shape));

EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x_neg}, .01f, .01f));

auto x_pos = rng_pos.initialize(backend->create_tensor<float>(shape));

EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x_pos}, .01f, .01f));
}
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_replace_slice)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
@ -1192,36 +795,6 @@ NGRAPH_TEST(${BACKEND_NAME}, backwards_select_nested)
}
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_sigmoid)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

test::Uniform<float> rng_neg(-1.0f, -0.01f);
test::Uniform<float> rng_pos(0.01f, 1.0f);
Shape shape{2, 3};
auto x0 = rng_neg.initialize(backend->create_tensor<float>(shape));
auto x1 = rng_pos.initialize(backend->create_tensor<float>(shape));

auto make_graph = [shape]() {
auto X = make_shared<op::Parameter>(element::f32, shape);
return make_shared<Function>(make_shared<op::Sigmoid>(X),
std::vector<std::shared_ptr<op::Parameter>>{X});
};

auto f = make_graph();
auto g = make_graph();
for (auto i = 0; i < ${TEST_LOOPS}; i++)
{
auto x_neg = rng_neg.initialize(backend->create_tensor<float>(shape));

EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x_neg}, .01f, .01f));

auto x_pos = rng_pos.initialize(backend->create_tensor<float>(shape));

EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), f, g, {x_pos}, .01f, .01f));
}
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_sign)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
@ -1599,157 +1172,6 @@ NGRAPH_TEST(${BACKEND_NAME}, backwards_reverse_3d_02)
EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), make_graph, {x}, .01f, .01f));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_maxpool_n4c1h4w4_kh2kw2_sh1sw1)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");
Shape shape_a{4, 1, 4, 4}; // in NCHW
Shape maxpool_shape{4, 1, 3, 3};

auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{2, 2};
auto window_movement_strides = Strides{1, 1};
auto maxpool = make_shared<op::MaxPool>(A, window_shape, window_movement_strides);
auto f = make_shared<Function>(maxpool, ParameterVector{A});
shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::f32, maxpool_shape);
vector<float> dataEp(shape_size(maxpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::f32, shape_a);
shared_ptr<runtime::Tensor> output = backend->create_tensor(element::f32, shape_a);

vector<float> dataInput{11, 65, 44, 28, 31, 33, 21, 66, 40, 49, 69, 57, 47, 30, 24, 27,
13, 56, 46, 60, 61, 41, 25, 42, 48, 53, 51, 43, 59, 58, 29, 71,
17, 22, 72, 18, 39, 35, 15, 38, 64, 52, 73, 67, 62, 50, 10, 68,
45, 63, 16, 14, 55, 54, 37, 20, 36, 12, 70, 34, 19, 26, 32, 23};

vector<float> expected{// delta
0, 8, 0, 0, 0, 0, 0, 4, 0, 8, 16, 0, 0, 0, 0, 0, 0, 4, 0, 4, 8, 0,
0, 0, 0, 4, 4, 0, 4, 4, 0, 4, 0, 0, 8, 0, 4, 0, 0, 0, 8, 0, 16, 0,
0, 0, 0, 0, 0, 8, 0, 0, 4, 0, 4, 0, 4, 0, 16, 0, 0, 0, 0, 0};

copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::f32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<float>(output) == expected);
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_maxpool_n2c1h5w5_kh3kw3_sh2sw2)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape_a{1, 2, 5, 5}; // in NCHW
Shape maxpool_shape{1, 2, 2, 2};

auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape window_shape{3, 3};
auto window_movement_strides = Strides{2, 2};
auto maxpool = make_shared<op::MaxPool>(A, window_shape, window_movement_strides);
auto f = make_shared<Function>(maxpool, ParameterVector{A});

shared_ptr<runtime::Tensor> ep = backend->create_tensor(element::f32, maxpool_shape);
vector<float> dataEp(shape_size(maxpool_shape), 4);

shared_ptr<runtime::Tensor> input = backend->create_tensor(element::f32, shape_a);
shared_ptr<runtime::Tensor> output = backend->create_tensor(element::f32, shape_a);

vector<float> dataInput{58, 15, 51, 35, 18, 47, 31, 32, 52, 21, 36, 38, 57, 54, 25, 45, 23,
30, 16, 27, 48, 20, 41, 37, 43, 39, 22, 28, 33, 29, 12, 17, 44, 42,
19, 40, 10, 46, 34, 53, 26, 55, 50, 13, 24, 14, 49, 56, 59, 11};

vector<float> expected{// delta
4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 4, 0, 4, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0};

copy_data(ep, dataEp);
copy_data(input, dataInput);

auto C = make_shared<op::Parameter>(element::f32, maxpool_shape);
auto df = autodiff::backprop_function(f);
auto handle = backend->compile(df);
handle->call_with_validate({output}, {input, ep});
ASSERT_TRUE(read_vector<float>(output) == expected);
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_batch_norm_training_4d)
{
const Shape input_shape{10, 4, 5, 5};
const Shape channel_shape{input_shape.at(1)};
const double eps = 1e-3;

// Need to keep the output elements for mean and variance from going out of scope
// and getting freed.
NodeVector goes;

auto make_graph = [&input_shape, &channel_shape, &eps, &goes] {
const element::Type& et = element::f32;
auto input = make_shared<op::Parameter>(et, input_shape);
auto gamma = make_shared<op::Parameter>(et, channel_shape);
auto beta = make_shared<op::Parameter>(et, channel_shape);
auto BN = make_shared<op::BatchNormTraining>(input, gamma, beta, eps);
auto normed_input = make_shared<op::Result>(make_shared<op::GetOutputElement>(BN, 0));
auto mean = make_shared<op::Result>(make_shared<op::GetOutputElement>(BN, 1));
auto variance = make_shared<op::Result>(make_shared<op::GetOutputElement>(BN, 2));
goes.push_back(mean);
goes.push_back(variance);
// TODO autodiff testing with more than one result
auto f =
make_shared<Function>(ResultVector{normed_input}, ParameterVector{input, gamma, beta});
return f;
};

auto backend = runtime::Backend::create("${BACKEND_NAME}");
using T = float;
test::Uniform<T> rng(-5.0, 2.0);
auto input = rng.initialize(backend->create_tensor<T>(input_shape));
auto gamma = rng.initialize(backend->create_tensor<T>(channel_shape));
auto beta = rng.initialize(backend->create_tensor<T>(channel_shape));

EXPECT_TRUE(
autodiff_numeric_compare<T>(backend.get(), make_graph, {input, gamma, beta}, .005, .005));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_batch_norm_training_3d)
{
const Shape input_shape{10, 4, 5};
const Shape channel_shape{input_shape.at(1)};
const double eps = 1e-3;

// Need to keep the output elements for mean and variance from going out of scope
// and getting freed.
NodeVector goes;

auto make_graph = [&input_shape, &channel_shape, &eps, &goes] {
const element::Type& et = element::f32;
auto input = make_shared<op::Parameter>(et, input_shape);
auto gamma = make_shared<op::Parameter>(et, channel_shape);
auto beta = make_shared<op::Parameter>(et, channel_shape);
auto BN = make_shared<op::BatchNormTraining>(input, gamma, beta, eps);
auto normed_input = make_shared<op::Result>(make_shared<op::GetOutputElement>(BN, 0));
auto mean = make_shared<op::Result>(make_shared<op::GetOutputElement>(BN, 1));
auto variance = make_shared<op::Result>(make_shared<op::GetOutputElement>(BN, 2));
goes.push_back(mean);
goes.push_back(variance);
// TODO autodiff testing with more than one result
auto f =
make_shared<Function>(ResultVector{normed_input}, ParameterVector{input, gamma, beta});
return f;
};

auto backend = runtime::Backend::create("${BACKEND_NAME}");
using T = float;
test::Uniform<T> rng(-5.0, 2.0);
auto input = rng.initialize(backend->create_tensor<T>(input_shape));
auto gamma = rng.initialize(backend->create_tensor<T>(channel_shape));
auto beta = rng.initialize(backend->create_tensor<T>(channel_shape));

EXPECT_TRUE(
autodiff_numeric_compare<T>(backend.get(), make_graph, {input, gamma, beta}, .005, .005));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_reverse_sequence_n3_c2_h3)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

@ -681,105 +681,6 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_b2c2d2h1w1)
test::all_close(expected_variance, read_vector<float>(result_variance), 1e-5f, 1e-6f));
}

NGRAPH_TEST(${BACKEND_NAME}, batch_norm_bprop_n4c3h2w2)
{
auto input_shape = Shape{4, 3, 2, 2};
auto shape_mean = Shape{3};
auto input = make_shared<op::Parameter>(element::f32, input_shape);
auto mean_shape = Shape{3};
auto mean = make_shared<op::Parameter>(element::f32, mean_shape);
auto var_shape = Shape{3};
auto var = make_shared<op::Parameter>(element::f32, var_shape);
auto gamma_shape = Shape{3};
auto gamma = make_shared<op::Parameter>(element::f32, gamma_shape);
auto beta_shape = Shape{3};
auto beta = make_shared<op::Parameter>(element::f32, beta_shape);
double eps = 0.001;
auto shape_r = Shape{4, 3, 2, 2};
auto bn = make_shared<op::BatchNormTraining>(input, gamma, beta, eps);
auto bn_dx = make_shared<op::GetOutputElement>(bn, 0);
auto bn_dgamma = make_shared<op::GetOutputElement>(bn, 1);
auto bn_dbeta = make_shared<op::GetOutputElement>(bn, 2);

auto backend = runtime::Backend::create("${BACKEND_NAME}");

auto _input = backend->create_tensor(element::f32, input_shape);
vector<float> dataInput{
10.76331902f, 11.51178265f, 10.31018162f, 12.2993021f, 14.17626667f, 14.63498497f,
13.63494492f, 13.84248161f, 11.34602547f, 13.22014618f, 10.46686649f, 10.39842987f,
12.94806862f, 11.71670246f, 14.94438076f, 13.13236618f, 13.40889645f, 12.76128387f,
11.34430027f, 11.86629677f, 11.11464024f, 10.93221283f, 11.95324039f, 10.96581173f,
13.05455494f, 14.41404247f, 13.11169434f, 11.26559448f, 10.89965153f, 14.08202171f,
11.12685776f, 12.58428574f, 12.59247875f, 13.00187492f, 12.66310215f, 10.06655025f,
12.62048626f, 14.47942352f, 13.84950638f, 10.61425877f, 11.47936344f, 13.06011772f,
13.63069057f, 12.31748772f, 13.84555244f, 10.95815468f, 12.78933334f, 12.75389099f};
copy_data(_input, dataInput);
auto _mean = backend->create_tensor(element::f32, mean_shape);
copy_data(_mean, vector<float>{12.56472874f, 12.80312157f, 11.81676865f});
auto _var = backend->create_tensor(element::f32, var_shape);
copy_data(_var, vector<float>{1.94557643f, 1.32772446f, 1.28163588f});

auto _gamma = backend->create_tensor(element::f32, gamma_shape);
copy_data(_gamma, vector<float>{2.0f, 2.0f, 2.0f});
auto _beta = backend->create_tensor(element::f32, beta_shape);
copy_data(_beta, vector<float>{1.0f, 1.0f, 1.0f});
auto result = backend->create_tensor(element::f32, shape_r);

shared_ptr<runtime::Tensor> _delta = backend->create_tensor(element::f32, shape_r);
vector<float> deltaData(shape_size(shape_r), 20.0f);
copy_data(_delta, deltaData);

auto f = make_shared<Function>(NodeVector{bn_dx, bn_dgamma, bn_dbeta},
ParameterVector{mean, var, input, gamma, beta});

auto C = std::make_shared<op::Parameter>(element::f32, shape_r);

auto zero = ngraph::make_zero(bn_dgamma->get_element_type(), bn_dgamma->get_shape());
ngraph::autodiff::Adjoints adjoints(OutputVector{bn_dx, bn_dgamma, bn_dbeta},
OutputVector{C, zero, zero});

auto dinput = adjoints.backprop_output(input);
auto dgamma = adjoints.backprop_output(gamma);
auto dbeta = adjoints.backprop_output(beta);

auto df = make_shared<Function>(OutputVector{dinput, dgamma, dbeta},
ParameterVector{mean, var, input, gamma, beta, C});

#ifndef NGRAPH_JSON_DISABLE
// roundtrip serialization
string js = serialize(df, 4);
istringstream in(js);
df = deserialize(in);
#endif

shared_ptr<runtime::Tensor> _dinput = backend->create_tensor(element::f32, shape_r);
shared_ptr<runtime::Tensor> _dgamma = backend->create_tensor(element::f32, gamma_shape);
shared_ptr<runtime::Tensor> _dbeta = backend->create_tensor(element::f32, beta_shape);

auto handle = backend->compile(df);
handle->call_with_validate({_dinput, _dgamma, _dbeta},
{_mean, _var, _input, _gamma, _beta, _delta});

vector<float> expected_input{
8.17051607e-06f, 4.77576657e-06f, 1.02257760e-05f, 1.20387525e-06f, -1.73868522e-06f,
3.84632768e-06f, -1.07932050e-05f, -2.57458956e-06f, -2.22166714e-06f, -8.38779043e-06f,
-2.48082982e-06f, 5.89238360e-06f, -2.52895109e-07f, -8.68433445e-06f, -5.82726737e-06f,
8.84659658e-06f, 3.03944108e-05f, 4.05480879e-05f, 1.84123158e-05f, 2.30061178e-05f,
1.34087590e-05f, -9.26072571e-07f, -3.22908454e-05f, -2.07365116e-05f, -4.21330941e-05f,
2.83083100e-05f, -3.71039101e-05f, -4.84390640e-06f, -2.93012376e-05f, 5.68858087e-06f,
1.83181458e-05f, -1.07494506e-05f, -2.32429103e-06f, 6.92914809e-06f, -6.66512321e-06f,
-7.00302840e-06f, -3.46675184e-06f, -4.36748381e-06f, 6.73822226e-07f, -4.20158993e-06f,
3.83005061e-06f, 5.85143729e-06f, 4.17875243e-06f, -8.64167783e-06f, 1.00170803e-05f,
-4.23939666e-06f, 4.80201680e-06f, 4.62702078e-06f};

ASSERT_TRUE(ngraph::test::all_close(read_vector<float>(_dinput), expected_input, 1e-3f, 1e-4f));
vector<float> expected_dgamma{7.06315041e-05f, -2.35289335e-04f, -5.06639481e-05f};
ASSERT_TRUE(
ngraph::test::all_close(read_vector<float>(_dgamma), expected_dgamma, 1e-2f, 1e-3f));
vector<float> expected_dbeta{320.f, 320.f, 320.f};
ASSERT_TRUE(ngraph::test::all_close(read_vector<float>(_dbeta), expected_dbeta, 1e-4f, 1e-8f));
}
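
Unlike the earlier tests, which go through autodiff::backprop_function, this one drives the lower-level autodiff::Adjoints API directly. A minimal sketch of that pattern (illustrative only, with op::Abs as an arbitrary stand-in; the constructor and backprop_output calls are the same ones used above):

// Seed each forward output with an adjoint of matching shape, then read
// back the backprop output for any input of the graph.
auto x = make_shared<op::Parameter>(element::f32, Shape{2, 2});
auto y = make_shared<op::Abs>(x);
auto c = make_shared<op::Parameter>(element::f32, Shape{2, 2}); // seed adjoint
ngraph::autodiff::Adjoints adjoints(OutputVector{y}, OutputVector{c});
auto dx = adjoints.backprop_output(x); // adjoint of x, i.e. c scaled by dy/dx
auto df = make_shared<Function>(OutputVector{dx}, ParameterVector{x, c});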

NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1)
{
auto input_shape = Shape{2, 2, 2, 1};

@ -1,75 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/known_element_types.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

NGRAPH_TEST(${BACKEND_NAME}, computation_reuse)
{
Shape shape_a{1, 16, 2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{32, 16, 1, 1};
auto B = make_shared<op::Parameter>(element::f32, shape_b, true);
Shape shape_r{1, 32, 2, 2};
auto conv = make_shared<op::Convolution>(A,
B,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
Shape pool_shape{1, 1};
auto pool = make_shared<op::AvgPool>(conv, pool_shape);
auto bias = make_shared<op::Broadcast>(
op::Constant::create(element::f32, Shape{}, {2.14}), shape_r, AxisSet{0, 1, 2, 3});
auto result_op = make_shared<op::Result>(pool + bias);
auto f = make_shared<Function>(ResultVector{result_op}, ParameterVector{A, B});

auto backend = runtime::Backend::create("${BACKEND_NAME}");

vector<float> input(64, 1.0f);
vector<float> weights(512, 0.5f);
vector<float> rv(128);

auto a = backend->create_tensor(element::f32, shape_a);
auto b = backend->create_tensor(element::f32, shape_b);
auto result = backend->create_tensor(element::f32, shape_r);

copy_data(a, input);
copy_data(b, weights);

auto exec = backend->compile(f);
exec->call_with_validate({result}, {a, b});

vector<float> rv_saved(read_vector<float>(result));

b->set_stale(false);
exec->call_with_validate({result}, {a, b});
EXPECT_TRUE(test::all_close_f(rv_saved, read_vector<float>(result)));
}

@ -189,52 +189,3 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
handle->call_with_validate({result}, {a, b, c});
EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
}

// The purpose of this test is to check if we can allow
// filters_shape as a node rather than argument
NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_filter)
{
Shape shape_data{64, 3, 100};
auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
Shape shape_delta{64, 128, 96};
auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto filters_shape =
make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
auto strides = Strides{1};
auto dilations = Strides{1};
auto padding_begin = CoordinateDiff{2};
auto padding_end = CoordinateDiff{3};
auto conv1 = make_shared<op::v1::ConvolutionBackpropFilters>(
data, deltas, filters_shape, strides, dilations, padding_begin, padding_end);

auto f = make_shared<Function>(conv1, ParameterVector{data, deltas, filters_shape});

auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

auto handle = backend->compile(f);

auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

vector<float> input, delta, expected_result;

for (int i = 0; i < 64 * 3 * 100; i++)
input.emplace_back(i);

for (int i = 0; i < 64 * 128 * 96; i++)
delta.emplace_back(i);

for (int i = 0; i < 128 * 3 * 10; i++)
expected_result.emplace_back(i);

vector<int64_t> shapes = {128, 3, 10};

// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_data);
copy_data(a, input);
auto b = backend->create_tensor(element::f32, shape_delta);
copy_data(b, delta);
auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // requested filters shape
copy_data(c, shapes);
handle->call_with_validate({result}, {a, b, c});
EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
}

@ -349,42 +349,6 @@ NGRAPH_TEST(${BACKEND_NAME}, conv_bias_3d)
EXPECT_EQ(expected, read_vector<float>(result0));
}

NGRAPH_TEST(${BACKEND_NAME}, conv_bias_bprop_2d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 2, 2});
auto filters = make_shared<op::Parameter>(element::f32, Shape{2, 3, 1, 1});
auto bias = make_shared<op::Parameter>(element::f32, Shape{2});
auto delta = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 2});
auto conv_bprop = make_shared<op::ConvolutionBiasBackpropFiltersBias>(data,
filters->get_shape(),
bias->get_shape(),
delta,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
auto goe0 = make_shared<op::GetOutputElement>(conv_bprop, 0);
auto goe1 = make_shared<op::GetOutputElement>(conv_bprop, 1);
auto f0 = make_shared<Function>(NodeVector{goe0, goe1}, ParameterVector{data, delta});

auto backend = runtime::Backend::create("${BACKEND_NAME}");

// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, Shape{1, 3, 2, 2});
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
auto b = backend->create_tensor(element::f32, Shape{1, 2, 2, 2});
copy_data(b, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
auto result0 = backend->create_tensor(element::f32, filters->get_shape());
auto result1 = backend->create_tensor(element::f32, bias->get_shape());
auto handle = backend->compile(f0);
handle->call_with_validate({result0, result1}, {a, b});
vector<float> expected0{30, 70, 110, 70, 174, 278};
vector<float> expected1{10, 26};
EXPECT_EQ(expected0, read_vector<float>(result0));
EXPECT_EQ(expected1, read_vector<float>(result1));
}

NGRAPH_TEST(${BACKEND_NAME}, conv_bias_add_2d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 2, 2});

@ -102,92 +102,3 @@ NGRAPH_TEST(${BACKEND_NAME}, gelu_f64)
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f(input, read_vector<double>(result)));
}

static double gelu_backprop_factor(double x)
{
auto pi = 4.0 * std::atan(1.0);
return 0.5 * (1.0 + erf(x * sqrt(1.0 / 2.0))) + (x * exp(-x * x / 2.0)) / sqrt(2.0 * pi);
}
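
The factor above is the analytic derivative of the exact (erf-based) GELU; as a sanity check, the standard derivation (not part of this commit) is:

\mathrm{GELU}(x) = x\,\Phi(x), \qquad \Phi(x) = \tfrac{1}{2}\bigl(1 + \operatorname{erf}(x/\sqrt{2})\bigr)

\frac{d}{dx}\,\mathrm{GELU}(x) = \Phi(x) + x\,\phi(x), \qquad \phi(x) = \frac{e^{-x^{2}/2}}{\sqrt{2\pi}}

which matches the return expression term for term: 0.5 * (1 + erf(x / sqrt(2))) is \Phi(x), and (x * exp(-x * x / 2)) / sqrt(2 * pi) is x\,\phi(x).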

NGRAPH_TEST(${BACKEND_NAME}, gelu_backprop_factor_f32)
{
Shape shape{8};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::GeluBackpropFactor>(A), ParameterVector{A});

auto backend = runtime::Backend::create("${BACKEND_NAME}");

// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
copy_data(a, input);
auto result = backend->create_tensor(element::f32, shape);

std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float {
return static_cast<float>(gelu_backprop_factor(static_cast<double>(x)));
});

auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(
test::all_close_f(input, read_vector<float>(result), DEFAULT_FLOAT_TOLERANCE_BITS + 6));
}

NGRAPH_TEST(${BACKEND_NAME}, gelu_backprop_factor_f64)
{
Shape shape{8};
auto A = make_shared<op::Parameter>(element::f64, shape);
auto f = make_shared<Function>(make_shared<op::GeluBackpropFactor>(A), ParameterVector{A});

auto backend = runtime::Backend::create("${BACKEND_NAME}");

// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape);
vector<double> input{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
copy_data(a, input);
auto result = backend->create_tensor(element::f64, shape);

std::transform(input.begin(), input.end(), input.begin(), [](double x) -> double {
return gelu_backprop_factor(x);
});

auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f(input, read_vector<double>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_gelu_f32)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape{8};
auto make_graph = [shape]() {
auto A = make_shared<op::Parameter>(element::f32, shape);
return make_shared<Function>(make_shared<op::Gelu>(A), ParameterVector{A});
};

// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
vector<float> input{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
copy_data(a, input);

EXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), make_graph, {a}, .01f, .01f));
}

NGRAPH_TEST(${BACKEND_NAME}, backwards_gelu_f64)
{
auto backend = runtime::Backend::create("${BACKEND_NAME}");

Shape shape{8};
auto make_graph = [shape]() {
auto A = make_shared<op::Parameter>(element::f64, shape);
return make_shared<Function>(make_shared<op::Gelu>(A), ParameterVector{A});
};

// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape);
vector<double> input{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
copy_data(a, input);

EXPECT_TRUE(autodiff_numeric_compare<double>(backend.get(), make_graph, {a}, .01, .01));
}

@ -98,44 +98,3 @@ NGRAPH_TEST(${BACKEND_NAME}, generate_mask2)
ASSERT_TRUE(test::all_close_f(result2, result2_2));
ASSERT_FALSE(std::any_of(result2_2.begin(), result2_2.end(), is_not_zero_or_one));
}

NGRAPH_TEST(${BACKEND_NAME}, dyn_generate_mask)
{
const unsigned int seed = 777;
auto training = op::Constant::create(element::f32, Shape{}, {1});
auto result_shape =
make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
auto gen_mask =
make_shared<op::v1::GenerateMask>(training, result_shape, element::f32, seed, 0.5, true);
auto gen_mask2 =
make_shared<op::v1::GenerateMask>(training, result_shape, element::f32, seed, 0.5, true);
auto f = make_shared<Function>(NodeVector{gen_mask, gen_mask2}, ParameterVector{result_shape});

auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

auto is_not_zero_or_one = [](float num) { return num != 0.f && num != 1.f; };

vector<int64_t> shapes = {1, 128};
auto shape_result = backend->create_tensor(element::i64, Shape{shapes.size()});
copy_data(shape_result, shapes);
auto result_tv1 = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
auto result_tv2 = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
auto handle = backend->compile(f);
handle->call_with_validate({result_tv1, result_tv2}, {shape_result});
ASSERT_EQ(result_tv1->get_shape(), (Shape{1, 128}));
ASSERT_EQ(result_tv2->get_shape(), (Shape{1, 128}));
auto result1 = read_vector<float>(result_tv1);
auto result2 = read_vector<float>(result_tv2);
ASSERT_TRUE(test::all_close_f(result1, result2));
ASSERT_FALSE(std::any_of(result1.begin(), result1.end(), is_not_zero_or_one));

auto result_tv1_2 = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
auto result_tv2_2 = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
handle->call_with_validate({result_tv1_2, result_tv2_2}, {shape_result});
auto result1_2 = read_vector<float>(result_tv1_2);
auto result2_2 = read_vector<float>(result_tv2_2);
ASSERT_TRUE(test::all_close_f(result1, result1_2));
ASSERT_FALSE(std::any_of(result1_2.begin(), result1_2.end(), is_not_zero_or_one));
ASSERT_TRUE(test::all_close_f(result2, result2_2));
ASSERT_FALSE(std::any_of(result2_2.begin(), result2_2.end(), is_not_zero_or_one));
}

@ -83,56 +83,6 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_filters)
{
Shape shape_filter{6, 1, 3, 3};
auto filters = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
Shape shape_delta{2, 6, 3, 3};
auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
Shape shape_data_batch{2, 3, 5, 5};
auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto strides = Strides{1, 1};
auto dilations = Strides{1, 1};
auto padding_begin = CoordinateDiff{0, 0};
auto padding_end = CoordinateDiff{0, 0};
size_t groups = 3;

auto conv_bprop_filters = make_shared<op::GroupConvolutionBackpropFilters>(
data_batch, filters, deltas, strides, dilations, padding_begin, padding_end, groups);

auto f =
make_shared<Function>(conv_bprop_filters, ParameterVector{data_batch, filters, deltas});

auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

auto handle = backend->compile(f);

auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

vector<float> filter, delta, data, expected_result;

for (int i = 0; i < 6 * 1 * 3 * 3; i++)
filter.emplace_back(i);

for (int i = 0; i < 2 * 6 * 3 * 3; i++)
delta.emplace_back(i);

for (int i = 0; i < 2 * 3 * 5 * 5; i++)
data.emplace_back(i);

for (int i = 0; i < 6 * 1 * 3 * 3; i++)
expected_result.emplace_back(i);

auto a = backend->create_tensor(element::f32, shape_data_batch);
copy_data(a, data);
auto b = backend->create_tensor(element::f32, shape_filter);
copy_data(b, filter);
auto c = backend->create_tensor(element::f32, shape_delta);
copy_data(c, delta);
handle->call_with_validate({result}, {a, b, c});
EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, v1_group_conv_backprop_data)
{
const CoordinateDiff output_padding{1, 1};

@ -91,165 +91,3 @@ NGRAPH_TEST(${BACKEND_NAME}, layer_norm_affine_stats)
EXPECT_TRUE(test::all_close_f(exp_mean, read_vector<float>(mean)));
EXPECT_TRUE(test::all_close_f(exp_var, read_vector<float>(var)));
}

NGRAPH_TEST(${BACKEND_NAME}, layer_norm_bprop_affine_stats)
{
auto p_data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto p_delta = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto p_mean = make_shared<op::Parameter>(element::f32, Shape{2});
auto p_var = make_shared<op::Parameter>(element::f32, Shape{2});
auto p_scale = make_shared<op::Parameter>(element::f32, Shape{4});
auto lnb = make_shared<op::LayerNormBackprop>(p_data, p_delta, p_mean, p_var, p_scale);
auto f = make_shared<Function>(lnb->outputs(),
ParameterVector{p_data, p_delta, p_mean, p_var, p_scale});

auto backend = runtime::Backend::create("${BACKEND_NAME}");

// Create tensors for input
auto data = backend->create_tensor(element::f32, Shape{2, 4});
auto delta = backend->create_tensor(element::f32, Shape{2, 4});
auto mean = backend->create_tensor(element::f32, Shape{2});
auto var = backend->create_tensor(element::f32, Shape{2});
auto scale = backend->create_tensor(element::f32, Shape{4});
// Fill in input tensors
vector<float> d_input{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
copy_data(data, d_input);
vector<float> dt_input{0.1f, -0.1f, 0.2f, -0.2f, 0.1f, -0.1f, 0.2f, -0.2f};
copy_data(delta, dt_input);
vector<float> s_input{-1.0f, 1.0f, 2.0f, 3.0f};
copy_data(scale, s_input);
vector<float> m_input{-2.5f, 1.5f};
copy_data(mean, m_input);
vector<float> v_input{1.25f, 1.25f};
copy_data(var, v_input);
// Create tensors for output
auto d_data = backend->create_tensor(element::f32, Shape{2, 4});
auto d_scale = backend->create_tensor(element::f32, Shape{4});
auto d_bias = backend->create_tensor(element::f32, Shape{4});

// Expected results (manually computed)
|
||||
vector<float> exp_d_data{-0.1341624855995178223f,
|
||||
-0.04472083225846290588f,
|
||||
0.4919326305389404297f,
|
||||
-0.31304931640625f,
|
||||
-0.1341624855995178223f,
|
||||
-0.04472083225846290588f,
|
||||
0.4919326305389404297f,
|
||||
-0.31304931640625f};
|
||||
vector<float> exp_d_scale{-0.2683270871639251709f,
|
||||
0.08944236487150192261f,
|
||||
0.1788847297430038452f,
|
||||
-0.5366541743278503418f};
|
||||
vector<float> exp_d_bias{0.2f, -0.2f, 0.4f, -0.4f};
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({d_data, d_scale, d_bias}, {data, delta, mean, var, scale});
|
||||
EXPECT_TRUE(test::all_close_f(exp_d_data, read_vector<float>(d_data)));
|
||||
EXPECT_TRUE(test::all_close_f(exp_d_scale, read_vector<float>(d_scale)));
|
||||
EXPECT_TRUE(test::all_close_f(exp_d_bias, read_vector<float>(d_bias)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, layer_norm_bprop_affine)
{
    auto p_data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
    auto p_delta = make_shared<op::Parameter>(element::f32, Shape{2, 4});
    auto p_scale = make_shared<op::Parameter>(element::f32, Shape{4});
    auto lnb = make_shared<op::LayerNormBackprop>(p_data, p_delta, p_scale);
    auto f = make_shared<Function>(lnb->outputs(), ParameterVector{p_data, p_delta, p_scale});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create tensors for input
    auto data = backend->create_tensor(element::f32, Shape{2, 4});
    auto delta = backend->create_tensor(element::f32, Shape{2, 4});
    auto scale = backend->create_tensor(element::f32, Shape{4});
    // Fill in input tensors
    vector<float> d_input{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
    copy_data(data, d_input);
    vector<float> dt_input{0.1f, -0.1f, 0.2f, -0.2f, 0.1f, -0.1f, 0.2f, -0.2f};
    copy_data(delta, dt_input);
    vector<float> s_input{-1.0f, 1.0f, 2.0f, 3.0f};
    copy_data(scale, s_input);
    // Create tensors for output
    auto d_data = backend->create_tensor(element::f32, Shape{2, 4});
    auto d_scale = backend->create_tensor(element::f32, Shape{4});
    auto d_bias = backend->create_tensor(element::f32, Shape{4});

    // Expected results (Manually computed)
    vector<float> exp_d_data{-0.1341624855995178223f, -0.04472083225846290588f,
                             0.4919326305389404297f, -0.31304931640625f,
                             -0.1341624855995178223f, -0.04472083225846290588f,
                             0.4919326305389404297f, -0.31304931640625f};
    vector<float> exp_d_scale{-0.2683270871639251709f, 0.08944236487150192261f,
                              0.1788847297430038452f, -0.5366541743278503418f};
    vector<float> exp_d_bias{0.2f, -0.2f, 0.4f, -0.4f};

    auto handle = backend->compile(f);
    handle->call_with_validate({d_data, d_scale, d_bias}, {data, delta, scale});
    EXPECT_TRUE(test::all_close_f(exp_d_data, read_vector<float>(d_data)));
    EXPECT_TRUE(test::all_close_f(exp_d_scale, read_vector<float>(d_scale)));
    EXPECT_TRUE(test::all_close_f(exp_d_bias, read_vector<float>(d_bias)));
}

NGRAPH_TEST(${BACKEND_NAME}, layer_norm_bprop_4d_input)
{
    auto p_data = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
    auto p_delta = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
    auto p_mean = make_shared<op::Parameter>(element::f32, Shape{2});
    auto p_variance = make_shared<op::Parameter>(element::f32, Shape{2});
    auto p_scale = make_shared<op::Parameter>(element::f32, Shape{60});
    auto lnb = make_shared<op::LayerNormBackprop>(p_data, p_delta, p_mean, p_variance, p_scale);

    auto output_data = lnb->output(0);
    auto output_scale = lnb->output(1);
    auto output_bias = lnb->output(2);

    // flatten output_scale
    auto output_scale_shape = output_scale.get_shape();
    auto flattened_output_scale = make_shared<op::Reshape>(
        output_scale, get_default_order(output_scale_shape), Shape{shape_size(output_scale_shape)});

    auto f = make_shared<Function>(OutputVector{output_data, flattened_output_scale, output_bias},
                                   ParameterVector{p_data, p_delta, p_mean, p_variance, p_scale});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create tensors for input
    auto data = backend->create_tensor(element::f32, Shape{2, 3, 4, 5});
    auto delta = backend->create_tensor(element::f32, Shape{2, 3, 4, 5});
    auto mean = backend->create_tensor(element::f32, Shape{2});
    auto variance = backend->create_tensor(element::f32, Shape{2});
    auto scale = backend->create_tensor(element::f32, Shape{60});
    // Fill in input tensors
    vector<float> d_input(2 * 3 * 4 * 5, 1);
    copy_data(data, d_input);
    vector<float> dt_input(2 * 3 * 4 * 5, 1);
    copy_data(delta, dt_input);
    vector<float> m_input(2, 1);
    copy_data(mean, m_input);
    vector<float> v_input(2, 1);
    copy_data(variance, v_input);
    vector<float> s_input(60, 1);
    copy_data(scale, s_input);
    // Create tensors for output
    auto d_data = backend->create_tensor(element::f32, Shape{2, 3, 4, 5});
    auto d_scale = backend->create_tensor(element::f32, Shape{60});
    auto d_bias = backend->create_tensor(element::f32, Shape{60});

    auto handle = backend->compile(f);
    handle->call_with_validate({d_data, d_scale, d_bias}, {data, delta, mean, variance, scale});

    vector<float> expected_data(120, 0);
    vector<float> expected_scale(60, 0);
    vector<float> expected_bias(60, 2);

    EXPECT_TRUE(test::all_close(expected_data, read_vector<float>(d_data), 1e-5f, 1e-6f));
    EXPECT_TRUE(test::all_close(expected_scale, read_vector<float>(d_scale), 1e-5f, 1e-6f));
    EXPECT_TRUE(test::all_close(expected_bias, read_vector<float>(d_bias), 1e-5f, 1e-6f));
}

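// A minimal sketch (not the nGraph implementation; names are illustrative) of why the
// constants above hold: with all-ones data, mean = 1 and variance = 1, the normalized
// activations are (1 - 1) / sqrt(1 + eps) == 0, so d_scale = sum(delta * normalized) == 0;
// d_bias just reduces the all-ones delta over the two normalized rows, giving 2 everywhere;
// and a constant delta contributes nothing through the zero-mean normalization, so d_data == 0.
#include <cstddef>
#include <vector>

std::vector<float> layer_norm_d_bias(const std::vector<float>& delta, size_t rows, size_t channels)
{
    std::vector<float> d_bias(channels, 0.0f);
    for (size_t r = 0; r < rows; ++r)
    {
        for (size_t c = 0; c < channels; ++c)
        {
            d_bias[c] += delta[r * channels + c]; // reduce over normalized rows
        }
    }
    return d_bias;
}
// With delta all ones, rows == 2 and channels == 60 (shape {2,3,4,5} normalized over the
// trailing axes), every entry comes out as 2, matching expected_bias above.
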
@ -129,36 +129,3 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_unkown_rank)
    ASSERT_EQ(t_r->get_shape(), (Shape{1, 2, 2}));
    EXPECT_TRUE(test::all_close_f(v_r, read_vector<float>(t_r)));
}

NGRAPH_TEST(${BACKEND_NAME}, partial_slice_bprop_unkown_rank)
{
    auto pshape_x = PartialShape::dynamic();
    auto pshape_dout = PartialShape::dynamic();
    auto x = make_shared<op::Parameter>(element::f32, pshape_x);
    auto dout = make_shared<op::Parameter>(element::f32, pshape_dout);
    AxisVector axes{0, 1};
    vector<int64_t> lower_bounds{1, 0};
    vector<int64_t> upper_bounds{2, 2};
    auto f = make_shared<Function>(
        make_shared<op::PartialSliceBackprop>(x, dout, axes, lower_bounds, upper_bounds),
        ParameterVector{x, dout});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

    // Create some tensors for input/output
    Shape shape_x{2, 3, 2};
    Shape shape_dout{1, 2, 2};
    auto t_x = backend->create_tensor(element::f32, shape_x);
    auto t_dout = backend->create_tensor(element::f32, shape_dout);
    vector<float> v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f};
    vector<float> v_dout{6.f, 7.f, 8.f, 9.f};
    copy_data(t_x, v_x);
    copy_data(t_dout, v_dout);
    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    auto handle = backend->compile(f);
    handle->call_with_validate({t_r}, {t_x, t_dout});
    vector<float> v_r{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 6.f, 7.f, 8.f, 9.f, 0.f, 0.f};
    ASSERT_EQ(t_r->get_shape(), (Shape{2, 3, 2}));
    EXPECT_TRUE(test::all_close_f(v_r, read_vector<float>(t_r)));
}

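// A self-contained sketch (not the nGraph kernel; the flat indexing and names are
// illustrative) of the scatter the test above checks: the gradient of a slice places
// dout back into a zero tensor of the forward input's shape at the sliced coordinates,
// and everything outside the slice gets zero gradient.
#include <cstddef>
#include <vector>

std::vector<float> slice_bprop_3d(const std::vector<float>& dout,
                                  size_t d0, size_t d1, size_t d2, // input shape
                                  size_t lb0, size_t lb1,          // lower bounds, axes 0 and 1
                                  size_t ub0, size_t ub1)          // upper bounds, axes 0 and 1
{
    std::vector<float> dx(d0 * d1 * d2, 0.0f);
    size_t k = 0;
    for (size_t i = lb0; i < ub0; ++i)
    {
        for (size_t j = lb1; j < ub1; ++j)
        {
            for (size_t l = 0; l < d2; ++l)
            {
                dx[(i * d1 + j) * d2 + l] = dout[k++]; // scatter dout into the slice
            }
        }
    }
    return dx;
}
// slice_bprop_3d({6, 7, 8, 9}, 2, 3, 2, 1, 0, 2, 2) reproduces the expected
// {0, 0, 0, 0, 0, 0, 6, 7, 8, 9, 0, 0} from the test above.
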
@ -108,52 +108,6 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_int8)
    EXPECT_EQ((vector<int8_t>{2, 2, 2, 2, 2, 2}), read_vector<int8_t>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_uint8)
{
    vector<uint8_t> a_data = {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0, 1};
    Shape shape_a{1, 1, 3, 5};
    Shape window_shape{2, 3};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{0, 0};
    Shape padding_above{0, 0};
    Shape shape_r{1, 1, 2, 3};
    auto A = make_shared<op::Parameter>(element::u8, shape_a);
    auto QAP = make_shared<ngraph::op::AvgPool>(
        A, window_shape, window_movement_strides, padding_below, padding_above);
    auto f = make_shared<Function>(NodeVector{QAP}, ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::u8, shape_a);
    copy_data(a, a_data);
    auto result = backend->create_tensor(element::u8, shape_r);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<uint8_t>{1, 1, 1, 1, 1, 0}), read_vector<uint8_t>(result));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_int8)
{
    vector<int8_t> a_data = {10, 1, 0, -2, 1, 0, -3, 4, 0, 0, 2, 0, 0, 0, 1};
    Shape shape_a{1, 1, 3, 5};
    Shape window_shape{2, 3};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{0, 0};
    Shape padding_above{0, 0};
    Shape shape_r{1, 1, 2, 3};
    auto A = make_shared<op::Parameter>(element::i8, shape_a);
    auto QAP = make_shared<ngraph::op::AvgPool>(
        A, window_shape, window_movement_strides, padding_below, padding_above);
    auto f = make_shared<Function>(NodeVector{QAP}, ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i8, shape_a);
    copy_data(a, a_data);
    auto result = backend->create_tensor(element::i8, shape_r);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<int8_t>{2, 0, 0, 0, 0, 1}), read_vector<int8_t>(result));
}

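// Note on the quantized expectations above: each output is the 2x3-window mean rounded to
// the nearest integer, and the observed tie-breaking goes to even (e.g. a window summing
// to 3 gives 3/6 = 0.5 -> 0). That behavior is consistent with rounding under the default
// FE_TONEAREST mode; a hedged sketch of that assumption:
#include <cmath>
#include <cstdint>

inline int8_t avg_window_rounded(int sum, int count)
{
    // std::nearbyint rounds half-to-even under the default rounding mode.
    return static_cast<int8_t>(std::nearbyint(static_cast<double>(sum) / count));
}
// avg_window_rounded(3, 6) == 0 and avg_window_rounded(5, 6) == 1, matching the
// int8 expectations above.
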
NGRAPH_TEST(${BACKEND_NAME}, max_pool_1d_1channel_2image)
{
    Shape shape_a{2, 1, 14};
@ -531,910 +485,3 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
            test::all_close_f(cpu_results.at(i), int_results.at(i), MIN_FLOAT_TOLERANCE_BITS));
    }
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_1channel_1image)
{
    Shape shape_a{1, 1, 14};
    Shape window_shape{3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{1, 1, 12};
    auto f = make_shared<Function>(make_shared<op::AvgPool>(A, window_shape), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 3>{{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}}}.get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    float denom = 3.0;

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 3>({{{1 / denom, 3 / denom, 3 / denom, 3 / denom, 4 / denom,
                                   5 / denom, 5 / denom, 2 / denom, 2 / denom, 2 / denom,
                                   2 / denom, 0 / denom}}})
            .get_vector(),
        read_vector<float>(result)));
}

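// For reference, a minimal standalone model (illustrative, not the nGraph kernel) of the
// 1-D pooling these tests exercise: each output is the mean of `window` consecutive
// inputs, so the first expected entry above is (0 + 1 + 0) / 3 == 1 / denom.
#include <cstddef>
#include <vector>

std::vector<float> avg_pool_1d_ref(const std::vector<float>& in, size_t window)
{
    std::vector<float> out;
    for (size_t i = 0; i + window <= in.size(); ++i)
    {
        float sum = 0.0f;
        for (size_t j = 0; j < window; ++j)
        {
            sum += in[i + j]; // accumulate the sliding window
        }
        out.push_back(sum / static_cast<float>(window));
    }
    return out;
}
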
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_1channel_2image)
{
    Shape shape_a{2, 1, 14};
    Shape window_shape{3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 12};
    auto f = make_shared<Function>(make_shared<op::AvgPool>(A, window_shape), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}},
                                       {{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    float denom = 3.0;

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 3>({{{1 / denom, 3 / denom, 3 / denom, 3 / denom, 4 / denom,
                                   5 / denom, 5 / denom, 2 / denom, 2 / denom, 2 / denom,
                                   2 / denom, 0 / denom}},
                                 {{3 / denom, 4 / denom, 2 / denom, 1 / denom, 0 / denom,
                                   2 / denom, 2 / denom, 3 / denom, 1 / denom, 1 / denom,
                                   1 / denom, 3 / denom}}})
            .get_vector(),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_1d_2channel_2image)
{
    Shape shape_a{2, 2, 14};
    Shape window_shape{3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 2, 12};
    auto f = make_shared<Function>(make_shared<op::AvgPool>(A, window_shape), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0},
                                        {0, 0, 0, 2, 0, 0, 2, 3, 0, 1, 2, 0, 1, 0}},

                                       {{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2},
                                        {2, 1, 0, 0, 1, 0, 2, 0, 0, 0, 1, 1, 2, 0}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    float denom = 3.0;

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 3>({{{1 / denom, 3 / denom, 3 / denom, 3 / denom, 4 / denom,
                                   5 / denom, 5 / denom, 2 / denom, 2 / denom, 2 / denom,
                                   2 / denom, 0 / denom},
                                  {0 / denom, 2 / denom, 2 / denom, 2 / denom, 2 / denom,
                                   5 / denom, 5 / denom, 4 / denom, 3 / denom, 3 / denom,
                                   3 / denom, 1 / denom}},

                                 {{3 / denom, 4 / denom, 2 / denom, 1 / denom, 0 / denom,
                                   2 / denom, 2 / denom, 3 / denom, 1 / denom, 1 / denom,
                                   1 / denom, 3 / denom},
                                  {3 / denom, 1 / denom, 1 / denom, 1 / denom, 3 / denom,
                                   2 / denom, 2 / denom, 0 / denom, 1 / denom, 2 / denom,
                                   4 / denom, 3 / denom}}})
            .get_vector(),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image)
{
    Shape shape_a{2, 2, 5, 5};
    Shape window_shape{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 2, 4, 3};
    auto f = make_shared<Function>(make_shared<op::AvgPool>(A, window_shape), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>({{{{0, 1, 0, 2, 1}, // img 0 chan 0
                                         {0, 3, 2, 0, 0},
                                         {2, 0, 0, 0, 1},
                                         {2, 0, 1, 1, 2},
                                         {0, 2, 1, 0, 0}},

                                        {{0, 0, 0, 2, 0}, // img 0 chan 1
                                         {0, 2, 3, 0, 1},
                                         {2, 0, 1, 0, 2},
                                         {3, 1, 0, 0, 0},
                                         {2, 0, 0, 0, 0}}},

                                       {{{0, 2, 1, 1, 0}, // img 1 chan 0
                                         {0, 0, 2, 0, 1},
                                         {0, 0, 1, 2, 3},
                                         {2, 0, 0, 3, 0},
                                         {0, 0, 0, 0, 0}},

                                        {{2, 1, 0, 0, 1}, // img 1 chan 1
                                         {0, 2, 0, 0, 0},
                                         {1, 1, 2, 0, 2},
                                         {1, 1, 1, 0, 1},
                                         {1, 0, 0, 0, 2}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    float denom = 2 * 3;

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});

    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 4>({{{{6 / denom, 8 / denom, 5 / denom}, // img 0 chan 0
                                   {7 / denom, 5 / denom, 3 / denom},
                                   {5 / denom, 2 / denom, 5 / denom},
                                   {6 / denom, 5 / denom, 5 / denom}},

                                  {{5 / denom, 7 / denom, 6 / denom}, // img 0 chan 1
                                   {8 / denom, 6 / denom, 7 / denom},
                                   {7 / denom, 2 / denom, 3 / denom},
                                   {6 / denom, 1 / denom, 0 / denom}}},

                                 {{{5 / denom, 6 / denom, 5 / denom}, // img 1 chan 0
                                   {3 / denom, 5 / denom, 9 / denom},
                                   {3 / denom, 6 / denom, 9 / denom},
                                   {2 / denom, 3 / denom, 3 / denom}},

                                  {{5 / denom, 3 / denom, 1 / denom}, // img 1 chan 1
                                   {6 / denom, 5 / denom, 4 / denom},
                                   {7 / denom, 5 / denom, 6 / denom},
                                   {4 / denom, 2 / denom, 4 / denom}}}})
            .get_vector(),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_strided)
{
    Shape shape_a{1, 1, 8, 8};
    Shape window_shape{2, 3};
    auto window_movement_strides = Strides{3, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{1, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(A, window_shape, window_movement_strides), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>({{{{0, 1, 0, 2, 1, 2, 0, 0},
                                         {0, 3, 2, 0, 0, 0, 1, 0},
                                         {2, 0, 0, 0, 1, 0, 0, 0},
                                         {2, 0, 1, 1, 2, 2, 3, 0},
                                         {0, 2, 1, 0, 0, 0, 1, 0},
                                         {2, 0, 3, 1, 0, 0, 0, 0},
                                         {1, 2, 0, 0, 0, 1, 2, 0},
                                         {1, 0, 2, 0, 0, 0, 1, 0}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    float denom = 2 * 3;

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{6 / denom, 5 / denom, 4 / denom},
                                                             {6 / denom, 5 / denom, 8 / denom},
                                                             {6 / denom, 2 / denom, 4 / denom}}}})
                                      .get_vector(),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded_do_not_include_in_computation)
{
    Shape shape_a{1, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{1, 1};
    Shape padding_above{1, 1};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{1, 1, 4, 4};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, test::NDArray<float, 4>({{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}}}).get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(
        test::all_close(test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 2, 1.0f / 2, 0.0f / 1},
                                                   {0.0f / 2, 4.0f / 4, 6.0f / 4, 2.0f / 2},
                                                   {2.0f / 2, 5.0f / 4, 5.0f / 4, 2.0f / 2},
                                                   {2.0f / 1, 2.0f / 2, 0.0f / 2, 0.0f / 1}}}})
                            .get_vector(),
                        read_vector<float>(result)));
}

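// Why the denominators above vary: with include_padding == false, each output averages
// only over window positions that land on real input, so a 2x2 window hanging over a
// padded corner divides by 1, an edge by 2, and an interior position by 4. A per-axis
// sketch of that count (stride 1 assumed; names are illustrative):
#include <algorithm>
#include <cstddef>

std::ptrdiff_t valid_count_1d(std::ptrdiff_t out_idx,
                              std::ptrdiff_t window,
                              std::ptrdiff_t pad_below,
                              std::ptrdiff_t input_len)
{
    std::ptrdiff_t start = out_idx - pad_below; // window start in input coordinates
    std::ptrdiff_t lo = std::max<std::ptrdiff_t>(start, 0);
    std::ptrdiff_t hi = std::min<std::ptrdiff_t>(start + window, input_len);
    return hi - lo;
}
// The 2-D count is the product of the per-axis counts:
// valid_count_1d(0, 2, 1, 3) * valid_count_1d(0, 2, 1, 3) == 1 for the corner,
// matching the 0.0f / 1 entry in the expected output above.
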
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_1channel_1image_padded_include_in_computation)
{
    Shape shape_a{1, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{1, 1};
    Shape padding_above{1, 1};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{1, 1, 4, 4};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, test::NDArray<float, 4>({{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}}}).get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(
        test::all_close(test::NDArray<float, 4>({{{{0.0f / 4, 1.0f / 4, 1.0f / 4, 0.0f / 4},
                                                   {0.0f / 4, 4.0f / 4, 6.0f / 4, 2.0f / 4},
                                                   {2.0f / 4, 5.0f / 4, 5.0f / 4, 2.0f / 4},
                                                   {2.0f / 4, 2.0f / 4, 0.0f / 4, 0.0f / 4}}}})
                            .get_vector(),
                        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_do_not_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{1, 1};
    Shape padding_above{1, 1};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 4, 4};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(
        test::all_close(test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 2, 1.0f / 2, 0.0f / 1},
                                                   {0.0f / 2, 4.0f / 4, 6.0f / 4, 2.0f / 2},
                                                   {2.0f / 2, 5.0f / 4, 5.0f / 4, 2.0f / 2},
                                                   {2.0f / 1, 2.0f / 2, 0.0f / 2, 0.0f / 1}},
                                                  {{3.0f / 1, 8.0f / 2, 7.0f / 2, 2.0f / 1},
                                                   {5.0f / 2, 10.0f / 4, 16.0f / 4, 11.0f / 2},
                                                   {5.0f / 2, 11.0f / 4, 20.0f / 4, 14.0f / 2},
                                                   {3.0f / 1, 9.0f / 2, 11.0f / 2, 5.0f / 1}}}})
                            .get_vector(),
                        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{1, 1};
    Shape padding_above{1, 1};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 4, 4};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(
        test::all_close(test::NDArray<float, 4>({{{{0.0f / 4, 1.0f / 4, 1.0f / 4, 0.0f / 4},
                                                   {0.0f / 4, 4.0f / 4, 6.0f / 4, 2.0f / 4},
                                                   {2.0f / 4, 5.0f / 4, 5.0f / 4, 2.0f / 4},
                                                   {2.0f / 4, 2.0f / 4, 0.0f / 4, 0.0f / 4}},
                                                  {{3.0f / 4, 8.0f / 4, 7.0f / 4, 2.0f / 4},
                                                   {5.0f / 4, 10.0f / 4, 16.0f / 4, 11.0f / 4},
                                                   {5.0f / 4, 11.0f / 4, 20.0f / 4, 14.0f / 4},
                                                   {3.0f / 4, 9.0f / 4, 11.0f / 4, 5.0f / 4}}}})
                            .get_vector(),
                        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME},
            avg_pool_2d_2channel_2image_padded_only_below_do_not_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{1, 1};
    Shape padding_above{0, 0};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 2, 1.0f / 2},
                                                           {0.0f / 2, 4.0f / 4, 6.0f / 4},
                                                           {2.0f / 2, 5.0f / 4, 5.0f / 4}},
                                                          {{3.0f / 1, 8.0f / 2, 7.0f / 2},
                                                           {5.0f / 2, 10.0f / 4, 16.0f / 4},
                                                           {5.0f / 2, 11.0f / 4, 20.0f / 4}}}})
                                    .get_vector(),
                                read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_below_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{1, 1};
    Shape padding_above{0, 0};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{0.0f / 4, 1.0f / 4, 1.0f / 4},
                                                           {0.0f / 4, 4.0f / 4, 6.0f / 4},
                                                           {2.0f / 4, 5.0f / 4, 5.0f / 4}},
                                                          {{3.0f / 4, 8.0f / 4, 7.0f / 4},
                                                           {5.0f / 4, 10.0f / 4, 16.0f / 4},
                                                           {5.0f / 4, 11.0f / 4, 20.0f / 4}}}})
                                    .get_vector(),
                                read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME},
            avg_pool_2d_2channel_2image_padded_only_above_do_not_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{0, 0};
    Shape padding_above{1, 1};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{4.0f / 4, 6.0f / 4, 2.0f / 2},
                                                           {5.0f / 4, 5.0f / 4, 2.0f / 2},
                                                           {2.0f / 2, 0.0f / 2, 0.0f / 1}},
                                                          {{10.0f / 4, 16.0f / 4, 11.0f / 2},
                                                           {11.0f / 4, 20.0f / 4, 14.0f / 2},
                                                           {9.0f / 2, 11.0f / 2, 5.0f / 1}}}})
                                    .get_vector(),
                                read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_only_above_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{0, 0};
    Shape padding_above{1, 1};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close(test::NDArray<float, 4>({{{{4.0f / 4, 6.0f / 4, 2.0f / 4},
                                                           {5.0f / 4, 5.0f / 4, 2.0f / 4},
                                                           {2.0f / 4, 0.0f / 4, 0.0f / 4}},
                                                          {{10.0f / 4, 16.0f / 4, 11.0f / 4},
                                                           {11.0f / 4, 20.0f / 4, 14.0f / 4},
                                                           {9.0f / 4, 11.0f / 4, 5.0f / 4}}}})
                                    .get_vector(),
                                read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_3x3_padded_do_not_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{3, 3};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{2, 2};
    Shape padding_above{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 5, 5};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 2, 1.0f / 3, 1.0f / 2, 0.0f / 1},
                                   {0.0f / 2, 4.0f / 4, 6.0f / 6, 6.0f / 4, 2.0f / 2},
                                   {2.0f / 3, 6.0f / 6, 8.0f / 9, 6.0f / 6, 2.0f / 3},
                                   {2.0f / 2, 5.0f / 4, 7.0f / 6, 5.0f / 4, 2.0f / 2},
                                   {2.0f / 1, 2.0f / 2, 2.0f / 3, 0.0f / 2, 0.0f / 1}},
                                  {{3.0f / 1, 8.0f / 2, 10.0f / 3, 7.0f / 2, 2.0f / 1},
                                   {5.0f / 2, 10.0f / 4, 21.0f / 6, 16.0f / 4, 11.0f / 2},
                                   {8.0f / 3, 19.0f / 6, 35.0f / 9, 27.0f / 6, 16.0f / 3},
                                   {5.0f / 2, 11.0f / 4, 25.0f / 6, 20.0f / 4, 14.0f / 2},
                                   {3.0f / 1, 9.0f / 2, 14.0f / 3, 11.0f / 2, 5.0f / 1}}}})
            .get_vector(),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_3x3_padded_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{3, 3};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{2, 2};
    Shape padding_above{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 5, 5};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 4>({{{{0.0f / 9, 1.0f / 9, 1.0f / 9, 1.0f / 9, 0.0f / 9},
                                   {0.0f / 9, 4.0f / 9, 6.0f / 9, 6.0f / 9, 2.0f / 9},
                                   {2.0f / 9, 6.0f / 9, 8.0f / 9, 6.0f / 9, 2.0f / 9},
                                   {2.0f / 9, 5.0f / 9, 7.0f / 9, 5.0f / 9, 2.0f / 9},
                                   {2.0f / 9, 2.0f / 9, 2.0f / 9, 0.0f / 9, 0.0f / 9}},
                                  {{3.0f / 9, 8.0f / 9, 10.0f / 9, 7.0f / 9, 2.0f / 9},
                                   {5.0f / 9, 10.0f / 9, 21.0f / 9, 16.0f / 9, 11.0f / 9},
                                   {8.0f / 9, 19.0f / 9, 35.0f / 9, 27.0f / 9, 16.0f / 9},
                                   {5.0f / 9, 11.0f / 9, 25.0f / 9, 20.0f / 9, 14.0f / 9},
                                   {3.0f / 9, 9.0f / 9, 14.0f / 9, 11.0f / 9, 5.0f / 9}}}})
            .get_vector(),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME},
            avg_pool_2d_2channel_2image_3x3_strided_padded_do_not_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{3, 3};
    auto window_movement_strides = Strides{2, 2};
    Shape padding_below{2, 2};
    Shape padding_above{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{0.0f / 1, 1.0f / 3, 0.0f / 1},
                                                             {2.0f / 3, 8.0f / 9, 2.0f / 3},
                                                             {2.0f / 1, 2.0f / 3, 0.0f / 1}},
                                                            {{3.0f / 1, 10.0f / 3, 2.0f / 1},
                                                             {8.0f / 3, 35.0f / 9, 16.0f / 3},
                                                             {3.0f / 1, 14.0f / 3, 5.0f / 1}}}})
                                      .get_vector(),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_3x3_strided_padded_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{3, 3};
    auto window_movement_strides = Strides{2, 2};
    Shape padding_below{2, 2};
    Shape padding_above{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 3};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{0.0f / 9, 1.0f / 9, 0.0f / 9},
                                                             {2.0f / 9, 8.0f / 9, 2.0f / 9},
                                                             {2.0f / 9, 2.0f / 9, 0.0f / 9}},
                                                            {{3.0f / 9, 10.0f / 9, 2.0f / 9},
                                                             {8.0f / 9, 35.0f / 9, 16.0f / 9},
                                                             {3.0f / 9, 14.0f / 9, 5.0f / 9}}}})
                                      .get_vector(),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME},
            avg_pool_2d_2channel_2image_3x3_strided_uneven_padded_do_not_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{3, 3};
    auto window_movement_strides = Strides{2, 3};
    Shape padding_below{2, 2};
    Shape padding_above{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 2};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, false),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 4>(
            {{{{0.0f / 1, 1.0f / 2}, {2.0f / 3, 6.0f / 6}, {2.0f / 1, 0.0f / 2}},
              {{3.0f / 1, 7.0f / 2}, {8.0f / 3, 27.0f / 6}, {3.0f / 1, 11.0f / 2}}}})
            .get_vector(),
        read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME},
            avg_pool_2d_2channel_2image_3x3_strided_uneven_padded_include_in_computation)
{
    Shape shape_a{2, 1, 3, 3};
    Shape window_shape{3, 3};
    auto window_movement_strides = Strides{2, 3};
    Shape padding_below{2, 2};
    Shape padding_above{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_r{2, 1, 3, 2};
    auto f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, window_movement_strides, padding_below, padding_above, true),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a,
              test::NDArray<float, 4>(
                  {{{{0, 1, 0}, {0, 3, 2}, {2, 0, 0}}, {{3, 5, 2}, {2, 0, 9}, {3, 6, 5}}}})
                  .get_vector());
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f(
        test::NDArray<float, 4>(
            {{{{0.0f / 9, 1.0f / 9}, {2.0f / 9, 6.0f / 9}, {2.0f / 9, 0.0f / 9}},
              {{3.0f / 9, 7.0f / 9}, {8.0f / 9, 27.0f / 9}, {3.0f / 9, 11.0f / 9}}}})
            .get_vector(),
        read_vector<float>(result)));
}

// Params to drive avg_pool_3d testing variations
class avg_pool_3d_params : public ::testing::TestWithParam<bool>
{
protected:
    avg_pool_3d_params() { include_pad = GetParam(); }
    bool include_pad;
};

// avg_pool_3d test code using params
NGRAPH_TEST_P(${BACKEND_NAME}, avg_pool_3d_params, avg_pool_3d_uneven_strided_padded)
{
    Shape shape_a{64, 3, 12, 13, 15};
    Shape window_shape{4, 5, 4};
    auto move_strides = Strides{2, 3, 4};
    Shape padding_below{2, 3, 1};
    Shape padding_above{3, 1, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_a);

    auto cpu_f = make_shared<Function>(
        make_shared<op::AvgPool>(
            A, window_shape, move_strides, padding_below, padding_above, include_pad),
        ParameterVector{A});
    auto int_f = make_shared<Function>(
        make_shared<op::AvgPool>(
            B, window_shape, move_strides, padding_below, padding_above, include_pad),
        ParameterVector{B});
    test::Uniform<float> rng(0.0f, 1.0f);
    vector<vector<float>> args;

    for (shared_ptr<op::Parameter> param : int_f->get_parameters())
    {
        vector<float> tensor_val(shape_size(param->get_shape()));
        rng.initialize(tensor_val);
        args.push_back(tensor_val);
    }
    auto int_results = execute(int_f, args, "INTERPRETER");
    auto backend_results = execute(cpu_f, args, "${BACKEND_NAME}");
    for (size_t i = 0; i < backend_results.size(); i++)
    {
        EXPECT_TRUE(test::all_close_f(
            backend_results.at(i), int_results.at(i), DEFAULT_FLOAT_TOLERANCE_BITS + 1));
    }
}

NGRAPH_TEST(${BACKEND_NAME}, avg_pool_bprop_2d_2channel_2image_dyn_shape)
{
    Shape window_shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    Shape padding_below{0, 0};
    Shape padding_above{0, 0};
    Shape shape_d{2, 2, 2, 2};
    auto delta = make_shared<op::Parameter>(element::f32, shape_d);
    auto forward_arg_shape =
        make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});

    auto avg_pool_bprop = make_shared<op::v1::AvgPoolBackprop>(delta,
                                                               forward_arg_shape,
                                                               window_movement_strides,
                                                               padding_below,
                                                               padding_above,
                                                               window_shape,
                                                               true);

    auto f = make_shared<Function>(NodeVector{avg_pool_bprop},
                                   ParameterVector{delta, forward_arg_shape});

    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

    auto ex = backend->compile(f);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    vector<int64_t> shapes = {2, 2, 3, 3};

    // Create some tensors for input/output
    auto deltas = backend->create_tensor(element::f32, shape_d);
    copy_data(deltas,
              test::NDArray<float, 4>({{{{0.3, 0.3}, // img 0 chan 0
                                         {0.3, 0.3}},

                                        {{0.2, 0.2}, // img 0 chan 1
                                         {0.2, 0.2}}},

                                       {{{0.1, 0.1}, // img 1 chan 0
                                         {0.1, 0.1}},

                                        {{0.4, 0.4}, // img 1 chan 1
                                         {0.4, 0.4}}}})
                  .get_vector());

    auto forward_shape = backend->create_tensor(element::i64, Shape{shapes.size()});
    copy_data(forward_shape, shapes);

    float denom = 2 * 2;

    ex->call_with_validate({t_r}, {deltas, forward_shape});
    ex->call_with_validate({t_r}, {deltas, forward_shape});
    ex->call_with_validate({t_r}, {deltas, forward_shape});

    ASSERT_EQ(t_r->get_shape(), (Shape{2, 2, 3, 3}));
    EXPECT_TRUE(test::all_close_f(
        (test::NDArray<float, 4>({{{{0.3f / denom, 0.6f / denom, 0.3f / denom}, // img 0 chan 0
                                    {0.6f / denom, 1.2f / denom, 0.6f / denom},
                                    {0.3f / denom, 0.6f / denom, 0.3f / denom}},

                                   {{0.2f / denom, 0.4f / denom, 0.2f / denom}, // img 0 chan 1
                                    {0.4f / denom, 0.8f / denom, 0.4f / denom},
                                    {0.2f / denom, 0.4f / denom, 0.2f / denom}}},

                                  {{{0.1f / denom, 0.2f / denom, 0.1f / denom}, // img 1 chan 0
                                    {0.2f / denom, 0.4f / denom, 0.2f / denom},
                                    {0.1f / denom, 0.2f / denom, 0.1f / denom}},

                                   {{0.4f / denom, 0.8f / denom, 0.4f / denom}, // img 1 chan 1
                                    {0.8f / denom, 1.6f / denom, 0.8f / denom},
                                    {0.4f / denom, 0.8f / denom, 0.4f / denom}}}})
             .get_vector()),
        read_vector<float>(t_r),
        MIN_FLOAT_TOLERANCE_BITS));
}

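// What the expected tensor encodes: AvgPoolBackprop spreads each delta element uniformly
// over its forward-pass window, so an input cell accumulates delta / (window area) from
// every window that covers it. A minimal sketch under the test's configuration
// (stride 1, no padding; names are illustrative):
#include <cstddef>
#include <vector>

std::vector<float> avg_pool_bprop_2d_ref(const std::vector<float>& delta,
                                         size_t out_h, size_t out_w,
                                         size_t win_h, size_t win_w)
{
    size_t in_h = out_h + win_h - 1;
    size_t in_w = out_w + win_w - 1;
    std::vector<float> dx(in_h * in_w, 0.0f);
    float denom = static_cast<float>(win_h * win_w);
    for (size_t i = 0; i < out_h; ++i)
        for (size_t j = 0; j < out_w; ++j)
            for (size_t di = 0; di < win_h; ++di)
                for (size_t dj = 0; dj < win_w; ++dj)
                    dx[(i + di) * in_w + (j + dj)] += delta[i * out_w + j] / denom;
    return dx;
}
// For the 2x2 block of 0.3 deltas above this yields
// {0.3, 0.6, 0.3, 0.6, 1.2, 0.6, 0.3, 0.6, 0.3} / 4, i.e. the img 0 chan 0 block
// of the expected result.
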
// avg_pool_3d case generation
NGRAPH_INSTANTIATE_TEST_CASE_P(${BACKEND_NAME}, include_pad, avg_pool_3d_params, testing::Bool());

@ -110,49 +110,3 @@ NGRAPH_TEST(${BACKEND_NAME}, fuse_max_with_constant_zero_input_as_relu)
    handle->call_with_validate({result}, {b});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}

NGRAPH_TEST(${BACKEND_NAME}, relu_2Dbackprop)
{
    auto shape_a = Shape{2, 5};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto delta_val = make_shared<op::Parameter>(element::f32, shape_a);
    auto relu = make_shared<op::ReluBackprop>(A, delta_val);
    auto shape_rt = Shape{2, 5};
    auto f = make_shared<Function>(relu, ParameterVector{A, delta_val});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5});
    auto delta = backend->create_tensor(element::f32, shape_a);
    copy_data(delta, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
    auto result = backend->create_tensor(element::f32, shape_rt);
    vector<float> expected{1, 2, 0, 4, 0, 6, 7, 0, 9, 0};

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, delta});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}

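// The expected vector above is delta gated by the sign of the forward input: ReluBackprop
// passes delta through where the input was positive and zeroes it elsewhere. A one-loop
// reference sketch (illustrative, not the backend kernel):
#include <cstddef>
#include <vector>

std::vector<float> relu_bprop_ref(const std::vector<float>& x, const std::vector<float>& delta)
{
    std::vector<float> dx(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
    {
        dx[i] = x[i] > 0 ? delta[i] : 0.0f; // e.g. x == -8 -> 0, x == 17 -> delta
    }
    return dx;
}
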
NGRAPH_TEST(${BACKEND_NAME}, relu_4Dbackprop)
{
    auto shape_a = Shape{2, 2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto delta_val = make_shared<op::Parameter>(element::f32, shape_a);
    auto relu = make_shared<op::ReluBackprop>(A, delta_val);
    auto shape_rt = Shape{2, 2, 2, 2};
    auto f = make_shared<Function>(relu, ParameterVector{A, delta_val});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1});
    auto delta = backend->create_tensor(element::f32, shape_a);
    copy_data(delta, vector<float>{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1});
    auto result = backend->create_tensor(element::f32, shape_rt);
    vector<float> expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1};

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, delta});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected, MIN_FLOAT_TOLERANCE_BITS));
}

@ -89,38 +89,3 @@ NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h4)
    vector<float> expected{sigma1, sigma2, sigma1, sigma2};
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected));
}

#define DNNL_MIN_FLOAT_TOLERANCE_BITS 19

NGRAPH_TEST(${BACKEND_NAME}, sigmoid_bprop_n1c1h4)
{
    auto input = make_shared<op::Parameter>(element::f32, Shape{1, 1, 4});
    auto delta = make_shared<op::Parameter>(element::f32, Shape{1, 1, 4});
    auto sigmoid_node = make_shared<op::SigmoidBackprop>(input, delta);
    auto func = make_shared<Function>(sigmoid_node, ParameterVector{input, delta});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, input->get_shape());
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, delta->get_shape());
    shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, input->get_shape());

    float x1 = 1.0f;
    float x2 = 4.0f;
    float dt = 1.0f;
    float sigma1 = 1.0f / (1.0f + std::exp(-x1));
    float sigma2 = 1.0f / (1.0f + std::exp(-x2));
    float bprop1 = sigma1 * (1 - sigma1) * dt;
    float bprop2 = sigma2 * (1 - sigma2) * dt;

    vector<float> dataA{x1, x2, x1, x2};
    vector<float> dataB{dt, dt, dt, dt};

    copy_data(a, dataA);
    copy_data(b, dataB);
    auto handle = backend->compile(func);
    handle->call_with_validate({result}, {a, b});

    vector<float> expected{bprop1, bprop2, bprop1, bprop2};
    EXPECT_TRUE(
        test::all_close_f(expected, read_vector<float>(result), DNNL_MIN_FLOAT_TOLERANCE_BITS));
}

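// The expectation above is the chain rule written out: for y = sigmoid(x), dy/dx = y * (1 - y),
// so the backprop output is sigma(x) * (1 - sigma(x)) * delta, which is exactly what bprop1
// and bprop2 encode. As a reference form:
#include <cmath>

inline float sigmoid_bprop_ref(float x, float dy)
{
    float s = 1.0f / (1.0f + std::exp(-x));
    return s * (1.0f - s) * dy; // derivative of the logistic function times delta
}
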
@ -20,7 +20,7 @@
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/dyn_elimination.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp"

@ -20,9 +20,9 @@
#include "ngraph/frontend/onnx_import/onnx.hpp"
#include "ngraph/opsets/opset0.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_downgrade.hpp"
#include "ngraph/provenance.hpp"
#include "opset0_downgrade.hpp"
#include "util/provenance_enabler.hpp"
#include "util/test_control.hpp"
#include "util/type_prop.hpp"

@ -141,15 +141,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_AvgPoolBackprop()
    {
        op::AvgPoolBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_BatchMatMul()
    {
        op::BatchMatMul node;
@ -186,15 +177,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_BatchNormTrainingBackprop()
    {
        op::BatchNormTrainingBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Broadcast()
    {
        op::Broadcast node;
@ -285,15 +267,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_ConvolutionBackpropFilters()
    {
        op::ConvolutionBackpropFilters node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_ConvolutionBias()
    {
        op::ConvolutionBias node;
@ -312,15 +285,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_ConvolutionBiasBackpropFiltersBias()
    {
        op::ConvolutionBiasBackpropFiltersBias node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Cos()
    {
        op::Cos node;
@ -348,15 +312,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_CrossEntropyBackprop()
    {
        op::CrossEntropyBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_CropAndResize()
    {
        op::CropAndResize node;
@ -591,15 +546,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_GeluBackpropFactor()
    {
        op::GeluBackpropFactor node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Gemm()
    {
        op::Gemm node;
@ -663,15 +609,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_GroupConvolutionBackpropFilters()
    {
        op::GroupConvolutionBackpropFilters node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_HardSigmoid()
    {
        op::HardSigmoid node;
@ -699,15 +636,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_LayerNormBackprop()
    {
        op::LayerNormBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Less()
    {
        op::Less node;
@ -807,15 +735,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_MaxPoolBackprop()
    {
        op::MaxPoolBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Min()
    {
        op::Min node;
@ -924,15 +843,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_PartialSliceBackprop()
    {
        op::PartialSliceBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Passthrough()
    {
        op::Passthrough node;
@ -1077,15 +987,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_ReluBackprop()
    {
        op::ReluBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_TRUE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_ReplaceSlice()
    {
        op::ReplaceSlice node;
@ -1248,15 +1149,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_SigmoidBackprop()
    {
        op::SigmoidBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_TRUE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_Sign()
    {
        op::Sign node;
@ -1311,15 +1203,6 @@ namespace
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_SoftmaxCrossEntropyBackprop()
    {
        op::SoftmaxCrossEntropyBackprop node;
        EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
        EXPECT_FALSE(node.is_binary_elementwise_comparison());
        EXPECT_FALSE(node.is_binary_elementwise_logical());
    }

    void op_is_SpaceToDepth()
    {
        op::SpaceToDepth node;

@ -75,7 +75,6 @@ TEST(opset, check_opset1)
    CHECK_OPSET(op::v0::Convert, opset1::Convert)
    // TODO: CHECK_OPSET(op::v0::ConvertLike, opset1::ConvertLike)
    CHECK_OPSET(op::v1::Convolution, opset1::Convolution)
    CHECK_OPSET(op::v1::ConvolutionBackpropData, opset1::ConvolutionBackpropData)
    CHECK_OPSET(op::v0::Cos, opset1::Cos)
    CHECK_OPSET(op::v0::Cosh, opset1::Cosh)
    CHECK_OPSET(op::v0::CTCGreedyDecoder, opset1::CTCGreedyDecoder)

@ -3,8 +3,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/test_control.hpp"
#include "util/type_prop.hpp"

@ -4,8 +4,8 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;

@ -3,8 +3,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/test_control.hpp"
#include "util/type_prop.hpp"

@ -108,37 +108,6 @@ TEST(opset_transform, opset1_convolution_backprop_data_downgrade_pass)
    EXPECT_EQ(conv_v0_node->get_data_dilation_strides_forward(), (Strides{1}));
}

TEST(opset_transform, opset1_convolution_backprop_filters_downgrade_pass)
{
    auto filters_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {128, 3, 10});
    auto data = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
    auto delta = make_shared<op::Parameter>(element::f32, Shape{64, 128, 96});
    auto strides = Strides{1};
    auto dilations = Strides{1};
    auto padding_begin = CoordinateDiff{2};
    auto padding_end = CoordinateDiff{3};
    auto conv = make_shared<op::v1::ConvolutionBackpropFilters>(
        data, delta, filters_shape, strides, dilations, padding_begin, padding_end);
    auto result = make_shared<op::Result>(conv);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, delta});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();
    pass_manager.run_passes(f);

    auto conv_s0_result = f->get_results().at(0);
    auto node = conv_s0_result->get_input_node_shared_ptr(0);
    auto conv_v0_node = as_type_ptr<op::v0::ConvolutionBackpropFilters>(node);

    ASSERT_TRUE(conv_v0_node);
    EXPECT_EQ(conv_v0_node->get_filters_shape(), (Shape{128, 3, 10}));
    EXPECT_EQ(conv_v0_node->get_window_movement_strides_forward(), strides);
    EXPECT_EQ(conv_v0_node->get_window_dilation_strides_forward(), dilations);
    EXPECT_EQ(conv_v0_node->get_padding_below_forward(), padding_begin);
    EXPECT_EQ(conv_v0_node->get_padding_above_forward(), padding_end);
    EXPECT_EQ(conv_v0_node->get_data_dilation_strides_forward(), (Strides{1}));
}

TEST(opset_transform, opset1_group_convolution_backprop_data_downgrade_pass)
{
    auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{1}, {100});

@ -1,33 +0,0 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(opset_transform, opset1_generate_mask_downgrade_pass)
{
    Shape scalar{};
    const unsigned int seed = 777;
    auto training = op::Constant::create(element::f32, Shape{}, {1});
    auto result_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {1, 128});
    auto gen_mask =
        make_shared<op::v1::GenerateMask>(training, result_shape, element::f32, seed, 0.5, false);
    auto gen_mask2 =
        make_shared<op::v1::GenerateMask>(training, result_shape, element::f32, seed, 0.5, false);
    auto f = make_shared<Function>(NodeVector{gen_mask, gen_mask2}, ParameterVector{});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();
    pass_manager.run_passes(f);

    auto generate_mask_v0 = as_type_ptr<op::v0::GenerateMask>(
        f->get_results().at(0)->input_value(0).get_node_shared_ptr());
    ASSERT_TRUE(generate_mask_v0);
    EXPECT_EQ(generate_mask_v0->get_mask_shape(), (Shape{1, 128}));
}

@ -19,8 +19,8 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -19,8 +19,8 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -19,8 +19,8 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -19,8 +19,8 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -3,8 +3,8 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -3,8 +3,8 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
@ -3,82 +3,14 @@
|
||||
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/pass/manager.hpp"
|
||||
#include "ngraph/pass/opset0_downgrade.hpp"
|
||||
#include "ngraph/pass/opset1_upgrade.hpp"
|
||||
#include "opset0_downgrade.hpp"
|
||||
#include "util/test_control.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
TEST(opset_transform, opset1_avgpool_upgrade_pass_floor)
|
||||
{
|
||||
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
Shape pads_begin{0, 0};
|
||||
Shape pads_end{0, 0};
|
||||
Strides strides{1, 1};
|
||||
Shape kernel_shape{3, 3};
|
||||
bool include_pad = true;
|
||||
bool ceil_mode = false;
|
||||
op::PadType pad_mode = op::PadType::EXPLICIT;
|
||||
|
||||
auto avgpool_v0 = make_shared<op::v0::AvgPool>(
|
||||
arg, kernel_shape, strides, pads_begin, pads_end, include_pad, pad_mode, ceil_mode);
|
||||
auto result = make_shared<op::Result>(avgpool_v0);
|
||||
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
|
||||
|
||||
ngraph::pass::Manager pass_manager;
|
||||
pass_manager.register_pass<pass::Opset1Upgrade>();
|
||||
pass_manager.run_passes(f);
|
||||
|
||||
auto avgpool_s1_result = f->get_results().at(0);
|
||||
auto node = avgpool_s1_result->get_input_node_shared_ptr(0);
|
||||
auto avg_pool_v1_node = as_type_ptr<op::v1::AvgPool>(node);
|
||||
ASSERT_TRUE(avg_pool_v1_node);
|
||||
|
||||
EXPECT_EQ(avg_pool_v1_node->get_pads_begin(), pads_begin);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_pads_end(), pads_end);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_strides(), strides);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_kernel(), kernel_shape);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_rounding_type(), op::RoundingType::FLOOR);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_exclude_pad(), !include_pad);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_auto_pad(), pad_mode);
|
||||
}
|
||||
|
||||
TEST(opset_transform, opset1_avgpool_upgrade_pass_ceil)
|
||||
{
|
||||
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
Shape pads_begin{0, 0};
|
||||
Shape pads_end{0, 0};
|
||||
Strides strides{1, 1};
|
||||
Shape kernel_shape{3, 3};
|
||||
bool include_pad = true;
|
||||
bool ceil_mode = true;
|
||||
op::PadType pad_mode = op::PadType::EXPLICIT;
|
||||
|
||||
auto avgpool_v0 = make_shared<op::v0::AvgPool>(
|
||||
arg, kernel_shape, strides, pads_begin, pads_end, include_pad, pad_mode, ceil_mode);
|
||||
auto result = make_shared<op::Result>(avgpool_v0);
|
||||
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
|
||||
|
||||
ngraph::pass::Manager pass_manager;
|
||||
pass_manager.register_pass<pass::Opset1Upgrade>();
|
||||
pass_manager.run_passes(f);
|
||||
|
||||
auto avgpool_s1_result = f->get_results().at(0);
|
||||
auto node = avgpool_s1_result->get_input_node_shared_ptr(0);
|
||||
auto avg_pool_v1_node = as_type_ptr<op::v1::AvgPool>(node);
|
||||
ASSERT_TRUE(avg_pool_v1_node);
|
||||
|
||||
EXPECT_EQ(avg_pool_v1_node->get_pads_begin(), pads_begin);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_pads_end(), pads_end);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_strides(), strides);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_kernel(), kernel_shape);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_rounding_type(), op::RoundingType::CEIL);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_exclude_pad(), !include_pad);
|
||||
EXPECT_EQ(avg_pool_v1_node->get_auto_pad(), pad_mode);
|
||||
}
|
||||
|
||||
TEST(opset_transform, opset1_maxpool_upgrade_pass_fllor)
|
||||
{
|
||||
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
@ -143,46 +75,6 @@ TEST(opset_transform, opset1_maxpool_upgrade_pass_ceil)
|
||||
EXPECT_EQ(max_pool_v1_node->get_auto_pad(), pad_mode);
|
||||
}
|
||||
|
||||
TEST(opset_transform, opset1_avgpool_downgrade_pass)
|
||||
{
|
||||
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
Shape padding_below{1, 0};
|
||||
Shape padding_above{0, 1};
|
||||
Strides window_movement_strides{1, 1};
|
||||
Shape window_shape{3, 3};
|
||||
bool exclude_pad = false;
|
||||
auto rounding_type = op::RoundingType::FLOOR;
|
||||
op::PadType auto_pad = op::PadType::EXPLICIT;
|
||||
|
||||
auto avgpool_v1 = make_shared<op::v1::AvgPool>(arg,
|
||||
window_movement_strides,
|
||||
padding_below,
|
||||
padding_above,
|
||||
window_shape,
|
||||
exclude_pad,
|
||||
rounding_type,
|
||||
auto_pad);
|
||||
auto result = make_shared<op::Result>(avgpool_v1);
|
||||
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
|
||||
|
||||
ngraph::pass::Manager pass_manager;
|
||||
pass_manager.register_pass<pass::Opset0Downgrade>();
|
||||
pass_manager.run_passes(f);
|
||||
|
||||
auto avgpool_s0_result = f->get_results().at(0);
|
||||
auto node = avgpool_s0_result->get_input_node_shared_ptr(0);
|
||||
auto avg_pool_v0_node = as_type_ptr<op::v0::AvgPool>(node);
|
||||
ASSERT_TRUE(avg_pool_v0_node);
|
||||
|
||||
EXPECT_EQ(avg_pool_v0_node->get_padding_below(), padding_below);
|
||||
EXPECT_EQ(avg_pool_v0_node->get_padding_above(), padding_above);
|
||||
EXPECT_EQ(avg_pool_v0_node->get_window_movement_strides(), window_movement_strides);
|
||||
EXPECT_EQ(avg_pool_v0_node->get_window_shape(), window_shape);
|
||||
EXPECT_EQ(avg_pool_v0_node->get_ceil_mode(), false);
|
||||
EXPECT_EQ(avg_pool_v0_node->get_include_padding_in_avg_computation(), !exclude_pad);
|
||||
EXPECT_EQ(avg_pool_v0_node->get_pad_type(), auto_pad);
|
||||
}
|
||||
|
||||
TEST(opset_transform, opset1_maxpool_downgrade_pass)
|
||||
{
|
||||
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
@ -219,76 +111,3 @@ TEST(opset_transform, opset1_maxpool_downgrade_pass)
|
||||
EXPECT_EQ(max_pool_v0_node->get_ceil_mode(), false);
|
||||
EXPECT_EQ(max_pool_v0_node->get_pad_type(), pad_type);
|
||||
}
|
||||
|
||||
TEST(opset_transform, opset1_avgpool_backprop_downgrade_pass)
|
||||
{
|
||||
auto delta = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
auto forward_arg_shape =
|
||||
op::Constant::create(element::i64, Shape{4}, vector<int64_t>{1, 3, 7, 10});
|
||||
Shape padding_below{1, 0};
|
||||
Shape padding_above{0, 1};
|
||||
Strides window_movement_strides{1, 1};
|
||||
Shape window_shape{3, 3};
|
||||
bool exclude_pad = false;
|
||||
|
||||
auto avgpool_backprop_v1 = make_shared<op::v1::AvgPoolBackprop>(delta,
|
||||
forward_arg_shape,
|
||||
window_movement_strides,
|
||||
padding_below,
|
||||
padding_above,
|
||||
window_shape,
|
||||
exclude_pad);
|
||||
auto result = make_shared<op::Result>(avgpool_backprop_v1);
|
||||
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{delta});
|
||||
|
||||
ngraph::pass::Manager pass_manager;
|
||||
pass_manager.register_pass<pass::Opset0Downgrade>();
|
||||
pass_manager.run_passes(f);
|
||||
|
||||
auto avgpool_backprop_s0_result = f->get_results().at(0);
|
||||
auto node = avgpool_backprop_s0_result->get_input_node_shared_ptr(0);
|
||||
auto avg_pool_backprop_v0_node = as_type_ptr<op::v0::AvgPoolBackprop>(node);
|
||||
ASSERT_TRUE(avg_pool_backprop_v0_node);
|
||||
|
||||
EXPECT_EQ(avg_pool_backprop_v0_node->get_padding_below(), padding_below);
|
||||
EXPECT_EQ(avg_pool_backprop_v0_node->get_padding_above(), padding_above);
|
||||
EXPECT_EQ(avg_pool_backprop_v0_node->get_window_movement_strides(), window_movement_strides);
|
||||
EXPECT_EQ(avg_pool_backprop_v0_node->get_window_shape(), window_shape);
|
||||
EXPECT_EQ(avg_pool_backprop_v0_node->get_forward_arg_shape(), Shape({1, 3, 7, 10}));
|
||||
EXPECT_EQ(avg_pool_backprop_v0_node->get_include_padding_in_avg_computation(), !exclude_pad);
|
||||
}
|
||||
|
||||
TEST(opset_transform, opset1_maxpool_backprop_downgrade_pass)
|
||||
{
|
||||
auto arg_forward = make_shared<op::Parameter>(element::f32, Shape{1, 3, 7, 10});
|
||||
auto delta = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
auto result_forward = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
|
||||
Shape padding_below{1, 0};
|
||||
Shape padding_above{0, 1};
|
||||
Strides window_movement_strides{1, 1};
|
||||
Shape window_shape{3, 3};
|
||||
|
||||
auto max_pool_backprop_v1 = make_shared<op::v1::MaxPoolBackprop>(arg_forward,
|
||||
delta,
|
||||
result_forward,
|
||||
window_movement_strides,
|
||||
padding_below,
|
||||
padding_above,
|
||||
window_shape);
|
||||
auto result = make_shared<op::Result>(max_pool_backprop_v1);
|
||||
auto f = make_shared<Function>(ResultVector{result},
|
||||
ParameterVector{arg_forward, delta, result_forward});
|
||||
|
||||
ngraph::pass::Manager pass_manager;
|
||||
pass_manager.register_pass<pass::Opset0Downgrade>();
|
||||
pass_manager.run_passes(f);
|
||||
|
||||
auto max_pool_backprop_s0_result = f->get_results().at(0);
|
||||
auto node = max_pool_backprop_s0_result->get_input_node_shared_ptr(0);
|
||||
auto max_pool_backprop_v0_node = as_type_ptr<op::v0::MaxPoolBackprop>(node);
|
||||
ASSERT_TRUE(max_pool_backprop_v0_node);
|
||||
EXPECT_EQ(max_pool_backprop_v0_node->get_padding_below(), padding_below);
|
||||
EXPECT_EQ(max_pool_backprop_v0_node->get_padding_above(), padding_above);
|
||||
EXPECT_EQ(max_pool_backprop_v0_node->get_window_movement_strides(), window_movement_strides);
|
||||
EXPECT_EQ(max_pool_backprop_v0_node->get_window_shape(), window_shape);
|
||||
}
|
||||
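
Every pool-pass test removed above follows one driver pattern. A condensed sketch of that shared pattern, reusing the v1::AvgPool construction already shown in this hunk (the test name is illustrative):

// Condensed sketch of the removed tests' shared structure: wrap a v1 node in
// a Function, run Opset0Downgrade, then check that the Result's producer has
// been rewritten to the corresponding v0 op.
TEST(opset_transform, example_pool_downgrade_pass)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
    auto pool_v1 = make_shared<op::v1::AvgPool>(arg,
                                                Strides{1, 1}, // strides
                                                Shape{1, 0},   // pads_begin
                                                Shape{0, 1},   // pads_end
                                                Shape{3, 3},   // kernel
                                                false,         // exclude_pad
                                                op::RoundingType::FLOOR,
                                                op::PadType::EXPLICIT);
    auto f = make_shared<Function>(NodeVector{pool_v1}, ParameterVector{arg});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();
    pass_manager.run_passes(f);

    auto node = f->get_results().at(0)->get_input_node_shared_ptr(0);
    ASSERT_TRUE(as_type_ptr<op::v0::AvgPool>(node));
}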
@ -18,8 +18,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;

@ -19,8 +19,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;

@ -3,8 +3,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/test_control.hpp"
#include "util/type_prop.hpp"

@ -19,8 +19,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;

@ -19,8 +19,8 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;

@ -19,7 +19,7 @@

#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "opset0_downgrade.hpp"
#include "util/type_prop.hpp"

using namespace std;

@ -27,9 +27,9 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/fused_op_decomposition.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "ngraph/provenance.hpp"
#include "opset0_downgrade.hpp"
#include "util/provenance_enabler.hpp"

using namespace std;

@ -107,93 +107,6 @@ TEST(reshape_sinking, broadcast_swimming)
    ASSERT_EQ(add->get_argument(1), conv);
}

#ifndef NGRAPH_JSON_DISABLE
TEST(reshape_sinking, mnist_conv)
{
    const string json_path = file_util::path_join(SERIALIZED_ZOO, "tf_conv_mnist_nhwc.json");
    const string json_string = file_util::read_file_to_string(json_path);
    stringstream ss(json_string);
    shared_ptr<Function> func = ngraph::deserialize(ss);
    pass::Manager pass_manager;
    size_t before_count = count_ops_of_type<op::Reshape>(func);
    pass_manager.register_pass<pass::ReshapeSinking>();
    pass_manager.register_pass<pass::ReshapeElimination>();
    pass_manager.register_pass<pass::CommonSubexpressionElimination>();
    // pass_manager.register_pass<pass::CoreFusion>();
    // pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
    pass_manager.run_passes(func);
    size_t before_after = count_ops_of_type<op::Reshape>(func);
    ASSERT_LE(before_after, before_count);
}
#endif

TEST(reshape_sinking, nasnet_pooladd)
{
    Shape input_shape{1, 3, 3, 1};

    auto input_type = element::f32;
    auto output_type = element::f32;

    auto X = make_shared<op::Parameter>(input_type, input_shape);
    auto c_weights = op::Constant::create(input_type, Shape{1, 1, 1, 1}, {3});
    auto reshape1 = make_shared<op::Reshape>(X, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 3});
    auto avgpool =
        make_shared<op::AvgPool>(reshape1, Shape{1, 1}, Strides{1, 1}, Shape{0, 0}, Shape{0, 0});
    auto reshape2 = make_shared<op::Reshape>(avgpool, AxisVector{0, 2, 3, 1}, Shape{1, 3, 3, 1});
    auto maxpool =
        make_shared<op::MaxPool>(reshape1, Shape{1, 1}, Strides{1, 1}, Shape{0, 0}, Shape{0, 0});
    auto reshape3 = make_shared<op::Reshape>(maxpool, AxisVector{0, 2, 3, 1}, Shape{1, 3, 3, 1});
    auto const1 = op::Constant::create(input_type, Shape{1, 3, 3, 1}, {3});
    auto add1 = make_shared<op::Add>(reshape3, const1);
    auto add2 = make_shared<op::Add>(add1, reshape2);
    auto func = make_shared<Function>(add2, ParameterVector{X});

    pass::Manager pass_manager;
    size_t before_count = count_ops_of_type<op::Reshape>(func);
    pass_manager.register_pass<pass::ReshapeSinking>();
    pass_manager.register_pass<pass::ReshapeElimination>();
    pass_manager.register_pass<pass::CommonSubexpressionElimination>();
    pass_manager.run_passes(func);
    size_t before_after = count_ops_of_type<op::Reshape>(func);
    ASSERT_LE(before_after, before_count);
}

TEST(reshape_sinking, slice_pad)
{
    Shape shape_a{100, 8, 8, 1};

    AxisVector to_nhwc{0, 2, 3, 1};
    AxisVector to_nchw{0, 3, 1, 2};

    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});

    CoordinateDiff padding_below{0, 0, 0, 0};
    CoordinateDiff padding_above{0, 1, 1, 0};

    auto reshape1 = make_shared<op::Reshape>(A, to_nchw, Shape{100, 1, 8, 8});
    auto maxpool =
        make_shared<op::MaxPool>(reshape1, Shape{1, 1}, Strides{2, 2}, Shape{0, 0}, Shape{0, 0});
    auto reshape2 = make_shared<op::Reshape>(maxpool, to_nhwc, Shape{100, 4, 4, 1});
    auto pad = make_shared<op::Pad>(reshape2, pad_value, padding_below, padding_above);
    auto slice = make_shared<op::Slice>(
        pad, Coordinate{0, 1, 1, 0}, Coordinate{100, 5, 5, 1}, Strides{1, 1, 1, 1});

    auto reshape3 = make_shared<op::Reshape>(slice, to_nchw, Shape{100, 1, 4, 4});
    auto avgpool = make_shared<op::AvgPool>(reshape3, Shape{1, 1}, Strides{2, 2});
    auto reshape4 = make_shared<op::Reshape>(avgpool, to_nhwc, Shape{100, 1, 2, 2});
    auto f = make_shared<Function>(reshape4, ParameterVector{A});

    pass::Manager pass_manager;
    size_t before_count = count_ops_of_type<op::Reshape>(f);
    pass_manager.register_pass<pass::ReshapeSinking>();
    pass_manager.register_pass<pass::ReshapeElimination>();
    pass_manager.register_pass<pass::CommonSubexpressionElimination>();
    pass_manager.run_passes(f);
    size_t before_after = count_ops_of_type<op::Reshape>(f);
    ASSERT_LE(before_after, before_count);
}

TEST(reshape_sinking, concat)
{
    Shape shape{};

@ -20,11 +20,15 @@ set (SRC
    backend_manager.hpp
    cache.cpp
    cache.hpp
    opset0_downgrade.cpp
    opset0_downgrade.hpp
    executable.cpp
    executable.hpp
    performance_counter.hpp
    dynamic/dynamic_backend.cpp
    dynamic/dynamic_backend.hpp
    op/avg_pool.cpp
    op/avg_pool.hpp
)

add_library(ngraph_backend SHARED ${SRC})
@ -38,7 +42,7 @@ if (NOT WIN32)
    target_link_libraries(ngraph_backend PRIVATE dl)
endif()
target_compile_definitions(ngraph_backend PRIVATE BACKEND_DLL_EXPORTS)
target_include_directories(ngraph_backend INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(ngraph_backend PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})

add_subdirectory(interpreter)

@ -29,11 +29,11 @@
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/pass/dyn_elimination.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_downgrade.hpp"
#include "ngraph/pass/shape_relevance.hpp"
#include "ngraph/specialize_function.hpp"
#include "ngraph/util.hpp"
#include "opset0_downgrade.hpp"

using namespace std;
using namespace ngraph;
@ -96,10 +96,8 @@ bool is_dynamic_op(const std::shared_ptr<Node>& op)
    return is_type<op::Transpose>(op) || is_type<op::DynBroadcast>(op) ||
           is_type<op::DynReplaceSlice>(op) || is_type<op::DynSlice>(op) ||
           is_type<op::v1::Reshape>(op) || is_type<op::Range>(op) ||
           is_type<op::v1::ConvolutionBackpropData>(op) ||
           is_type<op::v1::ConvolutionBackpropFilters>(op) ||
           is_type<op::v1::AvgPoolBackprop>(op) || is_type<op::v1::Broadcast>(op) ||
           is_type<op::v3::Broadcast>(op) || is_type<op::v1::GenerateMask>(op);
           is_type<op::v1::ConvolutionBackpropData>(op) || is_type<op::v3::Broadcast>(op) ||
           is_type<op::v1::GenerateMask>(op);
}

// Helper for a vile hack in DynamicExecutable::call. See body of that function for details.
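
For context, is_dynamic_op is the predicate the dynamic backend consults to decide which nodes still need shape resolution at call time; the hunk above shrinks that set by dropping the removed backprop ops. A hedged sketch of how such a predicate is typically consumed (the loop is illustrative, not the actual DynamicExecutable code):

// Illustrative only: scan a function for ops that force per-call
// re-specialization, using the predicate narrowed in the hunk above.
bool needs_dynamic_handling(const std::shared_ptr<ngraph::Function>& f)
{
    for (const auto& op : f->get_ops())
    {
        if (is_dynamic_op(op))
        {
            return true;
        }
    }
    return false;
}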
@ -21,8 +21,8 @@
#include <string>
#include <vector>

#include "../backend.hpp"
#include "../cache.hpp"
#include "backend.hpp"
#include "cache.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"

@ -839,7 +839,6 @@ avg_pool_1d_2channel_2image

# Could not eliminate all Dyn nodes
avg_pool_bprop_2d_2channel_2image_dyn_shape
dyn_generate_mask
reshape_v1
dyn_convolution_backprop_data
dyn_convolution_backprop_filter
@ -1155,15 +1154,6 @@ batch_norm_bprop_n4c3h2w2
batch_norm_fprop_inference_b2c2h2w1
dyn_batch_norm_fprop_b1c2h2w2

backwards_maxpool_n4_c1_hw4_2x2_max
backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_maxpool_n2_c1_hw5_3x3_str2_max_pad1x2_2x3
backwards_avgpool_n1_c1_hw2x2
backwards_avgpool_n1_c1_hw4x4
backwards_avgpool_n2_c2_hw4x4
backwards_avgpool_n2_c2_hw4x4_numeric
backwards_avgpool_n2_c2_hw4x4_win_2x2_str_1x1_numeric
backwards_avgpool_n2_c2_hw2x2_win_2x2_str_1x1_padding_numeric
backwards_abs
backwards_acos
backwards_add

@ -27,10 +27,10 @@
#include "ngraph/pass/like_replacement.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_downgrade.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "opset0_downgrade.hpp"

using namespace std;
using namespace ngraph;

@ -104,6 +104,7 @@
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/state/bernoulli_rng_state.hpp"
#include "ngraph/state/uniform_rng_state.hpp"
#include "op/avg_pool.hpp"

namespace ngraph
{
@ -330,7 +331,7 @@ protected:
        }
        case OP_TYPEID::AvgPool:
        {
            const op::AvgPool* avg_pool = static_cast<const op::AvgPool*>(&node);
            const op::v0::AvgPool* avg_pool = static_cast<const op::v0::AvgPool*>(&node);

            reference::avg_pool<T>(args[0]->get_data_ptr<const T>(),
                                   out[0]->get_data_ptr<T>(),
@ -416,37 +417,6 @@ protected:
                node.get_input_shape(2));
            break;
        }
        case OP_TYPEID::BatchNormTrainingBackprop:
        {
            const ngraph::op::BatchNormTrainingBackprop* bn_bprop =
                static_cast<const ngraph::op::BatchNormTrainingBackprop*>(&node);
            reference::batch_norm_backprop(bn_bprop->get_eps_value(),
                                           args[0]->get_data_ptr<const T>(),
                                           args[1]->get_data_ptr<const T>(),
                                           args[2]->get_data_ptr<const T>(),
                                           args[3]->get_data_ptr<const T>(),
                                           args[4]->get_data_ptr<const T>(),
                                           args[5]->get_data_ptr<const T>(),
                                           out[0]->get_data_ptr<T>(),
                                           out[1]->get_data_ptr<T>(),
                                           out[2]->get_data_ptr<T>(),
                                           node.get_input_shape(2));
            break;
        }
        case OP_TYPEID::AvgPoolBackprop:
        {
            const op::AvgPoolBackprop* apb = static_cast<const op::AvgPoolBackprop*>(&node);
            reference::avg_pool_backprop<T>(args[0]->get_data_ptr<const T>(),
                                            out[0]->get_data_ptr<T>(),
                                            node.get_input_shape(0),
                                            node.get_output_shape(0),
                                            apb->get_window_shape(),
                                            apb->get_window_movement_strides(),
                                            apb->get_padding_below(),
                                            apb->get_padding_above(),
                                            apb->get_include_padding_in_avg_computation());
            break;
        }
        case OP_TYPEID::BroadcastDistributed:
        {
            const ngraph::op::BroadcastDistributed* broadcast =
@ -570,24 +540,6 @@ protected:

            break;
        }
        case OP_TYPEID::ConvolutionBackpropFilters:
        {
            const op::ConvolutionBackpropFilters* c =
                static_cast<const op::ConvolutionBackpropFilters*>(&node);
            reference::convolution_backprop_filter<T>(
                args[0]->get_data_ptr<const T>(), // input
                args[1]->get_data_ptr<const T>(), // delta_convolution_output
                out[0]->get_data_ptr<T>(),        // delta_filter
                c->get_input_shape(0),            // input_shape
                c->get_input_shape(1),            // convolution_output_shape
                c->get_filters_shape(),           // filter_shape
                c->get_window_dilation_strides_forward(),
                c->get_window_movement_strides_forward(),
                c->get_padding_below_forward(),
                c->compute_backward_in_pad_above(),
                c->get_data_dilation_strides_forward());
            break;
        }
        case OP_TYPEID::ConvolutionBackpropData:
        {
            // Note that args[1] and args[0] are switched here from the usual order.
@ -953,22 +905,6 @@ protected:
                lrn->get_nsize());
            break;
        }
        case OP_TYPEID::MaxPoolBackprop:
        {
            const op::MaxPoolBackprop* max_pool_backprop =
                static_cast<const op::MaxPoolBackprop*>(&node);

            reference::max_pool_backprop<T>(args[0]->get_data_ptr<const T>(),
                                            args[1]->get_data_ptr<const T>(),
                                            out[0]->get_data_ptr<T>(),
                                            node.get_input_shape(1),
                                            node.get_output_shape(0),
                                            max_pool_backprop->get_window_shape(),
                                            max_pool_backprop->get_window_movement_strides(),
                                            max_pool_backprop->get_padding_below(),
                                            max_pool_backprop->get_padding_above());
            break;
        }
        case OP_TYPEID::Negative:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
@ -1310,15 +1246,6 @@ protected:
                args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
            break;
        }
        case OP_TYPEID::ReluBackprop:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
            reference::relu_backprop<T>(args[0]->get_data_ptr<const T>(),
                                        args[1]->get_data_ptr<const T>(),
                                        out[0]->get_data_ptr<T>(),
                                        element_count);
            break;
        }
        case OP_TYPEID::ReplaceSlice:
        {
            const op::ReplaceSlice* slice = static_cast<const op::ReplaceSlice*>(&node);
@ -1460,15 +1387,6 @@ protected:
                args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
            break;
        }
        case OP_TYPEID::SigmoidBackprop:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
            reference::sigmoid_backprop<T>(args[0]->get_data_ptr<const T>(),
                                           args[1]->get_data_ptr<const T>(),
                                           out[0]->get_data_ptr<T>(),
                                           element_count);
            break;
        }
        case OP_TYPEID::Sign:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
@ -1561,10 +1479,8 @@ protected:
        case OP_TYPEID::BatchMatMulTranspose:
        case OP_TYPEID::ConvolutionBias:
        case OP_TYPEID::ConvolutionBiasAdd:
        case OP_TYPEID::ConvolutionBiasBackpropFiltersBias:
        case OP_TYPEID::CropAndResize:
        case OP_TYPEID::CrossEntropy:
        case OP_TYPEID::CrossEntropyBackprop:
        case OP_TYPEID::DepthToSpace:
        case OP_TYPEID::DynBroadcast:
        case OP_TYPEID::DynPad:
@ -1573,23 +1489,19 @@ protected:
        case OP_TYPEID::FakeQuantize:
        case OP_TYPEID::Gather:
        case OP_TYPEID::Gelu:
        case OP_TYPEID::GeluBackpropFactor:
        case OP_TYPEID::Gemm:
        case OP_TYPEID::GRN:
        case OP_TYPEID::GroupConvolution:
        case OP_TYPEID::GroupConvolutionBackpropData:
        case OP_TYPEID::GroupConvolutionBackpropFilters:
        case OP_TYPEID::GRUCell:
        case OP_TYPEID::HardSigmoid:
        case OP_TYPEID::Interpolate:
        case OP_TYPEID::LayerNorm:
        case OP_TYPEID::LayerNormBackprop:
        case OP_TYPEID::LSTMCell:
        case OP_TYPEID::LSTMSequence:
        case OP_TYPEID::MVN:
        case OP_TYPEID::NormalizeL2:
        case OP_TYPEID::PartialSlice:
        case OP_TYPEID::PartialSliceBackprop:
        case OP_TYPEID::Passthrough:
        case OP_TYPEID::PRelu:
        case OP_TYPEID::RNNCell:
@ -1599,7 +1511,6 @@ protected:
        case OP_TYPEID::Selu:
        case OP_TYPEID::ShuffleChannels:
        case OP_TYPEID::SoftmaxCrossEntropy:
        case OP_TYPEID::SoftmaxCrossEntropyBackprop:
        case OP_TYPEID::SpaceToDepth:
        case OP_TYPEID::Split:
        case OP_TYPEID::SquaredDifference:

@ -16,6 +16,7 @@

#define ID_SUFFIX(NAME) NAME
#include "ngraph/opsets/opset0_tbl.hpp"
NGRAPH_OP(AvgPool, op::v0)
#undef ID_SUFFIX

#define ID_SUFFIX(NAME) NAME##_v1
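
The ID_SUFFIX/NGRAPH_OP lines above rely on an X-macro pattern: the opset table header is a flat list of NGRAPH_OP(NAME, NAMESPACE) entries that each includer expands with its own NGRAPH_OP definition, so appending NGRAPH_OP(AvgPool, op::v0) after the include keeps the relocated op in the interpreter's dispatch enum. A self-contained illustration with the table inlined instead of included (names simplified):

// Illustrative only: the real table lives in ngraph/opsets/opset0_tbl.hpp.
#define OP_TABLE(NGRAPH_OP)                                                    \
    NGRAPH_OP(Abs, op::v0)                                                     \
    NGRAPH_OP(AvgPool, op::v0)

enum class OP_TYPEID
{
#define NGRAPH_OP(NAME, NAMESPACE) NAME,
    OP_TABLE(NGRAPH_OP)
#undef NGRAPH_OP
};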
@ -64,3 +64,19 @@ INTERPRETER.onnx_upsample8_linear_infer
INTERPRETER.onnx_upsample9_scales_const_nearest_infer
INTERPRETER.onnx_upsample9_scales_const_linear_infer
INTERPRETER.onnx_upsample9_scales_input_nearest_infer
# Backward conv
INTERPRETER.convolution_2d_1item
INTERPRETER.convolution_2d_1item_padded_1_1x1_1
INTERPRETER.convolution_2d_1item_padded_2_3x4_5
INTERPRETER.convolution_2d_2items
INTERPRETER.convolution_2d_2items_strided
INTERPRETER.convolution_2d_2items_strided_padded
INTERPRETER.convolution_2d_2items_strided_padded_same
INTERPRETER.convolution_2d_2items_dilated
INTERPRETER.convolution_2d_2items_dilated_padded
INTERPRETER.convolution_3d_2items
INTERPRETER.convolution_2d_1item_1o1i_data_dilated
INTERPRETER.convolution_2d_1item_2o1i_data_dilated
INTERPRETER.convolution_2d_1item_2o2i_data_dilated
INTERPRETER.convolution_2d_1item_5o3i_data_dilated
INTERPRETER.convolution_2d_2item_5o3i_data_dilated

247 ngraph/test/runtime/op/avg_pool.cpp Normal file
@ -0,0 +1,247 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "avg_pool.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"

using namespace std;
using namespace ngraph;

// *** AvgPool OP SET 0 ***
constexpr NodeTypeInfo op::v0::AvgPool::type_info;

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides,
                         const Shape& padding_below,
                         const Shape& padding_above,
                         bool include_padding_in_avg_computation,
                         const PadType& pad_type,
                         bool ceil_mode)
    : Op({arg})
    , m_window_shape(window_shape)
    , m_window_movement_strides(window_movement_strides)
    , m_padding_below(padding_below)
    , m_padding_above(padding_above)
    , m_include_padding_in_avg_computation(include_padding_in_avg_computation)
    , m_pad_type(pad_type)
    , m_ceil_mode(ceil_mode)
{
    constructor_validate_and_infer_types();
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides,
                         const Shape& padding_below,
                         const Shape& padding_above,
                         bool include_padding_in_avg_computation,
                         const PadType& pad_type)
    : AvgPool(arg,
              window_shape,
              window_movement_strides,
              padding_below,
              padding_above,
              include_padding_in_avg_computation,
              pad_type,
              false)
{
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides,
                         const Shape& padding_below,
                         const Shape& padding_above,
                         bool include_padding_in_avg_computation)
    : AvgPool(arg,
              window_shape,
              window_movement_strides,
              padding_below,
              padding_above,
              include_padding_in_avg_computation,
              PadType::EXPLICIT)
{
}

bool op::v0::AvgPool::visit_attributes(AttributeVisitor& visitor)
{
    visitor.on_attribute("window_shape", m_window_shape);
    visitor.on_attribute("window_movement_strides", m_window_movement_strides);
    visitor.on_attribute("padding_below", m_padding_below);
    visitor.on_attribute("padding_above", m_padding_above);
    visitor.on_attribute("include_padding_in_avg_computation",
                         m_include_padding_in_avg_computation);
    visitor.on_attribute("pad_type", m_pad_type);
    visitor.on_attribute("ceil_mode", m_ceil_mode);
    return true;
}

void op::v0::AvgPool::validate_and_infer_types()
{
    if (0 == m_window_movement_strides.size())
    {
        m_window_movement_strides = Strides(m_window_shape.size(), 1);
    }

    if (0 == m_padding_below.size())
    {
        m_padding_below = Shape(m_window_shape.size(), 0);
    }

    if (0 == m_padding_above.size())
    {
        m_padding_above = Shape(m_window_shape.size(), 0);
    }

    const PartialShape& arg_shape = get_input_partial_shape(0);

    if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
    {
        if (arg_shape.is_static())
        {
            CoordinateDiff padding_above, padding_below;
            infer_auto_padding(arg_shape.to_shape(),
                               m_window_shape,
                               m_window_movement_strides,
                               Strides(m_window_shape.size(), 1), // No dilation
                               m_pad_type,
                               padding_above,
                               padding_below);
            m_padding_above = Shape(padding_above.begin(), padding_above.end());
            m_padding_below = Shape(padding_below.begin(), padding_below.end());
        }
    }

    // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
    // now still take Shape (no negative padding).
    CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
    CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());

    set_output_type(0,
                    get_input_element_type(0),
                    infer_batched_pooling_forward(this,
                                                  arg_shape,
                                                  padding_below,
                                                  padding_above,
                                                  m_window_shape,
                                                  m_window_movement_strides,
                                                  m_include_padding_in_avg_computation,
                                                  m_ceil_mode));
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg,
                         const Shape& window_shape,
                         const Strides& window_movement_strides)
    : AvgPool(arg, window_shape, window_movement_strides, Shape(), Shape(), false)
{
}

op::v0::AvgPool::AvgPool(const Output<Node>& arg, const Shape& window_shape)
    : AvgPool(arg, window_shape, Strides(), Shape(), Shape(), false)
{
}

const Shape& op::v0::AvgPool::get_window_shape() const
{
    return m_window_shape;
}

void op::v0::AvgPool::set_window_shape(const Shape& window_shape)
{
    m_window_shape = window_shape;
}

const Strides& op::v0::AvgPool::get_window_movement_strides() const
{
    return m_window_movement_strides;
}

void op::v0::AvgPool::set_window_movement_strides(const Strides& window_movement_strides)
{
    m_window_movement_strides = window_movement_strides;
}

const Shape& op::v0::AvgPool::get_padding_below() const
{
    return m_padding_below;
}

void op::v0::AvgPool::set_padding_below(const Shape& padding_below)
{
    m_padding_below = padding_below;
}

const Shape& op::v0::AvgPool::get_padding_above() const
{
    return m_padding_above;
}

void op::v0::AvgPool::set_padding_above(const Shape& padding_above)
{
    m_padding_above = padding_above;
}

bool op::v0::AvgPool::get_include_padding_in_avg_computation() const
{
    return m_include_padding_in_avg_computation;
}

void op::v0::AvgPool::set_include_padding_in_avg_computation(
    bool include_padding_in_avg_computation)
{
    m_include_padding_in_avg_computation = include_padding_in_avg_computation;
}

const op::PadType& op::v0::AvgPool::get_pad_type() const
{
    return m_pad_type;
}

void op::v0::AvgPool::set_pad_type(const op::PadType& pad_type)
{
    m_pad_type = pad_type;
}

bool op::v0::AvgPool::get_ceil_mode() const
{
    return m_ceil_mode;
}

void op::v0::AvgPool::set_ceil_mode(bool ceil_mode)
{
    m_ceil_mode = ceil_mode;
}

shared_ptr<Node> op::v0::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<v0::AvgPool>(new_args.at(0),
                                    m_window_shape,
                                    m_window_movement_strides,
                                    m_padding_below,
                                    m_padding_above,
                                    m_include_padding_in_avg_computation,
                                    m_pad_type,
                                    m_ceil_mode);
}

shared_ptr<Node> op::v0::AvgPool::get_default_value() const
{
    return Constant::create(get_element_type(), get_shape(), {0});
}
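
With the whole v0 implementation now living under the test runtime, callers change only the include path; construction and shape inference behave as before. A short sketch reusing the shapes from the removed type_prop test further below:

#include "ngraph/ngraph.hpp"
#include "op/avg_pool.hpp" // relocated header in the test runtime

void check_relocated_avg_pool()
{
    using namespace ngraph;
    // The 1-D case the removed type_prop test exercised: a {64, 3, 100} input
    // with a length-10 window deduces an output shape of {64, 3, 91}.
    auto param = std::make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
    auto pool = std::make_shared<op::v0::AvgPool>(param, Shape{10});
    NGRAPH_CHECK(pool->get_shape() == (Shape{64, 3, 91}));
}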
174 ngraph/test/runtime/op/avg_pool.hpp Normal file
@ -0,0 +1,174 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "backend_visibility.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"

namespace ngraph
{
    namespace op
    {
        namespace v0
        {
            /// \brief Batched average pooling operation, with optional padding and window stride.
            ///
            class BACKEND_API AvgPool : public Op
            {
            public:
                static constexpr NodeTypeInfo type_info{"AvgPool", 0};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs a batched average pooling operation.
                AvgPool() = default;

                /// \brief Constructs a batched average pooling operation.
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                /// \param padding_below The below-padding shape.<br>
                /// `[n]`
                /// \param padding_above The above-padding shape.<br>
                /// `[n]`
                /// \param include_padding_in_avg_computation If true then averages include padding
                ///     elements, each treated as the number zero. If false, padding elements are
                ///     entirely ignored when computing averages.
                /// \param pad_type Padding type to use for additional padded dimensions
                /// \param ceil_mode Whether to use ceiling while computing output shape.
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides,
                        const Shape& padding_below,
                        const Shape& padding_above,
                        bool include_padding_in_avg_computation,
                        const PadType& pad_type,
                        bool ceil_mode);

                /// \brief Constructs a batched average pooling operation.
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                /// \param padding_below The below-padding shape.<br>
                /// `[n]`
                /// \param padding_above The above-padding shape.<br>
                /// `[n]`
                /// \param include_padding_in_avg_computation If true then averages include padding
                ///     elements, each treated as the number zero. If false, padding elements are
                ///     entirely ignored when computing averages.
                /// \param pad_type Padding type to use for additional padded dimensions
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides,
                        const Shape& padding_below,
                        const Shape& padding_above,
                        bool include_padding_in_avg_computation,
                        const PadType& pad_type);

                /// \brief Constructs a batched average pooling operation.
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                /// \param padding_below The below-padding shape.<br>
                /// `[n]`
                /// \param padding_above The above-padding shape.<br>
                /// `[n]`
                /// \param include_padding_in_avg_computation If true then averages include padding
                ///     elements, each treated as the number zero. If false, padding elements are
                ///     entirely ignored when computing averages.
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides,
                        const Shape& padding_below,
                        const Shape& padding_above,
                        bool include_padding_in_avg_computation = false);

                /// \brief Constructs a batched, unpadded average pooling operation (i.e., all
                ///        padding shapes are set to 0).
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, ..., dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                /// \param window_movement_strides The window movement strides.<br>
                /// `[n]`
                AvgPool(const Output<Node>& arg,
                        const Shape& window_shape,
                        const Strides& window_movement_strides);

                /// \brief Constructs an unstrided batched average pooling operation (i.e., all
                ///        window movement strides are 1 and all padding shapes are set to 0).
                ///
                /// \param arg The output producing the input data batch tensor.<br>
                /// `[d1, ..., dn]`
                /// \param window_shape The window shape.<br>
                /// `[n]`
                AvgPool(const Output<Node>& arg, const Shape& window_shape);

                bool visit_attributes(AttributeVisitor& visitor) override;

                void validate_and_infer_types() override;

                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;

                /// \return The window shape.
                const Shape& get_window_shape() const;
                void set_window_shape(const Shape& window_shape);
                /// \return The window movement strides.
                const Strides& get_window_movement_strides() const;
                void set_window_movement_strides(const Strides& window_movement_strides);
                /// \return The below-padding shape.
                const Shape& get_padding_below() const;
                void set_padding_below(const Shape& padding_below);
                /// \return The above-padding shape.
                const Shape& get_padding_above() const;
                void set_padding_above(const Shape& padding_above);
                bool get_include_padding_in_avg_computation() const;
                void set_include_padding_in_avg_computation(
                    bool include_padding_in_avg_computation);
                /// \return The pad type for pooling.
                const PadType& get_pad_type() const;
                void set_pad_type(const PadType& pad_type);
                bool get_ceil_mode() const;
                void set_ceil_mode(bool ceil_mode);
                /// \return The default value for AvgPool.
                virtual std::shared_ptr<Node> get_default_value() const override;

            protected:
                Shape m_window_shape;
                Strides m_window_movement_strides;
                Shape m_padding_below;
                Shape m_padding_above;
                bool m_include_padding_in_avg_computation{false};
                PadType m_pad_type{PadType::EXPLICIT};
                bool m_ceil_mode{false};
            };
        } // namespace v0
    }     // namespace op
} // namespace ngraph

@ -26,11 +26,12 @@
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/pass/implicit_broadcast_elimination.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/provenance.hpp"
#include "ngraph/slice_plan.hpp"
#include "ngraph/type.hpp"
#include "ngraph/validation_util.hpp"
#include "op/avg_pool.hpp"
#include "opset0_downgrade.hpp"

using namespace std;
using namespace ngraph;
@ -119,31 +120,6 @@ namespace
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::v1::AvgPoolBackprop> node)
    {
        NGRAPH_CHECK(node->input_value(1).get_node_shared_ptr()->is_constant());
        const auto forward_arg_shape =
            static_pointer_cast<op::Constant>(node->input_value(1).get_node_shared_ptr())
                ->get_shape_val();
        const auto delta = node->input_value(0);
        const auto include_padding_in_avg_computation = !node->get_exclude_pad();
        const auto padding_below = node->get_pads_begin();
        const auto padding_above = node->get_pads_end();
        const auto window_movement_strides = node->get_strides();
        const auto window_shape = node->get_kernel();

        auto replacement_node =
            make_shared<op::v0::AvgPoolBackprop>(forward_arg_shape,
                                                 delta,
                                                 window_shape,
                                                 window_movement_strides,
                                                 padding_below,
                                                 padding_above,
                                                 include_padding_in_avg_computation);
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::v1::Broadcast> node)
    {
        auto arg = node->input_value(0);
@ -270,29 +246,6 @@ namespace
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::v1::ConvolutionBackpropFilters> node)
    {
        NGRAPH_CHECK(node->input_value(2).get_node_shared_ptr()->is_constant());
        auto filters_shape =
            static_pointer_cast<op::Constant>(node->input_value(2).get_node_shared_ptr())
                ->get_shape_val();
        const auto data_arg = node->input_value(0);
        const auto delta_arg = node->input_value(1);
        const auto strides = node->get_strides();
        const size_t num_spatial_dims = strides.size();
        auto replacement_node =
            make_shared<op::v0::ConvolutionBackpropFilters>(data_arg,
                                                            filters_shape,
                                                            delta_arg,
                                                            node->get_strides(),
                                                            node->get_dilations(),
                                                            node->get_pads_begin(),
                                                            node->get_pads_end(),
                                                            Strides(num_spatial_dims, 1));
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::v1::Divide> node)
    {
        const auto input_arg0 = node->input_value(0);
@ -502,41 +455,6 @@ namespace
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::v1::MaxPoolBackprop> node)
    {
        const auto padding_below = node->get_pads_begin();
        const auto padding_above = node->get_pads_end();
        const auto window_movement_strides = node->get_strides();
        const auto window_shape = node->get_kernel();

        const auto arg_forward = node->input_value(0);
        const auto delta = node->input_value(1);

        shared_ptr<Node> replacement_node;
        if (node->get_input_size() == 3)
        {
            const auto result_forward = node->input_value(2);
            replacement_node = make_shared<op::v0::MaxPoolBackprop>(arg_forward,
                                                                    delta,
                                                                    result_forward,
                                                                    window_shape,
                                                                    window_movement_strides,
                                                                    padding_below,
                                                                    padding_above);
        }
        else
        {
            replacement_node = make_shared<op::v0::MaxPoolBackprop>(arg_forward,
                                                                    delta,
                                                                    window_movement_strides,
                                                                    window_shape,
                                                                    padding_below,
                                                                    padding_above);
        }
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::v1::Minimum> node)
    {
        return op_cast_binary_elementwise_node<op::v0::Minimum, op::v1::Minimum>(node);
@ -905,8 +823,6 @@ namespace
    static DispatchMap dispatch_map{
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, op_cast_thunk<NAMESPACE::NAME>},
#include "ngraph/opsets/opset1_tbl.hpp"
        NGRAPH_OP(AvgPoolBackprop, op::v1) NGRAPH_OP(ConvolutionBackpropFilters, op::v1)
            NGRAPH_OP(GenerateMask, op::v1) NGRAPH_OP(MaxPoolBackprop, op::v1)
#undef NGRAPH_OP
    };
    return dispatch_map;
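
The dispatch_map hunk above is how the downgrade pass finds a conversion for each node: expanding the opset1 table yields one {type_info, thunk} pair per op, and the four backprop-related ops, no longer present in that table, are appended by hand. A simplified sketch of the lookup side (names condensed; not the pass's exact code):

// Simplified sketch: map each op's static type_info to a conversion thunk,
// then look the node up by its runtime type.
#include <functional>
#include <map>
#include "ngraph/ngraph.hpp"

using Thunk = std::function<bool(std::shared_ptr<ngraph::Node>)>;

bool downgrade_node(const std::shared_ptr<ngraph::Node>& node,
                    const std::map<ngraph::NodeTypeInfo, Thunk>& dispatch)
{
    auto it = dispatch.find(node->get_type_info());
    return it != dispatch.end() && it->second(node); // false: no rewrite rule
}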
@ -16,13 +16,14 @@

#pragma once

#include "backend_visibility.hpp"
#include "ngraph/pass/pass.hpp"

namespace ngraph
{
    namespace pass
    {
        class NGRAPH_API Opset0Downgrade : public NodePass
        class BACKEND_API Opset0Downgrade : public NodePass
        {
        public:
            ///
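
Opset0Downgrade is now exported from the ngraph_backend library rather than the core library, hence the NGRAPH_API to BACKEND_API swap. backend_visibility.hpp itself is not shown in this diff; such headers conventionally look like the hedged sketch below (BACKEND_DLL_EXPORTS is the definition added in the CMake hunk above; the helper macros come from ngraph/visibility.hpp):

// Hedged sketch only; not the actual contents of backend_visibility.hpp.
#include "ngraph/visibility.hpp"

#ifdef BACKEND_DLL_EXPORTS
#define BACKEND_API NGRAPH_HELPER_DLL_EXPORT
#else
#define BACKEND_API NGRAPH_HELPER_DLL_IMPORT
#endif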
@ -1,768 +0,0 @@
|
||||
//*****************************************************************************
|
||||
// Copyright 2017-2020 Intel Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//*****************************************************************************
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
TEST(type_prop, avg_pool_1d_deduce)
|
||||
{
|
||||
// Deduce type
|
||||
auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
|
||||
Shape window_shape{10};
|
||||
auto avg_pool = make_shared<op::AvgPool>(param, window_shape);
|
||||
|
||||
EXPECT_EQ(avg_pool->get_element_type(), element::f32);
|
||||
EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 91}));
|
||||
|
||||
EXPECT_EQ(avg_pool->get_window_movement_strides(), Strides{1});
|
||||
EXPECT_EQ(avg_pool->get_window_shape(), Shape{10});
|
||||
EXPECT_EQ(avg_pool->get_padding_below(), Shape{0});
|
||||
EXPECT_EQ(avg_pool->get_padding_above(), Shape{0});
|
||||
}
|
||||
|
||||
TEST(type_prop, avg_pool_1d_deduce_strided)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
    Shape window_shape{10};
    auto move_strides = Strides{2};
    auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 46}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), Strides{2});
    EXPECT_EQ(avg_pool->get_window_shape(), Shape{10});
    EXPECT_EQ(avg_pool->get_padding_below(), Shape{0});
    EXPECT_EQ(avg_pool->get_padding_above(), Shape{0});
}

TEST(type_prop, avg_pool_1d_deduce_strided_small_uneven)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 5});
    Shape window_shape{2};
    auto move_strides = Strides{2};
    auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 2}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), Strides{2});
    EXPECT_EQ(avg_pool->get_window_shape(), Shape{2});
    EXPECT_EQ(avg_pool->get_padding_below(), Shape{0});
    EXPECT_EQ(avg_pool->get_padding_above(), Shape{0});
}

TEST(type_prop, avg_pool_1d_deduce_strided_small_even)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 6});
    Shape window_shape{2};
    auto move_strides = Strides{2};
    auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 3}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), Strides{2});
    EXPECT_EQ(avg_pool->get_window_shape(), Shape{2});
    EXPECT_EQ(avg_pool->get_padding_below(), Shape{0});
    EXPECT_EQ(avg_pool->get_padding_above(), Shape{0});
}

TEST(type_prop, avg_pool_2d_deduce)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
    Shape window_shape{10, 20};
    auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 91, 131}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), (Strides{1, 1}));
    EXPECT_EQ(avg_pool->get_window_shape(), (Shape{10, 20}));
    EXPECT_EQ(avg_pool->get_padding_below(), (Shape{0, 0}));
    EXPECT_EQ(avg_pool->get_padding_above(), (Shape{0, 0}));
}

TEST(type_prop, avg_pool_2d_deduce_strided)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
    Shape window_shape{10, 20};
    auto move_strides = Strides{2, 3};
    auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 46, 44}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), (Strides{2, 3}));
    EXPECT_EQ(avg_pool->get_window_shape(), (Shape{10, 20}));
    EXPECT_EQ(avg_pool->get_padding_below(), (Shape{0, 0}));
    EXPECT_EQ(avg_pool->get_padding_above(), (Shape{0, 0}));
}

TEST(type_prop, avg_pool_3d_deduce_strided_small)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 7, 8, 10});
    Shape window_shape{2, 3, 2};
    auto move_strides = Strides{2, 3, 4};
    auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 3, 2, 3}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), (Strides{2, 3, 4}));
    EXPECT_EQ(avg_pool->get_window_shape(), (Shape{2, 3, 2}));
    EXPECT_EQ(avg_pool->get_padding_below(), (Shape{0, 0, 0}));
    EXPECT_EQ(avg_pool->get_padding_above(), (Shape{0, 0, 0}));
}

TEST(type_prop, avg_pool_3d_deduce_strided_padded_small)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 7, 8, 10});
    Shape window_shape{2, 3, 2};
    auto move_strides = Strides{2, 3, 4};
    Shape padding_below{5, 6, 4};
    Shape padding_above{6, 4, 5};
    auto avg_pool = make_shared<op::AvgPool>(
        param, window_shape, move_strides, padding_below, padding_above, true);

    EXPECT_EQ(avg_pool->get_element_type(), element::f32);
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 9, 6, 5}));

    EXPECT_EQ(avg_pool->get_window_movement_strides(), (Strides{2, 3, 4}));
    EXPECT_EQ(avg_pool->get_window_shape(), (Shape{2, 3, 2}));
    EXPECT_EQ(avg_pool->get_padding_below(), (Shape{5, 6, 4}));
    EXPECT_EQ(avg_pool->get_padding_above(), (Shape{6, 4, 5}));
}

TEST(type_prop, avg_pool_ceil_mode)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 10});
    Shape window_shape{2};
    auto move_strides = Strides{4};
    Shape padding_below{4};
    Shape padding_above{5};
    auto avg_pool = make_shared<op::AvgPool>(param,
                                             window_shape,
                                             move_strides,
                                             padding_below,
                                             padding_above,
                                             true,
                                             op::PadType::EXPLICIT,
                                             true);

    // ceil((10 + 9 - 2)/4) + 1
    EXPECT_EQ(avg_pool->get_shape(), (Shape{64, 3, 6}));
}

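With ceil_mode set, the window count rounds up instead of down: ceil((10 + 4 + 5 - 2) / 4) + 1 == 6. A sketch of both roundings, again with a hypothetical helper rather than an ngraph API:

#include <cstddef>

// Hypothetical helper: pooled length over an already-padded axis.
static std::size_t pool_out_dim(std::size_t padded_dim,
                                std::size_t window,
                                std::size_t stride,
                                bool ceil_mode)
{
    std::size_t span = padded_dim - window;
    std::size_t steps = ceil_mode ? (span + stride - 1) / stride : span / stride;
    return steps + 1;
}

// pool_out_dim(10 + 4 + 5, 2, 4, true) == 6, matching Shape{64, 3, 6} above.
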
TEST(type_prop, avg_pool_invalid_0d_input)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{});
    Shape window_shape{};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid 0D input not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Data batch must have rank of at least 3 (one batch axis, one "
                             "input-channel axis, and at least one spatial dimension)");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_1d_input)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{2});
    Shape window_shape{};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid 1D input not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Data batch must have rank of at least 3 (one batch axis, one "
                             "input-channel axis, and at least one spatial dimension)");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_2d_input)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{2, 6});
    Shape window_shape{};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid 2D input not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Data batch must have rank of at least 3 (one batch axis, one "
                             "input-channel axis, and at least one spatial dimension)");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_0_batch_size)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{0, 6, 1});
    Shape window_shape{1};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with 0 batch size not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), "Batch size is zero");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_0_channels)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 0, 1});
    Shape window_shape{1};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with 0 channels not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), "Channel count is zero");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3, 3, 3};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with too many window dimensions not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Ranks for data item shape (data batch has shape {6,2,10,10}, so data "
                             "item rank is 2), padding below (CoordinateDiff{0, 0, 0}), padding "
                             "above (CoordinateDiff{0, 0, 0}), window shape ({3,3,3}), and window "
                             "strides (Strides{1, 1, 1}) do not match");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with too few window dimensions not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Ranks for data item shape (data batch has shape {6,2,10,10}, so data "
                             "item rank is 2), padding below (CoordinateDiff{0}), padding above "
                             "(CoordinateDiff{0}), window shape ({3}), and window strides "
                             "(Strides{1}) do not match");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_movement_stride_rank)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3, 3};
    auto move_strides = Strides{2, 3, 8};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with wrong movement stride rank not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Ranks for data item shape (data batch has shape {6,2,10,10}, so data "
                             "item rank is 2), padding below (CoordinateDiff{0, 0}), padding above "
                             "(CoordinateDiff{0, 0}), window shape ({3,3}), and window strides "
                             "(Strides{2, 3, 8}) do not match");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_padding_below_rank)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3, 3};
    auto move_strides = Strides{2, 3};
    Shape padding_below{1, 2, 3};
    Shape padding_above{1, 2};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(
            param, window_shape, move_strides, padding_below, padding_above, false);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with wrong below-padding rank not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Ranks for data item shape (data batch has shape {6,2,10,10}, so data "
                             "item rank is 2), padding below (CoordinateDiff{1, 2, 3}), padding "
                             "above (CoordinateDiff{1, 2}), window shape ({3,3}), and window "
                             "strides (Strides{2, 3}) do not match");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_padding_above_rank)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3, 3};
    auto move_strides = Strides{2, 3};
    Shape padding_below{1, 2};
    Shape padding_above{1, 2, 3};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(
            param, window_shape, move_strides, padding_below, padding_above, false);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with wrong above-padding rank not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Ranks for data item shape (data batch has shape {6,2,10,10}, so data "
                             "item rank is 2), padding below (CoordinateDiff{1, 2}), padding above "
                             "(CoordinateDiff{1, 2, 3}), window shape ({3,3}), and window strides "
                             "(Strides{2, 3}) do not match");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_input_item_size_0)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 0, 10});
    Shape window_shape{3, 3};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with zero-length spatial axis not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            "Data shape after padding and dilation has dimension less than 1 (dim: 0) at axis 0");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_window_size_0)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3, 0};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with zero-length window axis not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Window after dilation has dimension less than 1 (dim: 0) at axis 1");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_invalid_dilated_too_large)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 8, 8});
    Shape window_shape{9, 9};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with oversized window not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Window after dilation has dimension (dim: 9) larger than the data "
                             "shape after padding (dim: 8) at axis 0");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 8, 8});
    Shape window_shape{9, 9};
    Strides window_strides{1, 1};
    Shape padding_below{0, 0};
    Shape padding_above{1, 1};
    auto avg_pool =
        make_shared<op::AvgPool>(param, window_shape, window_strides, padding_below, padding_above);

    ASSERT_EQ(avg_pool->get_output_element_type(0), element::f32);
    ASSERT_EQ(avg_pool->get_output_shape(0), (Shape{6, 2, 1, 1}));
}

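The test above passes only because padding_above extends the 8-wide axes to 9, so the 9-wide window has exactly one position. A hypothetical version of that validity check:

#include <cassert>
#include <cstddef>

// Hypothetical helper: the padded extent must hold at least one window.
static std::size_t padded_pool_out_dim(std::size_t in_dim,
                                       std::size_t pad_below,
                                       std::size_t pad_above,
                                       std::size_t window,
                                       std::size_t stride)
{
    std::size_t padded = in_dim + pad_below + pad_above;
    assert(padded >= window); // otherwise validation fails, as in the tests above
    return (padded - window) / stride + 1;
}

// padded_pool_out_dim(8, 0, 1, 9, 1) == 1, matching Shape{6, 2, 1, 1} above.
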
TEST(type_prop, avg_pool_invalid_movement_stride_0)
{
    // Deduce type
    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
    Shape window_shape{3, 3};
    auto move_strides = Strides{0, 1};
    try
    {
        auto avg_pool = make_shared<op::AvgPool>(param, window_shape, move_strides);

        // Should have thrown, so fail if it didn't
        FAIL() << "Invalid input with 0-length movement stride axis not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Window strides (Strides{0, 1}) has zero dimension at axis 0");
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

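The partial-shape tests below rely on rank inference: when the data rank is dynamic, the output rank still follows from the attribute ranks, one spatial axis per window dimension plus the batch and channel axes. A hypothetical statement of that rule:

#include <cstddef>

// Hypothetical mirror of the rank rule: spatial rank + batch axis + channel axis.
static std::size_t inferred_output_rank(std::size_t spatial_rank)
{
    return spatial_rank + 2;
}

// inferred_output_rank(4) == 6, matching PartialShape::dynamic(6) below.
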
TEST(type_prop, avg_pool_partial_rank_dynamic_ok)
{
    PartialShape arg_shape{PartialShape::dynamic()};
    Shape window_shape{2, 3, 4, 5};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);
    auto ap = make_shared<op::AvgPool>(param,
                                       window_shape,
                                       window_movement_strides,
                                       padding_below,
                                       padding_above,
                                       include_padding_in_average);

    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
    ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6)));
}

TEST(type_prop, avg_pool_partial_rank_dynamic_attrib_rank_mismatch)
{
    PartialShape arg_shape{PartialShape::dynamic()};
    Shape window_shape{2, 3, 4, 5};
    Strides window_movement_strides{1, 1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);

    try
    {
        auto ap = make_shared<op::AvgPool>(param,
                                           window_shape,
                                           window_movement_strides,
                                           padding_below,
                                           padding_above,
                                           include_padding_in_average);
        FAIL() << "Mismatch of attribute ranks not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Ranks for data item shape (data batch has shape ?, so data item rank is "
                        "?), padding below (CoordinateDiff{0, 0, 0, 0}), padding above "
                        "(CoordinateDiff{0, 0, 0, 0}), window shape ({2,3,4,5}), and window "
                        "strides (Strides{1, 1, 1, 1, 1}) do not match"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok)
{
    PartialShape arg_shape{PartialShape::dynamic(6)};
    Shape window_shape{2, 3, 4, 5};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);
    auto ap = make_shared<op::AvgPool>(param,
                                       window_shape,
                                       window_movement_strides,
                                       padding_below,
                                       padding_above,
                                       include_padding_in_average);

    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
    ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6)));
}

TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok)
{
    PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
    Shape window_shape{2, 3, 4, 5};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);
    auto ap = make_shared<op::AvgPool>(param,
                                       window_shape,
                                       window_movement_strides,
                                       padding_below,
                                       padding_above,
                                       include_padding_in_average);

    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
    ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(
        PartialShape{5, Dimension::dynamic(), 7, Dimension::dynamic(), 1, 3}));
}

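Dimension-wise inference in the test above: known spatial dims are pooled, dynamic ones stay dynamic. A sketch using -1 as a stand-in for Dimension::dynamic():

#include <cstddef>
#include <vector>

// Hypothetical per-dimension inference at unit stride; -1 means "dynamic".
static std::vector<long> pool_spatial_dims(const std::vector<long>& dims,
                                           const std::vector<long>& window)
{
    std::vector<long> out;
    for (std::size_t i = 0; i < dims.size(); ++i)
    {
        out.push_back(dims[i] < 0 ? -1 : dims[i] - window[i] + 1);
    }
    return out;
}

// pool_spatial_dims({8, -1, 4, 7}, {2, 3, 4, 5}) yields {7, -1, 1, 3},
// matching the expected PartialShape{5, ?, 7, ?, 1, 3} above.
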
TEST(type_prop, avg_pool_partial_rank_static_dynamic_attrib_rank_mismatch)
{
    PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
    Shape window_shape{2, 3, 4, 5, 6};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);

    try
    {
        auto ap = make_shared<op::AvgPool>(param,
                                           window_shape,
                                           window_movement_strides,
                                           padding_below,
                                           padding_above,
                                           include_padding_in_average);
        FAIL() << "Mismatch of attribute ranks not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Ranks for data item shape (data batch has shape {5,?,8,?,4,7}, so data "
                        "item rank is 4), padding below (CoordinateDiff{0, 0, 0, 0}), padding "
                        "above (CoordinateDiff{0, 0, 0, 0}), window shape ({2,3,4,5,6}), and "
                        "window strides (Strides{1, 1, 1, 1}) do not match"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_not_too_big)
{
    PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
    Shape window_shape{9, 3, 4, 5};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);

    try
    {
        auto ap = make_shared<op::AvgPool>(param,
                                           window_shape,
                                           window_movement_strides,
                                           padding_below,
                                           padding_above,
                                           include_padding_in_average);
        FAIL() << "Oversized window not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Window after dilation has dimension (dim: 9) larger than "
                                         "the data shape after padding (dim: 8) at axis 0"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big)
{
    PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
    Shape window_shape{9, 3, 4, 5};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 0};
    Shape padding_above{1, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);
    auto ap = make_shared<op::AvgPool>(param,
                                       window_shape,
                                       window_movement_strides,
                                       padding_below,
                                       padding_above,
                                       include_padding_in_average);

    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
    ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(
        PartialShape{5, Dimension::dynamic(), 1, Dimension::dynamic(), 1, 3}));
}

TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_in_padding)
{
    PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
    Shape window_shape{9, 3, 4, 3};
    Strides window_movement_strides{1, 1, 1, 1};
    Shape padding_below{0, 0, 0, 4};
    Shape padding_above{0, 0, 0, 0};
    bool include_padding_in_average = false;

    auto param = make_shared<op::Parameter>(element::f32, arg_shape);

    try
    {
        auto ap = make_shared<op::AvgPool>(param,
                                           window_shape,
                                           window_movement_strides,
                                           padding_below,
                                           padding_above,
                                           include_padding_in_average);
        FAIL() << "Window in padding not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Window after dilation has dimension (dim: 9) larger than "
                                         "the data shape after padding (dim: 8) at axis 0"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
@ -102,81 +102,6 @@ TEST(type_prop, batch_norm_training_shape_check)
    }
}

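The backprop tests that follow exercise two families of checks: a single element type across all six inputs, and shape agreement between the data batch, the delta, and the rank-1 gamma/beta/mean/variance vectors. A hypothetical condensation of those checks (not the ngraph implementation):

#include <stdexcept>
#include <vector>

// Hypothetical validator mirroring the error messages asserted below.
static void validate_bn_backprop(const std::vector<long>& data_shape,
                                 const std::vector<long>& delta_shape,
                                 long gamma_channels,
                                 long mean_channels)
{
    if (gamma_channels != mean_channels)
    {
        throw std::invalid_argument("Shapes for gamma/beta/mean/variance do not match");
    }
    if (data_shape != delta_shape)
    {
        throw std::invalid_argument(
            "Shape of delta does not match the shape of the input data");
    }
}
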
TEST(type_prop, batch_norm_training_backprop_et_check)
{
    auto data_batch = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2, 2});
    auto gamma = make_shared<op::Parameter>(element::f32, Shape{3});
    auto beta = make_shared<op::Parameter>(element::f64, Shape{3});
    auto mean = make_shared<op::Parameter>(element::f32, Shape{3});
    auto variance = make_shared<op::Parameter>(element::f32, Shape{3});
    auto delta = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2, 2});

    try
    {
        auto bc = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, 0.001);
        FAIL() << "Deduced type should disagree with c-tor arguments";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Input element types do not match"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, batch_norm_training_backprop_shape_check)
{
    auto data_batch = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2, 2});
    auto gamma = make_shared<op::Parameter>(element::f32, Shape{3});
    auto beta = make_shared<op::Parameter>(element::f32, Shape{4});
    auto mean = make_shared<op::Parameter>(element::f32, Shape{3});
    auto variance = make_shared<op::Parameter>(element::f32, Shape{3});
    auto delta = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2, 2});

    try
    {
        auto bc = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, 0.001);
        FAIL() << "Deduced type should disagree with c-tor arguments";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Shapes for gamma/beta/mean/variance do not match"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, batch_norm_training_backprop_delta_check)
{
    auto dummy = make_shared<op::Parameter>(element::f32, Shape{3});
    auto dummy2 = make_shared<op::Parameter>(element::f32, Shape{4});
    auto param = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2, 2});
    auto delta = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2, 3});

    try
    {
        auto bc = make_shared<op::BatchNormTrainingBackprop>(
            param, dummy, dummy, dummy, dummy, delta, 0.001);
        FAIL() << "Deduced type should disagree with c-tor arguments";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(), std::string("Shape of delta does not match the shape of the input data"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

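The partial-shape backprop tests further down all assert the same output layout; a hypothetical summary, with C the channel count:

#include <vector>

// Hypothetical summary of the three outputs asserted by the tests below.
struct BnBackpropOutputs
{
    std::vector<long> d_data;  // output 0: same shape as the input data
    std::vector<long> d_gamma; // output 1: {C}
    std::vector<long> d_beta;  // output 2: {C}
};
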
TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
@ -758,561 +683,3 @@ TEST(type_prop,
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

////
////
////
////

TEST(type_prop, batch_norm_training_backprop_partial_all_rank_dynamic)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{PartialShape::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{PartialShape::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    auto bn = make_shared<op::BatchNormTrainingBackprop>(
        data_batch, gamma, beta, mean, variance, delta, epsilon);

    ASSERT_EQ(bn->get_output_size(), 3);
    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(1), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(2), data_batch_et);
    ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_dynamic());
    ASSERT_TRUE(bn->get_output_partial_shape(1).same_scheme(PartialShape::dynamic(1)));
    ASSERT_TRUE(bn->get_output_partial_shape(2).same_scheme(PartialShape::dynamic(1)));
}

TEST(type_prop, batch_norm_training_backprop_partial_input_rank_static_dynamic_ok)
{
    PartialShape data_batch_shape{
        64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
    PartialShape gamma_shape{PartialShape::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{PartialShape::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    auto bn = make_shared<op::BatchNormTrainingBackprop>(
        data_batch, gamma, beta, mean, variance, delta, epsilon);

    ASSERT_EQ(bn->get_output_size(), 3);
    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(1), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(2), data_batch_et);
    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
        PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
    ASSERT_TRUE(bn->get_output_partial_shape(1).same_scheme(PartialShape::dynamic(1)));
    ASSERT_TRUE(bn->get_output_partial_shape(2).same_scheme(PartialShape::dynamic(1)));
}

TEST(type_prop, batch_norm_training_backprop_partial_input_rank_static_dynamic_zero_channels)
{
    PartialShape data_batch_shape{
        Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
    PartialShape gamma_shape{PartialShape::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{PartialShape::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Zero channel count not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Channel count must be at least 1"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, batch_norm_training_backprop_partial_delta_rank_static_dynamic_ok)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{PartialShape::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{PartialShape::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    auto bn = make_shared<op::BatchNormTrainingBackprop>(
        data_batch, gamma, beta, mean, variance, delta, epsilon);

    ASSERT_EQ(bn->get_output_size(), 3);
    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(1), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(2), data_batch_et);
    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
        PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
    ASSERT_TRUE(bn->get_output_partial_shape(1).same_scheme(PartialShape::dynamic(1)));
    ASSERT_TRUE(bn->get_output_partial_shape(2).same_scheme(PartialShape::dynamic(1)));
}

TEST(type_prop, batch_norm_training_backprop_partial_delta_rank_static_dynamic_channels_known)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{PartialShape::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{PartialShape::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{Dimension::dynamic(), 5, Dimension::dynamic(), Dimension::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    auto bn = make_shared<op::BatchNormTrainingBackprop>(
        data_batch, gamma, beta, mean, variance, delta, epsilon);

    ASSERT_EQ(bn->get_output_size(), 3);
    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(1), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(2), data_batch_et);
    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
        PartialShape{Dimension::dynamic(), 5, Dimension::dynamic(), Dimension::dynamic()}));
    ASSERT_TRUE(bn->get_output_partial_shape(1).same_scheme(PartialShape{5}));
    ASSERT_TRUE(bn->get_output_partial_shape(2).same_scheme(PartialShape{5}));
}

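In the test above only the delta knows the channel count (5), yet outputs 1 and 2 come out as PartialShape{5}: the channel dimension can be supplied by any input that has it. A sketch of that merge, with -1 for a dynamic dimension:

#include <initializer_list>

// Hypothetical channel-count merge; the real op also rejects conflicting
// known values rather than overwriting them.
static long merge_channel_count(long from_data, long from_delta, long from_gamma)
{
    long c = -1;
    for (long d : {from_data, from_delta, from_gamma})
    {
        if (d >= 0)
        {
            c = d;
        }
    }
    return c;
}

// merge_channel_count(-1, 5, -1) == 5, matching PartialShape{5} above.
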
TEST(type_prop, batch_norm_training_backprop_partial_delta_rank_static_dynamic_zero_channels)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{PartialShape::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{PartialShape::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Zero channel count not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Channel count must be at least 1"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop,
     batch_norm_training_backprop_partial_input_and_delta_rank_dynamic_some_rank_static_dynamic_ok)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{Dimension::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{Dimension::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    auto bn = make_shared<op::BatchNormTrainingBackprop>(
        data_batch, gamma, beta, mean, variance, delta, epsilon);

    ASSERT_EQ(bn->get_output_size(), 3);
    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(1), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(2), data_batch_et);
    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
    ASSERT_TRUE(bn->get_output_partial_shape(1).same_scheme(PartialShape::dynamic(1)));
    ASSERT_TRUE(bn->get_output_partial_shape(2).same_scheme(PartialShape::dynamic(1)));
}

TEST(
    type_prop,
    batch_norm_training_backprop_partial_input_and_delta_rank_dynamic_some_rank_static_dynamic_wrong_rank)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{Dimension::dynamic(), Dimension::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Wrong gamma/beta/mean/variance shape not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Shape for gamma/beta/mean/variance ({?,?}) does not have rank 1"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(
    type_prop,
    batch_norm_training_backprop_partial_input_and_delta_rank_dynamic_some_rank_static_dynamic_inconsistent_rank)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{3, Dimension::dynamic()};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{Dimension::dynamic()};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Wrong gamma/beta/mean/variance shape not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Shapes for gamma/beta/mean/variance do not match"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(
    type_prop,
    batch_norm_training_backprop_partial_input_and_delta_rank_dynamic_some_static_inconsistent_channel_count)
{
    PartialShape data_batch_shape{PartialShape::dynamic()};
    PartialShape gamma_shape{3};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{4};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{PartialShape::dynamic()};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Inconsistent gamma/beta/mean/variance channel count not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Shapes for gamma/beta/mean/variance do not match"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop,
     batch_norm_training_backprop_partial_input_and_delta_rank_static_dynamic_some_static_ok)
{
    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224};
    PartialShape gamma_shape{3};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{3};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{Dimension::dynamic(), 3, 448, 224};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    auto bn = make_shared<op::BatchNormTrainingBackprop>(
        data_batch, gamma, beta, mean, variance, delta, epsilon);

    ASSERT_EQ(bn->get_output_size(), 3);
    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(1), data_batch_et);
    ASSERT_EQ(bn->get_output_element_type(2), data_batch_et);
    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(PartialShape{64, 3, 448, 224}));
    ASSERT_TRUE(bn->get_output_partial_shape(1).same_scheme(PartialShape{3}));
    ASSERT_TRUE(bn->get_output_partial_shape(2).same_scheme(PartialShape{3}));
}

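The expected {64, 3, 448, 224} above is the dimension-wise merge of the data shape {64, ?, ?, 224} and the delta shape {?, 3, 448, 224}. A sketch of such a merge, with -1 for a dynamic dimension:

#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical shape merge; conflicting known dimensions are an error.
static std::vector<long> merge_shapes(const std::vector<long>& a,
                                      const std::vector<long>& b)
{
    std::vector<long> out;
    for (std::size_t i = 0; i < a.size(); ++i)
    {
        if (a[i] >= 0 && b[i] >= 0 && a[i] != b[i])
        {
            throw std::invalid_argument(
                "Shape of delta does not match the shape of the input data");
        }
        out.push_back(a[i] >= 0 ? a[i] : b[i]);
    }
    return out;
}

// merge_shapes({64, -1, -1, 224}, {-1, 3, 448, 224}) == {64, 3, 448, 224}.
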
TEST(
    type_prop,
    batch_norm_training_backprop_partial_input_and_delta_rank_static_dynamic_some_static_inconsistent_channel_count)
{
    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224};
    PartialShape gamma_shape{3};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{3};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{Dimension::dynamic(), 4, 448, 224};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Inconsistent delta/gamma/beta/mean/variance channel count not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Input channel dimension (4) does not match "
                                         "shape for gamma/beta/mean/variance ({3})"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(
    type_prop,
    batch_norm_training_backprop_partial_input_and_delta_rank_static_dynamic_some_static_inconsistent_batch_size)
{
    PartialShape data_batch_shape{64, 3, Dimension::dynamic(), 224};
    PartialShape gamma_shape{3};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{3};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{128, 4, Dimension::dynamic(), 224};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Inconsistent input/delta batch size not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Shape of delta does not match the shape of the input data (input data "
                        "shape: {64,3,?,224}, delta shape: {128,4,?,224})"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(
    type_prop,
    batch_norm_training_backprop_partial_input_and_delta_rank_static_dynamic_some_static_inconsistent_spatial_dims)
{
    PartialShape data_batch_shape{Dimension::dynamic(), 3, Dimension::dynamic(), 224};
    PartialShape gamma_shape{3};
    PartialShape beta_shape{PartialShape::dynamic()};
    PartialShape mean_shape{3};
    PartialShape variance_shape{PartialShape::dynamic()};
    PartialShape delta_shape{Dimension::dynamic(), 3, Dimension::dynamic(), 448};
    double epsilon = 0.001;
    element::Type data_batch_et = element::f32;
    element::Type gamma_et = element::f32;
    element::Type beta_et = element::f32;
    element::Type mean_et = element::f32;
    element::Type variance_et = element::f32;
    element::Type delta_et = element::f32;

    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
    auto delta = make_shared<op::Parameter>(delta_et, delta_shape);

    try
    {
        auto bn = make_shared<op::BatchNormTrainingBackprop>(
            data_batch, gamma, beta, mean, variance, delta, epsilon);
        FAIL() << "Inconsistent input/delta spatial dimensions not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Shape of delta does not match the shape of the input data "
                        "(input data shape: {?,3,?,224}, delta shape: {?,3,?,448})"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
@ -63,32 +63,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce)
    EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}

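The filters-backprop tests below pick their output-delta lengths from the forward convolution relation. A sketch of that relation, assuming a hypothetical helper conv_out_dim rather than an ngraph API:

#include <cstddef>

// Hypothetical helper: forward convolution output length for one axis.
static std::size_t conv_out_dim(std::size_t data,
                                std::size_t filter,
                                std::size_t pad_below,
                                std::size_t pad_above,
                                std::size_t stride)
{
    return (data + pad_below + pad_above - filter) / stride + 1;
}

// conv_out_dim(100, 10, 0, 0, 1) == 91, the output-delta length used below.
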
TEST(type_prop, conv_1d_back_filters_deduce)
|
||||
{
|
||||
// Deduce type
|
||||
// Shape data_batch_shape{64, 3, 100};
|
||||
Shape filters_shape{128, 3, 10};
|
||||
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
|
||||
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 91}); // output delta
|
||||
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
|
||||
filters_shape,
|
||||
param1,
|
||||
Strides{1},
|
||||
Strides{1},
|
||||
CoordinateDiff{0},
|
||||
CoordinateDiff{0},
|
||||
Strides{1});
|
||||
EXPECT_EQ(conv->get_element_type(), element::f32);
|
||||
EXPECT_EQ(conv->get_shape(), filters_shape);
|
||||
|
||||
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
|
||||
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
|
||||
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});
|
||||
|
||||
EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
|
||||
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
|
||||
}
|
||||
|
||||
TEST(type_prop, conv_1d_deduce_padded)
{
// Deduce type
@ -140,36 +114,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}

TEST(type_prop, conv_1d_back_filters_deduce_padded)
{
// Deduce type
// Shape data_batch_shape{64, 3, 100};
Shape filters_shape{128, 3, 10};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 96}); // output delta
auto move_strides = Strides{1};
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
dilation_strides,
padding_below,
padding_above,
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
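// For reference: with padding {2, 3} the padded extent is 100 + 2 + 3 = 105,
// so the forward output (and hence the delta) has length 105 - 10 + 1 = 96.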
TEST(type_prop, conv_1d_deduce_strided)
{
// Deduce type
@ -214,33 +158,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}

TEST(type_prop, conv_1d_back_filters_deduce_strided)
{
// Deduce type
// Shape data_batch_shape{64, 3, 100};
Shape filters_shape{128, 3, 10};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 46}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
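// For reference: with stride 2, floor((100 - 10) / 2) + 1 = 46, the delta
// length used above.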
TEST(type_prop, conv_1d_deduce_strided_padded)
{
// Deduce type
@ -292,36 +209,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}

TEST(type_prop, conv_1d_back_filters_deduce_strided_padded)
{
// Deduce type
// Shape data_batch_shape{64, 3, 100};
Shape filters_shape{128, 3, 10};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 48}); // output delta
auto move_strides = Strides{2};
auto dilation_strides = Strides{1};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
dilation_strides,
padding_below,
padding_above,
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
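// For reference: padded extent 100 + 2 + 3 = 105, so with stride 2 the delta
// length is floor((105 - 10) / 2) + 1 = 48.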
TEST(type_prop, conv_1d_deduce_strided_small_uneven)
{
// Deduce type
@ -366,33 +253,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}

TEST(type_prop, conv_1d_back_filters_deduce_strided_small_uneven)
{
// Deduce type
// Shape data_batch_shape{64, 3, 5};
Shape filters_shape{128, 3, 2};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 5}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 2}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
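// For reference: floor((5 - 2) / 2) + 1 = 2; the flooring drops the final
// partial window, which is the "uneven" case this test exercises.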
TEST(type_prop, conv_1d_deduce_strided_small_even)
{
// Deduce type
@ -437,33 +297,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}

TEST(type_prop, conv_1d_back_filters_deduce_strided_small_even)
{
// Deduce type
// Shape data_batch_shape{64, 3, 6};
Shape filters_shape{128, 3, 2};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 6}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 3}); // output delta
auto move_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
Strides{1},
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
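// For reference: floor((6 - 2) / 2) + 1 = 3; with an even input extent the
// last window lands exactly on the boundary.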
TEST(type_prop, conv_1d_deduce_window_dilated)
{
// Deduce type
@ -510,34 +343,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}

TEST(type_prop, conv_1d_back_filters_deduce_window_dilated)
{
// Deduce type
// Shape data_batch_shape{64, 3, 100};
Shape filters_shape{128, 3, 10};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 82}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
dilate_strides,
CoordinateDiff{0},
CoordinateDiff{0},
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{0});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{0});
}
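// For reference: a window dilation of 2 stretches the filter to an effective
// extent of (10 - 1) * 2 + 1 = 19, so the delta length is 100 - 19 + 1 = 82.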
TEST(type_prop, conv_1d_deduce_window_dilated_padded)
{
// Deduce type
@ -589,36 +394,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded)
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}

TEST(type_prop, conv_1d_back_filters_deduce_window_dilated_padded)
{
// Deduce type
// Shape data_batch_shape{64, 3, 100};
Shape filters_shape{128, 3, 10};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 87}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
dilate_strides,
padding_below,
padding_above,
Strides{1});
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{1});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
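// For reference: padded extent 100 + 2 + 3 = 105 and effective filter extent
// 19, so the delta length is 105 - 19 + 1 = 87.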
TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded)
{
// Deduce type
@ -677,37 +452,6 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}

TEST(type_prop, conv_1d_back_filters_deduce_window_dilated_data_dilated_padded)
{
// Deduce type
// Shape data_batch_shape{64, 3, 100};
Shape filters_shape{128, 3, 10};
auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100}); // data batch
auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 285}); // output delta
auto move_strides = Strides{1};
auto dilate_strides = Strides{2};
auto padding_below = CoordinateDiff{2};
auto padding_above = CoordinateDiff{3};
auto data_dilate_strides = Strides{3};
auto conv = make_shared<op::ConvolutionBackpropFilters>(param0,
filters_shape,
param1,
move_strides,
dilate_strides,
padding_below,
padding_above,
data_dilate_strides);
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), filters_shape);

EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1});
EXPECT_EQ(conv->get_window_dilation_strides_forward(), Strides{2});
EXPECT_EQ(conv->get_data_dilation_strides_forward(), Strides{3});

EXPECT_EQ(conv->get_padding_below_forward(), CoordinateDiff{2});
EXPECT_EQ(conv->get_padding_above_forward(), CoordinateDiff{3});
}
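// For reference: a data dilation of 3 stretches the input to
// (100 - 1) * 3 + 1 = 298; padding gives 298 + 2 + 3 = 303; with the
// effective filter extent of 19 the delta length is 303 - 19 + 1 = 285.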
TEST(type_prop, conv_2d_deduce)
{
// Deduce type
@ -775,60 +519,6 @@ struct DeduceAutoPadTest
{
};

TEST_P(DeduceAutoPadTest, same_upper)
{
auto image_shape = std::get<0>(GetParam());
image_shape.insert(image_shape.begin(), {1, 1}); // Add {N, C}
auto filter_shape = std::get<1>(GetParam());
filter_shape.insert(filter_shape.begin(), {1, 1}); // Add {O, I}
auto param0 = make_shared<op::Parameter>(element::f32, image_shape);
auto param1 = make_shared<op::Parameter>(element::f32, filter_shape);

auto conv = make_shared<op::Convolution>(param0,
param1,
std::get<2>(GetParam()),
std::get<3>(GetParam()),
CoordinateDiff(),
CoordinateDiff(),
Strides(),
op::PadType::SAME_UPPER);
EXPECT_EQ(conv->get_padding_below(), std::get<4>(GetParam()));
EXPECT_EQ(conv->get_padding_above(), std::get<5>(GetParam()));

auto no_dilation = std::all_of(std::get<3>(GetParam()).begin(),
std::get<3>(GetParam()).end(),
[](size_t i) { return i <= 1; });
if (no_dilation)
{
auto max_pool = make_shared<op::MaxPool>(param0,
std::get<1>(GetParam()),
std::get<2>(GetParam()),
Shape(),
Shape(),
op::PadType::SAME_UPPER);
CoordinateDiff padding_below(max_pool->get_padding_below().begin(),
max_pool->get_padding_below().end());
CoordinateDiff padding_above(max_pool->get_padding_above().begin(),
max_pool->get_padding_above().end());
EXPECT_EQ(padding_below, std::get<4>(GetParam()));
EXPECT_EQ(padding_above, std::get<5>(GetParam()));

auto avg_pool = make_shared<op::AvgPool>(param0,
std::get<1>(GetParam()),
std::get<2>(GetParam()),
Shape(),
Shape(),
false,
op::PadType::SAME_UPPER);
CoordinateDiff pad_below(avg_pool->get_padding_below().begin(),
avg_pool->get_padding_below().end());
CoordinateDiff pad_above(avg_pool->get_padding_above().begin(),
avg_pool->get_padding_above().end());
EXPECT_EQ(pad_below, std::get<4>(GetParam()));
EXPECT_EQ(pad_above, std::get<5>(GetParam()));
}
}
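// For reference, a minimal sketch of the SAME_UPPER rule these checks assume
// (the conventional definition; this snippet is illustrative only, not part
// of the ngraph API):
//
//   size_t out = (in + stride - 1) / stride;               // ceil(in / stride)
//   size_t total = std::max<int64_t>(
//       static_cast<int64_t>((out - 1) * stride + filter) - in, 0);
//   size_t below = total / 2;        // SAME_UPPER rounds the extra
//   size_t above = total - below;    // padding toward the upper end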
TEST_P(DeduceAutoPadTest, same_lower)
{
auto image_shape = std::get<0>(GetParam());
@ -2805,23 +2495,6 @@ TEST(type_prop, conv_partial_dynamic_et)
PartialShape{64, 100, 1, Dimension::dynamic()}));
}

TEST(type_prop, conv_bprop_filter_v1_output_partial_shape_dynamic)
{
Shape shape_data{64, 3, 100};
auto data = make_shared<op::Parameter>(element::f32, shape_data);
Shape shape_delta{64, 128, 96};
auto deltas = make_shared<op::Parameter>(element::f32, shape_delta);
auto filters_shape = make_shared<op::Parameter>(element::i64, Shape{128, 3, 10});
auto strides = Strides{1};
auto dilations = Strides{1};
auto padding_begin = CoordinateDiff{2};
auto padding_end = CoordinateDiff{3};
auto conv1 = make_shared<op::v1::ConvolutionBackpropFilters>(
data, deltas, filters_shape, strides, dilations, padding_begin, padding_end);

ASSERT_TRUE(conv1->get_output_partial_shape(0).is_dynamic());
}
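// For reference: the filters shape is fed in as a Parameter rather than a
// Constant, so its value is unknown at graph-construction time and the output
// partial shape remains dynamic, which is what the assertion above verifies.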
TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic)
{
Shape shape_filter{6, 3, 3, 3};

@ -58,25 +58,3 @@ TEST(type_prop, conv_bias_add_2d_deduce)
EXPECT_EQ(conv->get_element_type(), element::f32);
EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131}));
}

TEST(type_prop, conv_bias_bprop_2d_deduce)
{
// Deduce type
auto data = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
auto bias = make_shared<op::Parameter>(element::f32, Shape{128});
auto delta = make_shared<op::Parameter>(element::f32, Shape{64, 128, 91, 131});
auto conv = make_shared<op::ConvolutionBiasBackpropFiltersBias>(data,
filters->get_shape(),
bias->get_shape(),
delta,
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{0, 0},
CoordinateDiff{0, 0},
Strides{1, 1});
EXPECT_EQ(conv->get_output_element_type(0), element::f32);
EXPECT_EQ(conv->get_output_element_type(1), element::f32);
EXPECT_EQ(conv->get_output_shape(0), filters->get_shape());
EXPECT_EQ(conv->get_output_shape(1), bias->get_shape());
}
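// For reference: ConvolutionBiasBackpropFiltersBias produces two outputs, the
// filter gradient (output 0, shaped like the filters) and the bias gradient
// (output 1, shaped like the bias), which the four checks above verify.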
@ -85,108 +85,3 @@ TEST(type_prop, layer_norm_affine_rank)
FAIL() << "Deduced type check failed for unexpected reason";
}
}

TEST(type_prop, layer_norm_bprop_element_type)
{
auto data = make_shared<op::Parameter>(element::i32, Shape{2, 4});
auto delta = make_shared<op::Parameter>(element::f32, Shape{2, 4});
try
{
auto lnb = make_shared<op::LayerNormBackprop>(data, delta);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect element type";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Argument element type must be f16, bf16, f32, f64 or dynamic"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
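// For reference: the i32 data input is what triggers the failure above;
// LayerNormBackprop accepts only the floating-point element types (or
// dynamic) listed in the error message.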
TEST(type_prop, layer_norm_bprop_begin_norm_axis)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto delta = make_shared<op::Parameter>(element::f32, Shape{2, 4});
try
{
auto lnb = make_shared<op::LayerNormBackprop>(data, delta, 2);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect begin norm axis";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("begin_norm_axis is out of range"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}

TEST(type_prop, layer_norm_bprop_delta)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto delta = make_shared<op::Parameter>(element::f32, Shape{4});
try
{
auto lnb = make_shared<op::LayerNormBackprop>(data, delta);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect delta rank";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Delta rank is incorrect"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}

TEST(type_prop, layer_norm_bprop_stats)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto delta = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto mean = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto variance = make_shared<op::Parameter>(element::f32, Shape{2});
try
{
auto lnb = make_shared<op::LayerNormBackprop>(data, delta, mean, variance);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect stats rank";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Mean and/or variance rank is incorrect"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}

TEST(type_prop, layer_norm_bprop_affine)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto delta = make_shared<op::Parameter>(element::f32, Shape{2, 4});
auto scale = make_shared<op::Parameter>(element::f32, Shape{2, 4});
try
{
auto lnb = make_shared<op::LayerNormBackprop>(data, delta, scale);
// Should have thrown, so fail if it didn't
FAIL() << "Incorrect affine rank";
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Scale rank is incorrect"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
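// For reference: the four tests above walk LayerNormBackprop's remaining
// validation rules in turn: begin_norm_axis must lie within the data rank
// (2 is out of range for rank-2 data), and the delta, mean/variance, and
// scale inputs must each have the rank the op expects relative to the data.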
@ -120,24 +120,6 @@ TEST(zero_dim_tensor_elimination, zero_const_conv)
EXPECT_EQ(count_ops_of_type<op::Convolution>(f), 0);
}

TEST(zero_dim_tensor_elimination, zero_const_avg_pool)
{
Shape zero_shape{0};
auto A = std::make_shared<op::Parameter>(element::f32, Shape{1, 1, 0});

auto avg_pool =
std::make_shared<op::AvgPool>(A, Shape{1}, Strides{1}, Shape{2}, Shape{2}, true);
auto abs_node = std::make_shared<op::Abs>(avg_pool);
auto constant = std::make_shared<op::Constant>(element::i32, zero_shape, std::vector<string>{});
auto f = std::make_shared<Function>(NodeVector{abs_node, constant}, ParameterVector{A});
pass::Manager pass_manager;

pass_manager.register_pass<ngraph::pass::ZeroDimTensorElimination>();
EXPECT_EQ(count_ops_of_type<op::AvgPool>(f), 1);
pass_manager.run_passes(f);
EXPECT_EQ(count_ops_of_type<op::AvgPool>(f), 0);
}
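// For reference: the pass rewrites nodes whose output has a zero-length axis
// (here the AvgPool over a {1, 1, 0} input), so the AvgPool count drops from
// 1 to 0 once the pass has run, as the two checks above confirm.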
TEST(zero_dim_tensor_elimination, zero_const_pad)
{
Shape zero_shape{0};