Remove obsoleted v0::Reshape operator (#2878)

* Remove obsoleted v0::Reshape operator
* Fix handling negative dims on 32 bit platforms
  Change-Id: Ia18e20df9a79fd0b7b38c434bee7140599833952
* apply code format
  Change-Id: I4ddfb9e7033cbc97e167922f6ee5873d5487e551

parent 50e6d0e31e
commit 3f5584534a
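The recurring pattern throughout this diff replaces the deprecated static reshape, which baked an input axis order and a literal output shape into the node, with v1::Reshape, which takes the target shape as a second (possibly computed) input. A minimal sketch of the before/after, assuming the usual nGraph headers and an existing output `arg`:

    // Before: v0::Reshape carries AxisVector + Shape as node attributes; a
    // non-monotonic axis order additionally transposed the data.
    auto old_node = std::make_shared<op::v0::Reshape>(arg, AxisVector{0, 1, 2}, Shape{6, 4});

    // After: v1::Reshape reads the target shape from input 1, so the shape
    // can be a Constant (as below) or any node computed at runtime.
    auto pattern = op::Constant::create(element::u64, Shape{2}, Shape{6, 4});
    auto new_node = std::make_shared<op::v1::Reshape>(arg, pattern, false /*special_zero*/);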
@@ -1147,14 +1147,6 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ReverseSequence>::createLayer(const std:
     return res;
 }
 
-template <>
-CNNLayer::Ptr NodeConverter<ngraph::op::Reshape>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
-    LayerParams params = {layer->get_friendly_name(), "Reshape",
-                          details::convertPrecision(layer->get_output_element_type(0))};
-    auto res = std::make_shared<InferenceEngine::ReshapeLayer>(params);
-    return res;
-}
-
 template <>
 CNNLayer::Ptr NodeConverter<ngraph::op::ShapeOf>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
     LayerParams params = {layer->get_friendly_name(), "ShapeOf",
@@ -46,7 +46,7 @@ namespace ngraph
         std::shared_ptr<Node> reorder_axes(const Output<Node>& value,
                                            std::vector<size_t> axes_order = {});
 
-        /// \brief Return transposed vlaue (with axes in reversed order).
+        /// \brief Return transposed value (with axes in reversed order).
         ///
         /// \param Value to transpose.
         ///
@@ -233,8 +233,7 @@ namespace ngraph
         auto trimmed_value = value;
         if (value_shape != trimmed_value_shape)
         {
-            trimmed_value = make_shared<op::Reshape>(
-                value, get_default_order(value_shape), trimmed_value_shape);
+            trimmed_value = builder::opset1::reshape(value, trimmed_value_shape);
         }
 
         auto shape_const =
@@ -149,8 +149,7 @@ OutputVector builder::MatmulFactory::make_matmul_op()
     Shape result_shape(next(begin(shape)), end(shape));
     result_shape.insert(
         begin(result_shape), begin(left_shape), next(begin(left_shape), left_shape.size() - 2));
-    return {make_shared<op::Reshape>(result, get_default_order(shape.size()), result_shape)
-                ->add_provenance_group_members_above(m_inputs)};
+    return {builder::opset1::reshape(result, result_shape)};
 }
 }
 
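builder::opset1::reshape, substituted here and in several hunks below, is the convenience helper from ngraph/builder/reshape.hpp. Conceptually it wraps the requested shape in a constant and emits an opset1 Reshape, roughly like this sketch (an assumption about its shape, not the library's exact implementation):

    std::shared_ptr<Node> reshape_sketch(const Output<Node>& value, const Shape& shape)
    {
        // Encode the target shape as a u64 constant so v1::Reshape carries it
        // as a regular input instead of a node attribute.
        auto shape_pattern = op::Constant::create(
            element::u64, Shape{shape.size()}, std::vector<size_t>(shape.begin(), shape.end()));
        return std::make_shared<op::v1::Reshape>(value, shape_pattern, false);
    }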
@@ -145,7 +145,6 @@ NGRAPH_OP(ReduceSum, ngraph::op::v1, 1)
 NGRAPH_OP(RegionYolo, ngraph::op::v0, 0)
 NGRAPH_OP(Relu, ngraph::op::v0, 0)
 NGRAPH_OP(ReorgYolo, ngraph::op::v0, 0)
-NGRAPH_OP(Reshape, ngraph::op::v0, 0)
 NGRAPH_OP(Reshape, ngraph::op::v1, 1)
 NGRAPH_OP(Result, ngraph::op::v0, 0)
 NGRAPH_OP(Reverse, ngraph::op::v1, 1)
@@ -25,96 +25,6 @@ namespace ngraph
 {
     namespace op
     {
-        namespace v0
-        {
-            // clang-format off
-            /// \brief Tensor reshape operation.
-            ///
-            /// "Converts" an input tensor into a new shape with the same number of elements.
-            ///
-            /// Given that the input tensor has shape \f$[d_1,\dots,d_n]\f$, the output may have any
-            /// shape \f$[d'_1,\dots,d'_m]\f$ such that
-            /// \f$\Pi_{0 \leq i \lt n}(d_i) = \Pi_{0 \leq i \lt m}(d'_i)\f$. For example, a
-            /// \f$3\times{}4\f$ matrix can be reshaped into a 3-tensor of shape
-            /// \f$3\times{}2\times{}2\f$, a matrix of shape \f$6\times{}2\f$, or a vector of size
-            /// \f$12\f$, but not, for example, a matrix of size \f$4\times{}4\f$.
-            ///
-            /// The parameter `input_order` indicates the order in which to "walk" over the input axes.
-            /// Given a tensor of shape \f$(d_1,\dots,d_n)\f$, an input order of
-            /// \f$(a_0, a_1, \dots, a_{n-1})\f$ results in the coordinate for axis \f$a_{n-1}\f$ being
-            /// varied most frequently, followed by axis \f$a_{n-2}\f$, and so on down to \f$a_0\f$.
-            ///
-            /// (TODO: example.)
-            ///
-            /// ## Parameters
-            ///
-            /// |                | Description |
-            /// | -------------- | ---------------------------------------------------------- |
-            /// | `input_order`  | The order in which to walk over the input axes. |
-            /// | `output_shape` | The shape \f$[d'_1,\dots,d'_m]\f$ for the reshaped tensor. |
-            ///
-            /// ## Inputs
-            ///
-            /// |       | Type                              | Description |
-            /// | ----- | --------------------------------- | ------------------------------------------------------------------------------------------------------------ |
-            /// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any type and shape, as long as the product of \f$d_i\f$ equals the product of \f$d'_i\f$. |
-            ///
-            /// ## Output
-            ///
-            /// | Type                     | Description |
-            /// | ------------------------ | ------------------------------------------------------------------------------------------------------ |
-            /// | \f$E[d'_1,\dots,d'_m]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with its elements rearranged as described above. |
-            // clang-format on
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. "
-                "Use v1::Reshape instead of it.") NGRAPH_API Reshape : public Op
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                static constexpr NodeTypeInfo type_info{"Reshape", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs a reshape operation.
-                Reshape() = default;
-                /// \brief Constructs a reshape operation.
-                ///
-                /// \param arg The tensor to be reshaped.
-                /// \param input_order The order in which to iterate over input axes. This must be a
-                ///                    permutation of the sequence \f$(0,\dots,n-1)\f$ where \f$n\f$
-                ///                    is
-                ///                    the rank of the input tensor.
-                /// \param output_shape The output shape. If the input shape is
-                ///                     \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
-                ///                     be of the form \f$(b_0,\dots,b_{j-1})\f$ where
-                ///                     \f$\Pi(a_i) = \Pi(b_i)\f$.
-                Reshape(const Output<Node>& arg,
-                        const AxisVector& input_order,
-                        const Shape& output_shape);
-
-                void validate_and_infer_types() override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-                bool visit_attributes(AttributeVisitor& visitor) override;
-
-                /// \return The order in which to iterate over input axes.
-                const AxisVector& get_input_order() const { return m_input_order; }
-                void set_input_order(const AxisVector& input_order) { m_input_order = input_order; }
-                /// \return The shape of the output tensor.
-                const Shape& get_reshape_output_shape() const { return m_output_shape; }
-                void set_output_shape(const Shape& output_shape) { m_output_shape = output_shape; }
-                bool get_is_transpose() const { return m_is_transpose; }
-                void set_is_transpose(bool is_transpose) { m_is_transpose = is_transpose; }
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-
-            protected:
-                AxisVector m_input_order;
-                Shape m_output_shape;
-                bool m_is_transpose{false};
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        }
-
         namespace v1
         {
             /// \brief Tensor dynamic reshape operation.
@@ -131,16 +41,19 @@ namespace ngraph
             ///           transpose.
             ///
             /// \param arg The tensor to be reshaped.
-            /// \param pattern The node that defines output shape pattern.
+            /// \param shape_pattern The node that defines output shape shape_pattern.
             ///        If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape
             ///        must
             ///        be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
             ///        A value of -1 is allowed for at most one dimension, in which case the
             ///        dimension size is inferred based on element count of input tensor.
-            /// \param special_zero Treats zeros in `pattern` as wildcard flags indicating a
+            /// \param special_zero Treats zeros in `shape_pattern` as wildcard flags indicating
+            ///        a
             ///        copy from input shape at the same index.
             ///
-            Reshape(const Output<Node>& arg, const Output<Node>& pattern, bool special_zero);
+            Reshape(const Output<Node>& arg,
+                    const Output<Node>& shape_pattern,
+                    bool special_zero);
 
             bool visit_attributes(AttributeVisitor& visitor) override;
             void validate_and_infer_types() override;
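For reference, the two wildcard values documented above combine like this (illustrative shapes only; `data` is assumed to have shape {2, 2, 5, 5}, i.e. 100 elements):

    // 0 with special_zero=true copies the input dimension at the same index;
    // -1 (at most one) is inferred from the remaining element count.
    auto pattern = op::Constant::create(element::i64, Shape{4}, {0, 5, -1, 2});
    auto reshaped = std::make_shared<op::v1::Reshape>(data, pattern, true);
    // dim 0 -> 2 (copied), dim 2 -> 100 / (2 * 5 * 2) = 5; output shape {2, 5, 5, 2}.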
@@ -160,8 +73,5 @@ namespace ngraph
             bool m_special_zero;
         };
         }
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        using v0::Reshape;
-        NGRAPH_SUPPRESS_DEPRECATED_END
     }
 }
@@ -157,28 +157,6 @@ void op::Dot::validate_and_infer_types()
     set_output_type(0, result_et, result_shape);
 }
 
-shared_ptr<op::Reshape> make_reshape_axes_to_front(const Output<Node>& n,
-                                                   const Shape& front_shape,
-                                                   const Shape& back_shape)
-{
-    AxisVector input_order;
-    Shape output_shape;
-
-    for (size_t i = 0; i < back_shape.size(); i++)
-    {
-        input_order.push_back(front_shape.size() + i);
-        output_shape.push_back(back_shape[i]);
-    }
-
-    for (size_t i = 0; i < front_shape.size(); i++)
-    {
-        input_order.push_back(i);
-        output_shape.push_back(front_shape[i]);
-    }
-
-    return make_shared<op::Reshape>(n, input_order, output_shape);
-}
-
 shared_ptr<Node> op::Dot::get_default_value() const
 {
     return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
@@ -24,8 +24,6 @@
 #include "ngraph/runtime/opt_kernel/reshape.hpp"
 #include "ngraph/runtime/reference/reshape.hpp"
 
-NGRAPH_SUPPRESS_DEPRECATED_START
-
 using namespace std;
 using namespace ngraph;
 
@@ -45,126 +43,23 @@ namespace
 }
 
 template <element::Type_t ET>
-void compute_output_shape(const HostTensorPtr& pattern, Shape& output_shape)
+void compute_output_shape(const HostTensorPtr& shape_pattern,
+                          std::vector<int64_t>& output_shape)
 {
     using T = typename element_type_traits<ET>::value_type;
-    T* pattern_ptr = pattern->get_data_ptr<ET>();
-    size_t output_rank = pattern->get_shape()[0];
+    T* shape_pattern_ptr = shape_pattern->get_data_ptr<ET>();
+    size_t output_rank = shape_pattern->get_shape()[0];
     for (int i = 0; i < output_rank; i++)
     {
-        output_shape.push_back(pattern_ptr[i]);
+        output_shape.push_back(shape_pattern_ptr[i]);
     }
 }
 }
 
-constexpr NodeTypeInfo op::Reshape::type_info;
-
-op::Reshape::Reshape(const Output<Node>& arg,
-                     const AxisVector& input_order,
-                     const Shape& output_shape)
-    : Op({arg})
-    , m_input_order(input_order)
-    , m_output_shape(output_shape)
-{
-    constructor_validate_and_infer_types();
-}
-
-void op::Reshape::validate_and_infer_types()
-{
-    auto& input_shape = get_input_partial_shape(0);
-    auto input_rank = input_shape.rank();
-
-    // Check that the input axis order is a permutation of (0,...,n-1) for some n.
-    for (size_t i = 0; i < m_input_order.size(); i++)
-    {
-        NODE_VALIDATION_CHECK(
-            this,
-            find(begin(m_input_order), end(m_input_order), i) != end(m_input_order),
-            "Input axis order is not a permutation of argument's axis indices (axis order: ",
-            m_input_order,
-            ", argument shape: ",
-            input_shape,
-            ").");
-    }
-
-    // TODO(amprocte): should be possible to move around unknown dims in the input shape.
-    if (input_rank.is_static())
-    {
-        NODE_VALIDATION_CHECK(
-            this,
-            m_input_order.size() == input_rank.get_length(),
-            "Input axis order is not a permutation of argument's axis indices (axis order: ",
-            m_input_order,
-            ", argument shape: ",
-            input_shape,
-            ").");
-
-        for (size_t i = 0; i < input_rank.get_length(); i++)
-        {
-            auto it = find(begin(m_input_order), end(m_input_order), i);
-            NODE_VALIDATION_CHECK(
-                this,
-                it != end(m_input_order),
-                "Input axis order is not a permutation of argument's axis indices (axis order: ",
-                m_input_order,
-                ", argument shape: ",
-                input_shape,
-                ").");
-        }
-
-        // TODO(amprocte): make a partial_shape_size() analogous to shape_size().
-        Dimension input_shape_product = 1;
-        for (size_t i = 0; i < input_rank.get_length(); i++)
-        {
-            input_shape_product *= input_shape[i];
-        }
-
-        if (input_shape_product.is_static())
-        {
-            NODE_VALIDATION_CHECK(
-                this,
-                input_shape_product.get_length() == shape_size(m_output_shape),
-                "Product of output shape dimensions does not match product of argument shape "
-                "dimensions ",
-                "(output shape: ",
-                m_output_shape,
-                ", argument shape: ",
-                input_shape,
-                ").");
-        }
-    }
-
-    if (!std::is_sorted(m_input_order.begin(), m_input_order.end()))
-    {
-        m_is_transpose = true;
-    }
-    set_output_type(0, get_input_element_type(0), m_output_shape);
-}
-
-shared_ptr<Node> op::Reshape::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<Reshape>(new_args.at(0), m_input_order, m_output_shape);
-}
-
-bool op::Reshape::visit_attributes(AttributeVisitor& visitor)
-{
-    visitor.on_attribute("input_order", m_input_order);
-    visitor.on_attribute("output_shape", m_output_shape);
-    return true;
-}
-
-bool op::v0::Reshape::evaluate(const HostTensorVector& outputs,
-                               const HostTensorVector& inputs) const
-{
-    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Reshape::evaluate");
-    return evaluate_reshape(inputs[0], outputs[0], get_input_order());
-}
-
 NGRAPH_RTTI_DEFINITION(op::v1::Reshape, "Reshape", 1);
 
-op::v1::Reshape::Reshape(const Output<Node>& arg, const Output<Node>& pattern, bool zero_flag)
-    : Op({arg, pattern})
+op::v1::Reshape::Reshape(const Output<Node>& arg, const Output<Node>& shape_pattern, bool zero_flag)
+    : Op({arg, shape_pattern})
     , m_special_zero(zero_flag)
 {
     constructor_validate_and_infer_types();
@@ -178,20 +73,21 @@ bool op::v1::Reshape::visit_attributes(AttributeVisitor& visitor)
 
 void op::v1::Reshape::validate_and_infer_types()
 {
-    auto pattern_et = get_input_element_type(1);
+    auto shape_pattern_et = get_input_element_type(1);
     // check data types
     NODE_VALIDATION_CHECK(
-        this, pattern_et.is_integral_number(), "Pattern must be an integral number.");
+        this, shape_pattern_et.is_integral_number(), "Shape pattern must be an integral number.");
 
     // check shapes
     const PartialShape& input_pshape = get_input_partial_shape(0);
-    const PartialShape& pattern_shape = get_input_partial_shape(1);
+    const PartialShape& shape_pattern_shape = get_input_partial_shape(1);
     NODE_VALIDATION_CHECK(this,
-                          pattern_shape.rank().compatible(1),
+                          shape_pattern_shape.rank().compatible(1),
                           "Pattern shape must have rank 1, got ",
-                          pattern_shape.rank(),
+                          shape_pattern_shape.rank(),
                           ".");
-    Rank output_rank = pattern_shape.rank().is_dynamic() ? Rank::dynamic() : pattern_shape[0];
+    Rank output_rank =
+        shape_pattern_shape.rank().is_dynamic() ? Rank::dynamic() : shape_pattern_shape[0];
 
     set_input_is_relevant_to_shape(1);
 
@@ -339,7 +235,7 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
     // infer and set output shape if the output shape contain -1
     // and zero value dimension
     size_t output_rank = inputs[1]->get_shape()[0];
-    Shape out_shape_val;
+    std::vector<int64_t> out_shape_val;
 
     switch (inputs[1]->get_element_type())
     {
@@ -367,7 +263,7 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
     case element::Type_t::u64:
         compute_output_shape<element::Type_t::u64>(inputs[1], out_shape_val);
         break;
-    default: throw ngraph_error("pattern element type is not integral data type");
+    default: throw ngraph_error("shape_pattern element type is not integral data type");
     }
 
     NODE_VALIDATION_CHECK(
@@ -382,9 +278,10 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
     NODE_VALIDATION_CHECK(
         this, negative_dims <= 1, "More than one dimension has size of -1 (", negative_dims, ")");
 
-    Shape output_shape;
-    std::copy(out_shape_val.begin(), out_shape_val.end(), std::back_inserter(output_shape));
     if (!(zero_dims && m_special_zero) && !negative_dims)
     {
+        auto output_shape = out_shape_val;
         if (get_input_partial_shape(0).is_static())
         {
             NODE_VALIDATION_CHECK(this,
@@ -398,7 +295,6 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
     }
     else
     {
-        Shape output_shape = out_shape_val;
         size_t output_elements = 1;
         int negative_dim = -1;
 
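The switch of `out_shape_val` from `Shape` to `std::vector<int64_t>` above is the "negative dims on 32 bit platforms" fix named in the commit message: `Shape` stores `size_t` elements, and when `size_t` is 32 bits wide the -1 sentinel read from an int64 pattern does not survive the round trip. A self-contained illustration of the presumed failure mode:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main()
    {
        int64_t pattern_value = -1;                         // "infer this dimension"
        size_t stored = static_cast<size_t>(pattern_value); // truncated to 0xFFFFFFFF when size_t is 32-bit
        int64_t recovered = static_cast<int64_t>(stored);
        // 64-bit size_t: recovered == -1. 32-bit size_t: recovered == 4294967295,
        // so a later "dimension == -1" test silently fails; keeping the pattern
        // values in std::vector<int64_t> preserves the sign until validation.
        assert(sizeof(size_t) == 4 ? recovered == 4294967295LL : recovered == -1);
    }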
@@ -19,6 +19,7 @@
 #include <set>
 
 #include "itt.hpp"
+#include "ngraph/builder/reshape.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/reshape.hpp"
 #include "ngraph/op/squeeze.hpp"
@@ -122,10 +123,11 @@ OutputVector op::Squeeze::decompose_op() const
                           (get_output_partial_shape(0).is_static()),
                           "output shape was not calculated during pre_validate_and_infer_types. Can not decompose.");
     auto data = input_value(0);
     auto data_shape = data.get_shape();
     auto output_data_shape = get_output_shape(0);
-    AxisVector input_order{get_default_order(data_shape.size())};
-    return {make_shared<op::Reshape>(data, input_order, output_data_shape)};
+    return {make_shared<op::v1::Reshape>(
+        data,
+        op::Constant::create(element::u64, {output_data_shape.size()}, output_data_shape),
+        false)};
 }
 
 shared_ptr<Node> op::Squeeze::clone_with_new_inputs(const OutputVector& new_args) const
@@ -18,6 +18,7 @@
 #include <set>
 
 #include "itt.hpp"
+#include "ngraph/builder/reshape.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/reshape.hpp"
 #include "ngraph/op/unsqueeze.hpp"
@@ -87,8 +88,7 @@ OutputVector op::Unsqueeze::decompose_op() const
     auto data = input_value(0);
     auto data_shape = data.get_shape();
     auto output_shape = get_output_shape(0);
-    AxisVector input_order{ngraph::get_default_order(data_shape.size())};
-    return {make_shared<ngraph::op::Reshape>(data, input_order, output_shape)};
+    return {builder::opset1::reshape(data, output_shape)};
 }
 
 bool ngraph::op::v0::Unsqueeze::visit_attributes(AttributeVisitor& visitor)
@@ -184,7 +184,8 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vector<S
     shared_ptr<Node> x_new_shape = make_shared<op::v0::ShapeOf>(x);
     auto axes = op::Constant::create(element::i64, {}, {0});
     x_new_shape = make_shared<op::v1::ReduceProd>(x_new_shape, axes);
-    x_new_shape = make_shared<op::Reshape>(x_new_shape, AxisVector{}, Shape{1});
+    x_new_shape = make_shared<op::v1::Reshape>(
+        x_new_shape, op::Constant::create(element::u64, {1}, Shape{1}), false);
 
     auto x_reshaped = make_shared<op::v1::Reshape>(x, x_new_shape, true);
 
@@ -298,3 +299,30 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_reverse_shape)
                        Shape{8, 2, 8, 2},
                        Shape{2, 3, 4, 5, 2}});
 }
+
+NGRAPH_TEST(${BACKEND_NAME}, dynamic_transpose)
+{
+    auto arg = std::make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
+    auto input_order = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
+    auto transpose = std::make_shared<op::v1::Transpose>(arg, input_order);
+
+    auto f = std::make_shared<Function>(NodeVector{transpose}, ParameterVector{arg, input_order});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
+    auto ex = backend->compile(f);
+
+    auto arg_data = vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+    auto input_order_data = vector<int32_t>{2, 0, 1};
+
+    auto arg_tensor = backend->create_tensor(element::i32, Shape{2, 2, 3});
+    auto input_order_tensor = backend->create_tensor(element::i32, Shape{input_order_data.size()});
+    copy_data(arg_tensor, arg_data);
+    copy_data(input_order_tensor, input_order_data);
+
+    auto output = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic());
+    ex->call_with_validate({output}, {arg_tensor, input_order_tensor});
+
+    ASSERT_EQ(output->get_element_type(), element::i32);
+    EXPECT_EQ(read_vector<int32_t>(output),
+              vector<int32_t>({1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12}));
+}
@@ -49,7 +49,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2v_012)
     Shape shape_a{2, 2, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{12};
-    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1, 2}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -71,7 +72,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_012)
     Shape shape_a{1, 1, 1};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{};
-    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1, 2}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -92,7 +94,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_120)
     Shape shape_a{1, 1, 1};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{};
-    auto r = make_shared<op::Reshape>(A, AxisVector{1, 2, 0}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -113,7 +116,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t)
     Shape shape_a{};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{1, 1, 1, 1, 1, 1};
-    auto r = make_shared<op::Reshape>(A, AxisVector{}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -134,7 +138,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t1)
     Shape shape_a{};
     auto A = make_shared<op::Parameter>(element::boolean, shape_a);
     Shape shape_r{1};
-    auto r = make_shared<op::Reshape>(A, AxisVector{}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -154,7 +159,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_col)
     Shape shape_a{3};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{3, 1};
-    auto r = make_shared<op::Reshape>(A, AxisVector{0}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -175,7 +181,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_row)
     Shape shape_a{3};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{1, 3};
-    auto r = make_shared<op::Reshape>(A, AxisVector{0}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -196,7 +203,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2t_middle)
     Shape shape_a{3};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{1, 3, 1};
-    auto r = make_shared<op::Reshape>(A, AxisVector{0}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -217,7 +225,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_same)
     Shape shape_a{3, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{3, 3};
-    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -234,217 +243,13 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_same)
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_transpose)
-{
-    Shape shape_a{3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_r{3, 3};
-    auto r = make_shared<op::Reshape>(A, AxisVector{1, 0}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9});
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 4, 7, 2, 5, 8, 3, 6, 9}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_dim_change_transpose)
-{
-    Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_r{2, 3};
-    auto r = make_shared<op::Reshape>(A, AxisVector{1, 0}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1, 3, 5, 2, 4, 6}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_021)
-{
-    Shape shape_a{2, 3, 4};
-    Shape shape_r{2, 4, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{0, 2, 1}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    vector<float> a_data(shape_size(shape_a));
-    iota(a_data.begin(), a_data.end(), 1.f);
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12,
-                                                 13, 17, 21, 14, 18, 22, 15, 19, 23, 16, 20, 24}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_210)
-{
-    Shape shape_a{2, 3, 4};
-    Shape shape_r{4, 3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{2, 1, 0}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    vector<float> a_data(shape_size(shape_a));
-    iota(a_data.begin(), a_data.end(), 1.f);
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22,
-                                                 3, 15, 7, 19, 11, 23, 4, 16, 8, 20, 12, 24}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_201)
-{
-    Shape shape_a{2, 3, 4};
-    Shape shape_r{4, 2, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{2, 0, 1}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    vector<float> a_data(shape_size(shape_a));
-    iota(a_data.begin(), a_data.end(), 1.f);
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22,
-                                                 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_102)
-{
-    Shape shape_a{2, 3, 4};
-    Shape shape_r{3, 2, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{1, 0, 2}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    vector<float> a_data(shape_size(shape_a));
-    iota(a_data.begin(), a_data.end(), 1.f);
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 13, 14, 15, 16, 5, 6, 7, 8,
-                                                 17, 18, 19, 20, 9, 10, 11, 12, 21, 22, 23, 24}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_3d_transpose_120)
-{
-    Shape shape_a{2, 3, 4};
-    Shape shape_r{3, 4, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{1, 2, 0}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    vector<float> a_data(shape_size(shape_a));
-    iota(a_data.begin(), a_data.end(), 1.f);
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18,
-                                                 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_transpose)
+NGRAPH_TEST(${BACKEND_NAME}, reshape_special_zero)
 {
     Shape shape_a{2, 2, 5, 5};
     Shape shape_r{2, 5, 5, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{0, 2, 3, 1}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    vector<float> a_data(shape_size(shape_a));
-    iota(a_data.begin(), a_data.end(), 1.f);
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1., 26., 2., 27., 3., 28., 4., 29., 5., 30., 6., 31., 7., 32., 8.,
-                       33., 9., 34., 10., 35., 11., 36., 12., 37., 13., 38., 14., 39., 15., 40.,
-                       16., 41., 17., 42., 18., 43., 19., 44., 20., 45., 21., 46., 22., 47., 23.,
-                       48., 24., 49., 25., 50., 51., 76., 52., 77., 53., 78., 54., 79., 55., 80.,
-                       56., 81., 57., 82., 58., 83., 59., 84., 60., 85., 61., 86., 62., 87., 63.,
-                       88., 64., 89., 65., 90., 66., 91., 67., 92., 68., 93., 69., 94., 70., 95.,
-                       71., 96., 72., 97., 73., 98., 74., 99., 75., 100.}),
-        read_vector<float>(result),
-        MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_no_transpose)
-{
-    Shape shape_a{2, 2, 5, 5};
-    Shape shape_r{2, 5, 5, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto r = make_shared<op::Reshape>(A, AxisVector{0, 1, 2, 3}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {4}, Shape{0, 5, 0, 2}), true);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     vector<float> a_data(shape_size(shape_a));
@@ -462,28 +267,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_4d_no_transpose)
     EXPECT_TRUE(test::all_close_f(a_data, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, reshape_transposed_shape_change)
-{
-    Shape shape_a{2, 6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_r{12};
-    auto r = make_shared<op::Reshape>(A, AxisVector{1, 0}, shape_r);
-    auto f = make_shared<Function>(r, ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
-    auto result = backend->create_tensor(element::f32, shape_r);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 7, 2, 8, 3, 9, 4, 10, 5, 11, 6, 12}),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
-}
-
 //
 // Numpy:
 //
@@ -534,7 +317,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_6d)
     vector<float> a_data(shape_size(shape_a));
     iota(a_data.begin(), a_data.end(), 1.f);
 
-    auto r = make_shared<op::Reshape>(A, AxisVector{2, 4, 0, 5, 3, 1}, shape_r);
+    auto r = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -547,31 +331,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_6d)
 
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{
-            1., 73., 9., 81., 17., 89., 2., 74., 10., 82., 18., 90., 3., 75.,
-            11., 83., 19., 91., 4., 76., 12., 84., 20., 92., 145., 217., 153., 225.,
-            161., 233., 146., 218., 154., 226., 162., 234., 147., 219., 155., 227., 163., 235.,
-            148., 220., 156., 228., 164., 236., 5., 77., 13., 85., 21., 93., 6., 78.,
-            14., 86., 22., 94., 7., 79., 15., 87., 23., 95., 8., 80., 16., 88.,
-            24., 96., 149., 221., 157., 229., 165., 237., 150., 222., 158., 230., 166., 238.,
-            151., 223., 159., 231., 167., 239., 152., 224., 160., 232., 168., 240., 25., 97.,
-            33., 105., 41., 113., 26., 98., 34., 106., 42., 114., 27., 99., 35., 107.,
-            43., 115., 28., 100., 36., 108., 44., 116., 169., 241., 177., 249., 185., 257.,
-            170., 242., 178., 250., 186., 258., 171., 243., 179., 251., 187., 259., 172., 244.,
-            180., 252., 188., 260., 29., 101., 37., 109., 45., 117., 30., 102., 38., 110.,
-            46., 118., 31., 103., 39., 111., 47., 119., 32., 104., 40., 112., 48., 120.,
-            173., 245., 181., 253., 189., 261., 174., 246., 182., 254., 190., 262., 175., 247.,
-            183., 255., 191., 263., 176., 248., 184., 256., 192., 264., 49., 121., 57., 129.,
-            65., 137., 50., 122., 58., 130., 66., 138., 51., 123., 59., 131., 67., 139.,
-            52., 124., 60., 132., 68., 140., 193., 265., 201., 273., 209., 281., 194., 266.,
-            202., 274., 210., 282., 195., 267., 203., 275., 211., 283., 196., 268., 204., 276.,
-            212., 284., 53., 125., 61., 133., 69., 141., 54., 126., 62., 134., 70., 142.,
-            55., 127., 63., 135., 71., 143., 56., 128., 64., 136., 72., 144., 197., 269.,
-            205., 277., 213., 285., 198., 270., 206., 278., 214., 286., 199., 271., 207., 279.,
-            215., 287., 200., 272., 208., 280., 216., 288.}),
-        read_vector<float>(result),
-        MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_TRUE(test::all_close_f(a_data, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ(r->get_output_shape(0), shape_r);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_1D_to_scalar)
@@ -619,9 +380,12 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_shufflenet_5d)
     vector<float> a_data(shape_size(shape_a));
     iota(a_data.begin(), a_data.end(), 1.f);
 
-    auto r0 = make_shared<op::Reshape>(A, AxisVector{0, 1, 2, 3}, shape_b);
-    auto r1 = make_shared<op::Reshape>(r0, AxisVector{0, 2, 1, 3, 4}, shape_c);
-    auto r2 = make_shared<op::Reshape>(r1, AxisVector{0, 1, 2, 3, 4}, shape_r);
+    auto r0 = make_shared<op::v1::Reshape>(
+        A, op::Constant::create(element::u64, {shape_b.size()}, shape_b), false);
+    auto r1 = make_shared<op::v1::Reshape>(
+        r0, op::Constant::create(element::u64, {shape_c.size()}, shape_c), false);
+    auto r2 = make_shared<op::v1::Reshape>(
+        r1, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false);
     auto f = make_shared<Function>(r2, ParameterVector{A});
 
     auto ref_func = clone_function(*f);
@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include "gtest/gtest.h"
+#include "ngraph/builder/reshape.hpp"
 #include "ngraph/ngraph.hpp"
 #include "ngraph/runtime/tensor.hpp"
 #include "runtime/backend.hpp"
@@ -197,15 +198,15 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap)
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_reshape)
+NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose)
 {
     Shape shape_a{4, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape_a);
     Shape shape_r{2, 4};
     auto B = make_shared<op::Slice>(A, Coordinate{1, 0}, Coordinate{4, 5});
-    auto C = make_shared<op::Reshape>(B, AxisVector{1, 0}, Shape{5, 3});
+    auto C = builder::opset1::transpose(B);
     auto D = make_shared<op::Slice>(C, Coordinate{1, 0}, Coordinate{5, 3});
-    auto E = make_shared<op::Reshape>(D, AxisVector{1, 0}, Shape{3, 4});
+    auto E = builder::opset1::transpose(D);
     auto r = make_shared<op::Slice>(E, Coordinate{1, 0}, Coordinate{3, 4});
     auto f = make_shared<Function>(r, ParameterVector{A});
 
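The rename in the test above underlines the semantic split: a v0::Reshape whose `input_order` was not monotonically increasing actually performed a transpose, while v1::Reshape never reorders data. Call sites that relied on that behavior move to an explicit transpose, roughly as in this sketch (using the v1::Transpose op seen elsewhere in the diff):

    // v0 (removed): one node both permuted axes {1, 0} and set the new shape.
    // auto C = make_shared<op::v0::Reshape>(B, AxisVector{1, 0}, Shape{5, 3});

    // v1 equivalent: reorder explicitly; Reshape alone never moves elements.
    auto order = op::Constant::create(element::i64, Shape{2}, std::vector<int64_t>{1, 0});
    auto C = std::make_shared<op::v1::Transpose>(B, order);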
@@ -210,61 +210,6 @@ TEST(constant_folding, constant_unsqueeze)
     ASSERT_TRUE(test::all_close_f(values_in, values_out, MIN_FLOAT_TOLERANCE_BITS));
 }
 
-TEST(constant_folding, constant_reshape)
-{
-    Shape shape_in{2, 4};
-    Shape shape_out{2, 4, 1};
-
-    vector<float> values_in{0, 1, 2, 3, 4, 5, 6, 7};
-    auto constant = make_shared<op::Constant>(element::f32, shape_in, values_in);
-    auto reshape = make_shared<op::Reshape>(constant, AxisVector{0, 1}, shape_out);
-    reshape->set_friendly_name("test");
-    auto f = make_shared<Function>(reshape, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::Reshape>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const =
-        as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
-    ASSERT_TRUE(new_const);
-    ASSERT_EQ(new_const->get_friendly_name(), "test");
-    auto values_out = new_const->get_vector<float>();
-
-    ASSERT_TRUE(test::all_close_f(values_in, values_out, MIN_FLOAT_TOLERANCE_BITS));
-}
-
-TEST(constant_folding, DISABLED_constant_reshape_permute)
-{
-    Shape shape_in{2, 4};
-    Shape shape_out{4, 2};
-
-    vector<double> values_in{0, 1, 2, 3, 4, 5, 6, 7};
-    auto constant = make_shared<op::Constant>(element::f64, shape_in, values_in);
-    auto reshape = make_shared<op::Reshape>(constant, AxisVector{1, 0}, shape_out);
-    reshape->set_friendly_name("test");
-    auto f = make_shared<Function>(reshape, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::Reshape>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const =
-        as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
-    ASSERT_TRUE(new_const);
-    ASSERT_EQ(new_const->get_friendly_name(), "test");
-    auto values_out = new_const->get_vector<double>();
-
-    vector<double> values_permute{0, 4, 1, 5, 2, 6, 3, 7};
-    ASSERT_TRUE(test::all_close_f(values_permute, values_out, MIN_FLOAT_TOLERANCE_BITS));
-}
-
 TEST(constant_folding, constant_broadcast_v1)
 {
     vector<int32_t> values_in{0, 1};
@@ -271,20 +271,20 @@ TEST(copy, power)
 TEST(copy, reshape)
 {
     Shape shape_in{2, 3, 4};
-    AxisVector axes{0, 1, 2};
     Shape shape_out{6, 4};
 
     auto arg0 = make_shared<op::Parameter>(element::f32, shape_in);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape_in)};
+    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape_in),
+                          op::Constant::create(element::u64, {shape_out.size()}, shape_out)};
 
-    auto node = make_shared<op::Reshape>(arg0, axes, shape_out);
+    auto shape_pattern = op::Constant::create(element::u64, {shape_out.size()}, shape_out);
+    auto node = make_shared<op::v1::Reshape>(arg0, shape_pattern, false);
     auto new_node = node->clone_with_new_inputs(new_args);
-    auto node_cast = as_type_ptr<op::Reshape>(new_node);
+    auto node_cast = as_type_ptr<op::v1::Reshape>(new_node);
     ASSERT_NE(node_cast, nullptr);
 
     ASSERT_TRUE(nullptr != new_node);
     ASSERT_TRUE(new_args == new_node->input_values());
-    ASSERT_TRUE(axes == node_cast->get_input_order());
     ASSERT_TRUE(shape_out == node_cast->get_output_shape(0));
 }
@@ -44,10 +44,10 @@ TEST(dyn_elimination, transpose)
     pass_manager.run_passes(f);
 
     ASSERT_EQ(count_ops_of_type<op::Transpose>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Reshape>(f), 1);
+    ASSERT_EQ(count_ops_of_type<op::v1::Reshape>(f), 1);
 
     auto new_reshape =
-        as_type_ptr<op::Reshape>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
+        as_type_ptr<op::v1::Reshape>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
     ASSERT_TRUE(new_reshape);
 
     ASSERT_EQ(new_reshape->get_input_order(), (AxisVector{2, 3, 1, 0}));
@@ -643,7 +643,7 @@ namespace
 
     void op_is_Reshape()
     {
-        op::Reshape node;
+        op::v1::Reshape node;
         EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
@@ -89,8 +89,8 @@ runtime::dynamic::DynamicExecutable::DynamicExecutable(shared_ptr<Function> wrap
 // count_dyn_nodes.
 bool is_dynamic_op(const std::shared_ptr<Node>& op)
 {
-    return is_type<op::Transpose>(op) || is_type<op::v1::Reshape>(op) || is_type<op::Range>(op) ||
-           is_type<op::v1::ConvolutionBackpropData>(op) || is_type<op::v3::Broadcast>(op);
+    return is_type<op::Range>(op) || is_type<op::v1::ConvolutionBackpropData>(op) ||
+           is_type<op::v3::Broadcast>(op);
 }
 
 // Helper for a vile hack in DynamicExecutable::call. See body of that function for details.
@@ -914,6 +914,9 @@ broadcast_algo_matrix_stride_1
 broadcast_algo_matrix_stride_2
 broadcast_algo_matrix_stride_3
 
+# Cannot find blob with name: Parameter_1
+dynamic_transpose
+
 # Failing from new reason after unblocking more Blob types
 gather_2d_negative_and_positive_indices_axis_0_2d_input
 gather_axis_0_int8
@@ -1490,7 +1490,7 @@ protected:
         case OP_TYPEID::NotEqual:
         case OP_TYPEID::Power:
         case OP_TYPEID::Range:
-        case OP_TYPEID::Reshape:
+        case OP_TYPEID::Reshape_v1:
         case OP_TYPEID::Result:
         case OP_TYPEID::Reverse_v1:
         case OP_TYPEID::Round_v5:
@@ -34,6 +34,7 @@ NGRAPH_OP(LogicalXor, op::v1)
 NGRAPH_OP(LogicalNot, op::v1)
 NGRAPH_OP(GatherTree, op::v1)
 NGRAPH_OP(OneHot, op::v1)
+NGRAPH_OP(Reshape, op::v1)
 NGRAPH_OP(Reverse, op::v1)
 #undef ID_SUFFIX
 
@@ -23,7 +23,6 @@
 #include "ngraph/builder/split.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/convolution.hpp"
-#include "ngraph/op/reshape.hpp"
 #include "ngraph/op/slice.hpp"
 #include "ngraph/validation_util.hpp"
 
@@ -203,16 +202,14 @@ OutputVector op::v0::GroupConvolution::decompose_op() const
     auto sliced_data = builder::split(data, get_groups(), 1);
     // slice filters
     auto sliced_filters = builder::split(filters, get_groups(), 0);
+    auto shape = Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape));
     for (std::size_t group{0}; group < get_groups(); ++group)
     {
         auto sliced_filter = sliced_filters[group];
         if (m_groups_in_filters)
         {
-            // Remove group dimmension after slicing
-            sliced_filter = make_shared<op::Reshape>(
-                sliced_filters[group],
-                get_default_order(sliced_filters[group].get_shape().size()),
-                Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape)));
+            // Remove group dimension after slicing
+            sliced_filter = builder::opset1::reshape(sliced_filters[group], shape);
         }
         convolution_nodes.push_back(
             std::make_shared<ngraph::op::v0::Convolution>(sliced_data[group],
@@ -108,7 +108,6 @@ NGRAPH_OP(QuantizedConvolution, ngraph::op)
 NGRAPH_OP(QuantizedDot, ngraph::op)
 NGRAPH_OP(Range, ngraph::op)
 NGRAPH_OP(Relu, ngraph::op)
-NGRAPH_OP(Reshape, ngraph::op)
 NGRAPH_OP(Result, ngraph::op)
 NGRAPH_OP(ReverseSequence, ngraph::op)
 NGRAPH_OP(Select, ngraph::op)
@@ -17,9 +17,9 @@
 #include <numeric>
 
 #include "dyn_elimination.hpp"
+#include "ngraph/builder/reshape.hpp"
 #include "ngraph/op/broadcast.hpp"
 #include "ngraph/op/range.hpp"
-#include "ngraph/op/reshape.hpp"
 #include "ngraph/op/slice.hpp"
 #include "ngraph/op/transpose.hpp"
 #include "ngraph/pattern/matcher.hpp"
|
||||
pass::DynElimination::DynElimination()
|
||||
: GraphRewrite()
|
||||
{
|
||||
construct_transpose();
|
||||
construct_range();
|
||||
}
|
||||
|
||||
void pass::DynElimination::construct_transpose()
|
||||
{
|
||||
auto data_arg_label = make_shared<pattern::op::Label>(element::f32, Shape{1, 2, 3});
|
||||
auto perm_arg_label =
|
||||
make_shared<pattern::op::Label>(element::i64, Shape{3}, pattern::has_class<op::Constant>());
|
||||
|
||||
auto transpose = make_shared<op::Transpose>(data_arg_label, perm_arg_label);
|
||||
|
||||
auto transpose_callback = [data_arg_label, perm_arg_label](pattern::Matcher& m) {
|
||||
auto pattern_map = m.get_pattern_map();
|
||||
|
||||
auto data_arg = pattern_map[data_arg_label];
|
||||
auto perm_arg = static_pointer_cast<op::Constant>(pattern_map[perm_arg_label]);
|
||||
|
||||
// TODO(amprocte): Can't handle the case where data shape is dynamic, because static
|
||||
// Reshape requries the exact output shape to be declared. See if we can come up with a
|
||||
// workaround.
|
||||
if (data_arg->get_output_partial_shape(0).is_dynamic())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
auto& data_shape = data_arg->get_output_shape(0);
|
||||
|
||||
NGRAPH_CHECK(perm_arg->get_output_partial_shape(0).rank().compatible(1));
|
||||
NGRAPH_CHECK(perm_arg->get_output_element_type(0).compatible(element::i64));
|
||||
|
||||
if (perm_arg->get_output_element_type(0).is_dynamic() ||
|
||||
perm_arg->get_output_partial_shape(0).is_dynamic())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
auto perm = perm_arg->get_axis_vector_val();
|
||||
|
||||
auto output_shape = ngraph::apply_permutation(data_shape, perm);
|
||||
|
||||
auto replacement = std::make_shared<op::Reshape>(data_arg, perm, output_shape);
|
||||
|
||||
replace_node(m.get_match_root(), replacement);
|
||||
return true;
|
||||
};
|
||||
|
||||
auto transpose_matcher = make_shared<pattern::Matcher>(transpose, "DynElimination.Transpose");
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
add_matcher(transpose_matcher, transpose_callback, all_pass_property_off);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::shared_ptr<op::Constant> make_range_replacement(const element::Type& et,
|
||||
const Shape& shape,
|
||||
|
@@ -57,7 +57,6 @@ namespace ngraph
             DynElimination();
 
         private:
-            void construct_transpose();
             void construct_range();
         };
     }
@@ -84,9 +84,10 @@ namespace opset0_downgrade
         {
             reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1);
         }
-        auto reshaped_product = make_shared<op::Reshape>(replacement_node->output(0),
-                                                         get_default_order(output_shape),
-                                                         reshaped_output_shape);
+        auto shape_pattern = op::Constant::create(
+            element::u64, {reshaped_output_shape.size()}, reshaped_output_shape);
+        auto reshaped_product =
+            make_shared<op::v1::Reshape>(replacement_node->output(0), shape_pattern, false);
         return reshaped_product;
     }
     else
@@ -190,28 +191,6 @@ namespace opset0_downgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::Reshape> node)
-    {
-        shared_ptr<Node> replacement_node;
-
-        const auto target_shape_input = node->input_value(1).get_node_shared_ptr();
-        const auto input_rank = node->get_input_partial_shape(0).rank();
-        if (op::is_constant(target_shape_input) && node->get_output_partial_shape(0).is_static() &&
-            input_rank.is_static())
-        {
-            const auto output_shape = node->get_output_shape(0);
-            replacement_node = make_shared<op::Reshape>(
-                node->input_value(0), get_default_order(input_rank.get_length()), output_shape);
-        }
-        else
-        {
-            NGRAPH_CHECK(replacement_node, "Unable to convert Reshape:v1 with dynamic shape.");
-        }
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::Equal> node)
    {
         return op_cast_binary_elementwise_node<op::v0::Equal, op::v1::Equal>(node);
@@ -354,8 +333,9 @@ namespace opset0_downgrade
         {
             reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1);
         }
-        count_node = make_shared<op::Reshape>(
-            count_node->output(0), get_default_order(output_shape), reshaped_output_shape);
+        auto shape_pattern = op::Constant::create(
+            element::u64, {reshaped_output_shape.size()}, reshaped_output_shape);
+        count_node = make_shared<op::v1::Reshape>(count_node->output(0), shape_pattern, false);
     }
 
     const auto replacement_node =
@@ -431,10 +411,9 @@ namespace opset0_downgrade
 
         if (p.reshape_in_shape != p.reshape_out_shape)
         {
-            replacement_node =
-                make_shared<op::Reshape>(replacement_node,
-                                         ngraph::get_default_order(p.reshape_in_shape),
-                                         p.reshape_out_shape);
+            auto shape_pattern = op::Constant::create(
+                element::u64, {p.reshape_out_shape.size()}, p.reshape_out_shape);
+            replacement_node = make_shared<op::v1::Reshape>(replacement_node, shape_pattern, false);
         }
 
         if (!p.reverse_axes.empty())
@@ -492,44 +471,6 @@ namespace opset0_downgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::Transpose> node)
-    {
-        const auto data = node->input_value(0);
-
-        const auto data_pshape = data.get_partial_shape();
-        NGRAPH_CHECK(data_pshape.is_static(),
-                     "Unable to convert Transpose:v1 to Reshape:v0 "
-                     "if data shape is dynamic. Node: ",
-                     *node);
-        const auto data_shape = data_pshape.to_shape();
-
-        const auto order_node = node->input_value(1).get_node_shared_ptr();
-        NGRAPH_CHECK(op::is_constant(order_node),
-                     "Unable to convert Transpose:v1 to Reshape:v0 "
-                     "if order node is not constant. Node: ",
-                     *node);
-        const auto order_const = as_type_ptr<op::Constant>(order_node);
-
-        auto order = order_const->get_axis_vector_val();
-        Shape out_shape = data_shape;
-        if (order.empty())
-        {
-            order.resize(out_shape.size());
-            iota(begin(order), end(order), 0);
-        }
-        else
-        {
-            for (size_t i = 0; i < order.size(); ++i)
-            {
-                out_shape[i] = data_shape.at(order.at(i));
-            }
-        }
-
-        auto replacement_node = make_shared<op::v0::Reshape>(data, order, out_shape);
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::VariadicSplit> node)
     {
         const auto split_lengths = node->input_value(2).get_node_shared_ptr();
|
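For context (not part of the diff): the Transpose downgrade above is deleted outright because v0::Reshape was the only opset0 op that could reorder axes; with it gone there is no opset0 target left, and the permutation stays an explicit op::v1::Transpose. A minimal sketch of the equivalent opset1 graph, assuming a statically shaped 3-D input:

#include "ngraph/ngraph.hpp"

#include <cstdint>
#include <vector>

// Axis order {1, 2, 0} on a {3, 4, 5} input, expressed as v1::Transpose
// instead of a v0::Reshape carrying an input_order attribute.
std::shared_ptr<ngraph::Node> transpose_120_sketch()
{
    auto data = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                        ngraph::Shape{3, 4, 5});
    auto order = ngraph::op::Constant::create(
        ngraph::element::i64, ngraph::Shape{3}, std::vector<int64_t>{1, 2, 0});
    // Result shape: {4, 5, 3} = input dims picked in order {1, 2, 0}.
    return std::make_shared<ngraph::op::v1::Transpose>(data, order);
}
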
@ -127,14 +127,6 @@ namespace opset1_upgrade
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::Reshape> node)
    {
        shared_ptr<Node> replacement_node =
            builder::opset1::reshape(node->input_value(0), node->get_reshape_output_shape());
        replace_node(node, replacement_node);
        return replacement_node;
    }

    shared_ptr<Node> op_cast(shared_ptr<op::Equal> node)
    {
        return op_cast_binary_elementwise_node<op::v0::Equal, op::v1::Equal>(node);

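For context (not part of the diff): builder::opset1::reshape is the helper the remaining code standardizes on; it emits the shape-pattern Constant plus op::v1::Reshape shown above, so callers no longer spell out an axis order. A minimal usage sketch:

#include "ngraph/builder/reshape.hpp"
#include "ngraph/ngraph.hpp"

// Flatten a {3, 4, 5} tensor to {60} via the opset1 builder helper.
std::shared_ptr<ngraph::Node> flatten_sketch()
{
    auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{3, 4, 5});
    return ngraph::builder::opset1::reshape(param, ngraph::Shape{60});
}
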
@ -18,15 +18,14 @@
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

NGRAPH_SUPPRESS_DEPRECATED_START

using namespace std;
using namespace ngraph;

TEST(type_prop, reshape_deduce_s2v)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{});
    auto r = make_shared<op::Reshape>(param, AxisVector{}, Shape{1});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {1}, Shape{1}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{1}));
}
@ -34,7 +33,8 @@ TEST(type_prop, reshape_deduce_s2v)
TEST(type_prop, reshape_deduce_s2m)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{});
    auto r = make_shared<op::Reshape>(param, AxisVector{}, Shape{1, 1});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {2}, Shape{1, 1}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{1, 1}));
}
@ -42,39 +42,17 @@ TEST(type_prop, reshape_deduce_s2m)
TEST(type_prop, reshape_deduce_s2t)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{});
    auto r = make_shared<op::Reshape>(param, AxisVector{}, Shape{1, 1, 1});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {3}, Shape{1, 1, 1}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{1, 1, 1}));
}

TEST(type_prop, reshape_deduce_v2s)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1});
    auto r = make_shared<op::Reshape>(param, AxisVector{0}, Shape{});
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{}));
}

TEST(type_prop, reshape_deduce_m2s)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1, 1});
    auto r = make_shared<op::Reshape>(param, AxisVector{0, 1}, Shape{});
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{}));
}

TEST(type_prop, reshape_deduce_t2s)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1});
    auto r = make_shared<op::Reshape>(param, AxisVector{0, 1, 2}, Shape{});
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{}));
}

TEST(type_prop, reshape_deduce_m2v_01)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4});
    auto r = make_shared<op::Reshape>(param, AxisVector{0, 1}, Shape{12});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {1}, Shape{12}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{12}));
}
@ -82,7 +60,8 @@ TEST(type_prop, reshape_deduce_m2v_01)
TEST(type_prop, reshape_deduce_m2v_10)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4});
    auto r = make_shared<op::Reshape>(param, AxisVector{1, 0}, Shape{12});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {1}, Shape{12}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{12}));
}
@ -90,7 +69,8 @@ TEST(type_prop, reshape_deduce_m2v_10)
TEST(type_prop, reshape_deduce_t2v_012)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto r = make_shared<op::Reshape>(param, AxisVector{0, 1, 2}, Shape{60});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {1}, Shape{60}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{60}));
}
@ -98,72 +78,19 @@ TEST(type_prop, reshape_deduce_t2v_012)
TEST(type_prop, reshape_deduce_t2v_120)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    auto r = make_shared<op::Reshape>(param, AxisVector{1, 2, 0}, Shape{60});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {1}, Shape{60}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{60}));
}

TEST(type_prop, reshape_deduce_not_enough_axes)
TEST(type_prop, reshape_deduce_zero_special)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{1, 0}, Shape{60});
        // Should have thrown, so fail if it didn't
        FAIL() << "Not enough axes not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Input axis order is not a permutation of argument's axis indices"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, reshape_deduce_too_many_axes)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{1, 2, 0, 3}, Shape{60});
        // Should have thrown, so fail if it didn't
        FAIL() << "Too many axes not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Input axis order is not a permutation of argument's axis indices"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

TEST(type_prop, reshape_deduce_duplicate_axes)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{1, 1, 0}, Shape{60});
        // Should have thrown, so fail if it didn't
        FAIL() << "Duplicate axes not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Input axis order is not a permutation of argument's axis indices"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {3}, Shape{6, 2, 0}), true);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{6, 2, 5}));
}

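For context (not part of the diff): the new reshape_deduce_zero_special test covers v1::Reshape's special_zero flag, which v0::Reshape had no counterpart for. With special_zero = true, a 0 in the pattern copies the corresponding input dimension, so {6, 2, 0} applied to a {3, 4, 5} input yields {6, 2, 5}; a single -1 would likewise be inferred from the remaining sizes. A minimal sketch of the same rule:

#include "ngraph/ngraph.hpp"

#include <cstdint>
#include <vector>

// special_zero = true: the trailing 0 copies input dimension 2 (size 5).
std::shared_ptr<ngraph::Node> zero_special_sketch()
{
    auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{3, 4, 5});
    auto pattern = ngraph::op::Constant::create(
        ngraph::element::i64, ngraph::Shape{3}, std::vector<int64_t>{6, 2, 0});
    auto r = std::make_shared<ngraph::op::v1::Reshape>(input, pattern, true);
    // r->get_shape() == Shape{6, 2, 5}
    return r;
}
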
TEST(type_prop, reshape_deduce_wrong_output_shape)
@ -171,49 +98,16 @@ TEST(type_prop, reshape_deduce_wrong_output_shape)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{1, 2, 0}, Shape{3, 3, 3});
        auto r = make_shared<op::v1::Reshape>(
            param, op::Constant::create(element::u64, {3}, Shape{3, 3, 3}), false);
        // Should have thrown, so fail if it didn't
        FAIL() << "Wrong output shape not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Product of output shape dimensions does not match "
                                         "product of argument shape dimensions"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

//
// Input shape rank dynamic, so we should set the desired output shape if the axis vector is not
// known invalid (invalid means it's not a permutation of {0,...,n-1} for any n).
//
TEST(type_prop, reshape_partial_rank_dynamic_axisvector_ok)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto r = make_shared<op::Reshape>(param, AxisVector{2, 1, 0, 3}, Shape{3, 1, 8, 2});
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
    ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2}));
}

TEST(type_prop, reshape_partial_rank_dynamic_axisvector_not_ok)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{2, 1, 0, 4}, Shape{3, 1, 8, 2});
        // Should have thrown, so fail if it didn't
        FAIL() << "Did not detect malformed AxisVector (input shape rank dynamic)";
        FAIL() << "No exception was thrown";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Input axis order is not a permutation of argument's axis indices"));
            std::string("Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'"));
    }
    catch (...)
    {
@ -222,41 +116,31 @@ TEST(type_prop, reshape_partial_rank_dynamic_axisvector_not_ok)
    }

//
// Input shape rank static but input shape is dynamic, so should set desired output shape if the
// axis vector is consistent with the static rank.
// Input shape rank dynamic, so we should set the desired output shape
//
TEST(type_prop, reshape_partial_rank_static_dynamic_axisvector_ok)
TEST(type_prop, reshape_partial_rank_dynamic)
{
    auto param_shape =
        PartialShape{Dimension::dynamic(), 6, Dimension::dynamic(), Dimension::dynamic()};
    auto param = make_shared<op::Parameter>(element::f32, param_shape);
    auto r = make_shared<op::Reshape>(param, AxisVector{2, 1, 0, 3}, Shape{3, 1, 8, 2});
    auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
    ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2}));
}

TEST(type_prop, reshape_partial_rank_static_dynamic_axisvector_not_ok)
//
// Input shape rank static but input shape is dynamic, so should set desired output shape
//
TEST(type_prop, reshape_partial_rank_static)
{
    auto param_shape =
        PartialShape{Dimension::dynamic(), 6, Dimension::dynamic(), Dimension::dynamic()};
    auto param = make_shared<op::Parameter>(element::f32, param_shape);
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{2, 1, 0}, Shape{3, 1, 8, 2});
        // Should have thrown, so fail if it didn't
        FAIL() << "Did not detect AxisVector inconsistent with rank (rank-static dynamic shape)";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Input axis order is not a permutation of argument's axis indices"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
    ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2}));
}

//
@ -268,32 +152,9 @@ TEST(type_prop, reshape_partial_rank_static_dynamic_but_zero_ok)
    auto param_shape =
        PartialShape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
    auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto r = make_shared<op::Reshape>(param, AxisVector{2, 1, 0, 3}, Shape{3, 1, 0, 2});
    auto r = make_shared<op::v1::Reshape>(
        param, op::Constant::create(element::u64, {4}, Shape{3, 1, 0, 2}), false);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
    ASSERT_EQ(r->get_shape(), (Shape{3, 1, 0, 2}));
}

TEST(type_prop, reshape_partial_rank_static_dynamic_but_zero_not_ok)
{
    auto param_shape =
        PartialShape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
    auto param = make_shared<op::Parameter>(element::f32, param_shape);
    try
    {
        auto r = make_shared<op::Reshape>(param, AxisVector{2, 1, 0}, Shape{3, 1, 8, 2});
        // Should have thrown, so fail if it didn't
        FAIL() << "Did not detect inconsistent output shape with static-zero-element rank-dynamic"
               " static input shape";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string("Input axis order is not a permutation of argument's axis indices"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}

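For context (not part of the diff): the but_zero tests above rest on the element-count check v1::Reshape performs when special_zero = false — shape_size of the input shape must equal shape_size of the pattern — and zero-sized dimensions satisfy it because both products are zero. A minimal sketch of that invariant:

#include "ngraph/shape.hpp"

#include <cassert>

// With special_zero = false a 0 in the pattern is a real zero-sized dimension;
// the reshape type-checks only because both element counts are zero.
void zero_element_invariant_sketch()
{
    const ngraph::Shape in_shape{3, 0, 5};     // 3 * 0 * 5 == 0 elements
    const ngraph::Shape out_shape{3, 1, 0, 2}; // 3 * 1 * 0 * 2 == 0 elements
    assert(ngraph::shape_size(in_shape) == ngraph::shape_size(out_shape));
}
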
@ -15,6 +15,7 @@
//*****************************************************************************

#include "gtest/gtest.h"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset5.hpp"
#include "util/type_prop.hpp"
@ -43,15 +44,14 @@ TEST(type_prop, tensor_iterator_lstm)
    auto X = make_shared<op::Parameter>(element::f32, Shape{N, 1, I});
    auto W_body = make_shared<op::Parameter>(element::f32, Shape{4 * H, I});
    auto R_body = make_shared<op::Parameter>(element::f32, Shape{4 * H, H});
    auto LSTM_cell = make_shared<opset5::LSTMCell>(
        make_shared<op::Reshape>(X, AxisVector{0, 1, 2}, Shape{N, I}),
        make_shared<op::Reshape>(H_t, AxisVector{0, 1, 2}, Shape{N, H}),
        make_shared<op::Reshape>(C_t, AxisVector{0, 1, 2}, Shape{N, H}),
        W_body,
        R_body,
        H);
    auto H_o = make_shared<op::Reshape>(LSTM_cell->output(0), AxisVector{0, 1}, Shape{N, 1, H});
    auto C_o = make_shared<op::Reshape>(LSTM_cell->output(1), AxisVector{0, 1}, Shape{N, 1, H});
    auto LSTM_cell = make_shared<opset5::LSTMCell>(builder::opset1::reshape(X, Shape{N, I}),
                                                   builder::opset1::reshape(H_t, Shape{N, H}),
                                                   builder::opset1::reshape(C_t, Shape{N, H}),
                                                   W_body,
                                                   R_body,
                                                   H);
    auto H_o = builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H});
    auto C_o = builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H});
    auto body = make_shared<ngraph::Function>(OutputVector{H_o, C_o},
                                              ParameterVector{X, H_t, C_t, W_body, R_body});