[shape infer] Review Reshape class for shape inference aspects (#18679)

* Add static shape adapter
  - the adapter holds a CPU dimension, either as a reference to it or as a vector
  - add ov::optional for holding an optional result from shape inference
  - add a new `infer` function in `IStaticShapeInfer`
* Temporary support of StaticShape
* Minor corrections in ShapeInferenceTA
* Migrate shape_infer to the new interface version
* Replace StaticShape by the adapter implementation
* Replace IShapeInferCommon by IStaticShapeInfer
* Correct code formatting
* Fix build issues
* NodeValidationFailure::create for StaticShapeRef
* Review shape inference for the Reshape operator:
  - review the shape_infer implementation
  - add more unit tests for static and dynamic shapes
* Fix build issues
* Correct minus-one dimension calculation
* Fix build issues on Windows
* Improve resolving of the special minus-one value
* Use NODE_SHAPE_INFER_CHECK
* Update product in/out calculations
* Temporarily add the ngraph header to solve a build issue
* Correct minus-one dimension calculation when the static parts are the same
* Add check for scalar input
* Remove debug message
* Fix `minus one` dynamic dimension calculation
* Fix merge issues in reshape; minor refactor of reshape evaluate
* Don't pass the input label on a minus-one pattern when the input dimension will be modified.
parent: 69e1258cc5, commit: 751d844b24
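For readers skimming the diff, the operator semantics under review are: in the output pattern, `-1` marks a single dimension to be inferred from the element count, and, when `special_zero` is set, `0` copies the corresponding input dimension. A minimal standalone sketch of that behavior (not part of this PR; public API only):

#include <iostream>
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"

int main() {
    // Input of shape {2, 3, 4}; pattern {0, -1} with special_zero = true:
    // the 0 copies input dim 0 (2) and the -1 is inferred as 3 * 4 = 12.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    auto pattern = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, -1});
    auto reshape = std::make_shared<ov::op::v1::Reshape>(data, pattern, true);

    std::cout << reshape->get_output_partial_shape(0) << std::endl;  // prints [2,12]
    return 0;
}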
@@ -2192,7 +2192,7 @@ TEST(TransformationTests, align_mixed_fp16_fp32_with_parameter_for_shape_1) {
    auto upscale_const = ov::op::v0::Constant::create(element::f32, Shape{1}, {2.0f});
    auto mul_1 = make_shared<ov::op::v1::Multiply>(shape_input, upscale_const);
    auto axis_const = ov::op::v0::Constant::create(element::i64, Shape{1}, {0});
-    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const);
+    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const, true);
    auto final_int_shape = make_shared<ov::op::v0::Convert>(final_float_shape, element::i64);
    auto reshape_1 = make_shared<ov::op::v1::Reshape>(input_1, final_int_shape, false);

@@ -2214,7 +2214,7 @@ TEST(TransformationTests, align_mixed_fp16_fp32_with_parameter_for_shape_1) {
    auto upscale_const = ov::op::v0::Constant::create(element::f32, Shape{1}, {2.0f});
    auto mul_1 = make_shared<ov::op::v1::Multiply>(shape_input, upscale_const);
    auto axis_const = ov::op::v0::Constant::create(element::i64, Shape{1}, {0});
-    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const);
+    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const, true);
    auto final_int_shape = make_shared<ov::op::v0::Convert>(final_float_shape, element::i64);
    auto reshape_1 = make_shared<ov::op::v1::Reshape>(input_1, final_int_shape, false);

@@ -2235,7 +2235,7 @@ TEST(TransformationTests, align_mixed_fp16_fp32_with_parameter_for_shape_2) {
    auto upscale_const = ov::op::v0::Constant::create(element::f32, Shape{1}, {2.0f});
    auto mul_1 = make_shared<ov::op::v1::Multiply>(shape_input, upscale_const);
    auto axis_const = ov::op::v0::Constant::create(element::i64, Shape{1}, {0});
-    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const);
+    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const, true);
    auto final_int_shape = make_shared<ov::op::v0::Convert>(final_float_shape, element::i64);
    auto reshape_1 = make_shared<ov::op::v1::Reshape>(input_1, final_int_shape, false);

@@ -2260,7 +2260,7 @@ TEST(TransformationTests, align_mixed_fp16_fp32_with_parameter_for_shape_2) {
    auto upscale_const = ov::op::v0::Constant::create(element::f32, Shape{1}, {2.0f});
    auto mul_1 = make_shared<ov::op::v1::Multiply>(shape_input, upscale_const);
    auto axis_const = ov::op::v0::Constant::create(element::i64, Shape{1}, {0});
-    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const);
+    auto final_float_shape = make_shared<ov::op::v1::ReduceProd>(mul_1, axis_const, true);
    auto final_int_shape = make_shared<ov::op::v0::Convert>(final_float_shape, element::i64);
    auto reshape_1 = make_shared<ov::op::v1::Reshape>(convert_to_f16, final_int_shape, false);
    auto convert_to_f32 = make_shared<ov::op::v0::Convert>(reshape_1, element::f32);
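In all four hunks above the only change is the added third constructor argument of `ov::op::v1::ReduceProd` (`keep_dims = true`), so the reduced shape value stays a rank-1 tensor instead of collapsing to a scalar; presumably this is needed because the reworked Reshape shape inference below accepts a scalar pattern only when its value equals 1.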
src/core/shape_inference/include/reshape_shape_inference.hpp (new file, 364 lines)
@@ -0,0 +1,364 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include "compare.hpp"
#include "dimension_util.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "openvino/op/reshape.hpp"
#include "utils.hpp"

namespace ov {
namespace op {
namespace reshape {
template <class T, class U = void>
struct Product {};

/** \brief Helper to resolve the input and output product for static dimensions. */
template <class T>
struct Product<T, typename std::enable_if<!std::is_same<T, Dimension>::value>::type> {
    T in{1};
    T out{1};

    void update_in(const T& in_dim) {
        in *= in_dim;
    }

    void update_out(const T& out_dim) {
        out *= out_dim;
    }

    void set_inf() {
        in = T(-1);
        out = T(-1);
    }

    const T& get_static_in() const {
        return in;
    }

    const T& get_static_out() const {
        return out;
    }

    void calculate() {}
};
/** \brief Helper to resolve the input and output product for ov::Dimension (dynamic) dimensions. */
template <class T>
struct Product<T, typename std::enable_if<std::is_same<T, Dimension>::value>::type> {
    std::pair<T, T> in{1, 1};
    std::pair<T, T> out{1, 1};

    void update_in(const T& in_dim) {
        inputs.emplace_back(in_dim);
    }

    void update_out(const T& out_dim) {
        outputs.emplace_back(out_dim);
    }

    void set_inf() {
        in.second = T(-1);
        out.second = T(-1);
    }

    const T& get_static_in() const {
        return in.first;
    }

    const T& get_static_out() const {
        return out.first;
    }

    const T& get_dynamic_in() const {
        return in.second;
    }

    const T& get_dynamic_out() const {
        return out.second;
    }

    void calculate() {
        // compare dimensions to remove those that are the same in both products
        auto dim_full_eq = [](const T& lhs, const T& rhs) -> bool {
            return (lhs == rhs) && DimensionTracker::get_label(lhs) == DimensionTracker::get_label(rhs) &&
                   (lhs.is_static() || DimensionTracker::has_label(lhs));
        };

        auto outs = outputs;

        // calculate input product
        for (const auto& d : inputs) {
            auto out_it = std::find_if(outs.begin(), outs.end(), [&](const T& p) {
                return dim_full_eq(d, p) && (d != 0);
            });

            if (out_it == outs.end()) {
                mul(in, d);
            } else if (!outs.empty()) {
                outs.erase(out_it);
            }
        }

        // calculate output product
        for (const auto& o : outs) {
            mul(out, o);
        }

        if (in.first != out.first) {
            in.second *= in.first;
            out.second *= out.first;
        } else if (in.first == 1 && in.second == 1) {
            // if the dynamic product is one (no dynamic dimensions) and the static product is also one, use the static value
            in.second = in.first;
        }
    }

private:
    void mul(std::pair<T, T>& prod, const T& value) {
        if (value.is_static()) {
            prod.first = value * prod.first;
        } else {
            prod.second = value * prod.second;
        }
    }

    std::vector<T> inputs{};
    std::vector<T> outputs{};
};
// resolve the minus-one dimension for ov::Dimension
template <class TDim,
          typename std::enable_if<std::is_same<typename std::decay<TDim>::type, Dimension>::value>::type* = nullptr>
TDim resolve_minus_one_dim(const Product<TDim>& product) {
    auto minus_one_dim = product.get_dynamic_in();
    auto& product_out = product.get_dynamic_out();

    if (minus_one_dim.is_static() && product_out.is_static()) {
        minus_one_dim /= product_out.get_length();
    } else {
        using namespace ov::util;
        auto& minus_one_interval = minus_one_dim.get_interval();

        if (minus_one_interval.has_upper_bound() && product_out.get_min_length() != 0 && product_out != TDim{}) {
            minus_one_interval.set_max_val(minus_one_interval.get_max_val() / product_out.get_min_length());
        } else {
            minus_one_interval.set_max_val(Interval::s_max);
        }

        if (product_out.get_max_length() != 0) {
            minus_one_interval.set_min_val(
                ceil_div(minus_one_interval.get_min_val(), product_out.get_interval().get_max_val()));
        }

        if (product_out.get_min_length() != 1 || product_out.get_max_length() != 1) {
            DimensionTracker::reset_tracking_info(minus_one_dim);
        }
    }
    return minus_one_dim;
}

// resolve the minus-one dimension for a static dimension
template <class TDim,
          typename std::enable_if<!std::is_same<typename std::decay<TDim>::type, Dimension>::value>::type* = nullptr>
TDim resolve_minus_one_dim(const Product<TDim>& product) {
    return product.get_static_in() / product.get_static_out().get_length();
}
/**
 * @brief Get the pattern and the minus-one index from the input bounds.
 *
 * @param op      Pointer to reshape node.
 * @param bounds  Vector of reshape pattern bounds.
 *
 * @return Pair of the bounds converted to a shape and the `minus_one` index in the pattern (-1 if not found).
 */
template <class TShape>
std::pair<TShape, int64_t> get_pattern_and_minus_one_idx(const Node* const op,
                                                         const std::vector<std::pair<int64_t, int64_t>>& bounds) {
    using namespace ov::util;
    const auto minus_one_bound = std::make_pair(dim::inf_bound, dim::inf_bound);

    auto result = std::make_pair(TShape{}, dim::inf_bound);
    auto& shape = std::get<0>(result);
    shape.reserve(bounds.size());

    auto& minus_one_idx = std::get<1>(result);
    auto bounds_iter = bounds.begin();

    for (size_t i = 0; i < bounds.size(); ++i, ++bounds_iter) {
        if (*bounds_iter == minus_one_bound) {
            NODE_VALIDATION_CHECK(op, minus_one_idx == dim::inf_bound, "More than one dimension has size of -1");
            minus_one_idx = static_cast<int64_t>(i);
        }
        NODE_VALIDATION_CHECK(op, *bounds_iter >= minus_one_bound, "Dim size cannot be less than -1");
        shape.emplace_back(bounds_iter->first, bounds_iter->second);
    }

    return result;
}

/**
 * @brief Set the pattern labels on the pattern shape if the pattern input is labeled.
 *
 * @param op     Pointer to reshape node.
 * @param shape  Reference to the shape whose labels are set.
 */
template <class TShape, typename std::enable_if<std::is_same<TShape, PartialShape>::value>::type* = nullptr>
void set_pattern_labels(const Node* const op, TShape& shape) {
    if (op->get_input_size() > 0) {
        auto labels = op->get_input_source_output(1).get_tensor().get_value_label();

        if (!labels.empty()) {
            auto label_iter = labels.begin();
            for (auto& d : shape) {
                if (*label_iter != no_label) {
                    DimensionTracker::set_label(d, *label_iter);
                }
                ++label_iter;
            }
        }
    }
}

/** @brief Shapes other than PartialShape have no labels. */
template <class TShape, typename std::enable_if<!std::is_same<TShape, PartialShape>::value>::type* = nullptr>
void set_pattern_labels(const Node* const, TShape&) {}

}  // namespace reshape
namespace v1 {
template <class T, class TRShape = result_shape_t<T>>
std::vector<TRShape> shape_infer(const Reshape* op,
                                 const std::vector<T>& input_shapes,
                                 const ITensorAccessor& ta = make_tensor_accessor()) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);

    using namespace ov::util;
    using TDim = typename T::value_type;

    const auto& input_shape = input_shapes[0];
    const auto& pattern_shape = input_shapes[1];
    const auto input_rank = input_shape.rank();
    const auto pattern_shape_rank = pattern_shape.rank();

    NODE_SHAPE_INFER_CHECK(op,
                           input_shapes,
                           pattern_shape_rank.compatible(0) || pattern_shape_rank.compatible(1),
                           "Pattern shape must have rank 1 or be empty");

    auto output_shapes = std::vector<TRShape>(1);
    auto& output_shape = output_shapes[0];

    if (const auto output_bounds = get_input_bounds<TRShape, int64_t>(op, 1, ta)) {
        auto pattern_and_minus_one_idx = reshape::get_pattern_and_minus_one_idx<TRShape>(op, *output_bounds);
        auto& output_pattern = pattern_and_minus_one_idx.first;
        const auto minus_one_idx = pattern_and_minus_one_idx.second;

        reshape::set_pattern_labels(op, output_pattern);

        if (pattern_shape_rank.get_max_length() == 0) {
            NODE_VALIDATION_CHECK(op,
                                  output_pattern[0] == 1,
                                  "The value of scalar shape pattern should be equal to 1!");
            output_pattern.resize(0);
        }

        const auto special_zero = op->get_special_zero();

        reshape::Product<TDim> product;

        if (input_rank.is_dynamic()) {
            for (const auto& pattern : output_pattern) {
                if (special_zero && pattern == 0) {
                    output_shape.emplace_back(dim::inf_bound);
                    product.set_inf();
                } else {
                    output_shape.emplace_back(pattern);
                    product.update_out(pattern);
                }
            }
        } else {
            auto input_iter = input_shape.begin();
            auto input_last = input_shape.end();

            for (size_t i = 0; i < output_pattern.size(); ++i) {
                const auto& pattern_dim = output_pattern[i];
                auto ignore_pattern_dim = special_zero && (pattern_dim == 0);

                if (static_cast<int64_t>(i) == minus_one_idx) {
                    output_shape.emplace_back();
                } else if (ignore_pattern_dim) {
                    NODE_SHAPE_INFER_CHECK(op, input_shapes, i < input_shape.size(), "'0' dimension is out of range");
                    output_shape.push_back(*input_iter);
                    // Exclude special zero dimension from product calculation
                } else {
                    output_shape.push_back(pattern_dim);
                    product.update_out(pattern_dim);
                }

                if (input_iter != input_last) {
                    if (!ignore_pattern_dim) {
                        product.update_in(*input_iter);
                    }
                    ++input_iter;
                }
            }

            // update input product by remaining input dimensions.
            for (; input_iter != input_last; ++input_iter) {
                product.update_in(*input_iter);
            }
        }
        product.calculate();

        // resolving -1 masked dimension
        const auto has_minus_one_idx = !dim::is_inf_bound(minus_one_idx);
        if (has_minus_one_idx) {
            auto& minus_one_dim = output_shape[minus_one_idx];
            minus_one_dim = reshape::resolve_minus_one_dim(product);

            if (product.get_static_out() == 0) {
                NODE_VALIDATION_CHECK(op,
                                      product.get_static_in() == 0,
                                      "Cannot infer '-1' dimension with zero-size output dimension unless at least one "
                                      "input dimension is also zero-size");
            } else {
                NODE_VALIDATION_CHECK(op,
                                      !dim::is_empty(minus_one_dim),
                                      "Non-'-1' output dimensions do not evenly divide the input dimensions");
            }
        }

        if (input_shape.is_static() && output_shape.is_static()) {
            const auto zero_dims = std::any_of(output_pattern.begin(), output_pattern.end(), cmp::Equal<TDim>(0));
            const auto backward_compatible_check = (zero_dims && special_zero) || has_minus_one_idx;
            const auto in_out_elements_equal = (product.get_static_in() == product.get_static_out());

            NODE_SHAPE_INFER_CHECK(op,
                                   input_shapes,
                                   backward_compatible_check || in_out_elements_equal,
                                   "Requested output shape ",
                                   output_shape,
                                   " is incompatible with input shape");
        }
    } else if (pattern_shape_rank.is_static()) {
        if (pattern_shape_rank.get_length() == 0) {
            NODE_SHAPE_INFER_CHECK(op,
                                   input_shapes,
                                   input_rank.compatible(0),
                                   "Input must be scalar as pattern is scalar!");
        } else {
            output_shape =
                PartialShape::dynamic(Rank(pattern_shape[0].get_min_length(), pattern_shape[0].get_max_length()));
        }
    } else {
        output_shape = PartialShape::dynamic();
    }
    return output_shapes;
}
}  // namespace v1
}  // namespace op
}  // namespace ov
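A minimal usage sketch of the new entry point (internal API; the call pattern mirrors `Reshape::evaluate_reshape` further down in this diff, and the helper name `infer_reshape_output` is hypothetical):

#include "reshape_shape_inference.hpp"

// Hypothetical caller: infer the output shape of a Reshape from concrete input tensors.
ov::PartialShape infer_reshape_output(const ov::op::v1::Reshape* op, const ov::TensorVector& inputs) {
    std::vector<ov::PartialShape> input_shapes{inputs[0].get_shape(), inputs[1].get_shape()};
    // The tensor accessor lets shape inference read the pattern values from input 1.
    const auto output_shapes = ov::op::v1::shape_infer(op, input_shapes, ov::make_tensor_accessor(inputs));
    return output_shapes.front();  // e.g. [2,12] for input {2,3,4}, pattern {0,-1}, special_zero = true
}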
@@ -10,93 +10,6 @@

 #include "utils.hpp"

-template <class T, class TRShape = ov::result_shape_t<T>>
-std::vector<TRShape> shape_infer(const ov::op::v1::Reshape* op,
-                                 const std::vector<T>& input_shapes,
-                                 const ov::ITensorAccessor& ta = ov::make_tensor_accessor()) {
-    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
-    auto output_pattern = ov::op::get_input_const_data_as<TRShape, int64_t>(op, 1, ta);
-    NODE_VALIDATION_CHECK(op, output_pattern, "Shape inference lacks input data");
-
-    auto& input_shape = input_shapes[0];
-    OPENVINO_ASSERT(input_shape.is_static());
-    auto output_shapes = std::vector<TRShape>(1);
-    auto& output_shape = output_shapes[0];
-    output_shape.resize(output_pattern->size());
-
-    auto output_rank = input_shapes[1].size() == 0 ? 0 : input_shapes[1][0];
-    if (output_rank == 0 && output_shape.size() != 0) {
-        output_pattern->clear();
-        OPENVINO_ASSERT(output_pattern->size() == 1);
-        NODE_VALIDATION_CHECK(op, (*output_pattern)[0] == 1, "The value of scalar shape pattern should be equal to 1!");
-    }
-
-    auto special_zero = op->get_special_zero();
-
-    size_t output_product(1);
-    int64_t minus_one_idx = -1;
-    for (size_t i = 0; i < output_pattern->size(); ++i) {
-        if ((*output_pattern)[i] == -1) {  // resolving everything except -1
-            NODE_VALIDATION_CHECK(op,
-                                  minus_one_idx == -1,
-                                  "More than one element of output shape pattern has value of -1");
-            minus_one_idx = static_cast<int64_t>(i);
-            continue;
-        }
-
-        auto pattern_dim = (*output_pattern)[i];
-        if (pattern_dim == 0 && special_zero) {
-            NODE_VALIDATION_CHECK(op, i < input_shape.size(), "'0' dimension is out of range");
-            output_shape[i] = input_shape[i];
-            // we do not include dimension to output product here and won't include in input
-            // product later because we will divide output_product by input_product. This
-            // dimension contributes to both products equally
-        } else {
-            output_shape[i] = pattern_dim;
-            output_product *= pattern_dim;
-        }
-    }
-    size_t input_product(1);
-    for (size_t i = 0; i < input_shape.size(); ++i) {
-        if (i < output_pattern->size() && (*output_pattern)[i] == 0 && special_zero)
-            continue;
-        input_product = input_shape[i].get_length() * input_product;
-    }
-
-    if (minus_one_idx != -1)  // resolving -1 masked dimension
-    {
-        if (output_product == 0) {
-            NODE_VALIDATION_CHECK(op,
-                                  input_product == 0,
-                                  "Cannot infer '-1' dimension with zero-size output "
-                                  "dimension unless at least one input dimension is "
-                                  "also zero-size");
-            output_shape[minus_one_idx] = 0;
-        } else {
-            NODE_VALIDATION_CHECK(op,
-                                  input_product % output_product == 0,
-                                  "Non-'-1' output dimensions do not evenly divide the input dimensions");
-            output_shape[minus_one_idx] = input_product / output_product;
-        }
-    }
-
-    size_t zero_dims = std::count_if(output_pattern->begin(), output_pattern->end(), [](const int64_t& dim) {
-        return dim == 0;
-    });
-
-    bool backward_compatible_check = (zero_dims && special_zero) || minus_one_idx != -1;
-    bool in_out_elements_equal = input_product == output_product;
-
-    NODE_VALIDATION_CHECK(op,
-                          backward_compatible_check || in_out_elements_equal,
-                          "Requested output shape ",
-                          output_shape,
-                          " is incompatible with input shape ",
-                          input_shape);
-
-    return output_shapes;
-}
-
 namespace ov {
 namespace op {
 namespace shape_of {
@@ -385,19 +385,20 @@ ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const IT
        };
    };

+    constexpr auto cast = ov::util::Cast<TData>();
    ov::optional<TResult> out;

-    if (auto lowers = op::get_input_const_data_as<TShape, TData>(op, port, ta)) {
-        const auto& et = get_input_const_element_type(op, port, ta);
+    if (const auto t = ta(port)) {
+        const auto& et = t.get_element_type();
+        const auto lowers = get_tensor_data_as<TData>(t, cast);
        out.emplace();
-        out->reserve(lowers->size());
-        std::transform(lowers->cbegin(), lowers->cend(), lowers->begin(), std::back_inserter(*out), make_bound(et));
+        out->reserve(lowers.size());
+        std::transform(lowers.cbegin(), lowers.cend(), lowers.cbegin(), std::back_inserter(*out), make_bound(et));
    } else {
        auto bounds = ov::evaluate_both_bounds(op->get_input_source_output(port));

        if (bounds.first && bounds.second) {
            const auto& et = bounds.first.get_element_type();
-            constexpr auto cast = ov::util::Cast<TData>();
            auto lowers = get_tensor_data_as<TData>(bounds.first, cast);
            auto uppers = get_tensor_data_as<TData>(bounds.second, cast);

@@ -406,6 +407,10 @@ ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const IT
            std::transform(lowers.begin(), lowers.end(), uppers.begin(), std::back_inserter(*out), make_bound(et));
        }
    }
+
+    if (!std::is_same<TShape, PartialShape>::value) {
+        NODE_VALIDATION_CHECK(op, out, "Static shape inference lacks constant data on port ", port);
+    }
    return out;
}
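For orientation: `get_input_bounds` now queries the tensor accessor first and falls back to `evaluate_both_bounds` only when no constant tensor is available; in both cases it yields one `(lower, upper)` pair per pattern element, and a pattern value of -1 arrives as the pair `(-1, -1)`, which is exactly the `minus_one_bound` marker that `get_pattern_and_minus_one_idx` in the new header searches for.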
@@ -5,221 +5,95 @@

 #include "openvino/op/reshape.hpp"

 #include <algorithm>
 #include <vector>

 #include "bound_evaluate.hpp"
 #include "compare.hpp"
 #include "itt.hpp"
+#include "ngraph/util.hpp"
 #include "openvino/core/dimension_tracker.hpp"
 #include "openvino/core/validation_util.hpp"
 #include "openvino/op/constant.hpp"
 #include "openvino/op/util/precision_sensitive_attribute.hpp"
 #include "openvino/reference/reshape.hpp"
+#include "reshape_shape_inference.hpp"

-using namespace std;
-using namespace ov;
+namespace ov {
+namespace op {
+namespace v1 {

-namespace reshapeop {
-namespace {
-
-template <element::Type_t ET>
-void compute_output_shape(const ov::Tensor& shape_pattern, std::vector<int64_t>& output_shape) {
-    size_t output_rank;
-    if (shape_pattern.get_size() != 0) {
-        output_rank = shape_pattern.get_shape().empty() ? 0 : shape_pattern.get_shape()[0];
-    } else {
-        // Can be dynamic during shape infer as conversion result from empty ov::Tensor
-        output_rank = 0;
-    }
-
-    for (size_t i = 0; i < output_rank; i++) {
-        output_shape.push_back(shape_pattern.data<typename ov::element_type_traits<ET>::value_type>()[i]);
-    }
-}
-}  // namespace
-}  // namespace reshapeop
-
-op::v1::Reshape::Reshape(const Output<Node>& arg, const Output<Node>& shape_pattern, bool zero_flag)
+Reshape::Reshape(const Output<Node>& arg, const Output<Node>& shape_pattern, bool zero_flag)
    : Op({arg, shape_pattern}),
      m_special_zero(zero_flag) {
    ov::mark_as_precision_sensitive(input(1));
    constructor_validate_and_infer_types();
}

-bool op::v1::Reshape::visit_attributes(AttributeVisitor& visitor) {
+bool Reshape::visit_attributes(AttributeVisitor& visitor) {
    OV_OP_SCOPE(v1_Reshape_visit_attributes);
    visitor.on_attribute("special_zero", m_special_zero);
    return true;
}

-void op::v1::Reshape::validate_and_infer_types() {
+void Reshape::validate_and_infer_types() {
    OV_OP_SCOPE(v1_Reshape_validate_and_infer_types);
-    auto shape_pattern_et = get_input_element_type(1);
+    const auto& shape_pattern_et = get_input_element_type(1);
    // check data types
    NODE_VALIDATION_CHECK(this,
                          shape_pattern_et.is_integral_number(),
                          "PartialShape pattern must be an integral number.");

-    // check shapes
-    const ov::PartialShape& input_pshape = get_input_partial_shape(0);
-    const ov::PartialShape& shape_pattern_shape = get_input_partial_shape(1);
-    NODE_VALIDATION_CHECK(this,
-                          shape_pattern_shape.rank().compatible(1) ||
-                              (shape_pattern_shape.rank().is_static() && shape_pattern_shape.rank().get_length() == 0),
-                          "Pattern shape must have rank 1 or be empty, got ",
-                          shape_pattern_shape.rank(),
-                          ".");
-    Rank output_rank = shape_pattern_shape.rank().is_dynamic()
-                           ? Rank::dynamic()
-                           : shape_pattern_shape.rank().get_length() == 0 ? 0 : shape_pattern_shape[0];
-    set_output_type(0, get_input_element_type(0), ov::PartialShape::dynamic(output_rank));
    set_input_is_relevant_to_shape(1);

-    std::vector<Dimension> reshape_pattern;
-    bool shape_can_be_calculated = false;
-    int64_t minus_one_idx = -1;
-
-    ov::Tensor lb, ub;
-    std::tie(lb, ub) = evaluate_both_bounds(get_input_source_output(1));
-    if (lb && ub) {
-        const auto lower_bound = std::make_shared<op::v0::Constant>(lb.get_element_type(), lb.get_shape(), lb.data())
-                                     ->cast_vector<int64_t>();
-        auto upper_bound = std::make_shared<op::v0::Constant>(ub.get_element_type(), ub.get_shape(), ub.data())
-                               ->cast_vector<int64_t>();
-        shape_can_be_calculated = true;
-        OPENVINO_ASSERT(lower_bound.size() == upper_bound.size());
-        const TensorLabel& labels = get_input_source_output(1).get_tensor().get_value_label();
-        OPENVINO_ASSERT(labels.empty() || lower_bound.size() == labels.size());
-
-        for (size_t i = 0; i < lower_bound.size(); ++i) {
-            NODE_VALIDATION_CHECK(this,
-                                  lower_bound[i] >= -1 && upper_bound[i] >= -1,
-                                  "Dim size cannot be less than -1");
-
-            if (lower_bound[i] == -1 &&
-                upper_bound[i] == -1) {  // ctor of Dimension(-1) would turn input Dimension(0, max_int)
-                NODE_VALIDATION_CHECK(this, minus_one_idx == -1, "More than one dimension has size of -1");
-                minus_one_idx = static_cast<int64_t>(i);
-            }
-
-            // We must handle i32 fully dynamic dimension in a special way
-            if (get_input_element_type(1) == element::i32 &&
-                upper_bound[i] == std::numeric_limits<std::int32_t>::max()) {
-                upper_bound[i] = std::numeric_limits<std::int64_t>::max();
-            }
-            auto d = Dimension(lower_bound[i], upper_bound[i]);
-            if (!labels.empty() && labels[i])
-                ov::DimensionTracker::set_label(d, labels[i]);
-            reshape_pattern.emplace_back(d);
-        }
-        // For the scalar case the reshape pattern should be empty or equal to 1
-        if (output_rank.is_static() && output_rank.get_length() == 0 && !lower_bound.empty()) {
-            reshape_pattern.clear();
-            OPENVINO_ASSERT(lower_bound.size() == 1);
-            NODE_VALIDATION_CHECK(this,
-                                  lower_bound[0] == 1 && upper_bound[0] == 1,
-                                  "The value of scalar shape pattern should be equal to 1!");
-        }
-    }
-
-    if (shape_can_be_calculated) {
-        std::vector<Dimension> output_shape(output_rank.get_length());
-        calculate_output_shape(reshape_pattern, minus_one_idx, input_pshape, output_shape);
-        set_output_type(0, get_input_element_type(0), output_shape);
-    }
+    OPENVINO_SUPPRESS_DEPRECATED_START
+    auto input_shapes = ov::get_node_input_partial_shapes(*this);
+    OPENVINO_SUPPRESS_DEPRECATED_END
+    const auto output_shapes = shape_infer(this, input_shapes);
+    set_output_type(0, get_input_element_type(0), output_shapes.front());
}

-shared_ptr<Node> op::v1::Reshape::clone_with_new_inputs(const OutputVector& new_args) const {
+std::shared_ptr<Node> Reshape::clone_with_new_inputs(const OutputVector& new_args) const {
    OV_OP_SCOPE(v1_Reshape_clone_with_new_inputs);
    check_new_args_count(this, new_args);
-    return make_shared<v1::Reshape>(new_args.at(0), new_args.at(1), m_special_zero);
+    return std::make_shared<Reshape>(new_args.at(0), new_args.at(1), m_special_zero);
}

-#define COMPUTE_OUT_SHAPE_CASE(a, ...)                                    \
-    case element::Type_t::a: {                                            \
-        OV_OP_SCOPE(OV_PP_CAT3(compute_reshape_out_shape, _, a));         \
-        reshapeop::compute_output_shape<element::Type_t::a>(__VA_ARGS__); \
-    } break;
-
-bool op::v1::Reshape::evaluate_reshape(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
-    // infer and set output shape if the output shape contain -1
-    // and zero value dimension
-    std::vector<int64_t> out_shape_val;
-
-    switch (inputs[1].get_element_type()) {
-        COMPUTE_OUT_SHAPE_CASE(i8, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(i16, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(i32, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(i64, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(u8, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(u16, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(u32, inputs[1], out_shape_val);
-        COMPUTE_OUT_SHAPE_CASE(u64, inputs[1], out_shape_val);
-    default:
-        OPENVINO_THROW("shape_pattern element type is not integral data type");
-    }
+bool Reshape::evaluate_reshape(TensorVector& outputs, const TensorVector& inputs) const {
+    std::vector<PartialShape> input_shapes;
+    input_shapes.reserve(inputs.size());
+    for (const auto& in : inputs) {
+        input_shapes.push_back(in.get_shape());
+    }

-    std::vector<Dimension> reshape_pattern;
-    int64_t minus_one_idx = -1;
-    for (size_t i = 0; i < out_shape_val.size(); ++i) {
-        NODE_VALIDATION_CHECK(this, out_shape_val[i] >= -1, "Dim size cannot be less than -1");
-        if (out_shape_val[i] == -1) {  // ctor of Dimension(-1) would turn input Dimension(0, max_int)
-            NODE_VALIDATION_CHECK(this, minus_one_idx == -1, "More than one dimension has size of -1");
-            minus_one_idx = static_cast<int64_t>(i);
-        }
-        reshape_pattern.emplace_back(out_shape_val[i]);
-    }
+    const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape();
+    if (outputs.empty()) {
+        outputs.emplace_back(inputs[0].get_element_type(), output_shape);
+    } else {
+        OPENVINO_ASSERT(outputs.size() == 1);
+        outputs[0].set_shape(output_shape);
+    }

-    std::vector<Dimension> output_shape(out_shape_val.size());
-    calculate_output_shape(reshape_pattern, minus_one_idx, inputs[0].get_shape(), output_shape);
-    OPENVINO_ASSERT(ov::PartialShape(output_shape).is_static());
-    outputs[0].set_shape(ov::PartialShape(output_shape).to_shape());
-
-    ov::reference::reshape(static_cast<char*>(inputs[0].data()),
+    ov::reference::reshape(static_cast<const char*>(inputs[0].data()),
                           static_cast<char*>(outputs[0].data()),
                           inputs[0].get_shape(),
                           inputs[0].get_element_type().size());
    return true;
}

-bool op::v1::Reshape::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
+bool Reshape::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
    OV_OP_SCOPE(v1_Reshape_evaluate);
    OPENVINO_ASSERT(inputs.size() == 2);
    if (outputs.empty())
        outputs.emplace_back(ov::Tensor(inputs[0].get_element_type(), {0}));
    else
        OPENVINO_ASSERT(outputs.size() == 1);
    return evaluate_reshape(outputs, inputs);
}

-bool op::v1::Reshape::has_evaluate() const {
+bool Reshape::has_evaluate() const {
    OV_OP_SCOPE(v1_Reshape_has_evaluate);
-    switch (get_input_element_type(1)) {
-    case ov::element::i8:
-    case ov::element::i16:
-    case ov::element::i32:
-    case ov::element::i64:
-    case ov::element::u8:
-    case ov::element::u16:
-    case ov::element::u32:
-    case ov::element::u64:
-        return true;
-    default:
-        break;
-    }
-    return false;
+    const auto& shape_pattern_et = get_input_element_type(1);
+    return shape_pattern_et.is_integral_number() && (shape_pattern_et.bitwidth() >= 8);
}

-bool op::v1::Reshape::evaluate_lower(ov::TensorVector& output_values) const {
+bool Reshape::evaluate_lower(ov::TensorVector& output_values) const {
    return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}

-bool op::v1::Reshape::evaluate_upper(ov::TensorVector& output_values) const {
+bool Reshape::evaluate_upper(ov::TensorVector& output_values) const {
    return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}

-bool op::v1::Reshape::evaluate_label(TensorLabelVector& output_labels) const {
+bool Reshape::evaluate_label(TensorLabelVector& output_labels) const {
    if (!get_input_tensor(1).has_and_set_bound())
        return false;
    OPENVINO_SUPPRESS_DEPRECATED_START

@@ -227,7 +101,7 @@ bool op::v1::Reshape::evaluate_label(TensorLabelVector& output_labels) const {
    OPENVINO_SUPPRESS_DEPRECATED_END
}

-bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) {
+bool Reshape::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) {
    if (get_output_partial_shape(0).is_dynamic() || is_const_fold_disabled()) {
        return false;
    }

@@ -240,189 +114,6 @@ bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVec
    }
    return false;
}

-namespace {
-bool fully_eq(const Dimension& rhs, const Dimension& lhs) {
-    return rhs == lhs && ov::DimensionTracker::get_label(rhs) == ov::DimensionTracker::get_label(lhs) &&
-           (ov::DimensionTracker::get_label(rhs) || rhs.is_static());
-}
-
-Dimension resolve_minus_one(const Node* reshape_node,
-                            vector<Dimension>& input_product,
-                            vector<Dimension>& output_product) {
-    std::vector<Dimension> to_delete_from_output, to_delete_from_input;
-    Dimension input_const_part(1), output_const_part(1);
-
-    for (const auto& dim : output_product)
-        if (dim.is_static()) {
-            output_const_part *= dim;
-            to_delete_from_output.push_back(dim);
-        }
-
-    for (const auto& dim : input_product)
-        if (dim.is_static()) {
-            input_const_part *= dim;
-            to_delete_from_input.push_back(dim);
-        }
-
-    for (const auto& dim : to_delete_from_input) {
-        input_product.erase(std::remove_if(input_product.begin(),
-                                           input_product.end(),
-                                           [=](const Dimension& d) {
-                                               return fully_eq(dim, d);
-                                           }),
-                            input_product.end());
-    }
-    for (const auto& dim : to_delete_from_output) {
-        output_product.erase(std::remove_if(output_product.begin(),
-                                            output_product.end(),
-                                            [=](const Dimension& d) {
-                                                return fully_eq(dim, d);
-                                            }),
-                             output_product.end());
-    }
-
-    to_delete_from_input.clear();
-    to_delete_from_output.clear();
-
-    if (input_const_part != output_const_part) {
-        input_product.push_back(input_const_part);
-        output_product.push_back(output_const_part);
-    }
-
-    for (const auto& out_dim : output_product) {
-        const auto& it = std::find_if(input_product.begin(), input_product.end(), [out_dim](const Dimension& in_dim) {
-            return fully_eq(out_dim, in_dim);
-        });
-        if (it != input_product.end()) {
-            to_delete_from_output.push_back(out_dim);
-            to_delete_from_input.push_back(out_dim);
-        }
-    }
-    for (const auto& dim : to_delete_from_input) {
-        input_product.erase(std::remove_if(input_product.begin(),
-                                           input_product.end(),
-                                           [=](const Dimension& d) {
-                                               return fully_eq(dim, d);
-                                           }),
-                            input_product.end());
-    }
-    for (const auto& dim : to_delete_from_output) {
-        output_product.erase(std::remove_if(output_product.begin(),
-                                            output_product.end(),
-                                            [=](const Dimension& d) {
-                                                return fully_eq(dim, d);
-                                            }),
-                             output_product.end());
-    }
-
-    if (output_product.empty() && input_product.size() == 1)
-        return input_product[0];
-
-    Dimension input_dim(1), output_dim(1);
-    for (const auto& i : input_product) {
-        input_dim *= i;
-    }
-    for (const auto& i : output_product) {
-        output_dim *= i;
-    }
-
-    if (output_dim == 0) {
-        NODE_VALIDATION_CHECK(reshape_node,
-                              input_dim == 0,
-                              "Cannot infer '-1' dimension with zero-size output "
-                              "dimension unless at least one input dimension is "
-                              "also zero-size");
-        return Dimension(0);
-    } else {
-        if (input_dim.is_static() && output_dim.is_static()) {
-            NODE_VALIDATION_CHECK(reshape_node,
-                                  input_dim.get_length() % output_dim.get_length() == 0,
-                                  "Non-'-1' output dimensions do not evenly divide the input dimensions");
-        }
-
-        if (output_dim == Dimension() || input_dim == Dimension()) {
-            return Dimension::dynamic();
-        } else {
-            auto in_min = input_dim.get_min_length(), in_max = input_dim.get_max_length();
-            auto out_min = output_dim.get_min_length(), out_max = output_dim.get_max_length();
-
-            Dimension::value_type lower;
-            if (in_min == -1 || out_max == -1)
-                lower = -1;  // dynamic
-            else
-                lower = static_cast<Dimension::value_type>(ceil(static_cast<double>(in_min) / (out_max ? out_max : 1)));
-
-            Dimension::value_type upper;
-            if (in_max == -1 || out_min == -1)
-                upper = -1;  // dynamic
-            else
-                upper =
-                    static_cast<Dimension::value_type>(floor(static_cast<double>(in_max) / (out_min ? out_min : 1)));

-            if (lower == -1 || (lower > upper && upper > -1))
-                return Dimension::dynamic();
-            else
-                return {lower, upper};
-        }
-    }
-}
-}  // namespace
-
-void op::v1::Reshape::calculate_output_shape(vector<Dimension>& reshape_pattern,
-                                             const int64_t& minus_one_idx,
-                                             const ov::PartialShape& input_pshape,
-                                             vector<Dimension>& output_shape) const {
-    std::vector<Dimension> output_product;
-    for (int64_t i = 0; i < static_cast<int64_t>(reshape_pattern.size()); ++i) {
-        if (i == minus_one_idx)  // resolving everything except -1
-            continue;
-
-        auto pattern_dim = reshape_pattern[i];
-        if (pattern_dim == 0 && get_special_zero()) {
-            if (input_pshape.rank().is_dynamic()) {
-                output_shape[i] = Dimension::dynamic();
-                output_product.push_back(Dimension::dynamic());
-            } else {
-                NODE_VALIDATION_CHECK(this, i < input_pshape.rank().get_length(), "'0' dimension is out of range");
-                output_shape[i] = input_pshape[i];
-                // we do not include dimension to output product here and won't include in input
-                // product later because we will divide output_product by input_product. This
-                // dimension contributes to both products equally, but in case this dimension
-                // is dynamic and others are not we could fully define output dimension that
-                // is masked by -1
-            }
-        } else {
-            output_shape[i] = pattern_dim;
-            output_product.push_back(pattern_dim);
-        }
-    }
-    std::vector<Dimension> input_product;
-    if (input_pshape.rank().is_static())
-        for (int64_t i = 0; i < input_pshape.rank().get_length(); ++i) {
-            if (i < static_cast<int64_t>(reshape_pattern.size()) && reshape_pattern[i].get_min_length() == 0 &&
-                reshape_pattern[i].get_max_length() == 0)
-                continue;
-            input_product.push_back(input_pshape[i]);
-        }
-    else
-        input_product.push_back(Dimension::dynamic());
-
-    if (minus_one_idx != -1)  // resolving -1 masked dimension
-        output_shape[minus_one_idx] = resolve_minus_one(this, input_product, output_product);
-
-    ov::PartialShape output_pshape(output_shape);
-    if (input_pshape.is_static() && output_pshape.is_static()) {
-        size_t zero_dims = std::count_if(reshape_pattern.begin(), reshape_pattern.end(), cmp::Equal<Dimension>(0));
-
-        bool backward_compatible_check = (zero_dims && get_special_zero()) || minus_one_idx != -1;
-        bool in_out_elements_equal = shape_size(input_pshape.get_shape()) == shape_size(output_pshape.to_shape());
-
-        NODE_VALIDATION_CHECK(this,
-                              backward_compatible_check || in_out_elements_equal,
-                              "Requested output shape ",
-                              output_shape,
-                              " is incompatible with input shape ",
-                              input_pshape);
-    }
-}
+}  // namespace v1
+}  // namespace op
+}  // namespace ov
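A standalone sketch (public API only; not part of the diff) of exercising the rewritten evaluate path, which now allocates the output tensor from the inferred shape when the caller passes an empty output vector:

#include <iostream>
#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/runtime/tensor.hpp"

int main() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    auto pattern = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {-1});
    auto reshape = std::make_shared<ov::op::v1::Reshape>(data, pattern, false);

    std::vector<float> values{1, 2, 3, 4, 5, 6};
    std::vector<int64_t> pattern_values{-1};
    ov::TensorVector inputs{ov::Tensor(ov::element::f32, ov::Shape{2, 3}, values.data()),
                            ov::Tensor(ov::element::i64, ov::Shape{1}, pattern_values.data())};
    ov::TensorVector outputs;  // left empty: evaluate allocates it to the inferred shape {6}

    reshape->evaluate(outputs, inputs);
    std::cout << outputs[0].get_shape() << std::endl;  // prints [6]
    return 0;
}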
@@ -758,7 +758,7 @@ TEST(eval, evaluate_reshape_v1_not_backward_compatible_and_in_out_size_not_eq) {

    OV_EXPECT_THROW(model->evaluate(out_vector, in_vector),
                    NodeValidationFailure,
-                    HasSubstr("Requested output shape [2,1,1,1,1] is incompatible with input shape [2,2,2]"));
+                    HasSubstr("Requested output shape [2,1,1,1,1] is incompatible with input shape"));
}

TEST(eval, evaluate_convert) {
@@ -4,6 +4,7 @@

 #include "openvino/op/reshape.hpp"

+#include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
 #include "openvino/core/dimension_tracker.hpp"
 #include "openvino/op/broadcast.hpp"

@@ -18,8 +19,12 @@
 #include "openvino/op/squeeze.hpp"
 #include "openvino/op/unsqueeze.hpp"

-using namespace std;
 using namespace ov;
+using std::ignore;
+using std::make_shared;
+using testing::Each;
+using testing::ElementsAre;
+using testing::HasSubstr;

 TEST(type_prop, static_value_propagation) {
     auto param = make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 2, 3});
@@ -263,10 +268,10 @@ TEST(type_prop, interval_value_propagation_reshape_zero_special_value) {
                  PartialShape({Dimension(1, 8), 3, Dimension(16, 64), Dimension(200, 400)}));
}

-TEST(type_prop, interval_value_propagation_reshape_zero_minus_one_special_values) {
+TEST(type_prop, reshape_interval_value_propagation_reshape_zero_minus_one_special_values) {
    auto param =
-        make_shared<ov::op::v0::Parameter>(element::f32,
-                                           PartialShape{Dimension(1, 8), Dimension(16, 64), 6, Dimension(200, 400)});
+        make_shared<op::v0::Parameter>(element::f32,
+                                       PartialShape{Dimension(1, 8), Dimension(16, 64), 6, Dimension(200, 400)});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);

    auto dim_0 = make_shared<op::v1::Gather>(shape_of,

@@ -325,20 +330,6 @@ TEST(type_prop, reshape_deduce_zero_special) {
    ASSERT_EQ(r->get_shape(), (Shape{6, 2, 5}));
}

-TEST(type_prop, reshape_deduce_wrong_output_shape) {
-    auto param = make_shared<ov::op::v0::Parameter>(element::f32, Shape{3, 4, 5});
-    try {
-        auto r =
-            make_shared<op::v1::Reshape>(param, ov::op::v0::Constant::create(element::u64, {3}, Shape{3, 3, 3}), false);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "No exception was thrown";
-    } catch (const NodeValidationFailure& error) {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("is incompatible with input shape"));
-    } catch (...) {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
//
// Input shape rank dynamic, so we should set the desired output shape
//

@@ -615,12 +606,14 @@ TEST(type_prop, reshape_to_zero_shape_dynamic) {
}

TEST(type_prop, reshape_to_zero_shape_incorrect) {
-    auto param = make_shared<ov::op::v0::Parameter>(element::f32, Shape{2, 1});
-    ASSERT_THROW(const auto unused = make_shared<op::v1::Reshape>(
-                     param,
-                     ov::op::v0::Constant::create(element::i64, {1}, std::vector<int64_t>{0}),
-                     false),
-                 std::exception);
+    auto param = make_shared<op::v0::Parameter>(element::f32, Shape{2, 1});
+
+    OV_EXPECT_THROW(
+        ignore = make_shared<op::v1::Reshape>(param,
+                                              op::v0::Constant::create(element::i64, {1}, std::vector<int64_t>{0}),
+                                              false),
+        NodeValidationFailure,
+        HasSubstr("Requested output shape [0] is incompatible with input shape"));
}

TEST(type_prop, reshape_to_zero) {

@@ -651,23 +644,36 @@ TEST(type_prop, reshape_to_scalar_2) {
}

TEST(type_prop, reshape_to_scalar_3) {
-    auto param = make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 2, 3});
-    ASSERT_THROW(const auto unused = make_shared<op::v1::Reshape>(
-                     param,
-                     ov::op::v0::Constant::create(element::i64, {}, std::vector<int64_t>{100}),
-                     false),
-                 std::exception);
+    auto param = make_shared<op::v0::Parameter>(element::f32, Shape{1, 2, 3});
+
+    OV_EXPECT_THROW(
+        ignore = make_shared<op::v1::Reshape>(param,
+                                              op::v0::Constant::create(element::i64, {}, std::vector<int64_t>{100}),
+                                              false),
+        NodeValidationFailure,
+        HasSubstr("The value of scalar shape pattern should be equal to 1"));
}

-TEST(type_prop, dynamic_shape_propagation_with_i32_precision) {
-    auto param = make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{1, -1, -1});
-    auto shape_of = std::make_shared<op::v3::ShapeOf>(param, element::i32);
+TEST(type_prop, reshape_to_scalar_4) {
+    auto param = make_shared<op::v0::Parameter>(element::f32, Shape{1, 2, 3});

-    auto indices = ov::op::v0::Constant::create(element::i32, {3}, {1, 2, 0});
-    auto axis = ov::op::v0::Constant::create(element::i32, {1}, {0});
-    auto gather = std::make_shared<op::v1::Gather>(shape_of, indices, axis);
+    OV_EXPECT_THROW(
+        ignore = make_shared<op::v1::Reshape>(param,
+                                              op::v0::Constant::create(element::i64, {}, std::vector<int64_t>{1}),
+                                              false),
+        NodeValidationFailure,
+        HasSubstr("Requested output shape [] is incompatible with input shape"));
+}

-    auto reshape = std::make_shared<op::v1::Reshape>(param, gather, true);
+TEST(type_prop, reshape_dynamic_shape_propagation_with_i32_precision) {
+    auto param = make_shared<op::v0::Parameter>(element::f32, PartialShape{1, -1, -1});
+    auto shape_of = make_shared<op::v3::ShapeOf>(param, element::i32);
+
+    auto indices = op::v0::Constant::create(element::i32, {3}, {1, 2, 0});
+    auto axis = op::v0::Constant::create(element::i32, {1}, {0});
+    auto gather = make_shared<op::v1::Gather>(shape_of, indices, axis);
+
+    auto reshape = make_shared<op::v1::Reshape>(param, gather, true);

    ASSERT_EQ(reshape->get_element_type(), element::f32);
    ASSERT_EQ(reshape->get_output_partial_shape(0), (PartialShape{-1, -1, 1}));
@ -684,33 +690,573 @@ TEST(type_prop, reshape_dynamic_value_and_label_propagation) {
|
||||
|
||||
const auto& et = element::i64;
|
||||
std::vector<int64_t> zero{0};
|
||||
const auto indices = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
|
||||
const auto axis = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
|
||||
const auto gather = std::make_shared<op::v7::Gather>(shape_0, indices, axis);
|
||||
const auto indices = make_shared<op::v0::Constant>(et, Shape{}, zero);
|
||||
const auto axis = make_shared<op::v0::Constant>(et, Shape{}, zero);
|
||||
const auto gather = make_shared<op::v7::Gather>(shape_0, indices, axis);
|
||||
|
||||
const auto output_pattern = std::make_shared<op::v0::Constant>(et, Shape{1}, std::vector<int64_t>{-1});
|
||||
const auto unsqueeze = std::make_shared<op::v1::Reshape>(gather, output_pattern, false);
|
||||
const auto output_pattern = make_shared<op::v0::Constant>(et, Shape{1}, std::vector<int64_t>{-1});
|
||||
const auto unsqueeze = make_shared<op::v1::Reshape>(gather, output_pattern, false);
|
||||
|
||||
auto bc = std::make_shared<op::v1::Broadcast>(param, unsqueeze);
|
||||
auto bc = make_shared<op::v1::Broadcast>(param, unsqueeze);
|
||||
ASSERT_EQ(bc->get_shape(), (Shape{3}));
|
||||
|
||||
const auto& output_shape = bc->get_output_partial_shape(0);
|
||||
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
|
||||
EXPECT_EQ(output_shape, PartialShape({3}));
|
||||
EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10));
|
||||
}
|
||||
|
||||
TEST(type_prop, reshape_label_shape_propagation_minus_one) {
|
||||
Dimension marked_0 = Dimension(-1);
|
||||
ov::DimensionTracker::set_label(marked_0, 10);
|
||||
TEST(type_prop, reshape_when_pattern_has_static_shape_only) {
|
||||
auto param = make_shared<op::v0::Parameter>(element::f32, Shape{3, 4});
|
||||
auto shape_pattern = make_shared<op::v0::Parameter>(element::u64, PartialShape{3});
|
||||
auto r = make_shared<op::v1::Reshape>(param, shape_pattern, false);
|
||||
|
||||
PartialShape initial_shape = PartialShape{marked_0, 4, 3, 1};
|
||||
EXPECT_EQ(r->get_element_type(), element::f32);
|
||||
EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic(3));
|
||||
}
|
||||
|
||||
auto input = std::make_shared<ov::op::v0::Parameter>(element::f32, initial_shape);
|
||||
auto output_pattern = std::make_shared<ov::op::v0::Constant>(element::i64, Shape{2}, std::vector<int64_t>{-1, 12});
|
||||
TEST(type_prop, reshape_when_pattern_has_interval_shape_only) {
|
||||
auto param = make_shared<op::v0::Parameter>(element::f32, Shape{3, 4});
|
||||
auto shape_pattern = make_shared<op::v0::Parameter>(element::u64, PartialShape{{1, 3}});
|
||||
auto r = make_shared<op::v1::Reshape>(param, shape_pattern, false);
|
||||
|
||||
const auto reshape = std::make_shared<op::v1::Reshape>(input, output_pattern, false);
|
||||
EXPECT_EQ(r->get_element_type(), element::f32);
|
||||
EXPECT_EQ(r->get_output_partial_shape(0), PartialShape::dynamic());
|
||||
}
|
||||
|
||||
TEST(type_prop, reshape_when_pattern_has_scalar_shape_only) {
|
||||
auto param = make_shared<op::v0::Parameter>(element::f32, Shape{3, 4});
|
||||
auto shape_pattern = make_shared<op::v0::Parameter>(element::u64, PartialShape{});
|
||||
|
||||
OV_EXPECT_THROW(ignore = make_shared<op::v1::Reshape>(param, shape_pattern, false),
|
||||
NodeValidationFailure,
|
||||
HasSubstr("Input must be scalar as pattern is scalar!"));
|
||||
}
|
||||
|
||||
TEST(type_prop, reshape_label_propagation) {
|
||||
auto param_shape = PartialShape{{1, 2}, {2, 4}, 6, {2, 4}, 8};
|
||||
auto out_shape = PartialShape{{3, 5}, 0, 1, 0};
|
||||
set_shape_labels(param_shape, 10);
|
||||
set_shape_labels(out_shape, 20);
|
||||
|
||||
const auto data = make_shared<op::v0::Parameter>(element::f32, param_shape);
|
||||
const auto out = make_shared<op::v0::Parameter>(element::f32, out_shape);
|
||||
const auto shape_of = make_shared<op::v3::ShapeOf>(out);
|
||||
const auto special_volume = op::v0::Constant::create(element::i64, {1}, {-1});
|
||||
const auto shape = make_shared<op::v0::Concat>(OutputVector{shape_of, special_volume}, 0);
|
||||
|
||||
const auto op = make_shared<op::v1::Reshape>(data, shape, true);
|
||||
|
||||
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{3, 5}, {2, 4}, 1, {2, 4}, {10, 32}}));
|
||||
EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(20, 11, 22, 13, ov::no_label));
|
||||
}
|
||||
|
||||
TEST(type_prop, reshape_label_propagation_dynamic_pattern_got_same_label_as_input) {
|
||||
auto param_shape = PartialShape{{1, 2}, {2, 4}, {3, 5}, {2, 4}, 8};
|
||||
auto out_shape = PartialShape{{3, 5}, 0, 1, 0, 8};
|
||||
set_shape_labels(param_shape, 10);
|
||||
set_shape_labels(out_shape, {12, 21, 22, 23, 24});
|
||||
|
||||
const auto data = make_shared<op::v0::Parameter>(element::f32, param_shape);
|
||||
const auto out = make_shared<op::v0::Parameter>(element::f32, out_shape);
|
||||
const auto shape_of = make_shared<op::v3::ShapeOf>(out);
|
||||
const auto special_volume = op::v0::Constant::create(element::i64, {1}, {-1});
|
||||
const auto shape = make_shared<op::v0::Concat>(OutputVector{shape_of, special_volume}, 0);
|
||||
|
||||
const auto op = make_shared<op::v1::Reshape>(data, shape, true);
|
||||
|
||||
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{3, 5}, {2, 4}, 1, {2, 4}, 8, {1, 2}}));
|
||||
EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(12, 11, 22, 13, 24, 10));
|
||||
}
|
||||
|
||||
TEST(type_prop, reshape_label_propagation_minus_one_corner_case_zero_div_by_inf) {
|
||||
auto param_shape = PartialShape{0, 0};
|
||||
auto out_shape = PartialShape{-1, 2};
|
||||
set_shape_labels(param_shape, 10);
|
||||
set_shape_labels(out_shape, 20);
|
||||
|
||||
const auto data = make_shared<op::v0::Parameter>(element::f32, param_shape);
|
||||
const auto out = make_shared<op::v0::Parameter>(element::f32, out_shape);
|
||||
const auto shape_of = make_shared<op::v3::ShapeOf>(out);
|
||||
const auto special_volume = op::v0::Constant::create(element::i64, {1}, {-1});
|
||||
const auto shape = make_shared<op::v0::Concat>(OutputVector{special_volume, shape_of}, 0);
|
||||
|
||||
const auto op = make_shared<op::v1::Reshape>(data, shape, true);
|
||||
|
||||
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({-1, -1, 2}));
|
||||
EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(ov::no_label, 20, 21));
|
||||
}
|
||||
|
||||
TEST(type_prop, reshape_default_ctor) {
|
||||
auto param_shape = PartialShape{{1, 2}, {2, 4}, 6, {2, 4}, 8};
|
||||
auto out_shape = PartialShape{{3, 5}, 0, 1, 0};
|
||||
set_shape_labels(param_shape, 10);
|
||||
set_shape_labels(out_shape, 20);
|
||||
|
||||
const auto data = make_shared<op::v0::Parameter>(element::f32, param_shape);
|
||||
const auto out = make_shared<op::v0::Parameter>(element::f32, out_shape);
|
||||
const auto shape_of = make_shared<op::v3::ShapeOf>(out);
|
||||
const auto special_volume = op::v0::Constant::create(element::i64, {1}, {-1});
|
||||
const auto shape = make_shared<op::v0::Concat>(OutputVector{shape_of, special_volume}, 0);
|
||||
|
||||
const auto op = make_shared<op::v1::Reshape>();
|
||||
op->set_arguments(OutputVector{data, shape});
|
||||
op->set_special_zero(true);
|
||||
op->validate_and_infer_types();
|
||||
|
||||
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{3, 5}, {2, 4}, 1, {2, 4}, {10, 32}}));
|
||||
EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(20, 11, 22, 13, ov::no_label));
|
||||
}

TEST(type_prop, reshape_deduce_wrong_output_shape) {
    auto param = make_shared<op::v0::Parameter>(element::f32, Shape{3, 4, 5});

    OV_EXPECT_THROW(
        ignore = make_shared<op::v1::Reshape>(param, op::v0::Constant::create(element::u64, {3}, {3, 3, 3}), false),
        NodeValidationFailure,
        HasSubstr("is incompatible with input shape"));
}

TEST(type_prop, reshape_pattern_shape_not_1d) {
    auto param = make_shared<op::v0::Parameter>(element::f32, Shape{3, 4, 5});

    OV_EXPECT_THROW(
        ignore =
            make_shared<op::v1::Reshape>(param, op::v0::Constant::create(element::u64, {3, 1}, Shape{3, 5, 4}), false),
        NodeValidationFailure,
        HasSubstr("Pattern shape must have rank 1 or be empty"));
}
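
// The first case fails on volume: 3 * 4 * 5 = 60 elements cannot be rearranged into
// 3 * 3 * 3 = 27. The second fails earlier, because the pattern must be a 1-D tensor (or have
// an empty shape), not a {3, 1} matrix.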

TEST(type_prop, reshape_multiple_minus_one_no_special_zero) {
    const auto data = make_shared<op::v0::Parameter>(element::f32, PartialShape{{1, 2}, {2, 4}, 6, {2, 4}, 8});

    OV_EXPECT_THROW(
        ignore = make_shared<op::v1::Reshape>(data, op::v0::Constant::create(element::i64, {3}, {-1, 5, -1}), false),
        NodeValidationFailure,
        HasSubstr("More than one dimension has size of -1"));
}

TEST(type_prop, reshape_multiple_minus_one_special_zero_set) {
    const auto data = make_shared<op::v0::Parameter>(element::f32, PartialShape{{1, 2}, {2, 4}, 6, {2, 4}, 8});

    OV_EXPECT_THROW(
        ignore = make_shared<op::v1::Reshape>(data, op::v0::Constant::create(element::i64, {3}, {-1, 5, -1}), true),
        NodeValidationFailure,
        HasSubstr("More than one dimension has size of -1"));
}
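
// Only one '-1' placeholder is allowed regardless of special_zero: with two unknowns the
// remaining product no longer determines either of them.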

TEST(type_prop, reshape_special_zero_out_of_data_rank) {
    const auto data = make_shared<op::v0::Parameter>(element::f32, PartialShape{{1, 2}, {2, 4}, 8});

    OV_EXPECT_THROW(
        ignore = make_shared<op::v1::Reshape>(data, op::v0::Constant::create(element::i64, {4}, {5, 1, 1, 0}), true),
        NodeValidationFailure,
        HasSubstr("'0' dimension is out of range"));
}

TEST(type_prop, reshape_special_zero_cannot_div) {
    const auto data = make_shared<op::v0::Parameter>(element::f32, PartialShape{2, 5, 4});

    OV_EXPECT_THROW(
        ignore = make_shared<op::v1::Reshape>(data, op::v0::Constant::create(element::i64, {3}, {10, -1, 3}), false),
        NodeValidationFailure,
        HasSubstr("Non-'-1' output dimensions do not evenly divide the input dimensions"));
}

TEST(type_prop, reshape_zero_dim_in_output_pattern_but_not_in_data_shape) {
    const auto data = make_shared<op::v0::Parameter>(element::f32, PartialShape{2, 5, 4});

    OV_EXPECT_THROW(
        ignore = make_shared<op::v1::Reshape>(data, op::v0::Constant::create(element::i64, {3}, {5, 0, -1}), false),
        NodeValidationFailure,
        HasSubstr("Cannot infer '-1' dimension with zero-size output dimension unless at least one input dimension is "
                  "also zero-size"));
}
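
// The divisibility failure is plain arithmetic: the input holds 2 * 5 * 4 = 40 elements, the
// fixed output dimensions give 10 * 3 = 30, and 40 / 30 is not an integer, so no value of '-1'
// can balance the volumes.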

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_input_has_zero) {
    auto data_shape = PartialShape{4, 0, 2, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{3}, std::vector<int64_t>{12, 0, 1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    ASSERT_EQ(output_shape, PartialShape({-1, 12}));
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[1]), 0);
    EXPECT_EQ(output_shape, PartialShape({12, 0, 1}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, 22));
}

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_1) {
    auto data_shape = PartialShape{4, -1, 2, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{3}, std::vector<int64_t>{-1, 12, 2});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({-1, 12, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(11, 21, 22));
}

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_2) {
    auto data_shape = PartialShape{4, 2, {2, 6}, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{3}, std::vector<int64_t>{-1, 12, 2});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{2, 6}, 12, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(12, 21, 22));
}
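
// In both cases the static products cancel (4 * 2 * 1 * 3 = 24 against 12 * 2 = 24), so the
// '-1' resolves to the single remaining input dimension and inherits its label (11 in case_1,
// 12 in case_2).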

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_3) {
    auto data_shape = PartialShape{{2, 4}, 2, {2, 6}, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{3}, std::vector<int64_t>{-1, 12, 2});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{1, 6}, 12, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(no_label, 21, 22));
}

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_4) {
    PartialShape data_shape = PartialShape{2, {2, 4}, 2, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern =
        make_shared<op::v0::Constant>(element::i64, Shape{6}, std::vector<int64_t>{1, 4, 3, 1, 1, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24, 25});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 4, 3, 1, 1, {2, 4}}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, 22, 23, 24, 11));
}
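
// case_3 leaves two input dimensions unresolved, so the '-1' becomes the interval {1, 6} with
// no label; in case_4 the static products cancel (2 * 2 * 3 = 12 against 1 * 4 * 3 * 1 * 1 = 12)
// and the lone leftover dimension {2, 4} passes through with its label 11.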

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_5) {
    PartialShape data_shape = PartialShape{2, 4, 2, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern =
        make_shared<op::v0::Constant>(element::i64, Shape{6}, std::vector<int64_t>{1, 4, 3, 1, 1, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24, 25});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 4, 3, 1, 1, 4}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, 22, 23, 24, no_label));
}

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_6) {
    PartialShape data_shape = PartialShape{2, 3, 2, 1, 4};
    DimensionTracker::set_label(data_shape[1], 11);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{5}, std::vector<int64_t>{4, 1, -1, 1, 4});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({4, 1, 3, 1, 4}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, no_label, 23, 24));
}
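
// case_5 and case_6 show the conservative side of the rule: once '-1' has to be computed from
// a quotient (48 / 12 = 4, 48 / 16 = 3) instead of being a single pass-through dimension, no
// input label propagates, even if the numeric value happens to match a labeled input dimension.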

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_7) {
    PartialShape data_shape = PartialShape{{1, 2}, 4, 2, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{4}, std::vector<int64_t>{4, 2, 3, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({4, 2, 3, {1, 2}}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, 22, 10));
}

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_8) {
    PartialShape data_shape = PartialShape{{1, 2}, 4, 2, 3};
    DimensionTracker::set_label(data_shape[0], 121);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{4}, std::vector<int64_t>{4, 2, 3, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({4, 2, 3, {1, 2}}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, 22, 121));
}
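
// case_7 and case_8: the static products cancel (4 * 2 * 3 = 24 on both sides), so the interval
// {1, 2} survives as the '-1' result along with whatever label the input dimension carried (10
// or 121).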

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_9) {
    PartialShape data_shape = PartialShape{2, 4, 2, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{4}, std::vector<int64_t>{4, 2, 3, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({4, 2, 3, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 21, 22, no_label));
}

TEST(type_prop, reshape_label_propagation_minus_one_no_special_zero_case_10) {
    PartialShape data_shape = PartialShape{1, {1, -1}, {1, -1}, 512};
    set_shape_labels(data_shape, 10);
    constexpr int64_t squeeze_dim = 7 * 7 * 512;

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{2}, std::vector<int64_t>{-1, squeeze_dim});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{1, -1}, squeeze_dim}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(no_label, 21));
}
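
// case_10 mirrors a common CNN flattening pattern (7 * 7 * 512 = 25088): with two unbounded
// input dimensions, the '-1' batch dimension can only be bounded from below, and no label
// survives the division.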

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_1) {
    auto data_shape = PartialShape{4, -1, 2, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{3}, std::vector<int64_t>{-1, 12, 0});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({-1, 12, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(11, 21, 12));
}

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_2) {
    auto data_shape = PartialShape{{2, 4}, 8, {2, 6}, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, -1, 12, 2});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{2, 4}, {2, 6}, 12, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10, 12, 22, 23));
}

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_3) {
    auto data_shape = PartialShape{{2, 4}, 8, 6, 1, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, -1, 12, 2});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{2, 4}, 6, 12, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10, no_label, 22, 23));
}
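
// With special_zero set, a '0' entry copies the input dimension at the same index together with
// its label (the copied dimension at index 0 keeps label 10; in case_1 the trailing '0' copies
// dimension 2 with label 12), and the copied dimension also takes part in the '-1' computation.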

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_4) {
    PartialShape data_shape = PartialShape{2, 10, 4, {1, 5}, {1, 2}, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern =
        make_shared<op::v0::Constant>(element::i64, Shape{7}, std::vector<int64_t>{1, 0, 4, 0, 6, 1, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24, 25, 26});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 10, 4, {1, 5}, 6, 1, {1, 2}}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 11, 22, 13, 24, 25, 14));
}

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_5) {
    PartialShape data_shape = PartialShape{2, 10, 4, {1, 5}, 2, 3};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern =
        make_shared<op::v0::Constant>(element::i64, Shape{7}, std::vector<int64_t>{1, 0, 4, 0, 6, 1, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24, 25, 26});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 10, 4, {1, 5}, 6, 1, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, 11, 22, 13, 24, 25, no_label));
}

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_6) {
    PartialShape data_shape = PartialShape{2, 3, 2, 1, 4};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{5}, std::vector<int64_t>{0, 0, -1, 0, 0});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({2, 3, 2, 1, 4}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10, 11, 12, 13, 14));
}

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_7) {
    auto data_shape = PartialShape{{2, 4}, 12, -1, 1, 2};
    DimensionTracker::set_label(data_shape[2], 121);
    DimensionTracker::set_label(data_shape[0], 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{5}, std::vector<int64_t>{0, -1, 3, 4, 3});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, no_label});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{2, 4}, -1, 3, 4, 3}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10, no_label, 22, 23, no_label));
}

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_8) {
    auto data_shape = PartialShape{{2, 4}, 4, -1, 1, 3, 3};
    DimensionTracker::set_label(data_shape[2], 121);
    DimensionTracker::set_label(data_shape[0], 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{5}, std::vector<int64_t>{0, -1, 3, 4, 3});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, no_label});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({{2, 4}, -1, 3, 4, 3}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10, 121, 22, 23, no_label));
}
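
// case_7 vs case_8: the dynamic dimension keeps its tracked label 121 only when the remaining
// static products cancel exactly (4 * 1 * 3 * 3 = 36 against 3 * 4 * 3 = 36 in case_8); in
// case_7 they do not (12 * 1 * 2 = 24), so the '-1' result is an unlabeled dynamic dimension.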

TEST(type_prop, reshape_label_propagation_minus_one_special_zero_case_9) {
    PartialShape data_shape = PartialShape{2, 3, {2, 4}, 1, 4};
    set_shape_labels(data_shape, 10);

    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{5}, std::vector<int64_t>{0, 0, -1, 1, 0});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21, 22, 23, 24});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, true);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({2, 3, {2, 4}, 1, 4}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(10, 11, 12, 23, 14));
}

TEST(type_prop, reshape_tricky_label_propagation_for_auto_batch_case_1) {
    auto shape = PartialShape({1, 1280, 1, 1});
    DimensionTracker::set_label(shape[0], 1);
    auto param = make_shared<op::v0::Parameter>(element::f32, shape);
    auto pattern = op::v0::Constant::create(element::i64, {2}, {-1, 1280});
    auto r = make_shared<op::v1::Reshape>(param, pattern, false);

    auto output_shape = r->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 1280}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(no_label, no_label));
}

TEST(type_prop, reshape_tricky_label_propagation_for_auto_batch_case_2) {
    auto shape = ov::PartialShape({1, 1280, 1, 1});
    DimensionTracker::set_label(shape[2], 2);
    auto param = make_shared<op::v0::Parameter>(element::f32, shape);
    auto pattern = op::v0::Constant::create(element::i64, {2}, {-1, 1280});
    auto r = make_shared<op::v1::Reshape>(param, pattern, false);

    auto output_shape = r->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 1280}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(no_label, no_label));
}

TEST(type_prop, reshape_tricky_label_propagation_for_auto_batch_case_3) {
    auto shape = PartialShape({1, 1280, 1, 1});
    DimensionTracker::set_label(shape[0], 1);
    DimensionTracker::set_label(shape[2], 2);
    auto param = make_shared<op::v0::Parameter>(element::f32, shape);
    auto pattern = op::v0::Constant::create(element::i64, {2}, {-1, 1280});
    auto r = make_shared<op::v1::Reshape>(param, pattern, false);

    auto output_shape = r->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 1280}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(no_label, no_label));
}

TEST(type_prop, reshape_tricky_label_propagation_for_auto_batch_case_4) {
    auto shape = PartialShape({1, 1280});
    DimensionTracker::set_label(shape[0], 1);
    auto param = make_shared<op::v0::Parameter>(element::f32, shape);
    auto pattern = op::v0::Constant::create(element::i64, {2}, {-1, 1280});
    auto r = make_shared<op::v1::Reshape>(param, pattern, false);

    auto output_shape = r->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({1, 1280}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(1, no_label));
}
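
// The auto-batch cases confirm that folding several input dimensions into one '-1' drops their
// labels; only case_4, where the labeled batch dimension maps onto itself, keeps label 1.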

TEST(type_prop, reshape_resolve_minus_one_when_static_product_same_value) {
    auto data_shape = PartialShape{2, 3, 4, 5};
    set_shape_labels(data_shape, 10);
    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    auto output_pattern = make_shared<op::v0::Constant>(element::i64, Shape{2}, std::vector<int64_t>{120, -1});
    output_pattern->get_default_output().get_tensor().set_value_label({20, 21});

    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({120, 1}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(20, no_label));
}
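
// Here the pattern's static dimension 120 already equals the full input volume
// (2 * 3 * 4 * 5 = 120), so '-1' must resolve to 1; being synthesized, it carries no label.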

TEST(type_prop, reshape_label_not_propagated_on_minus_one_dim_as_not_same_dynamic_dim) {
    auto data_shape = PartialShape{-1, 2};
    auto pattern_shape = PartialShape{-1, -1, 2};
    set_shape_labels(data_shape, {90, no_label});
    set_shape_labels(pattern_shape, {37, 87, 98});

    auto pattern = make_shared<op::v0::Parameter>(element::i32, pattern_shape);
    auto pattern_shape_of = make_shared<op::v3::ShapeOf>(pattern, element::i32);
    auto dim_minus_one = ov::op::v0::Constant::create(element::i32, {1}, {-1});
    dim_minus_one->get_default_output().get_tensor().set_value_label({93});
    auto output_pattern = make_shared<op::v0::Concat>(OutputVector{dim_minus_one, pattern_shape_of}, 0);
    auto input = make_shared<op::v0::Parameter>(element::f32, data_shape);
    const auto reshape = make_shared<op::v1::Reshape>(input, output_pattern, false);

    auto output_shape = reshape->get_output_partial_shape(0);
    EXPECT_EQ(output_shape, PartialShape({-1, -1, -1, 2}));
    EXPECT_THAT(get_shape_labels(output_shape), ElementsAre(no_label, 37, 87, 98));
}
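
// The '-1' pattern element carries value label 93, but the dimension it yields is not the same
// dynamic dimension as any input one (the input dimension gets modified on the way through), so
// the label is deliberately not propagated.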

@ -82,6 +82,7 @@
#include "reduce_shape_inference.hpp"
#include "region_yolo_shape_inference.hpp"
#include "reorg_yolo_shape_inference.hpp"
#include "reshape_shape_inference.hpp"
#include "reverse_sequence_shape_inference.hpp"
#include "reverse_shape_inference.hpp"
#include "rnn_cell_shape_inference.hpp"

@ -0,0 +1,105 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gmock/gmock.h>

#include "common_test_utils/test_assertions.hpp"
#include "openvino/op/reshape.hpp"
#include "utils.hpp"

using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;

class ReshapeV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v1::Reshape> {};

TEST_F(ReshapeV1StaticShapeInferenceTest, default_ctor_no_args) {
    op = make_op();
    op->set_special_zero(true);

    int64_t shape_pattern[] = {2, 4, 0, 1, -1};
    auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, Tensor(element::i64, ov::Shape{5}, shape_pattern)}};
    input_shapes = ShapeVector{{2, 9, 12, 8}, {5}};

    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({2, 4, 12, 1, 18}));
}
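
// The pattern is not a graph constant here, so its values come in through the const_data map
// keyed by input port (port 1). The '0' copies input dimension 12 and the '-1' resolves to
// 2 * 9 * 12 * 8 / (2 * 4 * 12 * 1) = 18.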

TEST_F(ReshapeV1StaticShapeInferenceTest, all_inputs_are_dynamic_rank) {
    int64_t shape_pattern[] = {2, 4, 0, 1, -1};
    auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, Tensor(element::i64, ov::Shape{5}, shape_pattern)}};

    const auto data = std::make_shared<op::v0::Parameter>(element::i16, PartialShape::dynamic());
    const auto pattern = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    op = make_op(data, pattern, true);

    input_shapes = ShapeVector{{9, 24, 8}, {5}};
    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({2, 4, 8, 1, 27}));
}

TEST_F(ReshapeV1StaticShapeInferenceTest, all_inputs_are_static_rank) {
    int64_t shape_pattern[] = {2, 4, 1, -1};
    auto const_data = std::unordered_map<size_t, ov::Tensor>{{1, Tensor(element::i64, ov::Shape{4}, shape_pattern)}};

    const auto data = std::make_shared<op::v0::Parameter>(element::i16, PartialShape::dynamic(5));
    const auto pattern = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic(1));
    op = make_op(data, pattern, false);

    input_shapes = ShapeVector{{9, 24, 8}, {4}};
    output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({2, 4, 1, 216}));
}

TEST_F(ReshapeV1StaticShapeInferenceTest, pattern_with_special_values) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic(4));
    const auto pattern = op::v0::Constant::create(element::i32, ov::Shape{2}, {0, -1});

    op = make_op(data, pattern, true);

    input_shapes = ShapeVector{{3, 6, 5, 5}, {2}};
    output_shapes = shape_inference(op.get(), input_shapes);

    EXPECT_EQ(output_shapes.front(), StaticShape({3, 150}));
}
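
// When the pattern is a Constant, the const_data map is unnecessary: {0, -1} keeps the leading
// batch of 3 and folds the remaining 6 * 5 * 5 = 150 elements into one dimension.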

TEST_F(ReshapeV1StaticShapeInferenceTest, reshape_to_empty_volume) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{-1, 2, 2});
    const auto pattern = op::v0::Constant::create(element::i32, ov::Shape{2}, {0, 4});

    op = make_op(data, pattern, false);

    input_shapes = ShapeVector{{0, 2, 2}, {2}};
    output_shapes = shape_inference(op.get(), input_shapes);

    EXPECT_EQ(output_shapes.front(), StaticShape({0, 4}));
}

TEST_F(ReshapeV1StaticShapeInferenceTest, reshape_pattern_not_defined) {
    const auto data = std::make_shared<op::v0::Parameter>(element::i16, PartialShape::dynamic());
    const auto pattern = std::make_shared<op::v0::Parameter>(element::i64, PartialShape::dynamic());
    op = make_op(data, pattern, true);

    input_shapes = ShapeVector{{9, 24, 8}, {5}};
    OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes),
                    NodeValidationFailure,
                    HasSubstr("Static shape inference lacks constant data on port 1"));
}

TEST_F(ReshapeV1StaticShapeInferenceTest, shape_pattern_as_constant) {
    const auto data = std::make_shared<op::v0::Parameter>(element::i16, PartialShape::dynamic(5));
    const auto pattern = op::v0::Constant::create(element::i32, ov::Shape{3}, {2, 4, 1});
    op = make_op(data, pattern, false);

    input_shapes = ShapeVector{{9, 24, 8}, {4}};
    OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes),
                    NodeValidationFailure,
                    HasSubstr("is incompatible with input shape"));
}
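
// Static inference must either receive the pattern values (constant or const_data) or fail
// loudly; volume mismatches (9 * 24 * 8 = 1728 elements vs 2 * 4 * 1 = 8) are reported with the
// same "is incompatible with input shape" diagnostic as the dynamic path.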

@ -9,30 +9,6 @@
using namespace ov;
using namespace ov::intel_cpu;

TEST(StaticShapeInferenceTest, ReshapeTest) {
    auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
    auto pattern = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{0, -1});

    auto reduce = std::make_shared<op::v1::Reshape>(data, pattern, true);

    std::vector<StaticShape> static_input_shapes = {StaticShape{3, 6, 5, 5}, StaticShape{2}};
    const auto static_output_shapes = shape_inference(reduce.get(), static_input_shapes);

    ASSERT_EQ(static_output_shapes[0], StaticShape({3, 150}));
}

TEST(StaticShapeInferenceTest, ReshapeEmptyTest) {
    auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, 2, 2});
    auto pattern = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{0, 4});

    auto reduce = std::make_shared<op::v1::Reshape>(data, pattern, false);

    std::vector<StaticShape> static_input_shapes = {StaticShape{0, 2, 2}, StaticShape{2}};
    const auto static_output_shapes = shape_inference(reduce.get(), static_input_shapes);

    ASSERT_EQ(static_output_shapes[0], StaticShape({0, 4}));
}

TEST(StaticShapeInferenceTest, ShapeOf5DTest) {
    auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});

@ -11,6 +11,7 @@
#include "shape_inference/static_shape.hpp"

namespace ov {

namespace intel_cpu {

using ShapeVector = std::vector<ov::intel_cpu::StaticShape>;

@ -8,7 +8,7 @@
#include "json_object.h"
#include "primitive_type_base.h"
#include "reshape_inst.h"
#include "shape_nodes.hpp"
#include "reshape_shape_inference.hpp"
#include "squeeze_shape_inference.hpp"
#include "unsqueeze_shape_inference.hpp"

@ -94,7 +94,7 @@ std::vector<layout> reshape_inst::calc_output_layouts(reshape_node const& /*node
        ov::op::v1::Reshape op;
        op.set_special_zero(prim->special_zero);
        op.set_friendly_name(prim->id.c_str());
        output_shapes = shape_infer(&op, input_shapes, ta);
        output_shapes = ov::op::v1::shape_infer(&op, input_shapes, ta);
        break;
    }
    case reshape::reshape_mode::squeeze: {