[shape_infer]Implement shape inference of DepthToSpace,SpaceToDepth, BatchToSpace,SpaceToBatch (#8465)

* Implement the batch to space shape infer

* Implement the space_to_batch shape inference.

* Implement shape infer of space_to_depth and depth_to_space OPs

* Fix Azure building issue.

* Add namespace for the shape_infer function.

* Avoid using friend declaration for shape infer.

* update coding style issue

* Update based on review comments

* Apply review comments

* Add test cases.

* Update the shape infer flow.

* Fix the bug in the previous test case.

* Update coding style.

* Fix the code bug caused by the DepthToSpace check fix.

* update coding style.

* Implement the Dimension/StaticDimension division operator by a value

* Refine the code.

* Fix the issue where T implicitly constructs StaticShape from PartialShape during comparison

* Update the CI issue.

* Move the shape_infer helper into src folder.

* Apply the review comments.

* Coding style fix.

* Remove the ngraph folder

* Applied review comments

* Fix CI windows building issue

* Move test into new folder.

* Do not support negative divisors.

* Apply review comments.

* Fix CI issues

* Apply review comments.

* Update

Co-authored-by: Evgenya Stepyreva <evgenya.stepyreva@intel.com>
This commit is contained in:
Luwei Zhou 2022-01-12 08:32:14 +08:00 committed by GitHub
parent 6fbfd96ba0
commit 5bf44b92e1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 757 additions and 233 deletions

View File

@ -132,6 +132,18 @@ public:
/// \return Smallest interval dimension enclosing inputs
Dimension operator-(const Dimension& dim) const;
/// \brief Division operator for Dimension divided by a value_type parameter.
/// \param divisor Right operand for division.
/// \return Smallest interval dimension enclosing inputs
Dimension operator/(const value_type divisor) const;
/// \brief Divided-into operator for Dimension.
/// \param divisor Right operand for division.
/// \return A reference to `*this`, after updating `*this` to the value `*this / divisor`.
Dimension& operator/=(const value_type divisor) {
return (*this = *this / divisor);
}
/// \brief Multiplication operator for Dimension.
/// \param dim Right operand for multiplication.
/// \return Smallest interval containing all "produces" which are 0 if either of `this` or

View File

@ -42,7 +42,7 @@ public:
DepthToSpace(const Output<Node>& data, const std::string& mode, std::size_t block_size = 1);
bool visit_attributes(AttributeVisitor& visitor) override;
std::size_t get_block_size() const {
const std::size_t& get_block_size() const {
return m_blocksize;
}
DepthToSpaceMode get_mode() const {

View File

@ -40,7 +40,7 @@ public:
SpaceToDepth(const Output<Node>& data, const std::string& mode, std::size_t block_size = 1);
bool visit_attributes(AttributeVisitor& visitor) override;
std::size_t get_block_size() const {
const std::size_t& get_block_size() const {
return m_blocksize;
}
SpaceToDepthMode get_mode() const {

View File

@ -0,0 +1,119 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <openvino/core/validation_util.hpp>
#include <openvino/op/batch_to_space.hpp>
#include <openvino/opsets/opset2.hpp>
#include "utils.hpp"
namespace ov {
namespace op {
namespace v1 {
// Shape inference for v1::BatchToSpace.
// input_shapes: [data, block_shape, crops_begin, crops_end]; output_shapes: one element.
// T is a shape type — ov::PartialShape for dynamic inference, or a static-shape class
// exposing the same interface (rank(), merge_into(), resize(), operator[]).
// constant_data optionally supplies constant tensors for inputs 1..3 when the graph
// constants are not directly reachable from the op.
template <class T>
void shape_infer(const ov::op::v1::BatchToSpace* op,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes,
                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
    // Scalar/dimension value type of a single dimension of T (int64_t for PartialShape).
    using ValType = typename std::iterator_traits<typename T::iterator>::value_type::value_type;
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 1);
    const auto& data_shape = input_shapes[0];
    const auto& block_shape = input_shapes[1];
    const auto& crops_begin_shape = input_shapes[2];
    const auto& crops_end_shape = input_shapes[3];
    bool got_const_data = false;

    // block_shape, crops_begin and crops_end must all share a single rank-1 shape.
    auto inputs_same_ps = crops_begin_shape;
    NODE_VALIDATION_CHECK(op,
                          T::merge_into(inputs_same_ps, crops_end_shape) && T::merge_into(inputs_same_ps, block_shape),
                          "block_shape, crops_begin and crops_end inputs must have the same shape. Got: ",
                          block_shape,
                          ", ",
                          crops_begin_shape,
                          " and ",
                          crops_end_shape);
    NODE_VALIDATION_CHECK(op,
                          inputs_same_ps.rank().compatible(1),
                          "block_shape and crops inputs must have rank 1. Got: ",
                          inputs_same_ps.rank());

    const ov::Rank data_rank = data_shape.rank();
    if (data_rank.is_static()) {
        NODE_VALIDATION_CHECK(op,
                              (data_rank.get_length() >= 2),
                              "data input must have rank greater or equal than 2. Got: ",
                              data_rank.get_length());
        if (inputs_same_ps.is_static()) {
            // Each data axis needs exactly one block value and one crop pair.
            NODE_VALIDATION_CHECK(op,
                                  data_rank.get_length() == inputs_same_ps[0].get_length(),
                                  "block_shape and crop inputs must have same number of elements "
                                  "as data input rank. Got: ",
                                  inputs_same_ps[0],
                                  " and ",
                                  data_rank);
        }

        auto& output_shape = output_shapes[0];
        output_shape.resize(data_shape.size());
        std::vector<int64_t> block_val, crops_begin_val, crops_end_val;
        // Exact output dims can only be computed when all three auxiliary inputs are constant.
        if (get_data_as_int64<T>(1, op, block_val, constant_data) &&
            get_data_as_int64<T>(2, op, crops_begin_val, constant_data) &&
            get_data_as_int64<T>(3, op, crops_end_val, constant_data)) {
            got_const_data = true;
            bool block_vals_valid = std::all_of(begin(block_val), end(block_val), [](int64_t elem) {
                return elem >= 1;
            });
            NODE_VALIDATION_CHECK(op,
                                  block_vals_valid,
                                  "Elements of block_shape input must be greater or equal to one.");
            bool crops_begin_vals_valid = std::all_of(begin(crops_begin_val), end(crops_begin_val), [](int64_t elem) {
                return elem >= 0;
            });
            bool crops_end_vals_valid = std::all_of(begin(crops_end_val), end(crops_end_val), [](int64_t elem) {
                return elem >= 0;
            });
            NODE_VALIDATION_CHECK(op,
                                  crops_begin_vals_valid && crops_end_vals_valid,
                                  "Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
            if (data_shape.is_static()) {
                // Cropping may not remove more elements than the block-expanded axis holds.
                for (size_t idx = 0; idx < data_shape.size(); idx++) {
                    const bool is_valid_crops_and_shape =
                        crops_begin_val[idx] + crops_end_val[idx] <= block_val[idx] * data_shape[idx].get_length();
                    NODE_VALIDATION_CHECK(op,
                                          is_valid_crops_and_shape,
                                          "crops_begin[i] + crops_end[i] must be less or equal to "
                                          "block_shape[i] * input_shape[i]");
                }
            }

            // The batch axis shrinks by the product of all block values; the division
            // must be exact, which check_divided_result enforces.
            int64_t block_prod = std::accumulate(begin(block_val), end(block_val), 1, std::multiplies<int64_t>());
            const auto divisor = static_cast<ValType>(block_prod);
            output_shape[0] = data_shape[0] / divisor;
            check_divided_result(op, output_shape[0], data_shape[0], divisor);
            // Every non-batch axis grows by its block value, then loses the crops.
            for (size_t idx = 1; idx < data_shape.size(); idx++) {
                output_shape[idx] = data_shape[idx] * static_cast<ValType>(block_val[idx]) -
                                    static_cast<ValType>(crops_begin_val[idx]) -
                                    static_cast<ValType>(crops_end_val[idx]);
            }
        }
    }

    if (!got_const_data)
        // For PartialShape, Set the output to be dynamic;
        // For StaticShape, throw error caused by implicitly constructing StaticShape with PartialShape argument;
        output_shapes[0] = ov::PartialShape::dynamic(data_rank);
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,59 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <openvino/core/validation_util.hpp>
#include <openvino/op/depth_to_space.hpp>
#include <openvino/opsets/opset1.hpp>
#include "utils.hpp"
namespace ov {
namespace op {
namespace v0 {
// Shape inference for v0::DepthToSpace.
// input_shapes: [data]; output_shapes: one element.
// The channel axis (1) is divided by block_size^(spatial dims) and every spatial
// axis is multiplied by block_size; axis 0 (batch) is forwarded unchanged.
// T is a shape type — ov::PartialShape for dynamic inference, or a compatible
// static-shape class.
template <class T>
void shape_infer(const ov::op::v0::DepthToSpace* op,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes) {
    // Scalar/dimension value type of a single dimension of T (int64_t for PartialShape).
    using ValType = typename std::iterator_traits<typename T::iterator>::value_type::value_type;
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1);
    const auto& data_shape = input_shapes[0];
    const ov::Rank data_rank = data_shape.rank();
    const auto& block_size = op->get_block_size();
    if (data_rank.is_static()) {
        NODE_VALIDATION_CHECK(op,
                              data_shape.size() >= 3,
                              "The input tensor with rank lower than 3 is not supported (input rank: ",
                              data_shape.size(),
                              ")");
        // Compute block_size^(rank - 2) in pure integer arithmetic. The previous
        // std::pow-based version routed the exponentiation through floating point,
        // which can lose precision for large block sizes and needed <cmath>.
        size_t divider = 1;
        for (size_t i = 0; i < data_shape.size() - 2; ++i) {
            divider *= block_size;
        }
        // divider is zero only when block_size itself is zero.
        NODE_VALIDATION_CHECK(op, (divider), "DepthToSpace: The divider must not be 0");

        auto& output_shape = output_shapes[0];
        output_shape.resize(data_shape.size());
        output_shape[0] = data_shape[0];
        const auto divisor = static_cast<ValType>(divider);
        // Channels must divide exactly; check_divided_result reports the failure.
        output_shape[1] = data_shape[1] / divisor;
        check_divided_result(op, output_shape[1], data_shape[1], divisor);
        for (size_t i = 2; i < output_shape.size(); i++) {
            output_shape[i] = data_shape[i] * static_cast<ValType>(block_size);
        }
    } else {
        // For PartialShape, Set the output to be dynamic;
        // For StaticShape, throw error caused by implicitly constructing StaticShape with PartialShape argument;
        output_shapes[0] = ov::PartialShape::dynamic(data_rank);
    }
}
} // namespace v0
} // namespace op
} // namespace ov

View File

@ -0,0 +1,90 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <openvino/core/validation_util.hpp>
#include <openvino/op/space_to_batch.hpp>
#include <openvino/opsets/opset2.hpp>
#include "utils.hpp"
namespace ov {
namespace op {
namespace v1 {
// Shape inference for v1::SpaceToBatch.
// input_shapes: [data, block_shape, pads_begin, pads_end]; output_shapes: one element.
// T is a shape type — ov::PartialShape for dynamic inference, or a static-shape class
// exposing the same interface.
// constant_data optionally supplies constant tensors for inputs 1..3.
template <class T>
void shape_infer(const ov::op::v1::SpaceToBatch* op,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes,
                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
    // Scalar/dimension value type of a single dimension of T (int64_t for PartialShape).
    using ValType = typename std::iterator_traits<typename T::iterator>::value_type::value_type;
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 1);
    const auto& data_shape = input_shapes[0];
    const auto& block_shape = input_shapes[1];
    const auto& pads_begin_shape = input_shapes[2];
    const auto& pads_end_shape = input_shapes[3];
    const ov::Rank data_rank = data_shape.rank();
    bool got_const_data = false;

    // block_shape, pads_begin and pads_end must all share a single rank-1 shape.
    auto inputs_same_ps = pads_begin_shape;
    NODE_VALIDATION_CHECK(op,
                          T::merge_into(inputs_same_ps, pads_end_shape) && T::merge_into(inputs_same_ps, block_shape),
                          "block_shape, pads_begin and pads_end inputs must have the same shape. Got: ",
                          block_shape,
                          ", ",
                          pads_begin_shape,
                          " and ",
                          pads_end_shape);
    NODE_VALIDATION_CHECK(op,
                          inputs_same_ps.rank().compatible(1),
                          "block_shape and pads inputs must have rank 1. Got: ",
                          inputs_same_ps.rank());

    if (data_rank.is_static()) {
        NODE_VALIDATION_CHECK(op,
                              (data_shape.size() >= 2),
                              "The data tensor with rank lower than 2 is not supported (data rank: ",
                              data_shape.size(),
                              ")");
        std::vector<int64_t> block_val, pads_begin_val, pads_end_val;
        auto& output_shape = output_shapes[0];
        output_shape.resize(data_shape.size());
        // Exact output dims can only be computed when all three auxiliary inputs are constant.
        if (get_data_as_int64<T>(1, op, block_val, constant_data) &&
            get_data_as_int64<T>(2, op, pads_begin_val, constant_data) &&
            get_data_as_int64<T>(3, op, pads_end_val, constant_data)) {
            got_const_data = true;
            // The batch axis grows by the product of all block values.
            int64_t block_prod = std::accumulate(begin(block_val), end(block_val), 1, std::multiplies<int64_t>());
            output_shape[0] = data_shape[0] * static_cast<ValType>(block_prod);
            for (size_t idx = 1; idx < output_shape.size(); ++idx) {
                NODE_VALIDATION_CHECK(op, block_val[idx] > 0, "block_shape values must be greater than 0");
                if (data_shape[idx].is_dynamic() && data_shape[idx] == ov::Dimension::dynamic()) {
                    // A fully unbounded dimension stays fully unbounded.
                    output_shape[idx] = ov::Dimension::dynamic();
                } else {
                    // Each non-batch axis is padded, then divided by its block value;
                    // the division must be exact, which check_divided_result enforces.
                    const auto divided =
                        data_shape[idx] + static_cast<ValType>((pads_begin_val[idx] + pads_end_val[idx]));
                    const auto divisor = static_cast<ValType>(block_val[idx]);
                    output_shape[idx] = divided / divisor;
                    check_divided_result(op, output_shape[idx], divided, divisor);
                }
            }
        }
    }

    if (!got_const_data)
        // For PartialShape, Set the output to be dynamic;
        // For StaticShape, throw error caused by implicitly constructing StaticShape with PartialShape argument;
        output_shapes[0] = ov::PartialShape::dynamic(data_rank);
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -0,0 +1,57 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <openvino/core/validation_util.hpp>
#include <openvino/op/space_to_depth.hpp>
#include <openvino/opsets/opset1.hpp>
#include "utils.hpp"
namespace ov {
namespace op {
namespace v0 {
// Shape inference for v0::SpaceToDepth.
// input_shapes: [data]; output_shapes: one element.
// The channel axis (1) is multiplied by block_size^(spatial dims) and every spatial
// axis is divided by block_size; axis 0 (batch) is forwarded unchanged.
// T is a shape type — ov::PartialShape for dynamic inference, or a compatible
// static-shape class.
template <class T>
void shape_infer(const ov::op::v0::SpaceToDepth* op,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes) {
    // Scalar/dimension value type of a single dimension of T (int64_t for PartialShape).
    using ValType = typename std::iterator_traits<typename T::iterator>::value_type::value_type;
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1);
    const auto& data_shape = input_shapes[0];
    const ov::Rank data_rank = data_shape.rank();
    if (data_rank.is_static()) {
        NODE_VALIDATION_CHECK(op,
                              !(data_shape.size() < 3),
                              "The input tensor with rank lower than 3 is not supported (input rank: ",
                              data_shape.size(),
                              ")");
        const auto& block_size = op->get_block_size();
        // Error message fixed: was the garbled "must begreater then 0".
        NODE_VALIDATION_CHECK(op, block_size > 0, "The block size must be greater than 0 ", block_size);
        // Compute block_size^(rank - 2) in pure integer arithmetic. The previous
        // std::pow-based version routed the exponentiation through floating point,
        // which can lose precision for large block sizes and needed <cmath>.
        ValType multiplier = 1;
        for (size_t i = 0; i < data_shape.size() - 2; ++i) {
            multiplier *= static_cast<ValType>(block_size);
        }

        auto& out_shape = output_shapes[0];
        out_shape.resize(data_shape.size());
        out_shape[0] = data_shape[0];
        out_shape[1] = data_shape[1] * multiplier;
        const auto divisor = static_cast<ValType>(block_size);
        // Every spatial axis must divide exactly; check_divided_result reports failures.
        for (size_t i = 2; i < out_shape.size(); i++) {
            out_shape[i] = data_shape[i] / divisor;
            check_divided_result(op, out_shape[i], data_shape[i], divisor);
        }
    } else {
        // For PartialShape, Set the output to be dynamic;
        // For StaticShape, will throw error caused by implicitly constructing StaticShape with PartialShape argument;
        output_shapes[0] = ov::PartialShape::dynamic(data_rank);
    }
}
} // namespace v0
} // namespace op
} // namespace ov

View File

@ -119,4 +119,36 @@ inline bool get_data_as_shape<ov::PartialShape>(
} else {
return ov::evaluate_as_partial_shape(op->input_value(idx), shape);
}
}
// Validate the result of a dimension division: `res` is `divided / divisor`, and an
// inexact division is reported as a node validation failure.
// Generic version: detects failure by comparing `res` with a default-constructed T.
// NOTE(review): assumes T{} compares equal to the "failed division" value for every
// static dimension type used with this helper — confirm when adding new T types.
template <class T>
inline void check_divided_result(const ov::Node* op,
                                 const T& res,
                                 const T& divided,
                                 const typename T::value_type& divisor) {
    NODE_VALIDATION_CHECK(op,
                          res != T{},
                          "Dimension value: [ ",
                          divided.get_min_length(),
                          ", ",
                          divided.get_max_length(),
                          "]",
                          " must be a multiple of divisor: ",
                          divisor);
}
// ov::Dimension specialization: failure is detected via an empty underlying
// interval rather than comparison with a default-constructed Dimension.
template <>
inline void check_divided_result<ov::Dimension>(const ov::Node* op,
                                                const ov::Dimension& res,
                                                const ov::Dimension& divided,
                                                const typename ov::Dimension::value_type& divisor) {
    NODE_VALIDATION_CHECK(op,
                          !res.get_interval().empty(),
                          "Dimension value: [ ",
                          divided.get_min_length(),
                          ", ",
                          divided.get_max_length(),
                          "]",
                          " must be a multiple of divisor: ",
                          divisor);
}

View File

@ -41,6 +41,15 @@ Dimension Dimension::operator-(const Dimension& dim) const {
return Dimension(m_dimension - dim.m_dimension);
}
// Divide a dimension interval by a scalar: the lower bound rounds up, the upper
// bound rounds down, so the result holds the values v with v * divisor inside the
// original interval. A fully dynamic dimension stays dynamic.
Dimension Dimension::operator/(const value_type divisor) const {
    // Fixed: the previous check accepted divisor == 0 (while the message already
    // promised "greater than 0"), which caused integer division by zero below.
    // Negative divisors are not supported.
    OPENVINO_ASSERT(divisor > 0, "divisor must be greater than 0");
    if (m_dimension.get_max_val() == Interval::s_max && m_dimension.get_min_val() == 0)
        return Dimension::dynamic();
    return Dimension((m_dimension.get_min_val() + divisor - 1) / divisor, m_dimension.get_max_val() / divisor);
}
// Multiply two dimension intervals; delegates to the underlying Interval's operator*.
Dimension Dimension::operator*(const Dimension& dim) const {
    return Dimension(m_dimension * dim.m_dimension);
}

View File

@ -4,6 +4,7 @@
#include "ngraph/op/batch_to_space.hpp"
#include <batch_to_space_shape_inference.hpp>
#include <cmath>
#include <cstddef>
#include <memory>
@ -61,103 +62,13 @@ void op::v1::BatchToSpace::validate_and_infer_types() {
"block_shape and crops inputs must have integer element type. Got: ",
inputs_integer_et);
const ov::PartialShape& data_pshape = get_input_partial_shape(0);
const ov::PartialShape& block_shape_ps = get_input_partial_shape(1);
const ov::PartialShape& crops_begin_ps = get_input_partial_shape(2);
const ov::PartialShape& crops_end_ps = get_input_partial_shape(3);
ov::PartialShape inputs_same_ps{ov::PartialShape::dynamic()};
NODE_VALIDATION_CHECK(this,
ov::PartialShape::merge_into(inputs_same_ps, crops_begin_ps) &&
ov::PartialShape::merge_into(inputs_same_ps, crops_end_ps) &&
ov::PartialShape::merge_into(inputs_same_ps, block_shape_ps),
"block_shape, crops_begin and crops_end inputs must have the same shape. Got: ",
block_shape_ps,
", ",
crops_begin_ps,
" and ",
crops_end_ps);
const Rank inputs_rank_one = inputs_same_ps.rank();
NODE_VALIDATION_CHECK(this,
inputs_rank_one.compatible(1),
"block_shape and crops inputs must have rank 1. Got: ",
inputs_rank_one);
const Rank data_rank = data_pshape.rank();
if (data_rank.is_static()) {
NODE_VALIDATION_CHECK(this,
(data_rank.get_length() >= 2),
"data input must have rank greater or equal than 2. Got: ",
data_rank.get_length());
if (inputs_same_ps.is_static()) {
NODE_VALIDATION_CHECK(this,
data_rank.get_length() == inputs_same_ps[0].get_length(),
"block_shape and crop inputs must have same number of elements "
"as data input rank. Got: ",
inputs_same_ps[0],
" and ",
data_rank);
}
}
const auto block_const = get_constant_from_source(input_value(1));
const auto crops_begin_const = get_constant_from_source(input_value(2));
const auto crops_end_const = get_constant_from_source(input_value(3));
if (block_const && crops_begin_const && crops_end_const && data_pshape.is_static()) {
const ov::Shape& data_sshape = data_pshape.to_shape();
auto block_val = block_const->cast_vector<int64_t>();
auto crops_begin_val = crops_begin_const->cast_vector<int64_t>();
auto crops_end_val = crops_end_const->cast_vector<int64_t>();
bool block_vals_valid = std::all_of(begin(block_val), end(block_val), [](int64_t elem) {
return elem >= 1;
});
NODE_VALIDATION_CHECK(this, block_vals_valid, "Elements of block_shape input must be greater or equal to one.");
bool crops_begin_vals_valid = std::all_of(begin(crops_begin_val), end(crops_begin_val), [](int64_t elem) {
return elem >= 0;
});
bool crops_end_vals_valid = std::all_of(begin(crops_end_val), end(crops_end_val), [](int64_t elem) {
return elem >= 0;
});
NODE_VALIDATION_CHECK(this,
crops_begin_vals_valid && crops_end_vals_valid,
"Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
int64_t block_prod = std::accumulate(begin(block_val), end(block_val), 1, std::multiplies<int64_t>());
NODE_VALIDATION_CHECK(this,
data_sshape[0] % block_prod == 0,
"The input data's 'batch' axis size: ",
data_sshape[0],
" must be a multiple of",
" product of block_shape values: ",
block_prod);
for (size_t idx = 0; idx < data_sshape.size(); idx++) {
const bool is_valid_crops_and_shape =
crops_begin_val[idx] + crops_end_val[idx] <= block_val[idx] * static_cast<int64_t>(data_sshape[idx]);
NODE_VALIDATION_CHECK(this,
is_valid_crops_and_shape,
"crops_begin[i] + crops_end[i] must be less or equal to "
"block_shape[i] * input_shape[i]");
}
ov::Shape output_sshape = {static_cast<size_t>(data_sshape[0] / block_prod)};
for (size_t idx = 1; idx < data_sshape.size(); ++idx) {
output_sshape.push_back(
static_cast<size_t>(data_sshape[idx] * block_val[idx] - crops_begin_val[idx] - crops_end_val[idx]));
}
set_output_size(1);
set_output_type(0, data_et, output_sshape);
} else {
set_output_type(0, data_et, ov::PartialShape::dynamic(data_rank));
}
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
const std::vector<ov::PartialShape> input_shapes = {get_input_partial_shape(0),
get_input_partial_shape(1),
get_input_partial_shape(2),
get_input_partial_shape(3)};
shape_infer(this, input_shapes, output_shapes);
set_output_type(0, data_et, output_shapes[0]);
}
std::shared_ptr<ngraph::Node> ngraph::op::v1::BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const {

View File

@ -6,6 +6,7 @@
#include <cmath>
#include <cstddef>
#include <depth_to_space_shape_inference.hpp>
#include <memory>
#include <ngraph/op/constant.hpp>
#include <ngraph/ops.hpp>
@ -45,42 +46,12 @@ std::shared_ptr<Node> op::DepthToSpace::clone_with_new_inputs(const OutputVector
void op::DepthToSpace::validate_and_infer_types() {
NGRAPH_OP_SCOPE(v0_DepthToSpace_validate_and_infer_types);
ov::PartialShape data_pshape = get_input_partial_shape(0);
const auto& data_type = get_input_element_type(0);
auto data = input_value(0);
if (data_pshape.is_static()) {
const auto& data_shape = data.get_shape();
NODE_VALIDATION_CHECK(this,
!(data_shape.size() < 3),
"The input tensor with rank lower than 3 is not supported (input rank: ",
data_shape.size(),
")");
auto divider = std::pow(m_blocksize, data_shape.size() - 2);
NODE_VALIDATION_CHECK(this, (divider), "DepthToSpace: The divider must not be 0");
NODE_VALIDATION_CHECK(this,
m_blocksize > 0 && !(data_shape[1] % m_blocksize),
"DepthToSpace: The input data's 'channels' axis size: ",
data_shape[1],
" must be a equivalent to 'block_size'^'spatial_dims': ",
divider);
auto out_shape = data_shape;
out_shape[1] /= divider;
for (size_t i = 2; i < out_shape.size(); i++) {
out_shape[i] *= m_blocksize;
}
set_output_size(1);
set_output_type(0, data_type, out_shape);
} else {
set_output_type(0, data_type, ov::PartialShape::dynamic(data_pshape.rank()));
}
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
const std::vector<ov::PartialShape> input_shapes = {get_input_partial_shape(0)};
shape_infer(this, input_shapes, output_shapes);
set_output_type(0, data_type, output_shapes[0]);
}
namespace {

View File

@ -9,6 +9,7 @@
#include <memory>
#include <ngraph/validation_util.hpp>
#include <numeric>
#include <space_to_batch_shape_inference.hpp>
#include "itt.hpp"
#include "ngraph/builder/make_constant.hpp"
@ -37,7 +38,6 @@ ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output<ngraph::Node>& d
void op::v1::SpaceToBatch::validate_and_infer_types() {
NGRAPH_OP_SCOPE(v1_SpaceToBatch_validate_and_infer_types);
ov::PartialShape data_pshape = get_input_partial_shape(0);
const auto& data_type = get_input_element_type(0);
const auto& block_shape_type = get_input_element_type(1);
const auto& pads_begin_type = get_input_element_type(2);
@ -60,54 +60,13 @@ void op::v1::SpaceToBatch::validate_and_infer_types() {
"pads_end must be an integral number but got (",
pads_end_type,
").");
auto data = input_value(0);
auto block = input_value(1);
auto pads_begin = input_value(2);
auto pads_end = input_value(3);
const auto& block_const = get_constant_from_source(block);
const auto& pads_begin_const = get_constant_from_source(pads_begin);
const auto& pads_end_const = get_constant_from_source(pads_end);
if (block_const && pads_begin_const && pads_end_const && data_pshape.is_static()) {
const auto& data_shape = data.get_shape();
NODE_VALIDATION_CHECK(this,
(data_shape.size() >= 2),
"The data tensor with rank lower than 2 is not supported (data rank: ",
data_shape.size(),
")");
auto block_val = block_const->cast_vector<int64_t>();
auto pads_begin_val = pads_begin_const->cast_vector<int64_t>();
auto pads_end_val = pads_end_const->cast_vector<int64_t>();
int64_t block_prod = 1;
for (long idx : block_val)
block_prod *= idx;
ov::Shape output_shape = {static_cast<size_t>(data_shape[0] * block_prod)};
for (size_t idx = 1; idx < data_shape.size(); ++idx) {
NODE_VALIDATION_CHECK(this, block_val.at(idx) > 0, "block_shape values must be greater than 0");
NODE_VALIDATION_CHECK(
this,
(pads_begin_val.at(idx) + data_shape.at(idx) + pads_end_val.at(idx)) % block_val.at(idx) == 0,
"The dimension on position: ",
idx,
" equal to: ",
pads_begin_val.at(idx) + data_shape.at(idx) + pads_end_val.at(idx),
" must be a multiple of block_values[i]: ",
block_val.at(idx));
output_shape.push_back(static_cast<size_t>(pads_begin_val[idx] + data_shape[idx] + pads_end_val[idx]) /
block_val[idx]);
}
set_output_size(1);
set_output_type(0, data_type, output_shape);
} else {
set_output_type(0, data_type, ov::PartialShape::dynamic(data_pshape.rank()));
}
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
const std::vector<ov::PartialShape> input_shapes = {get_input_partial_shape(0),
get_input_partial_shape(1),
get_input_partial_shape(2),
get_input_partial_shape(3)};
shape_infer(this, input_shapes, output_shapes);
set_output_type(0, data_type, output_shapes[0]);
}
std::shared_ptr<Node> ngraph::op::v1::SpaceToBatch::clone_with_new_inputs(const OutputVector& new_args) const {

View File

@ -8,6 +8,7 @@
#include <cstddef>
#include <memory>
#include <numeric>
#include <space_to_depth_shape_inference.hpp>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
@ -46,43 +47,12 @@ std::shared_ptr<Node> ov::op::v0::SpaceToDepth::clone_with_new_inputs(const Outp
void ngraph::op::v0::SpaceToDepth::validate_and_infer_types() {
NGRAPH_OP_SCOPE(v0_SpaceToDepth_validate_and_infer_types);
ov::PartialShape data_pshape = get_input_partial_shape(0);
const auto& data_type = get_input_element_type(0);
auto data = input_value(0);
if (data_pshape.is_static()) {
const auto& data_shape = data.get_shape();
NODE_VALIDATION_CHECK(this,
!(data_shape.size() < 3),
"The input tensor with rank lower than 3 is not supported (input rank: ",
data_shape.size(),
")");
auto multiplier = std::pow(m_blocksize, data_shape.size() - 2);
auto out_shape = data_shape;
out_shape[1] *= multiplier;
for (size_t i = 2; i < out_shape.size(); i++) {
NODE_VALIDATION_CHECK(this,
m_blocksize > 0 && !(out_shape[i] % m_blocksize),
"The dimension on position: ",
i,
" equal to: ",
out_shape[i],
" must be a multiple of m_blocksize: ",
m_blocksize);
out_shape[i] /= m_blocksize;
}
set_output_size(1);
set_output_type(0, data_type, out_shape);
} else {
set_output_type(0, data_type, ov::PartialShape::dynamic(data_pshape.rank()));
}
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
const std::vector<ov::PartialShape> input_shapes = {get_input_partial_shape(0)};
shape_infer(this, input_shapes, output_shapes);
set_output_type(0, data_type, output_shapes[0]);
}
namespace {

View File

@ -27,8 +27,7 @@ TEST(onnx_importer, exception_msg_ngraph_error) {
// Should have thrown, so fail if it didn't
FAIL() << "ONNX Importer did not detected incorrect model!";
} catch (const ngraph_error& e) {
EXPECT_HAS_SUBSTRING(e.what(), std::string("While validating ONNX node '<Node(DepthToSpace)"));
EXPECT_HAS_SUBSTRING(e.what(), std::string("While validating node 'v0::DepthToSpace"));
EXPECT_HAS_SUBSTRING(e.what(), std::string("must be a multiple of divisor"));
} catch (...) {
FAIL() << "The ONNX model importer failed for unexpected reason";
}

View File

@ -10,6 +10,7 @@
using namespace std;
using namespace ngraph;
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
namespace {
constexpr size_t data_input_idx = 0;
@ -275,10 +276,8 @@ TEST(type_prop, batch_to_space_incompatible_block_shape_input_values_with_data_s
try {
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
FAIL() << "Incompatible data shape and block_shape input values not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(
error.what(),
"The input data's 'batch' axis size: 80 must be a multiple of product of block_shape values: 50");
} catch (const ov::Exception& error) {
EXPECT_HAS_SUBSTRING(error.what(), "[ 80, 80] must be a multiple of divisor: 50");
} catch (...) {
FAIL() << "Data shape and block_shape input values check failed for unexpected reason";
}
@ -340,6 +339,37 @@ TEST(type_prop, batch_to_space_output_shape_5D) {
ASSERT_EQ(batch_to_space->get_shape(), (Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
}
// Static batch (960) divides exactly by the block product (6 * 5 * 16); interval
// dims are scaled by their block values and reduced by the crops, and the axis
// with block 1 and no crops passes through unchanged.
TEST(type_prop, batch_to_space_output_dynamicshape_5D_when_batch_is_static) {
    auto data = make_shared<op::Parameter>(element::f32, PartialShape{960, {2, 20}, {12, 14}, {100, 150}, {10, 20}});
    auto block_shape = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
    auto crops_begin = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
    auto crops_end = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
    auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

    ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
              (PartialShape{960 / (6 * 5 * 16),
                            {2 * 6 - 2 - 2, 20 * 6 - 2 - 2},
                            {12 * 5 - 1, 14 * 5 - 1},
                            {100, 150},
                            {10 * 16, 20 * 16}}));
}
// Interval batch axis: the lower bound divides by the block product with round-up
// and the upper bound with round-down (matching Dimension::operator/).
TEST(type_prop, batch_to_space_output_dynamicshape_5D_when_batch_is_dynamic) {
    auto data =
        make_shared<op::Parameter>(element::f32, PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}});
    auto block_shape = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
    auto crops_begin = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
    auto crops_end = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
    auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

    ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
              (PartialShape{{DIV_ROUND_UP(959, (6 * 5 * 16)), 962 / (6 * 5 * 16)},
                            {2 * 6 - 2 - 2, 34 * 6 - 2 - 2},
                            {9 * 5 - 1, 21 * 5 - 1},
                            {100, 162},
                            {1 * 16, 1999 * 16}}));
}
TEST(type_prop, batch_to_space_and_space_to_batch) {
auto data = make_shared<op::Parameter>(element::f32, Shape{4800, 9, 11, 2});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});

View File

@ -9,6 +9,29 @@
using namespace std;
using namespace ngraph;
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
// Static depth (24) divides exactly by block_size^(spatial dims) = 2^3 = 8;
// every spatial interval dim doubles.
TEST(type_prop, depth_to_space_output_dynamicshape_block_first_5D_when_depth_is_static) {
    auto A = make_shared<op::Parameter>(element::f32, PartialShape{{2, 10}, 24, {3, 7}, {423, 3000}, {235, 1345}});
    // Renamed from the misleading `space_to_depth`: the node under test is DepthToSpace.
    auto depth_to_space = make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);

    ASSERT_EQ(depth_to_space->get_output_partial_shape(0),
              (PartialShape{{2, 10}, 3, {3 * 2, 7 * 2}, {423 * 2, 3000 * 2}, {235 * 2, 1345 * 2}}));
}
// Interval depth axis divided by block_size^(spatial dims) = 3^3 = 27: the lower
// bound rounds up and the upper bound rounds down; spatial dims triple.
TEST(type_prop, depth_to_space_output_dynamicshape_block_first_5D_when_depth_is_dynamic) {
    auto A =
        make_shared<op::Parameter>(element::f32, PartialShape{{2, 10}, {81, 82}, {3, 7}, {423, 3000}, {235, 1345}});
    // Renamed from the misleading `space_to_depth`: the node under test is DepthToSpace.
    auto depth_to_space = make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 3);

    ASSERT_EQ(depth_to_space->get_output_partial_shape(0),
              (PartialShape{{2, 10},
                            {DIV_ROUND_UP(81, 27), 82 / 27},
                            {3 * 3, 7 * 3},
                            {423 * 3, 3000 * 3},
                            {235 * 3, 1345 * 3}}));
}
TEST(type_prop, depth_to_space_output_shape_block_first_4D) {
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 128, 8, 8});
auto space_to_depth = make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 8);
@ -67,9 +90,7 @@ TEST(type_prop, depth_to_space_blocksize_not_matched) {
auto space_to_depth = make_shared<op::DepthToSpace>(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
FAIL() << "Not matched blocksize for DepthToSpace exception not thrown";
} catch (const ngraph_error& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"DepthToSpace: The input data's 'channels' axis size: 7"
" must be a equivalent to 'block_size'^'spatial_dims': 4");
EXPECT_HAS_SUBSTRING(error.what(), "Dimension value: [ 7, 7] must be a multiple of divisor: 4");
} catch (...) {
FAIL() << "DepthToSpace decomposition failed for unexpected reason";
}

View File

@ -9,6 +9,8 @@
using namespace std;
using namespace ngraph;
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
TEST(type_prop, space_to_batch_output_shape_2D) {
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 128});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5});
@ -62,6 +64,34 @@ TEST(type_prop, space_to_batch_and_batch_to_space) {
ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3}));
}
// Interval batch axis scales by the block product; static spatial axes are padded
// (pads_begin + pads_end) and then divide exactly by their block values.
TEST(type_prop, space_to_batch_when_space_is_static) {
    auto data = make_shared<op::Parameter>(element::f32, PartialShape{{2, 5}, 100, 1024, 3});
    auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
    auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
    auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
    auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

    ASSERT_EQ(
        space_to_batch->get_output_partial_shape(0),
        (PartialShape{{2 * 12 * 100 * 2, 5 * 12 * 100 * 2}, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));
}
// SpaceToBatch type-prop with fully interval input dims: for each divided
// spatial dim the lower bound rounds the division up (DIV_ROUND_UP) and the
// upper bound rounds down; the batch dim is multiplied by the block product.
TEST(type_prop, space_to_batch_when_space_is_dynamic) {
    auto data = make_shared<op::Parameter>(element::f32, PartialShape{{2, 5}, {5, 100}, {100, 1024}, {3, 10}});
    auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2});
    auto pads_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1});
    auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0});
    auto space_to_batch = make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
    // Each interval bound is (dim + pads_begin + pads_end) / block.
    ASSERT_EQ(space_to_batch->get_output_partial_shape(0),
              (PartialShape{{2 * 12 * 100 * 2, 5 * 12 * 100 * 2},
                            {DIV_ROUND_UP((5 + 5 + 3), 12), (100 + 5 + 3) / 12},
                            {DIV_ROUND_UP((100 + 38 + 38), 100), (1024 + 38 + 38) / 100},
                            {DIV_ROUND_UP((3 + 1), 2), (10 + 1) / 2}}));
}
TEST(type_prop, space_to_batch_dynamic_shape_static_rank) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(4));
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1});

View File

@ -9,6 +9,8 @@
using namespace std;
using namespace ngraph;
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
TEST(type_prop, space_to_depth_output_shape_block_first_4D) {
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 64, 64});
const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
@ -45,6 +47,27 @@ TEST(type_prop, space_to_depth_output_shape_depth_first_5D) {
ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2}));
}
// SpaceToDepth type-prop: interval batch/channel dims, static spatial dims.
TEST(type_prop, space_to_depth_output_shape_when_space_is_static) {
    const auto input = make_shared<op::Parameter>(element::f32, PartialShape{{1, 4}, {12, 36}, 1080, 1616});
    const auto depth_first = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST;

    const auto s2d = make_shared<op::SpaceToDepth>(input, depth_first, 2);

    ASSERT_EQ(s2d->get_element_type(), element::f32);
    // Channels grow by block_size^2 (two spatial dims); each spatial dim shrinks by block_size.
    const PartialShape expected{{1, 4}, {12 * 4, 36 * 4}, 1080 / 2, 1616 / 2};
    ASSERT_EQ(s2d->get_output_partial_shape(0), expected);
}
// SpaceToDepth type-prop with interval spatial dims: the lower bound of each
// divided dim rounds up (DIV_ROUND_UP), the upper bound rounds down.
TEST(type_prop, space_to_depth_output_shape_when_space_is_dynamic) {
    auto A = make_shared<op::Parameter>(element::f32, PartialShape{{1, 4}, {12, 36}, {100, 1081}, {99, 1616}});
    const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST;
    auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 2);
    ASSERT_EQ(space_to_depth->get_element_type(), element::f32);
    // Channels scale by block_size^2; each spatial interval divides by block_size.
    ASSERT_EQ(
        space_to_depth->get_output_partial_shape(0),
        (PartialShape{{1, 4}, {12 * 4, 36 * 4}, {DIV_ROUND_UP(100, 2), 1081 / 2}, {DIV_ROUND_UP(99, 2), 1616 / 2}}));
}
TEST(type_prop, space_to_depth_dynamic_shape_static_rank) {
auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(4));
const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
@ -81,8 +104,7 @@ TEST(type_prop, space_to_depth_blocksize_not_matched) {
auto space_to_depth = make_shared<op::SpaceToDepth>(A, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 4);
FAIL() << "Not matched blocksize SpaceToDepth exception not thrown";
} catch (const ngraph_error& error) {
EXPECT_HAS_SUBSTRING(error.what(),
"The dimension on position: 3 equal to: 7 must be a multiple of m_blocksize: 4");
EXPECT_HAS_SUBSTRING(error.what(), "Dimension value: [ 7, 7] must be a multiple of divisor: 4");
} catch (...) {
FAIL() << "SpaceToDepth decomposition failed for unexpected reason";
}

View File

@ -38,6 +38,10 @@
#include "shape_inference.hpp"
#include "shape_nodes.hpp"
#include "fake_quantize.hpp"
#include "batch_to_space_shape_inference.hpp"
#include "depth_to_space_shape_inference.hpp"
#include "space_to_batch_shape_inference.hpp"
#include "space_to_depth_shape_inference.hpp"
#include "experimental_detectron_detection_output_shape_inference.hpp"
#include "bucketize_shape_inference.hpp"
#include "embedding_segments_sum_shape_inference.hpp"
@ -230,6 +234,14 @@ void shape_inference(ov::Node* op,
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset1::ShuffleChannels>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset2::BatchToSpace>(op)) {
shape_infer(node, input_shapes, output_shapes, constant_data);
} else if (auto node = ov::as_type<ov::opset2::SpaceToBatch>(op)) {
shape_infer(node, input_shapes, output_shapes, constant_data);
} else if (auto node = ov::as_type<ov::opset1::DepthToSpace>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset1::SpaceToDepth>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset4::Broadcast>(op)) {
shape_infer(node, input_shapes, output_shapes, constant_data);
} else if (auto node = ov::as_type<ov::opset1::Broadcast>(op)) {

View File

@ -41,6 +41,19 @@ StaticDimension& StaticDimension::operator*=(const StaticDimension& dim) {
return (*this = *this * dim);
}
/// \brief Divide a static dimension by a positive divisor.
/// \param divisor Right operand; must be strictly positive.
/// \return The quotient when the dimension divides evenly, otherwise a
///         default-constructed (empty) StaticDimension.
StaticDimension StaticDimension::operator/(const value_type divisor) const {
    // Fix: the original checked `divisor >= 0`, which contradicts the message
    // and lets divisor == 0 reach the modulo below (division by zero is UB).
    OPENVINO_ASSERT(divisor > 0, "divisor must be greater than 0");
    if (m_dimension % divisor) {
        // Not evenly divisible -> no valid resulting static dimension.
        return StaticDimension{};
    }
    return StaticDimension(m_dimension / divisor);
}
/// \brief Divide-assign; delegates to operator/ and stores the result.
StaticDimension& StaticDimension::operator/=(const value_type divisor) {
    *this = *this / divisor;
    return *this;
}
/// \brief Intersection of two static dimensions: equal dimensions intersect to
///        themselves, otherwise the result is the empty dimension (0).
StaticDimension StaticDimension::operator&(const StaticDimension& dim) const {
    if (*this == dim) {
        return dim;
    }
    return 0;
}

View File

@ -60,6 +60,8 @@ public:
StaticDimension& operator+=(const StaticDimension& dim);
StaticDimension& operator*=(const StaticDimension& dim);
StaticDimension& operator&=(const StaticDimension& dim);
StaticDimension operator/(const value_type divisor) const;
StaticDimension &operator/=(const value_type divisor);
private:
value_type m_dimension = 0;

View File

@ -127,7 +127,7 @@ TEST_F(TransformationTestsF, NegativeBatchToSpaceFusionInvalidMode) {
TEST_F(TransformationTestsF, NegativeBatchToSpaceFusionInvalidRank) {
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8, 8});
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{16, 3, 4, 8, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,
@ -139,7 +139,7 @@ TEST_F(TransformationTestsF, NegativeBatchToSpaceFusionInvalidRank) {
manager.register_pass<pass::BatchToSpaceFusion>();
}
{
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{12, 3, 4, 8, 8});
auto data = std::make_shared<opset6::Parameter>(element::f32, Shape{16, 3, 4, 8, 8});
auto trans_before = std::make_shared<opset6::Transpose>(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4}));
auto depth_to_space = std::make_shared<opset6::DepthToSpace>(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
auto slice = std::make_shared<opset6::StridedSlice>(depth_to_space,

View File

@ -17,7 +17,7 @@ namespace builder {
namespace subgraph {
std::shared_ptr<Function> TransformationsAfterSplitFunction::get(const std::string transformationName) {
const auto input = std::make_shared<opset1::Parameter>(element::u8, Shape{ 1, 3, 16, 16 });
const auto input = std::make_shared<opset1::Parameter>(element::u8, Shape{ 1, 9, 16, 16 });
const size_t outputSize = 2ul;
const auto axis = opset1::Constant::create(element::i64, Shape{}, { 2 });
@ -68,7 +68,7 @@ std::shared_ptr<Node> TransformationsAfterSplitFunction::getLayerByTransformatio
}
if (transformationName == "ConvolutionTransformation") {
const auto dequantizationOnData = makeDequantization(parent, { {element::f32}, {}, { 0.1f } });
const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 3, 1, 1 }, { 2 });
const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 9, 1, 1 }, { 2 });
const auto dequantizationOnWeights = makeDequantization(weights, { {element::f32}, {}, {0.3f} });
return std::make_shared<opset1::Convolution>(
dequantizationOnData,
@ -80,7 +80,7 @@ std::shared_ptr<Node> TransformationsAfterSplitFunction::getLayerByTransformatio
}
if (transformationName == "AsymmetricConvolutionTransformation") {
const auto dequantizationOnData = makeDequantization(parent, { {element::f32}, { 128.f }, { 0.1f } });
const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 3, 1, 1 }, { 2 });
const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 9, 1, 1 }, { 2 });
const auto dequantizationOnWeights = makeDequantization(weights, { {element::f32}, {}, {0.3f} });
return std::make_shared<opset1::Convolution>(
dequantizationOnData,

View File

@ -0,0 +1,79 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <openvino/core/coordinate_diff.hpp>
#include <openvino/op/batch_to_space.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/constant.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include <utils/shape_inference/static_shape.hpp>
using namespace ov;
// Builds a BatchToSpace node whose four inputs are Parameters with the given
// (by default dynamic) partial shapes, so values can be fed via constant_data.
static std::shared_ptr<op::v1::BatchToSpace> make_batch_to_space(
    PartialShape data_shape = PartialShape::dynamic(ov::Rank(2)),
    PartialShape block_shape = PartialShape::dynamic(),
    PartialShape crops_begin_shape = PartialShape::dynamic(),
    PartialShape crops_end_shape = PartialShape::dynamic()) {
    const auto data_param = std::make_shared<ov::op::v0::Parameter>(element::f32, data_shape);
    const auto block_param = std::make_shared<ov::op::v0::Parameter>(element::i32, block_shape);
    const auto begin_param = std::make_shared<ov::op::v0::Parameter>(element::i32, crops_begin_shape);
    const auto end_param = std::make_shared<ov::op::v0::Parameter>(element::i32, crops_end_shape);
    return std::make_shared<op::v1::BatchToSpace>(data_param, block_param, begin_param, end_param);
}
// BatchToSpace static shape inference when block/crops values are supplied as
// HostTensor constant data instead of Constant nodes.
TEST(StaticShapeInferenceTest, BatchToSpaceWithHostTensorData) {
    auto space_to_batch = make_batch_to_space();  // NOTE: the node under test is BatchToSpace despite the local name
    int32_t block_val[] = {1, 6, 5, 1, 16};
    int32_t pads_begin_val[] = {0, 2, 0, 0, 0};
    int32_t pads_end_val[] = {0, 2, 1, 0, 0};
    auto block = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{5}, block_val);
    auto crops_begin = std::make_shared<ngraph::runtime::HostTensor>(element::i32, ov::Shape{5}, pads_begin_val);
    auto crops_end = std::make_shared<ngraph::runtime::HostTensor>(element::i32, ov::Shape{5}, pads_end_val);
    const std::vector<StaticShape> input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}};
    std::vector<StaticShape> output_shapes = {{}};
    // Constant data keyed by input port: 1 = block_shape, 2 = crops_begin, 3 = crops_end.
    std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
    constant_data[1] = block;
    constant_data[2] = crops_begin;
    constant_data[3] = crops_end;
    shape_inference(space_to_batch.get(), input_shapes, output_shapes, constant_data);
    // batch / prod(block); each spatial dim: dim * block - crops_begin - crops_end.
    ASSERT_EQ(output_shapes[0], (StaticShape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
}
// Only block and crops_end are provided as constant data; crops_begin is
// missing, so static shape inference must fail.
TEST(StaticShapeInferenceTest, BatchToSpaceWithMissingTensorData) {
    const auto batch_to_space = make_batch_to_space();

    int32_t block_val[] = {1, 6, 5, 1, 16};
    int32_t pads_end_val[] = {0, 2, 1, 0, 0};
    const auto block =
        std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{5}, block_val);
    const auto crops_end = std::make_shared<ngraph::runtime::HostTensor>(element::i32, ov::Shape{5}, pads_end_val);

    const std::vector<StaticShape> input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}};
    std::vector<StaticShape> output_shapes = {{}};

    std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
    constant_data.emplace(1, block);
    constant_data.emplace(3, crops_end);

    EXPECT_THROW(shape_inference(batch_to_space.get(), input_shapes, output_shapes, constant_data),
                 NodeValidationFailure);
}
// All block/crops inputs are Constant nodes, so no constant_data map is needed.
TEST(StaticShapeInferenceTest, batch_to_space_output_with_const_inputs) {
    const auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, ov::PartialShape{-1, -1, -1, -1});
    const auto block_shape =
        std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{1, 10, 5, 1});
    const auto crops_begin =
        std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
    const auto crops_end =
        std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
    const auto batch_to_space = std::make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);

    std::vector<StaticShape> input_shapes = {{100, 7, 13, 3}, {4}, {4}, {4}};
    std::vector<StaticShape> output_shapes = {{}};
    shape_inference(batch_to_space.get(), input_shapes, output_shapes);

    // batch / prod(block); spatial dims: dim * block - crops_begin - crops_end.
    ASSERT_EQ(output_shapes[0], (StaticShape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
}

View File

@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <openvino/op/depth_to_space.hpp>
#include <openvino/op/parameter.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include <utils/shape_inference/static_shape.hpp>
// DepthToSpace static shape inference, DEPTH_FIRST mode, block size 2.
TEST(StaticShapeInferenceTest, DepthToSpaceTest) {
    const auto param =
        std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic(ov::Rank(4)));
    const auto d2s =
        std::make_shared<ov::op::v0::DepthToSpace>(param, ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);

    const std::vector<ov::StaticShape> input_shapes = {ov::StaticShape{1, 16, 3, 1080, 1616}};
    std::vector<ov::StaticShape> output_shapes = {ov::StaticShape{}};
    shape_inference(d2s.get(), input_shapes, output_shapes);

    // Channels shrink by block^3 (three spatial dims); each spatial dim grows by block.
    ASSERT_EQ(output_shapes[0], (ov::StaticShape{1, 2, 2 * 3, 2 * 1080, 2 * 1616}));
}

View File

@ -0,0 +1,87 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <openvino/core/coordinate_diff.hpp>
#include <openvino/op/constant.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/space_to_batch.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include <utils/shape_inference/static_shape.hpp>
using namespace ov;
// Builds a SpaceToBatch node whose four inputs are Parameters with the given
// (by default dynamic) partial shapes, so values can be fed via constant_data.
static std::shared_ptr<op::v1::SpaceToBatch> build_space_to_batch(
    PartialShape data_shape = PartialShape::dynamic(ov::Rank(2)),
    PartialShape block_shape = PartialShape::dynamic(),
    PartialShape pads_begin_shape = PartialShape::dynamic(),
    PartialShape pad_end_shape = PartialShape::dynamic()) {
    const auto data_param = std::make_shared<ov::op::v0::Parameter>(element::f32, data_shape);
    const auto block_param = std::make_shared<ov::op::v0::Parameter>(element::i32, block_shape);
    const auto begin_param = std::make_shared<ov::op::v0::Parameter>(element::i32, pads_begin_shape);
    const auto end_param = std::make_shared<ov::op::v0::Parameter>(element::i32, pad_end_shape);
    return std::make_shared<op::v1::SpaceToBatch>(data_param, block_param, begin_param, end_param);
}
// SpaceToBatch static shape inference with block/pads supplied via HostTensors.
TEST(StaticShapeInferenceTest, SpaceToBatchTest) {
    auto space_to_batch = build_space_to_batch();
    int32_t block_val[] = {1, 6, 5, 1, 16};
    int32_t pads_begin_val[] = {0, 2, 0, 0, 0};
    int32_t pads_end_val[] = {0, 2, 1, 0, 0};
    auto block = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{5}, block_val);
    auto pads_begin = std::make_shared<ngraph::runtime::HostTensor>(element::i32, ov::Shape{5}, pads_begin_val);
    auto pads_end = std::make_shared<ngraph::runtime::HostTensor>(element::i32, ov::Shape{5}, pads_end_val);
    const std::vector<StaticShape> input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
    std::vector<StaticShape> output_shapes = {{}};
    // Constant data keyed by input port: 1 = block_shape, 2 = pads_begin, 3 = pads_end.
    std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
    constant_data[1] = block;
    constant_data[2] = pads_begin;
    constant_data[3] = pads_end;
    shape_inference(space_to_batch.get(), input_shapes, output_shapes, constant_data);
    // batch * prod(block); each spatial dim: (dim + pads_begin + pads_end) / block.
    ASSERT_EQ(output_shapes[0], (StaticShape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16}));
}
// No constant data at all: block/pads values are unknown, so inference throws.
TEST(StaticShapeInferenceTest, SpaceToBatchThrowExceptionWithoutHostTensorData) {
    const auto space_to_batch = build_space_to_batch();

    const std::vector<StaticShape> input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
    std::vector<StaticShape> output_shapes = {{}};

    EXPECT_THROW(shape_inference(space_to_batch.get(), input_shapes, output_shapes), NodeValidationFailure);
}
// Block values are supplied but pads_begin/pads_end constant data is missing,
// so static shape inference must fail with NodeValidationFailure.
TEST(StaticShapeInferenceTest, SpaceToBatchThrowExceptionWithMissingPadsHostTensorData) {
    auto space_to_batch = build_space_to_batch();

    int32_t block_val[] = {1, 6, 5, 1, 16};
    auto block = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::Type_t::i32, ov::Shape{5}, block_val);

    std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data;
    constant_data[1] = block;

    const std::vector<StaticShape> input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}};
    std::vector<StaticShape> output_shapes = {{}};

    // Fix: pass the partially-filled constant_data so the test actually
    // exercises the "pads data missing" path. The original call omitted
    // constant_data entirely, making this test a duplicate of the
    // no-host-tensor-data case above.
    EXPECT_THROW(shape_inference(space_to_batch.get(), input_shapes, output_shapes, constant_data),
                 NodeValidationFailure);
}
// All block/pads inputs are Constant nodes, so no constant_data map is needed.
TEST(StaticShapeInferenceTest, space_to_batch_output_with_const_inputs) {
    const auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, ov::PartialShape{-1, -1, -1, -1});
    const auto block_shape =
        std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{1, 12, 100, 2});
    const auto pads_begin =
        std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{0, 3, 38, 1});
    const auto pads_end =
        std::make_shared<ov::op::v0::Constant>(element::i64, ov::Shape{4}, std::vector<int64_t>{0, 5, 38, 0});
    const auto space_to_batch = std::make_shared<ov::op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);

    std::vector<StaticShape> input_shapes = {{2, 100, 1024, 3}, {4}, {4}, {4}};
    std::vector<StaticShape> output_shapes = {{}};
    shape_inference(space_to_batch.get(), input_shapes, output_shapes);

    // batch * prod(block); each spatial dim: (dim + pads_begin + pads_end) / block.
    ASSERT_EQ(output_shapes[0],
              (StaticShape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2}));
}

View File

@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <openvino/op/parameter.hpp>
#include <openvino/op/space_to_depth.hpp>
#include <utils/shape_inference/shape_inference.hpp>
#include <utils/shape_inference/static_shape.hpp>
// SpaceToDepth static shape inference, DEPTH_FIRST mode, block size 2.
TEST(StaticShapeInferenceTest, SpaceToDepthTest) {
    const auto param =
        std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic(ov::Rank(4)));
    const auto s2d =
        std::make_shared<ov::op::v0::SpaceToDepth>(param, ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2);

    const std::vector<ov::StaticShape> input_shapes = {ov::StaticShape{1, 12, 4, 1080, 1616}};
    std::vector<ov::StaticShape> output_shapes = {ov::StaticShape{}};
    shape_inference(s2d.get(), input_shapes, output_shapes);

    // Channels grow by block^3 (three spatial dims); each spatial dim shrinks by block.
    ASSERT_EQ(output_shapes[0], (ov::StaticShape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2}));
}