Fix incomprehensible error message during layout conversion when the layout rank does not match the shape rank

This commit is contained in:
Mikhail Nosov 2021-12-08 14:55:36 +03:00
parent 82415f00d8
commit 37064741b2
5 changed files with 46 additions and 7 deletions

View File

@ -19,7 +19,7 @@ class Layout;
namespace layout { namespace layout {
std::vector<int64_t> find_permutation(const Layout& src_layout, const Rank& src_shape_rank, const Layout& dst_layout); std::vector<int64_t> find_permutation(const Layout& src_layout, const PartialShape& src_shape, const Layout& dst_layout);
Layout apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims); Layout apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims);
} // namespace layout } // namespace layout
@ -88,7 +88,7 @@ private:
friend Layout layout::apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims); friend Layout layout::apply_permutation(const Layout& src_layout, const std::vector<uint64_t>& dims);
friend std::vector<int64_t> layout::find_permutation(const Layout& src_layout, friend std::vector<int64_t> layout::find_permutation(const Layout& src_layout,
const Rank& src_shape_rank, const PartialShape& src_shape,
const Layout& dst_layout); const Layout& dst_layout);
}; };

View File

@ -274,7 +274,8 @@ Layout apply_permutation(const Layout& src_layout, const std::vector<uint64_t>&
return res; return res;
} }
std::vector<int64_t> find_permutation(const Layout& src_layout, const Rank& rank, const Layout& dst) { std::vector<int64_t> find_permutation(const Layout& src_layout, const PartialShape& src_shape, const Layout& dst) {
auto rank = src_shape.rank();
auto check_trivial = [](std::vector<int64_t>& res) -> std::vector<int64_t>& { auto check_trivial = [](std::vector<int64_t>& res) -> std::vector<int64_t>& {
size_t i = 0; size_t i = 0;
while (i < res.size() && res[i] == i) { while (i < res.size() && res[i] == i) {
@ -326,10 +327,21 @@ std::vector<int64_t> find_permutation(const Layout& src_layout, const Rank& rank
auto dst_static = to_static(dst, rank); auto dst_static = to_static(dst, rank);
OPENVINO_ASSERT(src_static.m_left_size == dst_static.m_left_size, OPENVINO_ASSERT(src_static.m_left_size == dst_static.m_left_size,
"Conversion is not supported for layouts with different sizes"); "Conversion is not supported for layouts with different sizes");
OPENVINO_ASSERT(rank.is_dynamic() || src_static.m_left_size == rank.get_length(),
"Conversion layout ",
src_layout.to_string(),
" <-> ",
dst.to_string(),
" failure. Layout is not consistent with input shape ",
src_shape,
". Layout length ",
src_static.m_left_size,
" shall match with input shape rank ",
rank.get_length());
std::vector<int64_t> res(src_static.m_left_size, -1); std::vector<int64_t> res(src_static.m_left_size, -1);
if (src_static.m_names.size() > dst_static.m_names.size()) { if (src_static.m_names.size() > dst_static.m_names.size()) {
// find inverted permutation from least specified layout to most one // find inverted permutation from least specified layout to most one
auto inverted = find_permutation(dst_static, rank, src_static); auto inverted = find_permutation(dst_static, src_shape, src_static);
if (inverted.empty()) { if (inverted.empty()) {
return {}; return {};
} }

View File

@ -399,7 +399,7 @@ std::shared_ptr<Function> PrePostProcessor::build() {
param->get_layout() != input->get_tensor_data()->get_layout()) { param->get_layout() != input->get_tensor_data()->get_layout()) {
// Find transpose between model and tensor layouts and update tensor shape // Find transpose between model and tensor layouts and update tensor shape
auto net_to_tensor = auto net_to_tensor =
layout::find_permutation(param->get_layout(), net_shape.rank(), input->get_tensor_data()->get_layout()); layout::find_permutation(param->get_layout(), net_shape, input->get_tensor_data()->get_layout());
if (!net_to_tensor.empty()) { if (!net_to_tensor.empty()) {
std::vector<ov::Dimension> dims(new_param_shape.size()); std::vector<ov::Dimension> dims(new_param_shape.size());
std::transform(net_to_tensor.begin(), net_to_tensor.end(), dims.begin(), [&](int64_t v) { std::transform(net_to_tensor.begin(), net_to_tensor.end(), dims.begin(), [&](int64_t v) {

View File

@ -174,7 +174,7 @@ void PreStepsList::add_convert_layout_impl(const Layout& layout) {
"Can't convert layout for multi-plane input. Suggesting to convert current image to " "Can't convert layout for multi-plane input. Suggesting to convert current image to "
"RGB/BGR color format using 'convert_color'"); "RGB/BGR color format using 'convert_color'");
Layout dst_layout = layout.empty() ? context.target_layout() : layout; Layout dst_layout = layout.empty() ? context.target_layout() : layout;
auto permutation = layout::find_permutation(context.layout(), nodes[0].get_partial_shape().rank(), dst_layout); auto permutation = layout::find_permutation(context.layout(), nodes[0].get_partial_shape(), dst_layout);
if (permutation.empty()) { if (permutation.empty()) {
// No transpose is needed, just update layout // No transpose is needed, just update layout
if (!layout.empty()) { if (!layout.empty()) {
@ -430,7 +430,7 @@ void PostStepsList::add_convert_impl(const element::Type& type) {
void PostStepsList::add_convert_layout_impl(const Layout& layout) { void PostStepsList::add_convert_layout_impl(const Layout& layout) {
m_actions.emplace_back([layout](const Output<Node>& node, PostprocessingContext& context) { m_actions.emplace_back([layout](const Output<Node>& node, PostprocessingContext& context) {
Layout dst_layout = layout.empty() ? context.target_layout() : layout; Layout dst_layout = layout.empty() ? context.target_layout() : layout;
auto permutation = layout::find_permutation(context.layout(), node.get_partial_shape().rank(), dst_layout); auto permutation = layout::find_permutation(context.layout(), node.get_partial_shape(), dst_layout);
if (permutation.empty()) { if (permutation.empty()) {
// No transpose is needed, just update layout // No transpose is needed, just update layout
if (!layout.empty()) { if (!layout.empty()) {

View File

@ -534,6 +534,33 @@ TEST(pre_post_process, reuse_model_layout_no_tensor_info) {
EXPECT_EQ(f->get_parameters().front()->get_layout(), "NC??"); EXPECT_EQ(f->get_parameters().front()->get_layout(), "NC??");
} }
// Verifies that converting between layouts whose rank exceeds the input shape rank
// produces a readable error mentioning both layouts and the offending shape.
TEST(pre_post_process, set_layout_out_of_bounds) {
    auto shape = PartialShape{Dimension::dynamic(), 3, 2, 1};  // rank 4
    std::stringstream shape_str;
    shape_str << shape;
    auto f = create_simple_function(element::f32, shape);
    Layout from{"N???C"};  // rank 5 - inconsistent with the 4-D input shape
    Layout to{"NC???"};    // rank 5 - inconsistent with the 4-D input shape
    // TODO: replace with EXPECT_THAT(..., HasSubstr(...)) after upgrade gtest to v1.11
    try {
        auto p = PrePostProcessor(f);
        p.input().tensor().set_layout(from);
        p.input().model().set_layout(to);
        f = p.build();
        FAIL() << "Layout conversion shall throw";
    } catch (const ov::Exception& err) {
        const std::string msg = err.what();  // convert once, reuse below
        // Verify that error message mentions both tensor and model layouts
        EXPECT_TRUE(msg.find(from.to_string()) != std::string::npos) << msg;
        EXPECT_TRUE(msg.find(to.to_string()) != std::string::npos) << msg;
        // Verify that error message contains the textual form of the input shape
        EXPECT_TRUE(msg.find(shape_str.str()) != std::string::npos) << msg;
    } catch (...) {
        FAIL() << "Expected ov::Exception";
    }
}
TEST(pre_post_process, reuse_model_layout_tensor_info) { TEST(pre_post_process, reuse_model_layout_tensor_info) {
auto f = create_simple_function(element::u8, PartialShape{Dimension::dynamic(), 3, 2, 1}); auto f = create_simple_function(element::u8, PartialShape{Dimension::dynamic(), 3, 2, 1});
f->get_parameters().front()->set_layout("NC??"); f->get_parameters().front()->set_layout("NC??");