Partial shape serialization and deserialization in nGraph (#8859)

* Added support for partial shape serialization and deserialization.

* Small correction.

* Resolved conflicts.

* Clang-format.

* Fixed checks.

* Added partial shape and dimension support in the visitor.

* Code style.

* Code style.

* Fixed IR reader parameter extender for partial shape.

* Moved IR reader changes to MO PR.

* Added rank check.

* Fixed dimension test.

* Code refactoring.

* Undo refactor.

* Code style.

* Removed unnecessary code changes from CNN network.

* Extended visitor tests for PartialShape.

* Code style.

* Removed code duplications, added dynamic rank test.

* Code style.

* Added negative tests for deserialization.

* Aligned with changes on master.

* Changed ov::Function to ov::Model.

* Fixed tests.

* Small fix.

* Fixed tests.

* Code style.
Anastasia Popova 2021-12-14 00:09:45 +03:00 committed by GitHub
parent 28756d6ccf
commit c39dba62b0
20 changed files with 754 additions and 89 deletions
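For orientation before the per-file diffs: with this change, bounded and fully dynamic dimensions round-trip through IR v11, and a Parameter's <data> element carries the shape as a string such as shape="1,3,100..200,120..320". Below is a minimal serialization sketch assembled from the tests in this commit; the output paths are placeholders and the include paths are best-effort assumptions.

#include <memory>

#include <openvino/core/model.hpp>
#include <openvino/opsets/opset8.hpp>
#include <openvino/pass/manager.hpp>
#include <openvino/pass/serialize.hpp>

int main() {
    // Bounded dynamic dimensions; expected to serialize as "1,3,100..200,120..320".
    ov::PartialShape pshape{1, 3, ov::Dimension(100, 200), ov::Dimension(120, 320)};
    auto data = std::make_shared<ov::opset8::Parameter>(ov::element::f16, pshape);
    auto add = std::make_shared<ov::opset8::Add>(data, data);
    auto result = std::make_shared<ov::opset8::Result>(add);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{data});

    // Serialize to IR; the partial shape ends up in the <data shape="..."> attribute.
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Serialize>("model.xml", "model.bin");
    manager.run_passes(model);
    return 0;
}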


@ -0,0 +1,378 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <inference_engine.hpp>
#include <memory>
#include <string>
#include "manager.hpp"
#include "graph_comparator.hpp"
#include "ie_blob.h"
#include "ngraph/op/parameter.hpp"
#include "ngraph/type/element_type.hpp"
#include "ngraph/variant.hpp"
#include "openvino/core/preprocess/input_tensor_info.hpp"
#include "openvino/runtime/core.hpp"
class PartialShapeDeserialization : public testing::Test {
protected:
std::shared_ptr<ngraph::Function> getWithIRFrontend(const std::string& model) {
std::istringstream modelStringStream(model);
std::istream& modelStream = modelStringStream;
ov::frontend::FrontEnd::Ptr FE;
ov::frontend::InputModel::Ptr inputModel;
ov::AnyVector params{&modelStream};
FE = manager.load_by_model(params);
if (FE)
inputModel = FE->load(params);
if (inputModel)
return FE->convert(inputModel);
return nullptr;
}
private:
ov::frontend::FrontEndManager manager;
};
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestCase1) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="1,3,100..200,120..320"/>
<output>
<port id="0" precision="FP16" names="input_tensor">
<dim>1</dim>
<dim>3</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer name="Round" id="1" type="Round" version="opset8">
<data mode="half_to_even"/>
<input>
<port id="1" precision="FP16">
<dim>1</dim>
<dim>3</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP16" names="output_tensor">
<dim>1</dim>
<dim>3</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset8">
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>3</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";
auto f = getWithIRFrontend(model);
ASSERT_NE(nullptr, f);
ov::PartialShape shape{1, 3, ov::Dimension(100, 200), ov::Dimension(120, 320)};
auto type = ov::element::f16;
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"});
auto round = std::make_shared<ov::opset8::Round>(param, ov::opset8::Round::RoundMode::HALF_TO_EVEN);
round->set_friendly_name("Round");
round->get_output_tensor(0).set_names({"output_tensor"});
auto result = std::make_shared<ov::opset8::Result>(round);
result->set_friendly_name("output");
auto f_11_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
f_11_ref->set_friendly_name("Network");
const auto fc = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
auto res = fc.compare(f, f_11_ref);
EXPECT_TRUE(res.valid) << res.message;
}
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestCase2) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="1,?,..200,120.."/>
<output>
<port id="0" precision="FP16" names="input_tensor">
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer name="Round" id="1" type="Round" version="opset8">
<data mode="half_to_even"/>
<input>
<port id="1" precision="FP16">
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP16" names="output_tensor">
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset8">
<input>
<port id="0" precision="FP16">
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";
auto f = getWithIRFrontend(model);
ASSERT_NE(nullptr, f);
ov::PartialShape shape{1, ov::Dimension(), ov::Dimension(0, 200), ov::Dimension(120, -1)};
auto type = ov::element::f16;
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"});
auto round = std::make_shared<ov::opset8::Round>(param, ov::opset8::Round::RoundMode::HALF_TO_EVEN);
round->set_friendly_name("Round");
round->get_output_tensor(0).set_names({"output_tensor"});
auto result = std::make_shared<ov::opset8::Result>(round);
result->set_friendly_name("output");
auto f_11_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
f_11_ref->set_friendly_name("Network");
const auto fc = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
auto res = fc.compare(f, f_11_ref);
EXPECT_TRUE(res.valid) << res.message;
}
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRank) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="..."/>
<output>
<port id="0" precision="FP16" names="input_tensor">
</port>
</output>
</layer>
<layer name="Round" id="1" type="Round" version="opset8">
<data mode="half_to_even"/>
<input>
<port id="1" precision="FP16">
</port>
</input>
<output>
<port id="2" precision="FP16" names="output_tensor">
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset8">
<input>
<port id="0" precision="FP16">
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";
auto f = getWithIRFrontend(model);
ASSERT_NE(nullptr, f);
ov::PartialShape shape = ov::PartialShape::dynamic();
auto type = ov::element::f16;
auto param = std::make_shared<ov::opset8::Parameter>(type, shape);
param->set_friendly_name("in1");
param->get_output_tensor(0).set_names({"input_tensor"});
auto round = std::make_shared<ov::opset8::Round>(param, ov::opset8::Round::RoundMode::HALF_TO_EVEN);
round->set_friendly_name("Round");
round->get_output_tensor(0).set_names({"output_tensor"});
auto result = std::make_shared<ov::opset8::Result>(round);
result->set_friendly_name("output");
auto f_11_ref = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
f_11_ref->set_friendly_name("Network");
const auto fc = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::PRECISIONS)
.enable(FunctionsComparator::RUNTIME_KEYS)
.enable(FunctionsComparator::NAMES)
.enable(FunctionsComparator::CONST_VALUES);
auto res = fc.compare(f, f_11_ref);
EXPECT_TRUE(res.valid) << res.message;
}
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRankNegative) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="...,..."/>
<output>
<port id="0" precision="FP16" names="input_tensor">
</port>
</output>
</layer>
<layer name="output" type="Result" id="1" version="opset8">
<input>
<port id="0" precision="FP16">
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</net>
)V0G0N";
ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception);
}
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicDimNegative) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="1,...,2"/>
<output>
<port id="0" precision="FP16" names="input_tensor">
</port>
</output>
</layer>
<layer name="output" type="Result" id="1" version="opset8">
<input>
<port id="0" precision="FP16">
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</net>
)V0G0N";
ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception);
}
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongDim) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="1s,2"/>
<output>
<port id="0" precision="FP16" names="input_tensor">
</port>
</output>
</layer>
<layer name="output" type="Result" id="1" version="opset8">
<input>
<port id="0" precision="FP16">
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</net>
)V0G0N";
ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception);
}
TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongBoundary) {
std::string model = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset8">
<data element_type="f16" shape="1..g,2"/>
<output>
<port id="0" precision="FP16" names="input_tensor">
</port>
</output>
</layer>
<layer name="output" type="Result" id="1" version="opset8">
<input>
<port id="0" precision="FP16">
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
</edges>
</net>
)V0G0N";
ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception);
}


@ -0,0 +1,117 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <file_utils.h>
#include <gtest/gtest.h>
#include <common_test_utils/file_utils.hpp>
#include <openvino/core/preprocess/pre_post_process.hpp>
#include <openvino/opsets/opset8.hpp>
#include <transformations/rt_info/attributes.hpp>
#include "manager.hpp"
#include "common_test_utils/ngraph_test_utils.hpp"
#include "ngraph/ngraph.hpp"
#include "transformations/serialize.hpp"
class PartialShapeSerializationTest : public CommonTestUtils::TestsCommon {
protected:
std::string test_name = GetTestName() + "_" + GetTimestamp();
std::string m_out_xml_path = test_name + ".xml";
std::string m_out_bin_path = test_name + ".bin";
void TearDown() override {
CommonTestUtils::removeIRFiles(m_out_xml_path, m_out_bin_path);
}
std::shared_ptr<ngraph::Function> getWithIRFrontend(const std::string& model_path,
const std::string& weights_path) {
ov::frontend::FrontEnd::Ptr FE;
ov::frontend::InputModel::Ptr inputModel;
ov::AnyVector params{model_path, weights_path};
FE = manager.load_by_model(params);
if (FE)
inputModel = FE->load(params);
if (inputModel)
return FE->convert(inputModel);
return nullptr;
}
private:
ov::frontend::FrontEndManager manager;
};
TEST_F(PartialShapeSerializationTest, pshape_serialize) {
auto check_shape = [](const ov::PartialShape& shape1, ov::PartialShape shape2) {
ASSERT_EQ(shape1.rank().is_dynamic(), shape2.rank().is_dynamic());
if (shape1.rank().is_dynamic())
return;
ASSERT_EQ(shape1.size(), shape2.size());
for (auto i = 0; i < shape1.size(); i++) {
auto dim1 = shape1[i];
auto dim2 = shape2[i];
ASSERT_EQ(dim1.is_dynamic(), dim2.is_dynamic());
ASSERT_EQ(dim1.get_min_length(), dim2.get_min_length());
ASSERT_EQ(dim1.get_max_length(), dim2.get_max_length());
}
};
std::shared_ptr<ov::Model> function;
{
auto pshape = ov::PartialShape{-1, ov::Dimension(-1, 20), ov::Dimension(10, -1), ov::Dimension(2, 100)};
auto data = std::make_shared<ov::opset8::Parameter>(ov::element::Type_t::f32, pshape);
auto add = std::make_shared<ov::opset8::Add>(data, data);
auto result = std::make_shared<ov::opset8::Result>(add);
function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{data});
}
ov::pass::Manager m;
m.register_pass<ov::pass::Serialize>(m_out_xml_path, m_out_bin_path);
m.run_passes(function);
auto f = getWithIRFrontend(m_out_xml_path, m_out_bin_path);
ASSERT_NE(nullptr, f);
auto pshape_ref = ov::PartialShape{-1, ov::Dimension(-1, 20), ov::Dimension(10, -1), ov::Dimension(2, 100)};
check_shape(f->get_parameters()[0]->get_partial_shape(), pshape_ref);
}
TEST_F(PartialShapeSerializationTest, pshape_serialize_dynamic_rank) {
auto check_shape = [](const ov::PartialShape& shape1, ov::PartialShape shape2) {
ASSERT_EQ(shape1.rank().is_dynamic(), shape2.rank().is_dynamic());
if (shape1.rank().is_dynamic())
return;
ASSERT_EQ(shape1.size(), shape2.size());
for (auto i = 0; i < shape1.size(); i++) {
auto dim1 = shape1[i];
auto dim2 = shape2[i];
ASSERT_EQ(dim1.is_dynamic(), dim2.is_dynamic());
ASSERT_EQ(dim1.get_min_length(), dim2.get_min_length());
ASSERT_EQ(dim1.get_max_length(), dim2.get_max_length());
}
};
std::shared_ptr<ov::Model> function;
{
auto pshape = ov::PartialShape::dynamic();
auto data = std::make_shared<ov::opset8::Parameter>(ov::element::Type_t::f32, pshape);
auto add = std::make_shared<ov::opset8::Add>(data, data);
auto result = std::make_shared<ov::opset8::Result>(add);
function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{data});
}
ov::pass::Manager m;
m.register_pass<ov::pass::Serialize>(m_out_xml_path, m_out_bin_path);
m.run_passes(function);
auto f = getWithIRFrontend(m_out_xml_path, m_out_bin_path);
ASSERT_NE(nullptr, f);
auto pshape_ref = ov::PartialShape::dynamic();
check_shape(f->get_parameters()[0]->get_partial_shape(), pshape_ref);
}


@ -230,9 +230,9 @@ TEST(ConvertFunctionToCNNNetworkTests, UnsupportedDynamicOps) {
FAIL() << "InferenceEngine::Exception must be thrown";
} catch(InferenceEngine::Exception & e) {
EXPECT_THAT(e.what(), testing::HasSubstr(std::string("Unsupported dynamic ops: \n"
"v0::Parameter param () -> (f32?)\n"
"v0::Relu relu (param[0]:f32?) -> (f32?)\n"
"v3::NonZero non_zero (relu[0]:f32?) -> (i64{?,?})\n"
"v0::Parameter param () -> (f32...)\n"
"v0::Relu relu (param[0]:f32...) -> (f32...)\n"
"v3::NonZero non_zero (relu[0]:f32...) -> (i64{?,?})\n"
"v0::Result result (non_zero[0]:i64{?,?}) -> (i64{?,?})")));
}
}


@ -30,7 +30,7 @@ def test_dimension():
assert dim.is_dynamic
assert dim.get_min_length() == 5
assert dim.get_max_length() == 15
assert repr(dim) == "<Dimension: [5, 15]>"
assert repr(dim) == "<Dimension: 5..15>"
def test_dimension_comparisons():
@ -129,7 +129,7 @@ def test_partial_shape():
assert ps.rank == Dimension.dynamic()
assert list(ps.get_min_shape()) == []
assert list(ps.get_max_shape()) == []
assert repr(ps) == "<PartialShape: ?>"
assert repr(ps) == "<PartialShape: ...>"
ps = PartialShape.dynamic(r=Dimension(2))
assert not ps.is_static


@ -30,7 +30,7 @@ def test_dimension():
assert dim.is_dynamic
assert dim.get_min_length() == 5
assert dim.get_max_length() == 15
assert repr(dim) == "<Dimension: [5, 15]>"
assert repr(dim) == "<Dimension: 5..15>"
def test_dimension_comparisons():
@ -129,7 +129,7 @@ def test_partial_shape():
assert ps.rank == Dimension.dynamic()
assert list(ps.get_min_shape()) == []
assert list(ps.get_max_shape()) == []
assert repr(ps) == "<PartialShape: ?>"
assert repr(ps) == "<PartialShape: ...>"
ps = PartialShape.dynamic(r=Dimension(2))
assert not ps.is_static


@ -391,8 +391,13 @@ void CNNLayerCreator::on_adapter(const std::string& name,
params[name] = details::convertPrecision(type).name();
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<::ngraph::PartialShape>>(&adapter)) {
std::string dims;
auto shape = static_cast<::ngraph::PartialShape&>(*a);
auto shape = a->get();
if (shape.rank().is_dynamic()) {
IE_THROW() << "Error converting ngraph to CNN network. Dynamic rank is not supported.";
}
for (int64_t i = 0; i < shape.rank().get_length(); i++) {
if (shape[i].is_dynamic())
IE_THROW() << "Error converting ngraph to CNN network. Dynamic dimension is not supported.";
if (!dims.empty()) dims += ",";
dims += std::to_string(shape[i].get_length());
}


@ -172,21 +172,10 @@ OPENVINO_API
std::ostream& operator<<(std::ostream& str, const Dimension& dimension);
template <>
class OPENVINO_API AttributeAdapter<ov::Dimension> : public ValueAccessor<int64_t> {
class OPENVINO_API AttributeAdapter<ov::Dimension> : public DirectValueAccessor<ov::Dimension> {
public:
AttributeAdapter(ov::Dimension& value) : m_ref(value) {}
const int64_t& get() override;
void set(const int64_t& value) override;
operator ov::Dimension&() {
return m_ref;
}
AttributeAdapter(ov::Dimension& value) : DirectValueAccessor<ov::Dimension>(value) {}
OPENVINO_RTTI("AttributeAdapter<ov::Dimension>");
protected:
ov::Dimension& m_ref;
int64_t m_buffer{0};
bool m_buffer_valid{false};
};
} // namespace ov


@ -417,22 +417,11 @@ OPENVINO_API
std::ostream& operator<<(std::ostream& str, const PartialShape& shape);
template <>
class OPENVINO_API AttributeAdapter<ov::PartialShape> : public ValueAccessor<std::vector<int64_t>> {
class OPENVINO_API AttributeAdapter<ov::PartialShape> : public DirectValueAccessor<ov::PartialShape> {
public:
AttributeAdapter(ov::PartialShape& value) : m_ref(value) {}
const std::vector<int64_t>& get() override;
void set(const std::vector<int64_t>& value) override;
operator ov::PartialShape&() {
return m_ref;
}
AttributeAdapter(ov::PartialShape& value) : DirectValueAccessor<ov::PartialShape>(value) {}
OPENVINO_RTTI("AttributeAdapter<PartialShape>");
BWDCMP_RTTI_DECLARATION;
protected:
ov::PartialShape& m_ref;
std::vector<int64_t> m_buffer;
bool m_buffer_valid{false};
};
} // namespace ov


@ -14,8 +14,14 @@ using namespace ngraph;
std::ostream& ov::operator<<(std::ostream& str, const Dimension& dimension) {
if (dimension.is_static()) {
return str << dimension.get_length();
} else if (dimension.get_min_length() > 0) {
str << dimension.get_min_length() << "..";
if (dimension.get_interval().has_upper_bound())
return str << dimension.get_max_length();
else
return str;
} else if (dimension.get_interval().has_upper_bound()) {
return str << "[" << dimension.get_min_length() << ", " << dimension.get_max_length() << "]";
return str << ".." << dimension.get_max_length();
} else {
return str << "?";
}
@ -105,16 +111,3 @@ Dimension::value_type Dimension::get_max_length() const {
Dimension::value_type Dimension::get_min_length() const {
return dimension_length(m_dimension.get_min_val());
}
const int64_t& ov::AttributeAdapter<ov::Dimension>::get() {
if (!m_buffer_valid) {
m_buffer = m_ref.is_dynamic() ? -1 : m_ref.get_length();
m_buffer_valid = true;
}
return m_buffer;
}
void ov::AttributeAdapter<ov::Dimension>::set(const int64_t& value) {
m_ref = value == -1 ? ov::Dimension::dynamic() : Dimension(value);
m_buffer_valid = false;
}
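To summarize the new textual form implemented above: a static dimension prints its length, a fully dynamic one still prints "?", and bounded dimensions now print as min.., ..max, or min..max instead of the old [min, max] form. A small illustrative check (include path assumed):

#include <iostream>

#include <openvino/core/dimension.hpp>

int main() {
    std::cout << ov::Dimension(5) << "\n";       // static            -> "5"
    std::cout << ov::Dimension() << "\n";        // fully dynamic     -> "?"
    std::cout << ov::Dimension(5, 15) << "\n";   // both bounds       -> "5..15"
    std::cout << ov::Dimension(5, -1) << "\n";   // lower bound only  -> "5.."
    std::cout << ov::Dimension(-1, 15) << "\n";  // upper bound only  -> "..15"
    return 0;
}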


@ -135,7 +135,7 @@ std::ostream& ov::operator<<(std::ostream& str, const PartialShape& shape) {
}
return (str << "}");
} else {
return (str << "?");
return (str << "...");
}
}
@ -355,35 +355,4 @@ ov::Dimension& ov::PartialShape::operator[](size_t i) {
return m_dimensions[i];
}
const std::vector<int64_t>& ov::AttributeAdapter<ov::PartialShape>::get() {
if (!m_buffer_valid) {
m_buffer.clear();
if (m_ref.rank().is_dynamic()) {
m_buffer.push_back(-2);
} else {
for (int64_t i = 0; i < m_ref.rank().get_length(); ++i) {
const auto& elt = static_cast<const ov::PartialShape&>(m_ref)[i];
m_buffer.push_back(elt.is_dynamic() ? -1 : elt.get_length());
}
}
m_buffer_valid = true;
}
return m_buffer;
}
void ov::AttributeAdapter<ov::PartialShape>::set(const std::vector<int64_t>& value) {
m_ref = ov::PartialShape();
if (value.size() == 1 && value[0] == -2) {
m_ref = ov::PartialShape::dynamic();
} else {
std::vector<Dimension> dims;
dims.reserve(value.size());
for (auto elt : value) {
dims.push_back(elt == -1 ? Dimension::dynamic() : elt);
}
m_ref = ov::PartialShape(dims);
}
m_buffer_valid = false;
}
BWDCMP_RTTI_DEFINITION(ov::AttributeAdapter<ov::PartialShape>);


@ -452,6 +452,22 @@ public:
} else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::TypeVector>>(&adapter)) {
const auto& attrs = a->get();
m_xml_node.append_attribute(name.c_str()).set_value(join(attrs).c_str());
} else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<ov::PartialShape>>(&adapter)) {
const auto& attrs = a->get();
std::stringstream shape_str_stream;
shape_str_stream << attrs;
auto shape_str = shape_str_stream.str();
if (shape_str[0] == '{' && shape_str[shape_str.size() - 1] == '}')
shape_str = shape_str.substr(1, shape_str.size() - 2);
m_xml_node.append_attribute(name.c_str()).set_value(shape_str.c_str());
} else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<ov::Dimension>>(&adapter)) {
const auto& attrs = a->get();
std::stringstream dim_str_stream;
dim_str_stream << attrs;
auto dim_str = dim_str_stream.str();
if (dim_str[0] == '{' && dim_str[dim_str.size() - 1] == '}')
dim_str = dim_str.substr(1, dim_str.size() - 2);
m_xml_node.append_attribute(name.c_str()).set_value(dim_str.c_str());
} else {
throw ngraph_error("Unsupported attribute type for serialization: " + name);
}
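For context on the brace stripping above: operator<< for ov::PartialShape produces a form like {1,3,100..200,120..320} (or "..." for a dynamic rank), and the serializer drops the surrounding braces so that only the comma-separated dimensions land in the XML attribute. A minimal sketch of the same transformation (include path assumed):

#include <iostream>
#include <sstream>
#include <string>

#include <openvino/core/partial_shape.hpp>

int main() {
    ov::PartialShape shape{1, 3, ov::Dimension(100, 200), ov::Dimension(120, 320)};
    std::stringstream ss;
    ss << shape;                                 // "{1,3,100..200,120..320}"
    std::string text = ss.str();
    if (text.front() == '{' && text.back() == '}')
        text = text.substr(1, text.size() - 2);  // same stripping as the serializer above
    std::cout << text << "\n";                   // "1,3,100..200,120..320"
    return 0;
}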


@ -247,6 +247,7 @@ set(SRC
type_prop/variadic_split.cpp
type_prop_layers.cpp
visitors/partial_shape.cpp
visitors/dimension.cpp
visitors/user_op.cpp
visitors/value_map.cpp
visitors/op/acos.cpp


@ -799,6 +799,10 @@ void ReadAndStoreAttributes::on_adapter(const std::string& name, ngraph::ValueAc
} else if (auto variable_ptr =
ngraph::as_type<ngraph::AttributeAdapter<std::shared_ptr<ngraph::Variable>>>(&adapter)) {
insert(name, variable_ptr->get());
} else if (auto shape_ptr = ngraph::as_type<ngraph::AttributeAdapter<ov::PartialShape>>(&adapter)) {
insert(name, shape_ptr->get());
} else if (auto dim_ptr = ngraph::as_type<ngraph::AttributeAdapter<ov::Dimension>>(&adapter)) {
insert(name, dim_ptr->get());
} else {
m_read_result += "store attr [ ERR ]: " + name + " [drop `void` comparison which is '" +
adapter.get_type_info().name + "']";
@ -874,6 +878,10 @@ void ReadAndCompareAttributes::verify_others(const std::string& name, ngraph::Va
} else if (auto variable_ptr =
ngraph::as_type<ngraph::AttributeAdapter<std::shared_ptr<ngraph::Variable>>>(&adapter)) {
verify(name, variable_ptr->get());
} else if (auto shape_ptr = ngraph::as_type<ngraph::AttributeAdapter<ov::PartialShape>>(&adapter)) {
verify(name, shape_ptr->get());
} else if (auto dim_ptr = ngraph::as_type<ngraph::AttributeAdapter<ov::Dimension>>(&adapter)) {
verify(name, dim_ptr->get());
} else {
m_cmp_result += "compare attr [ ERR ]: " + name + " [drop `void` comparison which is '" +
adapter.get_type_info().name + "']";


@ -14,6 +14,7 @@
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/pass/pass.hpp"
#include "openvino/pass/serialize.hpp"
class FunctionsComparator {
public:
@ -425,7 +426,9 @@ class Storage : private AttributeStorage<MemoryChunk>,
private AttributeStorage<SubGraphOpInputDescription>,
private AttributeStorage<SubGraphOpOutputDescription>,
private AttributeStorage<ov::op::util::FrameworkNodeAttrs>,
private AttributeStorage<std::shared_ptr<ngraph::Variable>> {
private AttributeStorage<std::shared_ptr<ngraph::Variable>>,
private AttributeStorage<ov::PartialShape>,
private AttributeStorage<ov::Dimension> {
public:
template <typename AttrValue>
const AttributeStorage<AttrValue>& storage() const {
@ -458,7 +461,8 @@ public:
storage<SubGraphOpInputDescription>().get_attributes_number() +
storage<SubGraphOpOutputDescription>().get_attributes_number() +
storage<ov::op::util::FrameworkNodeAttrs>().get_attributes_number() +
storage<std::shared_ptr<ngraph::Variable>>().get_attributes_number();
storage<std::shared_ptr<ngraph::Variable>>().get_attributes_number() +
storage<ov::PartialShape>().get_attributes_number() + storage<ov::Dimension>().get_attributes_number();
}
};
@ -737,6 +741,22 @@ struct Equal<std::shared_ptr<Constant>> {
return false;
}
};
template <>
struct Equal<std::shared_ptr<ov::Dimension>> {
static bool equal_value(const std::shared_ptr<ov::Dimension>& dim1, const std::shared_ptr<ov::Dimension>& dim2) {
return dim1 == dim2;
}
};
template <>
struct Equal<std::shared_ptr<ov::PartialShape>> {
static bool equal_value(const std::shared_ptr<ov::PartialShape>& shape1,
const std::shared_ptr<ov::PartialShape>& shape2) {
return shape1 == shape2;
}
};
} // namespace equal
namespace str {
@ -800,6 +820,24 @@ struct Get<ov::op::util::FrameworkNodeAttrs, void> {
}
};
template <>
struct Get<ov::Dimension, void> {
static std::string value(const ov::Dimension& dim) {
std::stringstream dim_str;
dim_str << dim;
return dim_str.str();
}
};
template <>
struct Get<ov::PartialShape, void> {
static std::string value(const ov::PartialShape& shape) {
std::stringstream shape_str;
shape_str << shape;
return shape_str.str();
}
};
template <>
struct Get<std::shared_ptr<ngraph::Variable>, void> {
static std::string value(const std::shared_ptr<ngraph::Variable>& variable) {


@ -111,6 +111,12 @@ public:
virtual operator std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::InputDescription>>&() {
NGRAPH_CHECK(false, "Invalid type access");
}
virtual operator ov::PartialShape&() {
NGRAPH_CHECK(false, "Invalid type access");
}
virtual operator ov::Dimension&() {
NGRAPH_CHECK(false, "Invalid type access");
}
uint64_t get_index() {
return m_index;
}
@ -209,6 +215,10 @@ public:
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>>>>(&adapter)) {
a->set(m_values.get<std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>>>(name));
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ov::PartialShape>>(&adapter)) {
a->set(m_values.get<ov::PartialShape>(name));
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ov::Dimension>>(&adapter)) {
a->set(m_values.get<ov::Dimension>(name));
} else {
NGRAPH_CHECK(false, "Attribute \"", name, "\" cannot be unmarshalled");
}
@ -289,6 +299,10 @@ public:
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>>>>(&adapter)) {
m_values.insert_vector(name, a->get());
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ov::PartialShape>>(&adapter)) {
m_values.insert_vector(name, a->get());
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ov::Dimension>>(&adapter)) {
m_values.insert(name, a->get());
} else {
NGRAPH_CHECK(false, "Attribute \"", name, "\" cannot be marshalled");
}


@ -0,0 +1,53 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/opsets/opset3.hpp"
#include "ngraph/opsets/opset4.hpp"
#include "ngraph/opsets/opset5.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
using ngraph::test::ValueMap;
TEST(attributes, dimension) {
NodeBuilder builder;
AttributeVisitor& loader = builder.get_node_loader();
AttributeVisitor& saver = builder.get_node_saver();
Dimension dyn = Dimension(-1);
saver.on_attribute("dyn", dyn);
Dimension g_dyn;
loader.on_attribute("dyn", g_dyn);
EXPECT_EQ(dyn, g_dyn);
Dimension scalar = Dimension(10);
saver.on_attribute("scalar", scalar);
Dimension g_scalar;
loader.on_attribute("scalar", g_scalar);
EXPECT_EQ(scalar, g_scalar);
Dimension boundaries1 = Dimension(2, 100);
saver.on_attribute("boundaries1", boundaries1);
Dimension g_boundaries1;
loader.on_attribute("boundaries1", g_boundaries1);
EXPECT_EQ(boundaries1, g_boundaries1);
Dimension boundaries2 = Dimension(-1, 100);
saver.on_attribute("boundaries2", boundaries2);
Dimension g_boundaries2;
loader.on_attribute("boundaries2", g_boundaries2);
EXPECT_EQ(boundaries2, g_boundaries2);
Dimension boundaries3 = Dimension(5, -1);
saver.on_attribute("boundaries3", boundaries3);
Dimension g_boundaries3;
loader.on_attribute("boundaries3", g_boundaries3);
EXPECT_EQ(boundaries3, g_boundaries3);
}


@ -50,4 +50,16 @@ TEST(attributes, partial_shape) {
PartialShape g_general;
loader.on_attribute("general", g_general);
EXPECT_EQ(general, g_general);
PartialShape shape_with_boundaries{Dimension(2, 20)};
saver.on_attribute("shape_with_boundaries", shape_with_boundaries);
PartialShape g_shape_with_boundaries;
loader.on_attribute("shape_with_boundaries", g_shape_with_boundaries);
EXPECT_EQ(shape_with_boundaries, g_shape_with_boundaries);
PartialShape shape_with_undefined_boundaries{Dimension(10, -1), Dimension(-1, 100), Dimension::dynamic(), 4};
saver.on_attribute("shape_with_undefined_boundaries", shape_with_undefined_boundaries);
PartialShape g_shape_with_undefined_boundaries;
loader.on_attribute("shape_with_undefined_boundaries", g_shape_with_undefined_boundaries);
EXPECT_EQ(shape_with_undefined_boundaries, g_shape_with_undefined_boundaries);
}


@ -251,14 +251,16 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<
return;
if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::Type>>(&adapter)) {
static_cast<ngraph::element::Type&>(*a) = InferenceEngine::details::convertPrecision(val);
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::PartialShape>>(&adapter)) {
std::vector<int64_t> shape;
std::vector<ngraph::Dimension> dims;
if (!getParameters<int64_t>(m_node.child("data"), name, shape))
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<PartialShape>>(&adapter)) {
PartialShape shape;
if (!get_partial_shape_from_attribute(m_node.child("data"), name, shape))
return;
for (const auto& dim : shape)
dims.emplace_back(dim);
static_cast<ngraph::PartialShape&>(*a) = ngraph::PartialShape(dims);
a->set(shape);
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<Dimension>>(&adapter)) {
Dimension dim;
if (!get_dimension_from_attribute(m_node.child("data"), name, dim))
return;
a->set(dim);
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::Shape>>(&adapter)) {
std::vector<size_t> shape;
if (!getParameters<size_t>(m_node.child("data"), name, shape))


@ -22,4 +22,80 @@ bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::s
value = std::string(attr.value());
return true;
}
bool check_all_digits(const std::string& value) {
auto val = ov::util::trim(value);
for (const auto& c : val) {
if (!std::isdigit(c) || c == '-')
return false;
}
return true;
}
Dimension str_to_dimension(const std::string& value) {
auto val = ov::util::trim(value);
if (val == "?" || val == "-1") {
return {-1};
}
if (val.find("..") == std::string::npos) {
if (!check_all_digits(val))
IE_THROW() << "Cannot parse dimension: \"" << val << "\"";
return {stringToType<int64_t>(val)};
}
std::string min_value_str = val.substr(0, val.find(".."));
if (!check_all_digits(min_value_str))
IE_THROW() << "Cannot parse min bound: \"" << min_value_str << "\"";
int64_t min_value;
if (min_value_str.empty())
min_value = 0;
else
min_value = stringToType<int64_t>(min_value_str);
std::string max_value_str = val.substr(val.find("..") + 2);
int64_t max_value;
if (max_value_str.empty())
max_value = -1;
else
max_value = stringToType<int64_t>(max_value_str);
if (!check_all_digits(max_value_str))
IE_THROW() << "Cannot parse max bound: \"" << max_value_str << "\"";
return {min_value, max_value};
}
PartialShape str_to_partial_shape(const std::string& value) {
auto val = ov::util::trim(value);
if (val == "...") {
return PartialShape::dynamic();
}
PartialShape res;
std::stringstream ss(val);
std::string field;
while (getline(ss, field, ',')) {
if (field.empty())
IE_THROW() << "Cannot get vector of dimensions! \"" << val << "\" is incorrect";
res.insert(res.end(), str_to_dimension(field));
}
return res;
}
bool get_partial_shape_from_attribute(const pugi::xml_node& node, const std::string& name, PartialShape& value) {
std::string param;
if (!getStrAttribute(node, name, param))
return false;
value = str_to_partial_shape(param);
return true;
}
bool get_dimension_from_attribute(const pugi::xml_node& node, const std::string& name, Dimension& value) {
std::string param;
if (!getStrAttribute(node, name, param))
return false;
value = str_to_dimension(param);
return true;
}
} // namespace ov
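As a quick reference for the parsers added above, here is an illustrative mapping from IR v11 attribute strings to the parsed types. This is a sketch only; it assumes the str_to_dimension and str_to_partial_shape declarations above are visible to the translation unit.

void parse_examples() {
    ov::Dimension d1 = ov::str_to_dimension("7");         // static dimension 7
    ov::Dimension d2 = ov::str_to_dimension("?");         // fully dynamic
    ov::Dimension d3 = ov::str_to_dimension("120..");     // lower bound only: [120, inf)
    ov::Dimension d4 = ov::str_to_dimension("..200");     // upper bound only: [0, 200]
    ov::Dimension d5 = ov::str_to_dimension("100..200");  // both bounds

    ov::PartialShape p1 = ov::str_to_partial_shape("...");                    // dynamic rank
    ov::PartialShape p2 = ov::str_to_partial_shape("1,3,100..200,120..320");  // static rank, mixed dims

    // Malformed strings such as "1s,2" or "1..g,2" make the parsers throw,
    // which the negative deserialization tests earlier in this commit rely on.
}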


@ -5,6 +5,7 @@
#pragma once
#include <memory>
#include <openvino/core/partial_shape.hpp>
#include "openvino/core/type/element_type.hpp"
#include "xml_parse_utils.h"
@ -13,6 +14,10 @@ namespace ov {
void operator>>(const std::stringstream& in, ov::element::Type& type);
bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value);
Dimension str_to_dimension(const std::string& value);
PartialShape str_to_partial_shape(const std::string& value);
bool get_dimension_from_attribute(const pugi::xml_node& node, const std::string& name, Dimension& value);
bool get_partial_shape_from_attribute(const pugi::xml_node& node, const std::string& name, PartialShape& value);
template <class T>
void str_to_container(const std::string& value, T& res) {