[OV2.0+Bugfix] Layout serialization + Fix caching issues with auto-generated friendly names (#8153)

* Fix caching issues with auto-generated friendly names

Introduce "HashPass" to calculate a hash the same way Serialize does, but without auto-generated friendly names
IE Compilation context: use HashPass to calculate hash of ov::Function
IE Compilation context tests: removed "friendly names" from tests
Layout: serialization support + tests
Parameter/Result: update set/get_layout according to layout changes

* Fix clang

* Tiny correction of CMakeLists.txt

* Renamed VariantWrapper<Layout> to LayoutAttribute
Removed test for conversion from/to fully dynamic layout (allowed now and does nothing)
'set_layout' - remove layout runtime info if empty layout is passed. This allows hashes to be the same if set_layout(param->get_layout()) is called

* Fix build

* change size_t to uint64_t for hash value
Using size_t leads to high probability of conflict for 32-bit platforms

* Removed debug print

* Fix hash calculation for 32-bit platforms

* Fix review comment
This commit is contained in:
Mikhail Nosov 2021-10-27 21:05:06 +03:00 committed by GitHub
parent a2a8969201
commit 846d1e0467
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 353 additions and 139 deletions

View File

@ -18,7 +18,8 @@
#include "ie_itt.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "ngraph/variant.hpp"
#include "openvino/pass/serialize.hpp"
#include "openvino/pass/manager.hpp"
#include "transformations/hash.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
@ -29,7 +30,7 @@
namespace InferenceEngine {
template <typename T>
static std::size_t hash_combine(std::size_t seed, const T& a) {
static uint64_t hash_combine(uint64_t seed, const T& a) {
// Hash combine formula from boost
return seed ^ (std::hash<T>()(a) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}
@ -39,34 +40,10 @@ static int32_t as_int32_t(T v) {
return static_cast<int32_t>(v);
}
// Accumulating hash sink: a streambuf that discards the bytes written to it
// and keeps only a running additive hash. Lets Serialize-style passes "write"
// a model without materializing the output in memory.
class OstreamHashWrapper final : public std::streambuf {
    std::size_t m_res = 0;

public:
    // Hash of everything written so far.
    std::size_t getResult() const {
        return m_res;
    }

    // Consumes n chars from s, summing them into m_res.
    std::streamsize xsputn(const char* s, std::streamsize n) override {
        // Using 64-bit values executes much faster than char.
        // memcpy avoids the unaligned-load / strict-aliasing UB of casting
        // s to const int64_t*; compilers emit a plain load for it anyway.
        const std::streamsize n64 = n / static_cast<std::streamsize>(sizeof(std::int64_t));
        for (std::streamsize i = 0; i < n64; i++) {
            std::int64_t chunk;
            std::memcpy(&chunk, s + i * static_cast<std::streamsize>(sizeof(std::int64_t)), sizeof(chunk));
            m_res += chunk;
        }
        // Remaining tail (< 8 bytes) is added byte-by-byte.
        const std::streamsize rest = n % static_cast<std::streamsize>(sizeof(std::int64_t));
        for (std::streamsize i = 0; i < rest; i++) {
            m_res += s[n - rest + i];
        }
        return n;
    }
};
//////////////////////////////////////////////////
std::string NetworkCompilationContext::calculateFileInfo(const std::string& filePath) {
size_t seed = 0;
uint64_t seed = 0;
auto absPath = filePath;
try {
absPath = FileUtils::absoluteFilePath(filePath);
@ -88,23 +65,17 @@ std::string NetworkCompilationContext::calculateFileInfo(const std::string& file
std::string NetworkCompilationContext::computeHash(const CNNNetwork& network,
const std::map<std::string, std::string>& compileOptions) {
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::IE_LT, "NetworkCompilationContext::computeHash - CNN");
OstreamHashWrapper xmlHash;
OstreamHashWrapper binHash;
std::ostream xml(&xmlHash);
std::ostream bin(&binHash);
IE_ASSERT(network.getFunction());
// 1. Serialize
uint64_t seed = 0;
// 1. Calculate hash on function
CNNNetwork net(network);
ov::pass::Serialize serializer(xml, bin);
serializer.run_on_function(net.getFunction());
ov::pass::Manager m;
m.register_pass<ov::pass::Hash>(seed);
m.run_passes(net.getFunction());
// 2. Compute hash on serialized data and options
size_t seed = 0;
seed = hash_combine(seed, xmlHash.getResult());
seed = hash_combine(seed, binHash.getResult());
for (const auto& kvp : compileOptions) {
seed = hash_combine(seed, kvp.first + kvp.second);
}
@ -163,7 +134,7 @@ std::string NetworkCompilationContext::computeHash(const CNNNetwork& network,
std::string NetworkCompilationContext::computeHash(const std::string& modelName,
const std::map<std::string, std::string>& compileOptions) {
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::IE_LT, "NetworkCompilationContext::computeHash - ModelName");
size_t seed = 0;
uint64_t seed = 0;
try {
seed = hash_combine(seed, FileUtils::absoluteFilePath(modelName));
} catch (...) {

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "ngraph/opsets/opset.hpp"
#include "openvino/core/function.hpp"
#include "openvino/pass/serialize.hpp"
namespace ov {
namespace pass {

/**
 * @brief Hash transformation calculates hash value for ov::Function
 */
class NGRAPH_API Hash : public ov::pass::FunctionPass {
public:
    OPENVINO_RTTI("HashPass");

    bool run_on_function(std::shared_ptr<ov::Function> f) override;

    /**
     * @brief Hash pass constructor
     *
     * @param output_hash_value Reference to output value. By applying hash pass on function, resulting hash value
     * will be set to this variable
     */
    // explicit: a lone uint64_t& must never implicitly convert into a Hash pass
    explicit Hash(uint64_t& output_hash_value);

private:
    uint64_t& m_hash;  // caller-owned destination; written by run_on_function
};

}  // namespace pass
}  // namespace ov

View File

@ -11,6 +11,7 @@ ov::pass::Attributes::Attributes() {
register_factory<NmsSelectedIndices>();
register_factory<StridesPropagation>();
register_factory<OldApiMap>();
register_factory<LayoutAttribute>();
}
ov::Variant* ov::pass::Attributes::create_by_type_info(const ov::DiscreteTypeInfo& type_info) {

View File

@ -205,6 +205,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
<port id="0" precision="I64" names="input_tensor">
<rt_info>
<attribute name="fused_names" version="0" value="test1,test2"/>
<attribute name="layout" version="0" layout="[N,C,H,W]" />
</rt_info>
<dim>1</dim>
<dim>3</dim>
@ -251,6 +252,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
<port id="0" precision="I64">
<rt_info>
<attribute name="fused_names" version="0" value="test5,test6"/>
<attribute name="layout" version="0" layout="[?,C,H,W]" />
</rt_info>
<dim>1</dim>
<dim>3</dim>
@ -285,9 +287,11 @@ TEST_F(RTInfoDeserialization, InputAndOutputV10) {
check_version(f, 10);
auto param = f->get_parameters()[0];
EXPECT_EQ(param->get_layout(), "");
check_rt_info(param->output(0).get_rt_info());
auto result = f->get_results()[0];
EXPECT_EQ(result->get_layout(), "");
check_rt_info(result->input(0).get_rt_info());
auto add = result->get_input_node_ptr(0);
@ -562,6 +566,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
<port id="0" precision="FP32">
<rt_info>
<attribute name="fused_names" version="0" value="test1,test2"/>
<attribute name="layout" version="0" layout="[N,C,H,W]" />
</rt_info>
<dim>1</dim>
<dim>3</dim>
@ -611,6 +616,7 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
<port id="0" precision="FP32">
<rt_info>
<attribute name="fused_names" version="0" value="test5,test6"/>
<attribute name="layout" version="0" layout="[?,C,H,W]" />
</rt_info>
<dim>1</dim>
<dim>3</dim>
@ -660,10 +666,12 @@ TEST_F(RTInfoDeserialization, InputAndOutputV11) {
auto param = f->get_parameters()[0];
check_fused_names(param->output(0).get_rt_info(), "test1,test2");
check_old_api_map(param->get_rt_info(), std::vector<uint64_t>({}), ngraph::element::Type_t::undefined);
EXPECT_EQ(param->get_layout(), "NCHW");
auto result = f->get_result();
check_fused_names(result->input(0).get_rt_info(), "test5,test6");
check_old_api_map(result->get_rt_info(), std::vector<uint64_t>({}), ngraph::element::Type_t::undefined);
EXPECT_EQ(f->get_results()[0]->get_layout(), "?CHW");
auto add = result->get_input_node_ptr(0);
check_fused_names(add->input(0).get_rt_info(), "test2,test3");

View File

@ -62,12 +62,15 @@ TEST_F(RTInfoSerializationTest, all_attributes_latest) {
std::shared_ptr<ngraph::Function> function;
{
auto data = std::make_shared<ov::opset8::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
data->set_layout("NCHW");
auto add = std::make_shared<ov::opset8::Add>(data, data);
init_info(add->get_rt_info());
init_info(add->input(0).get_rt_info());
init_info(add->input(1).get_rt_info());
init_info(add->output(0).get_rt_info());
function = std::make_shared<ngraph::Function>(OutputVector{add}, ParameterVector{data});
auto result = std::make_shared<ov::opset8::Result>(add);
result->set_layout("????");
function = std::make_shared<ngraph::Function>(ResultVector{result}, ParameterVector{data});
}
pass::Manager m;
@ -100,6 +103,8 @@ TEST_F(RTInfoSerializationTest, all_attributes_latest) {
};
auto add = f->get_results()[0]->get_input_node_ptr(0);
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NCHW");
EXPECT_EQ(f->get_results()[0]->get_layout(), "????");
check_info(add->get_rt_info());
check_info(add->input(0).get_rt_info());
check_info(add->input(1).get_rt_info());
@ -117,6 +122,7 @@ TEST_F(RTInfoSerializationTest, all_attributes_v10) {
std::shared_ptr<ngraph::Function> function;
{
auto data = std::make_shared<ov::opset8::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
data->set_layout("NCHW");
auto add = std::make_shared<ov::opset8::Add>(data, data);
init_info(add->get_rt_info());
init_info(add->input(0).get_rt_info());
@ -142,6 +148,7 @@ TEST_F(RTInfoSerializationTest, all_attributes_v10) {
check_info(add->input(0).get_rt_info());
check_info(add->input(1).get_rt_info());
check_info(add->output(0).get_rt_info());
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "");
}
TEST_F(RTInfoSerializationTest, all_attributes_v11) {
@ -155,12 +162,15 @@ TEST_F(RTInfoSerializationTest, all_attributes_v11) {
std::shared_ptr<ngraph::Function> function;
{
auto data = std::make_shared<ov::opset8::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
data->set_layout("NCHW");
auto add = std::make_shared<ov::opset8::Add>(data, data);
init_info(add->get_rt_info());
init_info(add->input(0).get_rt_info());
init_info(add->input(1).get_rt_info());
init_info(add->output(0).get_rt_info());
function = std::make_shared<ngraph::Function>(OutputVector{add}, ParameterVector{data});
auto result = std::make_shared<ov::opset8::Result>(add);
result->set_layout("????");
function = std::make_shared<ngraph::Function>(ResultVector{result}, ParameterVector{data});
}
pass::Manager m;
@ -185,6 +195,8 @@ TEST_F(RTInfoSerializationTest, all_attributes_v11) {
};
auto add = f->get_results()[0]->get_input_node_ptr(0);
EXPECT_EQ(f->get_parameters()[0]->get_layout(), "NCHW");
EXPECT_EQ(f->get_results()[0]->get_layout(), "????");
check_info(add->get_rt_info());
check_info(add->input(0).get_rt_info());
check_info(add->input(1).get_rt_info());

View File

@ -41,7 +41,7 @@ static std::string generateTestFilePrefix() {
class FileGuard {
std::string m_fileName;
public:
FileGuard(const std::string& name): m_fileName(name) {}
explicit FileGuard(std::string name): m_fileName(std::move(name)) {}
~FileGuard() { std::remove(m_fileName.c_str()); }
};
@ -134,26 +134,16 @@ static std::shared_ptr<ngraph::Function> create_simple_function() {
data->get_output_tensor(0).set_names({"parameter"});
auto mul_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {3});
mul_constant->set_friendly_name("mul_constant");
mul_constant->get_output_tensor(0).set_names({"mul_constant"});
auto mul = std::make_shared<ngraph::opset6::Multiply>(data, mul_constant);
mul->set_friendly_name("mul");
mul->get_output_tensor(0).set_names({"mul"});
auto add_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {2});
add_constant->set_friendly_name("add_constant");
add_constant->get_output_tensor(0).set_names({"add_constant"});
auto add = std::make_shared<ngraph::opset6::Add>(mul, add_constant);
add->set_friendly_name("add");
add->get_output_tensor(0).set_names({"add"});
// Create opset3::Result operation
auto res = std::make_shared<ngraph::opset6::Result>(add);
res->set_friendly_name("res");
// Create nGraph function
auto func = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{data});
func->set_friendly_name("function");
return func;
}
@ -162,8 +152,15 @@ static CNNNetwork createNetwork() {
return res;
}
static void checkCustomRt(std::function<void(Node::RTMap&)> emptyCb,
std::function<void(Node::RTMap&, const std::string& name)> nameCb) {
// Builds the simple test network with the given layout applied to both its
// (only) parameter and its (only) result, for layout-sensitive hash tests.
static CNNNetwork createNetworkWithLayout(const ov::Layout& layout) {
    auto function = create_simple_function();
    function->get_parameters().front()->set_layout(layout);
    function->get_results().front()->set_layout(layout);
    return CNNNetwork(function);
}
static void checkCustomRt(const std::function<void(Node::RTMap&)>& emptyCb,
const std::function<void(Node::RTMap&, const std::string& name)>& nameCb) {
auto net1 = createNetwork();
auto net2 = createNetwork();
auto & op1 = net1.getFunction()->get_ops().front()->get_rt_info();
@ -306,6 +303,66 @@ TEST(NetworkContext_CNNNetwork, HashWithFutureRt_int64) {
NetworkCompilationContext::computeHash(net3, {}));
}
// Hash must account for parameter/result layouts:
//  - layouts differing only in case ("NCHW" vs "nchw") hash equally,
//  - genuinely different layouts hash differently,
//  - setting a layout and then clearing it hashes like never setting one.
TEST(NetworkContext_CNNNetwork, HashWithLayout) {
    auto net1 = createNetworkWithLayout("NCHW");
    auto net2 = createNetworkWithLayout("nchw");
    auto net3 = createNetworkWithLayout("?CHW");
    auto net3_1 = createNetworkWithLayout("?CH?");
    auto net4 = createNetworkWithLayout("");  // no layout at all
    // net5: layouts set and then reset to empty - must leave no trace
    auto fun5 = create_simple_function();
    fun5->get_parameters()[0]->set_layout("NCHW");
    fun5->get_parameters()[0]->set_layout("");
    fun5->get_results()[0]->set_layout("NHCW");
    fun5->get_results()[0]->set_layout(ov::Layout());
    auto net5 = CNNNetwork(fun5);

    // Case-insensitive equality of layouts
    EXPECT_EQ(NetworkCompilationContext::computeHash(net1, {}),
              NetworkCompilationContext::computeHash(net2, {}));

    EXPECT_NE(NetworkCompilationContext::computeHash(net2, {}),
              NetworkCompilationContext::computeHash(net3, {}));

    EXPECT_NE(NetworkCompilationContext::computeHash(net3, {}),
              NetworkCompilationContext::computeHash(net3_1, {}));

    EXPECT_NE(NetworkCompilationContext::computeHash(net3, {}),
              NetworkCompilationContext::computeHash(net4, {}));

    // 'set then erase' must equal 'never set'
    EXPECT_EQ(NetworkCompilationContext::computeHash(net4, {}),
              NetworkCompilationContext::computeHash(net5, {}));
}
// Tensor names form a set: the hash must not depend on insertion order,
// but must change when the set of names itself changes.
TEST(NetworkContext_CNNNetwork, HashWithTensorNames) {
    auto fun1 = create_simple_function();
    auto fun2 = create_simple_function();
    auto fun3 = create_simple_function();
    std::vector<std::string> testNames;
    testNames.reserve(100);
    for (int i = 0; i < 100; i++) {
        testNames.push_back("test" + std::to_string(i));
    }
    // Same names, inserted in opposite orders (iterator-range construction
    // replaces the former std::for_each + insert loops - same behavior)
    std::unordered_set<std::string> names1(testNames.begin(), testNames.end());
    std::unordered_set<std::string> names2(testNames.rbegin(), testNames.rend());

    fun1->input().set_names(names1);
    fun2->input().set_names(names2);

    auto net1 = CNNNetwork(fun1);
    auto net2 = CNNNetwork(fun2);
    auto net3 = CNNNetwork(fun3);

    ASSERT_EQ(NetworkCompilationContext::computeHash(net1, {}),
              NetworkCompilationContext::computeHash(net2, {}));

    ASSERT_NE(NetworkCompilationContext::computeHash(net2, {}),
              NetworkCompilationContext::computeHash(net3, {}));
}
TEST(NetworkContext_CNNNetwork, HashWithDifferentResults) {
auto net1 = createNetwork();
auto net2 = createNetwork();

View File

@ -90,6 +90,7 @@ file(GLOB_RECURSE rt_info_srcs ${CMAKE_CURRENT_SOURCE_DIR}/src/pass/rt_info/*.cp
set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/src/pass/convert_precision.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/pass/convert_fp32_to_fp16.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/pass/init_node_info.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/pass/serialize.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/op/type_relaxed.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/src/function.cpp" # for SmartReshape
${smart_reshape_srcs} ${rt_info_srcs}

View File

@ -164,15 +164,15 @@ protected:
std::string m_dump;
};
template <>
class OPENVINO_API VariantWrapper<Layout> : public VariantImpl<Layout> {
class OPENVINO_API LayoutAttribute : public VariantImpl<Layout> {
public:
static constexpr VariantTypeInfo type_info{"Variant::Layout", 0};
const VariantTypeInfo& get_type_info() const override {
return type_info;
}
OPENVINO_RTTI("layout", "0");
explicit VariantWrapper(const value_type& value) : VariantImpl<value_type>(value) {}
LayoutAttribute() = default;
explicit LayoutAttribute(const Layout& value) : VariantImpl<Layout>(value) {}
bool visit_attributes(AttributeVisitor& visitor) override;
};
} // namespace ov

View File

@ -55,7 +55,9 @@ public:
/// \brief Returns current layout, or empty Layout if it is not set
Layout get_layout() const;
/// \brief Sets layout runtime information to tensor
/// \brief Sets layout runtime information to tensor.
///
/// \param layout Layout to set. If empty (default constructed), layout runtime information is erased.
void set_layout(const Layout& layout);
protected:

View File

@ -46,7 +46,9 @@ public:
/// \brief Returns current layout, or empty Layout if it is not set
Layout get_layout() const;
/// \brief Sets layout runtime information to tensor
/// \brief Sets layout runtime information to tensor.
///
/// \param layout Layout to set. If empty (default constructed), layout runtime information is erased.
void set_layout(const Layout& layout);
};
} // namespace v0

View File

@ -280,6 +280,9 @@ std::vector<int64_t> find_permutation(const Layout& src_layout, const Rank& rank
if (src_layout == dst) {
return {}; // No permutation is needed
}
if (src_layout.empty() || dst.empty()) {
return {};
}
OPENVINO_ASSERT(!src_layout.m_dynamic && !dst.m_dynamic, "Conversion is not supported for dynamic layouts");
OPENVINO_ASSERT(src_layout.m_left_size == src_layout.m_left_size,
"Conversion is not supported for layouts with different sizes");
@ -332,6 +335,11 @@ void AttributeAdapter<ov::Layout>::set(const std::string& value) {
m_ref = Layout(value);
}
constexpr VariantTypeInfo VariantWrapper<ov::Layout>::type_info;
// (De)serializes the layout through its string form: on save the visitor
// records m_value.to_string(); on load it fills layout_str, which is parsed
// back into m_value. The unconditional re-parse assumes
// Layout(to_string(x)) == x, so saving is a round-trip no-op.
bool LayoutAttribute::visit_attributes(AttributeVisitor& visitor) {
    std::string layout_str = m_value.to_string();
    visitor.on_attribute("layout", layout_str);
    m_value = Layout(layout_str);
    return true;
}
} // namespace ov

View File

@ -49,17 +49,25 @@ void op::Parameter::set_is_relevant_to_shapes(bool is_relevant) {
}
ov::Layout op::Parameter::get_layout() const {
auto it = get_output_tensor(0).get_rt_info().find("LAYOUT");
if (it == get_output_tensor(0).get_rt_info().end()) {
return ov::Layout();
auto it = output(0).get_rt_info().find(ov::LayoutAttribute::get_type_info_static());
if (it == output(0).get_rt_info().end()) {
return "";
}
auto layout = std::dynamic_pointer_cast<VariantWrapper<ov::Layout>>(it->second);
OPENVINO_ASSERT(layout, "'LAYOUT' runtime info for node is invalid, use set_layout API");
auto layout = std::dynamic_pointer_cast<ov::LayoutAttribute>(it->second);
OPENVINO_ASSERT(layout,
"'",
ov::LayoutAttribute::get_type_info_static(),
"' runtime info for parameter is invalid, use set_layout API");
return layout->get();
}
void op::Parameter::set_layout(const ov::Layout& layout) {
get_output_tensor(0).get_rt_info()["LAYOUT"] = std::make_shared<VariantWrapper<ov::Layout>>(layout);
if (layout.empty()) {
output(0).get_rt_info().erase(ov::LayoutAttribute::get_type_info_static());
} else {
output(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] =
std::make_shared<ov::LayoutAttribute>(layout);
}
}
BWDCMP_RTTI_DEFINITION(ov::AttributeAdapter<ParameterVector>);

View File

@ -68,17 +68,25 @@ bool op::Result::constant_fold(OutputVector& output_values, const OutputVector&
}
ov::Layout op::Result::get_layout() const {
auto it = get_output_tensor(0).get_rt_info().find("LAYOUT");
if (it == get_output_tensor(0).get_rt_info().end()) {
auto it = input(0).get_rt_info().find(ov::LayoutAttribute::get_type_info_static());
if (it == input(0).get_rt_info().end()) {
return {};
}
auto layout = std::dynamic_pointer_cast<VariantWrapper<ov::Layout>>(it->second);
OPENVINO_ASSERT(layout, "'LAYOUT' runtime info for node is invalid, use set_layout API");
auto layout = std::dynamic_pointer_cast<ov::LayoutAttribute>(it->second);
OPENVINO_ASSERT(layout,
"'",
ov::LayoutAttribute::get_type_info_static(),
"' runtime info for result is invalid, use set_layout API");
return layout->get();
}
void op::Result::set_layout(const ov::Layout& layout) {
get_output_tensor(0).get_rt_info()["LAYOUT"] = std::make_shared<VariantWrapper<ov::Layout>>(layout);
if (layout.empty()) {
input(0).get_rt_info().erase(ov::LayoutAttribute::get_type_info_static());
} else {
input(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] =
std::make_shared<ov::LayoutAttribute>(layout);
}
}
BWDCMP_RTTI_DEFINITION(ov::AttributeAdapter<ResultVector>);

View File

@ -19,6 +19,7 @@
#include "openvino/op/util/framework_node.hpp"
#include "openvino/pass/constant_folding.hpp"
#include "pugixml.hpp"
#include "transformations/hash.hpp"
using namespace ngraph;
@ -121,7 +122,8 @@ void ngfunction_2_ir(pugi::xml_node& node,
const ngraph::Function& f,
const std::map<std::string, ngraph::OpSet>& custom_opsets,
ConstantWriter& constant_write_handler,
int64_t version);
int64_t version,
bool deterministic);
// Some of the operators were added to wrong opsets. This is a mapping
// that allows such operators to be serialized with proper opsets.
@ -253,6 +255,7 @@ class XmlSerializer : public ngraph::AttributeVisitor {
const std::map<std::string, ngraph::OpSet>& m_custom_opsets;
ConstantWriter& m_constant_write_handler;
int64_t m_version;
bool m_deterministic;
template <typename T>
std::string create_atribute_list(ngraph::ValueAccessor<std::vector<T>>& adapter) {
@ -374,12 +377,14 @@ public:
const std::string& node_type_name,
const std::map<std::string, ngraph::OpSet>& custom_opsets,
ConstantWriter& constant_write_handler,
int64_t version)
int64_t version,
bool deterministic = false)
: m_xml_node(data),
m_node_type_name(node_type_name),
m_custom_opsets(custom_opsets),
m_constant_write_handler(constant_write_handler),
m_version(version) {}
m_version(version),
m_deterministic(deterministic) {}
void on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) override {
using BodyTargetNames = std::tuple<std::string, std::string, std::vector<std::string>>;
@ -508,11 +513,21 @@ public:
// to layer above (m_xml_node.parent()) as in ngfunction_2_ir() layer (m_xml_node) with empty attributes
// is removed.
pugi::xml_node xml_body = m_xml_node.parent().append_child(name.c_str());
ngfunction_2_ir(xml_body, *adapter.get(), m_custom_opsets, m_constant_write_handler, m_version);
ngfunction_2_ir(xml_body,
*adapter.get(),
m_custom_opsets,
m_constant_write_handler,
m_version,
m_deterministic);
xml_body.remove_attribute("name");
xml_body.remove_attribute("version");
} else if (name == "net") {
ngfunction_2_ir(m_xml_node, *adapter.get(), m_custom_opsets, m_constant_write_handler, m_version);
ngfunction_2_ir(m_xml_node,
*adapter.get(),
m_custom_opsets,
m_constant_write_handler,
m_version,
m_deterministic);
} else {
NGRAPH_CHECK(false, "Unsupported Function name.");
}
@ -668,6 +683,11 @@ std::string generate_unique_name(const std::unordered_set<std::string>& unique_n
}
}
// Heuristic: a friendly name equal to the node's/function's unique name is
// treated as auto-generated (get_friendly_name() presumably falls back to
// get_name() when no name was set explicitly - confirm against core).
// Deterministic serialization skips such names so hashes stay stable.
template <typename T>
bool is_name_auto_generated(const T& n) {
    return n.get_friendly_name() == n.get_name();
}
// TODO: remove when CNNNetwork will be supporting not-unique names
std::string get_node_unique_name(std::unordered_set<std::string>& unique_names, const ngraph::Node* n) {
std::string name = n->get_friendly_name();
@ -837,8 +857,12 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
const ngraph::Function& f,
const std::map<std::string, ngraph::OpSet>& custom_opsets,
ConstantWriter& constant_node_write_handler,
int64_t version) {
netXml.append_attribute("name").set_value(f.get_friendly_name().c_str());
int64_t version,
bool deterministic) {
// If determinism is not required, include auto-generated names into xml
if (!deterministic || !is_name_auto_generated(f)) {
netXml.append_attribute("name").set_value(f.get_friendly_name().c_str());
}
netXml.append_attribute("version").set_value(version);
pugi::xml_node layers = netXml.append_child("layers");
@ -878,7 +902,10 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
// <layers>
pugi::xml_node layer = layers.append_child("layer");
layer.append_attribute("id").set_value(layer_ids.find(node)->second);
layer.append_attribute("name").set_value(get_node_unique_name(unique_names, node).c_str());
// If determinism is not required, include auto-generated names into xml
if (!deterministic || !is_name_auto_generated(*node)) {
layer.append_attribute("name").set_value(get_node_unique_name(unique_names, node).c_str());
}
layer.append_attribute("type").set_value(translate_type_name(node_type_name).c_str());
if (!exec_graph) {
layer.append_attribute("version").set_value(get_opset_name(node, custom_opsets).c_str());
@ -984,7 +1011,7 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
// fill <data> general attributes
auto_pad_resolving(node); // Backward compatibility: clear padding values for nodes with auto_pad
XmlSerializer visitor(data, node_type_name, custom_opsets, constant_node_write_handler, version);
XmlSerializer visitor(data, node_type_name, custom_opsets, constant_node_write_handler, version, deterministic);
NGRAPH_CHECK(node->visit_attributes(visitor), "Visitor API is not supported in ", node);
rt_info::XmlSerializer{data}.serialize(node->get_rt_info());
@ -1042,43 +1069,48 @@ std::string provide_bin_path(const std::string& xmlPath, const std::string& binP
return bestPath;
}
void serializeFunc(std::ostream& xml_file,
std::ostream& bin_file,
std::shared_ptr<ov::Function> f,
ov::pass::Serialize::Version ver,
const std::map<std::string, ngraph::OpSet>& custom_opsets,
bool deterministic = false) {
auto version = static_cast<int64_t>(ver);
auto& rt_info = f->get_rt_info();
if (rt_info.count("version")) {
auto version_var = std::dynamic_pointer_cast<VariantWrapper<int64_t>>(rt_info.at("version"));
version = version_var->get();
}
if (version != static_cast<int64_t>(ver) && ver != ov::pass::Serialize::Version::UNSPECIFIED)
throw ngraph_error("Cannot serialize function to incompatible IR version");
if (version == static_cast<int64_t>(ov::pass::Serialize::Version::UNSPECIFIED))
version = static_cast<int64_t>(ov::pass::Serialize::Version::IR_V11);
if (version != static_cast<int64_t>(ov::pass::Serialize::Version::IR_V10) &&
version != static_cast<int64_t>(ov::pass::Serialize::Version::IR_V11)) {
throw ngraph_error("Unsupported version");
}
std::string name = "net";
pugi::xml_document xml_doc;
pugi::xml_node net_node = xml_doc.append_child(name.c_str());
ConstantWriter constant_write_handler(bin_file);
XmlSerializer visitor(net_node, name, custom_opsets, constant_write_handler, version, deterministic);
visitor.on_attribute(name, f);
xml_doc.save(xml_file);
xml_file.flush();
bin_file.flush();
};
} // namespace
namespace ov {
bool pass::Serialize::run_on_function(std::shared_ptr<ngraph::Function> f) {
auto serializeFunc = [&](std::ostream& xml_file, std::ostream& bin_file) {
auto version = static_cast<int64_t>(m_version);
auto& rt_info = f->get_rt_info();
if (rt_info.count("version")) {
auto version_var = std::dynamic_pointer_cast<VariantWrapper<int64_t>>(rt_info.at("version"));
version = version_var->get();
}
if (version != static_cast<int64_t>(m_version) && m_version != Serialize::Version::UNSPECIFIED)
throw ngraph_error("Cannot serialize function to incompatible IR version");
if (version == static_cast<int64_t>(Serialize::Version::UNSPECIFIED))
version = static_cast<int64_t>(Serialize::Version::IR_V11);
if (version != static_cast<int64_t>(Serialize::Version::IR_V10) &&
version != static_cast<int64_t>(Serialize::Version::IR_V11)) {
throw ngraph_error("Unsupported version");
}
std::string name = "net";
pugi::xml_document xml_doc;
pugi::xml_node net_node = xml_doc.append_child(name.c_str());
ConstantWriter constant_write_handler(bin_file);
XmlSerializer visitor(net_node, name, m_custom_opsets, constant_write_handler, version);
visitor.on_attribute(name, f);
xml_doc.save(xml_file);
xml_file.flush();
bin_file.flush();
};
if (m_xmlFile && m_binFile) {
serializeFunc(*m_xmlFile, *m_binFile);
serializeFunc(*m_xmlFile, *m_binFile, f, m_version, m_custom_opsets);
} else {
std::ofstream bin_file(m_binPath, std::ios::out | std::ios::binary);
NGRAPH_CHECK(bin_file, "Can't open bin file: \"" + m_binPath + "\"");
@ -1088,9 +1120,9 @@ bool pass::Serialize::run_on_function(std::shared_ptr<ngraph::Function> f) {
NGRAPH_CHECK(xml_file, "Can't open xml file: \"" + m_xmlPath + "\"");
try {
serializeFunc(xml_file, bin_file);
serializeFunc(xml_file, bin_file, f, m_version, m_custom_opsets);
} catch (const ngraph::CheckFailure&) {
// optimization decission was made to create .bin file upfront and
// optimization decision was made to create .bin file upfront and
// write to it directly instead of buffering its content in memory,
// hence we need to delete it here in case of failure
xml_file.close();
@ -1123,7 +1155,6 @@ pass::Serialize::Serialize(const std::string& xmlPath,
std::map<std::string, ngraph::OpSet> custom_opsets,
pass::Serialize::Version version)
: m_xmlFile{nullptr},
m_binFile{nullptr},
m_xmlPath{valid_xml_path(xmlPath)},
m_binPath{provide_bin_path(xmlPath, binPath)},
m_version{version},
@ -1211,4 +1242,60 @@ bool pass::StreamSerialize::run_on_function(std::shared_ptr<ngraph::Function> f)
// Return false because we didn't change nGraph Function
return false;
}
/// -------- Hash calculation pass -------------
namespace {
// Hash combine formula from boost
template <typename T>
static uint64_t hash_combine(uint64_t seed, const T& a) {
    return seed ^ (std::hash<T>()(a) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

// streambuf that discards the written bytes, keeping only an additive hash.
// Lets the serializer "write" a model without buffering it in memory.
class OstreamHashWrapper final : public std::streambuf {
    uint64_t m_res = 0;

public:
    uint64_t getResult() const {
        return m_res;
    }

    std::streamsize xsputn(const char* s, std::streamsize n) override {
        // Sum word-sized chunks first - much faster than per-char.
        // memcpy avoids the unaligned-load / strict-aliasing UB of casting
        // s to a wider pointer type; compilers lower it to a plain load.
        const std::streamsize word = static_cast<std::streamsize>(sizeof(std::streamsize));
        const std::streamsize n64 = n / word;
        for (std::streamsize i = 0; i < n64; i++) {
            std::streamsize chunk;
            std::memcpy(&chunk, s + i * word, sizeof(chunk));
            m_res += chunk;
        }
        // Tail smaller than one word is added byte-by-byte
        const std::streamsize rest = n % word;
        for (std::streamsize i = 0; i < rest; i++) {
            m_res += s[n - rest + i];
        }
        return n;
    }
};
}  // namespace
// Computes a 64-bit hash of the function by serializing it (xml + bin parts)
// into hash-accumulating stream buffers instead of real files, then combining
// both partial hashes into m_hash.
bool pass::Hash::run_on_function(std::shared_ptr<ov::Function> f) {
    OstreamHashWrapper xmlHash;
    OstreamHashWrapper binHash;
    std::ostream xml(&xmlHash);
    std::ostream bin(&binHash);

    // Determinism is important for hash calculation
    serializeFunc(xml, bin, f, Serialize::Version::UNSPECIFIED, {}, true);

    uint64_t seed = 0;
    seed = hash_combine(seed, xmlHash.getResult());
    seed = hash_combine(seed, binHash.getResult());

    m_hash = seed;
    // Return false because we didn't change nGraph Function
    return false;
}
// Keeps a reference to the caller's output variable; filled by run_on_function.
pass::Hash::Hash(uint64_t& output_hash_value) : m_hash(output_hash_value) {}
} // namespace ov

View File

@ -881,13 +881,6 @@ TEST(pre_post_process, postprocess_set_layout_network) {
EXPECT_EQ(f->get_results()[0]->get_layout(), "NCHW");
}
TEST(pre_post_process, postprocess_set_layout_tensor) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
// no layout is specified for network, no way to implicitly convert it to user's layout
EXPECT_THROW(f = PrePostProcessor().output(OutputInfo().tensor(OutputTensorInfo().set_layout("NHWC"))).build(f),
ov::AssertFailure);
}
TEST(pre_post_process, postprocess_convert_layout_implicit) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
@ -940,17 +933,6 @@ TEST(pre_post_process, postprocess_convert_layout_same) {
EXPECT_EQ(size_old, f->get_ordered_ops().size());
}
TEST(pre_post_process, postprocess_convert_layout_default_error) {
auto f = create_simple_function(element::f32, Shape{1, 3, 2, 2});
EXPECT_THROW(f = PrePostProcessor()
.output(OutputInfo()
.network(OutputNetworkInfo().set_layout("NCHW"))
.postprocess(PostProcessSteps().convert_layout()))
.build(f),
ov::AssertFailure);
}
TEST(pre_post_process, postprocess_convert_layout_dims) {
auto f = create_simple_function(element::f32, Shape{1, 3, 480, 640});

View File

@ -35,15 +35,19 @@ TEST(type_prop, param_layout) {
auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
a->set_layout("NHWC");
ASSERT_EQ(a->get_layout(), "NHWC");
a->set_layout(ov::Layout());
EXPECT_TRUE(a->get_layout().empty());
EXPECT_EQ(a->get_output_tensor(0).get_rt_info().count(ov::LayoutAttribute::get_type_info_static()), 0);
}
// A freshly created parameter must report an empty layout
TEST(type_prop, param_layout_empty) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    ASSERT_EQ(a->get_layout(), ov::Layout());
    ASSERT_TRUE(a->get_layout().empty());
}
// Bypassing set_layout() and storing a plain string variant under the layout
// key is invalid; get_layout() must throw rather than return garbage
TEST(type_prop, param_layout_invalid) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    a->get_output_tensor(0).get_rt_info()["LAYOUT"] = ov::make_variant("NCHW");  // incorrect way
    a->get_output_tensor(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] =
        ov::make_variant("NCHW");  // incorrect way
    ASSERT_THROW(a->get_layout(), ov::AssertFailure);
}

View File

@ -28,3 +28,27 @@ TEST(type_prop, result_dynamic_shape) {
EXPECT_EQ(result->get_output_element_type(0), element::f32);
EXPECT_TRUE(result->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
// set_layout/get_layout round-trip on Result; clearing with an empty
// ov::Layout must also remove the rt_info attribute entirely
TEST(type_prop, result_layout) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(a);
    result->set_layout("NHWC");
    EXPECT_EQ(result->get_layout(), "NHWC");
    result->set_layout(ov::Layout());
    EXPECT_TRUE(result->get_layout().empty());
    // Empty layout erases the attribute instead of storing an empty one
    EXPECT_EQ(result->input(0).get_rt_info().count(ov::LayoutAttribute::get_type_info_static()), 0);
}
// A Result with no layout ever set reports an empty layout
TEST(type_prop, result_layout_empty) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(a);
    EXPECT_TRUE(result->get_layout().empty());
}
// Storing a wrongly-typed variant under the layout key must make
// get_layout() throw rather than return garbage
TEST(type_prop, result_layout_invalid) {
    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto result = make_shared<opset1::Result>(a);
    result->input(0).get_rt_info()[ov::LayoutAttribute::get_type_info_static()] =
        ov::make_variant("NCHW");  // incorrect way
    ASSERT_THROW(result->get_layout(), ov::AssertFailure);
}