Removed autogeneration of tensor names, added new reshape API (#8134)

* Removed autogeneration of tensor names, added new reshape API

* Fixed code style

* Fixed tests

* Fixed tests

* Fixed comments
Ilya Churaev 2021-10-25 07:34:24 +03:00 committed by GitHub
parent eb7f261d15
commit 838bd20065
10 changed files with 116 additions and 111 deletions
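
As context for the diffs that follow, this is roughly how the new port-keyed reshape overload is meant to be used. It is a minimal sketch, not part of the commit: the header paths are assumptions for this revision (before ov::Function was renamed), and the shapes and variable names are illustrative. It mirrors the new ReshapeBatchReLUByPort test further down; note that the function's tensors carry no names at all, which is now valid because names are no longer autogenerated.

// Minimal sketch of the port-keyed reshape overload added by this commit.
// Header locations are assumed for this revision of OpenVINO.
#include <map>
#include <memory>

#include "openvino/core/function.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"

int main() {
    // Parameter -> Relu -> Result, similar to the new ReshapeBatchReLUByPort test.
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                         ov::PartialShape{1, 3, 22, 22});
    auto relu = std::make_shared<ov::op::v0::Relu>(param);
    auto result = std::make_shared<ov::op::v0::Result>(relu);
    auto f = std::make_shared<ov::Function>(ov::ResultVector{result},
                                            ov::ParameterVector{param});

    // Reshape by output port instead of by tensor name; no names are required.
    std::map<ov::Output<ov::Node>, ov::PartialShape> new_shapes;
    new_shapes[param->output(0)] = ov::PartialShape{2, 3, 22, 22};
    f->reshape(new_shapes);
    return 0;
}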

View File

@@ -25,13 +25,21 @@ LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f), abs_threshold(-1.f) {
core = PluginCache::get().ie(targetDevice);
}
void LayerTestsCommon::ResizeNgraphFunction() {
std::map<ov::Output<ov::Node>, ov::PartialShape> shapes;
auto params = function->get_parameters();
std::map<std::string, ngraph::PartialShape> shapes;
ASSERT_LE(params.size(), targetStaticShapes[index].size());
for (size_t i = 0; i < params.size(); i++) {
shapes.insert({*params[i]->get_output_tensor(0).get_names().begin(), targetStaticShapes[index][i]});
shapes.insert({params[i]->output(0), targetStaticShapes[index][i]});
}
function->reshape(shapes);
shapes.clear();
params = functionRefs->get_parameters();
ASSERT_LE(params.size(), targetStaticShapes[index].size());
for (size_t i = 0; i < params.size(); i++) {
shapes.insert({params[i]->output(0), targetStaticShapes[index][i]});
}
functionRefs->reshape(shapes);
}

View File

@@ -107,9 +107,7 @@ protected:
ngraph::HostTensorPtr m_lower_value, m_upper_value;
std::string m_name;
mutable std::atomic_bool m_names_changing{false};
mutable std::unordered_set<std::string> m_names;
static std::atomic<size_t> m_next_instance_id;
std::unordered_set<std::string> m_names;
std::map<std::string, std::shared_ptr<Variant>> m_rt_info;
};

View File

@@ -112,6 +112,7 @@ public:
void add_output(const ov::Output<ov::Node>& port);
void reshape(const std::map<std::string, ov::PartialShape>& partial_shapes);
void reshape(const std::map<ov::Output<ov::Node>, ov::PartialShape>& partial_shapes);
/// Return the element type of output i
const ov::element::Type& get_output_element_type(size_t i) const;
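
For illustration, the two reshape overloads declared above can be used side by side. This is a sketch under the same header assumptions as the earlier example; the tensor name "input" is illustrative and must be set explicitly, since names are no longer autogenerated.

// Sketch only: the string-keyed and port-keyed overloads on the same function.
void reshape_both_ways() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                         ov::PartialShape{1, 3, 224, 224});
    param->get_output_tensor(0).set_names({"input"});  // explicit name, no autogeneration
    auto relu = std::make_shared<ov::op::v0::Relu>(param);
    auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{param});

    f->reshape({{"input", ov::PartialShape{2, 3, 224, 224}}});           // by tensor name
    f->reshape({{param->output(0), ov::PartialShape{4, 3, 224, 224}}});  // by output port
}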

View File

@@ -8,7 +8,6 @@
#include "ngraph/node.hpp"
using namespace std;
atomic<size_t> ov::descriptor::Tensor::m_next_instance_id(0);
ov::descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name)
: m_element_type(element_type),
@@ -92,9 +91,6 @@ const std::string& ov::descriptor::Tensor::get_name() const {
NGRAPH_SUPPRESS_DEPRECATED_END
const std::unordered_set<std::string>& ov::descriptor::Tensor::get_names() const {
AtomicGuard lock(m_names_changing);
if (m_names.empty())
m_names.insert("Tensor_" + to_string(m_next_instance_id.fetch_add(1)));
return m_names;
}

View File

@@ -205,8 +205,6 @@ void ov::Function::validate_nodes_and_infer_types() const {
std::map<ov::op::util::Variable*, Counter> pair_checker;
std::stringstream unregistered_parameters;
std::stringstream unregistered_variables;
// TODO: enable tensor names check after fixes in transformations
// std::unordered_set<std::string> tensor_names;
std::unordered_set<const ov::descriptor::Tensor*> tensors;
for (auto& node : get_ordered_ops()) {
node->revalidate_and_infer_types();
@@ -216,12 +214,6 @@ void ov::Function::validate_nodes_and_infer_types() const {
if (tensors.count(&tensor))
continue;
tensors.insert(&tensor);
// for (const auto& name : output.get_tensor().get_names()) {
// if (tensor_names.count(name))
// throw ov::Exception("Function is incorrect. All Tensors should have unique names. " + name +
// " is not unique.");
// tensor_names.insert(name);
// }
}
if (op::util::is_parameter(node) &&
std::find(m_parameters.begin(), m_parameters.end(), node) == m_parameters.end())
@@ -739,12 +731,35 @@ ov::Output<ov::Node> ov::Function::input(const std::string& tensor_name) {
}
void ov::Function::reshape(const std::map<std::string, ov::PartialShape>& partial_shapes) {
std::map<ov::Output<ov::Node>, ov::PartialShape> const_pshape;
std::unordered_map<ov::Node*, std::string> port_tensor_map;
for (const auto& it : partial_shapes) {
const auto port = input(it.first);
if (port_tensor_map.find(port.get_node()) != port_tensor_map.end()) {
OPENVINO_ASSERT(it.second == const_pshape.at(port),
"Tensor with names {'",
it.first,
"', '",
port_tensor_map[port.get_node()],
"'} has "
"conflicting shapes ",
it.second,
" and ",
const_pshape.at(port),
", but they define the same tensor");
}
port_tensor_map[port.get_node()] = it.first;
const_pshape[port] = it.second;
}
reshape(const_pshape);
}
void ov::Function::reshape(const std::map<ov::Output<ov::Node>, ov::PartialShape>& partial_shapes) {
if (partial_shapes.empty())
return;
const auto& params = get_parameters();
std::unordered_map<std::string, std::shared_ptr<ov::op::v0::Parameter>> tensor_param_map;
std::unordered_map<std::shared_ptr<ov::op::v0::Parameter>, std::string> param_tensor_map;
std::unordered_map<ov::op::v0::Parameter*, ov::PartialShape> new_param_shapes;
// Check that we need to do reshape only if input shapes will be changed
bool need_reshape = false;
@@ -752,39 +767,22 @@ void ov::Function::reshape(const std::map<std::string, ov::PartialShape>& partia
bool shape_is_used = false;
for (const auto& param : params) {
const auto& tensor_names = param->get_output_tensor(0).get_names();
if (tensor_names.count(partial_shape.first)) {
const auto port = param->output(0);
if (port == partial_shape.first) {
shape_is_used = true;
tensor_param_map[partial_shape.first] = param;
auto it = param_tensor_map.find(param);
if (it != param_tensor_map.end()) {
OPENVINO_ASSERT(partial_shape.second == partial_shapes.at(it->second),
"Tensor with names {'",
partial_shape.first,
"', '",
it->second,
"'} has "
"conflicting shapes ",
partial_shape.second,
" and ",
partial_shapes.at(it->second),
", but they define the same tensor");
} else {
param_tensor_map[param] = partial_shape.first;
}
if (param->get_output_partial_shape(0).is_dynamic() ||
param->get_output_partial_shape(0) != partial_shape.second) {
need_reshape = true;
new_param_shapes[param.get()] = partial_shape.second;
}
break;
}
}
OPENVINO_ASSERT(shape_is_used,
"PartialShape for tensor with name '",
partial_shape.first,
"PartialShape for port '",
*partial_shape.first.get_node(),
"' is not used in ov::Function::reshape");
}
@@ -792,15 +790,14 @@ void ov::Function::reshape(const std::map<std::string, ov::PartialShape>& partia
return;
// save original parameters shape
std::map<std::string, ov::PartialShape> original_input_shapes;
std::unordered_map<ov::op::v0::Parameter*, ov::PartialShape> original_input_shapes;
for (const auto& param : params) {
std::string any_tensor_name = *param->get_output_tensor(0).get_names().begin();
original_input_shapes[any_tensor_name] = param->get_output_partial_shape(0);
original_input_shapes[param.get()] = param->get_output_partial_shape(0);
}
auto reshape_only = [&](const std::map<std::string, ov::PartialShape>& pshapes) {
auto reshape_only = [&](const std::unordered_map<ov::op::v0::Parameter*, ov::PartialShape>& pshapes) {
for (const auto& pshape : pshapes) {
tensor_param_map[pshape.first]->set_partial_shape(pshape.second);
pshape.first->set_partial_shape(pshape.second);
}
validate_nodes_and_infer_types();
@@ -811,7 +808,7 @@ void ov::Function::reshape(const std::map<std::string, ov::PartialShape>& partia
ssr_manager.register_pass<ngraph::pass::SmartReshape>();
ssr_manager.run_passes(shared_from_this());
reshape_only(partial_shapes);
reshape_only(new_param_shapes);
} catch (std::exception& ex) {
// restore shapes to original ones
reshape_only(original_input_shapes);

View File

@@ -951,19 +951,8 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
port.append_attribute("precision").set_value(get_precision_name(o.get_element_type()).c_str());
// Sort tensor names
// Skip autogenerated names
// TODO: remove this code after transformation fixes
const auto& autogenerated_name = [](const std::string& name) {
if (name.rfind("Tensor_", 0) != 0)
return false;
return true;
};
const auto& tensor_names = o.get_tensor().get_names();
std::vector<std::string> vector_names;
for (const auto& name : tensor_names) {
if (!autogenerated_name(name))
vector_names.emplace_back(name);
}
std::vector<std::string> vector_names(tensor_names.begin(), tensor_names.end());
sort(vector_names.begin(), vector_names.end());
std::string names;

View File

@@ -48,6 +48,41 @@ TEST(function, get_output_by_tensor_name) {
EXPECT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_input_by_tensor_index_without_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input(0);
EXPECT_THROW(f->input("input"), ov::Exception);
EXPECT_EQ(input.get_node(), arg0.get());
EXPECT_EQ(input.get_element_type(), ov::element::f32);
EXPECT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_by_tensor_index_without_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output(0);
EXPECT_THROW(f->output("relu_t"), ov::Exception);
EXPECT_EQ(output.get_tensor().get_names().size(), 0);
EXPECT_EQ(output.get_node(), result.get());
EXPECT_THROW(f->output("identity"), ov::Exception);
EXPECT_EQ(output.get_element_type(), ov::element::f32);
EXPECT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_incorrect_output_by_tensor_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
@@ -249,18 +284,6 @@ TEST(function, multiple_inputs_outputs_function) {
EXPECT_EQ(f->outputs().size(), 2);
}
TEST(function, DISABLED_create_function_with_incorrect_tensor_names) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"input"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
EXPECT_THROW(f->validate_nodes_and_infer_types(), ov::Exception);
}
TEST(function, get_input_by_tensor_name_from_const) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
@@ -501,18 +524,6 @@ TEST(function, multiple_inputs_outputs_function_from_const_function) {
EXPECT_EQ(f->outputs().size(), 2);
}
TEST(function, DISABLED_create_function_with_incorrect_tensor_names_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"input"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
EXPECT_THROW(f->validate_nodes_and_infer_types(), ov::Exception);
}
TEST(function_reshape, ReshapedDynamicShapeLayout) {
std::shared_ptr<ov::Function> ngraph;
{
@@ -768,6 +779,37 @@ TEST(function_reshape, TestReshapeWithInvalidShapesForTheSameTensor) {
EXPECT_ANY_THROW(f->reshape({{"tensor1", ov::Shape({2, 500, 4})}, {"tensor2", ov::Shape({4, 250, 4})}}));
}
TEST(function_reshape, ReshapeBatchReLUByPort) {
std::shared_ptr<ov::Function> ngraph;
ov::Output<ov::Node> port;
{
ov::PartialShape shape({1, 3, 22, 22});
ov::element::Type type(ov::element::Type_t::f32);
auto param = std::make_shared<ov::op::v0::Parameter>(type, shape);
param->get_output_tensor(0).set_names({"tensor", "tensor2"});
port = param->output(0);
auto relu = std::make_shared<ov::op::v0::Relu>(param);
auto result = std::make_shared<ov::op::v0::Result>(relu);
ov::ParameterVector params = {param};
ov::ResultVector results = {result};
ngraph = std::make_shared<ov::Function>(results, params);
}
EXPECT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({1, 3, 22, 22}));
EXPECT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({1, 3, 22, 22}));
{
std::map<ov::Output<ov::Node>, ov::PartialShape> new_shape;
new_shape[port] = ov::PartialShape{2, 3, 22, 22};
EXPECT_NO_THROW(ngraph->reshape(new_shape));
}
EXPECT_EQ(ngraph->get_parameters()[0]->get_shape(), ov::Shape({2, 3, 22, 22}));
EXPECT_EQ(ngraph->get_results()[0]->get_shape(), ov::Shape({2, 3, 22, 22}));
}
TEST(function, add_output_tensor_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");

View File

@@ -94,7 +94,7 @@ TEST(node_input_output, output_create_const) {
auto add_out_0 = add->output(0);
EXPECT_EQ(add_out_0.get_names().size(), 1);
EXPECT_EQ(add_out_0.get_names().size(), 0);
EXPECT_EQ(add_out_0.get_node(), add.get());
EXPECT_EQ(add_out_0.get_index(), 0);
EXPECT_EQ(add_out_0.get_element_type(), element::f32);

View File

@@ -35,21 +35,3 @@ TEST(tensor, tensor_names) {
ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), relu->get_output_tensor(0).get_names());
ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names());
}
TEST(tensor, generation_tensor_names) {
auto arg0 = make_shared<opset6::Parameter>(element::f32, Shape{1});
arg0->set_friendly_name("data");
auto relu = make_shared<opset6::Relu>(arg0);
relu->set_friendly_name("relu");
auto f0 = make_shared<Function>(relu, ParameterVector{arg0});
ASSERT_FALSE(arg0->get_output_tensor(0).get_names().empty());
ASSERT_FALSE(f0->get_result()->input_value(0).get_tensor().get_names().empty());
ASSERT_NE(arg0->get_output_tensor(0).get_names(), relu->get_output_tensor(0).get_names());
ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->get_input_tensor(0).get_names());
ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->input_value(0).get_tensor().get_names());
ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), relu->get_output_tensor(0).get_names());
ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names());
}

View File

@@ -711,20 +711,12 @@ void Comparator::compare_inputs(ngraph::Node* node1, ngraph::Node* node2, std::o
void Comparator::compare_outputs(ngraph::Node* node1, ngraph::Node* node2, std::ostream& err_log) {
// Some transformations creates new tensors with autogenerated names
const auto& autogenerated_names = [](const std::unordered_set<std::string>& names) {
for (const auto& name : names) {
if (name.rfind("Tensor_", 0) != 0)
return false;
}
return true;
};
for (int i = 0; i < node1->outputs().size(); ++i) {
const auto& tensor1 = node1->output(i).get_tensor();
const auto& tensor2 = node2->output(i).get_tensor();
if (should_compare(CmpValues::TENSOR_NAMES)) {
if (tensor1.get_names() != tensor2.get_names() &&
(!autogenerated_names(tensor1.get_names()) || !autogenerated_names(tensor2.get_names()))) {
if (tensor1.get_names() != tensor2.get_names()) {
err_log << "Output tensors names " << tensor_names(tensor1) << " and " << tensor_names(tensor2)
<< " are different for nodes: " << node1->get_friendly_name() << " and "
<< node2->get_friendly_name() << std::endl;