Merge remote-tracking branch 'upstream/master' into sy/test/ConvolutionLayerTest_dynamic_shape_case

Steve Yoo 2021-09-24 14:45:01 +09:00
commit 25c399d922
25 changed files with 912 additions and 106 deletions


@@ -0,0 +1,87 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <vector>
#include "base_reference_test.hpp"
using namespace ngraph;
namespace reference_tests {
namespace {
struct AcosParams {
Tensor input;
Tensor expected;
};
struct Builder : ParamsBuilder<AcosParams> {
REFERENCE_TESTS_ADD_SET_PARAM(Builder, input);
REFERENCE_TESTS_ADD_SET_PARAM(Builder, expected);
};
class ReferenceAcosLayerTest : public testing::TestWithParam<AcosParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.input.shape, params.input.type);
inputData = {params.input.data};
refOutData = {params.expected.data};
}
static std::string getTestCaseName(const testing::TestParamInfo<AcosParams>& obj) {
auto param = obj.param;
std::ostringstream result;
result << "shape=" << param.input.shape << "_";
result << "type=" << param.input.type;
return result.str();
}
private:
static std::shared_ptr<Function> CreateFunction(const Shape& shape, const element::Type& type) {
const auto in = std::make_shared<op::Parameter>(type, shape);
const auto acos = std::make_shared<op::Acos>(in);
return std::make_shared<Function>(NodeVector {acos}, ParameterVector {in});
}
};
TEST_P(ReferenceAcosLayerTest, AcosWithHardcodedRefs) {
Exec();
}
} // namespace
INSTANTIATE_TEST_SUITE_P(
smoke_Acos_With_Hardcoded_Refs, ReferenceAcosLayerTest,
::testing::Values(Builder {}
.input({{11}, element::f16, std::vector<ngraph::float16> {-1.f, -0.75f, -0.5f, -0.25f, -0.125f,
0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}})
.expected({{11}, element::f16, std::vector<ngraph::float16> {3.14159265f, 2.41885841f, 2.09439510f, 1.82347658f, 1.69612416f,
1.57079633f, 1.44546850f, 1.31811607f, 1.04719755f, 0.72273425f,
0.00000000f}}),
Builder {}
.input({{11}, element::f32, std::vector<float> {-1.f, -0.75f, -0.5f, -0.25f, -0.125f,
0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}})
.expected({{11}, element::f32, std::vector<float> {3.14159265f, 2.41885841f, 2.09439510f, 1.82347658f, 1.69612416f,
1.57079633f, 1.44546850f, 1.31811607f, 1.04719755f, 0.72273425f,
0.00000000f}}),
Builder {}
.input({{3}, element::i32, std::vector<int32_t> {-1, 0, 1}})
.expected({{3}, element::i32, std::vector<int32_t> {3, 1, 0}}),
Builder {}
.input({{3}, element::i64, std::vector<int64_t> {-1, 0, 1}})
.expected({{3}, element::i64, std::vector<int64_t> {3, 1, 0}}),
Builder {}
.input({{2}, element::u32, std::vector<uint32_t> {0, 1}})
.expected({{2}, element::u32, std::vector<uint32_t> {1, 0}}),
Builder {}
.input({{2}, element::u64, std::vector<uint64_t> {0, 1}})
.expected({{2}, element::u64, std::vector<uint64_t> {1, 0}})),
ReferenceAcosLayerTest::getTestCaseName);
} // namespace reference_tests
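For reference, the expected values hard-coded above are simply std::acos applied to each input, with the result truncated toward zero for the integer and unsigned cases (e.g. acos(-1) = 3.14159... -> 3, acos(0) = 1.57... -> 1, acos(1) = 0). A minimal standalone sketch (not part of the commit) that regenerates the floating-point reference data:

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // Same inputs as the f16/f32 test cases above.
    const std::vector<float> inputs = {-1.f, -0.75f, -0.5f, -0.25f, -0.125f,
                                       0.f,  0.125f, 0.25f, 0.5f,   0.75f, 1.f};
    for (float x : inputs)
        // Compute in double so the printed values match the literals above,
        // e.g. 3.14159265f ... 0.00000000f.
        std::printf("%.8ff\n", std::acos(static_cast<double>(x)));
    return 0;
}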


@@ -36,6 +36,8 @@ bool ngraph::pass::GenerateMappingFile::run_on_function(std::shared_ptr<ngraph::
for (auto && node : f->get_ordered_ops()) {
uint64_t ie_port_index{node->inputs().size()};
uint64_t ng_port_index{0};
if (std::dynamic_pointer_cast<ov::op::v0::Result>(node))
continue;
for (auto && output : node->outputs()) {
const auto & node_name = node->get_friendly_name();
const auto & t = output.get_tensor_ptr();


@@ -764,8 +764,19 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
.set_value(get_precision_name(o.get_element_type()).c_str());
// Sort tensor names
// Skip autogenerated names
// TODO: remove this code after transformation fixes
const auto& autogenerated_name = [](const std::string& name) {
if (name.rfind("Tensor_", 0) != 0)
return false;
return true;
};
const auto & tensor_names = o.get_tensor().get_names();
std::vector<std::string> vector_names(tensor_names.begin(), tensor_names.end());
std::vector<std::string> vector_names;
for (const auto& name : tensor_names) {
if (!autogenerated_name(name))
vector_names.emplace_back(name);
}
sort(vector_names.begin(), vector_names.end());
std::string names;


@@ -103,7 +103,6 @@ protected:
// static rank only in cases when the second input is Concat
std::vector<ngraph::Dimension> broadcastOutShape(shapeOfNode->get_output_shape(0)[0], ngraph::Dimension::dynamic());
broadcast->set_output_type(0, tensorParam->get_output_element_type(0), ngraph::PartialShape(broadcastOutShape));
function->get_result()->set_output_type(0, tensorParam->get_output_element_type(0), targetShape);
const auto transformations = vpu::Transformations{{ngraph::opset3::Broadcast::type_info, vpu::dynamicToStaticShapeBroadcast}};
vpu::DynamicToStaticShape(transformations).run_on_function(function);


@@ -741,11 +741,19 @@ void Comparator::compare_inputs(ngraph::Node* node1, ngraph::Node* node2, std::o
}
void Comparator::compare_outputs(ngraph::Node* node1, ngraph::Node* node2, std::ostream& err_log) {
// Some transformations create new tensors with autogenerated names
const auto& autogenerated_names = [](const std::unordered_set<std::string>& names) {
for (const auto& name : names) {
if (name.rfind("Tensor_", 0) != 0)
return false;
}
return true;
};
for (int i = 0; i < node1->outputs().size(); ++i) {
const auto& tensor1 = node1->output(i).get_tensor();
const auto& tensor2 = node2->output(i).get_tensor();
if (tensor1.get_names() != tensor2.get_names()) {
if (tensor1.get_names() != tensor2.get_names() && (!autogenerated_names(tensor1.get_names()) || !autogenerated_names(tensor2.get_names()))) {
err_log << "Output tensors names " << tensor_names(tensor1) << " and "
<< tensor_names(tensor2)
<< " are different for nodes: " << node1->get_friendly_name() << " and "


@@ -131,16 +131,21 @@ static std::shared_ptr<ngraph::Function> create_simple_function() {
// Create opset6::Parameter operation with static shape
auto data = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::i8, ngraph::Shape{3, 1, 2});
data->set_friendly_name("Parameter");
data->get_output_tensor(0).set_names({"parameter"});
auto mul_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {3});
mul_constant->set_friendly_name("mul_constant");
mul_constant->get_output_tensor(0).set_names({"mul_constant"});
auto mul = std::make_shared<ngraph::opset6::Multiply>(data, mul_constant);
mul->set_friendly_name("mul");
mul->get_output_tensor(0).set_names({"mul"});
auto add_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {2});
add_constant->set_friendly_name("add_constant");
add_constant->get_output_tensor(0).set_names({"add_constant"});
auto add = std::make_shared<ngraph::opset6::Add>(mul, add_constant);
add->set_friendly_name("add");
add->get_output_tensor(0).set_names({"add"});
// Create opset6::Result operation
auto res = std::make_shared<ngraph::opset6::Result>(add);


@@ -13,12 +13,18 @@
namespace ov {
class Node;
namespace op {
namespace v0 {
class Result;
} // namespace v0
} // namespace op
namespace descriptor {
class Output;
// Describes a tensor that is an input to an op, directly or indirectly via a tuple
class OPENVINO_API Input {
friend class ov::Node;
friend class ov::op::v0::Result;
public:
/// \param node The node that owns this input


@@ -97,7 +97,7 @@ protected:
// const PartialShape& descriptor::Tensor::get_shape() const
// It was decided to move the m_shape and m_partial_shape synchronization point there and
// to keep the method signatures backward compatible.
mutable std::mutex shape_mutex;
mutable std::mutex m_mutex;
mutable std::atomic_bool m_shape_changed;
mutable Shape m_shape;
// TODO: end
@@ -105,7 +105,10 @@ protected:
PartialShape m_partial_shape;
ngraph::HostTensorPtr m_lower_value, m_upper_value;
std::string m_name;
std::unordered_set<std::string> m_names;
mutable std::atomic_bool m_names_changing{false};
mutable std::unordered_set<std::string> m_names;
static std::atomic<size_t> m_next_instance_id;
std::map<std::string, std::shared_ptr<Variant>> m_rt_info;
};


@@ -92,7 +92,24 @@ public:
/// Return the op that generates output i
std::shared_ptr<ngraph::Node> get_output_op(size_t i) const;
ngraph::Output<ngraph::Node> output(size_t i) const;
/// Output functions
std::vector<ov::Output<ov::Node>> outputs();
ov::Output<ov::Node> output();
ov::Output<ov::Node> output(size_t i);
ov::Output<ov::Node> output(const std::string& tensor_name);
std::vector<ov::Output<const ov::Node>> outputs() const;
ov::Output<const ov::Node> output() const;
ov::Output<const ov::Node> output(size_t i) const;
ov::Output<const ov::Node> output(const std::string& tensor_name) const;
/// Input functions
std::vector<ov::Output<ov::Node>> inputs();
ov::Output<ov::Node> input();
ov::Output<ov::Node> input(size_t i);
ov::Output<ov::Node> input(const std::string& tensor_name);
std::vector<ov::Output<const ov::Node>> inputs() const;
ov::Output<const ov::Node> input() const;
ov::Output<const ov::Node> input(size_t i) const;
ov::Output<const ov::Node> input(const std::string& tensor_name) const;
/// Return the element type of output i
const ngraph::element::Type& get_output_element_type(size_t i) const;
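A brief usage sketch of the new accessors declared above (assuming a hypothetical Function f with a single Parameter and a single Result; the behaviour is exercised by the new tests in ngraph/test/function.cpp further down):

// f is a std::shared_ptr<ov::Function> with exactly one input and one output.
ov::Output<ov::Node> in  = f->input();           // sole input; throws ov::Exception if there are several
ov::Output<ov::Node> out = f->output(0);         // output by index
ov::Output<ov::Node> t   = f->output("relu_t");  // output by tensor name; throws if the name is unknown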


@@ -132,20 +132,10 @@ class OPENVINO_API Node : public std::enable_shared_from_this<Node> {
template <typename NodeType>
friend class Output;
public:
/// \brief Verifies that attributes and inputs are consistent and computes output shapes
/// and element types. Must be implemented by concrete child classes so that it
/// can be run any number of times.
///
/// Throws if the node is invalid.
virtual void validate_and_infer_types();
// Called in constructors during transition
void constructor_validate_and_infer_types();
using type_info_t = DiscreteTypeInfo;
protected:
descriptor::Input& get_input_descriptor(size_t position);
descriptor::Output& get_output_descriptor(size_t position);
/// \brief Construct an uninitialized Node
Node() = default;
/// \brief Copying a node
@@ -190,6 +180,18 @@ protected:
void set_input_is_relevant_to_value(size_t i, bool relevant = true);
public:
/// \brief Verifies that attributes and inputs are consistent and computes output shapes
/// and element types. Must be implemented by concrete child classes so that it
/// can be run any number of times.
///
/// Throws if the node is invalid.
virtual void validate_and_infer_types();
// Called in constructors during transition
void constructor_validate_and_infer_types();
using type_info_t = DiscreteTypeInfo;
virtual ~Node();
virtual bool visit_attributes(AttributeVisitor&) {
@@ -377,7 +379,6 @@ public:
std::shared_ptr<Node> get_input_node_shared_ptr(size_t index) const;
Output<Node> get_input_source_output(size_t i) const;
public:
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const = 0;
std::shared_ptr<Node> copy_with_new_inputs(const OutputVector& new_args) const;
@@ -496,15 +497,13 @@ public:
virtual bool match_node(ov::pass::pattern::Matcher* matcher, const Output<Node>& graph_value);
private:
descriptor::Input& get_input_descriptor(size_t position);
descriptor::Output& get_output_descriptor(size_t position);
std::vector<Node*> m_control_dependents;
std::vector<std::shared_ptr<Node>> m_control_dependencies;
std::string m_node_type;
size_t m_instance_id{m_next_instance_id.fetch_add(1)};
std::string m_friendly_name;
std::string m_unique_name;
mutable std::string m_unique_name;
mutable std::atomic_bool m_name_changing{false};
static std::atomic<size_t> m_next_instance_id;
std::unordered_set<std::string> m_provenance_tags;
std::set<std::shared_ptr<Node>> m_provenance_group;
@@ -514,7 +513,6 @@ private:
std::shared_ptr<ngraph::op::util::OpAnnotations> m_op_annotations;
OPENVINO_SUPPRESS_DEPRECATED_END
std::map<std::string, std::shared_ptr<Variant>> m_rt_info;
mutable std::mutex m_mutex;
};
using NodeTypeInfo = Node::type_info_t;


@@ -120,7 +120,7 @@ public:
/// \brief Constructs a Output, referencing the zeroth output of the node.
/// \param node A `shared_ptr` to the node for the output handle.
template <typename T>
Output(const std::shared_ptr<T>& node) : Output(node ? node->get_default_output() : Output<const Node>()) {}
Output(const std::shared_ptr<const T>& node) : Output(node ? node->get_default_output() : Output<const Node>()) {}
/// A null output
Output() = default;


@@ -27,11 +27,11 @@ static bool call(const HostTensorVector& func_outputs,
}
}
std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> results_map;
// map function outputs -> HostTensor
for (size_t output_count = 0; output_count < function->get_results().size(); ++output_count) {
auto output = function->get_results()[output_count];
descriptor::Tensor* tensor = &output->get_output_tensor(0);
tensor_map.insert({tensor, func_outputs[output_count]});
results_map[output] = output_count;
}
// for each ordered op in the graph
@@ -53,7 +53,9 @@
descriptor::Tensor* tensor = &op->output(i).get_tensor();
std::shared_ptr<HostTensor> host_tensor;
auto it = tensor_map.find(tensor);
if (it == tensor_map.end()) {
if (op::is_output(op)) {
host_tensor = func_outputs[results_map[op]];
} else if (it == tensor_map.end()) {
host_tensor = std::make_shared<HostTensor>(op->output(i));
tensor_map.insert({tensor, host_tensor});
} else {


@@ -0,0 +1,27 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <atomic>
namespace ov {
// The class AtomicGuard is an atomic wrapper that provides a convenient RAII-style mechanism for emulating a mutex
class AtomicGuard {
public:
AtomicGuard(std::atomic_bool& b) : m_atomic(b) {
bool exp = false;
while (!m_atomic.compare_exchange_strong(exp, true)) {
exp = false;
}
}
~AtomicGuard() {
m_atomic = false;
}
private:
std::atomic_bool& m_atomic;
};
} // namespace ov
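A minimal usage sketch of AtomicGuard (hypothetical shared counter, not part of the commit; the real concurrency check is the check_atomic_guard test further down). The constructor spins on compare_exchange_strong until it flips the flag from false to true, so only one thread at a time gets past it, and the destructor releases the flag:

#include <atomic>
// #include "atomic_guard.hpp"  // provides ov::AtomicGuard as defined above

static std::atomic_bool busy{false};
static int counter = 0;

void bump() {
    ov::AtomicGuard lock(busy);  // spin until this thread owns the flag
    ++counter;                   // critical section: at most one thread here
}                                // flag reset to false when 'lock' goes out of scope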


@@ -4,9 +4,11 @@
#include "openvino/core/descriptor/tensor.hpp"
#include "atomic_guard.hpp"
#include "ngraph/node.hpp"
using namespace std;
atomic<size_t> ov::descriptor::Tensor::m_next_instance_id(0);
ov::descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name)
: m_element_type(element_type),
@@ -58,7 +60,7 @@ void ov::descriptor::Tensor::set_upper_value(const ngraph::HostTensorPtr& value)
const ov::Shape& ov::descriptor::Tensor::get_shape() const {
if (m_partial_shape.is_static()) {
if (m_shape_changed.load(std::memory_order_relaxed)) {
std::lock_guard<std::mutex> guard(shape_mutex);
std::lock_guard<std::mutex> guard(m_mutex);
if (m_shape_changed) // double check after mutex lock
{
m_shape = m_partial_shape.to_shape();
@@ -90,6 +92,9 @@ const std::string& ov::descriptor::Tensor::get_name() const {
NGRAPH_SUPPRESS_DEPRECATED_END
const std::unordered_set<std::string>& ov::descriptor::Tensor::get_names() const {
AtomicGuard lock(m_names_changing);
if (m_names.empty())
m_names.insert("Tensor_" + to_string(m_next_instance_id.fetch_add(1)));
return m_names;
}


@@ -197,8 +197,24 @@ void ov::Function::validate_nodes_and_infer_types() const {
std::map<ov::op::util::Variable*, Counter> pair_checker;
std::stringstream unregistered_parameters;
std::stringstream unregistered_variables;
// TODO: enable tensor names check after fixes in transformations
// std::unordered_set<std::string> tensor_names;
std::unordered_set<const ov::descriptor::Tensor*> tensors;
for (auto& node : get_ordered_ops()) {
node->revalidate_and_infer_types();
for (const auto& output : node->outputs()) {
const auto& tensor = output.get_tensor();
// Skip Result output tensors because result_input_tensor == result_output_tensor
if (tensors.count(&tensor))
continue;
tensors.insert(&tensor);
// for (const auto& name : output.get_tensor().get_names()) {
// if (tensor_names.count(name))
// throw ov::Exception("Function is incorrect. All Tensors should have unique names. " + name +
// " is not unique.");
// tensor_names.insert(name);
// }
}
if (op::util::is_parameter(node) &&
std::find(m_parameters.begin(), m_parameters.end(), node) == m_parameters.end())
unregistered_parameters << node << std::endl;
@@ -313,10 +329,6 @@ shared_ptr<ov::Node> ov::Function::get_output_op(size_t i) const {
return m_results.at(i);
}
ov::Output<ov::Node> ov::Function::output(size_t i) const {
return m_results.at(i);
}
shared_ptr<ov::Node> ov::Function::get_result() const {
if (m_results.size() != 1) {
throw ov::Exception("get_result() must be called on a function with exactly one result.");
@@ -518,3 +530,113 @@ ov::op::util::Variable::Ptr ov::Function::get_variable_by_id(const string& varia
else
return ov::op::util::Variable::Ptr();
}
/// Output functions
std::vector<ov::Output<const ov::Node>> ov::Function::outputs() const {
std::vector<ov::Output<const ov::Node>> results;
for (const auto& res : m_results) {
std::shared_ptr<const ov::Node> result = res;
results.emplace_back(result);
}
return results;
}
ov::Output<const ov::Node> ov::Function::output() const {
if (m_results.size() != 1) {
throw ov::Exception("output() must be called on a function with exactly one result.");
}
std::shared_ptr<const ov::Node> result = m_results.at(0);
return result;
}
ov::Output<const ov::Node> ov::Function::output(size_t i) const {
std::shared_ptr<const ov::Node> result = m_results.at(i);
return result;
}
ov::Output<const ov::Node> ov::Function::output(const std::string& tensor_name) const {
for (const auto& res : m_results) {
if (res->get_input_tensor(0).get_names().count(tensor_name)) {
std::shared_ptr<const ov::Node> result = res;
return result;
}
}
throw ov::Exception("Output for tensor name " + tensor_name + " was not found.");
}
std::vector<ov::Output<ov::Node>> ov::Function::outputs() {
std::vector<ov::Output<ov::Node>> results;
for (const auto& result : m_results) {
results.emplace_back(result);
}
return results;
}
ov::Output<ov::Node> ov::Function::output() {
if (m_results.size() != 1) {
throw ov::Exception("output() must be called on a function with exactly one result.");
}
return m_results.at(0);
}
ov::Output<ov::Node> ov::Function::output(size_t i) {
return m_results.at(i);
}
ov::Output<ov::Node> ov::Function::output(const std::string& tensor_name) {
for (const auto& res : m_results) {
if (res->get_input_tensor(0).get_names().count(tensor_name))
return res;
}
throw ov::Exception("Output for tensor name " + tensor_name + " was not found.");
}
/// Input functions
std::vector<ov::Output<const ov::Node>> ov::Function::inputs() const {
std::vector<ov::Output<const ov::Node>> inputs;
for (const auto& input : m_parameters) {
std::shared_ptr<const ov::Node> parameter = input;
inputs.emplace_back(parameter);
}
return inputs;
}
ov::Output<const ov::Node> ov::Function::input() const {
if (m_parameters.size() != 1) {
throw ov::Exception("input() must be called on a function with exactly one parameter.");
}
std::shared_ptr<const ov::Node> parameter = m_parameters.at(0);
return parameter;
}
ov::Output<const ov::Node> ov::Function::input(size_t i) const {
std::shared_ptr<const ov::Node> parameter = m_parameters.at(i);
return parameter;
}
ov::Output<const ov::Node> ov::Function::input(const std::string& tensor_name) const {
for (const auto& param : m_parameters) {
if (param->get_output_tensor(0).get_names().count(tensor_name)) {
std::shared_ptr<const ov::Node> parameter = param;
return parameter;
}
}
throw ov::Exception("Input for tensor name " + tensor_name + " was not found.");
}
std::vector<ov::Output<ov::Node>> ov::Function::inputs() {
std::vector<ov::Output<ov::Node>> inputs;
for (const auto& input : m_parameters) {
inputs.emplace_back(input);
}
return inputs;
}
ov::Output<ov::Node> ov::Function::input() {
if (m_parameters.size() != 1) {
throw ov::Exception("input() must be called on a function with exactly one parameter.");
}
return m_parameters.at(0);
}
ov::Output<ov::Node> ov::Function::input(size_t i) {
return m_parameters.at(i);
}
ov::Output<ov::Node> ov::Function::input(const std::string& tensor_name) {
for (const auto& param : m_parameters) {
if (param->get_output_tensor(0).get_names().count(tensor_name))
return param;
}
throw ov::Exception("Input for tensor name " + tensor_name + " was not found.");
}


@@ -10,6 +10,7 @@
#include <typeindex>
#include <typeinfo>
#include "atomic_guard.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/constant.hpp"
@@ -238,11 +239,9 @@ const std::string& ov::Node::get_friendly_name() const {
}
const std::string& ov::Node::get_name() const {
if (m_unique_name.empty()) {
std::lock_guard<std::mutex> lock(m_mutex);
if (m_unique_name.empty())
const_cast<Node*>(this)->m_unique_name = description() + "_" + to_string(m_instance_id);
}
AtomicGuard lock(m_name_changing);
if (m_unique_name.empty())
m_unique_name = description() + "_" + to_string(m_instance_id);
return m_unique_name;
}


@@ -43,7 +43,6 @@ bool evaluate_acos(const ov::HostTensorPtr& arg0, const ov::HostTensorPtr& out,
out->set_unary(arg0);
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_acos, boolean, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i32, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, i64, arg0, out, count);
NGRAPH_TYPE_CASE(evaluate_acos, u32, arg0, out, count);
@@ -72,7 +71,6 @@ bool ov::op::v0::Acos::has_evaluate() const {
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case ngraph::element::boolean:
return true;
default:
break;


@@ -32,7 +32,10 @@ void op::Result::validate_and_infer_types() {
NGRAPH_OP_SCOPE(v0_Result_validate_and_infer_types);
NODE_VALIDATION_CHECK(this, get_input_size() == 1, "Argument has ", get_input_size(), " outputs (1 expected).");
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
// Result doesn't change the in/out tensors
auto& output = get_output_descriptor(0);
auto& input = get_input_descriptor(0);
output.set_tensor_ptr(input.get_tensor_ptr());
}
shared_ptr<Node> op::Result::clone_with_new_inputs(const OutputVector& new_args) const {


@@ -39,6 +39,7 @@ set(SRC
eval.cpp
file_util.cpp
float16.cpp
function.cpp
graph_rewrite.cpp
includes.cpp
input_output_assign.cpp
@@ -244,6 +245,7 @@ set(SRC
visitors/partial_shape.cpp
visitors/user_op.cpp
visitors/value_map.cpp
visitors/op/acos.cpp
visitors/op/acosh.cpp
visitors/op/adaptive_avg_pool.cpp
visitors/op/adaptive_max_pool.cpp
@@ -410,7 +412,6 @@ add_subdirectory(util)
set(MULTI_TEST_SRC
backend/abc.in.cpp
backend/abs.in.cpp
backend/acos.in.cpp
backend/adaptive_avg_pool.in.cpp
backend/adaptive_max_pool.in.cpp
backend/add.in.cpp
@@ -590,6 +591,9 @@ add_executable(unit-test ${SRC})
target_include_directories(unit-test PRIVATE ".")
target_include_directories(unit-test PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/runtime)
get_target_property(NGRAPH_SRC_DIR openvino::core SOURCE_DIR)
target_include_directories(unit-test PRIVATE ${NGRAPH_SRC_DIR}/src)
add_definitions("-DCURDIR=\"${CMAKE_CURRENT_SOURCE_DIR}\"")
add_definitions("-DJSON_INCLUDES=\"${JSON_INCLUDE_DIR}\"")


@@ -1,54 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
NGRAPH_TEST(${BACKEND_NAME}, acos) {
Shape shape{11};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Acos>(A), ParameterVector{A});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>({-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f});
test_case.add_expected_output<float>(shape,
{3.14159265f,
2.41885841f,
2.09439510f,
1.82347658f,
1.69612416f,
1.57079633f,
1.44546850f,
1.31811607f,
1.04719755f,
0.72273425f,
0.00000000f});
test_case.run();
}

ngraph/test/function.cpp

@@ -0,0 +1,513 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/function.hpp"
#include <gtest/gtest.h>
#include "openvino/opsets/opset8.hpp"
TEST(function, get_input_by_tensor_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input("input");
ASSERT_EQ(input.get_node(), arg0.get());
ASSERT_EQ(input.get_element_type(), ov::element::f32);
ASSERT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_by_tensor_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
const std::unordered_set<std::string> out_names = {"relu_t", "identity"};
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names(out_names);
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output("relu_t");
ASSERT_EQ(output.get_tensor().get_names().size(), 2);
ASSERT_EQ(output.get_tensor().get_names(), out_names);
ASSERT_EQ(output.get_node(), result.get());
ASSERT_EQ(f->output("identity"), output);
ASSERT_EQ(output.get_element_type(), ov::element::f32);
ASSERT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_incorrect_output_by_tensor_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->output("input"), ov::Exception);
}
TEST(function, get_incorrect_input_by_tensor_name) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->input("relu_t"), ov::Exception);
}
TEST(function, get_input_by_index) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input(0);
ASSERT_EQ(input.get_node(), arg0.get());
ASSERT_EQ(input.get_element_type(), ov::element::f32);
ASSERT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_by_index) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output(0);
ASSERT_EQ(output.get_node(), result.get());
ASSERT_EQ(output.get_element_type(), ov::element::f32);
ASSERT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_input_without_index) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input();
ASSERT_EQ(input.get_node(), arg0.get());
ASSERT_EQ(input.get_element_type(), ov::element::f32);
ASSERT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_without_index) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output();
ASSERT_EQ(output.get_node(), result.get());
ASSERT_EQ(output.get_element_type(), ov::element::f32);
ASSERT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_incorrect_output_by_index) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->output(2), std::exception);
}
TEST(function, get_incorrect_input_by_index) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->input(2), std::exception);
}
TEST(function, incorrect_multiple_inputs_outputs_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 3, 3});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input1"});
auto arg1 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 3, 3});
arg1->set_friendly_name("data1");
arg1->get_output_tensor(0).set_names({"input2", "data1"});
auto concat = std::make_shared<ov::opset8::Concat>(ov::NodeVector{arg0, arg1}, 1);
concat->set_friendly_name("concat");
concat->get_output_tensor(0).set_names({"concat_t"});
auto result1 = std::make_shared<ov::opset8::Result>(concat);
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto result2 = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<ov::Function>(ov::ResultVector{result1, result2}, ov::ParameterVector{arg0, arg1});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->input(), ov::Exception);
ASSERT_THROW(f->output(), ov::Exception);
}
TEST(function, multiple_inputs_outputs_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 3, 3});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input1"});
auto arg1 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 3, 3});
arg1->set_friendly_name("data1");
arg1->get_output_tensor(0).set_names({"input2", "data1"});
auto concat = std::make_shared<ov::opset8::Concat>(ov::NodeVector{arg0, arg1}, 1);
concat->set_friendly_name("concat");
concat->get_output_tensor(0).set_names({"concat_t"});
auto result1 = std::make_shared<ov::opset8::Result>(concat);
auto shape_of = std::make_shared<ov::opset8::ShapeOf>(concat);
shape_of->set_friendly_name("shape_of");
shape_of->get_output_tensor(0).set_names({"shape_of_t", "identity"});
auto result2 = std::make_shared<ov::opset8::Result>(shape_of);
auto f = std::make_shared<ov::Function>(ov::ResultVector{result1, result2}, ov::ParameterVector{arg0, arg1});
f->validate_nodes_and_infer_types();
auto input1 = f->input(0);
auto input2 = f->input("data1");
ASSERT_NE(input1, input2);
ASSERT_EQ(input1, f->input("input1"));
ASSERT_EQ(input2, f->input("input2"));
ASSERT_EQ(input2, f->input(1));
ASSERT_EQ(input1.get_node(), arg0.get());
ASSERT_EQ(input2.get_node_shared_ptr(), arg1);
auto output1 = f->output(0);
auto output2 = f->output("shape_of_t");
ASSERT_NE(output1, output2);
ASSERT_EQ(output1, f->output("concat_t"));
ASSERT_EQ(output2, f->output("identity"));
ASSERT_EQ(output2, f->output(1));
ASSERT_EQ(arg0.get(), f->input(0).get_node());
ASSERT_EQ(arg1.get(), f->input(1).get_node());
ASSERT_EQ(result1.get(), f->output(0).get_node());
ASSERT_EQ(result2.get(), f->output(1).get_node());
ASSERT_EQ(output1, result1);
ASSERT_EQ(output2, result2);
ASSERT_EQ(f->inputs().size(), 2);
ASSERT_EQ(f->outputs().size(), 2);
}
TEST(function, DISABLED_create_function_with_incorrect_tensor_names) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"input"});
auto f = std::make_shared<ov::Function>(relu, ov::ParameterVector{arg0});
ASSERT_THROW(f->validate_nodes_and_infer_types(), ov::Exception);
}
TEST(function, get_input_by_tensor_name_from_const) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input("input");
ASSERT_EQ(input.get_node(), arg0.get());
ASSERT_EQ(input.get_element_type(), ov::element::f32);
ASSERT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_by_tensor_name_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
const std::unordered_set<std::string> out_names = {"relu_t", "identity"};
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names(out_names);
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<const ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output("relu_t");
ASSERT_EQ(output.get_tensor().get_names().size(), 2);
ASSERT_EQ(output.get_tensor().get_names(), out_names);
ASSERT_EQ(output.get_node(), result.get());
ASSERT_EQ(f->output("identity"), output);
ASSERT_EQ(output.get_element_type(), ov::element::f32);
ASSERT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_incorrect_output_by_tensor_name_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->output("input"), ov::Exception);
}
TEST(function, get_incorrect_input_by_tensor_name_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->input("relu_t"), ov::Exception);
}
TEST(function, get_input_by_index_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input(0);
ASSERT_EQ(input.get_node(), arg0.get());
ASSERT_EQ(input.get_element_type(), ov::element::f32);
ASSERT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_by_index_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<const ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output(0);
ASSERT_EQ(output.get_node(), result.get());
ASSERT_EQ(output.get_element_type(), ov::element::f32);
ASSERT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_input_without_index_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto input = f->input();
ASSERT_EQ(input.get_node(), arg0.get());
ASSERT_EQ(input.get_element_type(), ov::element::f32);
ASSERT_EQ(input.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_output_without_index_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto result = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<const ov::Function>(result, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
auto output = f->output();
ASSERT_EQ(output.get_node(), result.get());
ASSERT_EQ(output.get_element_type(), ov::element::f32);
ASSERT_EQ(output.get_partial_shape(), ov::PartialShape{1});
}
TEST(function, get_incorrect_output_by_index_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->output(2), std::exception);
}
TEST(function, get_incorrect_input_by_index_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->input(2), std::exception);
}
TEST(function, incorrect_multiple_inputs_outputs_function_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 3, 3});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input1"});
auto arg1 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 3, 3});
arg1->set_friendly_name("data1");
arg1->get_output_tensor(0).set_names({"input2", "data1"});
auto concat = std::make_shared<ov::opset8::Concat>(ov::NodeVector{arg0, arg1}, 1);
concat->set_friendly_name("concat");
concat->get_output_tensor(0).set_names({"concat_t"});
auto result1 = std::make_shared<ov::opset8::Result>(concat);
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto result2 = std::make_shared<ov::opset8::Result>(relu);
auto f = std::make_shared<const ov::Function>(ov::ResultVector{result1, result2}, ov::ParameterVector{arg0, arg1});
f->validate_nodes_and_infer_types();
ASSERT_THROW(f->input(), ov::Exception);
ASSERT_THROW(f->output(), ov::Exception);
}
TEST(function, multiple_inputs_outputs_function_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 3, 3});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input1"});
auto arg1 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 3, 3});
arg1->set_friendly_name("data1");
arg1->get_output_tensor(0).set_names({"input2", "data1"});
auto concat = std::make_shared<ov::opset8::Concat>(ov::NodeVector{arg0, arg1}, 1);
concat->set_friendly_name("concat");
concat->get_output_tensor(0).set_names({"concat_t"});
auto result1 = std::make_shared<ov::opset8::Result>(concat);
auto shape_of = std::make_shared<ov::opset8::ShapeOf>(concat);
shape_of->set_friendly_name("shape_of");
shape_of->get_output_tensor(0).set_names({"shape_of_t", "identity"});
auto result2 = std::make_shared<ov::opset8::Result>(shape_of);
auto f = std::make_shared<const ov::Function>(ov::ResultVector{result1, result2}, ov::ParameterVector{arg0, arg1});
f->validate_nodes_and_infer_types();
auto input1 = f->input(0);
auto input2 = f->input("data1");
ASSERT_NE(input1, input2);
ASSERT_EQ(input1, f->input("input1"));
ASSERT_EQ(input2, f->input("input2"));
ASSERT_EQ(input2, f->input(1));
ASSERT_EQ(input1.get_node(), arg0.get());
ASSERT_EQ(input2.get_node_shared_ptr(), arg1);
auto output1 = f->output(0);
auto output2 = f->output("shape_of_t");
ASSERT_NE(output1, output2);
ASSERT_EQ(output1, f->output("concat_t"));
ASSERT_EQ(output2, f->output("identity"));
ASSERT_EQ(arg0.get(), f->input(0).get_node());
ASSERT_EQ(arg1.get(), f->input(1).get_node());
ASSERT_EQ(result1.get(), f->output(0).get_node());
ASSERT_EQ(result2.get(), f->output(1).get_node());
ASSERT_EQ(output2, f->output(1));
ASSERT_EQ(output1.get_node(), result1.get());
ASSERT_EQ(output2.get_node(), result2.get());
ASSERT_EQ(f->inputs().size(), 2);
ASSERT_EQ(f->outputs().size(), 2);
}
TEST(function, DISABLED_create_function_with_incorrect_tensor_names_from_const_function) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"input"});
auto f = std::make_shared<const ov::Function>(relu, ov::ParameterVector{arg0});
ASSERT_THROW(f->validate_nodes_and_infer_types(), ov::Exception);
}


@@ -93,16 +93,12 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
}
}
std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> results_map;
// map function outputs -> HostTensor
for (size_t output_count = 0; output_count < get_results().size(); ++output_count)
{
auto output = get_results()[output_count];
if (!ov::is_type<op::Result>(output))
{
throw ngraph_error("One of function's outputs isn't op::Result");
}
descriptor::Tensor* tensor = &output->get_output_tensor(0);
tensor_map.insert({tensor, func_outputs[output_count]});
results_map[output] = output_count;
}
// for each ordered op in the graph
@@ -136,8 +132,9 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
descriptor::Tensor* tensor = &op->output(i).get_tensor();
shared_ptr<HostTensor> host_tensor;
auto it = tensor_map.find(tensor);
if (it == tensor_map.end())
{
if (op::is_output(op)) {
host_tensor = func_outputs[results_map[op]];
} else if (it == tensor_map.end()) {
// Use cloned_node to create HostTensor with static dimensions
host_tensor = make_shared<HostTensor>(cloned_node->output(i));
tensor_map.insert({tensor, host_tensor});


@@ -35,3 +35,21 @@ TEST(tensor, tensor_names) {
ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), relu->get_output_tensor(0).get_names());
ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names());
}
TEST(tensor, generation_tensor_names) {
auto arg0 = make_shared<opset6::Parameter>(element::f32, Shape{1});
arg0->set_friendly_name("data");
auto relu = make_shared<opset6::Relu>(arg0);
relu->set_friendly_name("relu");
auto f0 = make_shared<Function>(relu, ParameterVector{arg0});
ASSERT_FALSE(arg0->get_output_tensor(0).get_names().empty());
ASSERT_FALSE(f0->get_result()->input_value(0).get_tensor().get_names().empty());
ASSERT_NE(arg0->get_output_tensor(0).get_names(), relu->get_output_tensor(0).get_names());
ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->get_input_tensor(0).get_names());
ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->input_value(0).get_tensor().get_names());
ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(), relu->get_output_tensor(0).get_names());
ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names());
}


@@ -2,10 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <chrono>
#include <mutex>
#include <thread>
#include <vector>
#include "atomic_guard.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
@@ -60,3 +62,28 @@ TEST(threading, get_friendly_name) {
th.join();
}
}
TEST(threading, check_atomic_guard) {
std::atomic_bool test_val{false};
int result = 2;
const auto& thread1_fun = [&]() {
ov::AtomicGuard lock(test_val);
std::chrono::milliseconds ms{2000};
std::this_thread::sleep_for(ms);
result += 3;
};
const auto& thread2_fun = [&]() {
std::chrono::milliseconds ms{500};
std::this_thread::sleep_for(ms);
ov::AtomicGuard lock(test_val);
result *= 3;
};
std::vector<std::thread> threads(2);
threads[0] = std::thread(thread1_fun);
threads[1] = std::thread(thread2_fun);
for (auto&& th : threads) {
th.join();
}
ASSERT_EQ(result, 15);
}


@@ -0,0 +1,9 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "unary_ops.hpp"
using Type = ::testing::Types<UnaryOperatorType<ngraph::op::Acos, ngraph::element::f32>>;
INSTANTIATE_TYPED_TEST_SUITE_P(visitor_without_attribute, UnaryOperatorVisitor, Type, UnaryOperatorTypeName);