Remove ngraph from new API (#7838)

This commit is contained in:
Ilya Churaev
2021-10-06 14:17:27 +03:00
committed by GitHub
parent cd675a56a4
commit 80e0bed855
54 changed files with 456 additions and 422 deletions

View File

@@ -226,16 +226,16 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetwor
exeNetwork->setNetworkInputs(copyInfo(constMapCast(inputs)));
exeNetwork->setNetworkOutputs(copyInfo(constMapCast(outputs)));
ngraph::ParameterVector parameters;
ngraph::ResultVector results;
ov::ParameterVector parameters;
ov::ResultVector results;
std::vector<ngraph::Output<ngraph::Node>> node_outputs;
for (auto&& input : inputs) {
auto tensor_desc = input.second->getTensorDesc();
auto dims = tensor_desc.getDims();
parameters.push_back(
std::make_shared<ngraph::op::v0::Parameter>(details::convertPrecision(tensor_desc.getPrecision()),
std::vector<ov::Dimension>{dims.begin(), dims.end()}));
std::make_shared<ov::op::v0::Parameter>(details::convertPrecision(tensor_desc.getPrecision()),
std::vector<ov::Dimension>{dims.begin(), dims.end()}));
parameters.back()->set_friendly_name(input.first);
node_outputs.push_back(parameters.back()->output(0));
}
@@ -261,16 +261,16 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetwor
IE_ASSERT(exeNetwork != nullptr);
IE_ASSERT(function != nullptr);
ngraph::ParameterVector parameters;
ngraph::ResultVector results;
ngraph::NodeVector nodes;
ov::ParameterVector parameters;
ov::ResultVector results;
ov::NodeVector nodes;
std::map<ngraph::Output<ngraph::Node>, ngraph::Output<ngraph::Node>> output_map;
for (auto&& node : function->get_ordered_ops()) {
ngraph::Node* new_node = nullptr;
if (ngraph::is_type<ngraph::op::Parameter>(node)) {
parameters.push_back(std::static_pointer_cast<ngraph::op::v0::Parameter>(node->clone_with_new_inputs({})));
if (ngraph::is_type<ov::op::v0::Parameter>(node)) {
parameters.push_back(std::static_pointer_cast<ov::op::v0::Parameter>(node->clone_with_new_inputs({})));
for (std::size_t i = 0; i < node->outputs().size(); ++i) {
output_map.emplace(node->output(i), parameters.back()->output(i));
}
@@ -280,7 +280,7 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetwor
for (auto&& input : node->inputs()) {
outputs.emplace_back(output_map.at(input.get_source_output()));
}
if (ngraph::is_type<ngraph::op::Result>(node)) {
if (ngraph::is_type<ov::op::v0::Result>(node)) {
results.push_back(
std::static_pointer_cast<ngraph::op::v0::Result>(node->clone_with_new_inputs(outputs)));
new_node = results.back().get();

View File

@@ -4,6 +4,14 @@
#pragma once
#include "ngraph/ngraph_visibility.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/assign.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/read_value.hpp"
#include "ngraph/op/result.hpp"
#include "ngraph/op/sink.hpp"
#include "ngraph/op/util/variable.hpp"
#include "openvino/core/function.hpp"
namespace ngraph {

View File

@@ -23,7 +23,8 @@ namespace ov {
namespace op {
namespace v0 {
class Parameter;
}
class Result;
} // namespace v0
} // namespace op
} // namespace ov
namespace ngraph {
@@ -31,7 +32,8 @@ namespace ngraph {
namespace op {
namespace v0 {
using ov::op::v0::Parameter;
}
using ov::op::v0::Result;
} // namespace v0
} // namespace op
using ov::clone_function;
@@ -129,7 +131,7 @@ std::list<std::shared_ptr<ngraph::Node>> clone_nodes(const std::vector<std::shar
NGRAPH_DEPRECATED("This method is deprecated and will be removed soon")
NGRAPH_API
std::pair<std::shared_ptr<op::Result>, std::shared_ptr<op::v0::Parameter>> insert_result_parameter_split(
std::pair<std::shared_ptr<op::v0::Result>, std::shared_ptr<op::v0::Parameter>> insert_result_parameter_split(
const std::shared_ptr<Node>& src_node,
const std::shared_ptr<Node>& dst_node);

View File

@@ -25,11 +25,13 @@
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/shape.hpp"
#include "openvino/core/enum_mask.hpp"
namespace ov {
class Node;
}
namespace ngraph {
using ov::EnumMask;
using ov::Node;
class stopwatch;
@@ -227,105 +229,6 @@ AxisVector get_default_order(const Shape& shape);
NGRAPH_API
AxisVector get_default_order(const PartialShape& shape);
//
// EnumMask is intended to work with a scoped enum type. It's used to store
// a combination of enum values and provides easy access and manipulation
// of these enum values as a mask.
//
// EnumMask does not provide a set_all() or invert() operator because they
// could do things unexpected by the user, i.e. for enum with 4 bit values,
// invert(001000...) != 110100..., due to the extra bits.
//
template <typename T>
class EnumMask {
public:
    /// Make sure the template type is an enum.
    static_assert(std::is_enum<T>::value, "EnumMask template type must be an enum");
    /// Extract the underlying type of the enum.
    typedef typename std::underlying_type<T>::type value_type;
    /// Some bit operations are not safe for signed values, we require enum
    /// type to use unsigned underlying type.
    static_assert(std::is_unsigned<value_type>::value, "EnumMask enum must use unsigned type.");
    /// Constructs an empty mask (no bits set).
    constexpr EnumMask() : m_value{0} {}
    /// Constructs a mask holding a single enum value.
    constexpr EnumMask(const T& enum_value) : m_value{static_cast<value_type>(enum_value)} {}
    EnumMask(const EnumMask& other) : m_value{other.m_value} {}
    /// Constructs a mask that is the bitwise OR of all listed enum values.
    EnumMask(std::initializer_list<T> enum_values) : m_value{0} {
        for (auto& v : enum_values) {
            m_value |= static_cast<value_type>(v);
        }
    }
    /// Returns the raw underlying bit mask.
    value_type value() const {
        return m_value;
    }
    /// Check if any of the input parameter enum bit mask match
    bool is_any_set(const EnumMask& p) const {
        return m_value & p.m_value;
    }
    /// Check if all of the input parameter enum bit mask match
    bool is_set(const EnumMask& p) const {
        return (m_value & p.m_value) == p.m_value;
    }
    /// Check if any of the input parameter enum bit mask does not match
    bool is_any_clear(const EnumMask& p) const {
        return !is_set(p);
    }
    /// Check if all of the input parameter enum bit mask do not match
    bool is_clear(const EnumMask& p) const {
        return !is_any_set(p);
    }
    /// Adds all bits of p to this mask.
    void set(const EnumMask& p) {
        m_value |= p.m_value;
    }
    /// Removes all bits of p from this mask.
    void clear(const EnumMask& p) {
        m_value &= ~p.m_value;
    }
    /// Resets the mask to empty (no bits set).
    void clear_all() {
        m_value = 0;
    }
    /// Equivalent to is_set(p).
    bool operator[](const EnumMask& p) const {
        return is_set(p);
    }
    bool operator==(const EnumMask& other) const {
        return m_value == other.m_value;
    }
    bool operator!=(const EnumMask& other) const {
        return m_value != other.m_value;
    }
    EnumMask& operator=(const EnumMask& other) {
        m_value = other.m_value;
        return *this;
    }
    EnumMask& operator&=(const EnumMask& other) {
        m_value &= other.m_value;
        return *this;
    }
    EnumMask& operator|=(const EnumMask& other) {
        m_value |= other.m_value;
        return *this;
    }
    EnumMask operator&(const EnumMask& other) const {
        return EnumMask(m_value & other.m_value);
    }
    EnumMask operator|(const EnumMask& other) const {
        return EnumMask(m_value | other.m_value);
    }
    /// Streams the numeric value of the mask.
    friend std::ostream& operator<<(std::ostream& os, const EnumMask& m) {
        os << m.m_value;
        return os;
    }
private:
    /// Only used internally
    explicit EnumMask(const value_type& value) : m_value{value} {}
    value_type m_value;
};
/// \brief Function to query parsed version information of the version of ngraph which
/// contains this function. Version information strictly follows Semantic Versioning
/// http://semver.org

View File

@@ -94,7 +94,7 @@ public:
/// \brief Hook for adapters that need visitor access
virtual void on_adapter(const std::string& name, VisitorAdapter& adapter);
/// \brief Provides API to handle nGraph Function attribute type, accessed as ValueAccessor
/// \brief Provides API to handle openvino Function attribute type, accessed as ValueAccessor
/// \param name attribute name
/// \param adapter reference to a Function ValueAccessor<VAT>
virtual void on_adapter(const std::string& name, ValueAccessor<std::shared_ptr<ov::Function>>& adapter);

View File

@@ -7,8 +7,8 @@
#include "openvino/core/visibility.hpp"
#define OV_NEW_API 1
// Now we use the generic helper definitions above to define NGRAPH_API
// NGRAPH_API is used for the public API symbols. It either DLL imports or DLL exports
// Now we use the generic helper definitions above to define OPENVINO_API
// OPENVINO_API is used for the public API symbols. It either DLL imports or DLL exports
// (or does nothing for static build)
#ifdef _WIN32

View File

@@ -36,9 +36,9 @@ public:
Tensor(const Tensor&) = delete;
Tensor& operator=(const Tensor&) = delete;
NGRAPH_DEPRECATED("get_name() is deprecated! Please use get_names() instead.")
OPENVINO_DEPRECATED("get_name() is deprecated! Please use get_names() instead.")
const std::string& get_name() const;
NGRAPH_DEPRECATED("set_name() is deprecated! Please use set_names() instead.")
OPENVINO_DEPRECATED("set_name() is deprecated! Please use set_names() instead.")
void set_name(const std::string& name);
const std::unordered_set<std::string>& get_names() const;
@@ -91,7 +91,7 @@ protected:
// Support for dynamic shapes required transition to ov::PartialShape.
// To smoothly transition to ov::PartialShape we introduced m_partial_shape
// and kept m_shape in sync with m_partial_shape. Synchronization point was placed
// in set_partial_shape which dramatically affected performance of ngraph::Function
// in set_partial_shape which dramatically affected performance of ov::Function
// validation. Since we have started the transition to ov::PartialShape and reduced
// Shape usage the only user of m_shape was get_shape method with signature:
// const PartialShape& descriptor::Tensor::get_shape() const

View File

@@ -0,0 +1,110 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
namespace ov {
//
// EnumMask is intended to work with a scoped enum type. It's used to store
// a combination of enum values and provides easy access and manipulation
// of these enum values as a mask.
//
// EnumMask does not provide a set_all() or invert() operator because they
// could do things unexpected by the user, i.e. for enum with 4 bit values,
// invert(001000...) != 110100..., due to the extra bits.
//
template <typename T>
class EnumMask {
public:
    /// Make sure the template type is an enum.
    static_assert(std::is_enum<T>::value, "EnumMask template type must be an enum");
    /// Extract the underlying type of the enum.
    using value_type = typename std::underlying_type<T>::type;
    /// Some bit operations are not safe for signed values, we require enum
    /// type to use unsigned underlying type.
    static_assert(std::is_unsigned<value_type>::value, "EnumMask enum must use unsigned type.");

    /// Constructs an empty mask (no bits set).
    constexpr EnumMask() = default;
    /// Constructs a mask holding a single enum value.
    constexpr EnumMask(const T& enum_value) : m_value{static_cast<value_type>(enum_value)} {}
    // Rule of zero: the compiler-generated copy operations are correct for a
    // single integral member and keep the type trivially copyable, unlike the
    // previously hand-written equivalents.
    EnumMask(const EnumMask& other) = default;
    EnumMask& operator=(const EnumMask& other) = default;
    /// Constructs a mask that is the bitwise OR of all listed enum values.
    EnumMask(std::initializer_list<T> enum_values) {
        for (auto& v : enum_values) {
            m_value |= static_cast<value_type>(v);
        }
    }

    /// Returns the raw underlying bit mask.
    value_type value() const {
        return m_value;
    }
    /// Check if any of the input parameter enum bit mask match
    bool is_any_set(const EnumMask& p) const {
        // Explicit comparison instead of an implicit value_type -> bool
        // narrowing conversion.
        return (m_value & p.m_value) != 0;
    }
    /// Check if all of the input parameter enum bit mask match
    bool is_set(const EnumMask& p) const {
        return (m_value & p.m_value) == p.m_value;
    }
    /// Check if any of the input parameter enum bit mask does not match
    bool is_any_clear(const EnumMask& p) const {
        return !is_set(p);
    }
    /// Check if all of the input parameter enum bit mask do not match
    bool is_clear(const EnumMask& p) const {
        return !is_any_set(p);
    }
    /// Adds all bits of p to this mask.
    void set(const EnumMask& p) {
        m_value |= p.m_value;
    }
    /// Removes all bits of p from this mask.
    void clear(const EnumMask& p) {
        m_value &= ~p.m_value;
    }
    /// Resets the mask to empty (no bits set).
    void clear_all() {
        m_value = 0;
    }
    /// Equivalent to is_set(p).
    bool operator[](const EnumMask& p) const {
        return is_set(p);
    }
    bool operator==(const EnumMask& other) const {
        return m_value == other.m_value;
    }
    bool operator!=(const EnumMask& other) const {
        return m_value != other.m_value;
    }
    EnumMask& operator&=(const EnumMask& other) {
        m_value &= other.m_value;
        return *this;
    }
    EnumMask& operator|=(const EnumMask& other) {
        m_value |= other.m_value;
        return *this;
    }
    EnumMask operator&(const EnumMask& other) const {
        return EnumMask(m_value & other.m_value);
    }
    EnumMask operator|(const EnumMask& other) const {
        return EnumMask(m_value | other.m_value);
    }
    /// Streams the numeric value of the mask.
    friend std::ostream& operator<<(std::ostream& os, const EnumMask& m) {
        os << m.m_value;
        return os;
    }

private:
    /// Only used internally
    explicit EnumMask(const value_type& value) : m_value{value} {}

    value_type m_value{};
};
} // namespace ov

View File

@@ -8,7 +8,7 @@
#include <string>
#include <utility>
#include "ngraph/check.hpp"
#include "openvino/core/except.hpp"
namespace ov {
/// Uses a pairings defined by EnumTypes::get() to convert between strings
@@ -30,7 +30,7 @@ public:
return p.second;
}
}
NGRAPH_CHECK(false, "\"", name, "\"", " is not a member of enum ", get().m_enum_name);
OPENVINO_ASSERT(false, "\"", name, "\"", " is not a member of enum ", get().m_enum_name);
}
/// Converts enum values to strings
@@ -40,7 +40,7 @@ public:
return p.first;
}
}
NGRAPH_CHECK(false, " invalid member of enum ", get().m_enum_name);
OPENVINO_ASSERT(false, " invalid member of enum ", get().m_enum_name);
}
private:

View File

@@ -11,87 +11,81 @@
#include <string>
#include <vector>
#include "ngraph/op/assign.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/read_value.hpp"
#include "ngraph/op/result.hpp"
#include "ngraph/op/sink.hpp"
#include "ngraph/op/util/variable.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/rtti.hpp"
#include "openvino/core/variant.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/sink.hpp"
#include "openvino/op/util/variable.hpp"
namespace ov {
/// A user-defined function.
class OPENVINO_API Function : public std::enable_shared_from_this<Function> {
public:
static constexpr ngraph::DiscreteTypeInfo type_info{"Function", 0};
const ngraph::DiscreteTypeInfo& get_type_info() const {
static constexpr ov::DiscreteTypeInfo type_info{"Function", 0};
const ov::DiscreteTypeInfo& get_type_info() const {
return type_info;
}
Function(const ngraph::NodeVector& results,
const ngraph::ParameterVector& parameters,
Function(const ov::NodeVector& results, const ov::ParameterVector& parameters, const std::string& name = "");
Function(const ov::OutputVector& results, const ov::ParameterVector& parameters, const std::string& name = "");
Function(const std::shared_ptr<ov::Node>& result,
const ov::ParameterVector& parameters,
const std::string& name = "");
Function(const ngraph::OutputVector& results,
const ngraph::ParameterVector& parameters,
Function(const ov::ResultVector& results, const ov::ParameterVector& parameters, const std::string& name = "");
Function(const ov::ResultVector& results,
const ov::SinkVector& sinks,
const ov::ParameterVector& parameters,
const std::string& name = "");
Function(const std::shared_ptr<ngraph::Node>& result,
const ngraph::ParameterVector& parameters,
Function(const ov::OutputVector& results,
const ov::SinkVector& sinks,
const ov::ParameterVector& parameters,
const std::string& name = "");
Function(const ngraph::ResultVector& results,
const ngraph::ParameterVector& parameters,
Function(const ov::ResultVector& results,
const ov::SinkVector& sinks,
const ov::ParameterVector& parameters,
const ov::op::util::VariableVector& variables,
const std::string& name = "");
Function(const ngraph::ResultVector& results,
const ngraph::SinkVector& sinks,
const ngraph::ParameterVector& parameters,
Function(const ov::OutputVector& results,
const ov::SinkVector& sinks,
const ov::ParameterVector& parameters,
const ov::op::util::VariableVector& variables,
const std::string& name = "");
Function(const ngraph::OutputVector& results,
const ngraph::SinkVector& sinks,
const ngraph::ParameterVector& parameters,
Function(const ov::ResultVector& results,
const ov::ParameterVector& parameters,
const ov::op::util::VariableVector& variables,
const std::string& name = "");
Function(const ngraph::ResultVector& results,
const ngraph::SinkVector& sinks,
const ngraph::ParameterVector& parameters,
const ngraph::VariableVector& variables,
const std::string& name = "");
Function(const ngraph::OutputVector& results,
const ngraph::SinkVector& sinks,
const ngraph::ParameterVector& parameters,
const ngraph::VariableVector& variables,
const std::string& name = "");
Function(const ngraph::ResultVector& results,
const ngraph::ParameterVector& parameters,
const ngraph::VariableVector& variables,
const std::string& name = "");
Function(const ngraph::OutputVector& results,
const ngraph::ParameterVector& parameters,
const ngraph::VariableVector& variables,
Function(const ov::OutputVector& results,
const ov::ParameterVector& parameters,
const ov::op::util::VariableVector& variables,
const std::string& name = "");
/// Constructs a Function. Lists of parameters and variables will be generated automatically
/// based on traversing the graph from the results.
explicit Function(const ngraph::OutputVector& results, const std::string& name = "");
explicit Function(const ov::OutputVector& results, const std::string& name = "");
/// Constructs a Function. Lists of parameters and variables will be generated automatically
/// based on traversing the graph from the results and the sinks.
Function(const ngraph::OutputVector& results, const ngraph::SinkVector& sinks, const std::string& name = "");
Function(const ov::OutputVector& results, const ov::SinkVector& sinks, const std::string& name = "");
virtual ~Function() = default;
/// Return the number of outputs for this function.
size_t get_output_size() const;
/// Return the op that generates output i
std::shared_ptr<ngraph::Node> get_output_op(size_t i) const;
std::shared_ptr<ov::Node> get_output_op(size_t i) const;
/// Output functions
std::vector<ov::Output<ov::Node>> outputs();
@@ -115,7 +109,7 @@ public:
void reshape(const std::map<std::string, ov::PartialShape>& partial_shapes);
/// Return the element type of output i
const ngraph::element::Type& get_output_element_type(size_t i) const;
const ov::element::Type& get_output_element_type(size_t i) const;
/// Return the shape of element i
const Shape& get_output_shape(size_t i) const;
@@ -124,7 +118,7 @@ public:
const PartialShape& get_output_partial_shape(size_t i) const;
/// Check that there is a single result and return it.
std::shared_ptr<ngraph::Node> get_result() const;
std::shared_ptr<ov::Node> get_result() const;
/// \brief Get the unique name of the function.
/// \returns A const reference to the function's unique name.
@@ -141,13 +135,13 @@ public:
/// \returns A const reference to the function's friendly name.
const std::string& get_friendly_name() const;
std::vector<std::shared_ptr<ngraph::Node>> get_ops() const;
std::vector<std::shared_ptr<ngraph::Node>> get_ordered_ops() const;
void map_unordered_ops(std::function<void(ngraph::Node*)> f) const;
std::vector<std::shared_ptr<ov::Node>> get_ops() const;
std::vector<std::shared_ptr<ov::Node>> get_ordered_ops() const;
void map_unordered_ops(std::function<void(ov::Node*)> f) const;
friend std::ostream& operator<<(std::ostream&, const Function&);
// updates graph and m_results list
void replace_node(std::shared_ptr<ngraph::Node> old, std::shared_ptr<ngraph::Node> repl);
void replace_node(std::shared_ptr<ov::Node> old, std::shared_ptr<ov::Node> repl);
void validate_nodes_and_infer_types() const;
@@ -166,59 +160,59 @@ public:
///
/// \param parameter_index The index of the parameter to replace.
/// \param parameter The parameter to substitute for the `parameter_index`th parameter.
void replace_parameter(size_t parameter_index, const std::shared_ptr<ngraph::op::Parameter>& parameter);
void replace_parameter(size_t parameter_index, const std::shared_ptr<ov::op::v0::Parameter>& parameter);
using topological_sort_t = std::function<std::vector<std::shared_ptr<ngraph::Node>>(
const std::vector<std::shared_ptr<ngraph::Node>>& root_nodes)>;
using topological_sort_t =
std::function<std::vector<std::shared_ptr<ov::Node>>(const std::vector<std::shared_ptr<ov::Node>>& root_nodes)>;
void set_topological_sort(topological_sort_t);
virtual bool visit_attributes(ngraph::AttributeVisitor& visitor);
virtual bool visit_attributes(ov::AttributeVisitor& visitor);
/// Return the function parameters
const ngraph::ParameterVector& get_parameters() const {
const ov::ParameterVector& get_parameters() const {
return m_parameters;
};
/// Return a list of function's outputs
const ngraph::ResultVector& get_results() const {
const ov::ResultVector& get_results() const {
return m_results;
};
/// Index for parameter, or -1
int64_t get_parameter_index(const std::shared_ptr<ngraph::op::Parameter>& parameter) const;
int64_t get_parameter_index(const std::shared_ptr<ov::op::v0::Parameter>& parameter) const;
/// Index for value or result referencing it, or -1
int64_t get_result_index(const ngraph::Output<ngraph::Node>& value) const;
int64_t get_result_index(const ov::Output<ov::Node>& value) const;
/// \brief Evaluate the function on inputs, putting results in outputs.
/// \param output_tensors Tensors for the outputs to compute. One for each result
/// \param input_tensors Tensors for the inputs. One for each inputs.
/// \param evaluation_context Storage of additional settings and attributes that can be used
/// when evaluating the function. This additional information can be shared across nodes.
bool evaluate(const ngraph::HostTensorVector& output_tensors,
const ngraph::HostTensorVector& input_tensors,
ngraph::EvaluationContext evaluation_context = ngraph::EvaluationContext()) const;
bool evaluate(const ov::HostTensorVector& output_tensors,
const ov::HostTensorVector& input_tensors,
ov::EvaluationContext evaluation_context = ov::EvaluationContext()) const;
/// \brief Return a list of function's sinks.
const ngraph::SinkVector& get_sinks() const {
const ov::SinkVector& get_sinks() const {
return m_sinks;
}
/// \brief Add new sink nodes to the list. Method doesn't validate graph, it should be done
/// manually after all changes.
/// \param sinks new sink nodes
void add_sinks(const ngraph::SinkVector& sinks);
void add_sinks(const ov::SinkVector& sinks);
/// \brief Delete sink node from the list of sinks. Method doesn't delete node from graph.
/// \param sink Sink to delete
void remove_sink(const std::shared_ptr<ngraph::op::Sink>& sink);
void remove_sink(const std::shared_ptr<ov::op::Sink>& sink);
/// \brief Add new Result nodes to the list. Method doesn't validate graph, it should be
/// done manually after all changes.
/// \param results new Result nodes
void add_results(const ngraph::ResultVector& results);
void add_results(const ov::ResultVector& results);
/// \brief Delete Result node from the list of results. Method will not delete node from
/// graph.
/// \param result Result node to delete
void remove_result(const std::shared_ptr<ngraph::op::Result>& result);
void remove_result(const std::shared_ptr<ov::op::v0::Result>& result);
/// \brief Add new Parameter nodes to the list.
///
@@ -230,7 +224,7 @@ public:
/// * call graph validation to check correctness of changes
///
/// \param params new Parameter nodes
void add_parameters(const ngraph::ParameterVector& params);
void add_parameters(const ov::ParameterVector& params);
/// \brief Delete Parameter node from the list of parameters. Method will not delete node
/// from graph. You need to replace Parameter with other operation manually.
@@ -245,25 +239,25 @@ public:
/// * call graph validation to check all changes
///
/// \param param Parameter node to delete
void remove_parameter(const std::shared_ptr<ngraph::op::Parameter>& param);
void remove_parameter(const std::shared_ptr<ov::op::v0::Parameter>& param);
/// \brief Add new variables to the list. Method doesn't validate graph, it should be done
/// manually after all changes.
/// \param variables new variables to add
void add_variables(const ngraph::VariableVector& variables);
void add_variables(const ov::op::util::VariableVector& variables);
/// \brief Delete variable from the list of variables.
/// Method doesn't delete nodes that used this variable from the graph.
/// \param variable Variable to delete
void remove_variable(const ngraph::VariablePtr& variable);
void remove_variable(const ov::op::util::Variable::Ptr& variable);
/// \brief Return a list of function's variables.
const ngraph::VariableVector& get_variables() const {
const ov::op::util::VariableVector& get_variables() const {
return m_variables;
}
/// \brief Return a variable by specified variable_id.
ngraph::VariablePtr get_variable_by_id(const std::string& variable_id) const;
ov::op::util::Variable::Ptr get_variable_by_id(const std::string& variable_id) const;
RTMap& get_rt_info() {
return m_rt_info;
}
@@ -291,17 +285,17 @@ private:
size_t m_placement{0};
topological_sort_t m_topological_sorter;
ngraph::ResultVector m_results;
ov::ResultVector m_results;
// List of the nodes with side effect in graph.
// These nodes are not outputs of graph but should not be removed even if have no children.
ngraph::SinkVector m_sinks;
ngraph::ParameterVector m_parameters;
ngraph::VariableVector m_variables;
ov::SinkVector m_sinks;
ov::ParameterVector m_parameters;
ov::op::util::VariableVector m_variables;
RTMap m_rt_info;
};
template <>
class NGRAPH_API AttributeAdapter<std::shared_ptr<ov::Function>>
class OPENVINO_API AttributeAdapter<std::shared_ptr<ov::Function>>
: public DirectValueAccessor<std::shared_ptr<ov::Function>> {
public:
AttributeAdapter(std::shared_ptr<ov::Function>& value)

View File

@@ -7,7 +7,7 @@
#include <string>
#include <unordered_map>
#include "ngraph/attribute_adapter.hpp"
#include "openvino/core/attribute_adapter.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/rank.hpp"

View File

@@ -71,7 +71,7 @@ class Output;
class Node;
/// EvaluationContext stores and manages a context (additional parameters, values and
/// environment) for evaluating ngraph::function.
/// environment) for evaluating ov::Function.
using EvaluationContext = std::map<std::string, std::shared_ptr<Variant>>;
using ResultVector = std::vector<std::shared_ptr<ov::op::v0::Result>>;
@@ -222,7 +222,7 @@ public:
virtual bool evaluate_upper(const ov::HostTensorVector& output_values) const;
virtual bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values);
/// \brief Decomposes the FusedOp into a sub-graph consisting of core ngraph ops
/// \brief Decomposes the FusedOp into a sub-graph consisting of core openvino ops
///
/// \return A vector of nodes comprising the sub-graph. The order of output
/// tensors must match the match output tensors of the FusedOp
@@ -558,11 +558,11 @@ using RawNodeOutputMap = std::map<RawNodeOutput, Output<Node>>;
class OPENVINO_API NodeValidationFailure : public ov::AssertFailure {
public:
NodeValidationFailure(const ngraph::CheckLocInfo& check_loc_info, const Node* node, const std::string& explanation)
NodeValidationFailure(const ov::CheckLocInfo& check_loc_info, const Node* node, const std::string& explanation)
: AssertFailure(check_loc_info, node_validation_failure_loc_string(node), explanation) {}
};
} // namespace ov
#define NODE_VALIDATION_CHECK(node, ...) NGRAPH_CHECK_HELPER(::ov::NodeValidationFailure, (node), __VA_ARGS__)
#define NODE_VALIDATION_CHECK(node, ...) OPENVINO_ASSERT_HELPER(::ov::NodeValidationFailure, (node), __VA_ARGS__)
namespace ov {
template <typename T>

View File

@@ -7,10 +7,10 @@
#include <cstring>
#include <map>
#include "ngraph/shape.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/descriptor/tensor.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/core/variant.hpp"

View File

@@ -14,10 +14,10 @@
#include <string>
#include <vector>
#include "ngraph/attribute_adapter.hpp"
#include "ngraph/deprecated.hpp"
#include "ngraph/except.hpp"
#include "openvino/core/attribute_adapter.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/deprecated.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/rtti.hpp"
#include "openvino/core/type/bfloat16.hpp"
#include "openvino/core/type/float16.hpp"
@@ -196,7 +196,7 @@ protected:
};
template <>
class NGRAPH_API AttributeAdapter<ov::element::TypeVector> : public DirectValueAccessor<ov::element::TypeVector> {
class OPENVINO_API AttributeAdapter<ov::element::TypeVector> : public DirectValueAccessor<ov::element::TypeVector> {
public:
OPENVINO_RTTI("AttributeAdapter<ov::element::TypeVector>");
BWDCMP_RTTI_DECLARATION;

View File

@@ -31,7 +31,7 @@ public:
///
AdaptiveMaxPool(const Output<Node>& data,
const Output<Node>& output_shape,
const ngraph::element::Type& index_element_type = ngraph::element::i64);
const ov::element::Type& index_element_type = ov::element::i64);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
@@ -43,7 +43,7 @@ public:
}
protected:
ngraph::element::Type m_index_element_type = ngraph::element::i64;
ov::element::Type m_index_element_type = ov::element::i64;
};
} // namespace v8
} // namespace op

View File

@@ -60,7 +60,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::string get_variable_id() const override {
NGRAPH_CHECK(m_variable, "Variable is not initialized. Variable_id is unavailable");
OPENVINO_ASSERT(m_variable, "Variable is not initialized. Variable_id is unavailable");
return m_variable->get_info().variable_id;
}
bool evaluate(const HostTensorVector& outputs,

View File

@@ -10,7 +10,6 @@
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/shared_buffer.hpp"
#include "ngraph/util.hpp"
#include "openvino/core/coordinate_diff.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/type/element_type.hpp"
@@ -364,7 +363,7 @@ public:
template <element::Type_t ET>
const typename element_type_traits<ET>::value_type* get_data_ptr() const {
NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr() called for incorrect element type.");
OPENVINO_ASSERT(ET == get_element_type(), "get_data_ptr() called for incorrect element type.");
return static_cast<const typename element_type_traits<ET>::value_type*>(get_data_ptr());
}
@@ -530,7 +529,7 @@ private:
template <element::Type_t ET>
typename element_type_traits<ET>::value_type* get_data_ptr_nc() {
NGRAPH_CHECK(ET == get_element_type(), "get_data_ptr_nc() called for incorrect element type.");
OPENVINO_ASSERT(ET == get_element_type(), "get_data_ptr_nc() called for incorrect element type.");
return static_cast<typename element_type_traits<ET>::value_type*>(get_data_ptr_nc());
}
@@ -668,21 +667,21 @@ private:
# pragma GCC diagnostic pop
#endif
}
template <ngraph::element::Type_t Type,
template <ov::element::Type_t Type,
typename ValueT,
typename std::enable_if<Type == ngraph::element::Type_t::u4, bool>::type = true>
static ngraph::fundamental_type_for<Type> value_in_range(const ValueT& value) {
const auto result = ngraph::fundamental_type_for<Type>(value);
NGRAPH_CHECK(0 <= result && result <= 15, "assigned value out of range u4 values");
typename std::enable_if<Type == ov::element::Type_t::u4, bool>::type = true>
static ov::fundamental_type_for<Type> value_in_range(const ValueT& value) {
const auto result = ov::fundamental_type_for<Type>(value);
OPENVINO_ASSERT(0 <= result && result <= 15, "assigned value out of range u4 values");
return result;
}
template <ngraph::element::Type_t Type,
template <ov::element::Type_t Type,
typename ValueT,
typename std::enable_if<Type == ngraph::element::Type_t::i4, bool>::type = true>
static ngraph::fundamental_type_for<Type> value_in_range(const ValueT& value) {
const auto result = ngraph::fundamental_type_for<Type>(value);
NGRAPH_CHECK(-8 <= result && result <= 7, "assigned value out of range i4 values");
typename std::enable_if<Type == ov::element::Type_t::i4, bool>::type = true>
static ov::fundamental_type_for<Type> value_in_range(const ValueT& value) {
const auto result = ov::fundamental_type_for<Type>(value);
OPENVINO_ASSERT(-8 <= result && result <= 7, "assigned value out of range i4 values");
return result;
}

View File

@@ -114,119 +114,140 @@ public:
/// \brief Constructs a batched-convolution data batch-backprop operation.
ConvolutionBackpropData() = default;
// clang-format off
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape: [N,
// C_INPUT, X1, ..., XD].
// \param filters The node producing the filter from forward-prop. Shape:
// [C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param output_shape The shape of the data batch from forward-prop. It's size
// should be equal to number of data spatial dimensions.
// \param strides The strides from forward-prop.
// \param pads_begin The padding-below sizes from forward-prop.
// \param pads_end The padding-above sizes from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds additional amount of paddings per
//                        each spatial axis in the output tensor.
// clang-format on
//
ConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filters,
const Output<Node>& output_shape,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {});
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape: [N,
// C_INPUT, X1, ..., XD].
// \param filters The node producing the filter from forward-prop. Shape:
// [C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param output_shape The shape of the data batch from forward-prop. It's size
// should be equal to number of data spatial dimensions.
// \param strides The strides from forward-prop.
// \param pads_begin The padding-below sizes from forward-prop.
// \param pads_end The padding-above sizes from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds additional amount of paddings per
//                        each spatial axis in the output tensor.
//
// clang-format on
ConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filters,
const Output<Node>& output_shape,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {});
// clang-format off
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape: [N,
// C_INPUT, X1, ..., XD].
// \param filters The node producing the filter from forward-prop. Shape:
// [C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param strides The strides from forward-prop.
// \param pads_begin The padding-below sizes from forward-prop.
// \param pads_end The padding-above sizes from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds additional amount of paddings per
//                        each spatial axis in the output tensor.
// clang-format on
//
ConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filters,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {});
// clang-format off
//
// \brief Constructs a batched-convolution data batch-backprop operation.
//
// \param data The node producing data from forward-prop. Shape: [N,
// C_INPUT, X1, ..., XD].
// \param filters The node producing the filter from forward-prop. Shape:
// [C_INPUT, C_OUTPUT, K_D, ..., K_1]
// \param strides The strides from forward-prop.
// \param pads_begin The padding-below sizes from forward-prop.
// \param pads_end The padding-above sizes from forward-prop.
// \param dilations The dilations from forward-prop.
// \param auto_pad The pad type for automatically computing padding sizes.
// \param output_padding The output padding adds additional amount of paddings per
//                        each spatial axis in the output tensor.
//
// clang-format on
ConvolutionBackpropData(const Output<Node>& data,
const Output<Node>& filters,
const Strides& strides,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const Strides& dilations,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {});
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
bool is_dynamic() const override;
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
bool is_dynamic() const override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The output spatial dimensions shape.
const PartialShape get_output_shape() const;
void set_output_shape(const Shape& output_shape);
/// \return The strides from the forward prop.
const Strides& get_strides() const { return m_strides; }
void set_strides(const Strides& strides) { m_strides = strides; }
/// \return The dilations from the forward prop.
const Strides& get_dilations() const { return m_dilations; }
void set_dilations(const Strides& dilations) { m_dilations = dilations; }
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_begin() const { return m_pads_begin; }
void set_pads_begin(const CoordinateDiff& pads_begin) { m_pads_begin = pads_begin; }
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_end() const { return m_pads_end; }
void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; }
/// \return The auto pad.
const PadType& get_auto_pad() const { return m_auto_pad; }
void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; }
/// \return The output padding.
const CoordinateDiff& get_output_padding() const { return m_output_padding; }
void set_output_padding(const CoordinateDiff& output_padding)
{
m_output_padding = output_padding;
}
/// \brief Calculates output spatial features size.
///
/// \param[in] input_data_shape The input data partial shape
/// \param[in] filters_shape The filters partial shape
/// \param[in] strides The strides values.
/// \param[in] dilations The dilations values.
/// \param[in] pads_begin The paddings at the beginning of axis.
/// \param[in] pads_end The paddings at the end of axis.
/// \param[in] output_padding The output padding values.
/// \param output_spatial_shape The placeholder for computed output spatial partial
/// shape.
///
void
infer_conv_backprop_output_spatial_shape(const std::vector<Dimension>& input_data_shape,
const std::vector<Dimension>& filters_shape,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const CoordinateDiff& output_padding,
std::vector<Dimension>& output_spatial_shape);
/// \return The output spatial dimensions shape.
const PartialShape get_output_shape() const;
void set_output_shape(const Shape& output_shape);
/// \return The strides from the forward prop.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations from the forward prop.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The auto pad.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The output padding.
const CoordinateDiff& get_output_padding() const {
return m_output_padding;
}
void set_output_padding(const CoordinateDiff& output_padding) {
m_output_padding = output_padding;
}
/// \brief Calculates output spatial features size.
///
/// \param[in] input_data_shape The input data partial shape
/// \param[in] filters_shape The filters partial shape
/// \param[in] strides The strides values.
/// \param[in] dilations The dilations values.
/// \param[in] pads_begin The paddings at the beginning of axis.
/// \param[in] pads_end The paddings at the end of axis.
/// \param[in] output_padding The output padding values.
/// \param output_spatial_shape The placeholder for computed output spatial partial
/// shape.
///
void infer_conv_backprop_output_spatial_shape(const std::vector<Dimension>& input_data_shape,
const std::vector<Dimension>& filters_shape,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const CoordinateDiff& output_padding,
std::vector<Dimension>& output_spatial_shape);
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
CoordinateDiff m_output_padding;
};
} // namespace v1
} // namespace op
} // namespace ngraph
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
CoordinateDiff m_output_padding;
};
} // namespace v1
} // namespace op
} // namespace ov

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {

View File

@@ -31,28 +31,28 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \brief gets then_body as ngraph::Function.
/// \brief gets then_body as ov::Function.
///
/// \return then_body as ngraph::Function.
/// \return then_body as ov::Function.
const std::shared_ptr<Function>& get_then_body() const {
return m_bodies[THEN_BODY_INDEX];
}
/// \brief gets else_body as ngraph::Function.
/// \brief gets else_body as ov::Function.
///
/// \return else_body as ngraph::Function.
/// \return else_body as ov::Function.
const std::shared_ptr<Function>& get_else_body() const {
return m_bodies[ELSE_BODY_INDEX];
}
/// \brief sets new ngraph::Function as new then_body.
/// \brief sets new ov::Function as new then_body.
///
/// \param body new body for 'then' branch.
void set_then_body(const std::shared_ptr<Function>& body) {
m_bodies[THEN_BODY_INDEX] = body;
}
/// \brief sets new ngraph::Function as new else_body.
/// \brief sets new ov::Function as new else_body.
///
/// \param body new body for 'else' branch.
void set_else_body(const std::shared_ptr<Function>& body) {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
namespace ov {
namespace op {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {

View File

@@ -15,7 +15,7 @@ namespace ov {
namespace op {
namespace v5 {
/// \brief Iterate a body over tensors, accumulating into tensors.
class NGRAPH_API Loop : public op::util::SubGraphOp {
class OPENVINO_API Loop : public op::util::SubGraphOp {
public:
/// \brief Allows to define the purpose of inputs/outputs in the body
struct SpecialBodyPorts {
@@ -76,7 +76,7 @@ private:
} // namespace op
template <>
class NGRAPH_API AttributeAdapter<op::v5::Loop::SpecialBodyPorts>
class OPENVINO_API AttributeAdapter<op::v5::Loop::SpecialBodyPorts>
: public DirectValueAccessor<op::v5::Loop::SpecialBodyPorts> {
public:
AttributeAdapter(op::v5::Loop::SpecialBodyPorts& value)

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {
@@ -24,7 +24,7 @@ namespace v0 {
/// | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$N[n, c, d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[n, c, d_1,\dots,d_n] = \frac{N[n,i,d_1,\dots,d_n]}{ (bias + alpha * (\sum_{i=max(0,(nsize-1)/2)}^{min(C, (nsize-1)/2)+1} N[n,i,d_1,\dots,d_n]^{2}) ^ {2})}\f$ |
// clang-format on
class NGRAPH_API LRN : public Op {
class OPENVINO_API LRN : public Op {
public:
OPENVINO_OP("LRN", "opset1");
BWDCMP_RTTI_DECLARATION;

View File

@@ -25,7 +25,7 @@ public:
// not
bool sort_result_across_batch = false;
// specifies the output tensor type
ngraph::element::Type output_type = ngraph::element::i64;
ov::element::Type output_type = ov::element::i64;
// specifies minimum score to consider box for the processing
float score_threshold = 0.0f;
// specifies maximum number of boxes to be selected per class, -1 meaning to

View File

@@ -23,7 +23,7 @@ public:
// not
bool sort_result_across_batch = false;
// specifies the output tensor type
ngraph::element::Type output_type = ngraph::element::i64;
ov::element::Type output_type = ov::element::i64;
// specifies intersection over union threshold
float iou_threshold = 0.0f;
// specifies minimum score to consider box for the processing

View File

@@ -108,7 +108,7 @@ public:
const Output<Node>& score_threshold,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
/// \brief Constructs a NonMaxSuppression operation with default values for the last
/// 3 inputs
@@ -123,7 +123,7 @@ public:
const Output<Node>& scores,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
@@ -154,7 +154,7 @@ public:
protected:
BoxEncodingType m_box_encoding = BoxEncodingType::CORNER;
bool m_sort_result_descending = true;
ngraph::element::Type m_output_type = ngraph::element::i64;
ov::element::Type m_output_type = ov::element::i64;
void validate();
int64_t max_boxes_output_from_input() const;
};
@@ -188,7 +188,7 @@ public:
const Output<Node>& score_threshold,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
/// \brief Constructs a NonMaxSuppression operation with default values for the last
/// 3 inputs
@@ -203,7 +203,7 @@ public:
const Output<Node>& scores,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
void validate_and_infer_types() override;
@@ -235,7 +235,7 @@ public:
const Output<Node>& scores,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
/// \brief Constructs a NonMaxSuppression operation with default values in the last
/// 3 inputs.
@@ -253,7 +253,7 @@ public:
const Output<Node>& max_output_boxes_per_class,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
/// \brief Constructs a NonMaxSuppression operation with default values in the last
/// 2 inputs.
@@ -273,7 +273,7 @@ public:
const Output<Node>& iou_threshold,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
/// \brief Constructs a NonMaxSuppression operation with default value in the last
/// input.
@@ -295,7 +295,7 @@ public:
const Output<Node>& score_threshold,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
/// \brief Constructs a NonMaxSuppression operation.
///
@@ -318,7 +318,7 @@ public:
const Output<Node>& soft_nms_sigma,
const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
const bool sort_result_descending = true,
const ngraph::element::Type& output_type = ngraph::element::i64);
const ov::element::Type& output_type = ov::element::i64);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
@@ -355,7 +355,7 @@ public:
protected:
BoxEncodingType m_box_encoding = BoxEncodingType::CORNER;
bool m_sort_result_descending = true;
ngraph::element::Type m_output_type = ngraph::element::i64;
ov::element::Type m_output_type = ov::element::i64;
void validate();
};
} // namespace v5

View File

@@ -25,7 +25,7 @@ public:
///
/// \param element_type The element type of the parameter.
/// \param pshape The partial shape of the parameter.
Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape);
Parameter(const ov::element::Type& element_type, const PartialShape& pshape);
bool visit_attributes(AttributeVisitor& visitor) override;

View File

@@ -28,7 +28,7 @@ public:
RandomUniform(const Output<Node>& out_shape,
const Output<Node>& min_val,
const Output<Node>& max_val,
const ngraph::element::Type& out_type,
const ov::element::Type& out_type,
uint64_t global_seed = 0,
uint64_t op_seed = 0);
@@ -44,10 +44,10 @@ public:
}
/// \return The output tensor type.
const ngraph::element::Type& get_out_type() const {
const ov::element::Type& get_out_type() const {
return m_output_type;
}
void set_out_type(const ngraph::element::Type& output_type) {
void set_out_type(const ov::element::Type& output_type) {
m_output_type = output_type;
}
@@ -72,7 +72,7 @@ public:
bool has_evaluate() const override;
protected:
ngraph::element::Type m_output_type;
ov::element::Type m_output_type;
uint64_t m_global_seed;
uint64_t m_op_seed;

View File

@@ -65,7 +65,7 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::string get_variable_id() const override {
NGRAPH_CHECK(m_variable, "Variable is not initialized. Variable_id is unavailable");
OPENVINO_ASSERT(m_variable, "Variable is not initialized. Variable_id is unavailable");
return m_variable->get_info().variable_id;
}

View File

@@ -21,7 +21,7 @@ public:
/// \brief Constructs a Relu operation.
///
/// \param arg Node that produces the input tensor.
Relu(const Output<ngraph::Node>& arg);
Relu(const Output<ov::Node>& arg);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
#include "openvino/op/util/unary_elementwise_arithmetic.hpp"
namespace ov {
namespace op {

View File

@@ -37,8 +37,8 @@ public:
/// input.
SpaceToBatch(const Output<Node>& data,
const Output<Node>& block_shape,
const Output<ngraph::Node>& pads_begin,
const Output<ngraph::Node>& pads_end);
const Output<ov::Node>& pads_begin,
const Output<ov::Node>& pads_end);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

View File

@@ -7,8 +7,8 @@
#include <memory>
#include <vector>
#include "ngraph/op/util/attr_types.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
namespace ov {
namespace op {

View File

@@ -4,8 +4,8 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/attr_types.hpp"
namespace ov {
namespace op {

View File

@@ -98,8 +98,8 @@ public:
void cache_output_descriptor();
private:
std::vector<std::tuple<ngraph::PartialShape, ngraph::element::Type>> m_inputs_desc;
std::vector<std::tuple<ngraph::PartialShape, ngraph::element::Type>> m_output_desc;
std::vector<std::tuple<ov::PartialShape, ov::element::Type>> m_inputs_desc;
std::vector<std::tuple<ov::PartialShape, ov::element::Type>> m_output_desc;
FrameworkNodeAttrs m_attrs;
};

View File

@@ -14,7 +14,7 @@
namespace ov {
namespace op {
namespace util {
class NGRAPH_API IndexReduction : public Op {
class OPENVINO_API IndexReduction : public Op {
protected:
IndexReduction();

View File

@@ -4,9 +4,9 @@
#pragma once
#include "ngraph/op/parameter.hpp"
#include "openvino/core/function.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/parameter.hpp"
namespace ov {
namespace op {
@@ -266,7 +266,7 @@ public:
///
/// \param value The value supplied as an input to the block.
/// \param bodies_parameters vector of bodies parameters.
virtual void set_invariant_inputs(const Output<Node>& value, const ngraph::ParameterVector& bodies_parameters);
virtual void set_invariant_inputs(const Output<Node>& value, const ov::ParameterVector& bodies_parameters);
///
/// \brief Set output descriptions for MultiSubGraphOp output.
///
@@ -303,7 +303,7 @@ public:
AttributeAdapter(std::vector<std::shared_ptr<op::util::MultiSubGraphOp::InputDescription>>& value)
: DirectValueAccessor<std::vector<std::shared_ptr<op::util::MultiSubGraphOp::InputDescription>>>(value) {}
OPENVINO_RTTI("AttributeAdapter<std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::InputDescription>>>")
OPENVINO_RTTI("AttributeAdapter<std::vector<std::shared_ptr<ov::op::util::MultiSubGraphOp::InputDescription>>>")
BWDCMP_RTTI_DECLARATION;
};
@@ -314,8 +314,7 @@ public:
AttributeAdapter(std::vector<std::shared_ptr<op::util::MultiSubGraphOp::OutputDescription>>& value)
: DirectValueAccessor<std::vector<std::shared_ptr<op::util::MultiSubGraphOp::OutputDescription>>>(value) {}
OPENVINO_RTTI(
"AttributeAdapter<std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::OutputDescription>>>");
OPENVINO_RTTI("AttributeAdapter<std::vector<std::shared_ptr<ov::op::util::MultiSubGraphOp::OutputDescription>>>");
BWDCMP_RTTI_DECLARATION;
};

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "ngraph/op/op.hpp"
#include "openvino/op/op.hpp"
namespace ov {
namespace op {

View File

@@ -4,8 +4,8 @@
#pragma once
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/util/multi_subgraph_base.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/util/multi_subgraph_base.hpp"
namespace ov {
namespace op {
@@ -57,7 +57,7 @@ public:
/// \param end The last index on axis of the slicing
/// \param axis The axis to slice along
///
virtual void set_sliced_input(const std::shared_ptr<ngraph::op::Parameter>& parameter,
virtual void set_sliced_input(const std::shared_ptr<ov::op::v0::Parameter>& parameter,
const Output<Node>& value,
int64_t start,
int64_t stride,
@@ -75,7 +75,7 @@ public:
/// The value is what is active in the most recent
/// completed iteration.
///
virtual void set_merged_input(const std::shared_ptr<ngraph::op::Parameter>& body_parameter,
virtual void set_merged_input(const std::shared_ptr<ov::op::v0::Parameter>& body_parameter,
const Output<Node>& initial_value,
const Output<Node>& successive_value);
///
@@ -86,7 +86,7 @@ public:
/// \param body_parameter The body parameter
/// \param value The value supplied as an input to the block
///
virtual void set_invariant_input(const std::shared_ptr<ngraph::op::Parameter>& body_parameter,
virtual void set_invariant_input(const std::shared_ptr<ov::op::v0::Parameter>& body_parameter,
const Output<Node>& value);
///
/// \brief Gets a value for a particular iteration point

View File

@@ -18,7 +18,7 @@ namespace util {
using VariableMap = std::unordered_map<Variable::Ptr, VariableValue::Ptr>;
/// VariableContext stores and manages an evaluation context for Variables.
class NGRAPH_API VariableContext {
class OPENVINO_API VariableContext {
public:
/// \brief Constructs an uninitialized VariableContext.
VariableContext() = default;
@@ -74,7 +74,7 @@ private:
} // namespace util
} // namespace op
template <>
class NGRAPH_API VariantWrapper<op::util::VariableContext> : public VariantImpl<op::util::VariableContext> {
class OPENVINO_API VariantWrapper<op::util::VariableContext> : public VariantImpl<op::util::VariableContext> {
public:
OPENVINO_RTTI("VariantWrapper<op::util::VariableContext>");
BWDCMP_RTTI_DECLARATION;

View File

@@ -6,7 +6,6 @@
#include <utility>
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/op/util/variable.hpp"

View File

@@ -16,13 +16,13 @@ namespace pass {
class OPENVINO_API ConstantFolding : public FunctionPass {
public:
OPENVINO_RTTI("ConstantFolding");
bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
bool run_on_function(std::shared_ptr<ov::Function> f) override;
private:
void copy_runtime_info_to_target_inputs(const std::shared_ptr<Node>& node, const Output<Node>& replacement);
/// \brief Folds pre-calculated output tensor values to constants in case lower and
/// upper estimations are equal. Traverses graph backwards starting from the results.
bool pre_calculated_values_folding(const std::shared_ptr<ngraph::Function>& f);
bool pre_calculated_values_folding(const std::shared_ptr<ov::Function>& f);
};
} // namespace pass
} // namespace ov

View File

@@ -11,7 +11,7 @@ namespace pass {
class OPENVINO_API ConvertFP32ToFP16 : public FunctionPass {
public:
OPENVINO_RTTI("ConvertFP32ToFP16");
bool run_on_function(std::shared_ptr<ngraph::Function>) override;
bool run_on_function(std::shared_ptr<ov::Function>) override;
};
} // namespace pass
} // namespace ov

View File

@@ -39,7 +39,7 @@ public:
explicit LowLatency2(bool use_const_initializer = true) : m_use_const_initializer(use_const_initializer) {}
bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
bool run_on_function(std::shared_ptr<ov::Function> f) override;
private:
bool m_use_const_initializer;

View File

@@ -67,8 +67,8 @@ public:
/// reasons for
/// some cases.
/// Callback example:
/// auto callback = [](const std::shared_ptr<const ngraph::Node> & node) -> bool {
/// return std::dynamic_pointer_cast<const ngraph::opset3::DepthToSpace>(node) !=
/// auto callback = [](const std::shared_ptr<const ov::Node> & node) -> bool {
/// return std::dynamic_pointer_cast<const ov::opset3::DepthToSpace>(node) !=
/// nullptr;
/// };
/// This callback returns true in case of DepthToSpace operation. So when execution

View File

@@ -8,9 +8,9 @@
#include <memory>
#include <vector>
#include "ngraph/util.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/deprecated.hpp"
#include "openvino/core/enum_mask.hpp"
#include "openvino/core/function.hpp"
#include "openvino/core/node.hpp"
#include "openvino/pass/pass_config.hpp"
@@ -24,7 +24,7 @@ enum class PassProperty : uint32_t {
CHANGE_DYNAMIC_STATE = 1 << 1,
};
using PassPropertyMask = ngraph::EnumMask<PassProperty>;
using PassPropertyMask = ov::EnumMask<PassProperty>;
class OPENVINO_API PassBase {
friend class Manager;
@@ -61,7 +61,7 @@ public:
/// This method remains here only for backward compatibility and will be removed
/// after all transformations are moved to transformation_callback() method.
/// \return result of callback execution for given node
NGRAPH_DEPRECATED("Please use transformation_callback method instead")
OPENVINO_DEPRECATED("Please use transformation_callback method instead")
bool m_transformation_callback(const std::shared_ptr<const Node>& node) {
return m_pass_config->get_callback(get_type_info())(node);
}
@@ -91,7 +91,7 @@ class OPENVINO_API FunctionPass : public PassBase {
public:
OPENVINO_RTTI("ov::pass::FunctionPass");
~FunctionPass() override;
virtual bool run_on_function(std::shared_ptr<ngraph::Function>) = 0;
virtual bool run_on_function(std::shared_ptr<ov::Function>) = 0;
};
class Manager;
@@ -105,6 +105,6 @@ enum class FusionType : uint32_t {
FOP_FUSIONS = 0x4,
ALL_FUSIONS = 0xFFFFFFFF
};
using FusionTypeMask = ngraph::EnumMask<FusionType>;
using FusionTypeMask = ov::EnumMask<FusionType>;
} // namespace pass
} // namespace ov

View File

@@ -9,7 +9,6 @@
#include <vector>
#include "ngraph/compatibility.hpp"
#include "ngraph/util.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/deprecated.hpp"
#include "openvino/core/function.hpp"
@@ -96,8 +95,8 @@ public:
///
/// Example below show how to set callback for one or multiple passes using this method.
///
/// pass_config->set_callback<ngraph::pass::ConvertBatchToSpace,
/// ngraph::pass::ConvertSpaceToBatch>(
/// pass_config->set_callback<ov::pass::ConvertBatchToSpace,
/// ov::pass::ConvertSpaceToBatch>(
/// [](const_node_ptr &node) -> bool {
/// // Disable transformations for cases when input shape rank is not
/// equal to 4
@@ -145,9 +144,9 @@ public:
/// \return callback lambda function
template <class T, typename std::enable_if<ngraph::HasTypeInfoMember<T>::value, bool>::type = true>
param_callback get_callback() const {
NGRAPH_SUPPRESS_DEPRECATED_START
OPENVINO_SUPPRESS_DEPRECATED_START
return get_callback(T::type_info);
NGRAPH_SUPPRESS_DEPRECATED_END
OPENVINO_SUPPRESS_DEPRECATED_END
}
template <class T, typename std::enable_if<!ngraph::HasTypeInfoMember<T>::value, bool>::type = true>
param_callback get_callback() const {
@@ -165,9 +164,9 @@ public:
/// \return true if transformation type was disabled and false otherwise
template <class T, typename std::enable_if<ngraph::HasTypeInfoMember<T>::value, bool>::type = true>
bool is_disabled() const {
NGRAPH_SUPPRESS_DEPRECATED_START
OPENVINO_SUPPRESS_DEPRECATED_START
return is_disabled(T::type_info);
NGRAPH_SUPPRESS_DEPRECATED_END
OPENVINO_SUPPRESS_DEPRECATED_END
}
template <class T, typename std::enable_if<!ngraph::HasTypeInfoMember<T>::value, bool>::type = true>
bool is_disabled() const {
@@ -185,9 +184,9 @@ public:
/// \return true if transformation type was force enabled and false otherwise
template <class T, typename std::enable_if<ngraph::HasTypeInfoMember<T>::value, bool>::type = true>
bool is_enabled() const {
NGRAPH_SUPPRESS_DEPRECATED_START
OPENVINO_SUPPRESS_DEPRECATED_START
return is_enabled(T::type_info);
NGRAPH_SUPPRESS_DEPRECATED_END
OPENVINO_SUPPRESS_DEPRECATED_END
}
template <class T, typename std::enable_if<!ngraph::HasTypeInfoMember<T>::value, bool>::type = true>
bool is_enabled() const {
@@ -197,7 +196,7 @@ public:
void add_disabled_passes(const PassConfig& rhs);
private:
param_callback m_callback = [](const std::shared_ptr<const ::ngraph::Node>&) {
param_callback m_callback = [](const std::shared_ptr<const ::ov::Node>&) {
return false;
};
param_callback_map m_callback_map;

View File

@@ -9,9 +9,9 @@
#include <algorithm>
#include <functional>
#include "ngraph/op/constant.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/node.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/pass/pattern/op/any.hpp"
#include "openvino/pass/pattern/op/any_of.hpp"
#include "openvino/pass/pattern/op/any_output.hpp"

View File

@@ -24,7 +24,7 @@ namespace ov {
namespace pass {
class OPENVINO_API VisualizeTree : public FunctionPass {
public:
OPENVINO_RTTI("ngraph::pass::VisualizeTree");
OPENVINO_RTTI("ov::pass::VisualizeTree");
using node_modifiers_t = std::function<void(const Node& node, std::vector<std::string>& attributes)>;
VisualizeTree(const std::string& file_name, node_modifiers_t nm = nullptr, bool dot_only = false);

View File

@@ -12,8 +12,8 @@
#include "ngraph/log.hpp"
#include "ngraph/type/element_type_traits.hpp"
BWDCMP_RTTI_DEFINITION(ngraph::AttributeAdapter<ov::element::Type>);
BWDCMP_RTTI_DEFINITION(ngraph::AttributeAdapter<ov::element::TypeVector>);
BWDCMP_RTTI_DEFINITION(ov::AttributeAdapter<ov::element::Type>);
BWDCMP_RTTI_DEFINITION(ov::AttributeAdapter<ov::element::TypeVector>);
namespace {
class TypeInfo {
@@ -313,8 +313,8 @@ size_t compiler_byte_size(ov::element::Type_t et) {
return 0;
}
throw ngraph::ngraph_error("compiler_byte_size: Unsupported value of ov::element::Type_t: " +
std::to_string(static_cast<int>(et)));
throw ov::Exception("compiler_byte_size: Unsupported value of ov::element::Type_t: " +
std::to_string(static_cast<int>(et)));
}
namespace ov {