Transformation component transition to OV namespace (phase 2) (#13557)

This commit is contained in:
Tomasz Jankowski
2022-11-15 02:40:48 +01:00
committed by GitHub
parent d6dc40bf34
commit c5eb25d3ea
103 changed files with 467 additions and 460 deletions

View File

@@ -12,7 +12,7 @@
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
using namespace std;
using namespace ngraph;

View File

@@ -13,7 +13,7 @@
#include <ngraph/ngraph.hpp>
#include <ngraph/pattern/matcher.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include <ngraph/rt_info.hpp>
#include "rt_info/shared_value_attribute.hpp"

View File

@@ -11,7 +11,7 @@
#include <vector>
#include <ngraph/pattern/op/wrap_type.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "low_precision/common/ie_lpt_exception.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -9,7 +9,7 @@
#include <ngraph/ngraph.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset4.hpp>
#include <ngraph/opsets/opset6.hpp>

View File

@@ -23,7 +23,7 @@
#include <ngraph/pass/manager.hpp>
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include <openvino/pass/serialize.hpp>
#include <algorithm>

View File

@@ -8,7 +8,7 @@
#include "snippets/op/convert_saturation.hpp"
#include "snippets/pass/align_element_type.hpp"
#include "snippets/utils.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph/op/util/op_types.hpp"
#include <ngraph/rt_info.hpp>
@@ -91,4 +91,4 @@ bool ngraph::snippets::pass::AlignElementType::run_on_model(const std::shared_pt
bool ngraph::snippets::pass::AlignElementType::opNeedsAlignElementType(const std::shared_ptr<ov::Node>& op, const ov::element::Type exec_type) {
// At the moment Snippets support only Eltwise/Convert/FQ which one output so we can just call get_element_type()
return op_supports_only_exec_type(op) && op->get_element_type() != exec_type;
}
}

View File

@@ -4,344 +4,4 @@
#pragma once
#include <algorithm>
#include <memory>
#include <mutex>
#include <openvino/op/convert.hpp>
#include <string>
#include <transformations_visibility.hpp>
#include <vector>
namespace ov {
namespace op {
/// A base class for templated TypeRelaxed that maintains overridden input types and output types for an operation.
class OPENVINO_API TypeRelaxedBase {
public:
virtual ~TypeRelaxedBase();
/// Stores the per-port input/output element-type overrides; empty vectors mean "no overrides".
explicit TypeRelaxedBase(const element::TypeVector& _input_data_types = {},
const element::TypeVector& _output_data_types = {})
: m_input_data_types(_input_data_types),
m_output_data_types(_output_data_types) {}
/// \return Data type that will be set for output with a given index outputIndex.
/// If output with a specified index outputIndex hasn't been set before, element::undefined will be returned.
/// Undefined means no type override happens for a given outputIndex and it will be deduced as the original
/// operation defines in its infer function.
///
/// This method may look similar to Node::get_output_element_type, but it is not the same thing, because
/// get_output_element_type returns the result of type inference, so it is completely deduced from
/// an operation inputs and attributes, and get_overridden_output_type returns value of the attribute that
/// is used to deduce output type. In some cases they don't match: get_overridden_output_type may return
/// element::undefined for some index i, and get_output_element_type will return some real type for
/// the same index i.
const element::Type& get_overridden_output_type(size_t outputIndex = 0) const {
if (outputIndex >= m_output_data_types.size()) {
return element::undefined;
}
return m_output_data_types[outputIndex];
}
/// Set data type that overrides the original data type for output port with outputIndex index
/// In case if outputIndex is out of range of known outputs (and this class cannot detect
/// the real number of outputs for original operation), the number of overridden outputs
/// is changed according to a given outputIndex value.
void set_overridden_output_type(const element::Type& element_type, size_t outputIndex = 0) {
if (outputIndex >= m_output_data_types.size()) {
// Grow the override vector on demand; new slots default to "no override".
m_output_data_types.resize(outputIndex + 1, element::undefined);
}
m_output_data_types[outputIndex] = element_type;
}
/// \return Data type that will be set for input when original shape/type inference function is called.
/// If index inputIndex hasn't been set before, element::undefined will be returned. Undefined means that
/// the type from input tensor descriptor is used for a given index.
const element::Type& get_origin_input_type(size_t inputIndex = 0) const {
if (inputIndex >= m_input_data_types.size()) {
return element::undefined;
}
return m_input_data_types[inputIndex];
}
/// Set data type that overrides the original data type for input port with inputIndex index.
/// In case if inputIndex is out of range of known inputs (and this class cannot detect
/// the real number of inputs for original operation), the number of overridden inputs
/// is changed according to a given inputIndex value. All new entries except one added
/// at inputIndex position are undefined.
void set_origin_input_type(const element::Type& element_type, size_t inputIndex = 0) {
if (inputIndex >= m_input_data_types.size()) {
m_input_data_types.resize(inputIndex + 1, element::undefined);
}
m_input_data_types[inputIndex] = element_type;
}
protected:
/// Saves the node's current input element types into old_input_types, then substitutes the
/// configured origin input types so the base op's shape/type inference sees the relaxed types.
void remember_input_data_types(Node& node, element::TypeVector& old_input_types) {
// Remember all input data types
for (size_t i = 0; i < node.get_input_size(); ++i) {
old_input_types.push_back(node.get_input_element_type(i));
}
// Reset input data types to the overridden origin types stored in m_input_data_types.
for (size_t i = 0; i < node.get_input_size(); ++i) {
auto origin_input_type = get_origin_input_type(i);
if (origin_input_type != element::undefined) {
OPENVINO_SUPPRESS_DEPRECATED_START
node.get_input_tensor(i).set_tensor_type(origin_input_type, node.get_input_partial_shape(i));
OPENVINO_SUPPRESS_DEPRECATED_END
}
}
}
/// Restores the input element types saved by remember_input_data_types, records the types the
/// base op inferred, and finally applies the configured output-type overrides to the node.
void restore_input_data_types(Node& node, const element::TypeVector& old_input_types) {
// Restore original input data types
for (size_t i = 0; i < node.get_input_size(); ++i) {
OPENVINO_SUPPRESS_DEPRECATED_START
node.get_input_tensor(i).set_tensor_type(old_input_types[i], node.get_input_partial_shape(i));
OPENVINO_SUPPRESS_DEPRECATED_END
}
if (m_original_output_data_types.empty()) {
m_original_output_data_types = element::TypeVector(node.get_output_size());
}
// Save inferred output types
for (size_t i = 0; i < node.get_output_size(); ++i) {
m_original_output_data_types[i] = node.get_output_element_type(i);
}
// Override (some) output types
for (size_t i = 0; i < node.get_output_size(); ++i) {
auto overridden_output_type = get_overridden_output_type(i);
if (overridden_output_type != element::undefined) {
node.set_output_type(i, overridden_output_type, node.get_output_partial_shape(i));
}
}
}
/// Serializes the type-relax marker and the input/output override vectors.
void visit_attributes(AttributeVisitor& visitor) {
bool type_relax = true;
visitor.on_attribute("type_relax", type_relax);
visitor.on_attribute("input_data_types", m_input_data_types);
visitor.on_attribute("output_data_types", m_output_data_types);
}
// Empty tag type: lets init_rt_info be invoked from a member initializer (see TypeRelaxed::init_rt).
typedef struct {
} init_rt_result;
// Marks the node's rt_info so it is attributed to the synthetic "type_relaxed_opset".
init_rt_result init_rt_info(Node& node) const {
node.get_rt_info()["opset"] = "type_relaxed_opset";
return {};
}
protected:
// Data types that are used for parent shape/type infer function input ports
// to infer output data types
element::TypeVector m_input_data_types;
element::TypeVector m_output_data_types;
// Output types the base op inferred before overrides were applied (filled lazily).
element::TypeVector m_original_output_data_types;
};
/// Set another type for a specified output for the period of time when an instance of the class exists.
/// When the execution leaves the scope where an object of TemporaryReplaceOutputType is defined,
/// the type of the output is set to its original value. Used when initialized TypeRelaxed<BaseOp> operation
/// in case when inputs have types that are not compatible with BaseOp infer function. In this case
/// before TypeRelaxed is constructed the BaseOp constructor requires modified data types.
/// So it should be used to temporarily substitute input types while the base op is constructed.
class TemporaryReplaceOutputType {
Output<Node> m_output;
element::Type orig_type;
public:
/// Replace element type for a given output port by tmp_type
TemporaryReplaceOutputType(Output<Node> output, element::Type tmp_type) : m_output(output) {
// save original element type in order to restore it in the destructor
orig_type = m_output.get_element_type();
OPENVINO_SUPPRESS_DEPRECATED_START
m_output.get_tensor().set_element_type(tmp_type);
OPENVINO_SUPPRESS_DEPRECATED_END
}
/// Return the output port that was used in the constructor
Output<Node> get() const {
return m_output;
}
/// Restores the original element type for the output
~TemporaryReplaceOutputType() {
OPENVINO_SUPPRESS_DEPRECATED_START
m_output.get_tensor().set_element_type(orig_type);
OPENVINO_SUPPRESS_DEPRECATED_END
}
};
// TODO: remove once FusedOp is removed
OPENVINO_SUPPRESS_DEPRECATED_START
/// Relaxes tensor element type requirements for BaseOp inputs and outputs
/// This class template should be used with Node descendant class. Defines a new operation by extending the
/// original BaseOp operation with ability to accept inputs and provide outputs with element type that is
/// unusual for BaseOp. For example, TypeRelaxed<opset1::Add> can accept mixed-precision inputs and provide
/// another type of output. New types are provided as inputs attributes for TypeRelaxed template and fixed.
/// There is no deduction logic for the types provided as a part of this class; it should be
/// implemented outside if required.
template <typename BaseOp>
class TypeRelaxed : public BaseOp, public TypeRelaxedBase {
public:
OPENVINO_OP(BaseOp::get_type_info_static().name,
BaseOp::get_type_info_static().version_id,
BaseOp,
BaseOp::get_type_info_static().version);
using BaseOp::BaseOp;
TypeRelaxed() = default;
/// Applies the same overridden_type to every input and output port of base_op.
TypeRelaxed(const BaseOp& base_op, element::Type overridden_type)
: TypeRelaxed(base_op,
element::TypeVector(base_op.get_input_size(), overridden_type),
element::TypeVector(base_op.get_output_size(), overridden_type)) {}
/// Copies base_op and attaches per-port input/output type overrides.
explicit TypeRelaxed(const BaseOp& base_op,
const element::TypeVector& _input_data_types = {},
const element::TypeVector& _output_data_types = {})
: BaseOp(base_op),
TypeRelaxedBase(_input_data_types, _output_data_types) {
init();
}
/// Creating a new TypeRelaxed operation by calling one of the original op ctors forwarding arguments directly.
template <typename... Args>
TypeRelaxed(const element::TypeVector& _input_data_types,
const element::TypeVector& _output_data_types,
Args&&... args)
: BaseOp(std::forward<Args>(args)...),
TypeRelaxedBase(_input_data_types, _output_data_types) {
init();
}
void validate_and_infer_types() override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
private:
// Guards clone_with_new_inputs against concurrent cloning of the same node.
mutable std::mutex type_relax_mutex;
void init() {
validate_and_infer_types();
}
// Member initializer trick: tags rt_info with the type_relaxed opset during construction.
init_rt_result init_rt = init_rt_info(*this);
};
OPENVINO_SUPPRESS_DEPRECATED_START
/// Evaluates the base op with inputs cast to the origin types and casts results back to the
/// overridden output types.
template <typename BaseOp>
bool TypeRelaxed<BaseOp>::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
// Lazily-created Convert op reused for all input/output casts.
std::shared_ptr<ov::op::v0::Convert> convert;
HostTensorVector casted_inputs(BaseOp::get_input_size());
for (size_t i = 0; i < BaseOp::get_input_size(); ++i) {
const auto expected_input_type = get_origin_input_type(i);
if (inputs[i]->get_element_type() == expected_input_type || expected_input_type == element::undefined) {
// No override or types already match: pass the tensor through untouched.
casted_inputs[i] = inputs[i];
} else {
if (convert == nullptr) {
convert = std::make_shared<ov::op::v0::Convert>();
}
convert->set_destination_type(expected_input_type);
casted_inputs[i] = std::make_shared<HostTensor>(expected_input_type, inputs[i]->get_shape());
if (!convert->evaluate({casted_inputs[i]}, {inputs[i]})) {
return false;
}
}
}
// Prepare output tensors of the base op's original element types so BaseOp::evaluate can fill them.
HostTensorVector original_outputs(BaseOp::get_output_size());
for (size_t i = 0; i < BaseOp::get_output_size(); ++i) {
const auto expected_output_type = get_overridden_output_type(i);
if (expected_output_type == element::undefined || expected_output_type == m_original_output_data_types[i]) {
original_outputs[i] = outputs[i];
} else {
original_outputs[i] =
std::make_shared<HostTensor>(m_original_output_data_types[i], BaseOp::get_output_partial_shape(i));
}
}
if (!BaseOp::evaluate(original_outputs, casted_inputs)) {
return false;
}
// Cast any outputs whose inferred type differs from the requested override back into outputs[i].
for (size_t i = 0; i < BaseOp::get_output_size(); ++i) {
const auto expected_output_type = get_overridden_output_type(i);
if (expected_output_type != element::undefined &&
original_outputs[i]->get_element_type() != expected_output_type) {
if (convert == nullptr) {
convert = std::make_shared<ov::op::v0::Convert>();
}
convert->set_destination_type(expected_output_type);
// NOTE(review): casted_output appears unused — convert writes directly into outputs[i]
// below; verify whether this allocation is intended or dead code.
const auto casted_output =
std::make_shared<HostTensor>(expected_output_type, original_outputs[i]->get_shape());
if (!convert->evaluate({outputs[i]}, {original_outputs[i]})) {
return false;
}
}
}
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_END
/// Runs BaseOp's inference with input types temporarily swapped to the origin types, then
/// restores inputs and applies the output-type overrides.
template <typename BaseOp>
void TypeRelaxed<BaseOp>::validate_and_infer_types() {
element::TypeVector old_input_types;
remember_input_data_types(*this, old_input_types);
OPENVINO_SUPPRESS_DEPRECATED_START
BaseOp::validate_and_infer_types();
OPENVINO_SUPPRESS_DEPRECATED_END
restore_input_data_types(*this, old_input_types);
}
/// Clones by copying this node (with its overrides) and then rewiring each input to new_args.
template <typename BaseOp>
std::shared_ptr<Node> TypeRelaxed<BaseOp>::clone_with_new_inputs(const OutputVector& new_args) const {
std::lock_guard<std::mutex> lock(type_relax_mutex);
// copy then modify inputs
std::shared_ptr<Node> new_node =
std::make_shared<TypeRelaxed<BaseOp>>((BaseOp&)(*this), m_input_data_types, m_output_data_types);
for (size_t i = 0; i < new_node->get_input_size(); ++i) {
new_node->input(i).replace_source_output(new_args[i]);
}
new_node->validate_and_infer_types();
return new_node;
}
/// Serializes both the relaxation attributes and the base op's own attributes.
template <typename BaseOp>
bool TypeRelaxed<BaseOp>::visit_attributes(AttributeVisitor& visitor) {
TypeRelaxedBase::visit_attributes(visitor);
BaseOp::visit_attributes(visitor);
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace op
} // namespace ov
// Backward-compatibility aliases for code still using the ngraph namespace; prefer ov::op::*.
namespace ngraph {
namespace op {
using ov::op::TemporaryReplaceOutputType;
using ov::op::TypeRelaxed;
using ov::op::TypeRelaxedBase;
} // namespace op
} // namespace ngraph
#include "ov_ops/type_relaxed.hpp"

View File

@@ -0,0 +1,347 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <memory>
#include <mutex>
#include <openvino/op/convert.hpp>
#include <string>
#include <transformations_visibility.hpp>
#include <vector>
namespace ov {
namespace op {
/// A base class for templated TypeRelaxed that maintains overridden input types and output types for an operation.
class OPENVINO_API TypeRelaxedBase {
public:
virtual ~TypeRelaxedBase();
/// Stores the per-port input/output element-type overrides; empty vectors mean "no overrides".
explicit TypeRelaxedBase(const element::TypeVector& _input_data_types = {},
const element::TypeVector& _output_data_types = {})
: m_input_data_types(_input_data_types),
m_output_data_types(_output_data_types) {}
/// \return Data type that will be set for output with a given index outputIndex.
/// If output with a specified index outputIndex hasn't been set before, element::undefined will be returned.
/// Undefined means no type override happens for a given outputIndex and it will be deduced as the original
/// operation defines in its infer function.
///
/// This method may look similar to Node::get_output_element_type, but it is not the same thing, because
/// get_output_element_type returns the result of type inference, so it is completely deduced from
/// an operation inputs and attributes, and get_overridden_output_type returns value of the attribute that
/// is used to deduce output type. In some cases they don't match: get_overridden_output_type may return
/// element::undefined for some index i, and get_output_element_type will return some real type for
/// the same index i.
const element::Type& get_overridden_output_type(size_t outputIndex = 0) const {
if (outputIndex >= m_output_data_types.size()) {
return element::undefined;
}
return m_output_data_types[outputIndex];
}
/// Set data type that overrides the original data type for output port with outputIndex index
/// In case if outputIndex is out of range of known outputs (and this class cannot detect
/// the real number of outputs for original operation), the number of overridden outputs
/// is changed according to a given outputIndex value.
void set_overridden_output_type(const element::Type& element_type, size_t outputIndex = 0) {
if (outputIndex >= m_output_data_types.size()) {
// Grow the override vector on demand; new slots default to "no override".
m_output_data_types.resize(outputIndex + 1, element::undefined);
}
m_output_data_types[outputIndex] = element_type;
}
/// \return Data type that will be set for input when original shape/type inference function is called.
/// If index inputIndex hasn't been set before, element::undefined will be returned. Undefined means that
/// the type from input tensor descriptor is used for a given index.
const element::Type& get_origin_input_type(size_t inputIndex = 0) const {
if (inputIndex >= m_input_data_types.size()) {
return element::undefined;
}
return m_input_data_types[inputIndex];
}
/// Set data type that overrides the original data type for input port with inputIndex index.
/// In case if inputIndex is out of range of known inputs (and this class cannot detect
/// the real number of inputs for original operation), the number of overridden inputs
/// is changed according to a given inputIndex value. All new entries except one added
/// at inputIndex position are undefined.
void set_origin_input_type(const element::Type& element_type, size_t inputIndex = 0) {
if (inputIndex >= m_input_data_types.size()) {
m_input_data_types.resize(inputIndex + 1, element::undefined);
}
m_input_data_types[inputIndex] = element_type;
}
protected:
/// Saves the node's current input element types into old_input_types, then substitutes the
/// configured origin input types so the base op's shape/type inference sees the relaxed types.
void remember_input_data_types(Node& node, element::TypeVector& old_input_types) {
// Remember all input data types
for (size_t i = 0; i < node.get_input_size(); ++i) {
old_input_types.push_back(node.get_input_element_type(i));
}
// Reset input data types to the overridden origin types stored in m_input_data_types.
for (size_t i = 0; i < node.get_input_size(); ++i) {
auto origin_input_type = get_origin_input_type(i);
if (origin_input_type != element::undefined) {
OPENVINO_SUPPRESS_DEPRECATED_START
node.get_input_tensor(i).set_tensor_type(origin_input_type, node.get_input_partial_shape(i));
OPENVINO_SUPPRESS_DEPRECATED_END
}
}
}
/// Restores the input element types saved by remember_input_data_types, records the types the
/// base op inferred, and finally applies the configured output-type overrides to the node.
void restore_input_data_types(Node& node, const element::TypeVector& old_input_types) {
// Restore original input data types
for (size_t i = 0; i < node.get_input_size(); ++i) {
OPENVINO_SUPPRESS_DEPRECATED_START
node.get_input_tensor(i).set_tensor_type(old_input_types[i], node.get_input_partial_shape(i));
OPENVINO_SUPPRESS_DEPRECATED_END
}
if (m_original_output_data_types.empty()) {
m_original_output_data_types = element::TypeVector(node.get_output_size());
}
// Save inferred output types
for (size_t i = 0; i < node.get_output_size(); ++i) {
m_original_output_data_types[i] = node.get_output_element_type(i);
}
// Override (some) output types
for (size_t i = 0; i < node.get_output_size(); ++i) {
auto overridden_output_type = get_overridden_output_type(i);
if (overridden_output_type != element::undefined) {
node.set_output_type(i, overridden_output_type, node.get_output_partial_shape(i));
}
}
}
/// Serializes the type-relax marker and the input/output override vectors.
void visit_attributes(AttributeVisitor& visitor) {
bool type_relax = true;
visitor.on_attribute("type_relax", type_relax);
visitor.on_attribute("input_data_types", m_input_data_types);
visitor.on_attribute("output_data_types", m_output_data_types);
}
// Empty tag type: lets init_rt_info be invoked from a member initializer (see TypeRelaxed::init_rt).
typedef struct {
} init_rt_result;
// Marks the node's rt_info so it is attributed to the synthetic "type_relaxed_opset".
init_rt_result init_rt_info(Node& node) const {
node.get_rt_info()["opset"] = "type_relaxed_opset";
return {};
}
protected:
// Data types that are used for parent shape/type infer function input ports
// to infer output data types
element::TypeVector m_input_data_types;
element::TypeVector m_output_data_types;
// Output types the base op inferred before overrides were applied (filled lazily).
element::TypeVector m_original_output_data_types;
};
/// Set another type for a specified output for the period of time when an instance of the class exists.
/// When the execution leaves the scope where an object of TemporaryReplaceOutputType is defined,
/// the type of the output is set to its original value. Used when initialized TypeRelaxed<BaseOp> operation
/// in case when inputs have types that are not compatible with BaseOp infer function. In this case
/// before TypeRelaxed is constructed the BaseOp constructor requires modified data types.
/// So it should be used to temporarily substitute input types while the base op is constructed.
class TemporaryReplaceOutputType {
Output<Node> m_output;
element::Type orig_type;
public:
/// Replace element type for a given output port by tmp_type
TemporaryReplaceOutputType(Output<Node> output, element::Type tmp_type) : m_output(output) {
// save original element type in order to restore it in the destructor
orig_type = m_output.get_element_type();
OPENVINO_SUPPRESS_DEPRECATED_START
m_output.get_tensor().set_element_type(tmp_type);
OPENVINO_SUPPRESS_DEPRECATED_END
}
/// Return the output port that was used in the constructor
Output<Node> get() const {
return m_output;
}
/// Restores the original element type for the output
~TemporaryReplaceOutputType() {
OPENVINO_SUPPRESS_DEPRECATED_START
m_output.get_tensor().set_element_type(orig_type);
OPENVINO_SUPPRESS_DEPRECATED_END
}
};
// TODO: remove once FusedOp is removed
OPENVINO_SUPPRESS_DEPRECATED_START
/// Relaxes tensor element type requirements for BaseOp inputs and outputs
/// This class template should be used with Node descendant class. Defines a new operation by extending the
/// original BaseOp operation with ability to accept inputs and provide outputs with element type that is
/// unusual for BaseOp. For example, TypeRelaxed<opset1::Add> can accept mixed-precision inputs and provide
/// another type of output. New types are provided as inputs attributes for TypeRelaxed template and fixed.
/// There is no deduction logic for the types provided as a part of this class; it should be
/// implemented outside if required.
template <typename BaseOp>
class TypeRelaxed : public BaseOp, public TypeRelaxedBase {
public:
OPENVINO_OP(BaseOp::get_type_info_static().name,
BaseOp::get_type_info_static().version_id,
BaseOp,
BaseOp::get_type_info_static().version);
using BaseOp::BaseOp;
TypeRelaxed() = default;
/// Applies the same overridden_type to every input and output port of base_op.
TypeRelaxed(const BaseOp& base_op, element::Type overridden_type)
: TypeRelaxed(base_op,
element::TypeVector(base_op.get_input_size(), overridden_type),
element::TypeVector(base_op.get_output_size(), overridden_type)) {}
/// Copies base_op and attaches per-port input/output type overrides.
explicit TypeRelaxed(const BaseOp& base_op,
const element::TypeVector& _input_data_types = {},
const element::TypeVector& _output_data_types = {})
: BaseOp(base_op),
TypeRelaxedBase(_input_data_types, _output_data_types) {
init();
}
/// Creating a new TypeRelaxed operation by calling one of the original op ctors forwarding arguments directly.
template <typename... Args>
TypeRelaxed(const element::TypeVector& _input_data_types,
const element::TypeVector& _output_data_types,
Args&&... args)
: BaseOp(std::forward<Args>(args)...),
TypeRelaxedBase(_input_data_types, _output_data_types) {
init();
}
void validate_and_infer_types() override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
private:
// Guards clone_with_new_inputs against concurrent cloning of the same node.
mutable std::mutex type_relax_mutex;
void init() {
validate_and_infer_types();
}
// Member initializer trick: tags rt_info with the type_relaxed opset during construction.
init_rt_result init_rt = init_rt_info(*this);
};
OPENVINO_SUPPRESS_DEPRECATED_START
/// Evaluates the base op with inputs cast to the origin types and casts results back to the
/// overridden output types.
template <typename BaseOp>
bool TypeRelaxed<BaseOp>::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
// Lazily-created Convert op reused for all input/output casts.
std::shared_ptr<ov::op::v0::Convert> convert;
HostTensorVector casted_inputs(BaseOp::get_input_size());
for (size_t i = 0; i < BaseOp::get_input_size(); ++i) {
const auto expected_input_type = get_origin_input_type(i);
if (inputs[i]->get_element_type() == expected_input_type || expected_input_type == element::undefined) {
// No override or types already match: pass the tensor through untouched.
casted_inputs[i] = inputs[i];
} else {
if (convert == nullptr) {
convert = std::make_shared<ov::op::v0::Convert>();
}
convert->set_destination_type(expected_input_type);
casted_inputs[i] = std::make_shared<HostTensor>(expected_input_type, inputs[i]->get_shape());
if (!convert->evaluate({casted_inputs[i]}, {inputs[i]})) {
return false;
}
}
}
// Prepare output tensors of the base op's original element types so BaseOp::evaluate can fill them.
HostTensorVector original_outputs(BaseOp::get_output_size());
for (size_t i = 0; i < BaseOp::get_output_size(); ++i) {
const auto expected_output_type = get_overridden_output_type(i);
if (expected_output_type == element::undefined || expected_output_type == m_original_output_data_types[i]) {
original_outputs[i] = outputs[i];
} else {
original_outputs[i] =
std::make_shared<HostTensor>(m_original_output_data_types[i], BaseOp::get_output_partial_shape(i));
}
}
if (!BaseOp::evaluate(original_outputs, casted_inputs)) {
return false;
}
// Cast any outputs whose inferred type differs from the requested override back into outputs[i].
for (size_t i = 0; i < BaseOp::get_output_size(); ++i) {
const auto expected_output_type = get_overridden_output_type(i);
if (expected_output_type != element::undefined &&
original_outputs[i]->get_element_type() != expected_output_type) {
if (convert == nullptr) {
convert = std::make_shared<ov::op::v0::Convert>();
}
convert->set_destination_type(expected_output_type);
// NOTE(review): casted_output appears unused — convert writes directly into outputs[i]
// below; verify whether this allocation is intended or dead code.
const auto casted_output =
std::make_shared<HostTensor>(expected_output_type, original_outputs[i]->get_shape());
if (!convert->evaluate({outputs[i]}, {original_outputs[i]})) {
return false;
}
}
}
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_END
/// Runs BaseOp's inference with input types temporarily swapped to the origin types, then
/// restores inputs and applies the output-type overrides.
template <typename BaseOp>
void TypeRelaxed<BaseOp>::validate_and_infer_types() {
element::TypeVector old_input_types;
remember_input_data_types(*this, old_input_types);
OPENVINO_SUPPRESS_DEPRECATED_START
BaseOp::validate_and_infer_types();
OPENVINO_SUPPRESS_DEPRECATED_END
restore_input_data_types(*this, old_input_types);
}
/// Clones by copying this node (with its overrides) and then rewiring each input to new_args.
template <typename BaseOp>
std::shared_ptr<Node> TypeRelaxed<BaseOp>::clone_with_new_inputs(const OutputVector& new_args) const {
std::lock_guard<std::mutex> lock(type_relax_mutex);
// copy then modify inputs
std::shared_ptr<Node> new_node =
std::make_shared<TypeRelaxed<BaseOp>>((BaseOp&)(*this), m_input_data_types, m_output_data_types);
for (size_t i = 0; i < new_node->get_input_size(); ++i) {
new_node->input(i).replace_source_output(new_args[i]);
}
new_node->validate_and_infer_types();
return new_node;
}
/// Serializes both the relaxation attributes and the base op's own attributes.
template <typename BaseOp>
bool TypeRelaxed<BaseOp>::visit_attributes(AttributeVisitor& visitor) {
TypeRelaxedBase::visit_attributes(visitor);
BaseOp::visit_attributes(visitor);
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace op
} // namespace ov
// Backward-compatibility aliases for code still using the ngraph namespace; prefer ov::op::*.
namespace ngraph {
namespace op {
using ov::op::TemporaryReplaceOutputType;
using ov::op::TypeRelaxed;
using ov::op::TypeRelaxedBase;
} // namespace op
} // namespace ngraph

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/augru_cell.hpp"
#include "ov_ops/augru_cell.hpp"
#include <cmath>

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "augru_sequence_shape_inference.hpp"
#include "itt.hpp"

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/generate_proposals_ie_internal.hpp"
#include "ov_ops/generate_proposals_ie_internal.hpp"
#include <memory>
#include <openvino/opsets/opset9.hpp>

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/multiclass_nms_ie_internal.hpp"
#include "ov_ops/multiclass_nms_ie_internal.hpp"
#include "../../core/shape_inference/include/multiclass_nms_shape_inference.hpp"
#include "itt.hpp"

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/nms_ie_internal.hpp"
#include "ov_ops/nms_ie_internal.hpp"
#include <memory>
#include <openvino/opsets/opset5.hpp>

View File

@@ -4,7 +4,7 @@
// clang-format off
#include "ngraph/ops.hpp"
#include "ngraph_ops/nms_static_shape_ie.hpp"
#include "ov_ops/nms_static_shape_ie.hpp"
// clang-format on
#include <memory>

View File

@@ -7,10 +7,10 @@
#include <memory>
#include "itt.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "openvino/core/rt_info.hpp"
#include "openvino/opsets/opset9.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "ov_ops/augru_cell.hpp"
using namespace std;
using namespace ov::opset9;

View File

@@ -7,12 +7,12 @@
#include <memory>
#include "itt.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "openvino/core/rt_info.hpp"
#include "openvino/opsets/opset3.hpp"
#include "openvino/opsets/opset9.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "transformations/utils/utils.hpp"
using namespace std;

View File

@@ -16,7 +16,7 @@
#include <vector>
#include "itt.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
using namespace ov;

View File

@@ -10,7 +10,7 @@
#include <openvino/opsets/opset9.hpp>
#include "itt.hpp"
#include "ngraph_ops/generate_proposals_ie_internal.hpp"
#include "ov_ops/generate_proposals_ie_internal.hpp"
#include "transformations/utils/utils.hpp"
ov::pass::ConvertGP9ToGPIEInternal::ConvertGP9ToGPIEInternal() {

View File

@@ -13,7 +13,7 @@
#include <vector>
#include "itt.hpp"
#include "ngraph_ops/nms_static_shape_ie.hpp"
#include "ov_ops/nms_static_shape_ie.hpp"
#include "transformations/utils/utils.hpp"
ov::pass::ConvertMatrixNmsToMatrixNmsIE::ConvertMatrixNmsToMatrixNmsIE(bool force_i32_output_type) {

View File

@@ -12,7 +12,7 @@
#include <vector>
#include "itt.hpp"
#include "ngraph_ops/multiclass_nms_ie_internal.hpp"
#include "ov_ops/multiclass_nms_ie_internal.hpp"
#include "transformations/utils/utils.hpp"
using namespace ov;

View File

@@ -12,7 +12,7 @@
#include <vector>
#include "itt.hpp"
#include "ngraph_ops/nms_ie_internal.hpp"
#include "ov_ops/nms_ie_internal.hpp"
#include "transformations/utils/utils.hpp"
ov::pass::ConvertNMS9ToNMSIEInternal::ConvertNMS9ToNMSIEInternal() {

View File

@@ -12,7 +12,7 @@
#include <vector>
#include "itt.hpp"
#include "ngraph_ops/nms_ie_internal.hpp"
#include "ov_ops/nms_ie_internal.hpp"
#include "transformations/utils/utils.hpp"
ov::pass::ConvertNMSToNMSIEInternal::ConvertNMSToNMSIEInternal() {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "gru_cell_shape_inference.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "utils.hpp"
namespace ov {

View File

@@ -4,7 +4,7 @@
#pragma once
#include "gru_sequence_shape_inference.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "utils.hpp"
namespace ov {

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include <algorithm>
#include <memory>

View File

@@ -11,11 +11,11 @@
#include <vector>
#include "atomic_guard.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "openvino/core/model.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/node_vector.hpp"
#include "openvino/opsets/opset8.hpp"
#include "ov_ops/type_relaxed.hpp"
using namespace ngraph;
using namespace std;

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/augru_cell.hpp"
#include "ov_ops/augru_cell.hpp"
#include "gtest/gtest.h"
#include "openvino/core/attribute_visitor.hpp"

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "common_test_utils/test_assertions.hpp"
#include "gtest/gtest.h"

View File

@@ -13,12 +13,12 @@
#include "snippets_transformations/op/store_convert.hpp"
#include <ngraph/ngraph.hpp>
#include <ngraph_ops/augru_cell.hpp>
#include <ngraph_ops/augru_sequence.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ngraph_ops/nms_static_shape_ie.hpp>
#include <ngraph_ops/multiclass_nms_ie_internal.hpp>
#include <ov_ops/augru_cell.hpp>
#include <ov_ops/augru_sequence.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_static_shape_ie.hpp>
#include <ov_ops/multiclass_nms_ie_internal.hpp>
#include <mutex>

View File

@@ -3,7 +3,7 @@
//
#include "multiclass_nms.hpp"
#include "ngraph_ops/multiclass_nms_ie_internal.hpp"
#include "ov_ops/multiclass_nms_ie_internal.hpp"
#include <algorithm>
#include <cassert>

View File

@@ -12,7 +12,7 @@
#include "non_max_suppression.h"
#include "ie_parallel.hpp"
#include <ngraph/opsets/opset5.hpp>
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include "utils/general_utils.h"
#include "cpu/x64/jit_generator.hpp"

View File

@@ -13,8 +13,8 @@
#include "memory_desc/dnnl_blocked_memory_desc.h"
#include <common/primitive_hashing_utils.hpp>
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
#include <ngraph/node.hpp>

View File

@@ -107,11 +107,11 @@
#include <ngraph/opsets/opset4.hpp>
#include <ngraph/opsets/opset5.hpp>
#include <ngraph/opsets/opset6.hpp>
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include <ngraph/op/util/op_types.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/graph_util.hpp>
#include <ov_ops/augru_cell.hpp>
#include <ov_ops/augru_sequence.hpp>
#include <transformations/low_precision/disable_convert_constant_folding_on_const_path.hpp>
#include <low_precision/common/quantization_granularity_restriction.hpp>

View File

@@ -11,8 +11,8 @@
#include <openvino/opsets/opset7.hpp>
#include <openvino/opsets/opset8.hpp>
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "assign_shape_inference.hpp"
#include "augru_cell_shape_inference.hpp"

View File

@@ -3,9 +3,9 @@
//
#include "behavior/ov_plugin/caching_tests.hpp"
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ngraph_ops/nms_static_shape_ie.hpp>
#include <ngraph_ops/multiclass_nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_static_shape_ie.hpp>
#include <ov_ops/multiclass_nms_ie_internal.hpp>
using namespace ov::test::behavior;
using namespace ngraph;

View File

@@ -3,9 +3,9 @@
//
#include "behavior/plugin/caching_tests.hpp"
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ngraph_ops/nms_static_shape_ie.hpp>
#include <ngraph_ops/multiclass_nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_static_shape_ie.hpp>
#include <ov_ops/multiclass_nms_ie_internal.hpp>
using namespace LayerTestsDefinitions;
using namespace ngraph;

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "test_utils/fusing_test_utils.hpp"
#include "test_utils/convolution_params.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

View File

@@ -13,7 +13,7 @@
#include <ngraph_transformations/op/leaky_relu.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ngraph/pass/manager.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"

View File

@@ -6,7 +6,7 @@
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset8.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include <transformations/init_node_info.hpp>
#include <ngraph/pass/manager.hpp>

View File

@@ -14,7 +14,7 @@
#include <ngraph_transformations/rnn_sequences_optimization.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ngraph/pass/manager.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"

View File

@@ -12,7 +12,7 @@
#include <ngraph_transformations/reshape_prelu.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ngraph/pass/manager.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/augru_cell.hpp"
#include "ov_ops/augru_cell.hpp"
#include <gtest/gtest.h>

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_sequence.hpp"
#include <gtest/gtest.h>

View File

@@ -11,7 +11,7 @@
#include "ngraph_functions/builders.hpp"
#include <thread>
#include <atomic>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
using namespace ov;
using namespace ov::intel_cpu;
@@ -57,4 +57,4 @@ TEST(StaticShapeInferenceTest, MakeShapeInference) {
}
ASSERT_FALSE(wrongPrcFlag.test_and_set());
}
}

View File

@@ -4,7 +4,7 @@
#include "intel_gpu/primitives/generate_proposals.hpp"
#include <ngraph_ops/generate_proposals_ie_internal.hpp>
#include <ov_ops/generate_proposals_ie_internal.hpp>
#include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/program.hpp"

View File

@@ -9,7 +9,7 @@
#include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/program.hpp"
#include "intel_gpu/primitives/mutable_data.hpp"
#include "ngraph_ops/nms_static_shape_ie.hpp"
#include "ov_ops/nms_static_shape_ie.hpp"
namespace ngraph {
namespace op {

View File

@@ -3,7 +3,7 @@
//
#include <intel_gpu/primitives/multiclass_nms.hpp>
#include "ngraph_ops/multiclass_nms_ie_internal.hpp"
#include "ov_ops/multiclass_nms_ie_internal.hpp"
#include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/program.hpp"

View File

@@ -7,7 +7,7 @@
#include "ngraph/op/non_max_suppression.hpp"
#include <ngraph/opsets/opset3.hpp>
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include "intel_gpu/primitives/reorder.hpp"
#include "intel_gpu/primitives/mutable_data.hpp"

View File

@@ -4,7 +4,7 @@
#include "intel_gpu/plugin/program.hpp"
#include "ngraph/ops.hpp"
#include "ngraph_ops/nms_ie_internal.hpp"
#include "ov_ops/nms_ie_internal.hpp"
#include "openvino/core/graph_util.hpp"
#include "intel_gpu/plugin/itt.hpp"
#include "intel_gpu/plugin/transformations_pipeline.hpp"

View File

@@ -92,8 +92,8 @@
#include "backend.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/runtime/reference/convert_color_nv12.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
using namespace ngraph;
using namespace std;

View File

@@ -4,7 +4,7 @@
#include <gtest/gtest.h>
#include "ngraph_ops/augru_cell.hpp"
#include "ov_ops/augru_cell.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;

View File

@@ -4,7 +4,7 @@
#include <gtest/gtest.h>
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;

View File

@@ -6,7 +6,7 @@
#include <gtest/gtest.h>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "ov_ops/augru_cell.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/opsets/opset9.hpp"
#include "transformations/common_optimizations/augru_cell_fusion.hpp"

View File

@@ -9,8 +9,8 @@
#include "openvino/opsets/opset3.hpp"
#include "openvino/opsets/opset9.hpp"
#include "transformations/common_optimizations/sequence_fusion.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "ov_ops/augru_cell.hpp"
using namespace ov;
using namespace std;

View File

@@ -16,7 +16,7 @@
#include <transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/nms_static_shape_ie.hpp>
#include <ov_ops/nms_static_shape_ie.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/manager.hpp>

View File

@@ -14,7 +14,7 @@
#include <transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/multiclass_nms_ie_internal.hpp>
#include <ov_ops/multiclass_nms_ie_internal.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/manager.hpp>

View File

@@ -14,7 +14,7 @@
#include <transformations/op_conversions/convert_previous_nms_to_nms_9.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/manager.hpp>

View File

@@ -17,7 +17,7 @@
#include <transformations/op_conversions/convert_previous_nms_to_nms_5.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/manager.hpp>

View File

@@ -6,7 +6,7 @@
#include "common_test_utils/test_common.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ngraph/pass/manager.hpp>

View File

@@ -18,7 +18,7 @@
#include <transformations/convert_precision.hpp>
#include <transformations/utils/utils.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"

View File

@@ -3,7 +3,7 @@
//
#include "behavior/ov_plugin/caching_tests.hpp"
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include "ov_api_conformance_helpers.hpp"
namespace {

View File

@@ -3,7 +3,7 @@
//
#include "behavior/plugin/caching_tests.hpp"
#include <ngraph_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include "api_conformance_helpers.hpp"
namespace {

View File

@@ -11,7 +11,7 @@
#include <string>
#include <ie_core.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "fake_quantize_function.hpp"
#include "function_helper.hpp"

View File

@@ -12,7 +12,7 @@
#include <vector>
#include <ngraph/ngraph.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "low_precision/layer_transformation.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"

View File

@@ -3,8 +3,8 @@
//
#include "ngraph/ops.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/base/utils/compare_results.hpp"
@@ -81,7 +81,7 @@ CompareMap getCompareMap() {
#include "openvino/opsets/opset9_tbl.hpp"
#include "openvino/opsets/opset10_tbl.hpp"
#include "ngraph_ops/opset_private_tbl.hpp"
#include "ov_ops/opset_private_tbl.hpp"
#undef _OPENVINO_OP_REG
};
return compareMap;

View File

@@ -4,8 +4,8 @@
#include <shared_test_classes/base/ov_subgraph.hpp>
#include "ngraph/ops.hpp"
#include "ngraph_ops/augru_cell.hpp"
#include "ngraph_ops/augru_sequence.hpp"
#include "ov_ops/augru_cell.hpp"
#include "ov_ops/augru_sequence.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>
@@ -782,7 +782,7 @@ InputsMap getInputMap() {
#include "openvino/opsets/opset8_tbl.hpp"
#include "openvino/opsets/opset9_tbl.hpp"
#include "ngraph_ops/opset_private_tbl.hpp"
#include "ov_ops/opset_private_tbl.hpp"
#undef _OPENVINO_OP_REG
};
return inputsMap;

View File

@@ -8,7 +8,7 @@
#include <ngraph/ngraph.hpp>
#include <ngraph/ops.hpp>
#include <ngraph/op/constant.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "low_precision/rt_info/intervals_alignment_attribute.hpp"
#include "low_precision/rt_info/quantization_alignment_attribute.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "low_precision/network_helper.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -3,7 +3,7 @@
//
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "low_precision/network_helper.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -8,7 +8,7 @@
#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/concat_function.hpp"
#include <openvino/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "low_precision/network_helper.hpp"
#include "low_precision/rt_info/precision_preserved_attribute.hpp"
#include "low_precision/rt_info/intervals_alignment_attribute.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/convolution_backprop_data_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/convolution_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"
#include "low_precision/rt_info/quantization_granularity_attribute.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/fake_quantize_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/fold_fake_quantize_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
namespace ngraph {

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/group_convolution_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -3,7 +3,7 @@
//
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "low_precision/network_helper.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -8,7 +8,7 @@
#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/max_pool_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "low_precision/network_helper.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -6,7 +6,7 @@
#include <low_precision/relu.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "low_precision/network_helper.hpp"
#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/multiply_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -6,7 +6,7 @@
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
namespace ngraph {
namespace builder {

View File

@@ -6,7 +6,7 @@
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
namespace ngraph {
namespace builder {

View File

@@ -6,7 +6,7 @@
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
namespace ngraph {
namespace builder {
@@ -42,4 +42,4 @@ namespace subgraph {
} // namespace subgraph
} // namespace builder
} // namespace ngraph
} // namespace ngraph

View File

@@ -4,7 +4,7 @@
#include "lpt_ngraph_functions/normalize_l2_function.hpp"
#include <ngraph_ops/type_relaxed.hpp>
#include <ov_ops/type_relaxed.hpp>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/precision_propagation_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "low_precision/network_helper.hpp"
#include "low_precision/rt_info/precision_preserved_attribute.hpp"
#include "low_precision/rt_info/intervals_alignment_attribute.hpp"

View File

@@ -7,7 +7,7 @@
#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
#include "low_precision/network_helper.hpp"

View File

@@ -5,7 +5,7 @@
#include "lpt_ngraph_functions/recurrent_cell_function.hpp"
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "low_precision/network_helper.hpp"
#include "low_precision/rt_info/precision_preserved_attribute.hpp"
#include "low_precision/rt_info/intervals_alignment_attribute.hpp"

View File

@@ -7,7 +7,7 @@
#include <memory>
#include <ngraph/opsets/opset1.hpp>
#include "ngraph_ops/type_relaxed.hpp"
#include "ov_ops/type_relaxed.hpp"
#include "lpt_ngraph_functions/common/builders.hpp"
#include "low_precision/network_helper.hpp"

Some files were not shown because too many files have changed in this diff Show More