Remove opset include from utils.hpp (#15379)

* Remove opset include from utils.hpp

* Fix build
This commit is contained in:
Maxim Vafin
2023-01-28 10:14:29 +01:00
committed by GitHub
parent ba5ddf2e49
commit 0da79e2f7d
7 changed files with 25 additions and 16 deletions

View File

@@ -8,6 +8,9 @@
#include <utility>
#include "openvino/core/rt_info.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/split.hpp"
#include "openvino/op/squeeze.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
@@ -57,7 +60,7 @@ AppendListUnpackReplacer::AppendListUnpackReplacer() {
// If aten::__getitem__, expect inputs to be equivalent of pytorch Tensor[][].
// Tensor selected by aten::__getitem__ index needs to be split along axis 0.
auto getitem_index_ptr = getitem_node->input_value(1).get_node_shared_ptr();
auto getitem_index_const = std::dynamic_pointer_cast<opset10::Constant>(getitem_index_ptr);
auto getitem_index_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(getitem_index_ptr);
auto index_val = getitem_index_const->cast_vector<int64_t>();
auto index = 0;
if (index_val[0] >= 0) {
@@ -65,12 +68,12 @@ AppendListUnpackReplacer::AppendListUnpackReplacer() {
} else {
index = inputs.size() + index_val[0];
}
auto axis_0 = opset10::Constant::create(element::i64, Shape{}, {0});
auto split = std::make_shared<opset10::Split>(inputs[index], axis_0, list_unpack->get_output_size());
auto axis_0 = ov::op::v0::Constant::create(element::i64, Shape{}, {0});
auto split = std::make_shared<ov::op::v1::Split>(inputs[index], axis_0, list_unpack->get_output_size());
NodeVector to_copy_rt{axis_0, split};
OutputVector res;
for (auto output : split->outputs()) {
auto squeeze = std::make_shared<opset10::Squeeze>(output, axis_0);
auto squeeze = std::make_shared<ov::op::v0::Squeeze>(output, axis_0);
to_copy_rt.push_back(squeeze);
res.push_back(squeeze);
}

View File

@@ -8,6 +8,8 @@
#include <utility>
#include "openvino/core/rt_info.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
@@ -36,7 +38,7 @@ AtenCatToConcat::AtenCatToConcat() {
return false;
auto axis_node = cat->input(1).get_source_output().get_node_shared_ptr();
auto axis_const = std::dynamic_pointer_cast<opset10::Constant>(axis_node);
auto axis_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(axis_node);
if (!axis_const)
return false;
auto axis = axis_const->cast_vector<int64_t>();
@@ -60,7 +62,7 @@ AtenCatToConcat::AtenCatToConcat() {
inputs.push_back(input.get_source_output());
}
inputs.insert(inputs.end(), tmp_inputs.rbegin(), tmp_inputs.rend());
auto result = std::make_shared<opset10::Concat>(inputs, axis[0]);
auto result = std::make_shared<ov::op::v0::Concat>(inputs, axis[0]);
copy_runtime_info(rt_copy_from, result);
replace_node(cat, result);

View File

@@ -9,6 +9,7 @@
#include "openvino/core/rt_info.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "pt_framework_node.hpp"

View File

@@ -6,6 +6,7 @@
#include "openvino/core/rt_info.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "utils.hpp"

View File

@@ -9,6 +9,7 @@
#include "openvino/core/rt_info.hpp"
#include "openvino/op/util/framework_node.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/pass/pattern/matcher.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "utils.hpp"

View File

@@ -6,6 +6,7 @@
#include "op_table.hpp"
#include "openvino/frontend/pytorch/decoder.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/util/log.hpp"
#include "pt_framework_node.hpp"
@@ -33,7 +34,7 @@ Output<Node> make_optional_bias(const Output<Node>& base_op,
}
}
Output<ov::Node> reshape_conv_bias(NodeContext& context, Output<ov::Node> bias, Output<ov::Node> conv) {
Output<ov::Node> reshape_conv_bias(const NodeContext& context, Output<ov::Node> bias, Output<ov::Node> conv) {
auto conv_shape = context.mark_node(std::make_shared<opset10::ShapeOf>(conv));
auto conv_rank = context.mark_node(std::make_shared<opset10::ShapeOf>(conv_shape));
auto one_const = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {1}));
@@ -99,7 +100,7 @@ Output<Node> reshape_kernel_for_group(const NodeContext& context,
return make_shared<opset10::Reshape>(kernel, new_kernel_shape, false);
}
std::shared_ptr<Node> get_axes_range(NodeContext& context, size_t input_id) {
std::shared_ptr<Node> get_axes_range(const NodeContext& context, size_t input_id) {
auto x = context.get_input(input_id);
auto start = std::make_shared<opset10::Constant>(element::i32, Shape{}, 0);
auto step = std::make_shared<opset10::Constant>(element::i32, Shape{}, 1);
@@ -109,7 +110,7 @@ std::shared_ptr<Node> get_axes_range(NodeContext& context, size_t input_id) {
return context.mark_node(std::make_shared<opset10::Range>(start, reduced_rank, step, element::i32));
};
std::shared_ptr<Node> numel(NodeContext& context, size_t input_id) {
std::shared_ptr<Node> numel(const NodeContext& context, size_t input_id) {
auto x = context.get_input(input_id);
auto input_shape = context.mark_node(std::make_shared<opset10::ShapeOf>(x));
auto axes = context.mark_node(opset10::Constant::create(element::i64, Shape({1}), {0}));

View File

@@ -5,14 +5,14 @@
#pragma once
#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/op/constant.hpp"
namespace ov {
namespace op {
namespace util {
class FrameworkNode;
}
} // namespace util
} // namespace op
namespace frontend {
@@ -23,7 +23,7 @@ Output<Node> make_optional_bias(const Output<Node>& base_op,
size_t bias_input_idx,
const std::vector<int>& unsqueeze_dims = {});
Output<ov::Node> reshape_conv_bias(NodeContext& context, Output<ov::Node> bias, Output<ngraph::Node> conv);
Output<ov::Node> reshape_conv_bias(const NodeContext& context, Output<ov::Node> bias, Output<ngraph::Node> conv);
std::shared_ptr<ov::Node> get_rank_node(const Output<Node>& node);
@@ -32,9 +32,9 @@ Output<Node> reshape_kernel_for_group(const NodeContext& context,
const Output<Node>& kernel,
int64_t groups);
std::shared_ptr<Node> get_axes_range(NodeContext& context, size_t input_id);
std::shared_ptr<Node> get_axes_range(const NodeContext& context, size_t input_id);
std::shared_ptr<Node> numel(NodeContext& context, size_t input_id);
std::shared_ptr<Node> numel(const NodeContext& context, size_t input_id);
element::Type convert_dtype(int64_t dtype_value);
ov::op::PadType convert_pad(const std::string& pt_pad);
@@ -84,14 +84,14 @@ OutputVector translate_1to1_match_2_inputs(NodeContext& context) {
}
inline OutputVector return_false_scalar(NodeContext& context) {
return {context.mark_node(opset10::Constant::create(element::boolean, Shape{}, {false}))};
return {context.mark_node(ov::op::v0::Constant::create(element::boolean, Shape{}, {false}))};
}
inline OutputVector skip_node(NodeContext& context) {
return {context.get_input(0).get_node_shared_ptr()};
}
} // namespace op
} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov