[CPU] cleanup misc IE and ngraph (#21007)

Cleanup of the remaining legacy-API usages in the CPU plugin, replacing them with their ov:: equivalents (a before/after sketch of the pattern follows the commit metadata below):
1. InferenceEngine::SizeVector
2. InferenceEngine::parallel_for
3. All of the ngraph namespace except ngraph::op
Author: River Li
Date:   2023-11-13 21:06:59 +08:00 (committed by GitHub)
Commit: 5dd317c733 (parent: f97e7f1c9d)
224 changed files with 1156 additions and 1157 deletions
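The change is mechanical throughout: ngraph::Node was already an alias of ov::Node, so every signature that named the legacy alias is respelled against the ov namespace directly. A minimal before/after sketch of the pattern, taken from the emitter headers changed below:

    // Before: the legacy ngraph alias.
    static std::set<std::vector<element::Type>> get_supported_precisions(
        const std::shared_ptr<ngraph::Node>& node = nullptr);

    // After: the same signature spelled with the core ov namespace.
    static std::set<std::vector<element::Type>> get_supported_precisions(
        const std::shared_ptr<ov::Node>& node = nullptr);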

---- changed file ----

@@ -55,7 +55,6 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
       m_name{model->get_name()},
       m_loaded_from_cache(loaded_from_cache) {
     bool isFloatModel = !ov::op::util::has_op_with_type<ngraph::op::FakeQuantize>(m_model);
     m_mutex = std::make_shared<std::mutex>();
     const auto& core = m_plugin->get_core();
     if (!core)

---- changed file ----

@@ -31,7 +31,7 @@ namespace ov {
         [this](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr<snippets::Emitter> { \
             return std::make_shared<e_type>(h.get(), isa, expr); \
         }, \
-        [](const std::shared_ptr<ngraph::Node>& n) -> std::set<std::vector<element::Type>> { \
+        [](const std::shared_ptr<ov::Node>& n) -> std::set<std::vector<element::Type>> { \
             return e_type::get_supported_precisions(n); \
         } \
     }

---- changed file ----

@@ -17,7 +17,7 @@ using namespace Xbyak;
 namespace ov {
 namespace intel_cpu {
-jit_convert_emitter::jit_convert_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+jit_convert_emitter::jit_convert_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {
     input_type = node->get_input_element_type(0);
     output_type = node->get_output_element_type(0);
@@ -58,7 +58,7 @@ void jit_convert_emitter::float2bfloat(const std::vector<size_t> &in_vec_idxs, c
 }
 jit_convert_truncation_emitter::jit_convert_truncation_emitter(jit_generator *host, cpu_isa_t host_isa,
-                                                               const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+                                                               const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_convert_emitter(host, host_isa, node, exec_prc) {
     prepare_table();
 }
@@ -193,7 +193,7 @@ void jit_convert_truncation_emitter::dword2int8(const std::vector<size_t> &in_ve
 }
 jit_convert_saturation_emitter::jit_convert_saturation_emitter(jit_generator *host, cpu_isa_t host_isa,
-                                                               const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+                                                               const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_convert_emitter(host, host_isa, node, exec_prc) {
 }

---- changed file ----

@@ -14,7 +14,7 @@ namespace intel_cpu {
 class jit_convert_emitter : public jit_emitter {
 public:
     jit_convert_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-                        const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+                        const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
@@ -47,7 +47,7 @@ protected:
 class jit_convert_truncation_emitter : public jit_convert_emitter {
 public:
     jit_convert_truncation_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-                                   const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+                                   const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 private:
     void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
@@ -68,7 +68,7 @@ private:
 class jit_convert_saturation_emitter : public jit_convert_emitter {
 public:
     jit_convert_saturation_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-                                   const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+                                   const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 private:
     void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;

---- changed file ----

@@ -13,11 +13,11 @@ using namespace Xbyak;
 namespace ov {
 namespace intel_cpu {
-std::set<std::vector<element::Type>> jit_dnnl_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_dnnl_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
-jit_dnnl_emitter::jit_dnnl_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, InferenceEngine::Precision exec_prc)
+jit_dnnl_emitter::jit_dnnl_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, InferenceEngine::Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {
     kind = dnnl_eltwise_tanh;

---- changed file ----

@@ -20,13 +20,13 @@ public:
     void emit_impl(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs) const override {};
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 protected:
     jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
                      dnnl_alg_kind_t algKind, float inpAlpha, float inpBeta,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
-    jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     void set_injector();

---- changed file ----

@@ -13,7 +13,7 @@ namespace intel_cpu {
 class jit_relu_emitter : public jit_dnnl_emitter {
 public:
-    jit_relu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_relu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_relu;
@@ -26,7 +26,7 @@ public:
 class jit_sigmoid_emitter : public jit_dnnl_emitter {
 public:
-    jit_sigmoid_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_sigmoid_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_logistic;
@@ -39,7 +39,7 @@ public:
 class jit_tanh_emitter : public jit_dnnl_emitter {
 public:
-    jit_tanh_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_tanh_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_tanh;
@@ -52,11 +52,11 @@ public:
 class jit_elu_emitter : public jit_dnnl_emitter {
 public:
-    jit_elu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_elu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                     InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_elu;
-        alpha = ngraph::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();
+        alpha = ov::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();
         beta = 0.f;
         set_injector();
@@ -65,7 +65,7 @@ public:
 class jit_exp_emitter : public jit_dnnl_emitter {
 public:
-    jit_exp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_exp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                     InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_exp;
@@ -78,7 +78,7 @@ public:
 class jit_abs_emitter : public jit_dnnl_emitter {
 public:
-    jit_abs_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_abs_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                     InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_abs;
@@ -91,11 +91,11 @@ public:
 class jit_clamp_emitter : public jit_dnnl_emitter {
 public:
-    jit_clamp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_clamp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                       InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_clip;
-        auto op = ngraph::as_type_ptr<ov::op::v0::Clamp>(n);
+        auto op = ov::as_type_ptr<ov::op::v0::Clamp>(n);
         alpha = op->get_min();
         beta = op->get_max();
@@ -105,11 +105,11 @@ public:
 class jit_swish_emitter : public jit_dnnl_emitter {
 public:
-    jit_swish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_swish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                       InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_swish;
-        auto op = ngraph::as_type_ptr<ov::intel_cpu::SwishNode>(n);
+        auto op = ov::as_type_ptr<ov::intel_cpu::SwishNode>(n);
         alpha = op->get_alpha();
         beta = 0.f;
@@ -119,7 +119,7 @@ public:
 class jit_hswish_emitter : public jit_dnnl_emitter {
 public:
-    jit_hswish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_hswish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         // since v3.0 oneDNN has flexible version of hardswish, ov still uses the one with hardcoded alpha and beta
@@ -133,7 +133,7 @@ public:
 class jit_gelu_v0_emitter : public jit_dnnl_emitter {
 public:
-    jit_gelu_v0_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_gelu_v0_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_gelu_erf;
@@ -144,7 +144,7 @@ public:
 class jit_gelu_v7_emitter : public jit_dnnl_emitter {
 public:
-    jit_gelu_v7_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_gelu_v7_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         auto gelu = getNgraphOpAs<ngraph::op::v7::Gelu>(n);
@@ -165,7 +165,7 @@ public:
     jit_round_emitter(
         dnnl::impl::cpu::x64::jit_generator *host,
         dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-        const std::shared_ptr<ngraph::Node>& n,
+        const std::shared_ptr<ov::Node>& n,
         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32) : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         const auto round = getNgraphOpAs<ngraph::op::v5::Round>(n);
         const auto mode = round->get_mode();
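The same one-for-one swap applies to the checked downcast helper used inside these constructors: ngraph::as_type_ptr gives way to ov::as_type_ptr, which performs the identical typed cast of an ov::Node. A minimal sketch of the pattern, taken from the Elu emitter above:

    // Before: downcast through the legacy ngraph alias.
    alpha = ngraph::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();

    // After: the same checked downcast via ov::as_type_ptr.
    alpha = ov::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();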

---- changed file ----

@@ -77,7 +77,7 @@ void jit_add_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const std
     }
 }
-std::set<std::vector<element::Type>> jit_add_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_add_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -168,7 +168,7 @@ size_t jit_mul_add_emitter::aux_vecs_count() const {
     return 1;
 }
-std::set<std::vector<element::Type>> jit_mul_add_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_mul_add_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32, element::f32}, {element::i32, element::i32, element::i32}};
 }
@@ -215,7 +215,7 @@ void jit_subtract_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, cons
     }
 }
-std::set<std::vector<element::Type>> jit_subtract_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_subtract_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -262,7 +262,7 @@ void jit_multiply_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, cons
     }
 }
-std::set<std::vector<element::Type>> jit_multiply_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_multiply_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -323,7 +323,7 @@ void jit_divide_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const
     }
 }
-std::set<std::vector<element::Type>> jit_divide_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_divide_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -339,7 +339,7 @@ jit_floor_emitter::jit_floor_emitter(x64::jit_generator *host, x64::cpu_isa_t ho
 size_t jit_floor_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_floor_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_floor_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -371,7 +371,7 @@ jit_ceiling_emitter::jit_ceiling_emitter(x64::jit_generator *host, x64::cpu_isa_
 size_t jit_ceiling_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_ceiling_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_ceiling_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -404,7 +404,7 @@ jit_floor_mod_emitter::jit_floor_mod_emitter(x64::jit_generator *host, x64::cpu_
 size_t jit_floor_mod_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_floor_mod_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_floor_mod_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -458,7 +458,7 @@ jit_mod_emitter::jit_mod_emitter(x64::jit_generator *host, x64::cpu_isa_t host_i
 size_t jit_mod_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_mod_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_mod_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -548,7 +548,7 @@ void jit_maximum_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const
     }
 }
-std::set<std::vector<element::Type>> jit_maximum_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_maximum_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -596,7 +596,7 @@ void jit_minimum_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const
     }
 }
-std::set<std::vector<element::Type>> jit_minimum_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_minimum_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -651,7 +651,7 @@ void jit_squared_difference_emitter::emit_isa(const std::vector<size_t> &in_vec_
     }
 }
-std::set<std::vector<element::Type>> jit_squared_difference_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_squared_difference_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
@@ -664,7 +664,7 @@ jit_power_dynamic_emitter::jit_power_dynamic_emitter(x64::jit_generator *host, x
 size_t jit_power_dynamic_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_power_dynamic_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_power_dynamic_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -779,7 +779,7 @@ jit_equal_emitter::jit_equal_emitter(x64::jit_generator *host, x64::cpu_isa_t ho
 size_t jit_equal_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -842,7 +842,7 @@ jit_not_equal_emitter::jit_not_equal_emitter(x64::jit_generator *host, x64::cpu_
 size_t jit_not_equal_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_not_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_not_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -905,7 +905,7 @@ jit_greater_emitter::jit_greater_emitter(x64::jit_generator *host, x64::cpu_isa_
 size_t jit_greater_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_greater_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_greater_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -969,7 +969,7 @@ jit_greater_equal_emitter::jit_greater_equal_emitter(x64::jit_generator *host, x
 size_t jit_greater_equal_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_greater_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_greater_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1032,7 +1032,7 @@ jit_less_emitter::jit_less_emitter(x64::jit_generator *host, x64::cpu_isa_t host
 size_t jit_less_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_less_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_less_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1095,7 +1095,7 @@ jit_less_equal_emitter::jit_less_equal_emitter(x64::jit_generator *host, x64::cp
 size_t jit_less_equal_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_less_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_less_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1159,7 +1159,7 @@ jit_logical_and_emitter::jit_logical_and_emitter(x64::jit_generator *host, x64::
 size_t jit_logical_and_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_logical_and_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_and_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1243,7 +1243,7 @@ jit_logical_or_emitter::jit_logical_or_emitter(x64::jit_generator *host, x64::cp
 size_t jit_logical_or_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_logical_or_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_or_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1326,7 +1326,7 @@ jit_logical_xor_emitter::jit_logical_xor_emitter(x64::jit_generator *host, x64::
 size_t jit_logical_xor_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_logical_xor_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_xor_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1409,7 +1409,7 @@ jit_logical_not_emitter::jit_logical_not_emitter(x64::jit_generator *host, x64::
 size_t jit_logical_not_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_logical_not_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_not_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -1483,7 +1483,7 @@ jit_power_static_emitter::jit_power_static_emitter(x64::jit_generator *host, x64
 size_t jit_power_static_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_power_static_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_power_static_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -1661,7 +1661,7 @@ jit_prelu_emitter::jit_prelu_emitter(x64::jit_generator *host, x64::cpu_isa_t ho
 }
 size_t jit_prelu_emitter::get_inputs_num() const { return 2; }
-std::set<std::vector<element::Type>> jit_prelu_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_prelu_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
@@ -1720,7 +1720,7 @@ jit_sqrt_emitter::jit_sqrt_emitter(x64::jit_generator *host, x64::cpu_isa_t host
 size_t jit_sqrt_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_sqrt_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_sqrt_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -1751,7 +1751,7 @@ jit_negative_emitter::jit_negative_emitter(x64::jit_generator *host, x64::cpu_is
 size_t jit_negative_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_negative_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_negative_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -1789,7 +1789,7 @@ jit_erf_emitter::jit_erf_emitter(x64::jit_generator *host, x64::cpu_isa_t host_i
 size_t jit_erf_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_erf_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_erf_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -1973,7 +1973,7 @@ jit_soft_sign_emitter::jit_soft_sign_emitter(x64::jit_generator *host, x64::cpu_
 size_t jit_soft_sign_emitter::get_inputs_num() const { return 1; }
-std::set<std::vector<element::Type>> jit_soft_sign_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_soft_sign_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
@@ -2181,14 +2181,14 @@ void jit_is_nan_emitter::register_table_entries() {
 }
 /// SELECT ///
-jit_select_emitter::jit_select_emitter(x64::jit_generator *host, x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+jit_select_emitter::jit_select_emitter(x64::jit_generator *host, x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {}
 jit_select_emitter::jit_select_emitter(x64::jit_generator *host, x64::cpu_isa_t host_isa, Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {}
 size_t jit_select_emitter::get_inputs_num() const { return 3; }
-std::set<std::vector<element::Type>> jit_select_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_select_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32, element::f32}};
 }

---- changed file ----

@@ -16,7 +16,7 @@ public:
     jit_add_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -32,7 +32,7 @@ public:
     jit_mul_add_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -51,7 +51,7 @@ public:
     jit_subtract_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -68,7 +68,7 @@ public:
     jit_multiply_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -86,7 +86,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -104,7 +104,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -121,7 +121,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -138,7 +138,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -157,7 +157,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -175,7 +175,7 @@ public:
     jit_maximum_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -192,7 +192,7 @@ public:
     jit_minimum_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -211,7 +211,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -229,7 +229,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -247,7 +247,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -268,7 +268,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -289,7 +289,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -310,7 +310,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -331,7 +331,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -353,7 +353,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -374,7 +374,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -395,7 +395,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -416,7 +416,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -436,7 +436,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -457,7 +457,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
@@ -482,7 +482,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -501,7 +501,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -516,7 +516,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
@@ -534,7 +534,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(
@@ -556,7 +556,7 @@ public:
                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -579,7 +579,7 @@ public:
     }
     size_t get_inputs_num() const override { return 1; };
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
         return {{element::f32}};
     }
@@ -607,7 +607,7 @@ public:
     }
     size_t get_inputs_num() const override { return 1; };
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{element::f32}};
    }
@@ -637,7 +637,7 @@ public:
     }
     size_t get_inputs_num() const override { return 1; }
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
         return {{element::f32}};
     }
@@ -656,11 +656,11 @@ class jit_select_emitter : public jit_emitter {
 public:
     jit_select_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
-    jit_select_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_select_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
     size_t aux_vecs_count() const override;
 private:

View File

@ -55,7 +55,7 @@ size_t jit_emitter::aux_gprs_count() const {
return entry_map_.empty() ? 0 : 1; return entry_map_.empty() ? 0 : 1;
} }
std::set<std::vector<element::Type>> jit_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) { std::set<std::vector<element::Type>> jit_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
return {}; return {};
} }

View File

@ -49,7 +49,7 @@ public:
* Precisions are ordered, the first bigger bitness precision with the same type will be selected. * Precisions are ordered, the first bigger bitness precision with the same type will be selected.
* Empty collection means the emitter supports any input precisions. * Empty collection means the emitter supports any input precisions.
*/ */
static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr); static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
protected: protected:
virtual size_t aux_gprs_count() const; virtual size_t aux_gprs_count() const;
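Note: the precision contract documented above is easiest to read from a concrete override. A minimal sketch, assuming a hypothetical two-input emitter inside the plugin's emitter class context (the f32/i32 combinations are illustrative, not from this patch):

    // Each inner vector is one accepted combination of input precisions;
    // combinations are ordered, so the first larger-bitness match with the
    // same type wins. An empty set (the base-class default) accepts anything.
    static std::set<std::vector<ov::element::Type>> get_supported_precisions(
            const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{ov::element::f32, ov::element::f32},
                {ov::element::i32, ov::element::i32}};
    }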

View File

@ -880,7 +880,7 @@ BrgemmEmitter::BrgemmEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPt
m_load_offset_scratch = brgemm_node->get_offset_scratch(); m_load_offset_scratch = brgemm_node->get_offset_scratch();
} }
std::set<std::vector<element::Type>> BrgemmEmitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) { std::set<std::vector<element::Type>> BrgemmEmitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
const auto brgemm = as_type_ptr<ov::intel_cpu::BrgemmCPU>(node); const auto brgemm = as_type_ptr<ov::intel_cpu::BrgemmCPU>(node);
OPENVINO_ASSERT(brgemm, "BrgemmEmitter::get_supported_precisions() expects BrgemmCPU node"); OPENVINO_ASSERT(brgemm, "BrgemmEmitter::get_supported_precisions() expects BrgemmCPU node");
switch (brgemm->get_type()) { switch (brgemm->get_type()) {

View File

@ -364,7 +364,7 @@ public:
const ov::snippets::lowered::ExpressionPtr& expr); const ov::snippets::lowered::ExpressionPtr& expr);
size_t get_inputs_num() const override { return m_with_scratch ? 3 : 2; } size_t get_inputs_num() const override { return m_with_scratch ? 3 : 2; }
static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr); static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
size_t aux_gprs_count() const override; size_t aux_gprs_count() const override;
static size_t get_in_leading_dim(const VectorDims& shape, const std::vector<size_t>& layout); static size_t get_in_leading_dim(const VectorDims& shape, const std::vector<size_t>& layout);
@ -430,7 +430,7 @@ public:
const ov::snippets::lowered::ExpressionPtr& expr); const ov::snippets::lowered::ExpressionPtr& expr);
size_t get_inputs_num() const override {return 1;} size_t get_inputs_num() const override {return 1;}
static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) { static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
return {{element::i8}, {element::bf16}}; return {{element::i8}, {element::bf16}};
} }
@ -469,7 +469,7 @@ public:
const ov::snippets::lowered::ExpressionPtr& expr); const ov::snippets::lowered::ExpressionPtr& expr);
size_t get_inputs_num() const override {return 1;} size_t get_inputs_num() const override {return 1;}
static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) { static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
return {{element::f32}}; return {{element::f32}};
} }

View File

@ -183,11 +183,11 @@ std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
return opsets; return opsets;
} }
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node>&) { std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ov::Node>&) {
return {}; return {};
} }
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) { InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ov::Node>& node, const std::string& implType) {
return nullptr; return nullptr;
} }

View File

@ -14,8 +14,8 @@ public:
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override; void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
void Unload() noexcept override; void Unload() noexcept override;
std::map<std::string, ngraph::OpSet> getOpSets() override; std::map<std::string, ngraph::OpSet> getOpSets() override;
std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override; std::vector<std::string> getImplTypes(const std::shared_ptr<ov::Node>& node) override;
InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override; InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ov::Node>& node, const std::string& implType) override;
}; };
} // namespace intel_cpu } // namespace intel_cpu

View File

@ -17,7 +17,7 @@ void ExtensionManager::AddExtension(const IExtensionPtr& extension) {
_extensions.push_back(extension); _extensions.push_back(extension);
} }
InferenceEngine::ILayerImpl::Ptr ExtensionManager::CreateImplementation(const std::shared_ptr<ngraph::Node>& op) { InferenceEngine::ILayerImpl::Ptr ExtensionManager::CreateImplementation(const std::shared_ptr<ov::Node>& op) {
if (!op) if (!op)
IE_THROW() << "Cannot get nGraph operation!"; IE_THROW() << "Cannot get nGraph operation!";
for (const auto& ext : _extensions) { for (const auto& ext : _extensions) {
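The hunk above cuts off inside the lookup loop. For orientation, a plausible shape of the remainder, sketched purely from the IExtension interface this patch touches (getImplTypes/getImplementation); this is an assumption, not the actual body:

    // Sketch: ask each registered extension for an implementation of one of
    // the impl types it advertises for this op; the first non-null hit wins.
    for (const auto& ext : _extensions) {
        for (const auto& implType : ext->getImplTypes(op)) {
            if (auto impl = ext->getImplementation(op, implType))
                return impl;
        }
    }
    return nullptr;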

View File

@ -16,7 +16,7 @@ class ExtensionManager {
public: public:
using Ptr = std::shared_ptr<ExtensionManager>; using Ptr = std::shared_ptr<ExtensionManager>;
ExtensionManager() = default; ExtensionManager() = default;
InferenceEngine::ILayerImpl::Ptr CreateImplementation(const std::shared_ptr<ngraph::Node>& op); InferenceEngine::ILayerImpl::Ptr CreateImplementation(const std::shared_ptr<ov::Node>& op);
void AddExtension(const InferenceEngine::IExtensionPtr& extension); void AddExtension(const InferenceEngine::IExtensionPtr& extension);
const std::vector<InferenceEngine::IExtensionPtr> & Extensions() const; const std::vector<InferenceEngine::IExtensionPtr> & Extensions() const;

View File

@ -670,7 +670,7 @@ void Graph::AllocateWithReuse() {
MemorySolver staticMemSolver(definedBoxes); MemorySolver staticMemSolver(definedBoxes);
size_t total_size = static_cast<size_t>(staticMemSolver.solve()) * alignment; size_t total_size = static_cast<size_t>(staticMemSolver.solve()) * alignment;
memWorkspace = std::make_shared<Memory>(getEngine(), DnnlBlockedMemoryDesc(InferenceEngine::Precision::I8, Shape(InferenceEngine::SizeVector{total_size}))); memWorkspace = std::make_shared<Memory>(getEngine(), DnnlBlockedMemoryDesc(InferenceEngine::Precision::I8, Shape(VectorDims{total_size})));
if (edge_clusters.empty()) if (edge_clusters.empty())
return; return;

View File

@ -113,16 +113,16 @@ std::map<std::string, std::string> extract_node_metadata(const NodePtr &node) {
} // namespace } // namespace
std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph) { std::shared_ptr<ov::Model> dump_graph_as_ie_ngraph_net(const Graph &graph) {
std::map<NodePtr, std::shared_ptr<ngraph::Node> > node2layer; std::map<NodePtr, std::shared_ptr<ov::Node> > node2layer;
ngraph::ResultVector results; ov::ResultVector results;
ngraph::ParameterVector params; ov::ParameterVector params;
ngraph::NodeVector to_hold; ov::NodeVector to_hold;
auto get_inputs = [&] (const NodePtr & node) { auto get_inputs = [&] (const NodePtr & node) {
auto pr_edges = node->getParentEdges(); auto pr_edges = node->getParentEdges();
ngraph::OutputVector inputs(pr_edges.size()); ov::OutputVector inputs(pr_edges.size());
for (size_t i = 0; i < pr_edges.size(); i++) { for (size_t i = 0; i < pr_edges.size(); i++) {
auto edge = node->getParentEdgeAt(i); auto edge = node->getParentEdgeAt(i);
@ -162,7 +162,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph
} }
auto meta_data = extract_node_metadata(node); auto meta_data = extract_node_metadata(node);
std::shared_ptr<ngraph::Node> return_node; std::shared_ptr<ov::Node> return_node;
if (is_input) { if (is_input) {
auto& desc = node->getChildEdgeAt(0)->getMemory().getDesc(); auto& desc = node->getChildEdgeAt(0)->getMemory().getDesc();
auto param = std::make_shared<ngraph::op::Parameter>(details::convertPrecision(desc.getPrecision()), desc.getShape().toPartialShape()); auto param = std::make_shared<ngraph::op::Parameter>(details::convertPrecision(desc.getPrecision()), desc.getShape().toPartialShape());
@ -192,7 +192,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph
return return_node; return return_node;
}; };
ngraph::NodeVector nodes; ov::NodeVector nodes;
nodes.reserve(graph.graphNodes.size()); nodes.reserve(graph.graphNodes.size());
for (auto &node : graph.graphNodes) { // important: graph.graphNodes are in topological order for (auto &node : graph.graphNodes) { // important: graph.graphNodes are in topological order
nodes.emplace_back(create_ngraph_node(node)); nodes.emplace_back(create_ngraph_node(node));
@ -204,7 +204,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph
holder->add_control_dependency(node); holder->add_control_dependency(node);
} }
return std::make_shared<ngraph::Function>(results, params, graph._name); return std::make_shared<ov::Model>(results, params, graph._name);
} }
#ifdef CPU_DEBUG_CAPS #ifdef CPU_DEBUG_CAPS
@ -227,7 +227,7 @@ void serializeToXML(const Graph &graph, const std::string& path) {
return; return;
std::string binPath; std::string binPath;
ngraph::pass::Manager manager; ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(path, manager.register_pass<ov::pass::Serialize>(path,
binPath, binPath,
ov::pass::Serialize::Version::IR_V10); ov::pass::Serialize::Version::IR_V10);
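The ngraph::Function to ov::Model switch is a rename at the API level (in the transitional headers ngraph::Function is, to my knowledge, an alias of ov::Model), so the assembly pattern the dump routine uses is unchanged. A self-contained sketch of the same construction with illustrative ops:

    #include <openvino/openvino.hpp>  // umbrella header; assumed sufficient here

    std::shared_ptr<ov::Model> make_debug_model() {
        auto param  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                              ov::PartialShape{1, 3});
        auto relu   = std::make_shared<ov::op::v0::Relu>(param);
        auto result = std::make_shared<ov::op::v0::Result>(relu);
        // Results and parameters collected exactly as in the dump routine above.
        return std::make_shared<ov::Model>(ov::ResultVector{result},
                                           ov::ParameterVector{param},
                                           "debug_dump");
    }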

View File

@ -13,7 +13,7 @@
namespace ov { namespace ov {
namespace intel_cpu { namespace intel_cpu {
std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph); std::shared_ptr<ov::Model> dump_graph_as_ie_ngraph_net(const Graph &graph);
#ifdef CPU_DEBUG_CAPS #ifdef CPU_DEBUG_CAPS
void serialize(const Graph &graph); void serialize(const Graph &graph);
void summary_perf(const Graph &graph); void summary_perf(const Graph &graph);

View File

@ -617,7 +617,7 @@ void GraphOptimizer::FuseConvolutionMatMulDeconvAndBias(Graph &graph) {
// Bias -> Reshape -> Conv/Deconv/FC // Bias -> Reshape -> Conv/Deconv/FC
const VectorDims flattenShape = {biasOutputShape.getElementsCount()}; const VectorDims flattenShape = {biasOutputShape.getElementsCount()};
// Construct Ngraph Reshape node and CPU Reshape node. // Construct Ngraph Reshape node and CPU Reshape node.
auto reshapeConstInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ngraph::Shape{1}, flattenShape); auto reshapeConstInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ov::Shape{1}, flattenShape);
auto reshapeDummyInput = std::make_shared<ngraph::opset1::Parameter>( auto reshapeDummyInput = std::make_shared<ngraph::opset1::Parameter>(
details::convertPrecision(biasNode->getOriginalOutputPrecisionAtPort(0)), details::convertPrecision(biasNode->getOriginalOutputPrecisionAtPort(0)),
biasOutputShape.toPartialShape()); biasOutputShape.toPartialShape());
@ -2647,7 +2647,7 @@ void GraphOptimizer::reshapeRnnSeq(Graph &graph) {
auto edge = childrenEdges[j]; auto edge = childrenEdges[j];
auto childNode = edge->getChild(); auto childNode = edge->getChild();
const auto secondInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ngraph::Shape{1}, std::vector<int>{1}); const auto secondInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ov::Shape{1}, std::vector<int>{1});
const auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>( const auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>(
std::make_shared<ngraph::opset1::Parameter>(details::convertPrecision(parentNode->getOriginalOutputPrecisionAtPort(0)), std::make_shared<ngraph::opset1::Parameter>(details::convertPrecision(parentNode->getOriginalOutputPrecisionAtPort(0)),
parentNode->getOutputShapeAtPort(0).toPartialShape()), secondInput); parentNode->getOutputShapeAtPort(0).toPartialShape()), secondInput);
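Both hunks in this file build the same kind of helper subgraph; only the Shape spelling changes (ngraph::Shape and ov::Shape name the same class during the transition, as far as the aliases go). A condensed sketch of the pattern with illustrative values; opset1::Constant and opset1::Unsqueeze resolve to the v0 ops used below:

    #include <openvino/openvino.hpp>  // assumed

    std::shared_ptr<ov::Node> make_unsqueeze_helper() {
        // 1-D i32 Constant carrying the axis data, as in the hunks above.
        auto second = std::make_shared<ov::op::v0::Constant>(ov::element::i32,
                                                             ov::Shape{1},
                                                             std::vector<int>{1});
        auto dummy  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                              ov::PartialShape{8, 16});
        return std::make_shared<ov::op::v0::Unsqueeze>(dummy, second);
    }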

View File

@ -143,14 +143,14 @@ size_t CpuBlockedMemoryDesc::getMaxMemSize() const {
return maxDimsDesc->getCurrentMemSize(); return maxDimsDesc->getCurrentMemSize();
} }
size_t CpuBlockedMemoryDesc::getOffset(const InferenceEngine::SizeVector& v) const { size_t CpuBlockedMemoryDesc::getOffset(const VectorDims& v) const {
InferenceEngine::SizeVector off_v = v; VectorDims off_v = v;
size_t n_blocked_dims = order.size(); size_t n_blocked_dims = order.size();
if (blockedDims.size() != n_blocked_dims || strides.size() != n_blocked_dims) { if (blockedDims.size() != n_blocked_dims || strides.size() != n_blocked_dims) {
IE_THROW() << "Cannot calculate offset. Incorrect primitive descriptor!"; IE_THROW() << "Cannot calculate offset. Incorrect primitive descriptor!";
} }
InferenceEngine::SizeVector blockedShift(n_blocked_dims); VectorDims blockedShift(n_blocked_dims);
for (size_t i = 1; i <= n_blocked_dims; i++) { for (size_t i = 1; i <= n_blocked_dims; i++) {
blockedShift[n_blocked_dims - i] = off_v[order[n_blocked_dims - i]] % blockedDims[n_blocked_dims - i]; blockedShift[n_blocked_dims - i] = off_v[order[n_blocked_dims - i]] % blockedDims[n_blocked_dims - i];
off_v[order[n_blocked_dims - i]] /= blockedDims[n_blocked_dims - i]; off_v[order[n_blocked_dims - i]] /= blockedDims[n_blocked_dims - i];
@ -167,7 +167,7 @@ size_t CpuBlockedMemoryDesc::getElementOffset(size_t elemNumber) const {
// TODO [DS]: rewrite to support dynamic shapes // TODO [DS]: rewrite to support dynamic shapes
auto& dims = shape.getStaticDims(); auto& dims = shape.getStaticDims();
size_t n_dims = dims.size(); size_t n_dims = dims.size();
InferenceEngine::SizeVector pos(n_dims); VectorDims pos(n_dims);
for (size_t rd = 1; rd <= n_dims; ++rd) { for (size_t rd = 1; rd <= n_dims; ++rd) {
const size_t d = n_dims - rd; const size_t d = n_dims - rd;
const size_t cur_dim = dims[d]; const size_t cur_dim = dims[d];
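The SizeVector-to-VectorDims substitutions in this file are drop-in because the two aliases are layout-identical, assuming the usual definitions (quoted from memory, not from this patch):

    using SizeVector = std::vector<size_t>;  // InferenceEngine::SizeVector (ie_common.h)
    using Dim        = std::size_t;          // ov::intel_cpu (cpu_types.h)
    using VectorDims = std::vector<Dim>;     // hence the same type bit for bit

That is why the getOffset()/getElementOffset() bodies above need no change beyond the type name.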

View File

@ -84,7 +84,7 @@ private:
size_t getElementOffset(size_t elemNumber) const override; size_t getElementOffset(size_t elemNumber) const override;
bool canComputeMemSizeZeroDims() const override; bool canComputeMemSizeZeroDims() const override;
size_t getCurrentMemSizeImp() const override; size_t getCurrentMemSizeImp() const override;
size_t getOffset(const InferenceEngine::SizeVector& v) const; size_t getOffset(const VectorDims& v) const;
bool isPlainFormat() const; bool isPlainFormat() const;
bool isBlockedCFormat(size_t blk_size) const; bool isBlockedCFormat(size_t blk_size) const;
bool isTailCFormat() const; bool isTailCFormat() const;

View File

@ -80,7 +80,7 @@ Node::NodesFactory & Node::factory() {
return factoryInstance; return factoryInstance;
} }
Node::Node(const std::shared_ptr<ngraph::Node>& op, Node::Node(const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr ctx, const GraphContext::CPtr ctx,
const ShapeInferFactory& shapeInferFactory) const ShapeInferFactory& shapeInferFactory)
: selectedPrimitiveDescriptorIndex(-1), : selectedPrimitiveDescriptorIndex(-1),
@ -1282,7 +1282,7 @@ InferenceEngine::Precision Node::getRuntimePrecision() const {
return runtimePrecision; return runtimePrecision;
} }
Node* Node::NodesFactory::create(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) { Node* Node::NodesFactory::create(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) {
// getExceptionDescWithoutStatus removes redundant information from the exception message. For instance, the NotImplemented // getExceptionDescWithoutStatus removes redundant information from the exception message. For instance, the NotImplemented
// exception is generated in the form: full_path_to_src_file:line_number [ NOT_IMPLEMENTED ] reason. // exception is generated in the form: full_path_to_src_file:line_number [ NOT_IMPLEMENTED ] reason.
// An example for gather node: // An example for gather node:

View File

@ -588,7 +588,7 @@ protected:
std::string originalLayers; // contains names of the original layers separated by comma std::string originalLayers; // contains names of the original layers separated by comma
Node(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory); Node(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory);
Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx); Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx);
int selectedPrimitiveDescriptorIndex = -1; int selectedPrimitiveDescriptorIndex = -1;
@ -744,17 +744,17 @@ constexpr uint64_t PortMask(T... rest) {
} }
class Node::NodesFactory : public openvino::cc::Factory<Type, class Node::NodesFactory : public openvino::cc::Factory<Type,
Node*(const std::shared_ptr<ngraph::Node>& op, Node*(const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr)> { const GraphContext::CPtr)> {
public: public:
NodesFactory(); NodesFactory();
Node* create(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); Node* create(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
}; };
template<typename NodeType> template<typename NodeType>
struct NodeImpl : public NodeType { struct NodeImpl : public NodeType {
NodeImpl(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) NodeImpl(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: NodeType(op, context) { : NodeType(op, context) {
NodeType::perfCounters().template buildClassCounters<NodeType>(NameFromType(NodeType::getType())); NodeType::perfCounters().template buildClassCounters<NodeType>(NameFromType(NodeType::getType()));
} }

View File

@ -24,7 +24,7 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
if (one_of(op->get_type_info(), ngraph::op::v8::AdaptiveAvgPool::get_type_info_static())) { if (one_of(op->get_type_info(), ngraph::op::v8::AdaptiveAvgPool::get_type_info_static())) {
auto adaPool = std::dynamic_pointer_cast<const ngraph::opset8::AdaptiveAvgPool>(op); auto adaPool = std::dynamic_pointer_cast<const ngraph::opset8::AdaptiveAvgPool>(op);
@ -48,7 +48,7 @@ bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ngraph::N
return true; return true;
} }
AdaptivePooling::AdaptivePooling(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) AdaptivePooling::AdaptivePooling(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, AdaptivePoolingShapeInferFactory(op)) { : Node(op, context, AdaptivePoolingShapeInferFactory(op)) {
std::string errorMessage; std::string errorMessage;
if (isSupportedOperation(op, errorMessage)) { if (isSupportedOperation(op, errorMessage)) {
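Every node file below repeats the two-step contract visible here: a noexcept static isSupportedOperation() that downcasts the op and fills errorMessage, and a constructor that turns a failed check into IE_THROW(NotImplemented). A condensed sketch with a hypothetical node name, assuming the plugin's Node/GraphContext/NgraphShapeInferFactory context shown in this diff:

    bool MyNode::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
                                      std::string& errorMessage) noexcept {
        try {
            if (!ov::as_type_ptr<const ov::op::v8::AdaptiveAvgPool>(op)) {
                errorMessage = "Node is not an instance of the expected operation.";
                return false;
            }
        } catch (...) {
            return false;
        }
        return true;
    }

    MyNode::MyNode(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
        : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
        std::string errorMessage;
        if (!isSupportedOperation(op, errorMessage))
            IE_THROW(NotImplemented) << errorMessage;  // lets the caller fall back
    }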

View File

@ -16,14 +16,14 @@ namespace node {
class AdaptivePooling : public Node { class AdaptivePooling : public Node {
public: public:
AdaptivePooling(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); AdaptivePooling(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
void getSupportedDescriptors() override; void getSupportedDescriptors() override;
void initSupportedPrimitiveDescriptors() override; void initSupportedPrimitiveDescriptors() override;
void execute(dnnl::stream strm) override; void execute(dnnl::stream strm) override;
bool created() const override; bool created() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private: private:
int spatialDimsCount; int spatialDimsCount;

View File

@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool BatchToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool BatchToSpace::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto batchToSpace = std::dynamic_pointer_cast<const ngraph::opset2::BatchToSpace>(op); const auto batchToSpace = std::dynamic_pointer_cast<const ngraph::opset2::BatchToSpace>(op);
if (!batchToSpace) { if (!batchToSpace) {
@ -30,7 +30,7 @@ bool BatchToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node
return true; return true;
} }
BatchToSpace::BatchToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) BatchToSpace::BatchToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, PortMask(1, 2, 3))) { : Node(op, context, NgraphShapeInferFactory(op, PortMask(1, 2, 3))) {
std::string errorMessage; std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) { if (!isSupportedOperation(op, errorMessage)) {

View File

@ -16,7 +16,7 @@ namespace node {
class BatchToSpace : public Node { class BatchToSpace : public Node {
public: public:
BatchToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); BatchToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
void getSupportedDescriptors() override {}; void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override; void initSupportedPrimitiveDescriptors() override;
@ -27,7 +27,7 @@ public:
bool needShapeInfer() const override {return true;}; bool needShapeInfer() const override {return true;};
void executeDynamicImpl(dnnl::stream strm) override; void executeDynamicImpl(dnnl::stream strm) override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private: private:
template<typename T> template<typename T>

View File

@ -878,7 +878,7 @@ private:
} }
}; };
#endif #endif
bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
if (isDynamicNgraphNode(op)) { if (isDynamicNgraphNode(op)) {
errorMessage = "Doesn't support op with dynamic shapes"; errorMessage = "Doesn't support op with dynamic shapes";
@ -891,7 +891,7 @@ bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ngraph:
return false; return false;
} }
if (binConv->get_mode() != ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT) { if (binConv->get_mode() != ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT) {
errorMessage = "Doesn't support mode: " + ngraph::as_string(binConv->get_mode()); errorMessage = "Doesn't support mode: " + ov::as_string(binConv->get_mode());
return false; return false;
} }
} catch (...) { } catch (...) {
@ -900,7 +900,7 @@ bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ngraph:
return true; return true;
} }
BinaryConvolution::BinaryConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) BinaryConvolution::BinaryConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) { : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage; std::string errorMessage;
if (isSupportedOperation(op, errorMessage)) { if (isSupportedOperation(op, errorMessage)) {

View File

@ -77,7 +77,7 @@ struct jit_uni_bin_conv_kernel {
class BinaryConvolution : public Node { class BinaryConvolution : public Node {
public: public:
BinaryConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); BinaryConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
void getSupportedDescriptors() override; void getSupportedDescriptors() override;
void createPrimitive() override; void createPrimitive() override;
@ -90,7 +90,7 @@ public:
void setPostOps(dnnl::primitive_attr &attr); void setPostOps(dnnl::primitive_attr &attr);
bool canFuse(const NodePtr& node) const override; bool canFuse(const NodePtr& node) const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
impl_desc_type getImplType() { return implType; } impl_desc_type getImplType() { return implType; }

View File

@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool Bucketize::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool Bucketize::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto bucketsize = std::dynamic_pointer_cast<const ngraph::opset3::Bucketize>(op); const auto bucketsize = std::dynamic_pointer_cast<const ngraph::opset3::Bucketize>(op);
if (!bucketsize) { if (!bucketsize) {
@ -30,7 +30,7 @@ bool Bucketize::isSupportedOperation(const std::shared_ptr<const ngraph::Node>&
return true; return true;
} }
Bucketize::Bucketize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) Bucketize::Bucketize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, PassThroughShapeInferFactory()) { : Node(op, context, PassThroughShapeInferFactory()) {
std::string errorMessage; std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) { if (!isSupportedOperation(op, errorMessage)) {

View File

@ -13,7 +13,7 @@ namespace node {
class Bucketize : public Node { class Bucketize : public Node {
public: public:
Bucketize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); Bucketize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
void getSupportedDescriptors() override {}; void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override; void initSupportedPrimitiveDescriptors() override;
@ -26,7 +26,7 @@ public:
void prepareParams() override; void prepareParams() override;
bool isExecutable() const override; bool isExecutable() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private: private:
template <typename T, typename T_BOUNDARIES, typename T_IND> template <typename T, typename T_BOUNDARIES, typename T_IND>

View File

@ -24,7 +24,7 @@ namespace intel_cpu {
namespace node { namespace node {
namespace { namespace {
std::tuple<Algorithm, std::string> getAlgorithmFor(const std::shared_ptr<const ngraph::Node>& op) { std::tuple<Algorithm, std::string> getAlgorithmFor(const std::shared_ptr<const ov::Node>& op) {
if (ov::is_type<ov::op::v8::NV12toRGB>(op)) if (ov::is_type<ov::op::v8::NV12toRGB>(op))
return std::make_tuple(Algorithm::ColorConvertNV12toRGB, std::string()); return std::make_tuple(Algorithm::ColorConvertNV12toRGB, std::string());
if (ov::is_type<ov::op::v8::NV12toBGR>(op)) if (ov::is_type<ov::op::v8::NV12toBGR>(op))
@ -327,7 +327,7 @@ void RefConverter::convert(const T* y,
size_t width, size_t width,
size_t stride_y, size_t stride_y,
size_t stride_uv) { size_t stride_uv) {
InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) { ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
T* out = dst + batch * width * height * 3; T* out = dst + batch * width * height * 3;
auto y_ptr = y + batch * stride_y; auto y_ptr = y + batch * stride_y;
auto uv_ptr = uv + batch * stride_uv; auto uv_ptr = uv + batch * stride_uv;
@ -569,7 +569,7 @@ public:
const size_t stride_y = height * width * 3 / 2; const size_t stride_y = height * width * 3 / 2;
const size_t stride_uv = height * width * 3 / 2; const size_t stride_uv = height * width * 3 / 2;
InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) { ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
typename jit_uni_converter::Params args; typename jit_uni_converter::Params args;
args.y = y + batch * stride_y + h * width; args.y = y + batch * stride_y + h * width;
args.u = args.v = uv + batch * stride_uv + (h / 2) * width; args.u = args.v = uv + batch * stride_uv + (h / 2) * width;
@ -604,7 +604,7 @@ public:
const size_t stride_y = height * width; const size_t stride_y = height * width;
const size_t stride_uv = height * width / 2; const size_t stride_uv = height * width / 2;
InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) { ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
typename jit_uni_converter::Params args; typename jit_uni_converter::Params args;
args.y = y + batch * stride_y + h * width; args.y = y + batch * stride_y + h * width;
args.u = args.v = uv + batch * stride_uv + (h / 2) * width; args.u = args.v = uv + batch * stride_uv + (h / 2) * width;
@ -679,7 +679,7 @@ void RefConverter::convert(const T* y,
size_t width, size_t width,
size_t stride_y, size_t stride_y,
size_t stride_uv) { size_t stride_uv) {
InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) { ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
T* out = dst + batch * width * height * 3; T* out = dst + batch * width * height * 3;
auto y_ptr = y + batch * stride_y; auto y_ptr = y + batch * stride_y;
auto u_ptr = u + batch * stride_uv; auto u_ptr = u + batch * stride_uv;
@ -920,7 +920,7 @@ public:
const size_t stride_y = height * width * 3 / 2; const size_t stride_y = height * width * 3 / 2;
const size_t stride_uv = height * width * 3 / 2; const size_t stride_uv = height * width * 3 / 2;
InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) { ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
typename jit_uni_converter::Params args; typename jit_uni_converter::Params args;
args.y = y + batch * stride_y + h * width; args.y = y + batch * stride_y + h * width;
args.u = u + batch * stride_uv + (h / 2) * (width / 2); args.u = u + batch * stride_uv + (h / 2) * (width / 2);
@ -957,7 +957,7 @@ public:
const size_t stride_y = height * width; const size_t stride_y = height * width;
const size_t stride_uv = height * width / 4; const size_t stride_uv = height * width / 4;
InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) { ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
typename jit_uni_converter::Params args; typename jit_uni_converter::Params args;
args.y = y + batch * stride_y + h * width; args.y = y + batch * stride_y + h * width;
args.u = u + batch * stride_uv + (h / 2) * (width / 2); args.u = u + batch * stride_uv + (h / 2) * (width / 2);
@ -999,13 +999,13 @@ const VectorDims & ColorConvert::Converter::inputDims(size_t idx) const {
return _node->getParentEdgesAtPort(idx)[0]->getMemory().getStaticDims(); return _node->getParentEdgesAtPort(idx)[0]->getMemory().getStaticDims();
} }
bool ColorConvert::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool ColorConvert::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
Algorithm alg; Algorithm alg;
std::tie(alg, errorMessage) = getAlgorithmFor(op); std::tie(alg, errorMessage) = getAlgorithmFor(op);
return alg != Algorithm::Default; return alg != Algorithm::Default;
} }
ColorConvert::ColorConvert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) ColorConvert::ColorConvert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, ColorConvertShapeInferFactory(op)) { : Node(op, context, ColorConvertShapeInferFactory(op)) {
std::string errorMessage; std::string errorMessage;
std::tie(algorithm, errorMessage) = getAlgorithmFor(op); std::tie(algorithm, errorMessage) = getAlgorithmFor(op);
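All the converter loops in this file change namespace only: ov::parallel_for2d keeps the (D0, D1, functor) signature of InferenceEngine::parallel_for2d, so the lambdas carry over verbatim. A minimal runnable sketch; the header path is my assumption of where the ov:: variants now live:

    #include <openvino/core/parallel.hpp>  // assumed home of ov::parallel_for*
    #include <vector>

    void halve_rows(std::vector<float>& img, size_t batch, size_t height, size_t width) {
        // Same call shape as the converted loops above: one lambda invocation
        // per (batch, row) pair, distributed across threads.
        ov::parallel_for2d(batch, height, [&](size_t b, size_t h) {
            float* row = img.data() + (b * height + h) * width;
            for (size_t w = 0; w < width; ++w)
                row[w] *= 0.5f;  // stand-in for the per-row color math
        });
    }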

View File

@ -16,7 +16,7 @@ namespace node {
class ColorConvert : public Node { class ColorConvert : public Node {
public: public:
ColorConvert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); ColorConvert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
class Converter; class Converter;
public: public:
@ -28,7 +28,7 @@ public:
bool needPrepareParams() const override; bool needPrepareParams() const override;
void executeDynamicImpl(dnnl::stream strm) override; void executeDynamicImpl(dnnl::stream strm) override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private: private:
void initSupportedNV12Impls(); void initSupportedNV12Impls();

View File

@ -12,11 +12,11 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
struct PermuteParams { struct PermuteParams {
InferenceEngine::SizeVector src_block_dims; VectorDims src_block_dims;
InferenceEngine::SizeVector dst_block_dims; VectorDims dst_block_dims;
InferenceEngine::SizeVector src_block_order; VectorDims src_block_order;
InferenceEngine::SizeVector dst_block_order; VectorDims dst_block_order;
InferenceEngine::SizeVector order; VectorDims order;
size_t data_size; size_t data_size;
size_t hash() const; size_t hash() const;
@ -25,9 +25,9 @@ struct PermuteParams {
struct jit_permute_config_params { struct jit_permute_config_params {
uint32_t ndims; uint32_t ndims;
InferenceEngine::SizeVector dst_block_dims; VectorDims dst_block_dims;
InferenceEngine::SizeVector src_strides; VectorDims src_strides;
InferenceEngine::SizeVector dst_strides; VectorDims dst_strides;
int n; int n;
int data_size; int data_size;

View File

@ -17,7 +17,7 @@ struct jit_uni_softmax_kernel;
static inline static inline
void softmax_many_batches(const float *src_data, float *dst_data, int B, int C, int H, int W) { void softmax_many_batches(const float *src_data, float *dst_data, int B, int C, int H, int W) {
InferenceEngine::parallel_for(B * H * W, [&](size_t i) { ov::parallel_for(B * H * W, [&](size_t i) {
const float *psrc = src_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W; const float *psrc = src_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W;
float *pdst = dst_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W; float *pdst = dst_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W;

View File

@ -37,9 +37,9 @@ bool Concat::isExecutable() const {
return !isInPlace() && !hasEmptyOutputTensors(); return !isInPlace() && !hasEmptyOutputTensors();
} }
bool Concat::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool Concat::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto concatOp = ngraph::as_type_ptr<const ngraph::op::v0::Concat>(op); const auto concatOp = ov::as_type_ptr<const ngraph::op::v0::Concat>(op);
if (!concatOp) { if (!concatOp) {
errorMessage = "Node is not an instance of the Concat operation."; errorMessage = "Node is not an instance of the Concat operation.";
return false; return false;
@ -50,7 +50,7 @@ bool Concat::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
return true; return true;
} }
Concat::Concat(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) Concat::Concat(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) { : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage; std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) { if (!isSupportedOperation(op, errorMessage)) {
@ -58,7 +58,7 @@ Concat::Concat(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr
} }
const auto inRank = getInputShapeAtPort(0).getRank(); const auto inRank = getInputShapeAtPort(0).getRank();
auto concatOp = ngraph::as_type_ptr<ngraph::op::v0::Concat>(op); auto concatOp = ov::as_type_ptr<ngraph::op::v0::Concat>(op);
auto axis = concatOp->get_axis(); auto axis = concatOp->get_axis();
if (axis < 0) { if (axis < 0) {
axis += inRank; axis += inRank;
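Worked example of the normalization above, values illustrative: for a rank-4 input, axis = -3 maps to -3 + 4 = 1, i.e. concatenation along dimension 1.

    int64_t axis = -3;             // negative axis from opset1::Concat
    const int64_t inRank = 4;      // input rank
    if (axis < 0) axis += inRank;  // axis == 1: concat along dim 1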

View File

@ -16,9 +16,9 @@ namespace node {
class Concat : public Node { class Concat : public Node {
public: public:
Concat(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); Concat(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
void getSupportedDescriptors() override; void getSupportedDescriptors() override;
void initSupportedPrimitiveDescriptors() override; void initSupportedPrimitiveDescriptors() override;
void initOptimalPrimitiveDescriptor() override; void initOptimalPrimitiveDescriptor() override;
@ -40,7 +40,7 @@ private:
bool canBeInPlace = false; bool canBeInPlace = false;
bool canOptimizeNspc = false; bool canOptimizeNspc = false;
void execRef(); void execRef();
size_t inverseOrder(const InferenceEngine::SizeVector& order, size_t axis); size_t inverseOrder(const VectorDims& order, size_t axis);
void execNspcSpecCase(); void execNspcSpecCase();
std::vector<VectorDims> inputStrides; std::vector<VectorDims> inputStrides;
std::vector<size_t> nelemToCopy; // byte moved in each iter std::vector<size_t> nelemToCopy; // byte moved in each iter

View File

@ -211,9 +211,9 @@ private:
std::vector<std::shared_ptr<Input>> outputs; std::vector<std::shared_ptr<Input>> outputs;
}; };
bool Convolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool Convolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
if (!ngraph::is_type<ngraph::op::v1::Convolution>(op) && !ngraph::is_type<ngraph::op::v1::GroupConvolution>(op)) { if (!ov::is_type<ngraph::op::v1::Convolution>(op) && !ov::is_type<ngraph::op::v1::GroupConvolution>(op)) {
errorMessage = "Only opset1 Convolution and GroupConvolution operations are supported"; errorMessage = "Only opset1 Convolution and GroupConvolution operations are supported";
return false; return false;
} }
@ -233,7 +233,7 @@ bool Convolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>
return true; return true;
} }
Convolution::Convolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) Convolution::Convolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)), withBiases(false), withSum(false), withDWConv(false), : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)), withBiases(false), withSum(false), withDWConv(false),
isGrouped(false), dw_conv_oc(0), dw_conv_ih(0), dw_conv_iw(0), dw_conv_in_dt(memory::data_type::undef), isGrouped(false), dw_conv_oc(0), dw_conv_ih(0), dw_conv_iw(0), dw_conv_in_dt(memory::data_type::undef),
groupNum(1lu), IC(1), groupIC(1), groupOC(1), eltwisePrecision(Precision::FP32) { groupNum(1lu), IC(1), groupIC(1), groupOC(1), eltwisePrecision(Precision::FP32) {
@ -242,8 +242,8 @@ Convolution::Convolution(const std::shared_ptr<ngraph::Node>& op, const GraphCon
IE_THROW(NotImplemented) << errorMessage; IE_THROW(NotImplemented) << errorMessage;
} }
auto convolutionOp = ngraph::as_type_ptr<ngraph::op::v1::Convolution>(op); auto convolutionOp = ov::as_type_ptr<ngraph::op::v1::Convolution>(op);
auto groupConvolutionOp = ngraph::as_type_ptr<ngraph::op::v1::GroupConvolution>(op); auto groupConvolutionOp = ov::as_type_ptr<ngraph::op::v1::GroupConvolution>(op);
if (convolutionOp) { if (convolutionOp) {
algorithm = Algorithm::ConvolutionCommon; algorithm = Algorithm::ConvolutionCommon;

View File

@ -19,9 +19,9 @@ class Eltwise;
class Convolution : public Node { class Convolution : public Node {
public: public:
Convolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); Convolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
void getSupportedDescriptors() override; void getSupportedDescriptors() override;
void createDescriptor(const std::vector<MemoryDescPtr>& inputDesc, void createDescriptor(const std::vector<MemoryDescPtr>& inputDesc,
const std::vector<MemoryDescPtr>& outputDesc) override; const std::vector<MemoryDescPtr>& outputDesc) override;
@ -55,7 +55,7 @@ public:
std::vector<int32_t> inputZeroPoints; std::vector<int32_t> inputZeroPoints;
void initializeInputZeroPoints(const uint8_t* inputZpData, const size_t inputZpSize); void initializeInputZeroPoints(const uint8_t* inputZpData, const size_t inputZpSize);
const InferenceEngine::SizeVector &getWeightDims() { return weightDims; } const VectorDims &getWeightDims() { return weightDims; }
const std::vector<size_t> &getStride() { return stride; } const std::vector<size_t> &getStride() { return stride; }
const std::vector<ptrdiff_t> &getDilation() { return dilation; } const std::vector<ptrdiff_t> &getDilation() { return dilation; }
const std::vector<ptrdiff_t> &getPaddingL() { return paddingL; } const std::vector<ptrdiff_t> &getPaddingL() { return paddingL; }
@ -142,7 +142,7 @@ private:
std::vector<ptrdiff_t> dilation; std::vector<ptrdiff_t> dilation;
std::vector<ptrdiff_t> paddingL; std::vector<ptrdiff_t> paddingL;
std::vector<ptrdiff_t> paddingR; std::vector<ptrdiff_t> paddingR;
InferenceEngine::SizeVector weightDims; VectorDims weightDims;
std::unordered_map<int, MemoryPtr> convPostOpsArgs[2]; std::unordered_map<int, MemoryPtr> convPostOpsArgs[2];
size_t dw_conv_oc; size_t dw_conv_oc;

View File

@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool Convert::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool Convert::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto convert = std::dynamic_pointer_cast<const ngraph::opset1::Convert>(op); const auto convert = std::dynamic_pointer_cast<const ngraph::opset1::Convert>(op);
if (!convert) { if (!convert) {
@ -30,7 +30,7 @@ bool Convert::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
return true; return true;
} }
Convert::Convert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) Convert::Convert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, PassThroughShapeInferFactory()) { : Node(op, context, PassThroughShapeInferFactory()) {
std::string errorMessage; std::string errorMessage;
if (isSupportedOperation(op, errorMessage)) { if (isSupportedOperation(op, errorMessage)) {

View File

@ -16,7 +16,7 @@ namespace node {
class Convert : public Node { class Convert : public Node {
public: public:
Convert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); Convert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
Convert(const Shape &shape, const InferenceEngine::Precision &inPrc, const InferenceEngine::Precision &outPrc, Convert(const Shape &shape, const InferenceEngine::Precision &inPrc, const InferenceEngine::Precision &outPrc,
const std::string &nodeName, const GraphContext::CPtr context); const std::string &nodeName, const GraphContext::CPtr context);
@ -44,7 +44,7 @@ public:
bool needPrepareParams() const override { return inputShapesModified(); } bool needPrepareParams() const override { return inputShapesModified(); }
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedDesc(const MemoryDesc &desc); static bool isSupportedDesc(const MemoryDesc &desc);

View File

@ -15,9 +15,9 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op); const auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op);
if (!greedyDecOp) { if (!greedyDecOp) {
errorMessage = "Node is not an instance of the CTCGreedyDecoder operation from operation set v0."; errorMessage = "Node is not an instance of the CTCGreedyDecoder operation from operation set v0.";
return false; return false;
@ -28,7 +28,7 @@ bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr<const ngraph::
return true; return true;
} }
CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) { : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage; std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) { if (!isSupportedOperation(op, errorMessage)) {
@ -47,7 +47,7 @@ CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr<ngraph::Node>& op, cons
if (!dimsEqualWeak(dataDims[0], seqDims[0]) || !dimsEqualWeak(dataDims[1], seqDims[1])) if (!dimsEqualWeak(dataDims[0], seqDims[0]) || !dimsEqualWeak(dataDims[1], seqDims[1]))
IE_THROW() << errorPrefix << "has invalid input shapes."; IE_THROW() << errorPrefix << "has invalid input shapes.";
auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op); auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op);
mergeRepeated = greedyDecOp->get_ctc_merge_repeated(); mergeRepeated = greedyDecOp->get_ctc_merge_repeated();
} }

View File

@ -13,7 +13,7 @@ namespace node {
class CTCGreedyDecoder : public Node { class CTCGreedyDecoder : public Node {
public: public:
CTCGreedyDecoder(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); CTCGreedyDecoder(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
void getSupportedDescriptors() override {}; void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override; void initSupportedPrimitiveDescriptors() override;
@ -22,7 +22,7 @@ public:
void executeDynamicImpl(dnnl::stream strm) override; void executeDynamicImpl(dnnl::stream strm) override;
bool needPrepareParams() const override; bool needPrepareParams() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private: private:
const size_t DATA_INDEX = 0lu; const size_t DATA_INDEX = 0lu;
const size_t SEQUENCE_LENGTH_INDEX = 1lu; const size_t SEQUENCE_LENGTH_INDEX = 1lu;

View File

@ -15,9 +15,9 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op); const auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op);
if (!greedyDecOp) { if (!greedyDecOp) {
errorMessage = "Node is not an instance of the CTCGreedyDecoderSeqLen operation from operation set v6."; errorMessage = "Node is not an instance of the CTCGreedyDecoderSeqLen operation from operation set v6.";
return false; return false;
@ -28,7 +28,7 @@ bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr<const ng
return true; return true;
} }
CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) { : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage; std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) { if (!isSupportedOperation(op, errorMessage)) {
@ -46,7 +46,7 @@ CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr<ngraph::Nod
if (!dimsEqualWeak(dataDims[0], seqDims[0])) if (!dimsEqualWeak(dataDims[0], seqDims[0]))
IE_THROW() << errorPrefix << "has invalid input shapes."; IE_THROW() << errorPrefix << "has invalid input shapes.";
auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op); auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op);
mergeRepeated = greedyDecOp->get_merge_repeated(); mergeRepeated = greedyDecOp->get_merge_repeated();
} }

View File

@ -13,7 +13,7 @@ namespace node {
class CTCGreedyDecoderSeqLen : public Node { class CTCGreedyDecoderSeqLen : public Node {
public: public:
CTCGreedyDecoderSeqLen(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context); CTCGreedyDecoderSeqLen(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
void getSupportedDescriptors() override {}; void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override; void initSupportedPrimitiveDescriptors() override;
@ -22,7 +22,7 @@ public:
void executeDynamicImpl(dnnl::stream strm) override; void executeDynamicImpl(dnnl::stream strm) override;
bool needPrepareParams() const override; bool needPrepareParams() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private: private:
const size_t DATA_INDEX = 0lu; const size_t DATA_INDEX = 0lu;

View File

@ -14,9 +14,9 @@ namespace ov {
namespace intel_cpu { namespace intel_cpu {
namespace node { namespace node {
bool CTCLoss::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool CTCLoss::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try { try {
const auto ctcLossOp = ngraph::as_type_ptr<const ngraph::op::v4::CTCLoss>(op); const auto ctcLossOp = ov::as_type_ptr<const ngraph::op::v4::CTCLoss>(op);
if (!ctcLossOp) { if (!ctcLossOp) {
errorMessage = "Node is not an instance of the CTCLoss operation from operation set v4."; errorMessage = "Node is not an instance of the CTCLoss operation from operation set v4.";
return false; return false;
@ -27,7 +27,7 @@ bool CTCLoss::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
return true; return true;
} }
CTCLoss::CTCLoss(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) CTCLoss::CTCLoss(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) { : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage; std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) { if (!isSupportedOperation(op, errorMessage)) {
@ -39,7 +39,7 @@ CTCLoss::CTCLoss(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CP
if (getOriginalInputsNumber() != 4 && getOriginalInputsNumber() != 5) if (getOriginalInputsNumber() != 4 && getOriginalInputsNumber() != 5)
IE_THROW() << errorPrefix << " has invalid inputs number."; IE_THROW() << errorPrefix << " has invalid inputs number.";
auto ctcLossOp = ngraph::as_type_ptr<const ngraph::op::v4::CTCLoss>(op); auto ctcLossOp = ov::as_type_ptr<const ngraph::op::v4::CTCLoss>(op);
ctcMergeRepeated = ctcLossOp->get_ctc_merge_repeated(); ctcMergeRepeated = ctcLossOp->get_ctc_merge_repeated();
preprocessCollapseRepeated = ctcLossOp->get_preprocess_collapse_repeated(); preprocessCollapseRepeated = ctcLossOp->get_preprocess_collapse_repeated();
unique = ctcLossOp->get_unique(); unique = ctcLossOp->get_unique();

View File

@@ -13,14 +13,14 @@ namespace node {
 class CTCLoss : public Node {
 public:
-    CTCLoss(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    CTCLoss(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
     void execute(dnnl::stream strm) override;
     bool created() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     void executeDynamicImpl(dnnl::stream strm) override;
     bool needPrepareParams() const override { return false; };


@@ -20,7 +20,7 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool CumSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool CumSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         const auto cumsum = std::dynamic_pointer_cast<const ngraph::opset3::CumSum>(op);
         if (!cumsum) {
@@ -33,7 +33,7 @@ bool CumSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
     return true;
 }
-CumSum::CumSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
+CumSum::CumSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
         IE_THROW(NotImplemented) << errorMessage;
@@ -60,8 +60,8 @@ CumSum::CumSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr
     if (getOriginalInputsNumber() == numOfInputs) {
         const auto axis_shape = cumsum->get_input_partial_shape(AXIS);
-        if (axis_shape.is_dynamic() || !ngraph::is_scalar(axis_shape.to_shape()))
-            IE_THROW() << errorPrefix << " doesn't support 'axis' input tensor with non scalar rank";
+        if (axis_shape.is_dynamic() || !ov::is_scalar(axis_shape.to_shape()))
+            OPENVINO_THROW(errorPrefix, " doesn't support 'axis' input tensor with non scalar rank");
     }
     if (dataShape != getOutputShapeAtPort(0))
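Note: the CumSum hunk also migrates the throw macro. IE_THROW() builds its message with stream operators, while OPENVINO_THROW takes a variadic argument list that is concatenated into the exception text. A minimal sketch of the new form, assuming only openvino/core/except.hpp:

    #include <openvino/core/except.hpp>

    #include <string>

    void check_scalar_axis(bool axis_is_scalar, const std::string& errorPrefix) {
        if (!axis_is_scalar)
            // the arguments are concatenated into the what() string of ov::Exception
            OPENVINO_THROW(errorPrefix, " doesn't support 'axis' input tensor with non scalar rank");
    }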


@@ -13,7 +13,7 @@ namespace node {
 class CumSum : public Node {
 public:
-    CumSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    CumSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -23,7 +23,7 @@ public:
     bool needPrepareParams() const override;
     void executeDynamicImpl(dnnl::stream strm) override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     template <typename dataType>


@@ -125,7 +125,7 @@ bool DeconvKey::operator==(const DeconvKey &rhs) const {
 */
 class DeconfolutionShapeInferFactory : public ShapeInferFactory {
 public:
-    DeconfolutionShapeInferFactory(std::shared_ptr<ngraph::Node> op) : m_op(op) {}
+    DeconfolutionShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
     ShapeInferPtr makeShapeInfer() const override {
         if (m_op->get_input_size() > 2) {
             return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), PortMask(2));
@@ -133,11 +133,11 @@ public:
         return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), EMPTY_PORT_MASK);
     }
 private:
-    std::shared_ptr<ngraph::Node> m_op;
+    std::shared_ptr<ov::Node> m_op;
 };
 } // namespace
-bool Deconvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Deconvolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         if (std::dynamic_pointer_cast<const ngraph::opset1::ConvolutionBackpropData>(op) == nullptr &&
                 std::dynamic_pointer_cast<const ngraph::opset1::GroupConvolutionBackpropData>(op) == nullptr) {
@@ -159,7 +159,7 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Nod
     return true;
 }
-Deconvolution::Deconvolution(const std::shared_ptr<ngraph::Node>& op,
+Deconvolution::Deconvolution(const std::shared_ptr<ov::Node>& op,
                              const GraphContext::CPtr context) : Node(op, context, DeconfolutionShapeInferFactory(op)) {
     std::string errorMessage;
     errorPrefix = "Deconvolution node with name '" + getName() + "' ";
@@ -220,7 +220,7 @@ Deconvolution::Deconvolution(const std::shared_ptr<ngraph::Node>& op,
     externOutShape = inputShapes.size() == 3;
     biasPort = externOutShape ? 3 : 2;
     if (externOutShape && isDynamicNode()) {
-        bool isConstOutShape = ngraph::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
+        bool isConstOutShape = ov::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
         if (isConstOutShape) {
             lastOutputSpatialDims = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(2))->cast_vector<int32_t>();
         }
@@ -246,7 +246,7 @@ InferenceEngine::Blob::Ptr Deconvolution::createWeiBlobAsIO(InferenceEngine::Siz
     InferenceEngine::SizeVector dimsForBlockedDesc{dims};
     std::swap(dimsForBlockedDesc[withGroups + 0], dimsForBlockedDesc[withGroups + 1]);
-    InferenceEngine::SizeVector orderForBlockedDesc;
+    VectorDims orderForBlockedDesc;
     if (withGroups) {
         orderForBlockedDesc = {0, 2, 1};
     } else {
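Note: the shape-infer factory above passes PortMask(2) when the deconvolution carries an explicit output-shape input on port 2, and EMPTY_PORT_MASK otherwise. A sketch of the idea, with a hypothetical helper mirroring (not quoting) the plugin's PortMask: each set bit marks a port whose data, not just its shape, must be read during shape inference.

    #include <cstdint>

    // Hypothetical stand-in for the plugin's PortMask utility.
    constexpr uint32_t port_mask(uint32_t port) { return 1u << port; }
    constexpr uint32_t EMPTY_PORT_MASK = 0u;

    // Shape inference would consult the mask before dereferencing input data.
    constexpr bool needs_input_data(uint32_t mask, uint32_t port) {
        return (mask & (1u << port)) != 0u;
    }

    static_assert(needs_input_data(port_mask(2), 2), "port 2 is value-dependent");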


@@ -19,7 +19,7 @@ namespace node {
 class Deconvolution : public Node {
 public:
-    Deconvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Deconvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override;
     void initSupportedPrimitiveDescriptors() override;
@@ -40,7 +40,7 @@ public:
     InferenceEngine::Precision getRuntimePrecision() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     bool canFuse(const NodePtr& node) const override;
     const VectorDims& getWeightDims() const { return getInputShapeAtPort(1).getStaticDims(); }


@@ -670,7 +670,7 @@ private:
     }
 };
 #endif
-bool DeformableConvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool DeformableConvolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         if (!one_of(op->get_type_info(),
                 ngraph::op::v1::DeformableConvolution::get_type_info_static(),
@@ -742,7 +742,7 @@ bool DefConvKey::operator==(const DefConvKey &rhs) const {
 } // namespace
-DeformableConvolution::DeformableConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+DeformableConvolution::DeformableConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {


@@ -71,9 +71,9 @@ struct jit_uni_def_conv_kernel {
 class DeformableConvolution : public Node {
 public:
-    DeformableConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    DeformableConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     void getSupportedDescriptors() override;
     void initSupportedPrimitiveDescriptors() override;
     void execute(dnnl::stream strm) override;


@@ -49,7 +49,7 @@ bool DepthToSpace::DepthToSpaceAttrs::operator==(const DepthToSpaceAttrs& rhs) c
     return result;
 }
-bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         auto depthToSpace = ov::as_type_ptr<const ngraph::opset1::DepthToSpace>(op);
         if (!depthToSpace) {
@@ -58,7 +58,7 @@ bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node
         }
         const auto mode = depthToSpace->get_mode();
         if (!one_of(mode, ngraph::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, ngraph::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST)) {
-            errorMessage = "Does not support mode: " + ngraph::as_string(mode);
+            errorMessage = "Does not support mode: " + ov::as_string(mode);
             return false;
         }
     } catch (...) {
@@ -67,7 +67,7 @@ bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node
     return true;
 }
-DepthToSpace::DepthToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+DepthToSpace::DepthToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
@@ -86,7 +86,7 @@ DepthToSpace::DepthToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphC
     } else if (modeNgraph == ngraph::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST) {
         attrs.mode = Mode::DEPTH_FIRST;
     } else {
-        THROW_ERROR << "doesn't support mode: " << ngraph::as_string(modeNgraph);
+        THROW_ERROR << "doesn't support mode: " << ov::as_string(modeNgraph);
     }
     attrs.blockSize = depthToSpace->get_block_size();


@@ -15,9 +15,9 @@ namespace node {
 class DepthToSpace : public Node {
 public:
-    DepthToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    DepthToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     void getSupportedDescriptors() override;
     void initSupportedPrimitiveDescriptors() override;
     void createPrimitive() override;


@@ -51,7 +51,7 @@ bool DetectionOutput::isSupportedOperation(const std::shared_ptr<const ov::Node>
     return true;
 }
-DetectionOutput::DetectionOutput(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+DetectionOutput::DetectionOutput(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {


@@ -25,7 +25,7 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool DFT::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool DFT::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         if (isDynamicNgraphNode(op)) {
             errorMessage = "Doesn't support op with dynamic shapes";
@@ -44,7 +44,7 @@ bool DFT::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, st
     return true;
 }
-DFT::DFT(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
+DFT::DFT(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
     Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {


@@ -16,7 +16,7 @@ namespace node {
 class DFT : public Node {
 public:
-    DFT(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    DFT(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     ~DFT() override = default;
     void getSupportedDescriptors() override;
@@ -26,7 +26,7 @@ public:
     void prepareParams() override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     std::vector<int32_t> getAxes() const;


@@ -935,7 +935,7 @@ private:
 #endif // OPENVINO_ARCH_X86_64
-Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shared_ptr<ngraph::Node>& op) {
+Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shared_ptr<ov::Node>& op) {
     const auto const1 = ov::as_type_ptr<ngraph::opset1::Constant>(op->get_input_node_shared_ptr(0));
     const auto const2 = ov::as_type_ptr<ngraph::opset1::Constant>(op->get_input_node_shared_ptr(1));
     int constPort = -1;
@@ -948,49 +948,49 @@ Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shar
     }
     auto const_shape = op->get_input_shape(constPort);
-    if (ngraph::shape_size(const_shape) == 1)
+    if (ov::shape_size(const_shape) == 1)
         return PerTensor;
     else
         return PerChannel;
 }
-const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::getInitializers() {
-    static const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer> initializers = {
-        {ngraph::op::v1::Add::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+const std::map<const ov::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::getInitializers() {
+    static const std::map<const ov::DiscreteTypeInfo, Eltwise::Initializer> initializers = {
+        {ngraph::op::v1::Add::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseAdd;
             node.broadcastingPolicy = determineBroadcastingPolicy(op);
         }},
-        {ngraph::op::v1::Subtract::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Subtract::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSubtract;
             node.broadcastingPolicy = determineBroadcastingPolicy(op);
         }},
-        {ngraph::op::v1::Multiply::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Multiply::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseMultiply;
            node.broadcastingPolicy = determineBroadcastingPolicy(op);
        }},
-        {ngraph::op::v1::Divide::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Divide::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseDivide;
             node.broadcastingPolicy = determineBroadcastingPolicy(op);
         }},
-        {ngraph::op::v0::SquaredDifference::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::SquaredDifference::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSquaredDifference;
         }},
-        {ngraph::op::v1::Maximum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Maximum::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseMaximum;
         }},
-        {ngraph::op::v1::Minimum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Minimum::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseMinimum;
         }},
-        {ngraph::op::v1::Mod::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Mod::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseMod;
         }},
-        {ngraph::op::v1::FloorMod::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::FloorMod::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseFloorMod;
         }},
-        {ngraph::op::v1::Power::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Power::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwisePowerDynamic;
         }},
-        {PowerStaticNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {PowerStaticNode::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto powerStatic = getNgraphOpAs<PowerStaticNode>(op);
             node.algorithm = Algorithm::EltwisePowerStatic;
             node.alpha = powerStatic->get_power();
@@ -998,10 +998,10 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
             node.gamma = powerStatic->get_shift();
             node.broadcastingPolicy = PerTensor;
         }},
-        {ngraph::op::v1::Equal::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Equal::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseEqual;
         }},
-        {ngraph::op::v1::NotEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::NotEqual::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseNotEqual;
         }},
         {ov::op::v10::IsFinite::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
@@ -1016,46 +1016,46 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
         {ov::op::v10::IsNaN::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseIsNaN;
         }},
-        {ngraph::op::v1::Greater::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Greater::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseGreater;
         }},
-        {ngraph::op::v1::GreaterEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::GreaterEqual::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseGreaterEqual;
         }},
-        {ngraph::op::v1::Less::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Less::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLess;
         }},
-        {ngraph::op::v1::LessEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LessEqual::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLessEqual;
         }},
-        {ngraph::op::v1::LogicalAnd::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalAnd::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLogicalAnd;
         }},
-        {ngraph::op::v1::LogicalOr::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalOr::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLogicalOr;
         }},
-        {ngraph::op::v1::LogicalXor::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalXor::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLogicalXor;
         }},
-        {ngraph::op::v1::LogicalNot::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalNot::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLogicalNot;
         }},
-        {ngraph::op::v0::Relu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Relu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseRelu;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_relu;
         }},
-        {LeakyReluNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {LeakyReluNode::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto leakyRelu = getNgraphOpAs<LeakyReluNode>(op);
             node.algorithm = Algorithm::EltwiseRelu;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_relu;
             node.alpha = leakyRelu->get_slope();
             node.beta = 0.0f;
         }},
-        {ngraph::op::v0::Gelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Gelu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseGeluErf;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_gelu_erf;
         }},
-        {ngraph::op::v7::Gelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v7::Gelu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto gelu = getNgraphOpAs<ngraph::op::v7::Gelu>(op);
             ngraph::op::GeluApproximationMode approximationMode = gelu->get_approximation_mode();
             if (approximationMode == ngraph::op::GeluApproximationMode::ERF) {
@@ -1068,29 +1068,29 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
                 IE_THROW(NotImplemented) << "CPU Eltwise node doesn't support ngraph operation Gelu with approximation mode: " << approximationMode;
             }
         }},
-        {ngraph::op::v0::Elu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Elu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto eluOp = getNgraphOpAs<ngraph::op::v0::Elu>(op);
             node.alpha = static_cast<float>(eluOp->get_alpha());
             node.algorithm = Algorithm::EltwiseElu;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_elu;
         }},
-        {ngraph::op::v0::Tanh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Tanh::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseTanh;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_tanh;
         }},
-        {ngraph::op::v0::Sigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Sigmoid::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSigmoid;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_logistic;
         }},
-        {ngraph::op::v0::Abs::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Abs::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseAbs;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_abs;
         }},
-        {ngraph::op::v0::Sqrt::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Sqrt::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSqrt;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_sqrt;
         }},
-        {ngraph::op::v0::Clamp::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Clamp::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto clampOp = getNgraphOpAs<ngraph::op::v0::Clamp>(op);
             float alpha_ = static_cast<float>(clampOp->get_min());
@@ -1105,32 +1105,32 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
             node.algorithm = Algorithm::EltwiseClamp;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_clip;
         }},
-        {ngraph::op::v0::Exp::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Exp::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseExp;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_exp;
         }},
-        {SwishNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {SwishNode::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto swishOp = getNgraphOpAs<SwishNode>(op);
             node.algorithm = Algorithm::EltwiseSwish;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_swish;
             node.alpha = swishOp->get_alpha();
         }},
-        {ngraph::op::v4::HSwish::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v4::HSwish::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             // since v3.0 version, oneDNN has flexible implementation of hardswish, ov still uses the one with hardcoded alpha and beta
             node.alpha = 1.f / 6.f;
             node.beta = 0.5f;
             node.algorithm = Algorithm::EltwiseHswish;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_hardswish;
         }},
-        {ngraph::op::v4::Mish::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v4::Mish::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseMish;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_mish;
         }},
-        {ngraph::op::v5::HSigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v5::HSigmoid::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseHsigmoid;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_hsigmoid;
         }},
-        {ngraph::op::v5::Round::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v5::Round::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             auto roundOp = getNgraphOpAs<ngraph::op::v5::Round>(op);
             switch (roundOp->get_mode()) {
@@ -1144,25 +1144,25 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
                 break;
             }
         }},
-        {ngraph::op::v0::PRelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::PRelu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwisePrelu;
             node.broadcastingPolicy = determineBroadcastingPolicy(op);
         }},
-        {ngraph::op::v0::Erf::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Erf::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseErf;
         }},
-        {ngraph::op::v4::SoftPlus::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v4::SoftPlus::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSoftRelu;
             node.alpha = 1.f;
             node.onednnAlgorithm = dnnl::algorithm::eltwise_soft_relu;
         }},
-        {ngraph::op::v9::SoftSign::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v9::SoftSign::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSoftSign;
         }},
-        {ngraph::op::v1::Select::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Select::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseSelect;
         }},
-        {ngraph::op::v0::Log::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Log::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
             node.algorithm = Algorithm::EltwiseLog;
         }},
         {op::v13::BitwiseAnd::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
@@ -1926,7 +1926,7 @@ static Eltwise::executorPtr buildExecutor(const EltwiseKey& key) {
                       key.implType == EltwiseImplType::optimizedShapeAgnostic);
 }
-bool Eltwise::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Eltwise::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         if (getInitializers().find(op->get_type_info()) == getInitializers().end()) {
             errorMessage = "Doesn't support Eltwise algorithm: " + std::string(op->get_type_name());
@@ -1935,14 +1935,14 @@ bool Eltwise::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
         if (const auto binOp = ov::as_type_ptr<const ov::op::util::BinaryElementwiseArithmetic>(op)) {
             if (binOp->get_autob().m_type != ngraph::op::AutoBroadcastType::NONE &&
                 binOp->get_autob().m_type != ngraph::op::AutoBroadcastType::NUMPY) {
-                errorMessage = "Doesn't support broadcast type: " + ngraph::as_string(binOp->get_autob().m_type);
+                errorMessage = "Doesn't support broadcast type: " + ov::as_string(binOp->get_autob().m_type);
                 return false;
             }
         }
         if (const auto select = ov::as_type_ptr<const ov::op::v1::Select>(op)) {
             if (select->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NONE &&
                 select->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NUMPY) {
-                errorMessage = "Doesn't support broadcast type: " + ngraph::as_string(select->get_autob().m_type);
+                errorMessage = "Doesn't support broadcast type: " + ov::as_string(select->get_autob().m_type);
                 return false;
             }
         }
@@ -1952,7 +1952,7 @@ bool Eltwise::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
     return true;
 }
-Eltwise::Eltwise(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
+Eltwise::Eltwise(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
     Node(op, context, EltwiseShapeInferFactory()), broadcastingPolicy(Undefined) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
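Note: getInitializers() above is a dispatch table keyed by ov::DiscreteTypeInfo, which defines ordering, so it can key a std::map directly and unsupported operations simply miss the table. A trimmed, self-contained sketch of the pattern (one entry only; the real table configures the Eltwise node):

    #include <openvino/op/add.hpp>

    #include <functional>
    #include <map>
    #include <memory>

    using Init = std::function<void(const std::shared_ptr<ov::Node>&)>;

    const std::map<ov::DiscreteTypeInfo, Init>& initializers() {
        static const std::map<ov::DiscreteTypeInfo, Init> table = {
            {ov::op::v1::Add::get_type_info_static(),
             [](const std::shared_ptr<ov::Node>&) { /* set algorithm = EltwiseAdd */ }},
        };
        return table;
    }

    bool supported(const std::shared_ptr<ov::Node>& op) {
        // a miss here is exactly the rejection path of isSupportedOperation()
        return initializers().count(op->get_type_info()) != 0;
    }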


@@ -103,7 +103,7 @@ public:
     using executorPtr = std::shared_ptr<IEltwiseExecutor>;
 public:
-    Eltwise(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Eltwise(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override;
     void initSupportedPrimitiveDescriptors() override;
@@ -141,7 +141,7 @@ public:
     BroadcastingPolicy getBroadcastingPolicy() const { return broadcastingPolicy; }
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     executorPtr execPtr = nullptr;
@@ -185,10 +185,10 @@ private:
     std::vector<MemoryPtr> memPtrs = {};
     std::vector<const void*> fqDataPtrs;
-    using Initializer = std::function<void(const std::shared_ptr<ngraph::Node>&, Eltwise& node)>;
-    static const std::map<const ngraph::DiscreteTypeInfo, Initializer>& getInitializers();
-    static BroadcastingPolicy determineBroadcastingPolicy(const std::shared_ptr<ngraph::Node>& op);
+    using Initializer = std::function<void(const std::shared_ptr<ov::Node>&, Eltwise& node)>;
+    static const std::map<const ov::DiscreteTypeInfo, Initializer>& getInitializers();
+    static BroadcastingPolicy determineBroadcastingPolicy(const std::shared_ptr<ov::Node>& op);
     size_t getOpInputsNum() const;


@@ -14,9 +14,9 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool EmbeddingBagOffsetSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool EmbeddingBagOffsetSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        const auto embBagOffsetSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingBagOffsetsSum>(op);
+        const auto embBagOffsetSumOp = ov::as_type_ptr<const ngraph::op::v3::EmbeddingBagOffsetsSum>(op);
         if (!embBagOffsetSumOp) {
             errorMessage = "Node is not an instance of the EmbeddingBagOffsetsSum operation from opset v3.";
             return false;
@@ -27,7 +27,7 @@ bool EmbeddingBagOffsetSum::isSupportedOperation(const std::shared_ptr<const ngr
     return true;
 }
-EmbeddingBagOffsetSum::EmbeddingBagOffsetSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+EmbeddingBagOffsetSum::EmbeddingBagOffsetSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)),
       EmbeddingBagSum(op, 3lu, 1lu, 4lu, 3lu) {
     std::string errorMessage;


@@ -17,7 +17,7 @@ namespace node {
 class EmbeddingBagOffsetSum : public Node, public EmbeddingBagSum {
 public:
-    EmbeddingBagOffsetSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    EmbeddingBagOffsetSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -25,7 +25,7 @@ public:
     bool created() const override;
     bool isExecutable() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 protected:
     void prepareParams() override;


@@ -14,9 +14,9 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool EmbeddingBagPackedSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool EmbeddingBagPackedSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        const auto embBagPackedSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingBagPackedSum>(op);
+        const auto embBagPackedSumOp = ov::as_type_ptr<const ngraph::op::v3::EmbeddingBagPackedSum>(op);
         if (!embBagPackedSumOp) {
             errorMessage = "Node is not an instance of the EmbeddingBagPackedSum operation from opset v3.";
             return false;
@@ -27,7 +27,7 @@ bool EmbeddingBagPackedSum::isSupportedOperation(const std::shared_ptr<const ngr
     return true;
 }
-EmbeddingBagPackedSum::EmbeddingBagPackedSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+EmbeddingBagPackedSum::EmbeddingBagPackedSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)),
      EmbeddingBagSum(op, 2lu, 1lu, 2lu, 3lu) {
    std::string errorMessage;


@@ -17,7 +17,7 @@ namespace node {
 class EmbeddingBagPackedSum : public Node, public EmbeddingBagSum {
 public:
-    EmbeddingBagPackedSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    EmbeddingBagPackedSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -25,7 +25,7 @@ public:
     bool created() const override;
     bool isExecutable() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 protected:
     void prepareParams() override;


@@ -18,7 +18,7 @@ namespace intel_cpu {
 namespace node {
 EmbeddingBagSum::EmbeddingBagSum(
-    const std::shared_ptr<ngraph::Node>& op,
+    const std::shared_ptr<ov::Node>& op,
     size_t requiredInputNum,
     size_t indicesIdx,
     size_t perSampleWeightsIdx,
@@ -48,7 +48,7 @@ void EmbeddingBagSum::prepareParams(const VectorDims& indexStaticShape) {
 template<typename T>
 void EmbeddingBagSum::processData(const T* srcData, const T* weightsData,
-                                  const InferenceEngine::SizeVector& inDataDims, const MemoryPtr& outMemory) {
+                                  const VectorDims& inDataDims, const MemoryPtr& outMemory) {
     std::string msgPrefix = std::string("Node EmbeddingBagSum with name '") + _layerName + "' ";
     initFromInputs();
@@ -120,7 +120,7 @@ void EmbeddingBagSum::processData(const T* srcData, const T* weightsData,
 }
 void EmbeddingBagSum::execute(const uint8_t* srcData, const uint8_t* weightsData, const InferenceEngine::Precision &srcPrc,
-                              const InferenceEngine::SizeVector& inDims, const MemoryPtr& outMemory) {
+                              const VectorDims& inDims, const MemoryPtr& outMemory) {
     switch (srcPrc) {
     case Precision::FP32: {
         return processData<PrecisionTrait<Precision::FP32>::value_type>(reinterpret_cast<const float*>(srcData),


@@ -17,14 +17,14 @@ namespace node {
 class EmbeddingBagSum {
 public:
     EmbeddingBagSum(
-        const std::shared_ptr<ngraph::Node>&,
+        const std::shared_ptr<ov::Node>&,
         size_t requiredInputsNum,
         size_t indicesIdx,
         size_t perSampleWeightsIdx,
         size_t defaultIndexIdx);
     void execute(const uint8_t* srcData, const uint8_t* weightsData, const InferenceEngine::Precision &srcPrc,
-                 const InferenceEngine::SizeVector& inDims, const MemoryPtr& outMemory);
+                 const VectorDims& inDims, const MemoryPtr& outMemory);
     ~EmbeddingBagSum() = default;
@@ -41,7 +41,7 @@ protected:
     template<typename T>
     void processData(const T* srcData, const T* weightsData,
-                     const InferenceEngine::SizeVector& inDataDims, const MemoryPtr& outMemory);
+                     const VectorDims& inDataDims, const MemoryPtr& outMemory);
     const size_t EMB_TABLE_IDX = 0lu;
     const size_t INDICES_IDX;


@@ -14,9 +14,9 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        const auto embBagSegSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingSegmentsSum>(op);
+        const auto embBagSegSumOp = ov::as_type_ptr<const ngraph::op::v3::EmbeddingSegmentsSum>(op);
         if (!embBagSegSumOp) {
             errorMessage = "Node is not an instance of the EmbeddingSegmentsSum operation from opset v3.";
             return false;
@@ -27,7 +27,7 @@ bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr<const ngra
     return true;
 }
-EmbeddingSegmentsSum::EmbeddingSegmentsSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+EmbeddingSegmentsSum::EmbeddingSegmentsSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, PortMask(NUM_SEGMENTS_IDX))),
       EmbeddingBagSum(op, 4lu, 1lu, 5lu, 4lu) {
     std::string errorMessage;


@@ -17,7 +17,7 @@ namespace node {
 class EmbeddingSegmentsSum : public Node, public EmbeddingBagSum {
 public:
-    EmbeddingSegmentsSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    EmbeddingSegmentsSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -25,7 +25,7 @@ public:
     bool created() const override;
     bool isExecutable() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 protected:
     void prepareParams() override;


@@ -50,7 +50,7 @@ void ACLScheduler::schedule_custom(ICPPKernel *kernel, const Hints &hints, const
     const auto num_windows = _num_threads;
     const auto hints_split_dimension = hints.split_dimension();
-    InferenceEngine::parallel_for(num_windows, [&](int wid) {
+    ov::parallel_for(num_windows, [&](int wid) {
         Window win = max_window.split_window(hints_split_dimension, wid, num_windows);
         win.validate();
         main_run(win, {wid, static_cast<int>(_num_threads), &cpu_info()});
@@ -68,7 +68,7 @@ void ACLScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
 }
 void ACLScheduler::run_workloads(std::vector<arm_compute::IScheduler::Workload> &workloads) {
-    InferenceEngine::parallel_for(workloads.size(), [&](int wid) {
+    ov::parallel_for(workloads.size(), [&](int wid) {
         workloads[wid]({wid, static_cast<int>(parallel_get_num_threads()), &cpu_info()});
     });
 }
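Note: ov::parallel_for keeps the shape of the InferenceEngine helper it replaces here: a work count plus a callable invoked once per index on the configured threading backend, so the ACL scheduler hunks are a pure namespace swap. A minimal sketch, assuming only openvino/core/parallel.hpp:

    #include <openvino/core/parallel.hpp>

    #include <vector>

    void scale_in_place(std::vector<float>& data, float k) {
        // the callable receives the iteration index; iterations may run concurrently
        ov::parallel_for(data.size(), [&](size_t i) {
            data[i] *= k;
        });
    }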


@@ -20,7 +20,7 @@ public:
     impl_desc_type getImplType() const override { return implType; }
 private:
     static int64_t calcShapeSize(const Shape& shape, size_t start, size_t end);
-    static bool IsTransposeMovingSingleAxis(InferenceEngine::SizeVector permutations, size_t& from, size_t& to);
+    static bool IsTransposeMovingSingleAxis(VectorDims permutations, size_t& from, size_t& to);
     void TransposeSingleAxisOutwards(const MemoryCPtr& input, const MemoryPtr& output, size_t from, size_t to);
     void TransposeSingleAxisInwards(const MemoryCPtr& input, const MemoryPtr& output, size_t from, size_t to);


@@ -47,7 +47,7 @@ public:
     virtual impl_desc_type getImplType() const = 0;
-    static InferenceEngine::SizeVector transformTo5DCase(const InferenceEngine::SizeVector& shape, bool initAcrossChannels);
+    static VectorDims transformTo5DCase(const VectorDims& shape, bool initAcrossChannels);
 protected:
     MVNAttrs mvnAttrs;
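Note: the SizeVector → VectorDims swaps in these executor headers are type-alias changes only; both names resolve to std::vector<size_t>, so layout is untouched and only the InferenceEngine dependency goes away. A sketch, with the alias restated here as an assumption about the plugin's cpu_types.h:

    #include <cstddef>
    #include <vector>

    using VectorDims = std::vector<size_t>;  // assumed to match ov::intel_cpu::VectorDims

    // transformTo5DCase-style helpers keep working unchanged under the new alias.
    VectorDims pad_to_rank(VectorDims shape, size_t rank) {
        if (shape.size() < rank)
            shape.resize(rank, 1);  // pad trailing dims with 1 (illustrative policy)
        return shape;
    }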


@@ -223,9 +223,9 @@ bool ExperimentalDetectronDetectionOutput::needPrepareParams() const {
     return false;
 }
-bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        const auto doOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
+        const auto doOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
         if (!doOp) {
             errorMessage = "Node is not an instance of the ExperimentalDetectronDetectionOutput from the operations set v6.";
             return false;
@@ -236,14 +236,14 @@ bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::share
     return true;
 }
-ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const std::shared_ptr<ngraph::Node>& op,
+ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const std::shared_ptr<ov::Node>& op,
                                                                            const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
         IE_THROW(NotImplemented) << errorMessage;
     }
-    auto doOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
+    auto doOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
     auto attributes = doOp->get_attrs();
     score_threshold_ = attributes.score_threshold;
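
The recurring pattern in these hunks: the node pointer type and the downcast helper move to ov::, while the op classes themselves stay under ngraph::op for now (the commit message excludes ngraph::op from the cleanup). A sketch of the same validation idiom against a stock op; Relu stands in for the detectron ops, and isReluV0 is a hypothetical helper.

    #include "openvino/core/type.hpp"
    #include "openvino/op/relu.hpp"

    #include <memory>
    #include <string>

    static bool isReluV0(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) {
        // ov::as_type_ptr returns nullptr on a type mismatch instead of throwing.
        const auto relu = ov::as_type_ptr<const ov::op::v0::Relu>(op);
        if (!relu) {
            errorMessage = "Node is not an instance of the Relu operation from the operations set v0.";
            return false;
        }
        return true;
    }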


@@ -13,7 +13,7 @@ namespace node {
 class ExperimentalDetectronDetectionOutput : public Node {
 public:
-    ExperimentalDetectronDetectionOutput(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    ExperimentalDetectronDetectionOutput(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -23,7 +23,7 @@ public:
     bool needShapeInfer() const override;
     bool needPrepareParams() const override;
     void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     const int INPUT_ROIS {0};


@@ -272,9 +272,9 @@ void fill_output_blobs(const float* proposals, const int* roi_indices,
 } // namespace
 bool ExperimentalDetectronGenerateProposalsSingleImage::isSupportedOperation
-    (const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+    (const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        const auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
+        const auto proposalOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
         if (!proposalOp) {
             errorMessage = "Node is not an instance of the Proposal from the operations set v0.";
             return false;
@@ -286,7 +286,7 @@ bool ExperimentalDetectronGenerateProposalsSingleImage::isSupportedOperation
 }
 ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerateProposalsSingleImage(
-        const std::shared_ptr<ngraph::Node>& op,
+        const std::shared_ptr<ov::Node>& op,
         const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
@@ -294,7 +294,7 @@ ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerate
         IE_THROW(NotImplemented) << errorMessage;
     }
-    auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
+    auto proposalOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
     auto proposalAttrs = proposalOp->get_attrs();
     min_size_ = proposalAttrs.min_size;


@@ -13,7 +13,7 @@ namespace node {
 class ExperimentalDetectronGenerateProposalsSingleImage : public Node {
 public:
-    ExperimentalDetectronGenerateProposalsSingleImage(const std::shared_ptr<ngraph::Node>& op,
+    ExperimentalDetectronGenerateProposalsSingleImage(const std::shared_ptr<ov::Node>& op,
                                                       const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
@@ -24,7 +24,7 @@ public:
     bool needShapeInfer() const override;
     bool needPrepareParams() const override;
     void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     // Inputs:


@@ -14,7 +14,7 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
+bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
                                                                    std::string& errorMessage) noexcept {
     try {
         const auto priorGridGen = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronPriorGridGenerator>(op);
@@ -29,7 +29,7 @@ bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::sh
 }
 ExperimentalDetectronPriorGridGenerator::ExperimentalDetectronPriorGridGenerator(
-        const std::shared_ptr<ngraph::Node>& op,
+        const std::shared_ptr<ov::Node>& op,
         const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;


@@ -13,7 +13,7 @@ namespace node {
 class ExperimentalDetectronPriorGridGenerator : public Node {
 public:
-    ExperimentalDetectronPriorGridGenerator(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    ExperimentalDetectronPriorGridGenerator(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -22,7 +22,7 @@ public:
     bool needPrepareParams() const override;
     void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     // Inputs:


@@ -281,7 +281,7 @@ void split_points(const std::vector<int>& ids, std::vector<int>& rois_per_level,
 } // namespace
-bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
+bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
                                                                     std::string& errorMessage) noexcept {
     try {
         const auto roiFeatureExtractor = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronROIFeatureExtractor>(op);
@@ -296,7 +296,7 @@ bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::s
 }
 ExperimentalDetectronROIFeatureExtractor::ExperimentalDetectronROIFeatureExtractor(
-        const std::shared_ptr<ngraph::Node>& op,
+        const std::shared_ptr<ov::Node>& op,
         const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;


@@ -13,7 +13,7 @@ namespace node {
 class ExperimentalDetectronROIFeatureExtractor : public Node {
 public:
-    ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -23,7 +23,7 @@ public:
     bool needPrepareParams() const override { return false; };
     void executeDynamicImpl(dnnl::stream strm) override { execute(strm); };
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     const int INPUT_ROIS {0};


@@ -17,7 +17,7 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         const auto topKROI = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronTopKROIs>(op);
         if (!topKROI) {
@@ -30,7 +30,7 @@ bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr<c
     return true;
 }
-ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const std::shared_ptr<ngraph::Node>& op,
+ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const std::shared_ptr<ov::Node>& op,
                                                              const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;


@@ -13,7 +13,7 @@ namespace node {
 class ExperimentalDetectronTopKROIs : public Node {
 public:
-    ExperimentalDetectronTopKROIs(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    ExperimentalDetectronTopKROIs(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -24,7 +24,7 @@ public:
     bool needPrepareParams() const override { return false; };
     void executeDynamicImpl(dnnl::stream strm) override { execute(strm); };
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     // Inputs:


@@ -272,16 +272,16 @@ private:
     }
 };
 #endif // OPENVINO_ARCH_X86_64
-bool ExtractImagePatches::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool ExtractImagePatches::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        auto extImgPatcher = ngraph::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
+        auto extImgPatcher = ov::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
         if (!extImgPatcher) {
             errorMessage = "Only opset3 ExtractImagePatches operation is supported";
             return false;
         }
         const auto padValue = extImgPatcher->get_auto_pad();
         if (!one_of(padValue, ngraph::op::PadType::VALID, ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER)) {
-            errorMessage = "Does not support pad type: " + ngraph::as_string(padValue);
+            errorMessage = "Does not support pad type: " + ov::as_string(padValue);
             return false;
         }
         if (!everyone_is(2u, extImgPatcher->get_sizes().size(), extImgPatcher->get_strides().size(), extImgPatcher->get_rates().size())) {
@@ -328,7 +328,7 @@ bool ExtractImagePatchesKey::operator==(const ExtractImagePatchesKey& rhs) const
     }
 } // namespace
-ExtractImagePatches::ExtractImagePatches(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+ExtractImagePatches::ExtractImagePatches(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
@@ -336,7 +336,7 @@ ExtractImagePatches::ExtractImagePatches(const std::shared_ptr<ngraph::Node>& op
     }
     errorPrefix = "ExtractImagePatches layer with name '" + op->get_friendly_name() + "' ";
-    auto extImgPatcher = ngraph::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
+    auto extImgPatcher = ov::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
     if (inputShapes.size() != 1 || outputShapes.size() != 1)
         IE_THROW() << errorPrefix << "has incorrect number of input or output edges!"
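
Both ngraph::as_string and ov::as_string stringify any value that has a stream insertion operator, which is how the pad-type message above is built. A sketch of the ported call; the header carrying ov::as_string is an assumption here, and the chosen pad type is arbitrary.

    // Sketch only: header location for ov::as_string is assumed.
    #include "openvino/core/type.hpp"
    #include "openvino/op/util/attr_types.hpp"

    #include <iostream>
    #include <string>

    int main() {
        const auto pad = ov::op::PadType::SAME_UPPER;
        // Builds the same kind of diagnostic string as the hunk above.
        const std::string msg = "Does not support pad type: " + ov::as_string(pad);
        std::cout << msg << std::endl;
        return 0;
    }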


@@ -44,7 +44,7 @@ struct jit_uni_extract_image_patches_kernel {
 class ExtractImagePatches : public Node {
 public:
-    ExtractImagePatches(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    ExtractImagePatches(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -54,7 +54,7 @@ public:
     void executeDynamicImpl(dnnl::stream strm) override;
     void prepareParams() override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     enum class ExtImgPatcherPadType {
         VALID,
         SAME_LOWER,


@@ -55,8 +55,8 @@ Eye::Eye(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     }
     outType = op->get_output_element_type(0);
     withBatchShape = (op->get_input_size() == 4);
-    if (!one_of(outType, ngraph::element::f32, ngraph::element::bf16,
-                ngraph::element::i32, ngraph::element::i8, ngraph::element::u8)) {
+    if (!one_of(outType, ov::element::f32, ov::element::bf16,
+                ov::element::i32, ov::element::i8, ov::element::u8)) {
         THROW_ERROR << errorPrefix << "doesn't support demanded output precision";
     }
 }
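
ov::element exposes the same precision tokens the old ngraph::element namespace aliased, so checks like the Eye hunk above port one-to-one. A sketch under that assumption; isSupportedOutputPrecision is a hypothetical helper mirroring the whitelist above.

    #include "openvino/core/type/element_type.hpp"

    // Hypothetical helper condensing the precision whitelist from the hunk.
    static bool isSupportedOutputPrecision(const ov::element::Type& t) {
        return t == ov::element::f32 || t == ov::element::bf16 ||
               t == ov::element::i32 || t == ov::element::i8 ||
               t == ov::element::u8;
    }

    int main() {
        return isSupportedOutputPrecision(ov::element::f32) ? 0 : 1;
    }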


@@ -22,7 +22,7 @@ public:
     static constexpr size_t BATCH_SHAPE = 3lu;
 public:
-    Eye(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Eye(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override;
     void initSupportedPrimitiveDescriptors() override;
@@ -32,7 +32,7 @@ public:
     bool needShapeInfer() const override {return true;};
     void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     std::string errorPrefix = "";


@@ -864,7 +864,7 @@ private:
     }
 };
 #endif
-bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         const auto fq = std::dynamic_pointer_cast<const ngraph::opset1::FakeQuantize>(op);
         if (!fq) {
@@ -892,7 +892,7 @@ bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ngraph::Node
             size_t count_not_unit_axis = 0;
             auto shape = getNormalizedDimsBySize(fq->get_input_shape(i), dataRank);
-            if (ngraph::shape_size(shape) != 1) {
+            if (ov::shape_size(shape) != 1) {
                 size_t not_unit_axis = 0;
                 for (size_t i = 0; i < shape.size(); i++) {
                     if (shape[i] > 1) {
@@ -916,7 +916,7 @@ bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ngraph::Node
         }
         if (fq->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NONE &&
             fq->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NUMPY) {
-            errorMessage = "Doesn't support broadcast type: " + ngraph::as_string(fq->get_auto_broadcast().m_type);
+            errorMessage = "Doesn't support broadcast type: " + ov::as_string(fq->get_auto_broadcast().m_type);
             return false;
         }
     } catch (...) {
@@ -960,7 +960,7 @@ struct FakeQuantKey {
 };
 } // namespace
-FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
+FakeQuantize::FakeQuantize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
         Node(op, context, PassThroughShapeInferFactory()) {
     std::string errorMessage;
     if (isSupportedOperation(op, errorMessage)) {
@@ -994,7 +994,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC
         const auto ilShape = getNormalizedDimsBySize(fq->get_input_shape(1), dataRank);
         auto inputLowAxis = initAxisIdx(ilShape);
-        isInputLowBroadcasted = (ngraph::is_scalar(ilShape) || ilShape[inputLowAxis] == 1);
+        isInputLowBroadcasted = (ov::is_scalar(ilShape) || ilShape[inputLowAxis] == 1);
         if (!isInputLowBroadcasted) {
             axis = inputLowAxis;
             axisSize = ilShape[inputLowAxis];
@@ -1002,7 +1002,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC
         const auto ihShape = getNormalizedDimsBySize(fq->get_input_shape(2), dataRank);
         auto inputHighAxis = initAxisIdx(ihShape);
-        isInputHighBroadcasted = (ngraph::is_scalar(ihShape) || ihShape[inputHighAxis] == 1);
+        isInputHighBroadcasted = (ov::is_scalar(ihShape) || ihShape[inputHighAxis] == 1);
         if (!isInputHighBroadcasted) {
             axis = inputHighAxis;
             axisSize = ihShape[inputHighAxis];
@@ -1010,7 +1010,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC
         const auto olShape = getNormalizedDimsBySize(fq->get_input_shape(3), dataRank);
         auto outputLowAxis = initAxisIdx(olShape);
-        isOutputLowBroadcasted = (ngraph::is_scalar(olShape) || olShape[outputLowAxis] == 1);
+        isOutputLowBroadcasted = (ov::is_scalar(olShape) || olShape[outputLowAxis] == 1);
         if (!isOutputLowBroadcasted) {
             axis = outputLowAxis;
             axisSize = olShape[outputLowAxis];
@@ -1018,16 +1018,16 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC
         const auto ohShape = getNormalizedDimsBySize(fq->get_input_shape(4), dataRank);
         auto outputHighAxis = initAxisIdx(ohShape);
-        isOutputHighBroadcasted = (ngraph::is_scalar(ohShape) || ohShape[outputHighAxis] == 1);
+        isOutputHighBroadcasted = (ov::is_scalar(ohShape) || ohShape[outputHighAxis] == 1);
         if (!isOutputHighBroadcasted) {
             axis = outputHighAxis;
             axisSize = ohShape[outputHighAxis];
         }
-        auto inputLowAxisSize = ngraph::is_scalar(ilShape) ? 1 : ilShape[inputLowAxis];
-        auto inputHighAxisSize = ngraph::is_scalar(ihShape) ? 1 : ihShape[inputHighAxis];
-        auto outputLowAxisSize = ngraph::is_scalar(olShape) ? 1 : olShape[outputLowAxis];
-        auto outputHighAxisSize = ngraph::is_scalar(ohShape) ? 1 : ohShape[outputHighAxis];
+        auto inputLowAxisSize = ov::is_scalar(ilShape) ? 1 : ilShape[inputLowAxis];
+        auto inputHighAxisSize = ov::is_scalar(ihShape) ? 1 : ihShape[inputHighAxis];
+        auto outputLowAxisSize = ov::is_scalar(olShape) ? 1 : olShape[outputLowAxis];
+        auto outputHighAxisSize = ov::is_scalar(ohShape) ? 1 : ohShape[outputHighAxis];
         if (axisSize != -1 && !dimsEqualWeak(axisSize, getInputShapeAtPort(0).getDims()[axis])) {
             IE_THROW() << errorPrefix << "has different quantization axis size on 'data' and 'range' inputs";
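
ov::is_scalar and ov::shape_size are the free-function shape helpers behind the FakeQuantize broadcast bookkeeping above; only their namespace changed. A sketch with a hypothetical predicate that condenses those checks; it is not plugin code.

    #include "openvino/core/shape.hpp"

    // Hypothetical predicate: a range input broadcasts when it is a scalar
    // (rank 0), holds a single element overall, or has extent 1 along the
    // quantization axis.
    static bool rangeInputBroadcasts(const ov::Shape& shape, size_t axis) {
        return ov::is_scalar(shape) || ov::shape_size(shape) == 1 ||
               (axis < shape.size() && shape[axis] == 1);
    }

    int main() {
        return rangeInputBroadcasts(ov::Shape{1, 16, 1, 1}, 0) ? 0 : 1;
    }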


@@ -77,7 +77,7 @@ struct jit_uni_quantize_kernel {
 class FakeQuantize : public Node {
 public:
-    FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    FakeQuantize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void initSupportedPrimitiveDescriptors() override;
     void getSupportedDescriptors() override;
@@ -142,7 +142,7 @@ public:
                 bool allowBinary = true,
                 bool do_rounding = true);
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     enum BroadcastingPolicy {
         PerChannel, // all FQ operations are per channel


@@ -98,7 +98,7 @@ bool FCKey::operator==(const FCKey &rhs) const {
 } // namespace
-bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         const auto fc = std::dynamic_pointer_cast<const FullyConnectedNode>(op);
         if (!fc) {
@@ -126,7 +126,7 @@ bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ngraph::No
     return true;
 }
-FullyConnected::FullyConnected(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+FullyConnected::FullyConnected(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, FCShapeInferFactory(op)), withBiases(false) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage))


@@ -18,7 +18,7 @@ namespace node {
 class FullyConnected : public Node {
 public:
-    FullyConnected(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    FullyConnected(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     std::vector<dnnl::memory::format_tag> getAvailableFormatsForDims(const Shape &dims) const override;
     void getSupportedDescriptors() override;
@@ -51,7 +51,7 @@ public:
     bool canFuse(const NodePtr& node) const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     void prepareParams() override;
     void executeDynamicImpl(dnnl::stream strm) override;


@@ -17,7 +17,7 @@ namespace node {
 class Gather : public Node {
 public:
-    Gather(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Gather(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -27,7 +27,7 @@ public:
     bool isExecutable() const override;
     void resolveInPlaceEdges(Edge::LOOK look) override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
     struct threadExecParams {
         std::vector<int> specIdxInBytes;


@@ -32,7 +32,7 @@ bool GatherElements::isSupportedOperation(const std::shared_ptr<const ov::Node>&
     return true;
 }
-GatherElements::GatherElements(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+GatherElements::GatherElements(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {


@@ -16,7 +16,7 @@ namespace node {
 class GatherElements : public Node {
 public:
-    GatherElements(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    GatherElements(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;


@@ -21,7 +21,7 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool GatherND::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool GatherND::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
         if (!one_of(op->get_type_info(), ngraph::op::v5::GatherND::get_type_info_static(), ngraph::op::v8::GatherND::get_type_info_static())) {
             errorMessage = "Node is not an instance of the GatherND operation from operation set v5 and v8.";
@@ -34,7 +34,7 @@ bool GatherND::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& o
     return true;
 }
-GatherND::GatherND(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+GatherND::GatherND(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
@@ -47,9 +47,9 @@ GatherND::GatherND(const std::shared_ptr<ngraph::Node>& op, const GraphContext::
     const size_t dataInputRank = getInputShapeAtPort(GATHERND_DATA).getRank();
     const size_t indicesInputRank = getInputShapeAtPort(GATHERND_INDEXES).getRank();
-    if (auto gatherNdOp = ngraph::as_type_ptr<const ngraph::op::v8::GatherND>(op)) {
+    if (auto gatherNdOp = ov::as_type_ptr<const ngraph::op::v8::GatherND>(op)) {
         attrs.batchDims = gatherNdOp->get_batch_dims();
-    } else if (auto gatherNdOp = ngraph::as_type_ptr<const ngraph::op::v5::GatherND>(op)) {
+    } else if (auto gatherNdOp = ov::as_type_ptr<const ngraph::op::v5::GatherND>(op)) {
         attrs.batchDims = gatherNdOp->get_batch_dims();
     } else {
         THROW_ERROR << "has support only opset5.";


@@ -16,14 +16,14 @@ namespace node {
 class GatherND : public Node {
 public:
-    GatherND(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    GatherND(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
     void execute(dnnl::stream strm) override;
     bool created() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 protected:
     void executeDynamicImpl(dnnl::stream strm) override;


@@ -17,9 +17,9 @@ namespace ov {
 namespace intel_cpu {
 namespace node {
-bool GatherTree::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool GatherTree::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        const auto gatherElementsOp = ngraph::as_type_ptr<const ngraph::op::v1::GatherTree>(op);
+        const auto gatherElementsOp = ov::as_type_ptr<const ngraph::op::v1::GatherTree>(op);
         if (!gatherElementsOp) {
             errorMessage = "Node is not an instance of the GatherTree operation from operation set v1.";
             return false;
@@ -30,7 +30,7 @@ bool GatherTree::isSupportedOperation(const std::shared_ptr<const ngraph::Node>&
     return true;
 }
-GatherTree::GatherTree(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+GatherTree::GatherTree(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {


@@ -13,7 +13,7 @@ namespace node {
 class GatherTree : public Node {
 public:
-    GatherTree(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    GatherTree(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -23,7 +23,7 @@ public:
     void prepareParams() override;
     void executeDynamicImpl(dnnl::stream strm) override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     struct GatherTreeExecutor {


@@ -277,9 +277,9 @@ void fill_output_blobs(const float* proposals, const int* roi_indices,
 } // namespace
 bool GenerateProposals::isSupportedOperation
-    (const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+    (const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ngraph::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op)) {
+        if (!ov::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op)) {
             errorMessage = "Node is not an instance of the Proposal from the operations set v0.";
             return false;
         }
@@ -289,14 +289,14 @@ bool GenerateProposals::isSupportedOperation
     return true;
 }
-GenerateProposals::GenerateProposals(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+GenerateProposals::GenerateProposals(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
     : Node(op, context, InternalDynShapeInferFactory()) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
         IE_THROW(NotImplemented) << errorMessage;
     }
-    auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op);
+    auto proposalOp = ov::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op);
     auto proposalAttrs = proposalOp->get_attrs();
     min_size_ = proposalAttrs.min_size;


@@ -13,7 +13,7 @@ namespace node {
 class GenerateProposals : public Node {
 public:
-    GenerateProposals(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    GenerateProposals(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
     void getSupportedDescriptors() override {};
     void initSupportedPrimitiveDescriptors() override;
@@ -23,7 +23,7 @@ public:
     bool needShapeInfer() const override;
     bool needPrepareParams() const override;
     void executeDynamicImpl(dnnl::stream strm) override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 private:
     // Inputs:

Some files were not shown because too many files have changed in this diff.