[CPU] cleanup misc IE and ngraph (#21007)
1. InferenceEngine::SizeVector
2. InferenceEngine::parallel_for
3. All ngraph namespace except ngraph::op
parent f97e7f1c9d
commit 5dd317c733
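The renames below are mechanical: `ngraph::Node` becomes `ov::Node`, `ngraph::as_type_ptr` becomes `ov::as_type_ptr`, `ngraph::Shape`/`ngraph::Function` become `ov::Shape`/`ov::Model`, and `InferenceEngine::SizeVector` gives way to the CPU plugin's `VectorDims`. As a minimal sketch of the pattern (not code from this PR; it assumes the OpenVINO 2.0 headers, where the legacy `ngraph` spellings are deprecated aliases of their `ov` counterparts, so the change is purely textual):

```cpp
// Hypothetical illustration of the rename applied throughout this diff.
// Assumes OpenVINO 2.0 headers, where ngraph::Node and ngraph::as_type_ptr
// are deprecated aliases of ov::Node and ov::as_type_ptr.
#include <memory>
#include <openvino/core/node.hpp>   // ov::Node
#include <openvino/core/type.hpp>   // ov::as_type_ptr
#include <openvino/op/clamp.hpp>    // ov::op::v0::Clamp

// Old spelling (pre-cleanup):
//   float clamp_min(const std::shared_ptr<ngraph::Node>& n) {
//       return ngraph::as_type_ptr<ov::op::v0::Clamp>(n)->get_min();
//   }

// New spelling (post-cleanup): same behavior, canonical namespace.
float clamp_min(const std::shared_ptr<ov::Node>& n) {
    auto clamp = ov::as_type_ptr<ov::op::v0::Clamp>(n);
    return clamp ? static_cast<float>(clamp->get_min()) : 0.0f;
}
```

The container swap is likewise type-compatible as far as the plugin defines these aliases: both `InferenceEngine::SizeVector` and `ov::intel_cpu::VectorDims` are `std::vector`-of-`size_t`-style types, so call sites only change their spelling.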
@@ -55,7 +55,6 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
       m_name{model->get_name()},
       m_loaded_from_cache(loaded_from_cache) {
-    bool isFloatModel = !ov::op::util::has_op_with_type<ngraph::op::FakeQuantize>(m_model);
 
     m_mutex = std::make_shared<std::mutex>();
     const auto& core = m_plugin->get_core();
     if (!core)
@@ -31,7 +31,7 @@ namespace ov {
         [this](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr<snippets::Emitter> { \
             return std::make_shared<e_type>(h.get(), isa, expr); \
         }, \
-        [](const std::shared_ptr<ngraph::Node>& n) -> std::set<std::vector<element::Type>> { \
+        [](const std::shared_ptr<ov::Node>& n) -> std::set<std::vector<element::Type>> { \
             return e_type::get_supported_precisions(n); \
         } \
     }
@@ -17,7 +17,7 @@ using namespace Xbyak;
 namespace ov {
 namespace intel_cpu {
 
-jit_convert_emitter::jit_convert_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+jit_convert_emitter::jit_convert_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {
     input_type = node->get_input_element_type(0);
     output_type = node->get_output_element_type(0);
@@ -58,7 +58,7 @@ void jit_convert_emitter::float2bfloat(const std::vector<size_t> &in_vec_idxs, c
 }
 
 jit_convert_truncation_emitter::jit_convert_truncation_emitter(jit_generator *host, cpu_isa_t host_isa,
-                                                               const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+                                                               const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_convert_emitter(host, host_isa, node, exec_prc) {
     prepare_table();
 }
@@ -193,7 +193,7 @@ void jit_convert_truncation_emitter::dword2int8(const std::vector<size_t> &in_ve
 }
 
 jit_convert_saturation_emitter::jit_convert_saturation_emitter(jit_generator *host, cpu_isa_t host_isa,
-                                                               const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+                                                               const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_convert_emitter(host, host_isa, node, exec_prc) {
 }
 
@@ -14,7 +14,7 @@ namespace intel_cpu {
 class jit_convert_emitter : public jit_emitter {
 public:
     jit_convert_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-                        const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+                        const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
 
@@ -47,7 +47,7 @@ protected:
 class jit_convert_truncation_emitter : public jit_convert_emitter {
 public:
     jit_convert_truncation_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-                                   const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+                                   const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
 private:
     void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
@@ -68,7 +68,7 @@ private:
 class jit_convert_saturation_emitter : public jit_convert_emitter {
 public:
     jit_convert_saturation_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-                                   const std::shared_ptr<ngraph::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
+                                   const std::shared_ptr<ov::Node>& n, InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
 private:
     void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
@@ -13,11 +13,11 @@ using namespace Xbyak;
 namespace ov {
 namespace intel_cpu {
 
-std::set<std::vector<element::Type>> jit_dnnl_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_dnnl_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
-jit_dnnl_emitter::jit_dnnl_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, InferenceEngine::Precision exec_prc)
+jit_dnnl_emitter::jit_dnnl_emitter(jit_generator *host, cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, InferenceEngine::Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {
 
     kind = dnnl_eltwise_tanh;
@@ -20,13 +20,13 @@ public:
 
     void emit_impl(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs) const override {};
 
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 protected:
     jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
                      dnnl_alg_kind_t algKind, float inpAlpha, float inpBeta,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
-    jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_dnnl_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
     void set_injector();
 
@@ -13,7 +13,7 @@ namespace intel_cpu {
 
 class jit_relu_emitter : public jit_dnnl_emitter {
 public:
-    jit_relu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_relu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_relu;
@@ -26,7 +26,7 @@ public:
 
 class jit_sigmoid_emitter : public jit_dnnl_emitter {
 public:
-    jit_sigmoid_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_sigmoid_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_logistic;
@@ -39,7 +39,7 @@ public:
 
 class jit_tanh_emitter : public jit_dnnl_emitter {
 public:
-    jit_tanh_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_tanh_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                      InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_tanh;
@@ -52,11 +52,11 @@ public:
 
 class jit_elu_emitter : public jit_dnnl_emitter {
 public:
-    jit_elu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_elu_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                     InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_elu;
-        alpha = ngraph::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();
+        alpha = ov::as_type_ptr<ov::op::v0::Elu>(n)->get_alpha();
         beta = 0.f;
 
         set_injector();
@@ -65,7 +65,7 @@ public:
 
 class jit_exp_emitter : public jit_dnnl_emitter {
 public:
-    jit_exp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_exp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                     InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_exp;
@@ -78,7 +78,7 @@ public:
 
 class jit_abs_emitter : public jit_dnnl_emitter {
 public:
-    jit_abs_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_abs_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                     InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_abs;
@@ -91,11 +91,11 @@ public:
 
 class jit_clamp_emitter : public jit_dnnl_emitter {
 public:
-    jit_clamp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_clamp_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                       InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_clip;
-        auto op = ngraph::as_type_ptr<ov::op::v0::Clamp>(n);
+        auto op = ov::as_type_ptr<ov::op::v0::Clamp>(n);
         alpha = op->get_min();
         beta = op->get_max();
 
@@ -105,11 +105,11 @@ public:
 
 class jit_swish_emitter : public jit_dnnl_emitter {
 public:
-    jit_swish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_swish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                       InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_swish;
-        auto op = ngraph::as_type_ptr<ov::intel_cpu::SwishNode>(n);
+        auto op = ov::as_type_ptr<ov::intel_cpu::SwishNode>(n);
         alpha = op->get_alpha();
         beta = 0.f;
 
@@ -119,7 +119,7 @@ public:
 
 class jit_hswish_emitter : public jit_dnnl_emitter {
 public:
-    jit_hswish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_hswish_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         // since v3.0 oneDNN has flexible version of hardswish, ov still uses the one with hardcoded alpha and beta
@@ -133,7 +133,7 @@ public:
 
 class jit_gelu_v0_emitter : public jit_dnnl_emitter {
 public:
-    jit_gelu_v0_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_gelu_v0_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         kind = dnnl_eltwise_gelu_erf;
@@ -144,7 +144,7 @@ public:
 
 class jit_gelu_v7_emitter : public jit_dnnl_emitter {
 public:
-    jit_gelu_v7_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_gelu_v7_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32)
         : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         auto gelu = getNgraphOpAs<ngraph::op::v7::Gelu>(n);
@@ -165,7 +165,7 @@ public:
     jit_round_emitter(
         dnnl::impl::cpu::x64::jit_generator *host,
         dnnl::impl::cpu::x64::cpu_isa_t host_isa,
-        const std::shared_ptr<ngraph::Node>& n,
+        const std::shared_ptr<ov::Node>& n,
         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32) : jit_dnnl_emitter(host, host_isa, n, exec_prc) {
         const auto round = getNgraphOpAs<ngraph::op::v5::Round>(n);
         const auto mode = round->get_mode();
@@ -77,7 +77,7 @@ void jit_add_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const std
     }
 }
 
-std::set<std::vector<element::Type>> jit_add_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_add_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -168,7 +168,7 @@ size_t jit_mul_add_emitter::aux_vecs_count() const {
     return 1;
 }
 
-std::set<std::vector<element::Type>> jit_mul_add_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_mul_add_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32, element::f32}, {element::i32, element::i32, element::i32}};
 }
 
@@ -215,7 +215,7 @@ void jit_subtract_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, cons
     }
 }
 
-std::set<std::vector<element::Type>> jit_subtract_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_subtract_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -262,7 +262,7 @@ void jit_multiply_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, cons
     }
 }
 
-std::set<std::vector<element::Type>> jit_multiply_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_multiply_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -323,7 +323,7 @@ void jit_divide_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const
     }
 }
 
-std::set<std::vector<element::Type>> jit_divide_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_divide_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -339,7 +339,7 @@ jit_floor_emitter::jit_floor_emitter(x64::jit_generator *host, x64::cpu_isa_t ho
 
 size_t jit_floor_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_floor_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_floor_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -371,7 +371,7 @@ jit_ceiling_emitter::jit_ceiling_emitter(x64::jit_generator *host, x64::cpu_isa_
 
 size_t jit_ceiling_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_ceiling_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_ceiling_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -404,7 +404,7 @@ jit_floor_mod_emitter::jit_floor_mod_emitter(x64::jit_generator *host, x64::cpu_
 
 size_t jit_floor_mod_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_floor_mod_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_floor_mod_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -458,7 +458,7 @@ jit_mod_emitter::jit_mod_emitter(x64::jit_generator *host, x64::cpu_isa_t host_i
 
 size_t jit_mod_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_mod_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_mod_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
    return {{element::f32, element::f32}};
 }
 
@@ -548,7 +548,7 @@ void jit_maximum_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const
     }
 }
 
-std::set<std::vector<element::Type>> jit_maximum_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_maximum_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -596,7 +596,7 @@ void jit_minimum_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const
     }
 }
 
-std::set<std::vector<element::Type>> jit_minimum_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_minimum_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -651,7 +651,7 @@ void jit_squared_difference_emitter::emit_isa(const std::vector<size_t> &in_vec_
     }
 }
 
-std::set<std::vector<element::Type>> jit_squared_difference_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_squared_difference_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}, {element::i32, element::i32}};
 }
 
@@ -664,7 +664,7 @@ jit_power_dynamic_emitter::jit_power_dynamic_emitter(x64::jit_generator *host, x
 
 size_t jit_power_dynamic_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_power_dynamic_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_power_dynamic_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -779,7 +779,7 @@ jit_equal_emitter::jit_equal_emitter(x64::jit_generator *host, x64::cpu_isa_t ho
 
 size_t jit_equal_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -842,7 +842,7 @@ jit_not_equal_emitter::jit_not_equal_emitter(x64::jit_generator *host, x64::cpu_
 
 size_t jit_not_equal_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_not_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_not_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -905,7 +905,7 @@ jit_greater_emitter::jit_greater_emitter(x64::jit_generator *host, x64::cpu_isa_
 
 size_t jit_greater_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_greater_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_greater_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -969,7 +969,7 @@ jit_greater_equal_emitter::jit_greater_equal_emitter(x64::jit_generator *host, x
 
 size_t jit_greater_equal_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_greater_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_greater_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1032,7 +1032,7 @@ jit_less_emitter::jit_less_emitter(x64::jit_generator *host, x64::cpu_isa_t host
 
 size_t jit_less_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_less_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_less_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1095,7 +1095,7 @@ jit_less_equal_emitter::jit_less_equal_emitter(x64::jit_generator *host, x64::cp
 
 size_t jit_less_equal_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_less_equal_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_less_equal_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1159,7 +1159,7 @@ jit_logical_and_emitter::jit_logical_and_emitter(x64::jit_generator *host, x64::
 
 size_t jit_logical_and_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_logical_and_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_and_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1243,7 +1243,7 @@ jit_logical_or_emitter::jit_logical_or_emitter(x64::jit_generator *host, x64::cp
 
 size_t jit_logical_or_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_logical_or_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_or_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1326,7 +1326,7 @@ jit_logical_xor_emitter::jit_logical_xor_emitter(x64::jit_generator *host, x64::
 
 size_t jit_logical_xor_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_logical_xor_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_xor_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1409,7 +1409,7 @@ jit_logical_not_emitter::jit_logical_not_emitter(x64::jit_generator *host, x64::
 
 size_t jit_logical_not_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_logical_not_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_logical_not_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -1483,7 +1483,7 @@ jit_power_static_emitter::jit_power_static_emitter(x64::jit_generator *host, x64
 
 size_t jit_power_static_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_power_static_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_power_static_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -1661,7 +1661,7 @@ jit_prelu_emitter::jit_prelu_emitter(x64::jit_generator *host, x64::cpu_isa_t ho
 }
 size_t jit_prelu_emitter::get_inputs_num() const { return 2; }
 
-std::set<std::vector<element::Type>> jit_prelu_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_prelu_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32}};
 }
 
@@ -1720,7 +1720,7 @@ jit_sqrt_emitter::jit_sqrt_emitter(x64::jit_generator *host, x64::cpu_isa_t host
 
 size_t jit_sqrt_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_sqrt_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_sqrt_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -1751,7 +1751,7 @@ jit_negative_emitter::jit_negative_emitter(x64::jit_generator *host, x64::cpu_is
 
 size_t jit_negative_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_negative_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_negative_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -1789,7 +1789,7 @@ jit_erf_emitter::jit_erf_emitter(x64::jit_generator *host, x64::cpu_isa_t host_i
 
 size_t jit_erf_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_erf_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_erf_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -1973,7 +1973,7 @@ jit_soft_sign_emitter::jit_soft_sign_emitter(x64::jit_generator *host, x64::cpu_
 
 size_t jit_soft_sign_emitter::get_inputs_num() const { return 1; }
 
-std::set<std::vector<element::Type>> jit_soft_sign_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_soft_sign_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32}};
 }
 
@@ -2181,14 +2181,14 @@ void jit_is_nan_emitter::register_table_entries() {
 }
 
 /// SELECT ///
-jit_select_emitter::jit_select_emitter(x64::jit_generator *host, x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& node, Precision exec_prc)
+jit_select_emitter::jit_select_emitter(x64::jit_generator *host, x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& node, Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {}
 jit_select_emitter::jit_select_emitter(x64::jit_generator *host, x64::cpu_isa_t host_isa, Precision exec_prc)
     : jit_emitter(host, host_isa, exec_prc) {}
 
 size_t jit_select_emitter::get_inputs_num() const { return 3; }
 
-std::set<std::vector<element::Type>> jit_select_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_select_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {{element::f32, element::f32, element::f32}};
 }
 
@@ -16,7 +16,7 @@ public:
     jit_add_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -32,7 +32,7 @@ public:
     jit_mul_add_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -51,7 +51,7 @@ public:
     jit_subtract_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -68,7 +68,7 @@ public:
     jit_multiply_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -86,7 +86,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -104,7 +104,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -121,7 +121,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -138,7 +138,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -157,7 +157,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -175,7 +175,7 @@ public:
     jit_maximum_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -192,7 +192,7 @@ public:
     jit_minimum_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -211,7 +211,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -229,7 +229,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -247,7 +247,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -268,7 +268,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -289,7 +289,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -310,7 +310,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -331,7 +331,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -353,7 +353,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -374,7 +374,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -395,7 +395,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -416,7 +416,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -436,7 +436,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -457,7 +457,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 
 private:
@@ -482,7 +482,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -501,7 +501,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -516,7 +516,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t>& in, const std::vector<size_t>& out) const override;
@@ -534,7 +534,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(
@@ -556,7 +556,7 @@ public:
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 private:
     void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;
@@ -579,7 +579,7 @@ public:
     }
 
     size_t get_inputs_num() const override { return 1; };
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{element::f32}};
     }
 
@@ -607,7 +607,7 @@ public:
     }
 
     size_t get_inputs_num() const override { return 1; };
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{element::f32}};
     }
 
@@ -637,7 +637,7 @@ public:
     }
 
     size_t get_inputs_num() const override { return 1; }
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{element::f32}};
     }
 
@@ -656,11 +656,11 @@ class jit_select_emitter : public jit_emitter {
 public:
     jit_select_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
-    jit_select_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_select_emitter(dnnl::impl::cpu::x64::jit_generator *host, dnnl::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ov::Node>& n,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
     size_t aux_vecs_count() const override;
 
 private:
@@ -55,7 +55,7 @@ size_t jit_emitter::aux_gprs_count() const {
     return entry_map_.empty() ? 0 : 1;
 }
 
-std::set<std::vector<element::Type>> jit_emitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> jit_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     return {};
 }
 
@@ -49,7 +49,7 @@ public:
      * Precisions are ordered, the first bigger bitness precision with the same type will be selected.
     * Empty collection means the emitter supports any input precisions.
     */
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
 
 protected:
     virtual size_t aux_gprs_count() const;
@@ -880,7 +880,7 @@ BrgemmEmitter::BrgemmEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPt
         m_load_offset_scratch = brgemm_node->get_offset_scratch();
 }
 
-std::set<std::vector<element::Type>> BrgemmEmitter::get_supported_precisions(const std::shared_ptr<ngraph::Node>& node) {
+std::set<std::vector<element::Type>> BrgemmEmitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
     const auto brgemm = as_type_ptr<ov::intel_cpu::BrgemmCPU>(node);
     OPENVINO_ASSERT(brgemm, "BrgemmEmitter::get_supported_precisions() expects BrgemmCPU node");
     switch (brgemm->get_type()) {
@@ -364,7 +364,7 @@ public:
                   const ov::snippets::lowered::ExpressionPtr& expr);
 
     size_t get_inputs_num() const override { return m_with_scratch ? 3 : 2; }
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr);
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr);
     size_t aux_gprs_count() const override;
 
     static size_t get_in_leading_dim(const VectorDims& shape, const std::vector<size_t>& layout);
@@ -430,7 +430,7 @@ public:
                   const ov::snippets::lowered::ExpressionPtr& expr);
 
     size_t get_inputs_num() const override {return 1;}
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{element::i8}, {element::bf16}};
     }
 
@@ -469,7 +469,7 @@ public:
                   const ov::snippets::lowered::ExpressionPtr& expr);
 
     size_t get_inputs_num() const override {return 1;}
-    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ngraph::Node>& node = nullptr) {
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node = nullptr) {
        return {{element::f32}};
     }
 
@@ -183,11 +183,11 @@ std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
     return opsets;
 }
 
-std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node>&) {
+std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ov::Node>&) {
     return {};
 }
 
-InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
+InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ov::Node>& node, const std::string& implType) {
     return nullptr;
 }
 
@@ -14,8 +14,8 @@ public:
     void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
     void Unload() noexcept override;
     std::map<std::string, ngraph::OpSet> getOpSets() override;
-    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
-    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
+    std::vector<std::string> getImplTypes(const std::shared_ptr<ov::Node>& node) override;
+    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ov::Node>& node, const std::string& implType) override;
 };
 
 }   // namespace intel_cpu
@@ -17,7 +17,7 @@ void ExtensionManager::AddExtension(const IExtensionPtr& extension) {
     _extensions.push_back(extension);
 }
 
-InferenceEngine::ILayerImpl::Ptr ExtensionManager::CreateImplementation(const std::shared_ptr<ngraph::Node>& op) {
+InferenceEngine::ILayerImpl::Ptr ExtensionManager::CreateImplementation(const std::shared_ptr<ov::Node>& op) {
     if (!op)
         IE_THROW() << "Cannot get nGraph operation!";
     for (const auto& ext : _extensions) {
@@ -16,7 +16,7 @@ class ExtensionManager {
 public:
     using Ptr = std::shared_ptr<ExtensionManager>;
     ExtensionManager() = default;
-    InferenceEngine::ILayerImpl::Ptr CreateImplementation(const std::shared_ptr<ngraph::Node>& op);
+    InferenceEngine::ILayerImpl::Ptr CreateImplementation(const std::shared_ptr<ov::Node>& op);
     void AddExtension(const InferenceEngine::IExtensionPtr& extension);
     const std::vector<InferenceEngine::IExtensionPtr> & Extensions() const;
 
@@ -670,7 +670,7 @@ void Graph::AllocateWithReuse() {
     MemorySolver staticMemSolver(definedBoxes);
     size_t total_size = static_cast<size_t>(staticMemSolver.solve()) * alignment;
 
-    memWorkspace = std::make_shared<Memory>(getEngine(), DnnlBlockedMemoryDesc(InferenceEngine::Precision::I8, Shape(InferenceEngine::SizeVector{total_size})));
+    memWorkspace = std::make_shared<Memory>(getEngine(), DnnlBlockedMemoryDesc(InferenceEngine::Precision::I8, Shape(VectorDims{total_size})));
 
     if (edge_clusters.empty())
         return;
@ -113,16 +113,16 @@ std::map<std::string, std::string> extract_node_metadata(const NodePtr &node) {

} // namespace

-std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph) {
-    std::map<NodePtr, std::shared_ptr<ngraph::Node> > node2layer;
+std::shared_ptr<ov::Model> dump_graph_as_ie_ngraph_net(const Graph &graph) {
+    std::map<NodePtr, std::shared_ptr<ov::Node> > node2layer;

-    ngraph::ResultVector results;
-    ngraph::ParameterVector params;
-    ngraph::NodeVector to_hold;
+    ov::ResultVector results;
+    ov::ParameterVector params;
+    ov::NodeVector to_hold;

    auto get_inputs = [&] (const NodePtr & node) {
        auto pr_edges = node->getParentEdges();
-        ngraph::OutputVector inputs(pr_edges.size());
+        ov::OutputVector inputs(pr_edges.size());

        for (size_t i = 0; i < pr_edges.size(); i++) {
            auto edge = node->getParentEdgeAt(i);
@ -162,7 +162,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph
        }

        auto meta_data = extract_node_metadata(node);
-        std::shared_ptr<ngraph::Node> return_node;
+        std::shared_ptr<ov::Node> return_node;
        if (is_input) {
            auto& desc = node->getChildEdgeAt(0)->getMemory().getDesc();
            auto param = std::make_shared<ngraph::op::Parameter>(details::convertPrecision(desc.getPrecision()), desc.getShape().toPartialShape());
@ -192,7 +192,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph
        return return_node;
    };

-    ngraph::NodeVector nodes;
+    ov::NodeVector nodes;
    nodes.reserve(graph.graphNodes.size());
    for (auto &node : graph.graphNodes) { // important: graph.graphNodes are in topological order
        nodes.emplace_back(create_ngraph_node(node));
@ -204,7 +204,7 @@ std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph
        holder->add_control_dependency(node);
    }

-    return std::make_shared<ngraph::Function>(results, params, graph._name);
+    return std::make_shared<ov::Model>(results, params, graph._name);
}
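The dumper's closing call follows the usual `ov::Model` construction pattern: collect `Result` and `Parameter` nodes while walking the graph, then hand both vectors to the model. A minimal self-contained sketch of that pattern (the model name is arbitrary):

```cpp
#include <memory>
#include <openvino/openvino.hpp>

std::shared_ptr<ov::Model> make_identity_model() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    auto result = std::make_shared<ov::op::v0::Result>(param);
    return std::make_shared<ov::Model>(ov::ResultVector{result},
                                       ov::ParameterVector{param},
                                       "dump_example");
}
```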

#ifdef CPU_DEBUG_CAPS
@ -227,7 +227,7 @@ void serializeToXML(const Graph &graph, const std::string& path) {
        return;

    std::string binPath;
-    ngraph::pass::Manager manager;
+    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Serialize>(path,
                                               binPath,
                                               ov::pass::Serialize::Version::IR_V10);
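Usage sketch for the pass-manager idiom above; the file names are placeholders, and the IR version argument may be omitted to take the default:

```cpp
#include <memory>
#include <openvino/pass/manager.hpp>
#include <openvino/pass/serialize.hpp>

void serialize_to_ir(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Serialize>("graph.xml", "graph.bin");
    manager.run_passes(model);  // writes the model as OpenVINO IR
}
```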
@ -13,7 +13,7 @@
namespace ov {
namespace intel_cpu {

-std::shared_ptr<ngraph::Function> dump_graph_as_ie_ngraph_net(const Graph &graph);
+std::shared_ptr<ov::Model> dump_graph_as_ie_ngraph_net(const Graph &graph);
#ifdef CPU_DEBUG_CAPS
void serialize(const Graph &graph);
void summary_perf(const Graph &graph);
@ -617,7 +617,7 @@ void GraphOptimizer::FuseConvolutionMatMulDeconvAndBias(Graph &graph) {
    // Bias -> Reshape -> Conv/Deconv/FC
    const VectorDims flattenShape = {biasOutputShape.getElementsCount()};
    // Construct Ngraph Reshape node and CPU Reshape node.
-    auto reshapeConstInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ngraph::Shape{1}, flattenShape);
+    auto reshapeConstInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ov::Shape{1}, flattenShape);
    auto reshapeDummyInput = std::make_shared<ngraph::opset1::Parameter>(
        details::convertPrecision(biasNode->getOriginalOutputPrecisionAtPort(0)),
        biasOutputShape.toPartialShape());
@ -2647,7 +2647,7 @@ void GraphOptimizer::reshapeRnnSeq(Graph &graph) {
    auto edge = childrenEdges[j];
    auto childNode = edge->getChild();

-    const auto secondInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ngraph::Shape{1}, std::vector<int>{1});
+    const auto secondInput = std::make_shared<ngraph::opset1::Constant>(ov::element::i32, ov::Shape{1}, std::vector<int>{1});
    const auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>(
        std::make_shared<ngraph::opset1::Parameter>(details::convertPrecision(parentNode->getOriginalOutputPrecisionAtPort(0)),
                                                    parentNode->getOutputShapeAtPort(0).toPartialShape()), secondInput);
@ -143,14 +143,14 @@ size_t CpuBlockedMemoryDesc::getMaxMemSize() const {
    return maxDimsDesc->getCurrentMemSize();
}

-size_t CpuBlockedMemoryDesc::getOffset(const InferenceEngine::SizeVector& v) const {
-    InferenceEngine::SizeVector off_v = v;
+size_t CpuBlockedMemoryDesc::getOffset(const VectorDims& v) const {
+    VectorDims off_v = v;

    size_t n_blocked_dims = order.size();
    if (blockedDims.size() != n_blocked_dims || strides.size() != n_blocked_dims) {
        IE_THROW() << "Cannot calculate offset. Incorrect primitive descriptor!";
    }
-    InferenceEngine::SizeVector blockedShift(n_blocked_dims);
+    VectorDims blockedShift(n_blocked_dims);
    for (size_t i = 1; i <= n_blocked_dims; i++) {
        blockedShift[n_blocked_dims - i] = off_v[order[n_blocked_dims - i]] % blockedDims[n_blocked_dims - i];
        off_v[order[n_blocked_dims - i]] /= blockedDims[n_blocked_dims - i];
@ -167,7 +167,7 @@ size_t CpuBlockedMemoryDesc::getElementOffset(size_t elemNumber) const {
    // TODO [DS]: rewrite to support dynamic shapes
    auto& dims = shape.getStaticDims();
    size_t n_dims = dims.size();
-    InferenceEngine::SizeVector pos(n_dims);
+    VectorDims pos(n_dims);
    for (size_t rd = 1; rd <= n_dims; ++rd) {
        const size_t d = n_dims - rd;
        const size_t cur_dim = dims[d];
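The loop above decomposes each logical index into a block-local part (modulo) and a block index (division). A worked sketch of that arithmetic for one dimension, assuming a channel axis split into blocks of 8:

```cpp
#include <cstddef>

int main() {
    const std::size_t block_size = 8;  // e.g. nChw8c-style channel blocking
    const std::size_t channel = 13;    // logical channel index
    const std::size_t inner = channel % block_size;  // 5: position inside the block
    const std::size_t outer = channel / block_size;  // 1: which block
    return (outer * block_size + inner == channel) ? 0 : 1;
}
```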
@ -84,7 +84,7 @@ private:
    size_t getElementOffset(size_t elemNumber) const override;
    bool canComputeMemSizeZeroDims() const override;
    size_t getCurrentMemSizeImp() const override;
-    size_t getOffset(const InferenceEngine::SizeVector& v) const;
+    size_t getOffset(const VectorDims& v) const;
    bool isPlainFormat() const;
    bool isBlockedCFormat(size_t blk_size) const;
    bool isTailCFormat() const;
@ -80,7 +80,7 @@ Node::NodesFactory & Node::factory() {
    return factoryInstance;
}

-Node::Node(const std::shared_ptr<ngraph::Node>& op,
+Node::Node(const std::shared_ptr<ov::Node>& op,
           const GraphContext::CPtr ctx,
           const ShapeInferFactory& shapeInferFactory)
    : selectedPrimitiveDescriptorIndex(-1),
@ -1282,7 +1282,7 @@ InferenceEngine::Precision Node::getRuntimePrecision() const {
    return runtimePrecision;
}

-Node* Node::NodesFactory::create(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) {
+Node* Node::NodesFactory::create(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) {
    // getExceptionDescWithoutStatus removes redundant information from the exception message. For instance, the NotImplemented
    // exception is generated in the form: full_path_to_src_file:line_number [ NOT_IMPLEMENTED ] reason.
    // An example for gather node:
@ -588,7 +588,7 @@ protected:

    std::string originalLayers; // contains names of the original layers separated by comma

-    Node(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory);
+    Node(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory);
    Node(const std::string& type, const std::string& name, const GraphContext::CPtr ctx);

    int selectedPrimitiveDescriptorIndex = -1;
@ -744,17 +744,17 @@ constexpr uint64_t PortMask(T... rest) {
}

class Node::NodesFactory : public openvino::cc::Factory<Type,
-                                                        Node*(const std::shared_ptr<ngraph::Node>& op,
+                                                        Node*(const std::shared_ptr<ov::Node>& op,
                                                              const GraphContext::CPtr)> {
public:
    NodesFactory();

-    Node* create(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Node* create(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
};

template<typename NodeType>
struct NodeImpl : public NodeType {
-    NodeImpl(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+    NodeImpl(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
        : NodeType(op, context) {
        NodeType::perfCounters().template buildClassCounters<NodeType>(NameFromType(NodeType::getType()));
    }
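`NodesFactory` maps an operation's type to a constructor taking the `ov::Node` and the graph context. A reduced, hypothetical registry showing the same shape of API (the real factory is generated via `openvino::cc::Factory`):

```cpp
#include <functional>
#include <map>
#include <memory>
#include <string>

struct FakeNode {
    explicit FakeNode(std::string t) : type(std::move(t)) {}
    std::string type;
};

using Ctor = std::function<std::unique_ptr<FakeNode>(const std::string&)>;

int main() {
    std::map<std::string, Ctor> registry;  // keyed by operation type
    registry["Concat"] = [](const std::string& t) {
        return std::unique_ptr<FakeNode>(new FakeNode(t));
    };
    auto node = registry.at("Concat")("Concat");
    return node->type == "Concat" ? 0 : 1;
}
```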
@ -24,7 +24,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (one_of(op->get_type_info(), ngraph::op::v8::AdaptiveAvgPool::get_type_info_static())) {
            auto adaPool = std::dynamic_pointer_cast<const ngraph::opset8::AdaptiveAvgPool>(op);
@ -48,7 +48,7 @@ bool AdaptivePooling::isSupportedOperation(const std::shared_ptr<const ngraph::N
    return true;
}

-AdaptivePooling::AdaptivePooling(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+AdaptivePooling::AdaptivePooling(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, AdaptivePoolingShapeInferFactory(op)) {
    std::string errorMessage;
    if (isSupportedOperation(op, errorMessage)) {
@ -16,14 +16,14 @@ namespace node {

class AdaptivePooling : public Node {
public:
-    AdaptivePooling(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    AdaptivePooling(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override;
    void initSupportedPrimitiveDescriptors() override;
    void execute(dnnl::stream strm) override;
    bool created() const override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    int spatialDimsCount;
@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool BatchToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool BatchToSpace::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        const auto batchToSpace = std::dynamic_pointer_cast<const ngraph::opset2::BatchToSpace>(op);
        if (!batchToSpace) {
@ -30,7 +30,7 @@ bool BatchToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node
    return true;
}

-BatchToSpace::BatchToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+BatchToSpace::BatchToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, PortMask(1, 2, 3))) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -16,7 +16,7 @@ namespace node {

class BatchToSpace : public Node {
public:
-    BatchToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    BatchToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
@ -27,7 +27,7 @@ public:
    bool needShapeInfer() const override {return true;};
    void executeDynamicImpl(dnnl::stream strm) override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    template<typename T>
@ -878,7 +878,7 @@ private:
    }
};
#endif
-bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (isDynamicNgraphNode(op)) {
            errorMessage = "Doesn't support op with dynamic shapes";
@ -891,7 +891,7 @@ bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ngraph:
            return false;
        }
        if (binConv->get_mode() != ngraph::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT) {
-            errorMessage = "Doesn't support mode: " + ngraph::as_string(binConv->get_mode());
+            errorMessage = "Doesn't support mode: " + ov::as_string(binConv->get_mode());
            return false;
        }
    } catch (...) {
@ -900,7 +900,7 @@ bool BinaryConvolution::isSupportedOperation(const std::shared_ptr<const ngraph:
    return true;
}

-BinaryConvolution::BinaryConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+BinaryConvolution::BinaryConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (isSupportedOperation(op, errorMessage)) {
@ -77,7 +77,7 @@ struct jit_uni_bin_conv_kernel {

class BinaryConvolution : public Node {
public:
-    BinaryConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    BinaryConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override;
    void createPrimitive() override;
@ -90,7 +90,7 @@ public:
    void setPostOps(dnnl::primitive_attr &attr);
    bool canFuse(const NodePtr& node) const override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

    impl_desc_type getImplType() { return implType; }

@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool Bucketize::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Bucketize::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        const auto bucketsize = std::dynamic_pointer_cast<const ngraph::opset3::Bucketize>(op);
        if (!bucketsize) {
@ -30,7 +30,7 @@ bool Bucketize::isSupportedOperation(const std::shared_ptr<const ngraph::Node>&
    return true;
}

-Bucketize::Bucketize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+Bucketize::Bucketize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, PassThroughShapeInferFactory()) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -13,7 +13,7 @@ namespace node {

class Bucketize : public Node {
public:
-    Bucketize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Bucketize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
@ -26,7 +26,7 @@ public:
    void prepareParams() override;

    bool isExecutable() const override;
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    template <typename T, typename T_BOUNDARIES, typename T_IND>
@ -24,7 +24,7 @@ namespace intel_cpu {
namespace node {
namespace {

-std::tuple<Algorithm, std::string> getAlgorithmFor(const std::shared_ptr<const ngraph::Node>& op) {
+std::tuple<Algorithm, std::string> getAlgorithmFor(const std::shared_ptr<const ov::Node>& op) {
    if (ov::is_type<ov::op::v8::NV12toRGB>(op))
        return std::make_tuple(Algorithm::ColorConvertNV12toRGB, std::string());
    if (ov::is_type<ov::op::v8::NV12toBGR>(op))
@ -327,7 +327,7 @@ void RefConverter::convert(const T* y,
                           size_t width,
                           size_t stride_y,
                           size_t stride_uv) {
-    InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) {
+    ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
        T* out = dst + batch * width * height * 3;
        auto y_ptr = y + batch * stride_y;
        auto uv_ptr = uv + batch * stride_uv;
@ -569,7 +569,7 @@ public:
        const size_t stride_y = height * width * 3 / 2;
        const size_t stride_uv = height * width * 3 / 2;

-        InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) {
+        ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
            typename jit_uni_converter::Params args;
            args.y = y + batch * stride_y + h * width;
            args.u = args.v = uv + batch * stride_uv + (h / 2) * width;
@ -604,7 +604,7 @@ public:
        const size_t stride_y = height * width;
        const size_t stride_uv = height * width / 2;

-        InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) {
+        ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
            typename jit_uni_converter::Params args;
            args.y = y + batch * stride_y + h * width;
            args.u = args.v = uv + batch * stride_uv + (h / 2) * width;
@ -679,7 +679,7 @@ void RefConverter::convert(const T* y,
                           size_t width,
                           size_t stride_y,
                           size_t stride_uv) {
-    InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) {
+    ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
        T* out = dst + batch * width * height * 3;
        auto y_ptr = y + batch * stride_y;
        auto u_ptr = u + batch * stride_uv;
@ -920,7 +920,7 @@ public:
        const size_t stride_y = height * width * 3 / 2;
        const size_t stride_uv = height * width * 3 / 2;

-        InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) {
+        ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
            typename jit_uni_converter::Params args;
            args.y = y + batch * stride_y + h * width;
            args.u = u + batch * stride_uv + (h / 2) * (width / 2);
@ -957,7 +957,7 @@ public:
        const size_t stride_y = height * width;
        const size_t stride_uv = height * width / 4;

-        InferenceEngine::parallel_for2d(batch_size, height, [&](int batch, int h) {
+        ov::parallel_for2d(batch_size, height, [&](int batch, int h) {
            typename jit_uni_converter::Params args;
            args.y = y + batch * stride_y + h * width;
            args.u = u + batch * stride_uv + (h / 2) * (width / 2);
@ -999,13 +999,13 @@ const VectorDims & ColorConvert::Converter::inputDims(size_t idx) const {
    return _node->getParentEdgesAtPort(idx)[0]->getMemory().getStaticDims();
}

-bool ColorConvert::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool ColorConvert::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    Algorithm alg;
    std::tie(alg, errorMessage) = getAlgorithmFor(op);
    return alg != Algorithm::Default;
}

-ColorConvert::ColorConvert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+ColorConvert::ColorConvert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, ColorConvertShapeInferFactory(op)) {
    std::string errorMessage;
    std::tie(algorithm, errorMessage) = getAlgorithmFor(op);
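`ov::parallel_for2d` is a one-for-one replacement for the `InferenceEngine` wrapper: the functor is invoked once per `(batch, row)` pair, possibly across threads. A small usage sketch (treat the include path as an assumption about the current OpenVINO layout):

```cpp
#include <vector>
#include <openvino/core/parallel.hpp>

int main() {
    const int batch = 2, rows = 4;
    std::vector<int> hits(batch * rows, 0);
    ov::parallel_for2d(batch, rows, [&](int b, int r) {
        hits[b * rows + r] = 1;  // each (b, r) pair is visited exactly once
    });
    for (int v : hits)
        if (!v) return 1;
    return 0;
}
```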
@ -16,7 +16,7 @@ namespace node {

class ColorConvert : public Node {
public:
-    ColorConvert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    ColorConvert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
    class Converter;

public:
@ -28,7 +28,7 @@ public:
    bool needPrepareParams() const override;
    void executeDynamicImpl(dnnl::stream strm) override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    void initSupportedNV12Impls();
@ -12,11 +12,11 @@ namespace ov {
namespace intel_cpu {

struct PermuteParams {
-    InferenceEngine::SizeVector src_block_dims;
-    InferenceEngine::SizeVector dst_block_dims;
-    InferenceEngine::SizeVector src_block_order;
-    InferenceEngine::SizeVector dst_block_order;
-    InferenceEngine::SizeVector order;
+    VectorDims src_block_dims;
+    VectorDims dst_block_dims;
+    VectorDims src_block_order;
+    VectorDims dst_block_order;
+    VectorDims order;
    size_t data_size;

    size_t hash() const;
@ -25,9 +25,9 @@ struct PermuteParams {

struct jit_permute_config_params {
    uint32_t ndims;
-    InferenceEngine::SizeVector dst_block_dims;
-    InferenceEngine::SizeVector src_strides;
-    InferenceEngine::SizeVector dst_strides;
+    VectorDims dst_block_dims;
+    VectorDims src_strides;
+    VectorDims dst_strides;
    int n;
    int data_size;

@ -17,7 +17,7 @@ struct jit_uni_softmax_kernel;

static inline
void softmax_many_batches(const float *src_data, float *dst_data, int B, int C, int H, int W) {
-    InferenceEngine::parallel_for(B * H * W, [&](size_t i) {
+    ov::parallel_for(B * H * W, [&](size_t i) {
        const float *psrc = src_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W;
        float *pdst = dst_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W;

@ -37,9 +37,9 @@ bool Concat::isExecutable() const {
    return !isInPlace() && !hasEmptyOutputTensors();
}

-bool Concat::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Concat::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
-        const auto concatOp = ngraph::as_type_ptr<const ngraph::op::v0::Concat>(op);
+        const auto concatOp = ov::as_type_ptr<const ngraph::op::v0::Concat>(op);
        if (!concatOp) {
            errorMessage = "Node is not an instance of the Concat operation.";
            return false;
@ -50,7 +50,7 @@ bool Concat::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
    return true;
}

-Concat::Concat(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+Concat::Concat(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -58,7 +58,7 @@ Concat::Concat(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr
    }

    const auto inRank = getInputShapeAtPort(0).getRank();
-    auto concatOp = ngraph::as_type_ptr<ngraph::op::v0::Concat>(op);
+    auto concatOp = ov::as_type_ptr<ngraph::op::v0::Concat>(op);
    auto axis = concatOp->get_axis();
    if (axis < 0) {
        axis += inRank;
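`ov::as_type_ptr` replaces `ngraph::as_type_ptr` with identical semantics: a failed downcast returns `nullptr` rather than throwing, which is what keeps these `isSupportedOperation` checks safely `noexcept`. A sketch (the op headers shown pull in the RTTI helpers transitively):

```cpp
#include <memory>
#include <openvino/op/concat.hpp>
#include <openvino/op/relu.hpp>

int main() {
    std::shared_ptr<ov::Node> n = std::make_shared<ov::op::v0::Concat>();
    auto concat = ov::as_type_ptr<ov::op::v0::Concat>(n);  // non-null: type matches
    auto relu   = ov::as_type_ptr<ov::op::v0::Relu>(n);    // null: wrong type
    return (concat && !relu) ? 0 : 1;
}
```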
@ -16,9 +16,9 @@ namespace node {

class Concat : public Node {
public:
-    Concat(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Concat(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
    void getSupportedDescriptors() override;
    void initSupportedPrimitiveDescriptors() override;
    void initOptimalPrimitiveDescriptor() override;
@ -40,7 +40,7 @@ private:
    bool canBeInPlace = false;
    bool canOptimizeNspc = false;
    void execRef();
-    size_t inverseOrder(const InferenceEngine::SizeVector& order, size_t axis);
+    size_t inverseOrder(const VectorDims& order, size_t axis);
    void execNspcSpecCase();
    std::vector<VectorDims> inputStrides;
    std::vector<size_t> nelemToCopy; // byte moved in each iter
@ -211,9 +211,9 @@ private:
    std::vector<std::shared_ptr<Input>> outputs;
};

-bool Convolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Convolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
-        if (!ngraph::is_type<ngraph::op::v1::Convolution>(op) && !ngraph::is_type<ngraph::op::v1::GroupConvolution>(op)) {
+        if (!ov::is_type<ngraph::op::v1::Convolution>(op) && !ov::is_type<ngraph::op::v1::GroupConvolution>(op)) {
            errorMessage = "Only opset1 Convolution and GroupConvolution operations are supported";
            return false;
        }
@ -233,7 +233,7 @@ bool Convolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>
    return true;
}

-Convolution::Convolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+Convolution::Convolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)), withBiases(false), withSum(false), withDWConv(false),
      isGrouped(false), dw_conv_oc(0), dw_conv_ih(0), dw_conv_iw(0), dw_conv_in_dt(memory::data_type::undef),
      groupNum(1lu), IC(1), groupIC(1), groupOC(1), eltwisePrecision(Precision::FP32) {
@ -242,8 +242,8 @@ Convolution::Convolution(const std::shared_ptr<ngraph::Node>& op, const GraphCon
        IE_THROW(NotImplemented) << errorMessage;
    }

-    auto convolutionOp = ngraph::as_type_ptr<ngraph::op::v1::Convolution>(op);
-    auto groupConvolutionOp = ngraph::as_type_ptr<ngraph::op::v1::GroupConvolution>(op);
+    auto convolutionOp = ov::as_type_ptr<ngraph::op::v1::Convolution>(op);
+    auto groupConvolutionOp = ov::as_type_ptr<ngraph::op::v1::GroupConvolution>(op);

    if (convolutionOp) {
        algorithm = Algorithm::ConvolutionCommon;
@ -19,9 +19,9 @@ class Eltwise;

class Convolution : public Node {
public:
-    Convolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Convolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
    void getSupportedDescriptors() override;
    void createDescriptor(const std::vector<MemoryDescPtr>& inputDesc,
                          const std::vector<MemoryDescPtr>& outputDesc) override;
@ -55,7 +55,7 @@ public:
    std::vector<int32_t> inputZeroPoints;
    void initializeInputZeroPoints(const uint8_t* inputZpData, const size_t inputZpSize);

-    const InferenceEngine::SizeVector &getWeightDims() { return weightDims; }
+    const VectorDims &getWeightDims() { return weightDims; }
    const std::vector<size_t> &getStride() { return stride; }
    const std::vector<ptrdiff_t> &getDilation() { return dilation; }
    const std::vector<ptrdiff_t> &getPaddingL() { return paddingL; }
@ -142,7 +142,7 @@ private:
    std::vector<ptrdiff_t> dilation;
    std::vector<ptrdiff_t> paddingL;
    std::vector<ptrdiff_t> paddingR;
-    InferenceEngine::SizeVector weightDims;
+    VectorDims weightDims;
    std::unordered_map<int, MemoryPtr> convPostOpsArgs[2];

    size_t dw_conv_oc;
@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool Convert::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Convert::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        const auto convert = std::dynamic_pointer_cast<const ngraph::opset1::Convert>(op);
        if (!convert) {
@ -30,7 +30,7 @@ bool Convert::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
    return true;
}

-Convert::Convert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+Convert::Convert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, PassThroughShapeInferFactory()) {
    std::string errorMessage;
    if (isSupportedOperation(op, errorMessage)) {
@ -16,7 +16,7 @@ namespace node {

class Convert : public Node {
public:
-    Convert(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Convert(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
    Convert(const Shape &shape, const InferenceEngine::Precision &inPrc, const InferenceEngine::Precision &outPrc,
            const std::string &nodeName, const GraphContext::CPtr context);

@ -44,7 +44,7 @@ public:

    bool needPrepareParams() const override { return inputShapesModified(); }

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

    static bool isSupportedDesc(const MemoryDesc &desc);

@ -15,9 +15,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
-        const auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op);
+        const auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op);
        if (!greedyDecOp) {
            errorMessage = "Node is not an instance of the CTCGreedyDecoder operation from operation set v0.";
            return false;
@ -28,7 +28,7 @@ bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr<const ngraph::
    return true;
}

-CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -47,7 +47,7 @@ CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr<ngraph::Nod
    if (!dimsEqualWeak(dataDims[0], seqDims[0]) || !dimsEqualWeak(dataDims[1], seqDims[1]))
        IE_THROW() << errorPrefix << "has invalid input shapes.";

-    auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op);
+    auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v0::CTCGreedyDecoder>(op);
    mergeRepeated = greedyDecOp->get_ctc_merge_repeated();
}

@ -13,7 +13,7 @@ namespace node {

class CTCGreedyDecoder : public Node {
public:
-    CTCGreedyDecoder(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    CTCGreedyDecoder(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
@ -22,7 +22,7 @@ public:
    void executeDynamicImpl(dnnl::stream strm) override;
    bool needPrepareParams() const override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
private:
    const size_t DATA_INDEX = 0lu;
    const size_t SEQUENCE_LENGTH_INDEX = 1lu;
@ -15,9 +15,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
-        const auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op);
+        const auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op);
        if (!greedyDecOp) {
            errorMessage = "Node is not an instance of the CTCGreedyDecoderSeqLen operation from operation set v6.";
            return false;
@ -28,7 +28,7 @@ bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr<const ng
    return true;
}

-CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -46,7 +46,7 @@ CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr<ngraph::Nod
    if (!dimsEqualWeak(dataDims[0], seqDims[0]))
        IE_THROW() << errorPrefix << "has invalid input shapes.";

-    auto greedyDecOp = ngraph::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op);
+    auto greedyDecOp = ov::as_type_ptr<const ngraph::op::v6::CTCGreedyDecoderSeqLen>(op);
    mergeRepeated = greedyDecOp->get_merge_repeated();
}

@ -13,7 +13,7 @@ namespace node {

class CTCGreedyDecoderSeqLen : public Node {
public:
-    CTCGreedyDecoderSeqLen(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    CTCGreedyDecoderSeqLen(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
@ -22,7 +22,7 @@ public:
    void executeDynamicImpl(dnnl::stream strm) override;
    bool needPrepareParams() const override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    const size_t DATA_INDEX = 0lu;
@ -14,9 +14,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool CTCLoss::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool CTCLoss::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
-        const auto ctcLossOp = ngraph::as_type_ptr<const ngraph::op::v4::CTCLoss>(op);
+        const auto ctcLossOp = ov::as_type_ptr<const ngraph::op::v4::CTCLoss>(op);
        if (!ctcLossOp) {
            errorMessage = "Node is not an instance of the CTCLoss operation from operation set v4.";
            return false;
@ -27,7 +27,7 @@ bool CTCLoss::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
    return true;
}

-CTCLoss::CTCLoss(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+CTCLoss::CTCLoss(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -39,7 +39,7 @@ CTCLoss::CTCLoss(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CP
    if (getOriginalInputsNumber() != 4 && getOriginalInputsNumber() != 5)
        IE_THROW() << errorPrefix << " has invalid inputs number.";

-    auto ctcLossOp = ngraph::as_type_ptr<const ngraph::op::v4::CTCLoss>(op);
+    auto ctcLossOp = ov::as_type_ptr<const ngraph::op::v4::CTCLoss>(op);
    ctcMergeRepeated = ctcLossOp->get_ctc_merge_repeated();
    preprocessCollapseRepeated = ctcLossOp->get_preprocess_collapse_repeated();
    unique = ctcLossOp->get_unique();
@ -13,14 +13,14 @@ namespace node {

class CTCLoss : public Node {
public:
-    CTCLoss(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    CTCLoss(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
    void execute(dnnl::stream strm) override;
    bool created() const override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

    void executeDynamicImpl(dnnl::stream strm) override;
    bool needPrepareParams() const override { return false; };
@ -20,7 +20,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool CumSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool CumSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        const auto cumsum = std::dynamic_pointer_cast<const ngraph::opset3::CumSum>(op);
        if (!cumsum) {
@ -33,7 +33,7 @@ bool CumSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
    return true;
}

-CumSum::CumSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
+CumSum::CumSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
        IE_THROW(NotImplemented) << errorMessage;
@ -60,8 +60,8 @@ CumSum::CumSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr

    if (getOriginalInputsNumber() == numOfInputs) {
        const auto axis_shape = cumsum->get_input_partial_shape(AXIS);
-        if (axis_shape.is_dynamic() || !ngraph::is_scalar(axis_shape.to_shape()))
-            IE_THROW() << errorPrefix << " doesn't support 'axis' input tensor with non scalar rank";
+        if (axis_shape.is_dynamic() || !ov::is_scalar(axis_shape.to_shape()))
+            OPENVINO_THROW(errorPrefix, " doesn't support 'axis' input tensor with non scalar rank");
    }

    if (dataShape != getOutputShapeAtPort(0))
@ -13,7 +13,7 @@ namespace node {

class CumSum : public Node {
public:
-    CumSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    CumSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
@ -23,7 +23,7 @@ public:
    bool needPrepareParams() const override;
    void executeDynamicImpl(dnnl::stream strm) override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    template <typename dataType>
@ -125,7 +125,7 @@ bool DeconvKey::operator==(const DeconvKey &rhs) const {
 */
class DeconfolutionShapeInferFactory : public ShapeInferFactory {
public:
-    DeconfolutionShapeInferFactory(std::shared_ptr<ngraph::Node> op) : m_op(op) {}
+    DeconfolutionShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
    ShapeInferPtr makeShapeInfer() const override {
        if (m_op->get_input_size() > 2) {
            return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), PortMask(2));
@ -133,11 +133,11 @@ public:
        return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), EMPTY_PORT_MASK);
    }
private:
-    std::shared_ptr<ngraph::Node> m_op;
+    std::shared_ptr<ov::Node> m_op;
};
} // namespace

-bool Deconvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool Deconvolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (std::dynamic_pointer_cast<const ngraph::opset1::ConvolutionBackpropData>(op) == nullptr &&
            std::dynamic_pointer_cast<const ngraph::opset1::GroupConvolutionBackpropData>(op) == nullptr) {
@ -159,7 +159,7 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Nod
    return true;
}

-Deconvolution::Deconvolution(const std::shared_ptr<ngraph::Node>& op,
+Deconvolution::Deconvolution(const std::shared_ptr<ov::Node>& op,
                             const GraphContext::CPtr context) : Node(op, context, DeconfolutionShapeInferFactory(op)) {
    std::string errorMessage;
    errorPrefix = "Deconvolution node with name '" + getName() + "' ";
@ -220,7 +220,7 @@ Deconvolution::Deconvolution(const std::shared_ptr<ngraph::Node>& op,
    externOutShape = inputShapes.size() == 3;
    biasPort = externOutShape ? 3 : 2;
    if (externOutShape && isDynamicNode()) {
-        bool isConstOutShape = ngraph::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
+        bool isConstOutShape = ov::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
        if (isConstOutShape) {
            lastOutputSpatialDims = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(2))->cast_vector<int32_t>();
        }
@ -246,7 +246,7 @@ InferenceEngine::Blob::Ptr Deconvolution::createWeiBlobAsIO(InferenceEngine::Siz
    InferenceEngine::SizeVector dimsForBlockedDesc{dims};
    std::swap(dimsForBlockedDesc[withGroups + 0], dimsForBlockedDesc[withGroups + 1]);

-    InferenceEngine::SizeVector orderForBlockedDesc;
+    VectorDims orderForBlockedDesc;
    if (withGroups) {
        orderForBlockedDesc = {0, 2, 1};
    } else {
@ -19,7 +19,7 @@ namespace node {

class Deconvolution : public Node {
public:
-    Deconvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    Deconvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

    void getSupportedDescriptors() override;
    void initSupportedPrimitiveDescriptors() override;
@ -40,7 +40,7 @@ public:

    InferenceEngine::Precision getRuntimePrecision() const override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
    bool canFuse(const NodePtr& node) const override;

    const VectorDims& getWeightDims() const { return getInputShapeAtPort(1).getStaticDims(); }
@ -670,7 +670,7 @@ private:
    }
};
#endif
-bool DeformableConvolution::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool DeformableConvolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (!one_of(op->get_type_info(),
                    ngraph::op::v1::DeformableConvolution::get_type_info_static(),
@ -742,7 +742,7 @@ bool DefConvKey::operator==(const DefConvKey &rhs) const {

} // namespace

-DeformableConvolution::DeformableConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+DeformableConvolution::DeformableConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -71,9 +71,9 @@ struct jit_uni_def_conv_kernel {

class DeformableConvolution : public Node {
public:
-    DeformableConvolution(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    DeformableConvolution(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
    void getSupportedDescriptors() override;
    void initSupportedPrimitiveDescriptors() override;
    void execute(dnnl::stream strm) override;
@ -49,7 +49,7 @@ bool DepthToSpace::DepthToSpaceAttrs::operator==(const DepthToSpaceAttrs& rhs) c
    return result;
}

-bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        auto depthToSpace = ov::as_type_ptr<const ngraph::opset1::DepthToSpace>(op);
        if (!depthToSpace) {
@ -58,7 +58,7 @@ bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node
        }
        const auto mode = depthToSpace->get_mode();
        if (!one_of(mode, ngraph::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, ngraph::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST)) {
-            errorMessage = "Does not support mode: " + ngraph::as_string(mode);
+            errorMessage = "Does not support mode: " + ov::as_string(mode);
            return false;
        }
    } catch (...) {
@ -67,7 +67,7 @@ bool DepthToSpace::isSupportedOperation(const std::shared_ptr<const ngraph::Node
    return true;
}

-DepthToSpace::DepthToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+DepthToSpace::DepthToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -86,7 +86,7 @@ DepthToSpace::DepthToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphC
    } else if (modeNgraph == ngraph::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST) {
        attrs.mode = Mode::DEPTH_FIRST;
    } else {
-        THROW_ERROR << "doesn't support mode: " << ngraph::as_string(modeNgraph);
+        THROW_ERROR << "doesn't support mode: " << ov::as_string(modeNgraph);
    }

    attrs.blockSize = depthToSpace->get_block_size();
@ -15,9 +15,9 @@ namespace node {

class DepthToSpace : public Node {
public:
-    DepthToSpace(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    DepthToSpace(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
    void getSupportedDescriptors() override;
    void initSupportedPrimitiveDescriptors() override;
    void createPrimitive() override;
@ -51,7 +51,7 @@ bool DetectionOutput::isSupportedOperation(const std::shared_ptr<const ov::Node>
    return true;
}

-DetectionOutput::DetectionOutput(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
+DetectionOutput::DetectionOutput(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
    : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -25,7 +25,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

-bool DFT::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool DFT::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (isDynamicNgraphNode(op)) {
            errorMessage = "Doesn't support op with dynamic shapes";
@ -44,7 +44,7 @@ bool DFT::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, st
    return true;
}

-DFT::DFT(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
+DFT::DFT(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
    Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
@ -16,7 +16,7 @@ namespace node {

class DFT : public Node {
public:
-    DFT(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
+    DFT(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);
    ~DFT() override = default;

    void getSupportedDescriptors() override;
@ -26,7 +26,7 @@ public:

    void prepareParams() override;

-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
    std::vector<int32_t> getAxes() const;
@ -935,7 +935,7 @@ private:

#endif // OPENVINO_ARCH_X86_64

-Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shared_ptr<ngraph::Node>& op) {
+Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shared_ptr<ov::Node>& op) {
    const auto const1 = ov::as_type_ptr<ngraph::opset1::Constant>(op->get_input_node_shared_ptr(0));
    const auto const2 = ov::as_type_ptr<ngraph::opset1::Constant>(op->get_input_node_shared_ptr(1));
    int constPort = -1;
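`ov::shape_size` is the product of the dimensions, so a constant whose shape multiplies out to 1 is broadcast per-tensor, which is exactly the test that follows:

```cpp
#include <openvino/core/shape.hpp>

int main() {
    ov::Shape scalar_like{1, 1, 1};   // one element -> per-tensor broadcast
    ov::Shape per_channel{1, 8, 1};   // eight elements -> per-channel broadcast
    return (ov::shape_size(scalar_like) == 1 && ov::shape_size(per_channel) == 8) ? 0 : 1;
}
```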
@ -948,49 +948,49 @@ Eltwise::BroadcastingPolicy Eltwise::determineBroadcastingPolicy(const std::shar
    }

    auto const_shape = op->get_input_shape(constPort);
-    if (ngraph::shape_size(const_shape) == 1)
+    if (ov::shape_size(const_shape) == 1)
        return PerTensor;
    else
        return PerChannel;
}

-const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::getInitializers() {
-    static const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer> initializers = {
-        {ngraph::op::v1::Add::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+const std::map<const ov::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::getInitializers() {
+    static const std::map<const ov::DiscreteTypeInfo, Eltwise::Initializer> initializers = {
+        {ngraph::op::v1::Add::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseAdd;
            node.broadcastingPolicy = determineBroadcastingPolicy(op);
        }},
-        {ngraph::op::v1::Subtract::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Subtract::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseSubtract;
            node.broadcastingPolicy = determineBroadcastingPolicy(op);
        }},
-        {ngraph::op::v1::Multiply::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Multiply::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseMultiply;
            node.broadcastingPolicy = determineBroadcastingPolicy(op);
        }},
-        {ngraph::op::v1::Divide::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Divide::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseDivide;
            node.broadcastingPolicy = determineBroadcastingPolicy(op);
        }},
-        {ngraph::op::v0::SquaredDifference::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::SquaredDifference::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseSquaredDifference;
        }},
-        {ngraph::op::v1::Maximum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Maximum::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseMaximum;
        }},
-        {ngraph::op::v1::Minimum::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Minimum::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseMinimum;
        }},
-        {ngraph::op::v1::Mod::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Mod::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseMod;
        }},
-        {ngraph::op::v1::FloorMod::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::FloorMod::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseFloorMod;
        }},
-        {ngraph::op::v1::Power::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Power::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwisePowerDynamic;
        }},
-        {PowerStaticNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {PowerStaticNode::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            auto powerStatic = getNgraphOpAs<PowerStaticNode>(op);
            node.algorithm = Algorithm::EltwisePowerStatic;
            node.alpha = powerStatic->get_power();
@ -998,10 +998,10 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
            node.gamma = powerStatic->get_shift();
            node.broadcastingPolicy = PerTensor;
        }},
-        {ngraph::op::v1::Equal::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Equal::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseEqual;
        }},
-        {ngraph::op::v1::NotEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::NotEqual::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseNotEqual;
        }},
        {ov::op::v10::IsFinite::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
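The initializer table keys on `ov::DiscreteTypeInfo`, which defines an ordering and so can serve directly as a `std::map` key; lookup then dispatches on the node's static type info. A reduced sketch of the same pattern with a stand-in config struct:

```cpp
#include <functional>
#include <map>
#include <memory>
#include <openvino/op/add.hpp>
#include <openvino/op/subtract.hpp>

struct NodeCfg { int algorithm = 0; };  // stand-in for the Eltwise node state

int main() {
    using Init = std::function<void(const std::shared_ptr<ov::Node>&, NodeCfg&)>;
    std::map<ov::DiscreteTypeInfo, Init> initializers{
        {ov::op::v1::Add::get_type_info_static(),
         [](const std::shared_ptr<ov::Node>&, NodeCfg& n) { n.algorithm = 1; }},
        {ov::op::v1::Subtract::get_type_info_static(),
         [](const std::shared_ptr<ov::Node>&, NodeCfg& n) { n.algorithm = 2; }},
    };
    std::shared_ptr<ov::Node> op = std::make_shared<ov::op::v1::Add>();
    NodeCfg cfg;
    initializers.at(op->get_type_info())(op, cfg);  // dispatch on type info
    return cfg.algorithm == 1 ? 0 : 1;
}
```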
@ -1016,46 +1016,46 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
        {ov::op::v10::IsNaN::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseIsNaN;
        }},
-        {ngraph::op::v1::Greater::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Greater::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseGreater;
        }},
-        {ngraph::op::v1::GreaterEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::GreaterEqual::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseGreaterEqual;
        }},
-        {ngraph::op::v1::Less::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::Less::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseLess;
        }},
-        {ngraph::op::v1::LessEqual::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LessEqual::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseLessEqual;
        }},
-        {ngraph::op::v1::LogicalAnd::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalAnd::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseLogicalAnd;
        }},
-        {ngraph::op::v1::LogicalOr::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalOr::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseLogicalOr;
        }},
-        {ngraph::op::v1::LogicalXor::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalXor::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseLogicalXor;
        }},
-        {ngraph::op::v1::LogicalNot::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v1::LogicalNot::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseLogicalNot;
        }},
-        {ngraph::op::v0::Relu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Relu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseRelu;
            node.onednnAlgorithm = dnnl::algorithm::eltwise_relu;
        }},
-        {LeakyReluNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {LeakyReluNode::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            auto leakyRelu = getNgraphOpAs<LeakyReluNode>(op);
            node.algorithm = Algorithm::EltwiseRelu;
            node.onednnAlgorithm = dnnl::algorithm::eltwise_relu;
            node.alpha = leakyRelu->get_slope();
            node.beta = 0.0f;
        }},
-        {ngraph::op::v0::Gelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v0::Gelu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            node.algorithm = Algorithm::EltwiseGeluErf;
            node.onednnAlgorithm = dnnl::algorithm::eltwise_gelu_erf;
        }},
-        {ngraph::op::v7::Gelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
+        {ngraph::op::v7::Gelu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
            auto gelu = getNgraphOpAs<ngraph::op::v7::Gelu>(op);
            ngraph::op::GeluApproximationMode approximationMode = gelu->get_approximation_mode();
            if (approximationMode == ngraph::op::GeluApproximationMode::ERF) {
@ -1068,29 +1068,29 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
|
||||
IE_THROW(NotImplemented) << "CPU Eltwise node doesn't support ngraph operation Gelu with approximation mode: " << approximationMode;
|
||||
}
|
||||
}},
|
||||
{ngraph::op::v0::Elu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Elu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
auto eluOp = getNgraphOpAs<ngraph::op::v0::Elu>(op);
|
||||
node.alpha = static_cast<float>(eluOp->get_alpha());
|
||||
node.algorithm = Algorithm::EltwiseElu;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_elu;
|
||||
}},
|
||||
{ngraph::op::v0::Tanh::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Tanh::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseTanh;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_tanh;
|
||||
}},
|
||||
{ngraph::op::v0::Sigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Sigmoid::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseSigmoid;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_logistic;
|
||||
}},
|
||||
{ngraph::op::v0::Abs::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Abs::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseAbs;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_abs;
|
||||
}},
|
||||
{ngraph::op::v0::Sqrt::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Sqrt::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseSqrt;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_sqrt;
|
||||
}},
|
||||
{ngraph::op::v0::Clamp::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Clamp::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
auto clampOp = getNgraphOpAs<ngraph::op::v0::Clamp>(op);
|
||||
|
||||
float alpha_ = static_cast<float>(clampOp->get_min());
|
||||
@ -1105,32 +1105,32 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
|
||||
node.algorithm = Algorithm::EltwiseClamp;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_clip;
|
||||
}},
|
||||
{ngraph::op::v0::Exp::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Exp::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseExp;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_exp;
|
||||
}},
|
||||
{SwishNode::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{SwishNode::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
auto swishOp = getNgraphOpAs<SwishNode>(op);
|
||||
node.algorithm = Algorithm::EltwiseSwish;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_swish;
|
||||
node.alpha = swishOp->get_alpha();
|
||||
}},
|
||||
{ngraph::op::v4::HSwish::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v4::HSwish::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
// since v3.0 version, oneDNN has flexible implementation of hardswish, ov still uses the one with hardcoded alpha and beta
|
||||
node.alpha = 1.f / 6.f;
|
||||
node.beta = 0.5f;
|
||||
node.algorithm = Algorithm::EltwiseHswish;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_hardswish;
|
||||
}},
|
||||
{ngraph::op::v4::Mish::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v4::Mish::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseMish;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_mish;
|
||||
}},
|
||||
{ngraph::op::v5::HSigmoid::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v5::HSigmoid::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseHsigmoid;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_hsigmoid;
|
||||
}},
|
||||
{ngraph::op::v5::Round::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v5::Round::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
auto roundOp = getNgraphOpAs<ngraph::op::v5::Round>(op);
|
||||
|
||||
switch (roundOp->get_mode()) {
|
||||
@ -1144,25 +1144,25 @@ const std::map<const ngraph::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::g
|
||||
break;
|
||||
}
|
||||
}},
|
||||
{ngraph::op::v0::PRelu::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::PRelu::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwisePrelu;
|
||||
node.broadcastingPolicy = determineBroadcastingPolicy(op);
|
||||
}},
|
||||
{ngraph::op::v0::Erf::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Erf::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseErf;
|
||||
}},
|
||||
{ngraph::op::v4::SoftPlus::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v4::SoftPlus::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseSoftRelu;
|
||||
node.alpha = 1.f;
|
||||
node.onednnAlgorithm = dnnl::algorithm::eltwise_soft_relu;
|
||||
}},
|
||||
{ngraph::op::v9::SoftSign::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v9::SoftSign::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseSoftSign;
|
||||
}},
|
||||
{ngraph::op::v1::Select::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v1::Select::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseSelect;
|
||||
}},
|
||||
{ngraph::op::v0::Log::get_type_info_static(), [](const std::shared_ptr<ngraph::Node>& op, Eltwise& node) {
|
||||
{ngraph::op::v0::Log::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
node.algorithm = Algorithm::EltwiseLog;
|
||||
}},
|
||||
{op::v13::BitwiseAnd::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
|
||||
@ -1926,7 +1926,7 @@ static Eltwise::executorPtr buildExecutor(const EltwiseKey& key) {
|
||||
key.implType == EltwiseImplType::optimizedShapeAgnostic);
|
||||
}
|
||||
|
||||
bool Eltwise::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
|
||||
bool Eltwise::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
|
||||
try {
|
||||
if (getInitializers().find(op->get_type_info()) == getInitializers().end()) {
|
||||
errorMessage = "Doesn't support Eltwise algorithm: " + std::string(op->get_type_name());
|
||||
@ -1935,14 +1935,14 @@ bool Eltwise::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
|
||||
if (const auto binOp = ov::as_type_ptr<const ov::op::util::BinaryElementwiseArithmetic>(op)) {
|
||||
if (binOp->get_autob().m_type != ngraph::op::AutoBroadcastType::NONE &&
|
||||
binOp->get_autob().m_type != ngraph::op::AutoBroadcastType::NUMPY) {
|
||||
errorMessage = "Doesn't support broadcast type: " + ngraph::as_string(binOp->get_autob().m_type);
|
||||
errorMessage = "Doesn't support broadcast type: " + ov::as_string(binOp->get_autob().m_type);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (const auto select = ov::as_type_ptr<const ov::op::v1::Select>(op)) {
|
||||
if (select->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NONE &&
|
||||
select->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NUMPY) {
|
||||
errorMessage = "Doesn't support broadcast type: " + ngraph::as_string(select->get_autob().m_type);
|
||||
errorMessage = "Doesn't support broadcast type: " + ov::as_string(select->get_autob().m_type);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -1952,7 +1952,7 @@ bool Eltwise::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op
|
||||
return true;
|
||||
}
|
||||
|
||||
Eltwise::Eltwise(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
|
||||
Eltwise::Eltwise(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
|
||||
Node(op, context, EltwiseShapeInferFactory()), broadcastingPolicy(Undefined) {
|
||||
std::string errorMessage;
|
||||
if (!isSupportedOperation(op, errorMessage)) {
|
||||
|
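An aside on the pattern being migrated above: getInitializers() maps each operation's DiscreteTypeInfo to a lambda that configures the CPU node, and the only change in this diff is the lambda's parameter type (ngraph::Node to ov::Node). A minimal standalone sketch of such a dispatch table, with a hypothetical EltwiseLike struct standing in for the real node class (not the actual plugin code):

#include <functional>
#include <map>
#include <memory>
#include "openvino/core/node.hpp"
#include "openvino/op/maximum.hpp"

struct EltwiseLike {  // hypothetical stand-in for the CPU Eltwise node
    enum class Alg { Undefined, Maximum } algorithm = Alg::Undefined;
};

using Initializer = std::function<void(const std::shared_ptr<ov::Node>&, EltwiseLike&)>;

static const std::map<ov::DiscreteTypeInfo, Initializer>& initializers() {
    // One entry per supported op type, keyed by its static type info.
    static const std::map<ov::DiscreteTypeInfo, Initializer> table{
        {ov::op::v1::Maximum::get_type_info_static(),
         [](const std::shared_ptr<ov::Node>&, EltwiseLike& node) {
             node.algorithm = EltwiseLike::Alg::Maximum;  // configure per op type
         }},
    };
    return table;
}

void initialize(const std::shared_ptr<ov::Node>& op, EltwiseLike& node) {
    const auto it = initializers().find(op->get_type_info());
    if (it != initializers().end())
        it->second(op, node);  // run the matching initializer, as the ctor above does
}
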
@ -103,7 +103,7 @@ public:
using executorPtr = std::shared_ptr<IEltwiseExecutor>;

public:
Eltwise(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
Eltwise(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override;
void initSupportedPrimitiveDescriptors() override;
@ -141,7 +141,7 @@ public:

BroadcastingPolicy getBroadcastingPolicy() const { return broadcastingPolicy; }

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
executorPtr execPtr = nullptr;
@ -185,10 +185,10 @@ private:
std::vector<MemoryPtr> memPtrs = {};
std::vector<const void*> fqDataPtrs;

using Initializer = std::function<void(const std::shared_ptr<ngraph::Node>&, Eltwise& node)>;
static const std::map<const ngraph::DiscreteTypeInfo, Initializer>& getInitializers();
using Initializer = std::function<void(const std::shared_ptr<ov::Node>&, Eltwise& node)>;
static const std::map<const ov::DiscreteTypeInfo, Initializer>& getInitializers();

static BroadcastingPolicy determineBroadcastingPolicy(const std::shared_ptr<ngraph::Node>& op);
static BroadcastingPolicy determineBroadcastingPolicy(const std::shared_ptr<ov::Node>& op);

size_t getOpInputsNum() const;

@ -14,9 +14,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool EmbeddingBagOffsetSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool EmbeddingBagOffsetSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto embBagOffsetSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingBagOffsetsSum>(op);
const auto embBagOffsetSumOp = ov::as_type_ptr<const ngraph::op::v3::EmbeddingBagOffsetsSum>(op);
if (!embBagOffsetSumOp) {
errorMessage = "Node is not an instance of the EmbeddingBagOffsetsSum operation from opset v3.";
return false;
@ -27,7 +27,7 @@ bool EmbeddingBagOffsetSum::isSupportedOperation(const std::shared_ptr<const ngr
return true;
}

EmbeddingBagOffsetSum::EmbeddingBagOffsetSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
EmbeddingBagOffsetSum::EmbeddingBagOffsetSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)),
EmbeddingBagSum(op, 3lu, 1lu, 4lu, 3lu) {
std::string errorMessage;

@ -17,7 +17,7 @@ namespace node {

class EmbeddingBagOffsetSum : public Node, public EmbeddingBagSum {
public:
EmbeddingBagOffsetSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
EmbeddingBagOffsetSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -25,7 +25,7 @@ public:
bool created() const override;

bool isExecutable() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

protected:
void prepareParams() override;

@ -14,9 +14,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool EmbeddingBagPackedSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool EmbeddingBagPackedSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto embBagPackedSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingBagPackedSum>(op);
const auto embBagPackedSumOp = ov::as_type_ptr<const ngraph::op::v3::EmbeddingBagPackedSum>(op);
if (!embBagPackedSumOp) {
errorMessage = "Node is not an instance of the EmbeddingBagPackedSum operation from opset v3.";
return false;
@ -27,7 +27,7 @@ bool EmbeddingBagPackedSum::isSupportedOperation(const std::shared_ptr<const ngr
return true;
}

EmbeddingBagPackedSum::EmbeddingBagPackedSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
EmbeddingBagPackedSum::EmbeddingBagPackedSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)),
EmbeddingBagSum(op, 2lu, 1lu, 2lu, 3lu) {
std::string errorMessage;

@ -17,7 +17,7 @@ namespace node {

class EmbeddingBagPackedSum : public Node, public EmbeddingBagSum {
public:
EmbeddingBagPackedSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
EmbeddingBagPackedSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -25,7 +25,7 @@ public:
bool created() const override;

bool isExecutable() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

protected:
void prepareParams() override;

@ -18,7 +18,7 @@ namespace intel_cpu {
namespace node {

EmbeddingBagSum::EmbeddingBagSum(
const std::shared_ptr<ngraph::Node>& op,
const std::shared_ptr<ov::Node>& op,
size_t requiredInputNum,
size_t indicesIdx,
size_t perSampleWeightsIdx,
@ -48,7 +48,7 @@ void EmbeddingBagSum::prepareParams(const VectorDims& indexStaticShape) {

template<typename T>
void EmbeddingBagSum::processData(const T* srcData, const T* weightsData,
const InferenceEngine::SizeVector& inDataDims, const MemoryPtr& outMemory) {
const VectorDims& inDataDims, const MemoryPtr& outMemory) {
std::string msgPrefix = std::string("Node EmbeddingBagSum with name '") + _layerName + "' ";

initFromInputs();
@ -120,7 +120,7 @@ void EmbeddingBagSum::processData(const T* srcData, const T* weightsData,
}

void EmbeddingBagSum::execute(const uint8_t* srcData, const uint8_t* weightsData, const InferenceEngine::Precision &srcPrc,
const InferenceEngine::SizeVector& inDims, const MemoryPtr& outMemory) {
const VectorDims& inDims, const MemoryPtr& outMemory) {
switch (srcPrc) {
case Precision::FP32: {
return processData<PrecisionTrait<Precision::FP32>::value_type>(reinterpret_cast<const float*>(srcData),

@ -17,14 +17,14 @@ namespace node {
class EmbeddingBagSum {
public:
EmbeddingBagSum(
const std::shared_ptr<ngraph::Node>&,
const std::shared_ptr<ov::Node>&,
size_t requiredInputsNum,
size_t indicesIdx,
size_t perSampleWeightsIdx,
size_t defaultIndexIdx);

void execute(const uint8_t* srcData, const uint8_t* weightsData, const InferenceEngine::Precision &srcPrc,
const InferenceEngine::SizeVector& inDims, const MemoryPtr& outMemory);
const VectorDims& inDims, const MemoryPtr& outMemory);

~EmbeddingBagSum() = default;

@ -41,7 +41,7 @@ protected:

template<typename T>
void processData(const T* srcData, const T* weightsData,
const InferenceEngine::SizeVector& inDataDims, const MemoryPtr& outMemory);
const VectorDims& inDataDims, const MemoryPtr& outMemory);

const size_t EMB_TABLE_IDX = 0lu;
const size_t INDICES_IDX;

@ -14,9 +14,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto embBagSegSumOp = ngraph::as_type_ptr<const ngraph::op::v3::EmbeddingSegmentsSum>(op);
const auto embBagSegSumOp = ov::as_type_ptr<const ngraph::op::v3::EmbeddingSegmentsSum>(op);
if (!embBagSegSumOp) {
errorMessage = "Node is not an instance of the EmbeddingSegmentsSum operation from opset v3.";
return false;
@ -27,7 +27,7 @@ bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr<const ngra
return true;
}

EmbeddingSegmentsSum::EmbeddingSegmentsSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
EmbeddingSegmentsSum::EmbeddingSegmentsSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, PortMask(NUM_SEGMENTS_IDX))),
EmbeddingBagSum(op, 4lu, 1lu, 5lu, 4lu) {
std::string errorMessage;

@ -17,7 +17,7 @@ namespace node {

class EmbeddingSegmentsSum : public Node, public EmbeddingBagSum {
public:
EmbeddingSegmentsSum(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
EmbeddingSegmentsSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -25,7 +25,7 @@ public:
bool created() const override;

bool isExecutable() const override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

protected:
void prepareParams() override;

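Reading the three EmbeddingBag* constructors above together, the positional arguments to the shared EmbeddingBagSum base encode each subclass's input-port layout. A hedged summary sketch, with hypothetical names derived only from the calls in this diff (port 0 is always the embedding table, EMB_TABLE_IDX = 0lu):

// Hypothetical summary of EmbeddingBagSum(op, requiredInputNum, indicesIdx,
// perSampleWeightsIdx, defaultIndexIdx) as invoked by the three subclasses.
struct EmbeddingBagPorts {
    size_t required_inputs, indices_idx, per_sample_weights_idx, default_index_idx;
};
constexpr EmbeddingBagPorts kOffsetSum{3, 1, 4, 3};    // EmbeddingBagSum(op, 3lu, 1lu, 4lu, 3lu)
constexpr EmbeddingBagPorts kPackedSum{2, 1, 2, 3};    // EmbeddingBagSum(op, 2lu, 1lu, 2lu, 3lu)
constexpr EmbeddingBagPorts kSegmentsSum{4, 1, 5, 4};  // EmbeddingBagSum(op, 4lu, 1lu, 5lu, 4lu)
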
@ -50,7 +50,7 @@ void ACLScheduler::schedule_custom(ICPPKernel *kernel, const Hints &hints, const
const auto num_windows = _num_threads;
const auto hints_split_dimension = hints.split_dimension();

InferenceEngine::parallel_for(num_windows, [&](int wid) {
ov::parallel_for(num_windows, [&](int wid) {
Window win = max_window.split_window(hints_split_dimension, wid, num_windows);
win.validate();
main_run(win, {wid, static_cast<int>(_num_threads), &cpu_info()});
@ -68,7 +68,7 @@ void ACLScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Win
}

void ACLScheduler::run_workloads(std::vector<arm_compute::IScheduler::Workload> &workloads) {
InferenceEngine::parallel_for(workloads.size(), [&](int wid) {
ov::parallel_for(workloads.size(), [&](int wid) {
workloads[wid]({wid, static_cast<int>(parallel_get_num_threads()), &cpu_info()});
});
}

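The only functional change in the scheduler hunks above is swapping InferenceEngine::parallel_for for ov::parallel_for; the call shape is identical. A minimal usage sketch of the replacement, assuming the openvino/core/parallel.hpp header that provides ov::parallel_for in current OpenVINO:

#include <vector>
#include "openvino/core/parallel.hpp"

void scale_in_parallel(std::vector<float>& data, float factor) {
    // The lambda runs once per index in [0, data.size()), distributed
    // across the configured CPU threads, mirroring the scheduler code above.
    ov::parallel_for(data.size(), [&](size_t i) {
        data[i] *= factor;
    });
}
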
@ -20,7 +20,7 @@ public:
impl_desc_type getImplType() const override { return implType; }
private:
static int64_t calcShapeSize(const Shape& shape, size_t start, size_t end);
static bool IsTransposeMovingSingleAxis(InferenceEngine::SizeVector permutations, size_t& from, size_t& to);
static bool IsTransposeMovingSingleAxis(VectorDims permutations, size_t& from, size_t& to);
void TransposeSingleAxisOutwards(const MemoryCPtr& input, const MemoryPtr& output, size_t from, size_t to);
void TransposeSingleAxisInwards(const MemoryCPtr& input, const MemoryPtr& output, size_t from, size_t to);

@ -47,7 +47,7 @@ public:

virtual impl_desc_type getImplType() const = 0;

static InferenceEngine::SizeVector transformTo5DCase(const InferenceEngine::SizeVector& shape, bool initAcrossChannels);
static VectorDims transformTo5DCase(const VectorDims& shape, bool initAcrossChannels);

protected:
MVNAttrs mvnAttrs;

@ -223,9 +223,9 @@ bool ExperimentalDetectronDetectionOutput::needPrepareParams() const {
return false;
}

bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto doOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
const auto doOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
if (!doOp) {
errorMessage = "Node is not an instance of the ExperimentalDetectronDetectionOutput from the operations set v6.";
return false;
@ -236,14 +236,14 @@ bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::share
return true;
}

ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const std::shared_ptr<ngraph::Node>& op,
ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}
auto doOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
auto doOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronDetectionOutput>(op);
auto attributes = doOp->get_attrs();

score_threshold_ = attributes.score_threshold;

@ -13,7 +13,7 @@ namespace node {

class ExperimentalDetectronDetectionOutput : public Node {
public:
ExperimentalDetectronDetectionOutput(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
ExperimentalDetectronDetectionOutput(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -23,7 +23,7 @@ public:
bool needShapeInfer() const override;
bool needPrepareParams() const override;
void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
const int INPUT_ROIS {0};

@ -272,7 +272,7 @@ void fill_output_blobs(const float* proposals, const int* roi_indices,
} // namespace

bool ExperimentalDetectronGenerateProposalsSingleImage::isSupportedOperation
(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
const auto proposalOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
if (!proposalOp) {
errorMessage = "Node is not an instance of the Proposal from the operations set v0.";
return false;
@ -286,7 +286,7 @@ bool ExperimentalDetectronGenerateProposalsSingleImage::isSupportedOperation
}

ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerateProposalsSingleImage(
const std::shared_ptr<ngraph::Node>& op,
const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;
@ -294,7 +294,7 @@ ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerate
IE_THROW(NotImplemented) << errorMessage;
}

auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
auto proposalOp = ov::as_type_ptr<const ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(op);
auto proposalAttrs = proposalOp->get_attrs();

min_size_ = proposalAttrs.min_size;

@ -13,7 +13,7 @@ namespace node {

class ExperimentalDetectronGenerateProposalsSingleImage : public Node {
public:
ExperimentalDetectronGenerateProposalsSingleImage(const std::shared_ptr<ngraph::Node>& op,
ExperimentalDetectronGenerateProposalsSingleImage(const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
@ -24,7 +24,7 @@ public:
bool needShapeInfer() const override;
bool needPrepareParams() const override;
void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
// Inputs:

@ -14,7 +14,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
std::string& errorMessage) noexcept {
try {
const auto priorGridGen = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronPriorGridGenerator>(op);
@ -29,7 +29,7 @@ bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::sh
}

ExperimentalDetectronPriorGridGenerator::ExperimentalDetectronPriorGridGenerator(
const std::shared_ptr<ngraph::Node>& op,
const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;

@ -13,7 +13,7 @@ namespace node {

class ExperimentalDetectronPriorGridGenerator : public Node {
public:
ExperimentalDetectronPriorGridGenerator(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
ExperimentalDetectronPriorGridGenerator(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -22,7 +22,7 @@ public:

bool needPrepareParams() const override;
void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
// Inputs:

@ -281,7 +281,7 @@ void split_points(const std::vector<int>& ids, std::vector<int>& rois_per_level,

} // namespace

bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op,
bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
std::string& errorMessage) noexcept {
try {
const auto roiFeatureExtractor = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronROIFeatureExtractor>(op);
@ -296,7 +296,7 @@ bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::s
}

ExperimentalDetectronROIFeatureExtractor::ExperimentalDetectronROIFeatureExtractor(
const std::shared_ptr<ngraph::Node>& op,
const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;

@ -13,7 +13,7 @@ namespace node {

class ExperimentalDetectronROIFeatureExtractor : public Node {
public:
ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -23,7 +23,7 @@ public:
bool needPrepareParams() const override { return false; };
void executeDynamicImpl(dnnl::stream strm) override { execute(strm); };

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
const int INPUT_ROIS {0};

@ -17,7 +17,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto topKROI = std::dynamic_pointer_cast<const ngraph::opset6::ExperimentalDetectronTopKROIs>(op);
if (!topKROI) {
@ -30,7 +30,7 @@ bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr<c
return true;
}

ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const std::shared_ptr<ngraph::Node>& op,
ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const std::shared_ptr<ov::Node>& op,
const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;

@ -13,7 +13,7 @@ namespace node {

class ExperimentalDetectronTopKROIs : public Node {
public:
ExperimentalDetectronTopKROIs(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
ExperimentalDetectronTopKROIs(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -24,7 +24,7 @@ public:
bool needPrepareParams() const override { return false; };
void executeDynamicImpl(dnnl::stream strm) override { execute(strm); };

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
// Inputs:

@ -272,16 +272,16 @@ private:
};
#endif // OPENVINO_ARCH_X86_64

bool ExtractImagePatches::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool ExtractImagePatches::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
auto extImgPatcher = ngraph::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
auto extImgPatcher = ov::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
if (!extImgPatcher) {
errorMessage = "Only opset3 ExtractImagePatches operation is supported";
return false;
}
const auto padValue = extImgPatcher->get_auto_pad();
if (!one_of(padValue, ngraph::op::PadType::VALID, ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER)) {
errorMessage = "Does not support pad type: " + ngraph::as_string(padValue);
errorMessage = "Does not support pad type: " + ov::as_string(padValue);
return false;
}
if (!everyone_is(2u, extImgPatcher->get_sizes().size(), extImgPatcher->get_strides().size(), extImgPatcher->get_rates().size())) {
@ -328,7 +328,7 @@ bool ExtractImagePatchesKey::operator==(const ExtractImagePatchesKey& rhs) const
}
} // namespace

ExtractImagePatches::ExtractImagePatches(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
ExtractImagePatches::ExtractImagePatches(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
@ -336,7 +336,7 @@ ExtractImagePatches::ExtractImagePatches(const std::shared_ptr<ngraph::Node>& op
}

errorPrefix = "ExtractImagePatches layer with name '" + op->get_friendly_name() + "' ";
auto extImgPatcher = ngraph::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);
auto extImgPatcher = ov::as_type_ptr<const ngraph::opset3::ExtractImagePatches>(op);

if (inputShapes.size() != 1 || outputShapes.size() != 1)
IE_THROW() << errorPrefix << "has incorrect number of input or output edges!"

@ -44,7 +44,7 @@ struct jit_uni_extract_image_patches_kernel {

class ExtractImagePatches : public Node {
public:
ExtractImagePatches(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
ExtractImagePatches(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -54,7 +54,7 @@ public:
void executeDynamicImpl(dnnl::stream strm) override;
void prepareParams() override;

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
enum class ExtImgPatcherPadType {
VALID,
SAME_LOWER,

@ -55,8 +55,8 @@ Eye::Eye(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
}
outType = op->get_output_element_type(0);
withBatchShape = (op->get_input_size() == 4);
if (!one_of(outType, ngraph::element::f32, ngraph::element::bf16,
ngraph::element::i32, ngraph::element::i8, ngraph::element::u8)) {
if (!one_of(outType, ov::element::f32, ov::element::bf16,
ov::element::i32, ov::element::i8, ov::element::u8)) {
THROW_ERROR << errorPrefix << "doesn't support demanded output precision";
}
}

@ -22,7 +22,7 @@ public:
static constexpr size_t BATCH_SHAPE = 3lu;

public:
Eye(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
Eye(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override;
void initSupportedPrimitiveDescriptors() override;
@ -32,7 +32,7 @@ public:
bool needShapeInfer() const override {return true;};
void executeDynamicImpl(dnnl::stream strm) override { execute(strm); }

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
std::string errorPrefix = "";

@ -864,7 +864,7 @@ private:
}
};
#endif
bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto fq = std::dynamic_pointer_cast<const ngraph::opset1::FakeQuantize>(op);
if (!fq) {
@ -892,7 +892,7 @@ bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ngraph::Node
size_t count_not_unit_axis = 0;
auto shape = getNormalizedDimsBySize(fq->get_input_shape(i), dataRank);

if (ngraph::shape_size(shape) != 1) {
if (ov::shape_size(shape) != 1) {
size_t not_unit_axis = 0;
for (size_t i = 0; i < shape.size(); i++) {
if (shape[i] > 1) {
@ -916,7 +916,7 @@ bool FakeQuantize::isSupportedOperation(const std::shared_ptr<const ngraph::Node
}
if (fq->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NONE &&
fq->get_auto_broadcast().m_type != ngraph::op::AutoBroadcastType::NUMPY) {
errorMessage = "Doesn't support broadcast type: " + ngraph::as_string(fq->get_auto_broadcast().m_type);
errorMessage = "Doesn't support broadcast type: " + ov::as_string(fq->get_auto_broadcast().m_type);
return false;
}
} catch (...) {
@ -960,7 +960,7 @@ struct FakeQuantKey {
};
} // namespace

FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context) :
FakeQuantize::FakeQuantize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context) :
Node(op, context, PassThroughShapeInferFactory()) {
std::string errorMessage;
if (isSupportedOperation(op, errorMessage)) {
@ -994,7 +994,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC

const auto ilShape = getNormalizedDimsBySize(fq->get_input_shape(1), dataRank);
auto inputLowAxis = initAxisIdx(ilShape);
isInputLowBroadcasted = (ngraph::is_scalar(ilShape) || ilShape[inputLowAxis] == 1);
isInputLowBroadcasted = (ov::is_scalar(ilShape) || ilShape[inputLowAxis] == 1);
if (!isInputLowBroadcasted) {
axis = inputLowAxis;
axisSize = ilShape[inputLowAxis];
@ -1002,7 +1002,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC

const auto ihShape = getNormalizedDimsBySize(fq->get_input_shape(2), dataRank);
auto inputHighAxis = initAxisIdx(ihShape);
isInputHighBroadcasted = (ngraph::is_scalar(ihShape) || ihShape[inputHighAxis] == 1);
isInputHighBroadcasted = (ov::is_scalar(ihShape) || ihShape[inputHighAxis] == 1);
if (!isInputHighBroadcasted) {
axis = inputHighAxis;
axisSize = ihShape[inputHighAxis];
@ -1010,7 +1010,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC

const auto olShape = getNormalizedDimsBySize(fq->get_input_shape(3), dataRank);
auto outputLowAxis = initAxisIdx(olShape);
isOutputLowBroadcasted = (ngraph::is_scalar(olShape) || olShape[outputLowAxis] == 1);
isOutputLowBroadcasted = (ov::is_scalar(olShape) || olShape[outputLowAxis] == 1);
if (!isOutputLowBroadcasted) {
axis = outputLowAxis;
axisSize = olShape[outputLowAxis];
@ -1018,16 +1018,16 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphC

const auto ohShape = getNormalizedDimsBySize(fq->get_input_shape(4), dataRank);
auto outputHighAxis = initAxisIdx(ohShape);
isOutputHighBroadcasted = (ngraph::is_scalar(ohShape) || ohShape[outputHighAxis] == 1);
isOutputHighBroadcasted = (ov::is_scalar(ohShape) || ohShape[outputHighAxis] == 1);
if (!isOutputHighBroadcasted) {
axis = outputHighAxis;
axisSize = ohShape[outputHighAxis];
}

auto inputLowAxisSize = ngraph::is_scalar(ilShape) ? 1 : ilShape[inputLowAxis];
auto inputHighAxisSize = ngraph::is_scalar(ihShape) ? 1 : ihShape[inputHighAxis];
auto outputLowAxisSize = ngraph::is_scalar(olShape) ? 1 : olShape[outputLowAxis];
auto outputHighAxisSize = ngraph::is_scalar(ohShape) ? 1 : ohShape[outputHighAxis];
auto inputLowAxisSize = ov::is_scalar(ilShape) ? 1 : ilShape[inputLowAxis];
auto inputHighAxisSize = ov::is_scalar(ihShape) ? 1 : ihShape[inputHighAxis];
auto outputLowAxisSize = ov::is_scalar(olShape) ? 1 : olShape[outputLowAxis];
auto outputHighAxisSize = ov::is_scalar(ohShape) ? 1 : ohShape[outputHighAxis];

if (axisSize != -1 && !dimsEqualWeak(axisSize, getInputShapeAtPort(0).getDims()[axis])) {
IE_THROW() << errorPrefix << "has different quantization axis size on 'data' and 'range' inputs";

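The four nearly identical blocks above apply one test per FakeQuantize range input: an input is treated as broadcast (per-tensor) when its shape is a scalar or its candidate axis has extent 1; otherwise it fixes the quantization axis. A minimal sketch of that predicate, using the same ov::is_scalar helper the diff switches to (function name is mine, not the plugin's):

#include "openvino/core/shape.hpp"

// True when `shape` broadcasts along `axis`, i.e. it contributes a single
// value rather than one value per channel (rank-0 shape or axis extent 1).
bool broadcasts_along(const ov::Shape& shape, size_t axis) {
    return ov::is_scalar(shape) || shape[axis] == 1;
}
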
@ -77,7 +77,7 @@ struct jit_uni_quantize_kernel {

class FakeQuantize : public Node {
public:
FakeQuantize(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
FakeQuantize(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void initSupportedPrimitiveDescriptors() override;
void getSupportedDescriptors() override;
@ -142,7 +142,7 @@ public:
bool allowBinary = true,
bool do_rounding = true);

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

enum BroadcastingPolicy {
PerChannel, // all FQ operations are per channel

@ -98,7 +98,7 @@ bool FCKey::operator==(const FCKey &rhs) const {

} // namespace

bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto fc = std::dynamic_pointer_cast<const FullyConnectedNode>(op);
if (!fc) {
@ -126,7 +126,7 @@ bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ngraph::No
return true;
}

FullyConnected::FullyConnected(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
FullyConnected::FullyConnected(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, FCShapeInferFactory(op)), withBiases(false) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage))

@ -18,7 +18,7 @@ namespace node {

class FullyConnected : public Node {
public:
FullyConnected(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
FullyConnected(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

std::vector<dnnl::memory::format_tag> getAvailableFormatsForDims(const Shape &dims) const override;
void getSupportedDescriptors() override;
@ -51,7 +51,7 @@ public:

bool canFuse(const NodePtr& node) const override;

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

void prepareParams() override;
void executeDynamicImpl(dnnl::stream strm) override;

@ -17,7 +17,7 @@ namespace node {

class Gather : public Node {
public:
Gather(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
Gather(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -27,7 +27,7 @@ public:
bool isExecutable() const override;
void resolveInPlaceEdges(Edge::LOOK look) override;

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

struct threadExecParams {
std::vector<int> specIdxInBytes;

@ -32,7 +32,7 @@ bool GatherElements::isSupportedOperation(const std::shared_ptr<const ov::Node>&
return true;
}

GatherElements::GatherElements(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
GatherElements::GatherElements(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {

@ -16,7 +16,7 @@ namespace node {

class GatherElements : public Node {
public:
GatherElements(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
GatherElements(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;

@ -21,7 +21,7 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool GatherND::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool GatherND::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
if (!one_of(op->get_type_info(), ngraph::op::v5::GatherND::get_type_info_static(), ngraph::op::v8::GatherND::get_type_info_static())) {
errorMessage = "Node is not an instance of the GatherND operation from operation set v5 and v8.";
@ -34,7 +34,7 @@ bool GatherND::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& o
return true;
}

GatherND::GatherND(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
GatherND::GatherND(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
@ -47,9 +47,9 @@ GatherND::GatherND(const std::shared_ptr<ngraph::Node>& op, const GraphContext::
const size_t dataInputRank = getInputShapeAtPort(GATHERND_DATA).getRank();
const size_t indicesInputRank = getInputShapeAtPort(GATHERND_INDEXES).getRank();

if (auto gatherNdOp = ngraph::as_type_ptr<const ngraph::op::v8::GatherND>(op)) {
if (auto gatherNdOp = ov::as_type_ptr<const ngraph::op::v8::GatherND>(op)) {
attrs.batchDims = gatherNdOp->get_batch_dims();
} else if (auto gatherNdOp = ngraph::as_type_ptr<const ngraph::op::v5::GatherND>(op)) {
} else if (auto gatherNdOp = ov::as_type_ptr<const ngraph::op::v5::GatherND>(op)) {
attrs.batchDims = gatherNdOp->get_batch_dims();
} else {
THROW_ERROR << "has support only opset5.";

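The ov::as_type_ptr used above is a checked downcast that yields nullptr on a type mismatch, which is why the constructor can probe the opset8 flavour before falling back to opset5. A minimal sketch of the same dispatch, simplified from the node code above and returning 0 where the real node throws:

#include <memory>
#include "openvino/core/type.hpp"
#include "openvino/op/gather_nd.hpp"

size_t batch_dims_of(const std::shared_ptr<ov::Node>& op) {
    if (const auto v8 = ov::as_type_ptr<ov::op::v8::GatherND>(op))
        return v8->get_batch_dims();  // opset8 flavour
    if (const auto v5 = ov::as_type_ptr<ov::op::v5::GatherND>(op))
        return v5->get_batch_dims();  // opset5 flavour
    return 0;  // unsupported type; the real node throws here
}
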
@ -16,14 +16,14 @@ namespace node {

class GatherND : public Node {
public:
GatherND(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
GatherND(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
void execute(dnnl::stream strm) override;
bool created() const override;

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

protected:
void executeDynamicImpl(dnnl::stream strm) override;

@ -17,9 +17,9 @@ namespace ov {
namespace intel_cpu {
namespace node {

bool GatherTree::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
bool GatherTree::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
const auto gatherElementsOp = ngraph::as_type_ptr<const ngraph::op::v1::GatherTree>(op);
const auto gatherElementsOp = ov::as_type_ptr<const ngraph::op::v1::GatherTree>(op);
if (!gatherElementsOp) {
errorMessage = "Node is not an instance of the GatherTree operation from operation set v1.";
return false;
@ -30,7 +30,7 @@ bool GatherTree::isSupportedOperation(const std::shared_ptr<const ngraph::Node>&
return true;
}

GatherTree::GatherTree(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
GatherTree::GatherTree(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {

@ -13,7 +13,7 @@ namespace node {

class GatherTree : public Node {
public:
GatherTree(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
GatherTree(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -23,7 +23,7 @@ public:
void prepareParams() override;
void executeDynamicImpl(dnnl::stream strm) override;

static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
struct GatherTreeExecutor {

@ -277,9 +277,9 @@ void fill_output_blobs(const float* proposals, const int* roi_indices,
} // namespace

bool GenerateProposals::isSupportedOperation
(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
if (!ngraph::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op)) {
if (!ov::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op)) {
errorMessage = "Node is not an instance of the Proposal from the operations set v0.";
return false;
}
@ -289,14 +289,14 @@ bool GenerateProposals::isSupportedOperation
return true;
}

GenerateProposals::GenerateProposals(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
GenerateProposals::GenerateProposals(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context)
: Node(op, context, InternalDynShapeInferFactory()) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}

auto proposalOp = ngraph::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op);
auto proposalOp = ov::as_type_ptr<const ngraph::op::v9::GenerateProposals>(op);
auto proposalAttrs = proposalOp->get_attrs();

min_size_ = proposalAttrs.min_size;

@ -13,7 +13,7 @@ namespace node {

class GenerateProposals : public Node {
public:
GenerateProposals(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context);
GenerateProposals(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr context);

void getSupportedDescriptors() override {};
void initSupportedPrimitiveDescriptors() override;
@ -23,7 +23,7 @@ public:
bool needShapeInfer() const override;
bool needPrepareParams() const override;
void executeDynamicImpl(dnnl::stream strm) override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;

private:
// Inputs:

Some files were not shown because too many files have changed in this diff.