[GPU] New headers and namespaces in some parts (#19229)

Commit 526d76c81f by Vladimir Paramuzov, 2023-08-18 15:57:15 +04:00 (committed via GitHub)
Parent: 4f29e60742
102 changed files with 1206 additions and 1152 deletions

View File

@@ -25,7 +25,7 @@
 #include "intel_gpu/plugin/custom_layer.hpp"
 #include "intel_gpu/plugin/remote_context.hpp"
 #include "intel_gpu/plugin/remote_blob.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"

 namespace ov {
 namespace intel_gpu {
@@ -96,7 +96,7 @@ public:
 protected:
     RemoteContextImpl::Ptr m_context;
-    std::shared_ptr<Program> m_program;
+    std::shared_ptr<ProgramBuilder> m_program;
     std::string m_networkName;
     ExecutionConfig m_config;
     uint16_t m_stream_id;

View File

@@ -4,6 +4,17 @@
 #pragma once

+#include "openvino/core/node.hpp"
+#include "openvino/runtime/profiling_info.hpp"
+#include "intel_gpu/plugin/custom_layer.hpp"
+#include "intel_gpu/runtime/engine.hpp"
+#include "intel_gpu/runtime/execution_config.hpp"
+#include "intel_gpu/graph/topology.hpp"
+#include "intel_gpu/graph/program.hpp"
+
+#include <cpp/ie_cnn_network.h>
+
 #include <vector>
 #include <map>
 #include <memory>
@@ -12,16 +23,6 @@
 #include <mutex>
 #include <set>

-#include <cpp/ie_cnn_network.h>
-#include <ngraph/ngraph.hpp>
-#include "gpu/gpu_config.hpp"
-#include "intel_gpu/plugin/custom_layer.hpp"
-#include "intel_gpu/runtime/engine.hpp"
-#include "intel_gpu/runtime/execution_config.hpp"
-#include "intel_gpu/graph/topology.hpp"
-#include "intel_gpu/graph/program.hpp"

 // Forward declarations for cldnn part
 namespace cldnn {
@@ -34,8 +35,8 @@ enum class eltwise_mode : int32_t;
 #define REGISTER_FACTORY_IMPL(op_version, op_name) \
 void __register ## _ ## op_name ## _ ## op_version(); \
 void __register ## _ ## op_name ## _ ## op_version() { \
-    Program::RegisterFactory<ov::op::op_version::op_name>( \
-        [](Program& p, const std::shared_ptr<ov::Node>& op) { \
+    ProgramBuilder::RegisterFactory<ov::op::op_version::op_name>( \
+        [](ProgramBuilder& p, const std::shared_ptr<ov::Node>& op) { \
     auto op_casted = std::dynamic_pointer_cast<ov::op::op_version::op_name>(op); \
     OPENVINO_ASSERT(op_casted, "[GPU] Invalid ov Node type passed into ", __PRETTY_FUNCTION__); \
     Create##op_name##Op(p, op_casted); \
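For reference, this is how the renamed macro expands for a concrete op: a hypothetical expansion of REGISTER_FACTORY_IMPL(v0, Concat), written out from the macro body above (only names visible in this diff are used):

void __register_Concat_v0();
void __register_Concat_v0() {
    ProgramBuilder::RegisterFactory<ov::op::v0::Concat>(
        [](ProgramBuilder& p, const std::shared_ptr<ov::Node>& op) {
            auto op_casted = std::dynamic_pointer_cast<ov::op::v0::Concat>(op);
            OPENVINO_ASSERT(op_casted, "[GPU] Invalid ov Node type passed into ", __PRETTY_FUNCTION__);
            CreateConcatOp(p, op_casted);
        });
}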
@@ -52,10 +53,10 @@ struct is_smart_pointer<std::shared_ptr<T>> : std::true_type {};
 template<class T>
 struct is_smart_pointer<std::shared_ptr<const T>> : std::true_type {};

-std::string layer_type_lower(const ngraph::Node* op);
-std::string layer_type_name_ID(const ngraph::Node* op);
-std::string layer_type_lower(const std::shared_ptr<ngraph::Node>& op);
-std::string layer_type_name_ID(const std::shared_ptr<ngraph::Node>& op);
+std::string layer_type_lower(const ov::Node* op);
+std::string layer_type_name_ID(const ov::Node* op);
+std::string layer_type_lower(const std::shared_ptr<ov::Node>& op);
+std::string layer_type_name_ID(const std::shared_ptr<ov::Node>& op);

 struct PerfCounter {
     InferenceEngine::InferenceEngineProfileInfo::LayerStatus status;
@@ -78,13 +79,13 @@ public:
     long long cpu_avg() const { return (num == 0) ? 0 : cpu_uSec / num; }
 };

-class Program {
+class ProgramBuilder {
 public:
-    Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, const ExecutionConfig& config,
+    ProgramBuilder(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, const ExecutionConfig& config,
             bool createTopologyOnly = false, bool partialBuild = false,
             InferenceEngine::InputsDataMap* inputs = nullptr, InferenceEngine::OutputsDataMap* outputs = nullptr,
             std::shared_ptr<ov::threading::IStreamsExecutor> task_executor = nullptr, bool innerProgram = false);
-    Program(cldnn::engine& engine, const ExecutionConfig& config,
+    ProgramBuilder(cldnn::engine& engine, const ExecutionConfig& config,
             InferenceEngine::InputsDataMap* inputs = nullptr, InferenceEngine::OutputsDataMap* outputs = nullptr);

     static const cldnn::primitive_id m_preProcessTag;
@@ -129,14 +130,14 @@ public:
     // Graph construction helpers
     std::vector<cldnn::input_info> GetInputInfo(const std::shared_ptr<ngraph::Node>& op) const;

-    using factory_t = std::function<void(Program&, const std::shared_ptr<ngraph::Node>&)>;
+    using factory_t = std::function<void(ProgramBuilder&, const std::shared_ptr<ngraph::Node>&)>;
     using factories_map_t = std::map<ngraph::DiscreteTypeInfo, factory_t>;

     template<typename OpType>
     static void RegisterFactory(factory_t func) {
         std::lock_guard<std::mutex> lock(m_mutex);
-        if (Program::factories_map.find(OpType::get_type_info_static()) == Program::factories_map.end()) {
-            Program::factories_map.insert({OpType::get_type_info_static(), func});
+        if (ProgramBuilder::factories_map.find(OpType::get_type_info_static()) == ProgramBuilder::factories_map.end()) {
+            ProgramBuilder::factories_map.insert({OpType::get_type_info_static(), func});
         }
     }
@@ -194,10 +195,10 @@ private:
     void ChangeInputBatch(int batch);
 };

-void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& node, CustomLayerPtr customLayer);
-void CreateUnaryEltwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& node,
+void CreateCustomOp(ProgramBuilder& p, const std::shared_ptr<ngraph::Node>& node, CustomLayerPtr customLayer);
+void CreateUnaryEltwiseOp(ProgramBuilder& p, const std::shared_ptr<ngraph::Node>& node,
                           cldnn::activation_func func, cldnn::activation_additional_params params);
-void CreateElementwiseOp(Program& p,
+void CreateElementwiseOp(ProgramBuilder& p,
                          const std::shared_ptr<ngraph::Node>& node,
                          cldnn::eltwise_mode mode,
                          std::vector<float> coefficients = {},

View File

@@ -6,7 +6,7 @@
 #include <memory>

-#include <ngraph/function.hpp>
+#include "openvino/core/model.hpp"
 #include "intel_gpu/runtime/execution_config.hpp"
 #include "intel_gpu/runtime/device.hpp"

View File

@@ -97,7 +97,7 @@ struct broadcast : public primitive_base<broadcast> {
     broadcast(const primitive_id& id,
               const input_info& input,
               const ov::Shape& target_shape,
-              const ngraph::AxisSet& axes_mapping,
+              const ov::AxisSet& axes_mapping,
               const ov::op::BroadcastModeSpec& broadcast_spec = ov::op::BroadcastType::EXPLICIT,
               const padding& output_padding = padding())
         : primitive_base(id, {input}, {output_padding}),
@@ -111,7 +111,7 @@ struct broadcast : public primitive_base<broadcast> {
     broadcast(const primitive_id& id,
               const input_info& input,
               const input_info& target_shape_id,
-              const ngraph::AxisSet& axes_mapping,
+              const ov::AxisSet& axes_mapping,
               const ov::op::BroadcastModeSpec& broadcast_spec = ov::op::BroadcastType::EXPLICIT,
               const padding& output_padding = padding())
         : primitive_base(id, {input, target_shape_id}, {output_padding}),

View File

@@ -3,10 +3,10 @@
 //
 #pragma once

+#include <vector>
+
-#include "ngraph/op/matrix_nms.hpp"
+#include "openvino/op/matrix_nms.hpp"
 #include "primitive.hpp"
-#include <vector>

 namespace cldnn {
@@ -54,7 +54,7 @@ struct matrix_nms : public primitive_base<matrix_nms> {
         attributes() {}

-        attributes(const ngraph::op::v8::MatrixNms::Attributes& attrs)
+        attributes(const ov::op::v8::MatrixNms::Attributes& attrs)
             : attributes(from(attrs.sort_result_type),
                          attrs.sort_result_across_batch,
                          attrs.score_threshold,
@@ -142,7 +142,7 @@ struct matrix_nms : public primitive_base<matrix_nms> {
                const input_info& scores,
                const input_info& second_output,
                const input_info& third_output,
-               const ngraph::op::v8::MatrixNms::Attributes& attrs)
+               const ov::op::v8::MatrixNms::Attributes& attrs)
         : primitive_base(id, {boxes, scores, second_output, third_output}),
           attribs(attrs) {}
@@ -194,23 +194,23 @@ struct matrix_nms : public primitive_base<matrix_nms> {
     }

 private:
-    static cldnn::matrix_nms::decay_function from(ngraph::op::v8::MatrixNms::DecayFunction decay) {
+    static cldnn::matrix_nms::decay_function from(ov::op::v8::MatrixNms::DecayFunction decay) {
         switch (decay) {
-        case ngraph::op::v8::MatrixNms::DecayFunction::GAUSSIAN:
+        case ov::op::v8::MatrixNms::DecayFunction::GAUSSIAN:
             return cldnn::matrix_nms::decay_function::gaussian;
-        case ngraph::op::v8::MatrixNms::DecayFunction::LINEAR:
+        case ov::op::v8::MatrixNms::DecayFunction::LINEAR:
         default:
             return cldnn::matrix_nms::decay_function::linear;
         }
     }

-    static cldnn::matrix_nms::sort_result_type from(ngraph::op::v8::MatrixNms::SortResultType type) {
+    static cldnn::matrix_nms::sort_result_type from(ov::op::v8::MatrixNms::SortResultType type) {
         switch (type) {
-        case ngraph::op::v8::MatrixNms::SortResultType::CLASSID:
+        case ov::op::v8::MatrixNms::SortResultType::CLASSID:
             return cldnn::matrix_nms::sort_result_type::class_id;
-        case ngraph::op::v8::MatrixNms::SortResultType::SCORE:
+        case ov::op::v8::MatrixNms::SortResultType::SCORE:
             return cldnn::matrix_nms::sort_result_type::score;
-        case ngraph::op::v8::MatrixNms::SortResultType::NONE:
+        case ov::op::v8::MatrixNms::SortResultType::NONE:
         default:
             return cldnn::matrix_nms::sort_result_type::none;

View File

@@ -3,13 +3,14 @@
 //
 #pragma once

+#include <utility>
+#include <vector>
+
-#include "ngraph/op/multiclass_nms.hpp"
+#include "openvino/op/multiclass_nms.hpp"
 #include "openvino/core/type/element_type.hpp"
 #include "primitive.hpp"
-#include <utility>
-#include <vector>

 namespace cldnn {

 /// @brief multiclass NMS
@@ -71,7 +72,7 @@ struct multiclass_nms : public primitive_base<multiclass_nms> {
               normalized(normalized),
               nms_eta(nms_eta) {}

-        attributes(const ngraph::op::util::MulticlassNmsBase::Attributes& attrs)
+        attributes(const ov::op::util::MulticlassNmsBase::Attributes& attrs)
             : attributes(from(attrs.sort_result_type),
                          attrs.sort_result_across_batch,
                          cldnn::element_type_to_data_type(attrs.output_type),
@@ -110,13 +111,13 @@ struct multiclass_nms : public primitive_base<multiclass_nms> {
     }

 private:
-    static sort_result_type from(const ngraph::op::util::MulticlassNmsBase::SortResultType sort_result_type) {
+    static sort_result_type from(const ov::op::util::MulticlassNmsBase::SortResultType sort_result_type) {
         switch (sort_result_type) {
-        case ngraph::op::util::MulticlassNmsBase::SortResultType::CLASSID:
+        case ov::op::util::MulticlassNmsBase::SortResultType::CLASSID:
             return sort_result_type::classid;
-        case ngraph::op::util::MulticlassNmsBase::SortResultType::SCORE:
+        case ov::op::util::MulticlassNmsBase::SortResultType::SCORE:
             return sort_result_type::score;
-        case ngraph::op::util::MulticlassNmsBase::SortResultType::NONE:
+        case ov::op::util::MulticlassNmsBase::SortResultType::NONE:
             return sort_result_type::none;
         default:
             return sort_result_type::none;

View File

@@ -4,6 +4,7 @@
 #include "intel_gpu/plugin/legacy_api_helper.hpp"

+#include "openvino/pass/serialize.hpp"
 #include "openvino/runtime/intel_gpu/properties.hpp"
 #include "intel_gpu/graph/serialization/binary_buffer.hpp"

View File

@@ -7,7 +7,6 @@
 #include "intel_gpu/runtime/itt.hpp"
 #include "xml_parse_utils.h"

-#include <description_buffer.hpp>
 #include <map>
 #include <fstream>
 #include <streambuf>
@@ -20,7 +19,6 @@
 # include <windows.h>
 #endif

-using namespace InferenceEngine;
 using namespace pugixml::utils;

 #define CheckAndReturnError(cond, errorMsg) \
@@ -198,7 +196,7 @@ bool CustomLayer::IsLegalSizeRule(const std::string & rule) {
     try {
         expr.Evaluate();
-    } catch (...) {
+    } catch (std::exception&) {
         return false;
     }
     return true;

View File

@@ -44,7 +44,7 @@ Graph::Graph(InferenceEngine::CNNNetwork& network, const RemoteContextImpl::Ptr&
     , m_config(config)
     , m_stream_id(stream_id)
     , m_state(0) {
-    m_program = std::make_shared<Program>(network, get_engine(), config, false, false, inputs, outputs);
+    m_program = std::make_shared<ProgramBuilder>(network, get_engine(), config, false, false, inputs, outputs);
     if (m_program->m_max_batch > 1)
         m_config.set_property(ov::intel_gpu::max_dynamic_batch(m_program->m_max_batch));
     Build();
@@ -56,7 +56,7 @@ Graph::Graph(cldnn::BinaryInputBuffer &ib, const RemoteContextImpl::Ptr& context
     , m_config(config)
     , m_stream_id(stream_id)
     , m_state(0) {
-    m_program = std::make_shared<Program>(get_engine(), config, inputs, outputs);
+    m_program = std::make_shared<ProgramBuilder>(get_engine(), config, inputs, outputs);
     ib >> m_program->m_max_batch;
     if (m_program->m_max_batch > 1)
         m_config.set_property(ov::intel_gpu::max_dynamic_batch(m_program->m_max_batch));
@@ -72,7 +72,7 @@ Graph::Graph(cldnn::BinaryInputBuffer &ib, const RemoteContextImpl::Ptr& context
     }
     ib >> m_program->inputLayouts;

-    Program::variables_state_info_map variablesStateInfoMap;
+    ProgramBuilder::variables_state_info_map variablesStateInfoMap;
     ib >> variablesStateInfoMap;
     for (const auto& variablesStateInfo : variablesStateInfoMap) {
         m_program->AddVariableStateInfo(variablesStateInfo.first, *variablesStateInfo.second.begin());
@@ -500,7 +500,7 @@ std::shared_ptr<ngraph::Function> Graph::GetExecGraphInfoByPrimitivesInfo(std::v
 }

 // Cache blob format:
-//     [ ov::intel_gpu::Program::inputLayouts ]
+//     [ ov::intel_gpu::ProgramBuilder::inputLayouts ]
 //     [ ov::intel_gpu::Graph::primitiveIDs ]
 //     [ ov::intel_gpu::Graph::outputDims ]
 //     [ cldnn::network ]
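Pieced together from the ib >> calls visible in this diff, the import side of this cache blob format plausibly reads the fields back in the same order (a sketch; the exact field list and any steps not shown in these hunks are assumptions):

// cldnn::BinaryInputBuffer ib opened on the cache blob
// ib >> m_program->m_max_batch;
// ib >> m_program->inputLayouts;   // [ ov::intel_gpu::ProgramBuilder::inputLayouts ]
// ib >> primitiveIDs;              // [ ov::intel_gpu::Graph::primitiveIDs ]
// ib >> outputDims;                // [ ov::intel_gpu::Graph::outputDims ]
// ... followed by the serialized cldnn::network itself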

View File

@@ -2,10 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
-#include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/adaptive_max_pool.hpp"
+#include "openvino/op/adaptive_max_pool.hpp"
+#include "openvino/op/adaptive_avg_pool.hpp"
+
+#include "intel_gpu/plugin/program_builder.hpp"
+#include "intel_gpu/plugin/common_utils.hpp"

 #include "intel_gpu/primitives/mutable_data.hpp"
 #include "intel_gpu/primitives/adaptive_pooling.hpp"
@@ -13,7 +14,7 @@
 namespace ov {
 namespace intel_gpu {

-static void CreateAdaptiveAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::AdaptiveAvgPool>& op) {
+static void CreateAdaptiveAvgPoolOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::AdaptiveAvgPool>& op) {
     validate_inputs_count(op, {2});

     const auto inputs = p.GetInputInfo(op);
@@ -25,7 +26,7 @@ static void CreateAdaptiveAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op
     p.add_primitive(*op, poolPrim);
 }

-static void CreateAdaptiveMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::AdaptiveMaxPool>& op) {
+static void CreateAdaptiveMaxPoolOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::AdaptiveMaxPool>& op) {
     validate_inputs_count(op, {2});
     OPENVINO_ASSERT(op->get_output_size() == 2, "[GPU] AdaptiveMaxPool requires 2 outputs");

View File

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

-#include "ngraph/op/batch_to_space.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/batch_to_space.hpp"
+#include "openvino/op/constant.hpp"

 #include "intel_gpu/primitives/batch_to_space.hpp"

 namespace ov {
 namespace intel_gpu {

-static void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v1::BatchToSpace>& op) {
+static void CreateBatchToSpaceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::BatchToSpace>& op) {
     validate_inputs_count(op, {4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -26,7 +26,7 @@ static void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v
     bool non_constant_input = false;
     for (size_t i = 1; i < 4; ++i) {
-        auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(i));
+        auto inConst = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(i));

         bool is_const_input = (inConst != nullptr);
         OPENVINO_ASSERT((i == 1) || (i >= 2 && non_constant_input != is_const_input),
@@ -45,7 +45,7 @@ static void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v
         p.add_primitive(*op, batchToSpacePrim);
     } else {
         for (size_t i = 1; i < 4; ++i) {
-            auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(i));
+            auto inConst = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(i));

             std::vector<int32_t> sizes = inConst->cast_vector<int32_t>();
             int32_t default_size = i == 1 ? 1 : 0;

View File

@@ -3,11 +3,11 @@
 //

 #include "openvino/core/except.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"

 #include "intel_gpu/primitives/broadcast.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
@@ -16,7 +16,7 @@
 namespace ov {
 namespace intel_gpu {

-static void CreateCommonBroadcastOp(Program& p, const std::shared_ptr<ngraph::Node>& op, const ngraph::AxisSet axis_mapping) {
+static void CreateCommonBroadcastOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op, const ov::AxisSet axis_mapping) {
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -61,9 +61,9 @@ static void CreateCommonBroadcastOp(Program& p, const std::shared_ptr<ngraph::No
     }

     ov::op::BroadcastModeSpec mode = ov::op::BroadcastType::NONE;
-    if (auto broadcast_v3 = std::dynamic_pointer_cast<ngraph::op::v3::Broadcast>(op)) {
+    if (auto broadcast_v3 = std::dynamic_pointer_cast<ov::op::v3::Broadcast>(op)) {
         mode = broadcast_v3->get_broadcast_spec();
-    } else if (auto broadcast_v1 = std::dynamic_pointer_cast<ngraph::op::v1::Broadcast>(op)) {
+    } else if (auto broadcast_v1 = std::dynamic_pointer_cast<ov::op::v1::Broadcast>(op)) {
         switch (broadcast_v1->get_broadcast_spec().m_type) {
         case ov::op::AutoBroadcastType::NONE: mode = ov::op::BroadcastType::NONE; break;
         case ov::op::AutoBroadcastType::NUMPY: mode = ov::op::BroadcastType::NUMPY; break;
@@ -93,10 +93,10 @@ static void CreateCommonBroadcastOp(Program& p, const std::shared_ptr<ngraph::No
     p.add_primitive(*op, broadcast_prim);
 }

-static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v1::Broadcast>& op) {
+static void CreateBroadcastOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Broadcast>& op) {
     validate_inputs_count(op, {2, 3});
-    if (op->get_broadcast_spec().m_type == ngraph::op::AutoBroadcastType::NONE && op->get_input_size() == 3) {
-        auto axis_mapping_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
+    if (op->get_broadcast_spec().m_type == ov::op::AutoBroadcastType::NONE && op->get_input_size() == 3) {
+        auto axis_mapping_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
         OPENVINO_ASSERT(axis_mapping_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");

         auto axis_mapping = axis_mapping_node->get_axis_set_val();
@@ -107,11 +107,11 @@ static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v1::
     }
 }

-static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v3::Broadcast>& op) {
+static void CreateBroadcastOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::Broadcast>& op) {
     validate_inputs_count(op, {2, 3});
-    ngraph::AxisSet axis_mapping;
+    ov::AxisSet axis_mapping;
     if (op->get_input_size() == 3) {
-        auto axis_mapping_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
+        auto axis_mapping_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
         OPENVINO_ASSERT(axis_mapping_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
         axis_mapping = axis_mapping_node->get_axis_set_val();

View File

@@ -2,19 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/bucketize.hpp"
 #include "intel_gpu/primitives/bucketize.hpp"

-#include <ngraph/op/bucketize.hpp>
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"

 namespace ov {
 namespace intel_gpu {

 namespace {

-void CreateBucketizeOp(Program& p, const std::shared_ptr<ngraph::op::v3::Bucketize>& op) {
+void CreateBucketizeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::Bucketize>& op) {
     validate_inputs_count(op, {2});

     const cldnn::bucketize bucketize_prim(layer_type_name_ID(op),

View File

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

-#include "ngraph/op/concat.hpp"
+#include "openvino/op/concat.hpp"

 #include "intel_gpu/primitives/concatenation.hpp"

 namespace ov {
 namespace intel_gpu {

-static void CreateConcatOp(Program& p, const std::shared_ptr<ngraph::op::v0::Concat>& op) {
+static void CreateConcatOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Concat>& op) {
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
     int64_t axis = op->get_axis();

View File

@@ -1,22 +1,21 @@
 // Copyright (C) 2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
-#include "ngraph/op/if.hpp"
-#include "ie_ngraph_utils.hpp"
+#include "openvino/op/if.hpp"
+
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/primitives/condition.hpp"
+#include "ie_ngraph_utils.hpp"

 namespace ov {
 namespace intel_gpu {

 const size_t idx_true = 0;
 const size_t idx_false = 1;

-static cldnn::condition::branch gen_branch(Program& p, const std::shared_ptr<ngraph::op::v8::If>& op, size_t idx) {
+static cldnn::condition::branch gen_branch(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::If>& op, size_t idx) {
     cldnn::condition::branch branch;
     const auto& internal_body = (idx == idx_true)? op->get_then_body() : op->get_else_body();

     InferenceEngine::CNNNetwork body_network(internal_body);
     {
         // CNNNetwork change the input/output data type to fp32 when input/output data type is fp16
@@ -42,7 +41,7 @@ static cldnn::condition::branch gen_branch(Program& p, const std::shared_ptr<ngr
         config.set_property(ov::intel_gpu::max_dynamic_batch(1));
         config.set_property(ov::intel_gpu::allow_new_shape_infer(op->is_dynamic()));

-        Program prog(body_network, p.get_engine(), config, false, false, nullptr, nullptr, p.get_task_executor(), true);
+        ProgramBuilder prog(body_network, p.get_engine(), config, false, false, nullptr, nullptr, p.get_task_executor(), true);
         branch.inner_program = prog.GetCompiledProgram();

         auto& input_map = branch.input_map;
@@ -66,7 +65,7 @@ static cldnn::condition::branch gen_branch(Program& p, const std::shared_ptr<ngr
     return branch;
 }

-static void CreateIfOp(Program& p, const std::shared_ptr<ngraph::op::v8::If>& op) {
+static void CreateIfOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::If>& op) {
     auto inputs = p.GetInputInfo(op);
     OPENVINO_ASSERT(inputs.size() >= 1, "Invalid inputs count (Not allowed no input)");
     auto compare_node_pshape = op->get_input_partial_shape(0);

View File

@@ -2,20 +2,22 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/convolution.hpp"
-#include "ngraph/op/binary_convolution.hpp"
-#include "ngraph/op/deformable_convolution.hpp"
-#include "ngraph/op/group_conv.hpp"
-#include "ngraph/op/concat.hpp"
-#include "ngraph/op/squared_difference.hpp"
-#include "ngraph/op/gather.hpp"
-#include "ngraph/op/split.hpp"
-#include "ngraph/op/variadic_split.hpp"
-#include "ngraph/op/util/op_types.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convolution.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/binary_convolution.hpp"
+#include "openvino/op/deformable_convolution.hpp"
+#include "openvino/op/group_conv.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/squared_difference.hpp"
+#include "openvino/op/gather.hpp"
+#include "openvino/op/split.hpp"
+#include "openvino/op/prelu.hpp"
+#include "openvino/op/variadic_split.hpp"
+#include "openvino/op/util/op_types.hpp"

 #include "intel_gpu/primitives/data.hpp"
 #include "intel_gpu/runtime/debug_configuration.hpp"
@@ -23,7 +25,7 @@
 namespace ov {
 namespace intel_gpu {

-static cldnn::tensor getConstTensor(const ngraph::Shape constDims) {
+static cldnn::tensor getConstTensor(const ov::Shape constDims) {
     std::vector<cldnn::tensor::value_type> shuffled_dims(constDims.size());

     // cldnn tensor c-tor expects constants be in a reversed order (x, y, z, w, u, v)
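As a minimal, self-contained illustration of the reversed-order convention this comment describes (a sketch, not the plugin's actual helper):

#include <cstdint>
#include <vector>

// cldnn expects dims reversed relative to ov::Shape, so a 4D constant
// shaped {2, 3, 4, 5} is handed to the cldnn::tensor c-tor as {5, 4, 3, 2}.
std::vector<int64_t> reverse_dims(const std::vector<int64_t>& const_dims) {
    return std::vector<int64_t>(const_dims.rbegin(), const_dims.rend());
}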
@@ -66,27 +68,27 @@ struct ConstProperties {
     bool hasGroupDimension;
 };

-static void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std::shared_ptr<ngraph::op::v0::Constant>& op, const ConstProperties& props);
+static void createClDnnConstant(ProgramBuilder& p, const ov::Shape& constDims, const std::shared_ptr<ov::op::v0::Constant>& op, const ConstProperties& props);

-static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::Constant>& op) {
-    ngraph::Shape constDims = op->get_shape();
+static void CreateConstantOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Constant>& op) {
+    ov::Shape constDims = op->get_shape();
     auto constUsers = op->get_output_target_inputs(0);
     size_t numConstUsers = constUsers.size();

-    std::unordered_map<std::shared_ptr<ngraph::op::v0::Constant>, ConstProperties> consts = {
+    std::unordered_map<std::shared_ptr<ov::op::v0::Constant>, ConstProperties> consts = {
         {op, {false, false, false}}
     };

     // handleConvWeights function is executed when one of the constant users is ConvolutionBackpropData or GroupConvolutionBackpropData.
     // In that case, we mark that constant's O and I dimensions need to be swapped.
-    auto handleConvWeights = [&op] (ngraph::Node* conv, std::unordered_map<std::shared_ptr<ngraph::op::v0::Constant>, ConstProperties>& consts,
+    auto handleConvWeights = [&op] (ov::Node* conv, std::unordered_map<std::shared_ptr<ov::op::v0::Constant>, ConstProperties>& consts,
                                     size_t& numConstUsers, bool hasGroupDimension) {
         // If constant has multiple users - create its copy and replace 'conv' weights with the copy.
         // This is to make sure that dimension change doesn't break other users of the constant node.
         // It is a shallow copy, but that's fine since in createClDnnConstant
         // every constant created here, gets memcopied to a brand new cldnn::memory.
         if (numConstUsers > 1) {
-            auto constant = std::make_shared<ngraph::op::v0::Constant>(*(op.get()));
+            auto constant = std::make_shared<ov::op::v0::Constant>(*(op.get()));
             conv->input(1).replace_source_output(constant);
             consts.insert({constant, {false, true, hasGroupDimension}});
             numConstUsers--;
@@ -97,9 +99,9 @@ static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::C
     };

     auto is_binary_eltwise = [&] (ov::Node* op) -> bool {
-        if (ngraph::op::is_binary_elementwise_arithmetic(op) ||
-            ngraph::op::is_binary_elementwise_logical(op) ||
-            ngraph::op::is_binary_elementwise_comparison(op)) {
+        if (ov::op::util::is_binary_elementwise_arithmetic(op) ||
+            ov::op::util::is_binary_elementwise_logical(op) ||
+            ov::op::util::is_binary_elementwise_comparison(op)) {
             return true;
         } else {
             return false;
@@ -116,7 +118,7 @@ static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::C
     };

     auto is_convert_into_binary_eltwise = [&] (ov::Node* op) -> bool {
-        if (ngraph::is_type<ngraph::op::v0::Convert>(op)) {
+        if (ov::is_type<ov::op::v0::Convert>(op)) {
             for (size_t i = 0; i < op->get_output_size(); ++i) {
                 auto convertUsers = op->get_output_target_inputs(i);
                 for (auto user : convertUsers) {
@@ -136,24 +138,24 @@ static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::C
     // Also check if constant users is a backprop convolution - in that case O and I need to be swapped.
     for (auto& node : constUsers) {
         auto outOp = node.get_node();
-        if (auto castedOp = dynamic_cast<ngraph::op::v0::Concat*>(outOp)) {
+        if (auto castedOp = dynamic_cast<ov::op::v0::Concat*>(outOp)) {
             if (castedOp->get_axis() == 0) {
                 consts[op].needsBatchInterpretation = constDims.size() == 1;
             }
-        } else if (((is_binary_eltwise(outOp) || ngraph::is_type<ngraph::op::v0::SquaredDifference>(outOp)) && is_all_inputs_1d(outOp)) ||
+        } else if (((is_binary_eltwise(outOp) || ov::is_type<ov::op::v0::SquaredDifference>(outOp)) && is_all_inputs_1d(outOp)) ||
                    is_convert_into_binary_eltwise(outOp)) {
             consts[op].needsBatchInterpretation = constDims.size() == 1;
-        } else if (ngraph::is_type<ngraph::op::v1::Gather>(outOp) ||
-                   ngraph::is_type<ngraph::op::v7::Gather>(outOp) ||
-                   ngraph::is_type<ngraph::op::v8::Gather>(outOp) ||
-                   ngraph::is_type<ngraph::op::v1::Split>(outOp) ||
-                   ngraph::is_type<ngraph::op::v1::VariadicSplit>(outOp)) {
+        } else if (ov::is_type<ov::op::v1::Gather>(outOp) ||
+                   ov::is_type<ov::op::v7::Gather>(outOp) ||
+                   ov::is_type<ov::op::v8::Gather>(outOp) ||
+                   ov::is_type<ov::op::v1::Split>(outOp) ||
+                   ov::is_type<ov::op::v1::VariadicSplit>(outOp)) {
             consts[op].needsBatchInterpretation = constDims.size() == 1;
-        } else if (ngraph::is_type<ngraph::op::v1::ConvolutionBackpropData>(outOp) && node.get_index() == 1) {
+        } else if (ov::is_type<ov::op::v1::ConvolutionBackpropData>(outOp) && node.get_index() == 1) {
             handleConvWeights(outOp, consts, numConstUsers, false);
-        } else if (ngraph::is_type<ngraph::op::v1::GroupConvolutionBackpropData>(outOp) && node.get_index() == 1) {
+        } else if (ov::is_type<ov::op::v1::GroupConvolutionBackpropData>(outOp) && node.get_index() == 1) {
             handleConvWeights(outOp, consts, numConstUsers, true);
-        } else if (ngraph::is_type<ngraph::op::v0::PRelu>(outOp) && node.get_index() == 1) {
+        } else if (ov::is_type<ov::op::v0::PRelu>(outOp) && node.get_index() == 1) {
             // PReLU slope tensor reshape policy
             //
             // 1. 1-dim slope is handled by 'getConstTensor' (if slope dimension is equal to the feature dimension of input).
@@ -168,12 +170,12 @@ static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::C
             if ((constDims.size() != 1 && constDims.size() < input_shape.size()) ||
                 (constDims.size() == 1 && input_shape.is_static() && static_cast<int64_t>(constDims[0]) != input_shape[1].get_length())) {
                 // Reshape 'constDims' according to the numpy broadcasting rule.
-                ngraph::Shape slope_shape(input_shape.size(), 1);
+                ov::Shape slope_shape(input_shape.size(), 1);
                 for (size_t j = 1; j <= constDims.size(); j++)
                     slope_shape[slope_shape.size() - j] = constDims[constDims.size() - j];
                 constDims = slope_shape;
             }
-        } else if (ngraph::is_type<ngraph::op::v1::GroupConvolution>(outOp) && node.get_index() == 1 && !p.use_new_shape_infer()) {
+        } else if (ov::is_type<ov::op::v1::GroupConvolution>(outOp) && node.get_index() == 1 && !p.use_new_shape_infer()) {
             auto input_shape = outOp->get_input_partial_shape(0);
             if (constDims.size() == 4 && input_shape.size() == 3) { // In case of weight dim 4 and input dim 3,
                 constDims.push_back(1);                             // The weight cldnn tensor adds 1d to the end as the input cldnn tensor does
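A worked example of the numpy-style right-aligned reshape in the PReLU branch above (illustrative numbers only): for an input of rank 4 and a slope constant with constDims = {3}, slope_shape starts as {1, 1, 1, 1}; the loop runs once (j = 1) and sets slope_shape[3] = constDims[0], so constDims becomes {1, 1, 1, 3}.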
@@ -186,7 +188,7 @@ static void CreateConstantOp(Program& p, const std::shared_ptr<ngraph::op::v0::C
     }
 }

-void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std::shared_ptr<ngraph::op::v0::Constant>& op, const ConstProperties& props) {
+void createClDnnConstant(ProgramBuilder& p, const ov::Shape& constDims, const std::shared_ptr<ov::op::v0::Constant>& op, const ConstProperties& props) {
     cldnn::tensor constTensor = getConstTensor(constDims);
     auto constFormat = cldnn::format::get_default_format(constDims.size());

View File

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

-#include "ngraph/op/convert.hpp"
-#include "ngraph/op/convert_like.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/convert_like.hpp"

 #include "intel_gpu/primitives/reorder.hpp"

 namespace ov {
 namespace intel_gpu {

-static void CreateConvertLikeOp(Program& p, const std::shared_ptr<ngraph::op::v1::ConvertLike>& op) {
+static void CreateConvertLikeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ConvertLike>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -27,7 +27,7 @@ static void CreateConvertLikeOp(Program& p, const std::shared_ptr<ngraph::op::v1
     p.add_primitive(*op, reorderPrim);
 }

-static void CreateConvertOp(Program& p, const std::shared_ptr<ngraph::op::v0::Convert>& op) {
+static void CreateConvertOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Convert>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

View File

@@ -2,17 +2,22 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/core/preprocess/input_tensor_info.hpp"
+#include "openvino/op/nv12_to_bgr.hpp"
+#include "openvino/op/nv12_to_rgb.hpp"
+#include "openvino/op/i420_to_bgr.hpp"
+#include "openvino/op/i420_to_rgb.hpp"
+
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

 #include "intel_gpu/primitives/convert_color.hpp"
 #include "intel_gpu/primitives/concatenation.hpp"
-#include "openvino/core/preprocess/input_tensor_info.hpp"

 namespace ov {
 namespace intel_gpu {

-static void CreateCommonConvertColorOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
+static void CreateCommonConvertColorOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op,
                                        const cldnn::convert_color::color_format from_color,
                                        const cldnn::convert_color::color_format to_color) {
     auto inputs = p.GetInputInfo(op);
@@ -28,7 +33,7 @@ static void CreateCommonConvertColorOp(Program& p, const std::shared_ptr<ngraph:
     if (op->get_input_node_ptr(0)->output(0).get_rt_info().count(ov::preprocess::TensorInfoMemoryType::get_type_info_static())) {
         std::string mem_type = op->get_input_node_ptr(0)->output(0).get_rt_info().at(ov::preprocess::TensorInfoMemoryType::get_type_info_static())
                                                                                  .as<ov::preprocess::TensorInfoMemoryType>().value;
-        if (mem_type.find(GPU_CONFIG_KEY(SURFACE)) != std::string::npos) {
+        if (mem_type.find(ov::intel_gpu::memory_type::surface) != std::string::npos) {
             memory_type = cldnn::convert_color::memory_type::image;
         }
     }
@@ -64,22 +69,22 @@ static void CreateCommonConvertColorOp(Program& p, const std::shared_ptr<ngraph:
     }
 }

-static void CreateNV12toRGBOp(Program& p, const std::shared_ptr<ngraph::op::v8::NV12toRGB>& op) {
+static void CreateNV12toRGBOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::NV12toRGB>& op) {
     validate_inputs_count(op, {1, 2});
     CreateCommonConvertColorOp(p, op, cldnn::convert_color::color_format::NV12, cldnn::convert_color::color_format::RGB);
 }

-static void CreateNV12toBGROp(Program& p, const std::shared_ptr<ngraph::op::v8::NV12toBGR>& op) {
+static void CreateNV12toBGROp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::NV12toBGR>& op) {
     validate_inputs_count(op, {1, 2});
     CreateCommonConvertColorOp(p, op, cldnn::convert_color::color_format::NV12, cldnn::convert_color::color_format::BGR);
 }

-static void CreateI420toRGBOp(Program& p, const std::shared_ptr<ngraph::op::v8::I420toRGB>& op) {
+static void CreateI420toRGBOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::I420toRGB>& op) {
     validate_inputs_count(op, {1, 3});
     CreateCommonConvertColorOp(p, op, cldnn::convert_color::color_format::I420, cldnn::convert_color::color_format::RGB);
 }

-static void CreateI420toBGROp(Program& p, const std::shared_ptr<ngraph::op::v8::I420toBGR>& op) {
+static void CreateI420toBGROp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::I420toBGR>& op) {
     validate_inputs_count(op, {1, 3});
     CreateCommonConvertColorOp(p, op, cldnn::convert_color::color_format::I420, cldnn::convert_color::color_format::BGR);
 }

View File

@@ -2,16 +2,16 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"

-#include "ngraph/op/convolution.hpp"
-#include "ngraph/op/binary_convolution.hpp"
-#include "ngraph/op/deformable_convolution.hpp"
-#include "ngraph/op/group_conv.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/fake_quantize.hpp"
-#include "ngraph/op/util/op_types.hpp"
+#include "openvino/op/convolution.hpp"
+#include "openvino/op/binary_convolution.hpp"
+#include "openvino/op/deformable_convolution.hpp"
+#include "openvino/op/group_conv.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/fake_quantize.hpp"
+#include "openvino/op/util/op_types.hpp"

 #include "intel_gpu/primitives/convolution.hpp"
 #include "intel_gpu/primitives/deconvolution.hpp"
@@ -22,7 +22,7 @@
 namespace ov {
 namespace intel_gpu {

-static void CreateGroupConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolution>& op) {
+static void CreateGroupConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::GroupConvolution>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -61,7 +61,7 @@ static void CreateGroupConvolutionOp(Program& p, const std::shared_ptr<ngraph::o
     p.add_primitive(*op, convPrim);
 }

-static void CreateConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::Convolution>& op) {
+static void CreateConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Convolution>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -99,7 +99,7 @@ static void CreateConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1
     p.add_primitive(*op, convPrim);
 }

-static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::ConvolutionBackpropData>& op) {
+static void CreateConvolutionBackpropDataOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ConvolutionBackpropData>& op) {
     // 3rd input is an optional output shape
     validate_inputs_count(op, {2, 3});
     auto inputs = p.GetInputInfo(op);
@@ -119,7 +119,7 @@ static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ng
     // Dimensions order of weights blob is IOYX, but
     // the selected format is OIYX by default. So we need to swap (and transpose) I and O dimensions to match the format
     // For Constant node on input transpose is not needed, because the data is transposed on const node creation
-    if ((hasConstantWeights && std::dynamic_pointer_cast<ngraph::op::v0::Constant>(weights_node) == nullptr) || !hasConstantWeights) {
+    if ((hasConstantWeights && std::dynamic_pointer_cast<ov::op::v0::Constant>(weights_node) == nullptr) || !hasConstantWeights) {
         std::string permuteName = layerName + "_cldnn_weights_permute";
         auto weights_rank = op->get_input_shape(1).size();
         std::vector<uint16_t> permute_order(weights_rank);
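A plausible sketch (an assumption, not code shown in this hunk) of how the IOYX-to-OIYX permute order described above can be produced: an identity order with the first two axes swapped, so rank 4 yields {1, 0, 2, 3}.

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Build the weights permute order: identity, then swap I and O.
// Assumes weights_rank >= 2.
std::vector<uint16_t> make_oiyx_weights_order(size_t weights_rank) {
    std::vector<uint16_t> permute_order(weights_rank);
    std::iota(permute_order.begin(), permute_order.end(), uint16_t{0});
    std::swap(permute_order[0], permute_order[1]);  // swap I and O
    return permute_order;
}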
@@ -173,7 +173,7 @@ static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ng
                                          output_padding,
                                          weights_have_group_dim);
     if (op->get_input_size() == 3) {
-        auto output_shape_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
+        auto output_shape_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
         if (output_shape_constant) {
             auto output_shape = output_shape_constant->cast_vector<int64_t>();
             ov::Shape shape(output_shape.begin(), output_shape.end());
@@ -187,7 +187,7 @@ static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ng
     }
 }

-static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ngraph::op::v1::GroupConvolutionBackpropData>& op) {
+static void CreateGroupConvolutionBackpropDataOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::GroupConvolutionBackpropData>& op) {
     // 3rd input is an optional output shape
     validate_inputs_count(op, {2, 3});
     auto inputs = p.GetInputInfo(op);
@@ -209,7 +209,7 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
     // Dimensions order of weights blob is IOYX, but
     // the selected format is OIYX by default. So we need to swap I and O dimensions to match the format.
     // For Constant node on input transpose is not needed, because the data is transposed on const node creation
-    if ((hasConstWeights && std::dynamic_pointer_cast<ngraph::op::v0::Constant>(weights_node) == nullptr) || !hasConstWeights) {
+    if ((hasConstWeights && std::dynamic_pointer_cast<ov::op::v0::Constant>(weights_node) == nullptr) || !hasConstWeights) {
         std::string permuteName = layerName + "_cldnn_weights_permute";
         auto weights_rank = op->get_input_shape(1).size();
         std::vector<uint16_t> permute_order(weights_rank);
@@ -264,7 +264,7 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
                                          output_padding,
                                          weights_have_group_dim);
     if (op->get_input_size() == 3) {
-        auto output_shape_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
+        auto output_shape_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
         if (output_shape_constant) {
             auto output_shape = output_shape_constant->cast_vector<int64_t>();
             ov::Shape shape(output_shape.begin(), output_shape.end());
@@ -278,8 +278,8 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
     }
 }

-static void DeformableConvolutionImpl(Program& p,
-                                      const std::shared_ptr<ngraph::Node>& op,
+static void DeformableConvolutionImpl(ProgramBuilder& p,
+                                      const std::shared_ptr<ov::Node>& op,
                                       const int64_t groups,
                                       const ov::Strides& strides,
                                       const ov::Strides& dilations,
@@ -351,7 +351,7 @@ static void DeformableConvolutionImpl(Program& p,
     }
 }

-static void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::DeformableConvolution>& op) {
+static void CreateDeformableConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::DeformableConvolution>& op) {
     validate_inputs_count(op, {3});
     auto strides = op->get_strides();
     auto pads_begin = op->get_pads_begin();
@@ -365,7 +365,7 @@ static void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngra
     DeformableConvolutionImpl(p, op, op->get_group(), strides, dilations, pads_begin, op->get_deformable_group());
 }

-static void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v8::DeformableConvolution>& op) {
+static void CreateDeformableConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::DeformableConvolution>& op) {
     validate_inputs_count(op, {3, 4});
     auto strides = op->get_strides();
     auto pads_begin = op->get_pads_begin();
@@ -386,7 +386,7 @@ static void CreateDeformableConvolutionOp(Program& p, const std::shared_ptr<ngra
                               op->get_bilinear_interpolation_pad());
 }

-static void CreateBinaryConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1::BinaryConvolution>& op) {
+static void CreateBinaryConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::BinaryConvolution>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

View File

@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/ctc_greedy_decoder.hpp" #include "openvino/op/ctc_greedy_decoder.hpp"
#include "ngraph/op/ctc_greedy_decoder_seq_len.hpp" #include "openvino/op/ctc_greedy_decoder_seq_len.hpp"
#include "intel_gpu/primitives/ctc_greedy_decoder.hpp" #include "intel_gpu/primitives/ctc_greedy_decoder.hpp"
#include "intel_gpu/primitives/reorder.hpp" #include "intel_gpu/primitives/reorder.hpp"
@ -18,7 +18,7 @@
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::Node>& op, bool ctc_merge_repeated) { static void CreateCommonCTCGreedyDecoderOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op, bool ctc_merge_repeated) {
validate_inputs_count(op, {2, 3}); validate_inputs_count(op, {2, 3});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
@ -30,7 +30,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
if (inputDataType == cldnn::data_types::i64) { if (inputDataType == cldnn::data_types::i64) {
// GPU primitive supports only i32 data type for 'sequence_length' and 'blank_index' inputs // GPU primitive supports only i32 data type for 'sequence_length' and 'blank_index' inputs
// so we need additional reorder if it's provided as i64 // so we need additional reorder if it's provided as i64
auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag; auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size()); auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size());
auto preprocessPrim = cldnn::reorder(reorderPrimName, auto preprocessPrim = cldnn::reorder(reorderPrimName,
inputs[portIndex], inputs[portIndex],
@ -45,12 +45,12 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
uint32_t blank_index = static_cast<uint32_t>(op->get_input_shape(0).back() - 1); uint32_t blank_index = static_cast<uint32_t>(op->get_input_shape(0).back() - 1);
if (reordered_inputs.size() == 3) { if (reordered_inputs.size() == 3) {
auto blank_index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2)); auto blank_index_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!blank_index_node) { if (!blank_index_node) {
OPENVINO_THROW("Unsupported blank_index node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported blank_index node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
} }
float val; float val;
if (ngraph::shape_size(blank_index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(blank_index_node, val)) { if (ov::shape_size(blank_index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(blank_index_node, val)) {
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
} }
blank_index = static_cast<uint32_t>(val); blank_index = static_cast<uint32_t>(val);
@ -62,8 +62,8 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
std::vector<cldnn::memory::ptr> shared_memory; std::vector<cldnn::memory::ptr> shared_memory;
if (num_output == 2) { if (num_output == 2) {
auto mutable_precision = op->get_output_element_type(1); auto mutable_precision = op->get_output_element_type(1);
if (mutable_precision == ngraph::element::i64) { if (mutable_precision == ov::element::i64) {
mutable_precision = ngraph::element::i32; mutable_precision = ov::element::i32;
} }
cldnn::layout mutableLayout = cldnn::layout( cldnn::layout mutableLayout = cldnn::layout(
@ -90,7 +90,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
tensor_from_dims(op->get_output_shape(0))); tensor_from_dims(op->get_output_shape(0)));
// GPU primitive supports only i32 as output data type // GPU primitive supports only i32 as output data type
primitive.output_data_types = {cldnn::element_type_to_data_type(ngraph::element::i32)}; primitive.output_data_types = {cldnn::element_type_to_data_type(ov::element::i32)};
if (num_output == 2) { if (num_output == 2) {
primitive.second_output = reordered_inputs.back().pid; primitive.second_output = reordered_inputs.back().pid;
@ -107,11 +107,11 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
} }
} }
static void CreateCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::op::v0::CTCGreedyDecoder>& op) { static void CreateCTCGreedyDecoderOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::CTCGreedyDecoder>& op) {
CreateCommonCTCGreedyDecoderOp(p, op, op->get_ctc_merge_repeated()); CreateCommonCTCGreedyDecoderOp(p, op, op->get_ctc_merge_repeated());
} }
static void CreateCTCGreedyDecoderSeqLenOp(Program& p, const std::shared_ptr<ngraph::op::v6::CTCGreedyDecoderSeqLen>& op) { static void CreateCTCGreedyDecoderSeqLenOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v6::CTCGreedyDecoderSeqLen>& op) {
CreateCommonCTCGreedyDecoderOp(p, op, op->get_merge_repeated()); CreateCommonCTCGreedyDecoderOp(p, op, op->get_merge_repeated());
} }
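The i64-to-i32 pre-reorder above recurs throughout this commit, since the GPU primitives accept only i32 for index-like inputs. Condensed into one sketch for reference; the helper itself is hypothetical, while ProgramBuilder, cldnn::reorder and m_preProcessTag are the real entities from the diff:

#include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/primitives/reorder.hpp"

static cldnn::input_info reorder_i64_to_i32(ov::intel_gpu::ProgramBuilder& p,
                                            const std::shared_ptr<ov::Node>& op,
                                            const cldnn::input_info& input,
                                            size_t port) {
    auto prim_name = input.pid + "_" + op->get_friendly_name() + ov::intel_gpu::ProgramBuilder::m_preProcessTag;
    auto fmt = cldnn::format::get_default_format(op->get_input_shape(port).size());
    p.add_primitive(*op, cldnn::reorder(prim_name, input, fmt, cldnn::data_types::i32));
    return cldnn::input_info(prim_name);
}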

View File

@ -2,18 +2,16 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/ctc_loss.hpp"
#include "intel_gpu/primitives/ctc_loss.hpp" #include "intel_gpu/primitives/ctc_loss.hpp"
#include "intel_gpu/plugin/program_builder.hpp"
#include <ngraph/op/ctc_loss.hpp>
#include "intel_gpu/plugin/program.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
namespace { namespace {
void CreateCTCLossOp(Program& p, const std::shared_ptr<ngraph::op::v4::CTCLoss>& op) { void CreateCTCLossOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::CTCLoss>& op) {
validate_inputs_count(op, {4, 5}); validate_inputs_count(op, {4, 5});
const cldnn::ctc_loss ctc_loss_prim(layer_type_name_ID(op), const cldnn::ctc_loss ctc_loss_prim(layer_type_name_ID(op),

View File

@ -2,18 +2,19 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "openvino/core/validation_util.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "openvino/op/cum_sum.hpp"
#include "openvino/op/constant.hpp"
#include "ngraph/op/cum_sum.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "ngraph/op/constant.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/primitives/cum_sum.hpp" #include "intel_gpu/primitives/cum_sum.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::CumSum>& op) { static void CreateCumSumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::CumSum>& op) {
validate_inputs_count(op, {1, 2}); validate_inputs_count(op, {1, 2});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
@ -23,7 +24,7 @@ static void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cum
int64_t axis = 0; int64_t axis = 0;
if (op->get_input_size() == 2) { if (op->get_input_size() == 2) {
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1)); auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
axis = axes_constant->cast_vector<int64_t>()[0]; axis = axes_constant->cast_vector<int64_t>()[0];
} }
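The freshly added validation_util.hpp include above suggests the axis is normalized before use; a self-contained sketch of the wrap-around this implies for CumSum's possibly-negative axis (an assumption about the part of the hunk not shown):

#include <cstdint>

static int64_t normalize_axis(int64_t axis, int64_t rank) {
    return axis < 0 ? axis + rank : axis;   // e.g. axis = -1, rank = 4 -> 3
}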

View File

@ -2,13 +2,12 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "openvino/core/attribute_visitor.hpp"
#include "openvino/core/node.hpp"
#include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/simple_math.hpp" #include "intel_gpu/plugin/simple_math.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/node.hpp"
#include "intel_gpu/primitives/custom_gpu_primitive.hpp" #include "intel_gpu/primitives/custom_gpu_primitive.hpp"
#include "intel_gpu/primitives/reorder.hpp" #include "intel_gpu/primitives/reorder.hpp"
@ -39,57 +38,57 @@ inline std::string vecToString<std::string>(std::vector<std::string> vec) {
return res; return res;
} }
class CustomLayerAttributeVisitor : public ngraph::AttributeVisitor { class CustomLayerAttributeVisitor : public ov::AttributeVisitor {
public: public:
CustomLayerAttributeVisitor() : m_values({}) { } CustomLayerAttributeVisitor() : m_values({}) { }
void on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) override {
OPENVINO_THROW("Attribute ", name, " can't be processed\n"); OPENVINO_THROW("Attribute ", name, " can't be processed\n");
} }
// The remaining adapter methods fall back on the void adapter if not implemented // The remaining adapter methods fall back on the void adapter if not implemented
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::string>& adapter) override {
m_values[name] = adapter.get(); m_values[name] = adapter.get();
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<bool>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) override {
m_values[name] = std::to_string(adapter.get()); m_values[name] = std::to_string(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<int64_t>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) override {
m_values[name] = std::to_string(adapter.get()); m_values[name] = std::to_string(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<double>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) override {
m_values[name] = std::to_string(adapter.get()); m_values[name] = std::to_string(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<std::string>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<std::string>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<float>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<float>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<double>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<double>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int8_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int8_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int16_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int16_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int32_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int32_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<int64_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<uint8_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<uint8_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<uint16_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<uint16_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<uint32_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<uint32_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<uint64_t>>& adapter) override { void on_adapter(const std::string& name, ov::ValueAccessor<std::vector<uint64_t>>& adapter) override {
m_values[name] = vecToString(adapter.get()); m_values[name] = vecToString(adapter.get());
} }
@ -101,7 +100,7 @@ protected:
std::map<std::string, std::string> m_values; std::map<std::string, std::string> m_values;
}; };
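How a visitor like this is typically driven (a sketch; values() is a hypothetical accessor for the protected m_values map shown above, while visit_attributes is the real ov::Node entry point):

CustomLayerAttributeVisitor visitor;
op->visit_attributes(visitor);
for (const auto& kv : visitor.values()) {
    // kv.first is the attribute name, kv.second its stringified value --
    // ready to be handed to the custom kernel as a build-time definition.
}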
void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomLayerPtr customLayer) { void CreateCustomOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op, CustomLayerPtr customLayer) {
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
@ -141,7 +140,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
if (param.portIndex < static_cast<int>(inputs.size()) && reordered_inputs[param.portIndex].pid.empty()) { if (param.portIndex < static_cast<int>(inputs.size()) && reordered_inputs[param.portIndex].pid.empty()) {
// todo: add support for multiple reorders of the same input? (read as bfyx for one arg and yxfb for another) // todo: add support for multiple reorders of the same input? (read as bfyx for one arg and yxfb for another)
if (param.format != cldnn::format::any) { if (param.format != cldnn::format::any) {
auto reorderPrimName = inputs[param.portIndex].pid + "_" + op->get_friendly_name() + Program::m_preCustomLayerTag; auto reorderPrimName = inputs[param.portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preCustomLayerTag;
auto preprocessPrim = cldnn::reorder( auto preprocessPrim = cldnn::reorder(
reorderPrimName, reorderPrimName,
inputs[param.portIndex], inputs[param.portIndex],
@ -234,7 +233,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
auto prevLayerName = genericLayerName; auto prevLayerName = genericLayerName;
if (outputLayout.format != cldnn::format::any) { if (outputLayout.format != cldnn::format::any) {
// Handle output reorder // Handle output reorder
auto reorderPrimName = genericLayerName + Program::m_postCustomLayerTag; auto reorderPrimName = genericLayerName + ProgramBuilder::m_postCustomLayerTag;
p.add_primitive(*op, cldnn::reorder(reorderPrimName, p.add_primitive(*op, cldnn::reorder(reorderPrimName,
cldnn::input_info(genericLayerName), cldnn::input_info(genericLayerName),
cldnn::format::get_default_format(op->get_output_shape(0).size()), cldnn::format::get_default_format(op->get_output_shape(0).size()),

View File

@ -2,28 +2,28 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/depth_to_space.hpp" #include "openvino/op/depth_to_space.hpp"
#include "intel_gpu/primitives/depth_to_space.hpp" #include "intel_gpu/primitives/depth_to_space.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static cldnn::depth_to_space_mode GetDepthMode(ngraph::op::v0::DepthToSpace::DepthToSpaceMode mode) { static cldnn::depth_to_space_mode GetDepthMode(ov::op::v0::DepthToSpace::DepthToSpaceMode mode) {
switch (mode) { switch (mode) {
case ngraph::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST: case ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST:
return cldnn::depth_to_space_mode::blocks_first; return cldnn::depth_to_space_mode::blocks_first;
case ngraph::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST: case ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST:
return cldnn::depth_to_space_mode::depth_first; return cldnn::depth_to_space_mode::depth_first;
default: OPENVINO_THROW("Unsupported DepthToSpaceMode value: ", static_cast<int>(mode)); default: OPENVINO_THROW("Unsupported DepthToSpaceMode value: ", static_cast<int>(mode));
} }
return cldnn::depth_to_space_mode::blocks_first; return cldnn::depth_to_space_mode::blocks_first;
} }
static void CreateDepthToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v0::DepthToSpace>& op) { static void CreateDepthToSpaceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::DepthToSpace>& op) {
validate_inputs_count(op, {1}); validate_inputs_count(op, {1});
auto inputPrimitives = p.GetInputInfo(op); auto inputPrimitives = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/detection_output.hpp" #include "openvino/op/detection_output.hpp"
#include "intel_gpu/primitives/detection_output.hpp" #include "intel_gpu/primitives/detection_output.hpp"
@ -27,8 +27,8 @@ static cldnn::prior_box_code_type PriorBoxCodeFromString(const std::string& str)
return cldnn::prior_box_code_type::corner; return cldnn::prior_box_code_type::corner;
} }
static void CreateCommonDetectionOutputOp(Program& p, static void CreateCommonDetectionOutputOp(ProgramBuilder& p,
const std::shared_ptr<ngraph::Node>& op, const std::shared_ptr<ov::Node>& op,
const ov::op::util::DetectionOutputBase::AttributesBase& attrs, const ov::op::util::DetectionOutputBase::AttributesBase& attrs,
int num_classes) { int num_classes) {
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
@ -80,14 +80,14 @@ static void CreateCommonDetectionOutputOp(Program& p,
p.add_primitive(*op, detectionPrim); p.add_primitive(*op, detectionPrim);
} }
static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::v0::DetectionOutput>& op) { static void CreateDetectionOutputOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::DetectionOutput>& op) {
validate_inputs_count(op, {3}); validate_inputs_count(op, {3});
auto attrs = op->get_attrs(); auto attrs = op->get_attrs();
CreateCommonDetectionOutputOp(p, op, attrs, attrs.num_classes); CreateCommonDetectionOutputOp(p, op, attrs, attrs.num_classes);
} }
static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::v8::DetectionOutput>& op) { static void CreateDetectionOutputOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::DetectionOutput>& op) {
validate_inputs_count(op, {3}); validate_inputs_count(op, {3});
CreateCommonDetectionOutputOp(p, op, op->get_attrs(), -1); CreateCommonDetectionOutputOp(p, op, op->get_attrs(), -1);

View File

@ -2,19 +2,24 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/constant.hpp"
#include "openvino/op/dft.hpp"
#include "openvino/op/idft.hpp"
#include "openvino/op/rdft.hpp"
#include "openvino/op/irdft.hpp"
#include "openvino/core/validation_util.hpp"
#include <intel_gpu/plugin/common_utils.hpp> #include <intel_gpu/plugin/common_utils.hpp>
#include <intel_gpu/plugin/program.hpp> #include "intel_gpu/plugin/program_builder.hpp"
#include <intel_gpu/primitives/dft.hpp> #include <intel_gpu/primitives/dft.hpp>
#include <ngraph/op/constant.hpp>
#include <ngraph/op/dft.hpp>
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
namespace { namespace {
void createDft(Program& p, void createDft(ProgramBuilder& p,
const std::shared_ptr<ngraph::Node>& op, const std::shared_ptr<ov::Node>& op,
cldnn::dft_direction direction, cldnn::dft_direction direction,
cldnn::dft_mode mode) { cldnn::dft_mode mode) {
validate_inputs_count(op, {2, 3}); validate_inputs_count(op, {2, 3});
@ -24,7 +29,7 @@ void createDft(Program& p,
const auto& friendly_name = op->get_friendly_name(); const auto& friendly_name = op->get_friendly_name();
const auto& out_shape = op->get_output_shape(0); const auto& out_shape = op->get_output_shape(0);
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1)); auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", friendly_name, " (", op->get_type_name(), ")"); OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", friendly_name, " (", op->get_type_name(), ")");
auto axes = axes_constant->cast_vector<int64_t>(); auto axes = axes_constant->cast_vector<int64_t>();
uint8_t axis_correction = static_cast<uint8_t>(op->get_input_shape(0).size()); uint8_t axis_correction = static_cast<uint8_t>(op->get_input_shape(0).size());
@ -37,7 +42,7 @@ void createDft(Program& p,
std::vector<int64_t> signal_size; std::vector<int64_t> signal_size;
if (op->get_input_size() == 3) { if (op->get_input_size() == 3) {
auto signal_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2)); auto signal_size_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
OPENVINO_ASSERT(signal_size_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", friendly_name, " (", op->get_type_name(), ")"); OPENVINO_ASSERT(signal_size_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", friendly_name, " (", op->get_type_name(), ")");
signal_size = signal_size_constant->cast_vector<int64_t>(); signal_size = signal_size_constant->cast_vector<int64_t>();
} }
@ -47,19 +52,19 @@ void createDft(Program& p,
p.add_primitive(*op, prim); p.add_primitive(*op, prim);
} }
void CreateDFTOp(Program& p, const std::shared_ptr<ngraph::op::v7::DFT>& op) { void CreateDFTOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v7::DFT>& op) {
createDft(p, op, cldnn::dft_direction::forward, cldnn::dft_mode::complex); createDft(p, op, cldnn::dft_direction::forward, cldnn::dft_mode::complex);
} }
void CreateIDFTOp(Program& p, const std::shared_ptr<ngraph::op::v7::IDFT>& op) { void CreateIDFTOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v7::IDFT>& op) {
createDft(p, op, cldnn::dft_direction::inverse, cldnn::dft_mode::complex); createDft(p, op, cldnn::dft_direction::inverse, cldnn::dft_mode::complex);
} }
void CreateRDFTOp(Program& p, const std::shared_ptr<ngraph::op::v9::RDFT>& op) { void CreateRDFTOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v9::RDFT>& op) {
createDft(p, op, cldnn::dft_direction::forward, cldnn::dft_mode::real); createDft(p, op, cldnn::dft_direction::forward, cldnn::dft_mode::real);
} }
void CreateIRDFTOp(Program& p, const std::shared_ptr<ngraph::op::v9::IRDFT>& op) { void CreateIRDFTOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v9::IRDFT>& op) {
createDft(p, op, cldnn::dft_direction::inverse, cldnn::dft_mode::real); createDft(p, op, cldnn::dft_direction::inverse, cldnn::dft_mode::real);
} }
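The axis_correction variable above hints at how negative FFT axes are wrapped; a self-contained sketch under the assumption that complex tensors carry a trailing size-2 (re, im) dimension, so their axes wrap around rank - 1 rather than rank:

#include <cstdint>
#include <vector>

static void wrap_dft_axes(std::vector<int64_t>& axes, size_t input_rank, bool complex_input) {
    const int64_t correction = static_cast<int64_t>(complex_input ? input_rank - 1 : input_rank);
    for (auto& axis : axes)
        if (axis < 0)
            axis += correction;   // e.g. axis = -1 becomes correction - 1
}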

View File

@ -2,28 +2,28 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "transformations/utils/utils.hpp" #include "transformations/utils/utils.hpp"
#include "ngraph/op/add.hpp" #include "openvino/op/add.hpp"
#include "ngraph/op/multiply.hpp" #include "openvino/op/multiply.hpp"
#include "ngraph/op/maximum.hpp" #include "openvino/op/maximum.hpp"
#include "ngraph/op/minimum.hpp" #include "openvino/op/minimum.hpp"
#include "ngraph/op/subtract.hpp" #include "openvino/op/subtract.hpp"
#include "ngraph/op/divide.hpp" #include "openvino/op/divide.hpp"
#include "ngraph/op/squared_difference.hpp" #include "openvino/op/squared_difference.hpp"
#include "ngraph/op/equal.hpp" #include "openvino/op/equal.hpp"
#include "ngraph/op/not_equal.hpp" #include "openvino/op/not_equal.hpp"
#include "ngraph/op/less.hpp" #include "openvino/op/less.hpp"
#include "ngraph/op/less_eq.hpp" #include "openvino/op/less_eq.hpp"
#include "ngraph/op/greater.hpp" #include "openvino/op/greater.hpp"
#include "ngraph/op/greater_eq.hpp" #include "openvino/op/greater_eq.hpp"
#include "ngraph/op/and.hpp" #include "openvino/op/logical_and.hpp"
#include "ngraph/op/or.hpp" #include "openvino/op/logical_or.hpp"
#include "ngraph/op/xor.hpp" #include "openvino/op/xor.hpp"
#include "ngraph/op/power.hpp" #include "openvino/op/power.hpp"
#include "ngraph/op/floor_mod.hpp" #include "openvino/op/floor_mod.hpp"
#include "intel_gpu/primitives/activation.hpp" #include "intel_gpu/primitives/activation.hpp"
#include "intel_gpu/primitives/eltwise.hpp" #include "intel_gpu/primitives/eltwise.hpp"
@ -33,8 +33,8 @@
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
void CreateElementwiseOp(Program& p, void CreateElementwiseOp(ProgramBuilder& p,
const std::shared_ptr<ngraph::Node>& op, const std::shared_ptr<ov::Node>& op,
cldnn::eltwise_mode mode, cldnn::eltwise_mode mode,
std::vector<float> coefficients, std::vector<float> coefficients,
bool pythondiv) { bool pythondiv) {
@ -91,75 +91,75 @@ void CreateElementwiseOp(Program& p,
p.add_primitive(*op, eltwisePrim); p.add_primitive(*op, eltwisePrim);
} }
static void CreateAddOp(Program& p, const std::shared_ptr<ngraph::op::v1::Add>& op) { static void CreateAddOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Add>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::sum); CreateElementwiseOp(p, op, cldnn::eltwise_mode::sum);
} }
static void CreateMultiplyOp(Program& p, const std::shared_ptr<ngraph::op::v1::Multiply>& op) { static void CreateMultiplyOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Multiply>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::prod); CreateElementwiseOp(p, op, cldnn::eltwise_mode::prod);
} }
static void CreateMaximumOp(Program& p, const std::shared_ptr<ngraph::op::v1::Maximum>& op) { static void CreateMaximumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Maximum>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::max); CreateElementwiseOp(p, op, cldnn::eltwise_mode::max);
} }
static void CreateMinimumOp(Program& p, const std::shared_ptr<ngraph::op::v1::Minimum>& op) { static void CreateMinimumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Minimum>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::min); CreateElementwiseOp(p, op, cldnn::eltwise_mode::min);
} }
static void CreateSubtractOp(Program& p, const std::shared_ptr<ngraph::op::v1::Subtract>& op) { static void CreateSubtractOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Subtract>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::sub); CreateElementwiseOp(p, op, cldnn::eltwise_mode::sub);
} }
static void CreateDivideOp(Program& p, const std::shared_ptr<ngraph::op::v1::Divide>& op) { static void CreateDivideOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Divide>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::div, {}, op->is_pythondiv()); CreateElementwiseOp(p, op, cldnn::eltwise_mode::div, {}, op->is_pythondiv());
} }
static void CreateSquaredDifferenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::SquaredDifference>& op) { static void CreateSquaredDifferenceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::SquaredDifference>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::squared_diff); CreateElementwiseOp(p, op, cldnn::eltwise_mode::squared_diff);
} }
static void CreateEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::Equal>& op) { static void CreateEqualOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Equal>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::eq); CreateElementwiseOp(p, op, cldnn::eltwise_mode::eq);
} }
static void CreateNotEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::NotEqual>& op) { static void CreateNotEqualOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::NotEqual>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::ne); CreateElementwiseOp(p, op, cldnn::eltwise_mode::ne);
} }
static void CreateLessOp(Program& p, const std::shared_ptr<ngraph::op::v1::Less>& op) { static void CreateLessOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Less>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::lt); CreateElementwiseOp(p, op, cldnn::eltwise_mode::lt);
} }
static void CreateLessEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::LessEqual>& op) { static void CreateLessEqualOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::LessEqual>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::le); CreateElementwiseOp(p, op, cldnn::eltwise_mode::le);
} }
static void CreateGreaterOp(Program& p, const std::shared_ptr<ngraph::op::v1::Greater>& op) { static void CreateGreaterOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Greater>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::gt); CreateElementwiseOp(p, op, cldnn::eltwise_mode::gt);
} }
static void CreateGreaterEqualOp(Program& p, const std::shared_ptr<ngraph::op::v1::GreaterEqual>& op) { static void CreateGreaterEqualOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::GreaterEqual>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::ge); CreateElementwiseOp(p, op, cldnn::eltwise_mode::ge);
} }
static void CreateLogicalAndOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalAnd>& op) { static void CreateLogicalAndOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::LogicalAnd>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_and); CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_and);
} }
static void CreateLogicalOrOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalOr>& op) { static void CreateLogicalOrOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::LogicalOr>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_or); CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_or);
} }
static void CreateLogicalXorOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalXor>& op) { static void CreateLogicalXorOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::LogicalXor>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_xor); CreateElementwiseOp(p, op, cldnn::eltwise_mode::logic_xor);
} }
static void CreatePowerOp(Program& p, const std::shared_ptr<ngraph::op::v1::Power>& op) { static void CreatePowerOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Power>& op) {
validate_inputs_count(op, {2}); validate_inputs_count(op, {2});
auto power_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)); auto power_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
if (power_node) { if (power_node) {
if (ngraph::shape_size(power_node->get_output_shape(0)) == 1) { if (ov::shape_size(power_node->get_output_shape(0)) == 1) {
float pow; float pow;
if (!ov::op::util::get_single_value(power_node, pow)) if (!ov::op::util::get_single_value(power_node, pow))
OPENVINO_THROW("Invalid parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Invalid parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
@ -170,26 +170,26 @@ static void CreatePowerOp(Program& p, const std::shared_ptr<ngraph::op::v1::Powe
CreateElementwiseOp(p, op, cldnn::eltwise_mode::pow); CreateElementwiseOp(p, op, cldnn::eltwise_mode::pow);
} }
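The hunk above elides Power's scalar fast path; a condensed sketch of it (assumption: a one-element constant exponent is lowered to the activation primitive included at the top of this file instead of a two-input eltwise):

auto power_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
float pow_val = 1.0f;
if (power_node && ov::shape_size(power_node->get_output_shape(0)) == 1 &&
    ov::op::util::get_single_value(power_node, pow_val)) {
    p.add_primitive(*op, cldnn::activation(layerName, inputs[0],
                                           cldnn::activation_func::pow,
                                           {pow_val, 0.0f}));  // single-primitive pow(x, c)
    return;
}
CreateElementwiseOp(p, op, cldnn::eltwise_mode::pow);           // general two-input case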
static void CreateFloorModOp(Program& p, const std::shared_ptr<ngraph::op::v1::FloorMod>& op) { static void CreateFloorModOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::FloorMod>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::floor_mod); CreateElementwiseOp(p, op, cldnn::eltwise_mode::floor_mod);
} }
static void CreateModOp(Program& p, const std::shared_ptr<ngraph::op::v1::Mod>& op) { static void CreateModOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Mod>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::mod); CreateElementwiseOp(p, op, cldnn::eltwise_mode::mod);
} }
static void CreateIsFiniteOp(Program& p, const std::shared_ptr<ngraph::op::v10::IsFinite>& op) { static void CreateIsFiniteOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v10::IsFinite>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::is_finite); CreateElementwiseOp(p, op, cldnn::eltwise_mode::is_finite);
} }
static void CreateIsInfOp(Program& p, const std::shared_ptr<ngraph::op::v10::IsInf>& op) { static void CreateIsInfOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v10::IsInf>& op) {
const auto& attributes = op->get_attributes(); const auto& attributes = op->get_attributes();
const auto detect_negative = static_cast<float>(attributes.detect_negative); const auto detect_negative = static_cast<float>(attributes.detect_negative);
const auto detect_positive = static_cast<float>(attributes.detect_positive); const auto detect_positive = static_cast<float>(attributes.detect_positive);
CreateElementwiseOp(p, op, cldnn::eltwise_mode::is_inf, {detect_negative, detect_positive}); CreateElementwiseOp(p, op, cldnn::eltwise_mode::is_inf, {detect_negative, detect_positive});
} }
static void CreateIsNaNOp(Program& p, const std::shared_ptr<ngraph::op::v10::IsNaN>& op) { static void CreateIsNaNOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v10::IsNaN>& op) {
CreateElementwiseOp(p, op, cldnn::eltwise_mode::is_nan); CreateElementwiseOp(p, op, cldnn::eltwise_mode::is_nan);
} }

View File

@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/embedding_segments_sum.hpp" #include "openvino/op/embedding_segments_sum.hpp"
#include "ngraph/op/embeddingbag_offsets_sum.hpp" #include "openvino/op/embeddingbag_offsets_sum.hpp"
#include "ngraph/op/embeddingbag_packedsum.hpp" #include "openvino/op/embeddingbag_packedsum.hpp"
#include "intel_gpu/primitives/embedding_bag.hpp" #include "intel_gpu/primitives/embedding_bag.hpp"
#include "intel_gpu/primitives/reorder.hpp" #include "intel_gpu/primitives/reorder.hpp"
@ -17,18 +17,18 @@
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingBagOffsetsSum>& op) { static void CreateEmbeddingBagOffsetsSumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::EmbeddingBagOffsetsSum>& op) {
validate_inputs_count(op, {3, 4, 5}); validate_inputs_count(op, {3, 4, 5});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
int32_t defaultIndex = -1; int32_t defaultIndex = -1;
if (inputs.size() > 3) { if (inputs.size() > 3) {
auto index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(3)); auto index_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(3));
OPENVINO_ASSERT(index_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_ASSERT(index_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
float val; float val;
if (ngraph::shape_size(index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(index_node, val)) if (ov::shape_size(index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(index_node, val))
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
defaultIndex = static_cast<int32_t>(val); defaultIndex = static_cast<int32_t>(val);
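This scalar-Constant idiom also appears above for blank_index and below for EmbeddingSegmentsSum; folded into one hypothetical helper for reference (all calls are the ones used in the diff):

static int32_t get_scalar_i32(const std::shared_ptr<ov::Node>& op, size_t port) {
    auto c = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(port));
    OPENVINO_ASSERT(c != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name());
    float val = 0.f;
    OPENVINO_ASSERT(ov::shape_size(c->get_output_shape(0)) == 1 && ov::op::util::get_single_value(c, val),
                    "[GPU] Unsupported parameter size in ", op->get_friendly_name());
    return static_cast<int32_t>(val);
}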
@ -43,7 +43,7 @@ static void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngr
if (((portIndex == 1) || (portIndex == 2)) && (inputDataType == cldnn::data_types::i64)) { if (((portIndex == 1) || (portIndex == 2)) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for indices inputs, // GPU primitive supports only i32 data type for indices inputs,
// so we need additional reorders if they are provided as i64 // so we need additional reorders if they are provided as i64
auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag; auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size()); auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size());
auto preprocessPrim = cldnn::reorder(reorderPrimName, auto preprocessPrim = cldnn::reorder(reorderPrimName,
inputs[portIndex], inputs[portIndex],
@ -65,7 +65,7 @@ static void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngr
p.add_primitive(*op, embeddingBagPrim); p.add_primitive(*op, embeddingBagPrim);
} }
static void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingBagPackedSum>& op) { static void CreateEmbeddingBagPackedSumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::EmbeddingBagPackedSum>& op) {
validate_inputs_count(op, {2, 3}); validate_inputs_count(op, {2, 3});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
@ -78,7 +78,7 @@ static void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngra
if ((portIndex == 1) && (inputDataType == cldnn::data_types::i64)) { if ((portIndex == 1) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for indices input, // GPU primitive supports only i32 data type for indices input,
// so we need additional reorder if it's provided as i64 // so we need additional reorder if it's provided as i64
auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag; auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size()); auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size());
auto preprocessPrim = cldnn::reorder(reorderPrimName, auto preprocessPrim = cldnn::reorder(reorderPrimName,
inputs[portIndex], inputs[portIndex],
@ -100,7 +100,7 @@ static void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngra
p.add_primitive(*op, embeddingBagPrim); p.add_primitive(*op, embeddingBagPrim);
} }
static void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngraph::op::v3::EmbeddingSegmentsSum>& op) { static void CreateEmbeddingSegmentsSumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::EmbeddingSegmentsSum>& op) {
validate_inputs_count(op, {4, 5, 6}); validate_inputs_count(op, {4, 5, 6});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
@ -110,11 +110,11 @@ static void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngrap
int32_t defaultIndex = -1; int32_t defaultIndex = -1;
// port of default_index is 4 by default, but we removed "num_segments" above, so now it's equal to 3 // port of default_index is 4 by default, but we removed "num_segments" above, so now it's equal to 3
if (inputs.size() > 3) { if (inputs.size() > 3) {
auto index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(4)); auto index_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(4));
OPENVINO_ASSERT(index_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_ASSERT(index_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
float val; float val;
if (ngraph::shape_size(index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(index_node, val)) if (ov::shape_size(index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(index_node, val))
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
defaultIndex = static_cast<int32_t>(val); defaultIndex = static_cast<int32_t>(val);
@ -129,7 +129,7 @@ static void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngrap
if (((portIndex == 1) || (portIndex == 2)) && (inputDataType == cldnn::data_types::i64)) { if (((portIndex == 1) || (portIndex == 2)) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for indices inputs, // GPU primitive supports only i32 data type for indices inputs,
// so we need additional reorders if they are provided as i64 // so we need additional reorders if they are provided as i64
auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag; auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size()); auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size());
auto preprocessPrim = cldnn::reorder(reorderPrimName, auto preprocessPrim = cldnn::reorder(reorderPrimName,
inputs[portIndex], inputs[portIndex],

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "ngraph/op/experimental_detectron_detection_output.hpp" #include "openvino/op/experimental_detectron_detection_output.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/primitives/experimental_detectron_detection_output.hpp" #include "intel_gpu/primitives/experimental_detectron_detection_output.hpp"
#include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/mutable_data.hpp"
@ -13,8 +13,8 @@ namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateExperimentalDetectronDetectionOutputOp( static void CreateExperimentalDetectronDetectionOutputOp(
Program& p, ProgramBuilder& p,
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronDetectionOutput>& op) { const std::shared_ptr<ov::op::v6::ExperimentalDetectronDetectionOutput>& op) {
validate_inputs_count(op, {4}); validate_inputs_count(op, {4});
if (op->get_output_size() != 3) { if (op->get_output_size() != 3) {

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/experimental_detectron_generate_proposals.hpp" #include "openvino/op/experimental_detectron_generate_proposals.hpp"
#include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/mutable_data.hpp"
#include "intel_gpu/primitives/experimental_detectron_generate_proposals_single_image.hpp" #include "intel_gpu/primitives/experimental_detectron_generate_proposals_single_image.hpp"
@ -14,8 +14,8 @@ namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateExperimentalDetectronGenerateProposalsSingleImageOp( static void CreateExperimentalDetectronGenerateProposalsSingleImageOp(
Program& p, ProgramBuilder& p,
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>& op) { const std::shared_ptr<ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>& op) {
validate_inputs_count(op, {4}); validate_inputs_count(op, {4});
if (op->get_output_size() != 2) { if (op->get_output_size() != 2) {
OPENVINO_THROW("ExperimentalDetectronGenerateProposalsSingleImage requires 2 outputs"); OPENVINO_THROW("ExperimentalDetectronGenerateProposalsSingleImage requires 2 outputs");
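The mutable_data includes above point at how the extra outputs of such multi-output ops are exposed; a sketch under the assumption that ProgramBuilder::get_engine() exists and each additional output is backed by an engine allocation:

auto out_layout = cldnn::layout(cldnn::element_type_to_data_type(op->get_output_element_type(1)),
                                cldnn::format::get_default_format(op->get_output_shape(1).size()),
                                tensor_from_dims(op->get_output_shape(1)));
auto shared_mem = p.get_engine().allocate_memory(out_layout);
p.add_primitive(*op, cldnn::mutable_data(layer_type_name_ID(op) + ".out1", shared_mem));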

View File

@ -3,9 +3,9 @@
// //
#include <intel_gpu/plugin/common_utils.hpp> #include <intel_gpu/plugin/common_utils.hpp>
#include <intel_gpu/plugin/program.hpp> #include "intel_gpu/plugin/program_builder.hpp"
#include <intel_gpu/primitives/experimental_detectron_prior_grid_generator.hpp> #include <intel_gpu/primitives/experimental_detectron_prior_grid_generator.hpp>
#include <ngraph/op/experimental_detectron_prior_grid_generator.hpp> #include "openvino/op/experimental_detectron_prior_grid_generator.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
@ -18,8 +18,8 @@ cldnn::tensor mkTensor(const ov::Shape& shape) {
} }
static void CreateExperimentalDetectronPriorGridGeneratorOp( static void CreateExperimentalDetectronPriorGridGeneratorOp(
Program& p, ProgramBuilder& p,
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronPriorGridGenerator>& op) { const std::shared_ptr<ov::op::v6::ExperimentalDetectronPriorGridGenerator>& op) {
validate_inputs_count(op, {3}); validate_inputs_count(op, {3});
cldnn::tensor outTensor = mkTensor(op->get_output_shape(0)); cldnn::tensor outTensor = mkTensor(op->get_output_shape(0));
auto outDataType = cldnn::element_type_to_data_type(op->get_output_element_type(0)); auto outDataType = cldnn::element_type_to_data_type(op->get_output_element_type(0));

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/experimental_detectron_roi_feature.hpp" #include "openvino/op/experimental_detectron_roi_feature.hpp"
#include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/mutable_data.hpp"
#include "intel_gpu/primitives/experimental_detectron_roi_feature_extractor.hpp" #include "intel_gpu/primitives/experimental_detectron_roi_feature_extractor.hpp"
@ -13,7 +13,8 @@
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateExperimentalDetectronROIFeatureExtractorOp(Program& p, const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronROIFeatureExtractor>& op) { static void CreateExperimentalDetectronROIFeatureExtractorOp(ProgramBuilder& p,
const std::shared_ptr<ov::op::v6::ExperimentalDetectronROIFeatureExtractor>& op) {
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op) + ".out0"; std::string layerName = layer_type_name_ID(op) + ".out0";

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/experimental_detectron_topkrois.hpp" #include "openvino/op/experimental_detectron_topkrois.hpp"
#include "intel_gpu/primitives/experimental_detectron_topk_rois.hpp" #include "intel_gpu/primitives/experimental_detectron_topk_rois.hpp"
#include "intel_gpu/primitives/arg_max_min.hpp" #include "intel_gpu/primitives/arg_max_min.hpp"
@ -17,8 +17,8 @@ namespace {
using namespace cldnn; using namespace cldnn;
void CreateExperimentalDetectronTopKROIsOp(Program &p, void CreateExperimentalDetectronTopKROIsOp(ProgramBuilder &p,
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronTopKROIs> &op) { const std::shared_ptr<ov::op::v6::ExperimentalDetectronTopKROIs> &op) {
validate_inputs_count(op, {2}); validate_inputs_count(op, {2});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
auto max_rois = static_cast<uint32_t>(op->get_max_rois()); auto max_rois = static_cast<uint32_t>(op->get_max_rois());

View File

@ -2,28 +2,28 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/extractimagepatches.hpp" #include "openvino/op/extractimagepatches.hpp"
#include "intel_gpu/primitives/extract_image_patches.hpp" #include "intel_gpu/primitives/extract_image_patches.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static inline std::string PadToString(ngraph::op::PadType pad) { static inline std::string PadToString(ov::op::PadType pad) {
switch (pad) { switch (pad) {
case ngraph::op::PadType::SAME_UPPER: return "same_upper"; case ov::op::PadType::SAME_UPPER: return "same_upper";
case ngraph::op::PadType::SAME_LOWER: return "same_lower"; case ov::op::PadType::SAME_LOWER: return "same_lower";
case ngraph::op::PadType::VALID: return "valid"; case ov::op::PadType::VALID: return "valid";
default: OPENVINO_THROW("Unsupported pad type in ExtractImagePatches primitive ", pad); default: OPENVINO_THROW("Unsupported pad type in ExtractImagePatches primitive ", pad);
} }
return ""; return "";
} }
static void CreateExtractImagePatchesOp(Program& p, const std::shared_ptr<ngraph::op::v3::ExtractImagePatches>& op) { static void CreateExtractImagePatchesOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ExtractImagePatches>& op) {
validate_inputs_count(op, {1}); validate_inputs_count(op, {1});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);

View File

@ -2,12 +2,13 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "ngraph/op/eye.hpp" #include "openvino/op/eye.hpp"
#include "openvino/op/constant.hpp"
#include <memory> #include <memory>
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/primitives/eye.hpp" #include "intel_gpu/primitives/eye.hpp"
#include "intel_gpu/runtime/layout.hpp" #include "intel_gpu/runtime/layout.hpp"
@ -16,19 +17,19 @@ namespace intel_gpu {
namespace { namespace {
static void CreateEyeOp(Program& p, const std::shared_ptr<ngraph::op::v9::Eye>& op) { static void CreateEyeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v9::Eye>& op) {
validate_inputs_count(op, {3, 4}); validate_inputs_count(op, {3, 4});
const InferenceEngine::SizeVector& output_shapes = op->get_output_shape(0); const ov::Shape& output_shapes = op->get_output_shape(0);
auto os_sz = output_shapes.size(); auto os_sz = output_shapes.size();
OPENVINO_ASSERT(2 <= os_sz && os_sz <= 5, "Incorrect output size: ", os_sz, " in op ", op->get_friendly_name()); OPENVINO_ASSERT(2 <= os_sz && os_sz <= 5, "Incorrect output size: ", os_sz, " in op ", op->get_friendly_name());
size_t dim_size = std::max(os_sz, static_cast<size_t>(4)); size_t dim_size = std::max(os_sz, static_cast<size_t>(4));
InferenceEngine::SizeVector dims(dim_size, 1); ov::Shape dims(dim_size, 1);
for (size_t i = dim_size, j = os_sz; i > 0 && j > 0; --i, --j) { for (size_t i = dim_size, j = os_sz; i > 0 && j > 0; --i, --j) {
dims[i - 1] = output_shapes[j - 1]; dims[i - 1] = output_shapes[j - 1];
} }
const ngraph::op::v0::Constant* constant = dynamic_cast<const ngraph::op::v0::Constant*>(op->get_input_node_ptr(2)); const ov::op::v0::Constant* constant = dynamic_cast<ov::op::v0::Constant*>(op->get_input_node_ptr(2));
OPENVINO_ASSERT(constant != nullptr, "Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_ASSERT(constant != nullptr, "Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
int32_t shift{}; int32_t shift{};
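The rank padding above right-aligns the Eye output shape into at least rank 4; an equivalent standalone illustration:

#include <algorithm>
#include <cstddef>
#include <vector>

static std::vector<std::size_t> pad_to_rank4(const std::vector<std::size_t>& out) {
    const std::size_t rank = std::max(out.size(), static_cast<std::size_t>(4));
    std::vector<std::size_t> dims(rank, 1);
    std::copy_backward(out.begin(), out.end(), dims.end());  // right-align, e.g. {3, 3} -> {1, 1, 3, 3}
    return dims;
}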

View File

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/fake_quantize.hpp"
+#include "openvino/op/fake_quantize.hpp"
 #include "intel_gpu/primitives/quantize.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateFakeQuantizeOp(Program& p, const std::shared_ptr<ngraph::op::v0::FakeQuantize>& op) {
+static void CreateFakeQuantizeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::FakeQuantize>& op) {
     validate_inputs_count(op, {5});
     std::string layerName = layer_type_name_ID(op);
     auto inputs = p.GetInputInfo(op);

View File

@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/gather_tree.hpp"
+#include "openvino/op/gather_tree.hpp"
 #include "intel_gpu/primitives/gather_tree.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
@@ -13,7 +13,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateGatherTreeOp(Program& p, const std::shared_ptr<ngraph::op::v1::GatherTree>& op) {
+static void CreateGatherTreeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::GatherTree>& op) {
     validate_inputs_count(op, {4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -26,7 +26,7 @@ static void CreateGatherTreeOp(Program& p, const std::shared_ptr<ngraph::op::v1:
         if (inputDataType == cldnn::data_types::i64) {
             // GPU primitive does not support i64 inputs,
             // so we need additional reorders to convert them to i32
-            auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag;
+            auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
             auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size());
             auto preprocessPrim = cldnn::reorder(reorderPrimName,
                                                  inputs[portIndex],
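
Note: this i64-to-i32 pre-reorder recurs in several converters in this commit (Gather, NonMaxSuppression). A condensed sketch of the pattern, assuming p, op, inputs, and a reordered_inputs vector from the surrounding converter:

    auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
    auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size());
    // reorder keeps the shape and format but narrows the element type to i32
    auto preprocessPrim = cldnn::reorder(reorderPrimName, inputs[portIndex], targetFormat, cldnn::data_types::i32);
    p.add_primitive(*op, preprocessPrim);
    reordered_inputs[portIndex] = cldnn::input_info(reorderPrimName);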

View File

@@ -2,23 +2,22 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
 #include "transformations/utils/utils.hpp"
-#include "ngraph/op/gather.hpp"
+#include "openvino/op/gather.hpp"
 #include "intel_gpu/primitives/gather.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
 #include "intel_gpu/primitives/reshape.hpp"
 #include "intel_gpu/primitives/crop.hpp"
-using namespace InferenceEngine;
 namespace ov {
 namespace intel_gpu {
 template <typename T>
-void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t batch_dim = 0, bool support_neg_ind = false) {
+void CreateGatherOpBase(ProgramBuilder& p, const std::shared_ptr<T>& op, const int64_t batch_dim = 0, bool support_neg_ind = false) {
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -32,7 +31,7 @@ void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t
         if (inputDataType == cldnn::data_types::i64) {
             // GPU primitive does not support i64 inputs,
             // so we need additional reorders to convert them to i32
-            auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag;
+            auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
             auto targetFormat = cldnn::format::get_default_format(op->get_input_partial_shape(portIndex).size());
             auto preprocessPrim = cldnn::reorder(reorderPrimName,
                                                  inputs[portIndex],
@@ -100,13 +99,13 @@ void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t
     // Get indices info to calculate offset
     const auto& indices_node = indices.get_node_shared_ptr();
-    auto indices_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(indices_node);
+    auto indices_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(indices_node);
     float result = 0.f;
     OPENVINO_ASSERT(ov::op::util::get_single_value(indices_constant, result),
                     "Unsupported indices node in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     // Set tensors for crop shape and offset
-    InferenceEngine::SizeVector start_offset(input_shape.size());
+    ov::Shape start_offset(input_shape.size());
     start_offset[0] = static_cast<size_t>(result);
     auto offsetTensor = tensor_from_dims(start_offset, 0);
     auto outTensor = tensor_from_dims(out_shape, 1);
@@ -148,23 +147,23 @@ void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t
     }
 }
-static void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v1::Gather>& op) {
+static void CreateGatherOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Gather>& op) {
     validate_inputs_count(op, {2, 3});
-    CreateGatherOpBase<ngraph::op::v1::Gather>(p, op);
+    CreateGatherOpBase<ov::op::v1::Gather>(p, op);
 }
 REGISTER_FACTORY_IMPL(v1, Gather);
-static void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v7::Gather>& op) {
+static void CreateGatherOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v7::Gather>& op) {
     validate_inputs_count(op, {2, 3, 4});
-    CreateGatherOpBase<ngraph::op::v7::Gather>(p, op, op->get_batch_dims());
+    CreateGatherOpBase<ov::op::v7::Gather>(p, op, op->get_batch_dims());
 }
 REGISTER_FACTORY_IMPL(v7, Gather);
-static void CreateGatherOp(Program& p, const std::shared_ptr<ngraph::op::v8::Gather>& op) {
+static void CreateGatherOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::Gather>& op) {
     validate_inputs_count(op, {2, 3, 4});
-    CreateGatherOpBase<ngraph::op::v8::Gather>(p, op, op->get_batch_dims(), true);
+    CreateGatherOpBase<ov::op::v8::Gather>(p, op, op->get_batch_dims(), true);
 }
 REGISTER_FACTORY_IMPL(v8, Gather);
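
Note: the three opset versions funnel into one CreateGatherOpBase template, differing only in batch_dim and negative-index support. The crop fast path above hinges on reading a scalar out of a Constant input; a minimal sketch of that idiom (input_shape and the input index are assumed from the surrounding converter):

    auto indices_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
    float value = 0.f;
    if (indices_constant && ov::op::util::get_single_value(indices_constant, value)) {
        ov::Shape start_offset(input_shape.size());
        start_offset[0] = static_cast<size_t>(value);        // gather along the first axis becomes a crop offset
        auto offsetTensor = tensor_from_dims(start_offset, 0);
    }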

View File

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/gather_elements.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/gather_elements.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/gather_elements.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateGatherElementsOp(Program& p, const std::shared_ptr<ngraph::op::v6::GatherElements>& op) {
+static void CreateGatherElementsOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v6::GatherElements>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

View File

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/gather_nd.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/gather_nd.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/gather_nd.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::GatherND>& op) {
+static void CreateGatherNDOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v5::GatherND>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -34,7 +34,7 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::G
 REGISTER_FACTORY_IMPL(v5, GatherND);
-static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v8::GatherND>& op) {
+static void CreateGatherNDOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::GatherND>& op) {
     validate_inputs_count(op, { 2 });
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

View File

@@ -7,14 +7,14 @@
 #include <ov_ops/generate_proposals_ie_internal.hpp>
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/primitives/mutable_data.hpp"
 namespace ov {
 namespace intel_gpu {
 static void CreateGenerateProposalsIEInternalOp(
-    Program& p,
+    ProgramBuilder& p,
     const std::shared_ptr<ov::op::internal::GenerateProposalsIEInternal>& op) {
     validate_inputs_count(op, {4});
     if (op->get_output_size() != 3) {

View File

@@ -4,15 +4,15 @@
 #include "intel_gpu/primitives/grid_sample.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
-#include "ngraph/op/grid_sample.hpp"
+#include "openvino/op/grid_sample.hpp"
 namespace ov {
 namespace intel_gpu {
 namespace {
-void CreateGridSampleOp(Program& p, const std::shared_ptr<ngraph::op::v9::GridSample>& op) {
+void CreateGridSampleOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v9::GridSample>& op) {
     validate_inputs_count(op, {2});
     const cldnn::grid_sample grid_sample_prim(layer_type_name_ID(op), p.GetInputInfo(op), op->get_attributes());

View File

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/grn.hpp"
+#include "openvino/op/grn.hpp"
 #include "intel_gpu/primitives/grn.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateGRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::GRN>& op) {
+static void CreateGRNOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::GRN>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

View File

@@ -2,13 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/interpolate.hpp"
+#include "openvino/op/constant.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "caseless.hpp"
-#include "ngraph/op/interpolate.hpp"
-#include "ngraph/op/constant.hpp"
 #include "intel_gpu/primitives/resample.hpp"
 #include <optional>
@@ -19,7 +18,7 @@ static std::vector<int64_t> ExtractAxes(const std::shared_ptr<ov::op::util::Inte
     std::vector<int64_t> axes;
     auto inputRank = op->get_input_partial_shape(0).size();
     if (op->get_input_size() == axes_index + 1) {
-        auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(axes_index));
+        auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(axes_index));
         OPENVINO_ASSERT(axes_constant, "Unsupported parameter node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
         axes = axes_constant->cast_vector<int64_t>();
@@ -70,7 +69,7 @@ static void ValidateAxesAndThrowIfError(const std::shared_ptr<ov::op::util::Inte
     }
 }
-static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v4::Interpolate>& op) {
+static void CreateInterpolateOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::Interpolate>& op) {
     validate_inputs_count(op, {3, 4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -81,10 +80,10 @@ static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v4
     auto attrs = op->get_attrs();
-    auto sizes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(SIZES_INDEX));
+    auto sizes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(SIZES_INDEX));
     std::vector<int64_t> sizes = sizes_constant ? sizes_constant->cast_vector<int64_t>() : std::vector<int64_t>{};
-    auto scales_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(SCALES_INDEX));
+    auto scales_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(SCALES_INDEX));
     std::vector<float> scales = scales_constant ? scales_constant->cast_vector<float>() : std::vector<float>{};
     std::vector<int64_t> axes = ExtractAxes(op, AXES_INDEX);
@@ -93,7 +92,7 @@ static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v4
         OPENVINO_ASSERT(axes.size() == scales.size(), "[GPU] Incorrect axes and scales values for Interpolate operation with id ", op->get_friendly_name());
     }
-    // TODO shouldn't be all this checking done in ngraph::op::v4::Interpolate?
+    // TODO shouldn't be all this checking done in ov::op::v4::Interpolate?
     ValidateAxesAndThrowIfError(op, axes);
     std::shared_ptr<cldnn::resample> resamplePrim = nullptr;
@@ -148,7 +147,7 @@ static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v4
     p.add_primitive(*op, resamplePrim);
 }
-static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v11::Interpolate>& op) {
+static void CreateInterpolateOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v11::Interpolate>& op) {
     validate_inputs_count(op, {2, 3});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -161,7 +160,7 @@ static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v1
     auto attrs = op->get_attrs();
-    auto scales_or_sizes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(eScalesOrSizesIndex));
+    auto scales_or_sizes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(eScalesOrSizesIndex));
     std::vector<float> scales = scales_or_sizes_constant && attrs.shape_calculation_mode == ov::op::v11::Interpolate::ShapeCalcMode::SCALES ?
                                 scales_or_sizes_constant->cast_vector<float>() : std::vector<float>{};
     std::vector<int64_t> sizes = scales_or_sizes_constant && attrs.shape_calculation_mode == ov::op::v11::Interpolate::ShapeCalcMode::SIZES ?
@@ -173,7 +172,7 @@ static void CreateInterpolateOp(Program& p, const std::shared_ptr<ngraph::op::v1
         OPENVINO_ASSERT(axes.size() == scales.size(), "[GPU] Incorrect axes and scales values for Interpolate operation with id ", op->get_friendly_name());
     }
-    // TODO shouldn't be all this checking done in ngraph::op::v4::Interpolate?
+    // TODO shouldn't be all this checking done in ov::op::v4::Interpolate?
     ValidateAxesAndThrowIfError(op, axes);
     std::shared_ptr<cldnn::resample> resamplePrim = nullptr;
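
Note: ExtractAxes above falls back to a default when the optional axes input is absent. A sketch of that default under the usual convention (all dimensions are resampled); this is an assumption, since the fallback body lies outside this hunk:

    std::vector<int64_t> axes(inputRank);
    std::iota(axes.begin(), axes.end(), 0);  // {0, 1, ..., rank-1}; requires <numeric>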

View File

@@ -1,17 +1,14 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
 #include "intel_gpu/plugin/plugin.hpp"
-#include <cpp/ie_cnn_network.h>
-#include "ngraph/op/loop.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/util/sub_graph_base.hpp"
+#include "openvino/op/loop.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/util/sub_graph_base.hpp"
 #include "transformations/utils/utils.hpp"
-#include "ie_ngraph_utils.hpp"
 #include "intel_gpu/primitives/loop.hpp"
 #include "intel_gpu/primitives/mutable_data.hpp"
@@ -19,23 +16,25 @@
 #include "intel_gpu/primitives/reorder.hpp"
 #include "intel_gpu/graph/topology.hpp"
+#include "ie_ngraph_utils.hpp"
 #include <vector>
 #include <algorithm>
-using Loop = ngraph::op::v5::Loop;
+using Loop = ov::op::v5::Loop;
 namespace ov {
 namespace intel_gpu {
 template<class DATA_TYPE>
-static DATA_TYPE CreateScalarData(Program &p, const cldnn::primitive_id& id, int64_t num) {
+static DATA_TYPE CreateScalarData(ProgramBuilder &p, const cldnn::primitive_id& id, int64_t num) {
     auto mem = p.get_engine().allocate_memory({ cldnn::data_types::i64, cldnn::format::bfyx, { 1, 1, 1, 1 } });
     cldnn::mem_lock<int64_t> ptr{mem, p.get_engine().get_service_stream()};
     *ptr.begin() = num;
     return {id, mem};
 }
-static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::shared_ptr<ngraph::Node>& op,
+static cldnn::mutable_data CreateAdditionalOutputData(ProgramBuilder &p, const std::shared_ptr<ov::Node>& op,
                                                       const cldnn::primitive_id& id, const cldnn::primitive_id& input,
                                                       const int32_t output_idx) {
     const auto precision = cldnn::element_type_to_data_type(op->get_output_element_type(output_idx));
@@ -47,7 +46,7 @@ static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::sha
     return md;
 }
-static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
+static void CreateLoopOp(ProgramBuilder& p, const std::shared_ptr<Loop>& op) {
     const std::string layerName = layer_type_name_ID(op);
     auto inputs = p.GetInputInfo(op);
     const auto& loop_input_descs = op->get_input_descriptions();
@@ -58,7 +57,6 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
     InferenceEngine::CNNNetwork body_network(op->get_function());
     auto networkInputs = body_network.getInputsInfo();
     auto networkOutputs = body_network.getOutputsInfo();
     // Set special body ports: current_iteration input , execution condition output
     auto special_body_ports = op->get_special_body_ports();
@@ -82,7 +80,7 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
     }
     // get body topology from ngraph function
-    Program body_program(body_network, p.get_engine(), p.get_config(), true);
+    ProgramBuilder body_program(body_network, p.get_engine(), p.get_config(), true);
     auto body_topology = *body_program.GetTopology();
     // setup input_primitive_maps/ output_primitive_maps and back_edges
@@ -118,11 +116,10 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
         cldnn::primitive_id from_id = layer_type_name_ID(from);
         // reset output data type because the data types of the outputs of the
-        // body topology are always FP32 regardless of ngraph data type
+        // body topology are always FP32 regardless of element type
         {
             const auto from_prim = body_topology.at(from_id);
-            const auto& to_ngraph_type = to->get_element_type();
-            const auto to_cldnn_type = cldnn::element_type_to_data_type(to_ngraph_type);
+            const auto to_cldnn_type = cldnn::element_type_to_data_type(to->get_element_type());
            from_prim->output_data_types = {to_cldnn_type};
         }
         back_edges.emplace_back(from_id, to_id);
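
Note: CreateScalarData above is the standard host-to-device seeding recipe: allocate a one-element buffer, map it with cldnn::mem_lock, write, and let the lock release on scope exit. A minimal standalone sketch (p is a ProgramBuilder):

    auto mem = p.get_engine().allocate_memory({ cldnn::data_types::i64, cldnn::format::bfyx, { 1, 1, 1, 1 } });
    {
        cldnn::mem_lock<int64_t> ptr{mem, p.get_engine().get_service_stream()};
        *ptr.begin() = 42;  // host-side write; the mapping is released when ptr leaves scope
    }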

View File

@@ -2,11 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/lrn.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/lrn.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/lrn.hpp"
@@ -21,12 +21,12 @@ static cldnn::lrn_norm_region GetNormRegion(std::vector<int64_t> axis_value) {
     }
 }
-static void CreateLRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::LRN>& op) {
+static void CreateLRNOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::LRN>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
-    auto axis_const = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
+    auto axis_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
     OPENVINO_ASSERT(axis_const != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     auto axis_value = axis_const->cast_vector<int64_t>();
     auto localSize = static_cast<uint32_t>(op->get_nsize());

View File

@@ -4,12 +4,12 @@
 #include <array>
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/matmul.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/fake_quantize.hpp"
+#include "openvino/op/matmul.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/fake_quantize.hpp"
 #include "intel_gpu/primitives/gemm.hpp"
 #include "intel_gpu/primitives/fully_connected.hpp"
@@ -29,7 +29,7 @@ namespace intel_gpu {
 static std::tuple<bool, PartialShape, PartialShape> get_aligned_shapes(const PartialShape& shape_a,
                                                                        const PartialShape& shape_b,
-                                                                       const std::shared_ptr<ngraph::op::v0::MatMul>& matmul) {
+                                                                       const std::shared_ptr<ov::op::v0::MatMul>& matmul) {
     PartialShape shape_a_aligned(shape_a), shape_b_aligned(shape_b);
     auto rank_a = shape_a_aligned.rank().get_length();
     auto rank_b = shape_b_aligned.rank().get_length();
@@ -71,7 +71,7 @@ static std::tuple<bool, PartialShape, PartialShape> get_aligned_shapes(const Par
     return {true, shape_a_aligned, shape_b_aligned};
 }
-static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::MatMul>& op) {
+static void CreateMatMulOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::MatMul>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -172,12 +172,12 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
     auto transA = op->get_transpose_a();
     auto transB = op->get_transpose_b();
-    std::array<ngraph::PartialShape, 2> inputShapes{
+    std::array<ov::PartialShape, 2> inputShapes{
         op->get_input_partial_shape(0),
        op->get_input_partial_shape(1)
     };
-    auto canTransposeInputs = [&] (const std::array<ngraph::PartialShape, 2>& shapes, bool transA, bool transB, ov::element::Type type) -> bool {
+    auto canTransposeInputs = [&] (const std::array<ov::PartialShape, 2>& shapes, bool transA, bool transB, ov::element::Type type) -> bool {
         if (!transA && !transB)
             return false;
@@ -188,17 +188,17 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
         // don't transpose inputs if they're aligned to 16
         bool inputsAligned = std::all_of(shapes[0].rbegin(), shapes[0].rbegin() + 2,
-                                         [] (const ngraph::Dimension& dim) { return dim.is_static() && dim.get_length() % 16 == 0; }) &&
+                                         [] (const ov::Dimension& dim) { return dim.is_static() && dim.get_length() % 16 == 0; }) &&
                              std::all_of(shapes[1].rbegin(), shapes[1].rbegin() + 2,
-                                         [] (const ngraph::Dimension& dim) { return dim.is_static() && dim.get_length() % 16 == 0; });
+                                         [] (const ov::Dimension& dim) { return dim.is_static() && dim.get_length() % 16 == 0; });
         if (inputsAligned)
             return false;
         // Heuristic condition for permute and tiled_opt kernel perform better than ref kernel.
         bool in0_large = std::all_of(shapes[0].rbegin(), shapes[0].rbegin() + 2,
-                                     [] (const ngraph::Dimension& dim) { return dim.is_static() && dim.get_length() >= 64; });
+                                     [] (const ov::Dimension& dim) { return dim.is_static() && dim.get_length() >= 64; });
         bool in1_large = std::all_of(shapes[1].rbegin(), shapes[1].rbegin() + 2,
-                                     [] (const ngraph::Dimension& dim) { return dim.is_static() && dim.get_length() >= 64; });
+                                     [] (const ov::Dimension& dim) { return dim.is_static() && dim.get_length() >= 64; });
         // Optimized for clDNN
         auto is_u8_i8 = (type == ov::element::Type_t::i8 || type == ov::element::Type_t::u8);
         bool in0_very_large = tensor_from_dims(shapes[0].to_shape()).count() > 100000;
@@ -208,7 +208,7 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
         return (in0_large && in1_large) || needs_to_transpose_inputs;
     };
-    auto transposeInput = [] (Program& p, const std::shared_ptr<ngraph::Node>& op, const ngraph::PartialShape& shape,
+    auto transposeInput = [] (ProgramBuilder& p, const std::shared_ptr<ov::Node>& op, const ov::PartialShape& shape,
                               const std::string& suffix, const cldnn::primitive_id& primitiveId) -> cldnn::input_info {
         std::vector<uint16_t> transposeOrder(shape.size());
         std::iota(transposeOrder.begin(), transposeOrder.end(), 0);
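
Note: the transpose heuristic above only inspects the two innermost dimensions of each operand. A condensed sketch of the alignment test it applies to both inputs (shapes as in the lambda above):

    auto aligned_to_16 = [](const ov::PartialShape& shape) {
        return std::all_of(shape.rbegin(), shape.rbegin() + 2, [](const ov::Dimension& dim) {
            return dim.is_static() && dim.get_length() % 16 == 0;
        });
    };
    bool inputsAligned = aligned_to_16(inputShapes[0]) && aligned_to_16(inputShapes[1]);  // if true, skip transposing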

View File

@@ -3,18 +3,19 @@
 //
 #include "intel_gpu/primitives/matrix_nms.hpp"
-#include <memory>
-#include <openvino/opsets/opset8.hpp>
+#include "openvino/op/matrix_nms.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/primitives/mutable_data.hpp"
 #include "ov_ops/nms_static_shape_ie.hpp"
+#include <memory>
 namespace ov {
 namespace op {
 namespace internal {
-using NmsStaticShapeIE8 = ov::op::internal::NmsStaticShapeIE<ov::opset8::MatrixNms>;
+using NmsStaticShapeIE8 = ov::op::internal::NmsStaticShapeIE<ov::op::v8::MatrixNms>;
 }
 } // namespace op
 } // namespace ov
@@ -23,14 +24,14 @@ namespace ov {
 namespace intel_gpu {
 namespace {
-void CreateNmsStaticShapeIE8Op(Program& p, const std::shared_ptr<ov::op::internal::NmsStaticShapeIE8>& op) {
+void CreateNmsStaticShapeIE8Op(ProgramBuilder& p, const std::shared_ptr<ov::op::internal::NmsStaticShapeIE8>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::vector<cldnn::memory::ptr> shared_memory;
     auto outputIndices = op->get_output_shape(0)[0];
-    cldnn::layout mutableLayoutFirst = cldnn::layout(cldnn::element_type_to_data_type(ngraph::element::i32),
+    cldnn::layout mutableLayoutFirst = cldnn::layout(cldnn::element_type_to_data_type(ov::element::i32),
                                                      cldnn::format::bfyx,
                                                      cldnn::tensor(static_cast<int32_t>(outputIndices), 1, 1, 1));
@@ -42,7 +43,7 @@ void CreateNmsStaticShapeIE8Op(Program& p, const std::shared_ptr<ov::op::interna
     inputs.push_back(cldnn::input_info(matrix_nms_mutable_id_w_first));
     auto batches_num = op->get_output_shape(2)[0];
-    cldnn::layout mutableLayoutSecond = cldnn::layout(cldnn::element_type_to_data_type(ngraph::element::i32),
+    cldnn::layout mutableLayoutSecond = cldnn::layout(cldnn::element_type_to_data_type(ov::element::i32),
                                                       cldnn::format::bfyx,
                                                       cldnn::tensor(static_cast<int32_t>(batches_num), 1, 1, 1));
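
Note: ops with several results are mapped here by allocating extra mutable_data buffers that the primitive fills as side outputs, then appending each buffer to the primitive's input list. A sketch of the pattern; the id naming below is hypothetical:

    cldnn::layout mutableLayout(cldnn::element_type_to_data_type(ov::element::i32),
                                cldnn::format::bfyx,
                                cldnn::tensor(static_cast<int32_t>(outputIndices), 1, 1, 1));
    auto mem = p.get_engine().allocate_memory(mutableLayout);
    shared_memory.push_back(mem);                                             // keep the buffer alive with the network
    cldnn::primitive_id mutable_id = layer_type_name_ID(op) + "_md_write.0";  // hypothetical id
    p.add_primitive(*op, cldnn::mutable_data(mutable_id, mem));
    inputs.push_back(cldnn::input_info(mutable_id));                          // consumed by the NMS primitive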

View File

@@ -6,7 +6,7 @@
 #include "ov_ops/multiclass_nms_ie_internal.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/primitives/multiclass_nms.hpp"
 #include "intel_gpu/primitives/mutable_data.hpp"
@@ -14,7 +14,7 @@ namespace ov {
 namespace intel_gpu {
-static void CreateMulticlassNmsIEInternalOp(Program& p, const std::shared_ptr<op::internal::MulticlassNmsIEInternal>& op) {
+static void CreateMulticlassNmsIEInternalOp(ProgramBuilder& p, const std::shared_ptr<op::internal::MulticlassNmsIEInternal>& op) {
     validate_inputs_count(op, {2, 3});
     auto inputs = p.GetInputInfo(op);

View File

@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/op/mvn.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/mvn.hpp"
-#include "ngraph/op/constant.hpp"
 #include "intel_gpu/primitives/mvn.hpp"
 #include <algorithm>
@@ -15,7 +15,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateCommonMVNOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
+static void CreateCommonMVNOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op,
                               std::vector<int64_t> axes, bool normalize_variance, float eps, bool eps_inside_sqrt = true) {
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -30,7 +30,7 @@ static void CreateCommonMVNOp(Program& p, const std::shared_ptr<ngraph::Node>& o
     p.add_primitive(*op, mvnPrim);
 }
-static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v0::MVN>& op) {
+static void CreateMVNOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::MVN>& op) {
     validate_inputs_count(op, {1});
     bool across_channels = op->get_across_channels();
@@ -48,10 +48,10 @@ static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v0::MVN>&
     CreateCommonMVNOp(p, op, axes, normalize_variance, eps);
 }
-static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>& op) {
+static void CreateMVNOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v6::MVN>& op) {
     validate_inputs_count(op, {2});
-    auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
+    auto inConst = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
     OPENVINO_ASSERT(inConst != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     std::vector<int64_t> axes = inConst->cast_vector<int64_t>();
@@ -61,7 +61,7 @@ static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>&
     bool normalize_variance = op->get_normalize_variance();
     float eps = op->get_eps();
-    bool eps_inside_sqrt = op->get_eps_mode() == ngraph::op::MVNEpsMode::INSIDE_SQRT;
+    bool eps_inside_sqrt = op->get_eps_mode() == ov::op::MVNEpsMode::INSIDE_SQRT;
     CreateCommonMVNOp(p, op, axes, normalize_variance, eps, eps_inside_sqrt);
 }
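
Note: on the v0 path the axes are derived from across_channels rather than read from an input. An illustrative derivation under the usual convention (the exact lines fall outside this hunk, so this is an assumption): for a rank-4 input, across_channels == true normalizes over {1, 2, 3} and false over {2, 3} only:

    std::vector<int64_t> axes;
    const int64_t rank = 4;  // illustrative input rank
    for (int64_t i = across_channels ? 1 : 2; i < rank; ++i)
        axes.push_back(i);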

View File

@@ -2,11 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/non_max_suppression.hpp"
-#include <ngraph/opsets/opset3.hpp>
+#include "openvino/op/non_max_suppression.hpp"
 #include <ov_ops/nms_ie_internal.hpp>
 #include "intel_gpu/primitives/reorder.hpp"
@@ -17,7 +16,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_ptr<ov::op::internal::NonMaxSuppressionIEInternal>& op) {
+static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::shared_ptr<ov::op::internal::NonMaxSuppressionIEInternal>& op) {
     validate_inputs_count(op, {2, 3, 4, 5, 6});
     auto inputs = p.GetInputInfo(op);
     std::vector<cldnn::input_info> reordered_inputs;
@@ -28,7 +27,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
         if ((portIndex == 2) && (inputDataType == cldnn::data_types::i64)) {
             // GPU primitive supports only i32 data type for 'max_output_boxes_per_class' input
             // so we need additional reorder if it's provided as i64
-            auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + Program::m_preProcessTag;
+            auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag;
             auto targetFormat = cldnn::format::get_default_format(op->get_input_partial_shape(portIndex).size());
             auto preprocessPrim = cldnn::reorder(reorderPrimName,
                                                  inputs[portIndex],
@@ -55,8 +54,8 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
     for (size_t i = 0; i < num_outputs; i++) {
         auto type = op->get_output_element_type(i);
         // GPU primitive supports only i32 as output data type
-        if (type == ngraph::element::i64) {
-            type = ngraph::element::i32;
+        if (type == ov::element::i64) {
+            type = ov::element::i32;
         }
         output_data_types.push_back(cldnn::element_type_to_data_type(type));
     }
@@ -94,8 +93,8 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
     switch (num_outputs) {
         case 3: {
             auto mutable_precision_second = op->get_output_element_type(2);
-            if (mutable_precision_second == ngraph::element::i64) {
-                mutable_precision_second = ngraph::element::i32;
+            if (mutable_precision_second == ov::element::i64) {
+                mutable_precision_second = ov::element::i32;
             }
             cldnn::layout mutableLayoutSecond = cldnn::layout(
                 cldnn::element_type_to_data_type(mutable_precision_second),

View File

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/non_zero.hpp"
+#include "openvino/op/non_zero.hpp"
 #include "intel_gpu/primitives/non_zero.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateNonZeroOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
+static void CreateNonZeroOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layer_name = layer_type_name_ID(op);

View File

@@ -2,11 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/normalize_l2.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/normalize_l2.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/normalize.hpp"
 #include "intel_gpu/primitives/data.hpp"
@@ -14,13 +14,13 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0::NormalizeL2>& op) {
+static void CreateNormalizeL2Op(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::NormalizeL2>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
     // params
-    auto const_axis = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
+    auto const_axis = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
     OPENVINO_ASSERT(const_axis != nullptr, "[GPU] Unsupported axis node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     auto axis = const_axis->cast_vector<size_t>();
@@ -33,7 +33,7 @@ static void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0
     }
     // We create fake scale constant and fill it with ones to keep the same behavior as current primitive
-    auto scale = std::make_shared<ngraph::op::v0::Constant>(op->get_output_element_type(0), ngraph::Shape{1}, std::vector<float>{1.0});
+    auto scale = std::make_shared<ov::op::v0::Constant>(op->get_output_element_type(0), ov::Shape{1}, std::vector<float>{1.0});
     cldnn::layout constLayout = cldnn::layout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), cldnn::format::bfyx, cldnn::tensor{1});
     auto mem = p.get_engine().allocate_memory(constLayout, false);
     cldnn::mem_lock<int8_t> tmpPointer{mem, p.get_engine().get_service_stream()};

View File

@@ -2,26 +2,26 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
 #include "transformations/utils/utils.hpp"
-#include "ngraph/op/one_hot.hpp"
+#include "openvino/op/one_hot.hpp"
 #include "intel_gpu/primitives/one_hot.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::OneHot>& op) {
+static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::OneHot>& op) {
     validate_inputs_count(op, {4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
     int64_t axis = op->get_axis();
-    auto depth_value_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
-    auto on_value_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
-    auto off_value_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(3));
+    auto depth_value_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
+    auto on_value_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
+    auto off_value_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(3));
     OPENVINO_ASSERT(on_value_node != nullptr || off_value_node != nullptr || depth_value_node != nullptr,
                     "[GPU] Unsupported on/off/depth nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");

View File

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
 #include "transformations/utils/utils.hpp"
-#include "ngraph/op/pad.hpp"
+#include "openvino/op/pad.hpp"
 #include "intel_gpu/primitives/border.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>& op) {
+static void CreatePadOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Pad>& op) {
     validate_inputs_count(op, {3, 4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -21,7 +21,7 @@ static void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>&
     std::vector<cldnn::input_info> non_constant_inputs = {inputs[0]};
     int32_t non_constant_input_mask = 0;
-    auto pads_begin_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(1).get_node_shared_ptr());
+    auto pads_begin_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(1).get_node_shared_ptr());
     std::vector<int64_t> pads_begin = std::vector<int64_t>{};
     if (pads_begin_constant) {
         pads_begin = pads_begin_constant->cast_vector<int64_t>();
@@ -30,7 +30,7 @@ static void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>&
         non_constant_input_mask |= cldnn::border::PAD_NON_CONST_INPUT::BEGIN;
     }
-    auto pads_end_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(2).get_node_shared_ptr());
+    auto pads_end_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(2).get_node_shared_ptr());
     std::vector<int64_t> pads_end = std::vector<int64_t>{};
     if (pads_end_constant) {
         pads_end = pads_end_constant->cast_vector<int64_t>();
@@ -42,7 +42,7 @@ static void CreatePadOp(Program& p, const std::shared_ptr<ngraph::op::v1::Pad>&
     float pad_value = 0.f;
     bool is_value_const = false;
     if (op->get_pad_mode() == ov::op::PadMode::CONSTANT && op->get_input_size() == 4) {
-        auto const_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(3));
+        auto const_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(3));
         if (const_node) {
             const bool check_value_range = false;  // Allows the usage of infinity value as pad_value
             OPENVINO_ASSERT(ov::op::util::get_single_value(const_node, pad_value, check_value_range),
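
Note: constant pad inputs are folded into host-side vectors, while non-constant ones stay as runtime inputs flagged in a bitmask for the border primitive. A condensed sketch of that branching; the END flag is assumed by symmetry with the BEGIN flag visible above:

    std::vector<cldnn::input_info> non_constant_inputs = {inputs[0]};
    int32_t non_constant_input_mask = 0;
    if (!pads_begin_constant) {
        non_constant_inputs.push_back(inputs[1]);
        non_constant_input_mask |= cldnn::border::PAD_NON_CONST_INPUT::BEGIN;
    }
    if (!pads_end_constant) {
        non_constant_inputs.push_back(inputs[2]);
        non_constant_input_mask |= cldnn::border::PAD_NON_CONST_INPUT::END;  // assumed flag name
    }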

View File

@ -2,24 +2,29 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "openvino/runtime/intel_gpu/properties.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "openvino/core/preprocess/input_tensor_info.hpp"
#include "openvino/op/fake_quantize.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/nv12_to_rgb.hpp"
#include "openvino/op/nv12_to_bgr.hpp"
#include "openvino/op/i420_to_rgb.hpp"
#include "openvino/op/i420_to_bgr.hpp"
#include "ngraph/op/parameter.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/primitives/input_layout.hpp" #include "intel_gpu/primitives/input_layout.hpp"
#include "intel_gpu/primitives/reorder.hpp" #include "intel_gpu/primitives/reorder.hpp"
#include "intel_gpu/primitives/data.hpp" #include "intel_gpu/primitives/data.hpp"
#include "intel_gpu/primitives/concatenation.hpp" #include "intel_gpu/primitives/concatenation.hpp"
#include "openvino/core/preprocess/input_tensor_info.hpp"
using namespace InferenceEngine; using namespace InferenceEngine;
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::Parameter>& op) { static void CreateParameterOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Parameter>& op) {
auto networkInputs = p.GetNetworkInputs(); auto networkInputs = p.GetNetworkInputs();
OPENVINO_ASSERT(networkInputs.find(op->get_friendly_name()) != networkInputs.end(), OPENVINO_ASSERT(networkInputs.find(op->get_friendly_name()) != networkInputs.end(),
"[GPU] Can't find input ", op->get_friendly_name(), " in InputsDataMap"); "[GPU] Can't find input ", op->get_friendly_name(), " in InputsDataMap");
@ -55,7 +60,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
cldnn::layout networkInputLayout(input_pshape, cldnn::layout networkInputLayout(input_pshape,
cldnn::element_type_to_data_type(op->get_output_element_type(0)), cldnn::element_type_to_data_type(op->get_output_element_type(0)),
inputFormat); inputFormat);
cldnn::primitive_id meanBlobID = inputName + Program::m_meanValuesTag; cldnn::primitive_id meanBlobID = inputName + ProgramBuilder::m_meanValuesTag;
std::vector<float> meanValues; std::vector<float> meanValues;
if ((meanChannels > 0) && if ((meanChannels > 0) &&
@ -132,10 +137,10 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
} }
auto is_convert_color_type = [](const std::shared_ptr<ov::Node> &node) { auto is_convert_color_type = [](const std::shared_ptr<ov::Node> &node) {
return ngraph::is_type<ngraph::op::v8::NV12toRGB>(node) || return ov::is_type<ov::op::v8::NV12toRGB>(node) ||
ngraph::is_type<ngraph::op::v8::NV12toBGR>(node) || ov::is_type<ov::op::v8::NV12toBGR>(node) ||
ngraph::is_type<ngraph::op::v8::I420toRGB>(node) || ov::is_type<ov::op::v8::I420toRGB>(node) ||
ngraph::is_type<ngraph::op::v8::I420toBGR>(node); ov::is_type<ov::op::v8::I420toBGR>(node);
}; };
std::function<bool(const std::shared_ptr<ov::Node>&, size_t)> recursive_search_convert_color = std::function<bool(const std::shared_ptr<ov::Node>&, size_t)> recursive_search_convert_color =
@ -155,7 +160,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
if (node->output(0).get_rt_info().count(ov::preprocess::TensorInfoMemoryType::get_type_info_static())) { if (node->output(0).get_rt_info().count(ov::preprocess::TensorInfoMemoryType::get_type_info_static())) {
std::string mem_type = node->output(0).get_rt_info().at(ov::preprocess::TensorInfoMemoryType::get_type_info_static()) std::string mem_type = node->output(0).get_rt_info().at(ov::preprocess::TensorInfoMemoryType::get_type_info_static())
.as<ov::preprocess::TensorInfoMemoryType>().value; .as<ov::preprocess::TensorInfoMemoryType>().value;
if (mem_type.find(GPU_CONFIG_KEY(SURFACE)) != std::string::npos) { if (mem_type.find(ov::intel_gpu::memory_type::surface) != std::string::npos) {
surface_input_found = true; surface_input_found = true;
} }
} }
@ -165,7 +170,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
std::function<bool(const std::shared_ptr<ov::Node>&)> connected_to_quantize = std::function<bool(const std::shared_ptr<ov::Node>&)> connected_to_quantize =
[&](const std::shared_ptr<ov::Node> &node) -> bool { [&](const std::shared_ptr<ov::Node> &node) -> bool {
for (auto& user : node->get_users()) { for (auto& user : node->get_users()) {
if (ngraph::is_type<ngraph::op::v0::FakeQuantize>(user)) if (ov::is_type<ov::op::v0::FakeQuantize>(user))
return true; return true;
} }
return false; return false;
@ -192,7 +197,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
auto reorder_layout = networkInputLayout; auto reorder_layout = networkInputLayout;
reorder_layout.format = cldnn::format::bfyx; reorder_layout.format = cldnn::format::bfyx;
auto preprocessPrimID = "reorder:" + inputName + Program::m_preProcessTag + suffix; auto preprocessPrimID = "reorder:" + inputName + ProgramBuilder::m_preProcessTag + suffix;
auto reorder = cldnn::reorder(preprocessPrimID, auto reorder = cldnn::reorder(preprocessPrimID,
cldnn::input_info(batched_name), cldnn::input_info(batched_name),
reorder_layout); reorder_layout);
@ -204,7 +209,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
if (batch > 1 && !is_convert_color_input) if (batch > 1 && !is_convert_color_input)
p.add_primitive(*op, cldnn::concatenation(inputName, surfaces_inputs, 0)); p.add_primitive(*op, cldnn::concatenation(inputName, surfaces_inputs, 0));
else else
p.primitive_ids[inputName] = "reorder:" + inputName + Program::m_preProcessTag; p.primitive_ids[inputName] = "reorder:" + inputName + ProgramBuilder::m_preProcessTag;
} else if (is_convert_color_input) { } else if (is_convert_color_input) {
networkInputLayout.format = cldnn::format::byxf; networkInputLayout.format = cldnn::format::byxf;
@ -213,7 +218,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
p.inputLayouts.insert({ inputInfo->name(), networkInputLayout }); p.inputLayouts.insert({ inputInfo->name(), networkInputLayout });
p.add_primitive(*op, cldnn::input_layout(inputName, networkInputLayout)); p.add_primitive(*op, cldnn::input_layout(inputName, networkInputLayout));
} else { } else {
auto preprocessPrimID = "reorder:" + inputName + Program::m_preProcessTag; auto preprocessPrimID = "reorder:" + inputName + ProgramBuilder::m_preProcessTag;
cldnn::layout inputLayout(networkInputLayout); cldnn::layout inputLayout(networkInputLayout);
auto network_input_data_type = DataTypeFromPrecision(ip); auto network_input_data_type = DataTypeFromPrecision(ip);
inputLayout.data_type = network_input_data_type; inputLayout.data_type = network_input_data_type;
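The hunks above are typical of the whole commit: ngraph::is_type<T> becomes ov::is_type<T>, and the legacy GPU_CONFIG_KEY(SURFACE) macro gives way to the ov::intel_gpu::memory_type::surface constant. A minimal standalone sketch of the new RTTI idiom (editorial illustration, not code from this commit):

    // Sketch only: checks whether a node is an NV12 color-conversion op,
    // using ov::is_type / ov::as_type_ptr instead of the ngraph:: helpers.
    #include "openvino/core/node.hpp"
    #include "openvino/op/nv12_to_rgb.hpp"

    bool is_nv12_converter(const std::shared_ptr<ov::Node>& node) {
        // ov::as_type_ptr performs the same check as ov::is_type,
        // but also yields a typed pointer (nullptr on mismatch).
        if (auto typed = ov::as_type_ptr<ov::op::v8::NV12toRGB>(node)) {
            return typed->get_output_size() == 1;
        }
        return false;
    }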

@@ -2,11 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/max_pool.hpp"
-#include "ngraph/op/avg_pool.hpp"
+#include "openvino/op/max_pool.hpp"
+#include "openvino/op/avg_pool.hpp"
 #include "intel_gpu/primitives/mutable_data.hpp"
 #include "intel_gpu/primitives/pooling.hpp"
@@ -14,7 +14,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::AvgPool>& op) {
+static void CreateAvgPoolOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::AvgPool>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -46,7 +46,7 @@ static void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Av
     p.add_primitive(*op, pooling_prim);
 }
-static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::MaxPool>& op) {
+static void CreateMaxPoolOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::MaxPool>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -76,10 +76,10 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Ma
     p.add_primitive(*op, pooling_prim);
 }
-static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::MaxPool>& op) {
+static void CreateMaxPoolOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::MaxPool>& op) {
     validate_inputs_count(op, {1});
     if (op->get_output_size() != 2) {
-        OPENVINO_THROW("[GPU] MaxPool opset 8 requires 2 outputs");
+        OPENVINO_THROW("[GPU] v8:MaxPool requires 2 outputs");
     }
     auto inputs = p.GetInputInfo(op);
     const auto layer_type_name = layer_type_name_ID(op);
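Every conversion function touched in these files keeps the same shape after the rename. Schematically (editorial sketch only; a hypothetical op Foo and cldnn primitive foo, shown to make the recurring pattern explicit):

    // Editorial sketch of the recurring factory shape; CreateFooOp / cldnn::foo
    // are placeholders, the helpers are those used throughout this commit.
    static void CreateFooOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Foo>& op) {
        validate_inputs_count(op, {1});                  // check the expected ov input count
        auto inputs = p.GetInputInfo(op);                // map ov inputs to cldnn::input_info
        std::string layerName = layer_type_name_ID(op);  // stable primitive id for the node
        auto prim = cldnn::foo(layerName, inputs[0]);    // build the matching cldnn primitive
        p.add_primitive(*op, prim);                      // register it with the ProgramBuilder
    }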

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/op/prior_box.hpp"
+#include "openvino/op/prior_box_clustered.hpp"
+#include "openvino/op/constant.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/prior_box.hpp"
-#include "ngraph/op/prior_box_clustered.hpp"
 #include "intel_gpu/primitives/prior_box.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::op::v0::PriorBoxClustered>& op) {
+static void CreatePriorBoxClusteredOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::PriorBoxClustered>& op) {
     OPENVINO_ASSERT(false, "[GPU] PriorBoxClustered op is not supported in GPU plugin yet.");
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
@@ -86,7 +86,7 @@ static void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::
     }
 }
-static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::PriorBox>& op) {
+static void CreatePriorBoxOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::PriorBox>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -111,8 +111,8 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
     OPENVINO_ASSERT(img_pshape.is_static(), "Dynamic shapes are not supported for PriorBox operation yet");
     if (!output_pshape.is_dynamic()) {
-        const auto output_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(0));
-        const auto image_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
+        const auto output_size_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(0));
+        const auto image_size_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
         // output_size should be constant to be static output shape
         OPENVINO_ASSERT(output_size_constant,
@@ -174,7 +174,7 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
     }
 }
-static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v8::PriorBox>& op) {
+static void CreatePriorBoxOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::PriorBox>& op) {
     validate_inputs_count(op, {2});
     const auto inputs = p.GetInputInfo(op);
     std::string layer_name = layer_type_name_ID(op);
@@ -183,8 +183,8 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v8::P
     auto output_pshape = op->get_output_partial_shape(0);
     if (!output_pshape.is_dynamic()) {
-        const auto output_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(0));
-        const auto image_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
+        const auto output_size_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(0));
+        const auto image_size_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
         // output_size should be constant to be static output shape
         OPENVINO_ASSERT(output_size_constant,

@@ -2,11 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/op/proposal.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/proposal.hpp"
 #include "intel_gpu/primitives/proposal.hpp"
 #include "intel_gpu/primitives/mutable_data.hpp"
 #include "intel_gpu/runtime/debug_configuration.hpp"
@@ -14,7 +13,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::Proposal>& op) {
+static void CreateProposalOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Proposal>& op) {
     validate_inputs_count(op, {3});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -105,8 +104,8 @@ static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
     } else {
         if (op->get_output_size() == 2) {
             auto mutable_precision = op->get_output_element_type(1);
-            if (mutable_precision == ngraph::element::i64) {
-                mutable_precision = ngraph::element::i32;
+            if (mutable_precision == ov::element::i64) {
+                mutable_precision = ov::element::i32;
             }
             cldnn::layout mutableLayout = cldnn::layout(cldnn::element_type_to_data_type(mutable_precision),
@@ -185,7 +184,7 @@ static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
             p.add_primitive(*op, proposalPrim);
         } else {
-            IE_THROW() << op->get_friendly_name() << " Incorrect Proposal outputs number";
+            OPENVINO_THROW(op->get_friendly_name(), " Incorrect Proposal outputs number");
         }
     }
 }
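The last hunk above also migrates error reporting: the stream-style IE_THROW() of the Inference Engine API is replaced by the variadic OPENVINO_THROW, which stringifies and concatenates its arguments into the exception message. Side by side (illustrative fragment, assuming an op pointer in scope):

    // Old Inference Engine style: message built with operator<<.
    //   IE_THROW() << op->get_friendly_name() << " Incorrect Proposal outputs number";
    // New OpenVINO style: arguments are concatenated by the macro.
    OPENVINO_THROW(op->get_friendly_name(), " Incorrect Proposal outputs number");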

@@ -2,9 +2,9 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/random_uniform.hpp"
+#include "openvino/op/random_uniform.hpp"
 #include "intel_gpu/primitives/random_uniform.hpp"
@@ -13,7 +13,7 @@ namespace intel_gpu {
 namespace {
-void CreateRandomUniformOp(Program &p, const std::shared_ptr<ngraph::op::v8::RandomUniform> &op) {
+void CreateRandomUniformOp(ProgramBuilder &p, const std::shared_ptr<ov::op::v8::RandomUniform> &op) {
     auto inputs = p.GetInputInfo(op);
     auto input_pshape = op->get_input_partial_shape(0);
     auto output_pshape = op->get_output_partial_shape(0);

@@ -2,16 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <intel_gpu/plugin/program.hpp>
+#include "openvino/op/range.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include <intel_gpu/plugin/common_utils.hpp>
 #include <intel_gpu/primitives/range.hpp>
-#include <ngraph/op/range.hpp>
 namespace ov {
 namespace intel_gpu {
-static void CreateRangeOp(Program &p, const std::shared_ptr<ngraph::op::v4::Range> &op) {
+static void CreateRangeOp(ProgramBuilder &p, const std::shared_ptr<ov::op::v4::Range> &op) {
     validate_inputs_count(op, { 3 });
     auto output_pshape = op->get_output_partial_shape(0);
     OPENVINO_ASSERT(output_pshape.rank().get_length() == 1 , "[GPU] range v4 output rank should be 1");

@@ -2,19 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/reduce_sum.hpp"
-#include "ngraph/op/reduce_prod.hpp"
-#include "ngraph/op/reduce_mean.hpp"
-#include "ngraph/op/reduce_logical_or.hpp"
-#include "ngraph/op/reduce_logical_and.hpp"
-#include "ngraph/op/reduce_l1.hpp"
-#include "ngraph/op/reduce_l2.hpp"
-#include "ngraph/op/min.hpp"
-#include "ngraph/op/max.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/reduce_sum.hpp"
+#include "openvino/op/reduce_prod.hpp"
+#include "openvino/op/reduce_mean.hpp"
+#include "openvino/op/reduce_logical_or.hpp"
+#include "openvino/op/reduce_logical_and.hpp"
+#include "openvino/op/reduce_l1.hpp"
+#include "openvino/op/reduce_l2.hpp"
+#include "openvino/op/reduce_min.hpp"
+#include "openvino/op/reduce_max.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/reduce.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
@@ -23,14 +23,14 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::reduce_mode mode, bool keep_dims) {
+static void CreateReduceOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op, cldnn::reduce_mode mode, bool keep_dims) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
     auto input_pshape = op->get_input_partial_shape(0);
     int64_t rank = input_pshape.size();
-    auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
+    auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
     OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     std::vector<int64_t> axes = axes_constant->cast_vector<int64_t>();
@@ -94,39 +94,39 @@ static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
     }
 }
-static void CreateReduceMaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMax>& op) {
+static void CreateReduceMaxOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceMax>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::max, op->get_keep_dims());
 }
-static void CreateReduceLogicalAndOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceLogicalAnd>& op) {
+static void CreateReduceLogicalAndOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceLogicalAnd>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::logical_and, op->get_keep_dims());
 }
-static void CreateReduceLogicalOrOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceLogicalOr>& op) {
+static void CreateReduceLogicalOrOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceLogicalOr>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::logical_or, op->get_keep_dims());
 }
-static void CreateReduceMeanOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMean>& op) {
+static void CreateReduceMeanOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceMean>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::mean, op->get_keep_dims());
 }
-static void CreateReduceMinOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceMin>& op) {
+static void CreateReduceMinOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceMin>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::min, op->get_keep_dims());
 }
-static void CreateReduceProdOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceProd>& op) {
+static void CreateReduceProdOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceProd>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::prod, op->get_keep_dims());
 }
-static void CreateReduceSumOp(Program& p, const std::shared_ptr<ngraph::op::v1::ReduceSum>& op) {
+static void CreateReduceSumOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::ReduceSum>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::sum, op->get_keep_dims());
 }
-static void CreateReduceL1Op(Program& p, const std::shared_ptr<ngraph::op::v4::ReduceL1>& op) {
+static void CreateReduceL1Op(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::ReduceL1>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::l1, op->get_keep_dims());
 }
-static void CreateReduceL2Op(Program& p, const std::shared_ptr<ngraph::op::v4::ReduceL2>& op) {
+static void CreateReduceL2Op(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::ReduceL2>& op) {
     CreateReduceOp(p, op, cldnn::reduce_mode::l2, op->get_keep_dims());
 }
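The eight wrappers above differ only in the cldnn::reduce_mode they forward; since every reduce op exposes get_keep_dims(), they could in principle be folded into one template, sketched here for contrast (editorial sketch, not how the commit is written; it keeps one explicit wrapper per op so each can be registered under its own name):

    // Hypothetical helper: forwards any reduce op type to the shared CreateReduceOp.
    template <typename OpType>
    static void CreateReduceOpT(ProgramBuilder& p, const std::shared_ptr<OpType>& op, cldnn::reduce_mode mode) {
        CreateReduceOp(p, op, mode, op->get_keep_dims());
    }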

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/region_yolo.hpp"
+#include "openvino/op/region_yolo.hpp"
 #include "intel_gpu/primitives/region_yolo.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateRegionYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::RegionYolo>& op) {
+static void CreateRegionYoloOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::RegionYolo>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/reorg_yolo.hpp"
+#include "openvino/op/reorg_yolo.hpp"
 #include "intel_gpu/primitives/reorg_yolo.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateReorgYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReorgYolo>& op) {
+static void CreateReorgYoloOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::ReorgYolo>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@@ -2,12 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/reshape.hpp"
-#include "ngraph/op/squeeze.hpp"
-#include "ngraph/op/unsqueeze.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/squeeze.hpp"
+#include "openvino/op/unsqueeze.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/reshape.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
@@ -15,7 +16,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::reshape::reshape_mode mode, bool special_zero = false) {
+static void CreateCommonReshapeOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op, cldnn::reshape::reshape_mode mode, bool special_zero = false) {
     validate_inputs_count(op, {1, 2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -25,7 +26,7 @@ static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node
     if (p.use_new_shape_infer() || op->is_dynamic()) {
         std::shared_ptr<cldnn::reshape> reshape_prim = nullptr;
-        auto second_const_input = op->get_input_size() == 2 ? std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)) : nullptr;
+        auto second_const_input = op->get_input_size() == 2 ? std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1)) : nullptr;
         std::vector<int64_t> output_pattern = {};
         if (second_const_input != nullptr) {
             output_pattern = second_const_input->cast_vector<int64_t>();
@@ -82,15 +83,15 @@ static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node
     }
 }
-static void CreateReshapeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Reshape>& op) {
+static void CreateReshapeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Reshape>& op) {
     CreateCommonReshapeOp(p, op, cldnn::reshape::reshape_mode::base, op->get_special_zero());
 }
-static void CreateSqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Squeeze>& op) {
+static void CreateSqueezeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Squeeze>& op) {
     CreateCommonReshapeOp(p, op, cldnn::reshape::reshape_mode::squeeze);
 }
-static void CreateUnsqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Unsqueeze>& op) {
+static void CreateUnsqueezeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Unsqueeze>& op) {
    CreateCommonReshapeOp(p, op, cldnn::reshape::reshape_mode::unsqueeze);
 }

@@ -2,11 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/nv12_to_rgb.hpp"
+#include "openvino/op/nv12_to_bgr.hpp"
+#include "openvino/op/i420_to_rgb.hpp"
+#include "openvino/op/i420_to_bgr.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/result.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
 using namespace InferenceEngine;
@@ -14,7 +17,7 @@ using namespace InferenceEngine;
 namespace ov {
 namespace intel_gpu {
-static void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& op) {
+static void CreateResultOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Result>& op) {
     OutputsDataMap networkOutputs = p.GetNetworkOutputs();
     validate_inputs_count(op, {1});
@@ -37,10 +40,10 @@ static void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Res
     const auto outputDesc = outputData->getTensorDesc();
     auto outputlayout = outputDesc.getLayout();
-    if (ngraph::is_type<ngraph::op::v8::NV12toRGB>(prev) ||
-        ngraph::is_type<ngraph::op::v8::NV12toBGR>(prev) ||
-        ngraph::is_type<ngraph::op::v8::I420toRGB>(prev) ||
-        ngraph::is_type<ngraph::op::v8::I420toBGR>(prev)) {
+    if (ov::is_type<ov::op::v8::NV12toRGB>(prev) ||
+        ov::is_type<ov::op::v8::NV12toBGR>(prev) ||
+        ov::is_type<ov::op::v8::I420toRGB>(prev) ||
+        ov::is_type<ov::op::v8::I420toBGR>(prev)) {
         outputlayout = NHWC;
     }

@@ -2,21 +2,21 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/primitives/reverse.hpp"
+#include "openvino/op/reverse.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
-#include "ngraph/op/reverse_sequence.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
+#include "intel_gpu/primitives/reverse.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateReverseOp(Program& p, const std::shared_ptr<ngraph::op::v1::Reverse>& op) {
+static void CreateReverseOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Reverse>& op) {
     validate_inputs_count(op, {2});
     const auto inputs = p.GetInputInfo(op);
     const auto layer_name = layer_type_name_ID(op);
     const auto mode =
-        op->get_mode() == ngraph::op::v1::Reverse::Mode::INDEX ? cldnn::reverse_mode::index : cldnn::reverse_mode::mask;
+        op->get_mode() == ov::op::v1::Reverse::Mode::INDEX ? cldnn::reverse_mode::index : cldnn::reverse_mode::mask;
     const cldnn::reverse reverse{layer_name, inputs[0], inputs[1], mode};

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/reverse_sequence.hpp"
+#include "openvino/op/reverse_sequence.hpp"
 #include "intel_gpu/primitives/reverse_sequence.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateReverseSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReverseSequence>& op) {
+static void CreateReverseSequenceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::ReverseSequence>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@@ -2,11 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/lstm_cell.hpp"
-#include "ngraph/op/lstm_sequence.hpp"
+#include "openvino/op/lstm_cell.hpp"
+#include "openvino/op/lstm_sequence.hpp"
 #include "intel_gpu/primitives/reshape.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
@@ -61,7 +61,7 @@ void GetLSTMActivationParams(const std::shared_ptr<T>& op,
 }
 }
-static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::LSTMCell>& op) {
+static void CreateLSTMCellOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::LSTMCell>& op) {
     validate_inputs_count(op, {6});
     int lstm_batch_size, lstm_input_size, lstm_hidden_size;
     auto inputs = p.GetInputInfo(op);
@@ -156,7 +156,7 @@ static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::L
     p.add_primitive(*op, cldnn::reshape(outputCellID, cldnn::input_info(outputCellCropID), outSz));
 }
-static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v5::LSTMSequence>& op) {
+static void CreateLSTMSequenceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v5::LSTMSequence>& op) {
     validate_inputs_count(op, {7});
     std::string layerName = layer_type_name_ID(op);
@@ -186,7 +186,7 @@ static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v
     std::vector<cldnn::activation_additional_params> activation_params;
     GetLSTMActivationParams(op, activations, activation_params);
     float clip = op->get_clip();
-    bool isForward = op->get_direction() == ngraph::op::RecurrentSequenceDirection::FORWARD;
+    bool isForward = op->get_direction() == ov::op::RecurrentSequenceDirection::FORWARD;
     // LSTM primitive works with single precision for all in/out/weights tensors
     auto lstm_dtype = cldnn::element_type_to_data_type(op->get_output_element_type(0));

@@ -1,12 +1,12 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph/op/roi_align.hpp"
+#include "openvino/op/roi_align.hpp"
 #include <memory>
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/primitives/roi_align.hpp"
 namespace ov {
@@ -25,19 +25,19 @@ cldnn::roi_align::PoolingMode from(T mode) {
     }
 }
-cldnn::roi_align::AlignedMode from(ngraph::op::v9::ROIAlign::AlignedMode mode) {
+cldnn::roi_align::AlignedMode from(ov::op::v9::ROIAlign::AlignedMode mode) {
     switch (mode) {
-    case ngraph::op::v9::ROIAlign::AlignedMode::HALF_PIXEL_FOR_NN:
+    case ov::op::v9::ROIAlign::AlignedMode::HALF_PIXEL_FOR_NN:
         return cldnn::roi_align::AlignedMode::half_pixel_for_nn;
-    case ngraph::op::v9::ROIAlign::AlignedMode::HALF_PIXEL:
+    case ov::op::v9::ROIAlign::AlignedMode::HALF_PIXEL:
         return cldnn::roi_align::AlignedMode::half_pixel;
-    case ngraph::op::v9::ROIAlign::AlignedMode::ASYMMETRIC:
+    case ov::op::v9::ROIAlign::AlignedMode::ASYMMETRIC:
     default:
         return cldnn::roi_align::AlignedMode::asymmetric;
     }
 }
-void CreateROIAlignOp(Program& p, const std::shared_ptr<ngraph::op::v3::ROIAlign>& op) {
+void CreateROIAlignOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ROIAlign>& op) {
     validate_inputs_count(op, {3});
     auto roi_align_prim = cldnn::roi_align(layer_type_name_ID(op),
                                            p.GetInputInfo(op),
@@ -50,7 +50,7 @@ void CreateROIAlignOp(Program& p, const std::shared_ptr<ngraph::op::v3::ROIAlign
     p.add_primitive(*op, roi_align_prim);
 }
-void CreateROIAlignOp(Program& p, const std::shared_ptr<ngraph::op::v9::ROIAlign>& op) {
+void CreateROIAlignOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v9::ROIAlign>& op) {
     validate_inputs_count(op, {3});
     auto roi_align_prim = cldnn::roi_align(layer_type_name_ID(op),
                                            p.GetInputInfo(op),

@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/roi_pooling.hpp"
-#include "ngraph/op/psroi_pooling.hpp"
-#include "ngraph/op/deformable_psroi_pooling.hpp"
+#include "openvino/op/roi_pooling.hpp"
+#include "openvino/op/psroi_pooling.hpp"
+#include "openvino/op/deformable_psroi_pooling.hpp"
 #include "intel_gpu/primitives/roi_pooling.hpp"
@@ -25,7 +25,7 @@ static cldnn::pooling_mode GetPoolingMode(std::string method) {
     return cldnn::pooling_mode::deformable_bilinear;
 }
-static void CreateDeformablePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v1::DeformablePSROIPooling>& op) {
+static void CreateDeformablePSROIPoolingOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::DeformablePSROIPooling>& op) {
     validate_inputs_count(op, {2, 3});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -62,7 +62,7 @@ static void CreateDeformablePSROIPoolingOp(Program& p, const std::shared_ptr<ngr
     p.add_primitive(*op, psROIPoolingPrim);
 }
-static void CreatePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::PSROIPooling>& op) {
+static void CreatePSROIPoolingOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::PSROIPooling>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -89,13 +89,13 @@ static void CreatePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v
     p.add_primitive(*op, psROIPoolingPrim);
 }
-static void CreateROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::ROIPooling>& op) {
+static void CreateROIPoolingOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::ROIPooling>& op) {
     validate_inputs_count(op, {2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
     // params
-    auto out_size = op->get_output_size();
+    auto out_size = op->get_output_roi();
     int pooled_height = static_cast<int>(out_size[0]);
     int pooled_width = static_cast<int>(out_size[1]);
     float spatial_scale = op->get_spatial_scale();

@@ -2,19 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/roll.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/roll.hpp"
-#include <ngraph/op/roll.hpp>
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 namespace ov {
 namespace intel_gpu {
 namespace {
-void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
+void CreateRollOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v7::Roll>& op) {
     validate_inputs_count(op, {3});
     const auto inputs = p.GetInputInfo(op);
@@ -27,11 +27,11 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
     const auto format = cldnn::format::get_default_format(rank);
     const auto default_rank = format.dimension();
-    auto shift_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
+    auto shift_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
     OPENVINO_ASSERT(shift_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op_friendly_name, " (", op->get_type_name(), ")");
     const auto shift_raw = shift_constant->cast_vector<int32_t>();
-    auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
+    auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
     OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op_friendly_name, " (", op->get_type_name(), ")");
     auto axes_raw = axes_constant->cast_vector<int32_t>();
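Both constants above are read through ov::op::v0::Constant::cast_vector<T>(), which converts the stored payload to a typed std::vector regardless of the constant's element type. A minimal sketch of the idiom on its own (editorial illustration, not code from this commit):

    // Sketch: extract an integer axes list from a node's constant input.
    #include "openvino/core/except.hpp"
    #include "openvino/core/node.hpp"
    #include "openvino/op/constant.hpp"

    std::vector<int64_t> read_const_axes(const std::shared_ptr<ov::Node>& input) {
        auto constant = ov::as_type_ptr<ov::op::v0::Constant>(input);
        OPENVINO_ASSERT(constant != nullptr, "[GPU] expected a constant axes input");
        return constant->cast_vector<int64_t>();  // converts from any integral element type
    }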

@@ -2,23 +2,23 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/scatter_elements_update.hpp"
+#include "openvino/op/constant.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/scatter_elements_update.hpp"
-#include "ngraph/op/constant.hpp"
 #include "intel_gpu/primitives/scatter_elements_update.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateScatterElementsUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterElementsUpdate>& op) {
+static void CreateScatterElementsUpdateOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ScatterElementsUpdate>& op) {
     validate_inputs_count(op, {4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
-    auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(3));
+    auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(3));
     if (!axes_constant) {
         OPENVINO_ASSERT("Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     }

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/scatter_nd_update.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/scatter_nd_update.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/scatter_nd_update.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateScatterNDUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterNDUpdate>& op) {
+static void CreateScatterNDUpdateOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ScatterNDUpdate>& op) {
     validate_inputs_count(op, {3});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@@ -2,23 +2,23 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/scatter_update.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/scatter_update.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/scatter_update.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateScatterUpdateOp(Program& p, const std::shared_ptr<ngraph::op::v3::ScatterUpdate>& op) {
+static void CreateScatterUpdateOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ScatterUpdate>& op) {
     validate_inputs_count(op, {4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
-    auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(3));
+    auto axes_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(3));
     OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     int64_t axis = axes_constant->cast_vector<int64_t>()[0];
     auto primitive = cldnn::scatter_update(layerName,

@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/select.hpp"
+#include "openvino/op/select.hpp"
 #include "intel_gpu/primitives/select.hpp"
 #include "intel_gpu/primitives/reorder.hpp"
@@ -14,7 +14,7 @@
 namespace ov {
 namespace intel_gpu {
-static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Select>& op) {
+static void CreateSelectOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Select>& op) {
     validate_inputs_count(op, {3});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -24,12 +24,12 @@ static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Sel
     auto broadcast_type = op->get_auto_broadcast();
-    if (broadcast_type.m_type != ngraph::op::AutoBroadcastType::NONE &&
-        broadcast_type.m_type != ngraph::op::AutoBroadcastType::NUMPY) {
+    if (broadcast_type.m_type != ov::op::AutoBroadcastType::NONE &&
+        broadcast_type.m_type != ov::op::AutoBroadcastType::NUMPY) {
         OPENVINO_THROW("[GPU] Unsupported broadcast type (", broadcast_type.m_type, ") in layer " + op->get_friendly_name());
     }
-    if (broadcast_type.m_type == ngraph::op::AutoBroadcastType::NUMPY) {
+    if (broadcast_type.m_type == ov::op::AutoBroadcastType::NUMPY) {
         // Preprocess inputs
         for (size_t i = 0; i < inputs.size(); ++i) {
             auto input_pshape = op->get_input_partial_shape(i);
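The broadcast handling above only distinguishes NONE and NUMPY; everything else is rejected up front. The same gate, isolated as a predicate (editorial sketch using only the public Select API):

    // Sketch: true when the Select op uses a broadcast mode the GPU path accepts.
    #include "openvino/op/select.hpp"

    bool has_supported_broadcast(const std::shared_ptr<ov::op::v1::Select>& op) {
        const auto& bcast = op->get_auto_broadcast();
        return bcast.m_type == ov::op::AutoBroadcastType::NONE ||
               bcast.m_type == ov::op::AutoBroadcastType::NUMPY;
    }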

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/shape_of.hpp"
+#include "openvino/op/shape_of.hpp"
 #include "intel_gpu/primitives/shape_of.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateShapeOfOpCommon(Program& p, const std::shared_ptr<ngraph::Node>& op) {
+static void CreateShapeOfOpCommon(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op) {
     validate_inputs_count(op, {1, 2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -25,11 +25,11 @@ static void CreateShapeOfOpCommon(Program& p, const std::shared_ptr<ngraph::Node
     p.add_primitive(*op, primitive);
 }
-static void CreateShapeOfOp(Program& p, const std::shared_ptr<ngraph::op::v0::ShapeOf>& op) {
+static void CreateShapeOfOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::ShapeOf>& op) {
     CreateShapeOfOpCommon(p, op);
 }
-static void CreateShapeOfOp(Program& p, const std::shared_ptr<ngraph::op::v3::ShapeOf>& op) {
+static void CreateShapeOfOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ShapeOf>& op) {
     CreateShapeOfOpCommon(p, op);
 }

@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/shuffle_channels.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/shuffle_channels.hpp"
 #include "intel_gpu/primitives/shuffle_channels.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateShuffleChannelsOp(Program& p, const std::shared_ptr<ngraph::op::v0::ShuffleChannels>& op) {
+static void CreateShuffleChannelsOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::ShuffleChannels>& op) {
     validate_inputs_count(op, {1, 2});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/slice.hpp"
+#include "openvino/op/slice.hpp"
 #include "intel_gpu/primitives/slice.hpp"
@@ -16,7 +16,7 @@ namespace intel_gpu {
 namespace {
-static void CreateSliceOp(Program& p, const std::shared_ptr<ngraph::op::v8::Slice>& op) {
+static void CreateSliceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::Slice>& op) {
     validate_inputs_count(op, { 4, 5 });
     auto inputs = p.GetInputInfo(op);
     auto output_shape = tensor_from_dims(op->get_output_shape(0));

@@ -2,19 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/softmax.hpp"
+#include "openvino/op/log_softmax.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/softmax.hpp"
-#include "ngraph/op/log_softmax.hpp"
 #include "intel_gpu/primitives/softmax.hpp"
 #include "intel_gpu/primitives/activation.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::Softmax>& op) {
+static void CreateSoftmaxOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Softmax>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -24,7 +24,7 @@ static void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v1::So
     p.add_primitive(*op, softmaxPrim);
 }
-static void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v8::Softmax>& op) {
+static void CreateSoftmaxOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v8::Softmax>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -39,7 +39,7 @@ static void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v8::So
     p.add_primitive(*op, softmaxPrim);
 }
-static void CreateLogSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v5::LogSoftmax>& op) {
+static void CreateLogSoftmaxOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v5::LogSoftmax>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@@ -2,18 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/space_to_batch.hpp"
-#include "ngraph/op/constant.hpp"
+#include "openvino/op/space_to_batch.hpp"
+#include "openvino/op/constant.hpp"
 #include "intel_gpu/primitives/space_to_batch.hpp"
 namespace ov {
 namespace intel_gpu {
-static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v1::SpaceToBatch>& op) {
+static void CreateSpaceToBatchOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::SpaceToBatch>& op) {
     validate_inputs_count(op, {4});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);
@@ -26,7 +26,7 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v
     bool non_constant_input = false;
     for (size_t i = 1; i < 4; ++i) {
-        auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(i));
+        auto inConst = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(i));
         bool is_const_input = (inConst != nullptr);
         OPENVINO_ASSERT((i == 1) || (i >= 2 && non_constant_input != is_const_input),
@@ -47,7 +47,7 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v
         p.add_primitive(*op, spaceToBatchPrim);
     } else {
         for (size_t i = 1; i < 4; ++i) {
-            auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(i));
+            auto inConst = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(i));
             std::vector<int32_t> sizes = inConst->cast_vector<int32_t>();
             int32_t default_size = i == 1 ? 1 : 0;

@@ -2,26 +2,26 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "intel_gpu/plugin/program.hpp"
+#include "intel_gpu/plugin/program_builder.hpp"
 #include "intel_gpu/plugin/common_utils.hpp"
-#include "ngraph/op/space_to_depth.hpp"
+#include "openvino/op/space_to_depth.hpp"
 #include "intel_gpu/primitives/space_to_depth.hpp"
 namespace ov {
 namespace intel_gpu {
-static cldnn::space_to_depth::depth_mode GetDepthMode(ngraph::op::v0::SpaceToDepth::SpaceToDepthMode mode) {
+static cldnn::space_to_depth::depth_mode GetDepthMode(ov::op::v0::SpaceToDepth::SpaceToDepthMode mode) {
     switch (mode) {
-    case ngraph::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST: return cldnn::space_to_depth::blocks_first;
-    case ngraph::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST: return cldnn::space_to_depth::depth_first;
+    case ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST: return cldnn::space_to_depth::blocks_first;
+    case ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST: return cldnn::space_to_depth::depth_first;
     default: OPENVINO_THROW("[GPU] Unsupported SpaceToDepthMode value: ", static_cast<int>(mode));
     }
     return cldnn::space_to_depth::blocks_first;
 }
-static void CreateSpaceToDepthOp(Program& p, const std::shared_ptr<ngraph::op::v0::SpaceToDepth>& op) {
+static void CreateSpaceToDepthOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::SpaceToDepth>& op) {
     validate_inputs_count(op, {1});
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

@ -2,18 +2,18 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/split.hpp" #include "openvino/op/split.hpp"
#include "ngraph/op/variadic_split.hpp" #include "openvino/op/variadic_split.hpp"
#include "intel_gpu/primitives/crop.hpp" #include "intel_gpu/primitives/crop.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) { static void CreateCommonSplitOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op) {
auto get_layer_name = [&](size_t idx)->std::string { auto get_layer_name = [&](size_t idx)->std::string {
return layer_type_name_ID(op) + ((op->get_output_size() == 1)? "" : ".out" + std::to_string(idx)); return layer_type_name_ID(op) + ((op->get_output_size() == 1)? "" : ".out" + std::to_string(idx));
}; };
@ -24,7 +24,7 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
if (!op->is_dynamic()) { if (!op->is_dynamic()) {
auto input_pshape = op->get_input_partial_shape(0); auto input_pshape = op->get_input_partial_shape(0);
InferenceEngine::SizeVector start_offset(input_pshape.size()); ov::Shape start_offset(input_pshape.size());
for (size_t i = 0; i < op->get_output_size(); i++) { for (size_t i = 0; i < op->get_output_size(); i++) {
const auto outPartialShape = op->get_output_partial_shape(i); const auto outPartialShape = op->get_output_partial_shape(i);
auto offsetTensor = tensor_from_dims(start_offset, 0); auto offsetTensor = tensor_from_dims(start_offset, 0);
@ -40,8 +40,8 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
cldnn::crop_ngraph_op_mode op_mode = cldnn::crop_ngraph_op_mode::variadic_split; cldnn::crop_ngraph_op_mode op_mode = cldnn::crop_ngraph_op_mode::variadic_split;
auto num_splits = static_cast<size_t>(1); auto num_splits = static_cast<size_t>(1);
if (ngraph::is_type<ngraph::op::v1::Split>(op)) { if (ov::is_type<ov::op::v1::Split>(op)) {
num_splits = ngraph::as_type_ptr<ngraph::op::v1::Split>(op)->get_num_splits(); num_splits = ov::as_type_ptr<ov::op::v1::Split>(op)->get_num_splits();
op_mode = cldnn::crop_ngraph_op_mode::split; op_mode = cldnn::crop_ngraph_op_mode::split;
} }
@ -57,10 +57,10 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
} }
} else { } else {
auto input_pshape = op->get_input_partial_shape(0); auto input_pshape = op->get_input_partial_shape(0);
InferenceEngine::SizeVector start_offset(input_pshape.size()); ov::Shape start_offset(input_pshape.size());
for (size_t i = 0; i < op->get_output_size(); i++) { for (size_t i = 0; i < op->get_output_size(); i++) {
const auto outPartialShape = op->get_output_partial_shape(i); const auto outPartialShape = op->get_output_partial_shape(i);
NGRAPH_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
if (outPartialShape.size() != start_offset.size()) { if (outPartialShape.size() != start_offset.size()) {
OPENVINO_THROW("Invalid dimesions in split layer: ", op->get_friendly_name(), OPENVINO_THROW("Invalid dimesions in split layer: ", op->get_friendly_name(),
" output: ", ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(i))); " output: ", ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(i)));
@ -71,7 +71,7 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
" output: ", ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(idx))); " output: ", ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(idx)));
} }
} }
NGRAPH_SUPPRESS_DEPRECATED_END OPENVINO_SUPPRESS_DEPRECATED_END
auto offsetTensor = tensor_from_dims(start_offset, 0); auto offsetTensor = tensor_from_dims(start_offset, 0);
auto outTensor = tensor_from_dims(op->get_output_shape(i), 1); auto outTensor = tensor_from_dims(op->get_output_shape(i), 1);
@ -87,12 +87,12 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
} }
} }
static void CreateSplitOp(Program& p, const std::shared_ptr<ngraph::op::v1::Split>& op) { static void CreateSplitOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Split>& op) {
validate_inputs_count(op, {2}); validate_inputs_count(op, {2});
CreateCommonSplitOp(p, op); CreateCommonSplitOp(p, op);
} }
static void CreateVariadicSplitOp(Program& p, const std::shared_ptr<ngraph::op::v1::VariadicSplit>& op) { static void CreateVariadicSplitOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::VariadicSplit>& op) {
validate_inputs_count(op, {3}); validate_inputs_count(op, {3});
CreateCommonSplitOp(p, op); CreateCommonSplitOp(p, op);
} }
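
CreateCommonSplitOp lowers Split and VariadicSplit to one cldnn::crop per output: every output reads a window whose start offset accumulates the sizes of the outputs already emitted along the split axis. A pure-C++ sketch of that bookkeeping, with no plugin types:

// Pure-C++ sketch of the start_offset bookkeeping in CreateCommonSplitOp:
// each output crops [offset, offset + size) along the split axis, and the
// offset advances by the size of every output already emitted.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const size_t axis = 1;
    std::vector<int64_t> input_shape = {2, 10, 4};
    std::vector<int64_t> split_sizes = {3, 3, 4};  // VariadicSplit-style sizes

    std::vector<int64_t> start_offset(input_shape.size(), 0);
    for (size_t i = 0; i < split_sizes.size(); ++i) {
        std::cout << "output " << i << ": offset[axis]=" << start_offset[axis]
                  << " size[axis]=" << split_sizes[i] << "\n";
        start_offset[axis] += split_sizes[i];  // advance past this output
    }
}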

View File

@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/strided_slice.hpp" #include "openvino/op/strided_slice.hpp"
#include "ngraph/op/constant.hpp" #include "openvino/op/constant.hpp"
#include "intel_gpu/primitives/strided_slice.hpp" #include "intel_gpu/primitives/strided_slice.hpp"
#include "intel_gpu/primitives/reshape.hpp" #include "intel_gpu/primitives/reshape.hpp"
@ -15,7 +15,7 @@
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v1::StridedSlice>& op) { static void CreateStridedSliceOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::StridedSlice>& op) {
validate_inputs_count(op, {4}); validate_inputs_count(op, {4});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
@ -23,11 +23,11 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
auto output_pshape = op->get_output_partial_shape(0); auto output_pshape = op->get_output_partial_shape(0);
auto input_pshape = op->get_input_partial_shape(0); auto input_pshape = op->get_input_partial_shape(0);
auto begin_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(1).get_node_shared_ptr()); auto begin_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(1).get_node_shared_ptr());
std::vector<int64_t> begin = begin_constant ? begin_constant->cast_vector<int64_t>() : std::vector<int64_t>{}; std::vector<int64_t> begin = begin_constant ? begin_constant->cast_vector<int64_t>() : std::vector<int64_t>{};
auto end_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(2).get_node_shared_ptr()); auto end_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(2).get_node_shared_ptr());
std::vector<int64_t> end = end_constant ? end_constant->cast_vector<int64_t>() : std::vector<int64_t>{}; std::vector<int64_t> end = end_constant ? end_constant->cast_vector<int64_t>() : std::vector<int64_t>{};
auto stride_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(3).get_node_shared_ptr()); auto stride_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(3).get_node_shared_ptr());
std::vector<int64_t> strides = stride_constant ? stride_constant->cast_vector<int64_t>() : std::vector<int64_t>{}; std::vector<int64_t> strides = stride_constant ? stride_constant->cast_vector<int64_t>() : std::vector<int64_t>{};
do { do {
@ -53,7 +53,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
break; break;
auto convert_to_set = [](const std::vector<int64_t> mask) { auto convert_to_set = [](const std::vector<int64_t> mask) {
ngraph::AxisSet axis_set{}; ov::AxisSet axis_set{};
for (size_t i = 0; i < static_cast<size_t>(mask.size()); ++i) { for (size_t i = 0; i < static_cast<size_t>(mask.size()); ++i) {
if (mask[i] == 1) { if (mask[i] == 1) {
axis_set.emplace(i); axis_set.emplace(i);
@ -210,7 +210,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
offset_tensor[axes[i]] = static_cast<cldnn::tensor::value_type>(offset[i]); offset_tensor[axes[i]] = static_cast<cldnn::tensor::value_type>(offset[i]);
} }
ngraph::Shape crop_shape(reshape_pattern); ov::Shape crop_shape(reshape_pattern);
for (size_t i = 0; i < axes.size(); ++i) { for (size_t i = 0; i < axes.size(); ++i) {
crop_shape[axes[i]] = dim[i]; crop_shape[axes[i]] = dim[i];
} }
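
The convert_to_set lambda turns StridedSlice's 0/1 masks into an ov::AxisSet; the only change in this hunk is dropping the ngraph:: alias. A standalone check of the lambda:

// Standalone sketch of the convert_to_set lambda from CreateStridedSliceOp:
// a 0/1 mask becomes the set of axis indices whose mask bit is 1.
#include <cstdint>
#include <iostream>
#include <vector>
#include "openvino/core/axis_set.hpp"

int main() {
    auto convert_to_set = [](const std::vector<int64_t>& mask) {
        ov::AxisSet axis_set{};
        for (size_t i = 0; i < mask.size(); ++i) {
            if (mask[i] == 1)
                axis_set.emplace(i);
        }
        return axis_set;
    };

    for (auto axis : convert_to_set({1, 0, 1, 1}))
        std::cout << axis << " ";  // prints: 0 2 3
}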

View File

@ -2,15 +2,15 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "intel_gpu/plugin/plugin.hpp" #include "intel_gpu/plugin/plugin.hpp"
#include <cpp/ie_cnn_network.h> #include <cpp/ie_cnn_network.h>
#include "ngraph/op/tensor_iterator.hpp" #include "openvino/op/tensor_iterator.hpp"
#include "ngraph/op/constant.hpp" #include "openvino/op/constant.hpp"
#include "ngraph/op/util/sub_graph_base.hpp" #include "openvino/op/util/sub_graph_base.hpp"
#include "intel_gpu/primitives/loop.hpp" #include "intel_gpu/primitives/loop.hpp"
#include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/mutable_data.hpp"
@ -21,20 +21,20 @@
#include <vector> #include <vector>
#include <algorithm> #include <algorithm>
using TensorIterator = ngraph::op::v0::TensorIterator; using TensorIterator = ov::op::v0::TensorIterator;
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
template<class DATA_TYPE> template<class DATA_TYPE>
static DATA_TYPE CreateScalarData(Program &p, const cldnn::primitive_id& id, int64_t num) { static DATA_TYPE CreateScalarData(ProgramBuilder &p, const cldnn::primitive_id& id, int64_t num) {
auto mem = p.get_engine().allocate_memory({ cldnn::data_types::i64, cldnn::format::bfyx, { 1, 1, 1, 1 } }); auto mem = p.get_engine().allocate_memory({ cldnn::data_types::i64, cldnn::format::bfyx, { 1, 1, 1, 1 } });
cldnn::mem_lock<int64_t> ptr{mem, p.get_engine().get_service_stream()}; cldnn::mem_lock<int64_t> ptr{mem, p.get_engine().get_service_stream()};
*ptr.begin() = num; *ptr.begin() = num;
return {id, mem}; return {id, mem};
} }
static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::shared_ptr<ngraph::Node>& op, static cldnn::mutable_data CreateAdditionalOutputData(ProgramBuilder &p, const std::shared_ptr<ov::Node>& op,
const cldnn::primitive_id& id, const cldnn::primitive_id& input, const cldnn::primitive_id& id, const cldnn::primitive_id& input,
const int32_t output_idx) { const int32_t output_idx) {
const auto precision = cldnn::element_type_to_data_type(op->get_output_element_type(output_idx)); const auto precision = cldnn::element_type_to_data_type(op->get_output_element_type(output_idx));
@ -46,12 +46,12 @@ static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::sha
return md; return md;
} }
static void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorIterator> &op) { static void CreateTensorIteratorOp(ProgramBuilder &p, const std::shared_ptr<TensorIterator> &op) {
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
// get body topology from ngraph function // get body topology from ngraph function
InferenceEngine::CNNNetwork body_network(op->get_body()); InferenceEngine::CNNNetwork body_network(op->get_body());
Program body_program(body_network, p.get_engine(), p.get_config(), true); ProgramBuilder body_program(body_network, p.get_engine(), p.get_config(), true);
auto body_topology = *body_program.GetTopology(); auto body_topology = *body_program.GetTopology();
// setup input_primitive_maps/ output_primitive_maps and back_edges // setup input_primitive_maps/ output_primitive_maps and back_edges
@ -93,11 +93,10 @@ static void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorItera
cldnn::primitive_id from_id = layer_type_name_ID(from); cldnn::primitive_id from_id = layer_type_name_ID(from);
// reset output data type because the data types of the outputs of the // reset output data type because the data types of the outputs of the
// body topology are always FP32 regardless of ngraph data type // body topology are always FP32 regardless of element type
{ {
const auto from_prim = body_topology.at(from_id); const auto from_prim = body_topology.at(from_id);
const auto& to_ngraph_type = to->get_element_type(); const auto to_cldnn_type = cldnn::element_type_to_data_type(to->get_element_type());
const auto to_cldnn_type = cldnn::element_type_to_data_type(to_ngraph_type);
from_prim->output_data_types = {to_cldnn_type}; from_prim->output_data_types = {to_cldnn_type};
} }
back_edges.emplace_back(from_id, to_id); back_edges.emplace_back(from_id, to_id);
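
The back-edge hunk above overrides a body primitive's output data type with the element type of the edge target, since body outputs otherwise default to FP32. The conversion goes through cldnn::element_type_to_data_type; below is a hedged sketch of what such a mapping does, with my_data_type standing in for the cldnn enum and only a few representative cases.

// Hedged sketch of an element_type_to_data_type-style mapping;
// my_data_type stands in for the backend enum, only a few cases shown.
#include <iostream>
#include "openvino/core/except.hpp"
#include "openvino/core/type/element_type.hpp"

enum class my_data_type { f16, f32, i32, i64 };

static my_data_type to_backend(const ov::element::Type& t) {
    switch (static_cast<ov::element::Type_t>(t)) {
    case ov::element::Type_t::f16: return my_data_type::f16;
    case ov::element::Type_t::f32: return my_data_type::f32;
    case ov::element::Type_t::i32: return my_data_type::i32;
    case ov::element::Type_t::i64: return my_data_type::i64;
    default: OPENVINO_THROW("Unsupported element type: ", t);
    }
}

int main() {
    std::cout << (to_backend(ov::element::i64) == my_data_type::i64) << "\n";  // prints: 1
}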

View File

@ -2,22 +2,22 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "openvino/op/tile.hpp"
#include "openvino/op/constant.hpp"
#include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/tile.hpp"
#include "intel_gpu/primitives/tile.hpp" #include "intel_gpu/primitives/tile.hpp"
#include "intel_gpu/primitives/reshape.hpp" #include "intel_gpu/primitives/reshape.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateTileOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tile>& op) { static void CreateTileOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Tile>& op) {
validate_inputs_count(op, {2}); validate_inputs_count(op, {2});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
if (auto repeats_const = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1))) { if (auto repeats_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1))) {
std::vector<int64_t> repeats = repeats_const->cast_vector<int64_t>(); std::vector<int64_t> repeats = repeats_const->cast_vector<int64_t>();
// TODO: Remove code below once new shape infer is enabled // TODO: Remove code below once new shape infer is enabled

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/topk.hpp" #include "openvino/op/topk.hpp"
#include "intel_gpu/primitives/arg_max_min.hpp" #include "intel_gpu/primitives/arg_max_min.hpp"
#include "intel_gpu/primitives/mutable_data.hpp" #include "intel_gpu/primitives/mutable_data.hpp"
@ -14,8 +14,8 @@
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void TopKImpl(Program& p, static void TopKImpl(ProgramBuilder& p,
const std::shared_ptr<ngraph::Node>& op, const std::shared_ptr<ov::Node>& op,
ov::op::TopKMode mode, ov::op::TopKMode mode,
ov::op::TopKSortType stype, ov::op::TopKSortType stype,
uint32_t top_k, uint32_t top_k,
@ -42,7 +42,7 @@ static void TopKImpl(Program& p,
return output_data_types; return output_data_types;
}; };
auto topk_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(1).get_node_shared_ptr()); auto topk_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->input_value(1).get_node_shared_ptr());
auto argmaxPrim = cldnn::arg_max_min(layerName, auto argmaxPrim = cldnn::arg_max_min(layerName,
inputs[0], inputs[0],
inputs[1], inputs[1],
@ -61,8 +61,8 @@ static void TopKImpl(Program& p,
} else { } else {
if (op->get_output_size() == 2) { if (op->get_output_size() == 2) {
auto mutable_precision = op->get_output_element_type(1); auto mutable_precision = op->get_output_element_type(1);
if (mutable_precision == ngraph::element::i64) { if (mutable_precision == ov::element::i64) {
mutable_precision = ngraph::element::i32; mutable_precision = ov::element::i32;
} }
cldnn::layout mutableLayout = cldnn::layout(cldnn::element_type_to_data_type(mutable_precision), cldnn::layout mutableLayout = cldnn::layout(cldnn::element_type_to_data_type(mutable_precision),
@ -116,11 +116,11 @@ static void TopKImpl(Program& p,
} }
} }
static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>& op) { static void CreateTopKOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::TopK>& op) {
TopKImpl(p, op, op->get_mode(), op->get_sort_type(), static_cast<uint32_t>(op->get_k()), op->get_axis()); TopKImpl(p, op, op->get_mode(), op->get_sort_type(), static_cast<uint32_t>(op->get_k()), op->get_axis());
} }
static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v11::TopK>& op) { static void CreateTopKOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v11::TopK>& op) {
TopKImpl(p, op, op->get_mode(), op->get_sort_type(), static_cast<uint32_t>(op->get_k()), op->get_axis(), op->get_stable()); TopKImpl(p, op, op->get_mode(), op->get_sort_type(), static_cast<uint32_t>(op->get_k()), op->get_axis(), op->get_stable());
} }
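
When TopK produces two outputs, TopKImpl allocates a mutable buffer for the indices and first downgrades i64 to i32, presumably because the kernels emit 32-bit indices. A minimal sketch of just that decision:

// Minimal sketch of the index-precision downgrade in TopKImpl: a 64-bit
// index output is stored as 32-bit before the mutable buffer is allocated.
#include <iostream>
#include "openvino/core/type/element_type.hpp"

int main() {
    ov::element::Type mutable_precision = ov::element::i64;  // e.g. from get_output_element_type(1)
    if (mutable_precision == ov::element::i64)
        mutable_precision = ov::element::i32;
    std::cout << mutable_precision << "\n";  // prints: i32
}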

View File

@ -2,39 +2,43 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "openvino/op/transpose.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/nv12_to_rgb.hpp"
#include "openvino/op/nv12_to_bgr.hpp"
#include "openvino/op/i420_to_rgb.hpp"
#include "openvino/op/i420_to_bgr.hpp"
#include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/transpose.hpp"
#include "ngraph/op/constant.hpp"
#include "intel_gpu/primitives/permute.hpp" #include "intel_gpu/primitives/permute.hpp"
#include "intel_gpu/primitives/reorder.hpp" #include "intel_gpu/primitives/reorder.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
static void CreateTransposeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Transpose>& op) { static void CreateTransposeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::Transpose>& op) {
validate_inputs_count(op, {1, 2}); validate_inputs_count(op, {1, 2});
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
std::vector<uint16_t> order; std::vector<uint16_t> order;
if (op->get_input_size() == 2) { if (op->get_input_size() == 2) {
auto order_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1)); auto order_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
OPENVINO_ASSERT(order_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_ASSERT(order_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
order = order_constant->cast_vector<uint16_t>(); order = order_constant->cast_vector<uint16_t>();
} }
auto is_convert_color_type_impl = [](const std::shared_ptr<ov::Node> &node) { auto is_convert_color_type_impl = [](const std::shared_ptr<ov::Node> &node) {
return ngraph::is_type<ngraph::op::v8::NV12toRGB>(node) || return ov::is_type<ov::op::v8::NV12toRGB>(node) ||
ngraph::is_type<ngraph::op::v8::NV12toBGR>(node) || ov::is_type<ov::op::v8::NV12toBGR>(node) ||
ngraph::is_type<ngraph::op::v8::I420toRGB>(node) || ov::is_type<ov::op::v8::I420toRGB>(node) ||
ngraph::is_type<ngraph::op::v8::I420toBGR>(node); ov::is_type<ov::op::v8::I420toBGR>(node);
}; };
auto is_convert_color_type = [&is_convert_color_type_impl](const std::shared_ptr<ov::Node> &node) { auto is_convert_color_type = [&is_convert_color_type_impl](const std::shared_ptr<ov::Node> &node) {
if (ngraph::is_type<ngraph::op::v0::Convert>(node)) { if (ngraph::is_type<ov::op::v0::Convert>(node)) {
return is_convert_color_type_impl(node->get_input_node_shared_ptr(0)); return is_convert_color_type_impl(node->get_input_node_shared_ptr(0));
} }
return is_convert_color_type_impl(node); return is_convert_color_type_impl(node);
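
The transpose rewrite keeps a two-level heuristic: a node is treated as a color-conversion source if it is one of the NV12/I420 ops, or a Convert fed directly by one. A standalone sketch of the look-through pattern, substituting Relu for the color ops so it stays self-contained:

// Standalone sketch of the look-through-Convert pattern above, using Relu
// in place of the NV12/I420 color-conversion ops to stay self-contained.
#include <iostream>
#include <memory>
#include "openvino/core/type.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"

static bool is_relu_like(const std::shared_ptr<ov::Node>& node) {
    if (ov::is_type<ov::op::v0::Convert>(node))
        return ov::is_type<ov::op::v0::Relu>(node->get_input_node_shared_ptr(0));
    return ov::is_type<ov::op::v0::Relu>(node);
}

int main() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    auto relu = std::make_shared<ov::op::v0::Relu>(param);
    auto convert = std::make_shared<ov::op::v0::Convert>(relu, ov::element::f16);
    std::cout << std::boolalpha << is_relu_like(convert) << "\n";  // prints: true
}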

View File

@ -2,51 +2,51 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "transformations/utils/utils.hpp" #include "transformations/utils/utils.hpp"
#include "ngraph/op/tanh.hpp" #include "openvino/op/tanh.hpp"
#include "ngraph/op/elu.hpp" #include "openvino/op/elu.hpp"
#include "ngraph/op/sigmoid.hpp" #include "openvino/op/sigmoid.hpp"
#include "ngraph/op/relu.hpp" #include "openvino/op/relu.hpp"
#include "ngraph/op/prelu.hpp" #include "openvino/op/prelu.hpp"
#include "ngraph/op/clamp.hpp" #include "openvino/op/clamp.hpp"
#include "ngraph/op/exp.hpp" #include "openvino/op/exp.hpp"
#include "ngraph/op/not.hpp" #include "openvino/op/logical_not.hpp"
#include "ngraph/op/asin.hpp" #include "openvino/op/asin.hpp"
#include "ngraph/op/asinh.hpp" #include "openvino/op/asinh.hpp"
#include "ngraph/op/acos.hpp" #include "openvino/op/acos.hpp"
#include "ngraph/op/acosh.hpp" #include "openvino/op/acosh.hpp"
#include "ngraph/op/atan.hpp" #include "openvino/op/atan.hpp"
#include "ngraph/op/atanh.hpp" #include "openvino/op/atanh.hpp"
#include "ngraph/op/abs.hpp" #include "openvino/op/abs.hpp"
#include "ngraph/op/floor.hpp" #include "openvino/op/floor.hpp"
#include "ngraph/op/ceiling.hpp" #include "openvino/op/ceiling.hpp"
#include "ngraph/op/erf.hpp" #include "openvino/op/erf.hpp"
#include "ngraph/op/hard_sigmoid.hpp" #include "openvino/op/hard_sigmoid.hpp"
#include "ngraph/op/log.hpp" #include "openvino/op/log.hpp"
#include "ngraph/op/negative.hpp" #include "openvino/op/negative.hpp"
#include "ngraph/op/selu.hpp" #include "openvino/op/selu.hpp"
#include "ngraph/op/softplus.hpp" #include "openvino/op/softplus.hpp"
#include "ngraph/op/tan.hpp" #include "openvino/op/tan.hpp"
#include "ngraph/op/sin.hpp" #include "openvino/op/sin.hpp"
#include "ngraph/op/sinh.hpp" #include "openvino/op/sinh.hpp"
#include "ngraph/op/cos.hpp" #include "openvino/op/cos.hpp"
#include "ngraph/op/cosh.hpp" #include "openvino/op/cosh.hpp"
#include "ngraph/op/swish.hpp" #include "openvino/op/swish.hpp"
#include "ngraph/op/hswish.hpp" #include "openvino/op/hswish.hpp"
#include "ngraph/op/mish.hpp" #include "openvino/op/mish.hpp"
#include "ngraph/op/gelu.hpp" #include "openvino/op/gelu.hpp"
#include "ngraph/op/sign.hpp" #include "openvino/op/sign.hpp"
#include "ngraph/op/hsigmoid.hpp" #include "openvino/op/hsigmoid.hpp"
#include "ngraph/op/round.hpp" #include "openvino/op/round.hpp"
#include "intel_gpu/primitives/activation.hpp" #include "intel_gpu/primitives/activation.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
void CreateUnaryEltwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& op, void CreateUnaryEltwiseOp(ProgramBuilder& p, const std::shared_ptr<ov::Node>& op,
cldnn::activation_func func, cldnn::activation_additional_params params) { cldnn::activation_func func, cldnn::activation_additional_params params) {
auto inputs = p.GetInputInfo(op); auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op); std::string layerName = layer_type_name_ID(op);
@ -54,31 +54,31 @@ void CreateUnaryEltwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
p.add_primitive(*op, activationPrimitive); p.add_primitive(*op, activationPrimitive);
} }
static void CreateTanhOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tanh>& op) { static void CreateTanhOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Tanh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hyperbolic_tan, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hyperbolic_tan, {});
} }
static void CreateEluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Elu>& op) { static void CreateEluOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Elu>& op) {
auto alpha = static_cast<float>(op->get_alpha()); auto alpha = static_cast<float>(op->get_alpha());
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::elu, {alpha}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::elu, {alpha});
} }
static void CreateSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sigmoid>& op) { static void CreateSigmoidOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Sigmoid>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::logistic, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::logistic, {});
} }
static void CreateReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Relu>& op) { static void CreateReluOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Relu>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::relu, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::relu, {});
} }
static void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRelu>& op) { static void CreatePReluOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::PRelu>& op) {
validate_inputs_count(op, {2}); validate_inputs_count(op, {2});
auto slope_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)); auto slope_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto slope_shape = op->get_input_partial_shape(1); auto slope_shape = op->get_input_partial_shape(1);
auto out_shape = op->get_output_partial_shape(0); auto out_shape = op->get_output_partial_shape(0);
if (slope_node && ngraph::shape_size(slope_shape.to_shape()) == 1) { if (slope_node && ov::shape_size(slope_shape.to_shape()) == 1) {
float slope; float slope;
OPENVINO_ASSERT(ov::op::util::get_single_value(slope_node, slope), OPENVINO_ASSERT(ov::op::util::get_single_value(slope_node, slope),
"[GPU] Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); "[GPU] Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
@ -94,7 +94,7 @@ static void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRel
} }
} }
static void CreateClampOp(Program& p, const std::shared_ptr<ngraph::op::v0::Clamp>& op) { static void CreateClampOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Clamp>& op) {
double min = op->get_min(); double min = op->get_min();
double max = op->get_max(); double max = op->get_max();
if (op->get_output_element_type(0) == ov::element::i32) { if (op->get_output_element_type(0) == ov::element::i32) {
@ -109,68 +109,68 @@ static void CreateClampOp(Program& p, const std::shared_ptr<ngraph::op::v0::Clam
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::clamp, {static_cast<float>(min), static_cast<float>(max)}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::clamp, {static_cast<float>(min), static_cast<float>(max)});
} }
static void CreateExpOp(Program& p, const std::shared_ptr<ngraph::op::v0::Exp>& op) { static void CreateExpOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Exp>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::exp, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::exp, {});
} }
static void CreateLogicalNotOp(Program& p, const std::shared_ptr<ngraph::op::v1::LogicalNot>& op) { static void CreateLogicalNotOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v1::LogicalNot>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::negation, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::negation, {});
} }
static void CreateAsinOp(Program& p, const std::shared_ptr<ngraph::op::v0::Asin>& op) { static void CreateAsinOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Asin>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::asin, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::asin, {});
} }
static void CreateAsinhOp(Program& p, const std::shared_ptr<ngraph::op::v3::Asinh>& op) { static void CreateAsinhOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::Asinh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::asinh, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::asinh, {});
} }
static void CreateAcosOp(Program& p, const std::shared_ptr<ngraph::op::v0::Acos>& op) { static void CreateAcosOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Acos>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::acos, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::acos, {});
} }
static void CreateAcoshOp(Program& p, const std::shared_ptr<ngraph::op::v3::Acosh>& op) { static void CreateAcoshOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::Acosh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::acosh, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::acosh, {});
} }
static void CreateAtanOp(Program& p, const std::shared_ptr<ngraph::op::v0::Atan>& op) { static void CreateAtanOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Atan>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::atan, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::atan, {});
} }
static void CreateAtanhOp(Program& p, const std::shared_ptr<ngraph::op::v3::Atanh>& op) { static void CreateAtanhOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::Atanh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::atanh, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::atanh, {});
} }
static void CreateAbsOp(Program& p, const std::shared_ptr<ngraph::op::v0::Abs>& op) { static void CreateAbsOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Abs>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::abs, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::abs, {});
} }
static void CreateFloorOp(Program& p, const std::shared_ptr<ngraph::op::v0::Floor>& op) { static void CreateFloorOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Floor>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::floor, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::floor, {});
} }
static void CreateCeilingOp(Program& p, const std::shared_ptr<ngraph::op::v0::Ceiling>& op) { static void CreateCeilingOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Ceiling>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::ceil, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::ceil, {});
} }
static void CreateSqrtOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sqrt>& op) { static void CreateSqrtOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Sqrt>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sqrt, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sqrt, {});
} }
static void CreateErfOp(Program& p, const std::shared_ptr<ngraph::op::v0::Erf>& op) { static void CreateErfOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Erf>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::erf, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::erf, {});
} }
static void CreateHardSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0::HardSigmoid>& op) { static void CreateHardSigmoidOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::HardSigmoid>& op) {
validate_inputs_count(op, {3}); validate_inputs_count(op, {3});
auto alpha_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)); auto alpha_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto beta_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2)); auto beta_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!alpha_node || !beta_node) { if (!alpha_node || !beta_node) {
OPENVINO_THROW("[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
} }
if (ngraph::shape_size(alpha_node->get_output_shape(0)) == 1 && if (ov::shape_size(alpha_node->get_output_shape(0)) == 1 &&
ngraph::shape_size(beta_node->get_output_shape(0)) == 1) { ov::shape_size(beta_node->get_output_shape(0)) == 1) {
float alpha, beta; float alpha, beta;
if (!ov::op::util::get_single_value(alpha_node, alpha) || !ov::op::util::get_single_value(beta_node, beta)) { if (!ov::op::util::get_single_value(alpha_node, alpha) || !ov::op::util::get_single_value(beta_node, beta)) {
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
@ -179,24 +179,24 @@ static void CreateHardSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0
} }
} }
static void CreateLogOp(Program& p, const std::shared_ptr<ngraph::op::v0::Log>& op) { static void CreateLogOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Log>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::log, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::log, {});
} }
static void CreateNegativeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Negative>& op) { static void CreateNegativeOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Negative>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::negative, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::negative, {});
} }
static void CreateSeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Selu>& op) { static void CreateSeluOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Selu>& op) {
validate_inputs_count(op, {3}); validate_inputs_count(op, {3});
auto alpha_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)); auto alpha_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto lambda_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2)); auto lambda_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!alpha_node || !lambda_node) { if (!alpha_node || !lambda_node) {
OPENVINO_THROW("Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
} }
if (ngraph::shape_size(alpha_node->get_output_shape(0)) == 1 && if (ov::shape_size(alpha_node->get_output_shape(0)) == 1 &&
ngraph::shape_size(lambda_node->get_output_shape(0)) == 1) { ov::shape_size(lambda_node->get_output_shape(0)) == 1) {
float alpha, lambda; float alpha, lambda;
if (!ov::op::util::get_single_value(alpha_node, alpha) || !ov::op::util::get_single_value(lambda_node, lambda)) { if (!ov::op::util::get_single_value(alpha_node, alpha) || !ov::op::util::get_single_value(lambda_node, lambda)) {
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
@ -207,36 +207,36 @@ static void CreateSeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Selu>
} }
} }
static void CreateSoftPlusOp(Program& p, const std::shared_ptr<ngraph::op::v4::SoftPlus>& op) { static void CreateSoftPlusOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::SoftPlus>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::softplus, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::softplus, {});
} }
static void CreateTanOp(Program& p, const std::shared_ptr<ngraph::op::v0::Tan>& op) { static void CreateTanOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Tan>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::tan, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::tan, {});
} }
static void CreateSinOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sin>& op) { static void CreateSinOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Sin>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sin, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sin, {});
} }
static void CreateSinhOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sinh>& op) { static void CreateSinhOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Sinh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sinh, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sinh, {});
} }
static void CreateCosOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cos>& op) { static void CreateCosOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Cos>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::cos, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::cos, {});
} }
static void CreateCoshOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cosh>& op) { static void CreateCoshOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Cosh>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::cosh, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::cosh, {});
} }
static void CreateSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Swish>& op) { static void CreateSwishOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::Swish>& op) {
validate_inputs_count(op, {1, 2}); validate_inputs_count(op, {1, 2});
if (op->get_input_size() == 2) { if (op->get_input_size() == 2) {
auto beta_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)); auto beta_node = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
if (beta_node) { if (beta_node) {
if (ngraph::shape_size(beta_node->get_output_shape(0)) == 1) { if (ov::shape_size(beta_node->get_output_shape(0)) == 1) {
float beta; float beta;
if (!ov::op::util::get_single_value(beta_node, beta)) { if (!ov::op::util::get_single_value(beta_node, beta)) {
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
@ -253,42 +253,42 @@ static void CreateSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Swis
} }
} }
static void CreateHSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::HSwish>& op) { static void CreateHSwishOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::HSwish>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hswish, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hswish, {});
} }
static void CreateMishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Mish>& op) { static void CreateMishOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v4::Mish>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::mish, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::mish, {});
} }
static void CreateGeluOp(Program& p, const std::shared_ptr<ngraph::op::v7::Gelu>& op) { static void CreateGeluOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v7::Gelu>& op) {
cldnn::activation_func activationFunc = cldnn::activation_func activationFunc =
op->get_approximation_mode() == op::GeluApproximationMode::ERF ? cldnn::activation_func::gelu op->get_approximation_mode() == op::GeluApproximationMode::ERF ? cldnn::activation_func::gelu
: cldnn::activation_func::gelu_tanh; : cldnn::activation_func::gelu_tanh;
CreateUnaryEltwiseOp(p, op, activationFunc, {}); CreateUnaryEltwiseOp(p, op, activationFunc, {});
} }
static void CreateSoftSignOp(Program& p, const std::shared_ptr<ngraph::op::v9::SoftSign>& op) { static void CreateSoftSignOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v9::SoftSign>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::softsign, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::softsign, {});
} }
static void CreateGeluOp(Program &p, const std::shared_ptr<ngraph::op::v0::Gelu>& op) { static void CreateGeluOp(ProgramBuilder &p, const std::shared_ptr<ov::op::v0::Gelu>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::gelu, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::gelu, {});
} }
static void CreateSignOp(Program& p, const std::shared_ptr<ngraph::op::v0::Sign>& op) { static void CreateSignOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v0::Sign>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sign, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::sign, {});
} }
static void CreateHSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v5::HSigmoid>& op) { static void CreateHSigmoidOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v5::HSigmoid>& op) {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hsigmoid, {}); CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hsigmoid, {});
} }
static void CreateRoundOp(Program& p, const std::shared_ptr<ngraph::op::v5::Round>& op) { static void CreateRoundOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v5::Round>& op) {
auto func = cldnn::activation_func::none; auto func = cldnn::activation_func::none;
switch (op->get_mode()) { switch (op->get_mode()) {
case ngraph::op::v5::Round::RoundMode::HALF_TO_EVEN : func = cldnn::activation_func::round_half_to_even; break; case ov::op::v5::Round::RoundMode::HALF_TO_EVEN : func = cldnn::activation_func::round_half_to_even; break;
case ngraph::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO : func = cldnn::activation_func::round_half_away_from_zero; break; case ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO : func = cldnn::activation_func::round_half_away_from_zero; break;
default: OPENVINO_THROW("Unsupported round mode in ", op->get_friendly_name(), ": ", static_cast<int>(op->get_mode())); default: OPENVINO_THROW("Unsupported round mode in ", op->get_friendly_name(), ": ", static_cast<int>(op->get_mode()));
} }
CreateUnaryEltwiseOp(p, op, func, {}); CreateUnaryEltwiseOp(p, op, func, {});
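
CreateRoundOp is representative of how this file maps each unary op onto a cldnn activation function. A hedged sketch of the dispatch, with my_activation standing in for cldnn::activation_func:

// Hedged sketch of the RoundMode dispatch in CreateRoundOp, with
// my_activation standing in for cldnn::activation_func.
#include <iostream>
#include "openvino/core/except.hpp"
#include "openvino/op/round.hpp"

enum class my_activation { none, round_half_to_even, round_half_away_from_zero };

static my_activation pick_round(ov::op::v5::Round::RoundMode mode) {
    switch (mode) {
    case ov::op::v5::Round::RoundMode::HALF_TO_EVEN:
        return my_activation::round_half_to_even;
    case ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO:
        return my_activation::round_half_away_from_zero;
    default:
        OPENVINO_THROW("Unsupported round mode: ", static_cast<int>(mode));
    }
}

int main() {
    auto f = pick_round(ov::op::v5::Round::RoundMode::HALF_TO_EVEN);
    std::cout << (f == my_activation::round_half_to_even) << "\n";  // prints: 1
}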

View File

@ -2,26 +2,30 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/primitives/unique.hpp" #include "openvino/core/validation_util.hpp"
#include "openvino/op/unique.hpp"
#include "openvino/op/constant.hpp"
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/primitives/unique.hpp"
#include "ngraph/op/unique.hpp" #include "intel_gpu/plugin/program_builder.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
namespace { namespace {
void CreateUniqueOp(Program& p, const std::shared_ptr<ngraph::op::v10::Unique>& op) { void CreateUniqueOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v10::Unique>& op) {
validate_inputs_count(op, {1, 2}); validate_inputs_count(op, {1, 2});
bool flattened = true; bool flattened = true;
int64_t axis{}; int64_t axis{};
if (op->get_input_size() == 2) { if (op->get_input_size() == 2) {
auto axis_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1)); auto axis_constant = std::dynamic_pointer_cast<ov::op::v0::Constant>(op->get_input_node_shared_ptr(1));
OPENVINO_ASSERT(axis_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); OPENVINO_ASSERT(axis_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
axis = axis_constant->cast_vector<int64_t>().at(0); axis = axis_constant->cast_vector<int64_t>().at(0);
OPENVINO_SUPPRESS_DEPRECATED_START
axis = ov::normalize_axis(op.get(), axis, op->get_input_partial_shape(0).rank()); axis = ov::normalize_axis(op.get(), axis, op->get_input_partial_shape(0).rank());
OPENVINO_SUPPRESS_DEPRECATED_END
flattened = false; flattened = false;
} }
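
The new suppression macros exist because ov::normalize_axis is deprecated; the arithmetic it performs is simple, though: a negative axis counts from the back. A pure-C++ sketch:

// Pure-C++ sketch of what axis normalization does: a negative axis counts
// from the back, so for rank r the valid inputs are [-r, r-1].
#include <cstdint>
#include <iostream>
#include <stdexcept>

static int64_t normalize_axis_sketch(int64_t axis, int64_t rank) {
    if (axis < -rank || axis >= rank)
        throw std::out_of_range("axis out of range");
    return axis < 0 ? axis + rank : axis;
}

int main() {
    std::cout << normalize_axis_sketch(-1, 4) << "\n";  // prints: 3
}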

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/common_utils.hpp"
#include "ngraph/op/assign.hpp" #include "openvino/op/assign.hpp"
#include "ngraph/op/read_value.hpp" #include "openvino/op/read_value.hpp"
#include "intel_gpu/primitives/assign.hpp" #include "intel_gpu/primitives/assign.hpp"
#include "intel_gpu/primitives/read_value.hpp" #include "intel_gpu/primitives/read_value.hpp"
@ -15,7 +15,7 @@ namespace intel_gpu {
namespace { namespace {
template<typename T_PRIMITIVE> template<typename T_PRIMITIVE>
void CreateVariableAccessPrimitive(Program &p, const std::shared_ptr<ngraph::op::Op> &op, void CreateVariableAccessPrimitive(ProgramBuilder &p, const std::shared_ptr<ov::op::Op> &op,
const std::string &variable_id) { const std::string &variable_id) {
validate_inputs_count(op, {1}); validate_inputs_count(op, {1});
@ -36,19 +36,19 @@ void CreateVariableAccessPrimitive(Program &p, const std::shared_ptr<ngraph::op:
p.add_primitive(*op, prim); p.add_primitive(*op, prim);
} }
void CreateReadValueOp(Program& p, const std::shared_ptr<ngraph::op::v3::ReadValue>& op) { void CreateReadValueOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::ReadValue>& op) {
CreateVariableAccessPrimitive<cldnn::read_value>(p, op, op->get_variable_id()); CreateVariableAccessPrimitive<cldnn::read_value>(p, op, op->get_variable_id());
} }
void CreateAssignOp(Program& p, const std::shared_ptr<ngraph::op::v3::Assign>& op) { void CreateAssignOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v3::Assign>& op) {
CreateVariableAccessPrimitive<cldnn::assign>(p, op, op->get_variable_id()); CreateVariableAccessPrimitive<cldnn::assign>(p, op, op->get_variable_id());
} }
void CreateReadValueOp(Program& p, const std::shared_ptr<ngraph::op::v6::ReadValue>& op) { void CreateReadValueOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v6::ReadValue>& op) {
CreateVariableAccessPrimitive<cldnn::read_value>(p, op, op->get_variable_id()); CreateVariableAccessPrimitive<cldnn::read_value>(p, op, op->get_variable_id());
} }
void CreateAssignOp(Program& p, const std::shared_ptr<ngraph::op::v6::Assign>& op) { void CreateAssignOp(ProgramBuilder& p, const std::shared_ptr<ov::op::v6::Assign>& op) {
CreateVariableAccessPrimitive<cldnn::assign>(p, op, op->get_variable_id()); CreateVariableAccessPrimitive<cldnn::assign>(p, op, op->get_variable_id());
} }
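
CreateVariableAccessPrimitive keeps ReadValue and Assign, in both their v3 and v6 flavors, on a single code path by templating on the primitive type. A shape-only, pure-C++ sketch of that dispatch; read_value and assign here are bare stand-ins for the cldnn primitives:

// Shape-only sketch of the template dispatch in CreateVariableAccessPrimitive;
// read_value and assign are stand-ins, not the real cldnn primitives.
#include <iostream>
#include <string>

struct read_value { std::string id, variable_id; };
struct assign     { std::string id, variable_id; };

template <typename T_PRIMITIVE>
static T_PRIMITIVE make_variable_access(const std::string& layer_id, const std::string& variable_id) {
    return T_PRIMITIVE{layer_id, variable_id};  // the real helper also wires inputs and layout
}

int main() {
    auto rv = make_variable_access<read_value>("readvalue:rv1", "var0");
    std::cout << rv.id << " -> " << rv.variable_id << "\n";
}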

View File

@ -302,7 +302,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork& network,
config.set_user_property(preprocess_config(orig_config)); config.set_user_property(preprocess_config(orig_config));
config.apply_user_properties(ctx->get_engine().get_device_info()); config.apply_user_properties(ctx->get_engine().get_device_info());
Program prog(ctx->get_engine(), config); ProgramBuilder prog(ctx->get_engine(), config);
bool dyn_shape_batch_found = false; bool dyn_shape_batch_found = false;
auto model = network.getFunction(); auto model = network.getFunction();
@ -859,7 +859,7 @@ uint32_t Plugin::get_max_batch_size(const std::map<std::string, Parameter>& opti
auto& engine = get_default_context(device_id)->get_impl()->get_engine(); auto& engine = get_default_context(device_id)->get_impl()->get_engine();
std::shared_ptr<Program> program; std::shared_ptr<ProgramBuilder> program;
GPU_DEBUG_IF(debug_config->base_batch_for_memory_estimation > 0) { GPU_DEBUG_IF(debug_config->base_batch_for_memory_estimation > 0) {
size_t user_specified_base_batch_size = debug_config->base_batch_for_memory_estimation; size_t user_specified_base_batch_size = debug_config->base_batch_for_memory_estimation;
@ -919,7 +919,7 @@ uint32_t Plugin::get_max_batch_size(const std::map<std::string, Parameter>& opti
auto nGraphFunc = cloned_network.getFunction(); auto nGraphFunc = cloned_network.getFunction();
TransformationsPipeline transformations(config, device_info); TransformationsPipeline transformations(config, device_info);
transformations.apply(nGraphFunc); transformations.apply(nGraphFunc);
program = std::make_shared<Program>(cloned_network, engine, config, false, true); program = std::make_shared<ProgramBuilder>(cloned_network, engine, config, false, true);
std::pair<int64_t, int64_t> device_memory_usage = program->GetCompiledProgram(0)->get_estimated_device_mem_usage(); std::pair<int64_t, int64_t> device_memory_usage = program->GetCompiledProgram(0)->get_estimated_device_mem_usage();
if (device_memory_usage.first == static_cast<int64_t>(-1L) && device_memory_usage.second == static_cast<int64_t>(-1L)) { if (device_memory_usage.first == static_cast<int64_t>(-1L) && device_memory_usage.second == static_cast<int64_t>(-1L)) {
return static_cast<uint32_t>(max_batch_size); return static_cast<uint32_t>(max_batch_size);

View File

@ -2,14 +2,13 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#if defined(__unix__) && !defined(__ANDROID__)
#include <malloc.h>
#endif
#include "openvino/core/graph_util.hpp" #include "openvino/core/graph_util.hpp"
#include "openvino/runtime/system_conf.hpp" #include "openvino/runtime/system_conf.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/split.hpp"
#include "openvino/op/variadic_split.hpp"
#include "intel_gpu/plugin/program.hpp" #include "intel_gpu/plugin/program_builder.hpp"
#include "intel_gpu/plugin/transformations_pipeline.hpp" #include "intel_gpu/plugin/transformations_pipeline.hpp"
#include "intel_gpu/runtime/itt.hpp" #include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/runtime/debug_configuration.hpp" #include "intel_gpu/runtime/debug_configuration.hpp"
@ -20,39 +19,44 @@
# include <dlfcn.h> # include <dlfcn.h>
#endif #endif
#if defined(__unix__) && !defined(__ANDROID__)
#include <malloc.h>
#endif
using namespace InferenceEngine; using namespace InferenceEngine;
using namespace InferenceEngine::details; using namespace InferenceEngine::details;
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
const cldnn::primitive_id Program::m_preProcessTag("_cldnn_input_preprocess"); const cldnn::primitive_id ProgramBuilder::m_preProcessTag("_cldnn_input_preprocess");
const cldnn::primitive_id Program::m_meanValuesTag("_cldnn_mean_values"); const cldnn::primitive_id ProgramBuilder::m_meanValuesTag("_cldnn_mean_values");
const cldnn::primitive_id Program::m_preCustomLayerTag("_cldnn_custom_preprocess"); const cldnn::primitive_id ProgramBuilder::m_preCustomLayerTag("_cldnn_custom_preprocess");
const cldnn::primitive_id Program::m_postCustomLayerTag("_cldnn_custom_postprocess"); const cldnn::primitive_id ProgramBuilder::m_postCustomLayerTag("_cldnn_custom_postprocess");
Program::factories_map_t Program::factories_map = {}; ProgramBuilder::factories_map_t ProgramBuilder::factories_map = {};
std::mutex Program::m_mutex = {}; std::mutex ProgramBuilder::m_mutex = {};
std::string layer_type_lower(const ngraph::Node* op) { std::string layer_type_lower(const ov::Node* op) {
std::string layerType = op->get_type_name(); std::string layerType = op->get_type_name();
std::transform(layerType.begin(), layerType.end(), layerType.begin(), std::transform(layerType.begin(), layerType.end(), layerType.begin(),
[](unsigned char c) -> unsigned char { return std::tolower(c); }); [](unsigned char c) -> unsigned char { return std::tolower(c); });
return layerType; return layerType;
} }
std::string layer_type_name_ID(const ngraph::Node* op) { std::string layer_type_name_ID(const ov::Node* op) {
return layer_type_lower(op) + ":" + op->get_friendly_name(); return layer_type_lower(op) + ":" + op->get_friendly_name();
} }
std::string layer_type_lower(const std::shared_ptr<ngraph::Node>& op) { std::string layer_type_lower(const std::shared_ptr<ov::Node>& op) {
return layer_type_lower(op.get()); return layer_type_lower(op.get());
} }
std::string layer_type_name_ID(const std::shared_ptr<ngraph::Node>& op) { std::string layer_type_name_ID(const std::shared_ptr<ov::Node>& op) {
return layer_type_name_ID(op.get()); return layer_type_name_ID(op.get());
} }
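
The renamed free functions above define the plugin-wide primitive-ID convention: the lower-cased op type name, a colon, and the friendly name. A standalone sketch of that convention:

// Standalone sketch of the primitive-ID convention behind layer_type_name_ID:
// lower-cased op type name + ":" + friendly name.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

static std::string make_layer_id(std::string type_name, const std::string& friendly_name) {
    std::transform(type_name.begin(), type_name.end(), type_name.begin(),
                   [](unsigned char c) -> unsigned char { return std::tolower(c); });
    return type_name + ":" + friendly_name;
}

int main() {
    std::cout << make_layer_id("Convolution", "conv1") << "\n";  // prints: convolution:conv1
}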
void Program::ChangeInputBatch(int batch) { void ProgramBuilder::ChangeInputBatch(int batch) {
m_curBatch = batch; m_curBatch = batch;
} }
@ -66,7 +70,7 @@ auto getParamName = [](const std::shared_ptr<ov::Node>& param) -> std::string {
// detect the only supported dynamic shape case - // detect the only supported dynamic shape case -
// exactly one dimension is dynamic in input params with defined min/max interval // exactly one dimension is dynamic in input params with defined min/max interval
bool Program::IsDynBatchModel(const std::shared_ptr<ov::Model>& model, bool ProgramBuilder::IsDynBatchModel(const std::shared_ptr<ov::Model>& model,
std::map<std::string, ov::PartialShape>& shapes, std::map<std::string, ov::PartialShape>& shapes,
std::map<std::string, std::pair<int64_t, int64_t>>& batch_dim) { std::map<std::string, std::pair<int64_t, int64_t>>& batch_dim) {
for (const auto& param : model->get_parameters()) { for (const auto& param : model->get_parameters()) {
@ -120,7 +124,7 @@ bool Program::IsDynBatchModel(const std::shared_ptr<ov::Model>& model,
return dyn_shape_batch_found; return dyn_shape_batch_found;
} }
Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, const ExecutionConfig& config, ProgramBuilder::ProgramBuilder(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, const ExecutionConfig& config,
bool createTopologyOnly, bool partialBuild, bool createTopologyOnly, bool partialBuild,
InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs, InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor, bool innerProgram) std::shared_ptr<ov::threading::IStreamsExecutor> task_executor, bool innerProgram)
@ -173,7 +177,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co
auto ops = func->get_ordered_ops(); auto ops = func->get_ordered_ops();
bool dyn_shape_batch_found = false; bool dyn_shape_batch_found = false;
std::map<std::string, ngraph::PartialShape> shapes; std::map<std::string, ov::PartialShape> shapes;
std::map<std::string, std::pair<int64_t, int64_t>> batch_dim; std::map<std::string, std::pair<int64_t, int64_t>> batch_dim;
dyn_shape_batch_found = IsDynBatchModel(func, shapes, batch_dim); dyn_shape_batch_found = IsDynBatchModel(func, shapes, batch_dim);
@ -199,7 +203,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co
// clone the source model, find the batch dim // clone the source model, find the batch dim
// and reshape the model to next batch size // and reshape the model to next batch size
auto new_func = func->clone(); auto new_func = func->clone();
std::map<ov::Output<ov::Node>, ngraph::PartialShape> new_shapes; std::map<ov::Output<ov::Node>, ov::PartialShape> new_shapes;
for (const auto& param : new_func->get_parameters()) { for (const auto& param : new_func->get_parameters()) {
ov::PartialShape pshape = param->get_output_partial_shape(0); ov::PartialShape pshape = param->get_output_partial_shape(0);
@ -248,7 +252,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co
// and store them into internal maps // and store them into internal maps
// same operations as above, but for maximum batch // same operations as above, but for maximum batch
auto new_func = func->clone(); auto new_func = func->clone();
std::map<ov::Output<ov::Node>, ngraph::PartialShape> new_shapes; std::map<ov::Output<ov::Node>, ov::PartialShape> new_shapes;
for (const auto& param : new_func->get_parameters()) { for (const auto& param : new_func->get_parameters()) {
ov::PartialShape pshape = param->get_output_partial_shape(0); ov::PartialShape pshape = param->get_output_partial_shape(0);
@ -300,7 +304,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co
} }
} }
Program::Program(cldnn::engine& engine, const ExecutionConfig& config, ProgramBuilder::ProgramBuilder(cldnn::engine& engine, const ExecutionConfig& config,
InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs) InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs)
: m_max_batch(1) : m_max_batch(1)
, m_curBatch(-1) , m_curBatch(-1)
@ -314,7 +318,7 @@ Program::Program(cldnn::engine& engine, const ExecutionConfig& config,
m_networkOutputs = *outputs; m_networkOutputs = *outputs;
} }
int Program::GetMaxBatchSizeForSingleProgram() { int ProgramBuilder::GetMaxBatchSizeForSingleProgram() {
auto max_dynamic_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch); auto max_dynamic_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch);
if (max_dynamic_batch > 1) { if (max_dynamic_batch > 1) {
// calculate number of networks necessary based on binary log // calculate number of networks necessary based on binary log
@ -333,20 +337,20 @@ int Program::GetMaxBatchSizeForSingleProgram() {
return 0; return 0;
} }
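For context, a minimal standalone sketch of the binary-log batching idea referenced in the comment above; the helper names are hypothetical and this is not the plugin's actual code:

    #include <vector>

    // One program is compiled per power-of-two batch (1, 2, 4, ...),
    // i.e. floor(log2(max_dynamic_batch)) + 1 programs in total.
    int num_programs_for(int max_dynamic_batch) {
        int n = 0;
        while ((1 << n) <= max_dynamic_batch) ++n;
        return n;
    }

    // Greedy decomposition of a requested batch (>= 1) into powers of two,
    // largest first, so each chunk runs on one of the pre-compiled programs.
    std::vector<int> split_batch(int requested) {
        std::vector<int> chunks;
        for (int b = 1 << (num_programs_for(requested) - 1); b >= 1; b >>= 1) {
            while (requested >= b) {
                chunks.push_back(b);
                requested -= b;
            }
        }
        return chunks;  // e.g. split_batch(13) -> {8, 4, 1}
    }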
std::shared_ptr<cldnn::program> Program::GetCompiledProgram(int program_id) { std::shared_ptr<cldnn::program> ProgramBuilder::GetCompiledProgram(int program_id) {
if (program_id >= static_cast<int32_t>(m_programs.size())) if (program_id >= static_cast<int32_t>(m_programs.size()))
OPENVINO_THROW("Invalid program ID"); OPENVINO_THROW("Invalid program ID");
return m_programs[program_id]; return m_programs[program_id];
} }
void Program::PrepareBuild(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::OutputsDataMap networkOutputs) { void ProgramBuilder::PrepareBuild(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::OutputsDataMap networkOutputs) {
m_topology.reset(new cldnn::topology()); m_topology.reset(new cldnn::topology());
m_networkInputs = networkInputs; m_networkInputs = networkInputs;
m_networkOutputs = networkOutputs; m_networkOutputs = networkOutputs;
} }
void Program::CleanupBuild() { void ProgramBuilder::CleanupBuild() {
m_topology.reset(); m_topology.reset();
m_networkInputs.clear(); m_networkInputs.clear();
m_networkOutputs.clear(); m_networkOutputs.clear();
@ -360,11 +364,11 @@ void Program::CleanupBuild() {
#endif #endif
} }
std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::shared_ptr<ngraph::Node>>& ops, std::shared_ptr<cldnn::program> ProgramBuilder::BuildProgram(const std::vector<std::shared_ptr<ov::Node>>& ops,
InferenceEngine::InputsDataMap networkInputs, InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs, InferenceEngine::OutputsDataMap networkOutputs,
bool createTopologyOnly, bool partialBuild, bool innerProgram) { bool createTopologyOnly, bool partialBuild, bool innerProgram) {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Program::BuildProgram"); OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::BuildProgram");
// std::cout << "BuildProgram " << createTopologyOnly << ", " << partialBuild << ", " << innerProgram << std::endl; // std::cout << "BuildProgram " << createTopologyOnly << ", " << partialBuild << ", " << innerProgram << std::endl;
// In the case of an inner program, the allow_new_shape_infer flag is set from outside of the program. // In the case of an inner program, the allow_new_shape_infer flag is set from outside of the program.
// So, do not check allow_new_shape_infer for inner program build // So, do not check allow_new_shape_infer for inner program build
@ -393,7 +397,7 @@ std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::sha
if (createTopologyOnly) { if (createTopologyOnly) {
return {}; return {};
} else { } else {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Program::CreateProgram"); OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::CreateProgram");
cldnn::program::ptr program; cldnn::program::ptr program;
try { try {
program = cldnn::program::build_program(m_engine, *m_topology, m_config, get_task_executor()); program = cldnn::program::build_program(m_engine, *m_topology, m_config, get_task_executor());
@ -406,8 +410,8 @@ std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::sha
} }
} }
bool Program::IsOpSupported(const InferenceEngine::CNNNetwork& network, const std::shared_ptr<ngraph::Node>& op) { bool ProgramBuilder::IsOpSupported(const InferenceEngine::CNNNetwork& network, const std::shared_ptr<ov::Node>& op) {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Program::IsOpSupported"); OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::IsOpSupported");
cldnn::topology topology; cldnn::topology topology;
try { try {
// Query mode disables checks that input primitives are created, // Query mode disables checks that input primitives are created,
@ -434,13 +438,13 @@ bool Program::IsOpSupported(const InferenceEngine::CNNNetwork& network, const st
return true; return true;
} }
void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, const std::shared_ptr<ngraph::Node>& op) { void ProgramBuilder::CreateSingleLayerPrimitive(cldnn::topology& topology, const std::shared_ptr<ov::Node>& op) {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Program::CreateSingleLayerPrimitive"); OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::CreateSingleLayerPrimitive");
GPU_DEBUG_LOG << "Process " << "op::v" << op->get_type_info().version_id << "::" << op->get_type_name() << " operation " GPU_DEBUG_LOG << "Process " << "op::v" << op->get_type_info().version_id << "::" << op->get_type_name() << " operation "
<< "(friendly_name=" << op->get_friendly_name() << ")" << std::endl; << "(friendly_name=" << op->get_friendly_name() << ")" << std::endl;
bool is_created = false; bool is_created = false;
const ngraph::NodeTypeInfo* op_type_info = &op->get_type_info(); const ov::NodeTypeInfo* op_type_info = &op->get_type_info();
while (op_type_info != nullptr) { while (op_type_info != nullptr) {
auto customLayer = m_custom_layers.find(op->get_type_name()); auto customLayer = m_custom_layers.find(op->get_type_name());
if (customLayer != m_custom_layers.end()) { if (customLayer != m_custom_layers.end()) {
@ -464,7 +468,7 @@ void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, const std::s
} }
} }
std::vector<cldnn::input_info> Program::GetInputInfo(const std::shared_ptr<ngraph::Node>& op) const { std::vector<cldnn::input_info> ProgramBuilder::GetInputInfo(const std::shared_ptr<ov::Node>& op) const {
if (!op) { if (!op) {
return {}; return {};
} }
@ -477,8 +481,8 @@ std::vector<cldnn::input_info> Program::GetInputInfo(const std::shared_ptr<ngrap
std::string prevName = layer_type_name_ID(prevOp); std::string prevName = layer_type_name_ID(prevOp);
bool is_legacy_multiple_outputs = !allow_new_shape_infer bool is_legacy_multiple_outputs = !allow_new_shape_infer
// Note: Currently Split/VariadicSplit are divided into multiple crops // Note: Currently Split/VariadicSplit are divided into multiple crops
|| ngraph::is_type<ngraph::op::v1::Split>(prevOp) || ov::is_type<ov::op::v1::Split>(prevOp)
|| ngraph::is_type<ngraph::op::v1::VariadicSplit>(prevOp); || ov::is_type<ov::op::v1::VariadicSplit>(prevOp);
if (prevOp->get_output_size() > 1 && is_legacy_multiple_outputs) { if (prevOp->get_output_size() > 1 && is_legacy_multiple_outputs) {
prevName += ".out" + std::to_string(op->get_input_source_output(i).get_index()); prevName += ".out" + std::to_string(op->get_input_source_output(i).get_index());
} }
@ -496,7 +500,7 @@ std::vector<cldnn::input_info> Program::GetInputInfo(const std::shared_ptr<ngrap
return inputInfo; return inputInfo;
} }
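To illustrate the two addressing schemes above (a hedged sketch; the primitive id and the header path are assumptions):

    #include "intel_gpu/primitives/primitive.hpp"  // assumed location of cldnn::input_info

    void input_info_naming_example() {
        // Legacy shape inference: output port 1 of a Split is mangled into the id.
        cldnn::input_info legacy_port("split:Split_1.out1");
        // With allow_new_shape_infer, the port index is carried explicitly instead.
        cldnn::input_info typed_port("split:Split_1", 1);
    }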
void Program::init_profile_info(const cldnn::primitive& prim) { void ProgramBuilder::init_profile_info(const cldnn::primitive& prim) {
perfMap[prim.id].first = prim.id; perfMap[prim.id].first = prim.id;
auto& perfEntry = perfMap[prim.id].second; auto& perfEntry = perfMap[prim.id].second;
perfEntry.layerType = prim.origin_op_type_name; perfEntry.layerType = prim.origin_op_type_name;
@ -506,7 +510,7 @@ void Program::init_profile_info(const cldnn::primitive& prim) {
perfEntry.parentPrimitive = prim.origin_op_name; perfEntry.parentPrimitive = prim.origin_op_name;
} }
void Program::AddVariableStateInfo(const std::string& variable_id, const cldnn::layout& layout) { void ProgramBuilder::AddVariableStateInfo(const std::string& variable_id, const cldnn::layout& layout) {
auto it = m_variablesStateInfo.find(variable_id); auto it = m_variablesStateInfo.find(variable_id);
if (it != m_variablesStateInfo.end()) if (it != m_variablesStateInfo.end())
it->second.insert(layout); it->second.insert(layout);
@ -514,8 +518,8 @@ void Program::AddVariableStateInfo(const std::string& variable_id, const cldnn::
m_variablesStateInfo.insert({variable_id, { layout }}); m_variablesStateInfo.insert({variable_id, { layout }});
} }
void Program::add_primitive(const ngraph::Node& op, std::shared_ptr<cldnn::primitive> prim, std::vector<std::string> aliases) { void ProgramBuilder::add_primitive(const ov::Node& op, std::shared_ptr<cldnn::primitive> prim, std::vector<std::string> aliases) {
OPENVINO_ASSERT(m_topology != nullptr, "[GPU] Invalid Program builder state: topology is nullptr"); OPENVINO_ASSERT(m_topology != nullptr, "[GPU] Invalid ProgramBuilder state: topology is nullptr");
prim->origin_op_name = op.get_friendly_name(); prim->origin_op_name = op.get_friendly_name();
prim->origin_op_type_name = op.get_type_name(); prim->origin_op_type_name = op.get_type_name();
@ -547,7 +551,7 @@ void Program::add_primitive(const ngraph::Node& op, std::shared_ptr<cldnn::primi
m_topology->add_primitive(prim); m_topology->add_primitive(prim);
} }
bool Program::requires_new_shape_infer(const ngraph::Node& op) const { bool ProgramBuilder::requires_new_shape_infer(const ov::Node& op) const {
if (op.is_dynamic()) { if (op.is_dynamic()) {
return true; return true;
} }
@ -565,14 +569,14 @@ bool Program::requires_new_shape_infer(const ngraph::Node& op) const {
return false; return false;
} }
// TODO: Does it make sense to add such a method to ngraph core? // TODO: Does it make sense to add such a method to ov core?
bool IsNodeOnConstPath(const std::shared_ptr<ngraph::Node>& node) { bool IsNodeOnConstPath(const std::shared_ptr<ov::Node>& node) {
std::set<std::shared_ptr<ngraph::Node>> nodes_processed = {}; std::set<std::shared_ptr<ov::Node>> nodes_processed = {};
std::function<bool(const std::shared_ptr<ngraph::Node>&)> is_const_node = [&nodes_processed, &is_const_node](const std::shared_ptr<ngraph::Node>& node) { std::function<bool(const std::shared_ptr<ov::Node>&)> is_const_node = [&nodes_processed, &is_const_node](const std::shared_ptr<ov::Node>& node) {
if (nodes_processed.count(node)) return true; if (nodes_processed.count(node)) return true;
nodes_processed.insert(node); nodes_processed.insert(node);
// If input is constant, then drop it from the processing list // If input is constant, then drop it from the processing list
if (std::dynamic_pointer_cast<ngraph::op::v0::Constant>(node) != nullptr) if (std::dynamic_pointer_cast<ov::op::v0::Constant>(node) != nullptr)
return true; return true;
// If the node doesn't have any parents and it's not a constant, then we are dealing with a dynamic path // If the node doesn't have any parents and it's not a constant, then we are dealing with a dynamic path
if (node->get_input_size() == 0) if (node->get_input_size() == 0)
@ -587,7 +591,7 @@ bool IsNodeOnConstPath(const std::shared_ptr<ngraph::Node>& node) {
return is_const_node(node); return is_const_node(node);
} }
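A small usage sketch of the traversal above, built from hypothetical nodes:

    #include <memory>
    #include "openvino/op/add.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"

    void const_path_example() {
        // Every path from this Add terminates in a Constant -> const path.
        auto c0 = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {1.0f});
        auto c1 = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {2.0f});
        auto const_add = std::make_shared<ov::op::v1::Add>(c0, c1);  // IsNodeOnConstPath -> true

        // A Parameter feeds this Add -> dynamic path.
        auto p = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
        auto mixed_add = std::make_shared<ov::op::v1::Add>(p, c1);   // IsNodeOnConstPath -> false
    }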
void validate_inputs_count(const std::shared_ptr<ngraph::Node>& op, std::vector<size_t> valid_inputs_count) { void validate_inputs_count(const std::shared_ptr<ov::Node>& op, std::vector<size_t> valid_inputs_count) {
for (auto ic : valid_inputs_count) { for (auto ic : valid_inputs_count) {
if (op->get_input_size() == ic) { if (op->get_input_size() == ic) {
return; return;

View File

@ -8,15 +8,17 @@
#include <memory> #include <memory>
#include <vector> #include <vector>
#include <ngraph/opsets/opset9.hpp> #include "openvino/op/avg_pool.hpp"
#include <ngraph/pattern/op/wrap_type.hpp> #include "openvino/op/reduce_mean.hpp"
#include <ngraph/rt_info.hpp> #include "openvino/op/constant.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/core/rt_info.hpp"
ov::intel_gpu::ConvertAvgPoolingToReduce::ConvertAvgPoolingToReduce() { ov::intel_gpu::ConvertAvgPoolingToReduce::ConvertAvgPoolingToReduce() {
// Check all AvgPool nodes // Check all AvgPool nodes
auto m = std::make_shared<ngraph::pattern::Matcher>(ngraph::pattern::wrap_type<ngraph::opset9::AvgPool>(), "ConvertAvgPoolingToReduce"); auto m = std::make_shared<ov::pass::pattern::Matcher>(ov::pass::pattern::wrap_type<ov::op::v1::AvgPool>(), "ConvertAvgPoolingToReduce");
register_matcher(m, [&](ngraph::pattern::Matcher& m) { register_matcher(m, [&](ov::pass::pattern::Matcher& m) {
auto pool = std::dynamic_pointer_cast<ngraph::opset9::AvgPool>(m.get_match_root()); auto pool = std::dynamic_pointer_cast<ov::op::v1::AvgPool>(m.get_match_root());
if (!pool || transformation_callback(pool)) { if (!pool || transformation_callback(pool)) {
return false; return false;
} }
@ -45,9 +47,9 @@ ov::intel_gpu::ConvertAvgPoolingToReduce::ConvertAvgPoolingToReduce() {
std::vector<int64_t> axes_shape(rank - 2); std::vector<int64_t> axes_shape(rank - 2);
std::iota(axes_shape.begin(), axes_shape.end(), 2); std::iota(axes_shape.begin(), axes_shape.end(), 2);
auto reduce = std::make_shared<ngraph::opset9::ReduceMean>( auto reduce = std::make_shared<ov::op::v1::ReduceMean>(
pool->input_value(0), pool->input_value(0),
ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{axes_shape.size()}, axes_shape), ov::op::v0::Constant::create(ov::element::i64, ov::Shape{axes_shape.size()}, axes_shape),
true); true);
reduce->set_friendly_name(pool->get_friendly_name()); reduce->set_friendly_name(pool->get_friendly_name());
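The equivalence the pass relies on, as a hedged standalone example (the shapes are assumptions): a global average pooling over a [1, 16, 7, 7] input equals ReduceMean over the spatial axes {2, 3} with keep_dims = true, producing [1, 16, 1, 1].

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/reduce_mean.hpp"

    void avg_pool_as_reduce_example() {
        auto input  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16, 7, 7});
        auto axes   = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3});
        auto reduce = std::make_shared<ov::op::v1::ReduceMean>(input, axes, true);  // -> [1, 16, 1, 1]
    }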

View File

@ -4,17 +4,17 @@
#pragma once #pragma once
#include <ngraph/pass/graph_rewrite.hpp> #include "openvino/pass/graph_rewrite.hpp"
#include <transformations_visibility.hpp> #include "openvino/core/visibility.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
class ConvertAvgPoolingToReduce : public ngraph::pass::MatcherPass { class ConvertAvgPoolingToReduce : public ov::pass::MatcherPass {
public: public:
OPENVINO_RTTI("ConvertAvgPoolingToReduce", "0"); OPENVINO_RTTI("ConvertAvgPoolingToReduce", "0");
ConvertAvgPoolingToReduce(); ConvertAvgPoolingToReduce();
}; };
} // namespace pass } // namespace intel_gpu
} // namespace ngraph } // namespace ov

View File

@ -4,29 +4,28 @@
#include "convert_shapeof.hpp" #include "convert_shapeof.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/core/rt_info.hpp"
#include <memory> #include <memory>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
#include <openvino/opsets/opset1.hpp>
#include <openvino/opsets/opset3.hpp>
#include <vector> #include <vector>
ov::intel_gpu::ConvertShapeOf1To3::ConvertShapeOf1To3() { ov::intel_gpu::ConvertShapeOf1To3::ConvertShapeOf1To3() {
auto shapeof1 = ov::pass::pattern::wrap_type<ov::opset1::ShapeOf>(); auto shapeof1 = ov::pass::pattern::wrap_type<ov::op::v0::ShapeOf>();
matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
auto shapeof1 = std::dynamic_pointer_cast<ov::opset1::ShapeOf>(m.get_match_root()); auto shapeof1 = std::dynamic_pointer_cast<ov::op::v0::ShapeOf>(m.get_match_root());
if (!shapeof1) { if (!shapeof1) {
return false; return false;
} }
auto new_shapeof3 = std::make_shared<ov::opset3::ShapeOf>(shapeof1->input_value(0)); auto new_shapeof3 = std::make_shared<ov::op::v3::ShapeOf>(shapeof1->input_value(0));
new_shapeof3->set_friendly_name(shapeof1->get_friendly_name()); new_shapeof3->set_friendly_name(shapeof1->get_friendly_name());
ngraph::copy_runtime_info(shapeof1, new_shapeof3); ov::copy_runtime_info(shapeof1, new_shapeof3);
ngraph::replace_node(shapeof1, new_shapeof3); ov::replace_node(shapeof1, new_shapeof3);
return true; return true;
}; };
auto m = std::make_shared<ngraph::pattern::Matcher>(shapeof1, "ConvertShapeOf1To3"); auto m = std::make_shared<ov::pass::pattern::Matcher>(shapeof1, "ConvertShapeOf1To3");
register_matcher(m, callback); register_matcher(m, callback);
} }
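For reference, a usage sketch of how such a MatcherPass is typically scheduled; `model` is an assumed std::shared_ptr<ov::Model> provided by the caller:

    #include <memory>
    #include "convert_shapeof.hpp"  // assumed path, as in the source above
    #include "openvino/core/model.hpp"
    #include "openvino/pass/manager.hpp"

    void run_plugin_passes(const std::shared_ptr<ov::Model>& model) {
        ov::pass::Manager manager;
        manager.register_pass<ov::intel_gpu::ConvertShapeOf1To3>();
        manager.run_passes(model);
    }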

View File

@ -4,8 +4,8 @@
#pragma once #pragma once
#include <ngraph/pass/graph_rewrite.hpp> #include "openvino/pass/graph_rewrite.hpp"
#include <transformations_visibility.hpp> #include "openvino/core/visibility.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {

View File

@ -2,14 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/core/rt_info.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/reduce_mean.hpp"
#include "openvino/op/reduce_prod.hpp"
#include "openvino/op/reduce_min.hpp"
#include "openvino/op/reduce_max.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/reshape.hpp"
#include "decompose_reduce_for_false_keepdims.hpp" #include "decompose_reduce_for_false_keepdims.hpp"
#include <algorithm> #include <algorithm>
#include <cassert> #include <cassert>
#include <memory> #include <memory>
#include <ngraph/opsets/opset10.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
#include <vector> #include <vector>
namespace ov { namespace ov {
@ -17,17 +24,17 @@ namespace intel_gpu {
DecomposeReduceForFalseKeepDims::DecomposeReduceForFalseKeepDims() { DecomposeReduceForFalseKeepDims::DecomposeReduceForFalseKeepDims() {
// Get one MatcherPass for all modes // Get one MatcherPass for all modes
auto reduce_pattern = ngraph::pattern::wrap_type<ngraph::opset10::ReduceSum, auto reduce_pattern = ov::pass::pattern::wrap_type<ov::op::v1::ReduceSum,
ngraph::opset10::ReduceMean, ov::op::v1::ReduceMean,
ngraph::opset10::ReduceProd, ov::op::v1::ReduceProd,
ngraph::opset10::ReduceMin, ov::op::v1::ReduceMin,
ngraph::opset10::ReduceMax>( ov::op::v1::ReduceMax>(
{ngraph::pattern::any_input(ngraph::pattern::has_static_shape()), {ov::pass::pattern::any_input(ov::pass::pattern::has_static_shape()),
ngraph::pattern::wrap_type<ngraph::opset10::Constant>()}, ov::pass::pattern::wrap_type<ov::op::v0::Constant>()},
ngraph::pattern::has_static_shape()); ov::pass::pattern::has_static_shape());
// register callback // register callback
ov::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) { ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map(); const auto& pattern_map = m.get_pattern_value_map();
auto reduce = auto reduce =
as_type_ptr<op::util::ArithmeticReductionKeepDims>(pattern_map.at(reduce_pattern).get_node_shared_ptr()); as_type_ptr<op::util::ArithmeticReductionKeepDims>(pattern_map.at(reduce_pattern).get_node_shared_ptr());
@ -45,23 +52,23 @@ DecomposeReduceForFalseKeepDims::DecomposeReduceForFalseKeepDims() {
if (!reduce->get_keep_dims() && if (!reduce->get_keep_dims() &&
need_transformation_for_reordered_axes(axes_vector, input_rank, (input_rank - 2)) && need_transformation_for_reordered_axes(axes_vector, input_rank, (input_rank - 2)) &&
input_shape.size() < 6) { input_shape.size() < 6) {
ngraph::NodeVector new_ops; ov::NodeVector new_ops;
// Reduce // Reduce
auto reduce_const = auto reduce_const =
ngraph::opset10::Constant::create(ngraph::element::i64, ngraph::Shape{axes_vector.size()}, axes_vector); ov::op::v0::Constant::create(ov::element::i64, ov::Shape{axes_vector.size()}, axes_vector);
// Add each reduce mode supported by oneDNN // Add each reduce mode supported by oneDNN
if (ngraph::is_type<ngraph::opset10::ReduceSum>(reduce)) if (ov::is_type<ov::op::v1::ReduceSum>(reduce))
input = std::make_shared<ngraph::opset10::ReduceSum>(input, reduce_const, true); input = std::make_shared<ov::op::v1::ReduceSum>(input, reduce_const, true);
else if (ngraph::is_type<ngraph::opset10::ReduceMean>(reduce)) else if (ov::is_type<ov::op::v1::ReduceMean>(reduce))
input = std::make_shared<ngraph::opset10::ReduceMean>(input, reduce_const, true); input = std::make_shared<ov::op::v1::ReduceMean>(input, reduce_const, true);
else if (ngraph::is_type<ngraph::opset10::ReduceMin>(reduce)) else if (ov::is_type<ov::op::v1::ReduceMin>(reduce))
input = std::make_shared<ngraph::opset10::ReduceMin>(input, reduce_const, true); input = std::make_shared<ov::op::v1::ReduceMin>(input, reduce_const, true);
else if (ngraph::is_type<ngraph::opset10::ReduceMax>(reduce)) else if (ov::is_type<ov::op::v1::ReduceMax>(reduce))
input = std::make_shared<ngraph::opset10::ReduceMax>(input, reduce_const, true); input = std::make_shared<ov::op::v1::ReduceMax>(input, reduce_const, true);
else if (ngraph::is_type<ngraph::opset10::ReduceProd>(reduce)) else if (ov::is_type<ov::op::v1::ReduceProd>(reduce))
input = std::make_shared<ngraph::opset10::ReduceProd>(input, reduce_const, true); input = std::make_shared<ov::op::v1::ReduceProd>(input, reduce_const, true);
else else
return false; return false;
@ -69,21 +76,21 @@ DecomposeReduceForFalseKeepDims::DecomposeReduceForFalseKeepDims() {
new_ops.push_back(input.get_node_shared_ptr()); new_ops.push_back(input.get_node_shared_ptr());
// Reshape // Reshape
auto reshape_shape = ngraph::Shape((input_rank - axes_vector.size()), 1); auto reshape_shape = ov::Shape((input_rank - axes_vector.size()), 1);
// It is expected that only the feature axis is un-reduced, unless a new case for this decomposition is added. // It is expected that only the feature axis is un-reduced, unless a new case for this decomposition is added.
assert(reshape_shape.size() == 1); assert(reshape_shape.size() == 1);
reshape_shape[0] = reduce_shape[0]; reshape_shape[0] = reduce_shape[0];
input = std::make_shared<ngraph::opset10::Reshape>( input = std::make_shared<ov::op::v1::Reshape>(
input, input,
ngraph::opset10::Constant::create(ngraph::element::i64, ov::op::v0::Constant::create(ov::element::i64,
ngraph::Shape{reshape_shape.size()}, ov::Shape{reshape_shape.size()},
reshape_shape), reshape_shape),
false); false);
input.get_node_shared_ptr()->set_friendly_name(reduce->get_friendly_name() + "_reshape_false_keepdims"); input.get_node_shared_ptr()->set_friendly_name(reduce->get_friendly_name() + "_reshape_false_keepdims");
new_ops.push_back(input.get_node_shared_ptr()); new_ops.push_back(input.get_node_shared_ptr());
ngraph::copy_runtime_info(reduce, new_ops); ov::copy_runtime_info(reduce, new_ops);
reduce->output(0).replace(input); reduce->output(0).replace(input);
return true; return true;
} }
@ -91,7 +98,7 @@ DecomposeReduceForFalseKeepDims::DecomposeReduceForFalseKeepDims() {
return false; return false;
}; };
auto m = std::make_shared<ngraph::pattern::Matcher>(reduce_pattern, "DecomposeReduceForFalseKeepDims"); auto m = std::make_shared<ov::pass::pattern::Matcher>(reduce_pattern, "DecomposeReduceForFalseKeepDims");
register_matcher(m, callback); register_matcher(m, callback);
} }
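A worked shape example of the decomposition (input shape and axes are assumptions): reducing the batch and spatial axes {0, 2, 3} of a [2, 8, 4, 4] tensor with keep_dims = false becomes a keep_dims = true reduction followed by a Reshape.

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/reduce_sum.hpp"
    #include "openvino/op/reshape.hpp"

    void decomposed_reduce_example() {
        auto in     = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 8, 4, 4});
        auto axes   = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {0, 2, 3});
        auto summed = std::make_shared<ov::op::v1::ReduceSum>(in, axes, true);       // [1, 8, 1, 1]
        auto target = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {8});
        auto out    = std::make_shared<ov::op::v1::Reshape>(summed, target, false);  // [8]
    }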

View File

@ -4,8 +4,8 @@
#pragma once #pragma once
#include <ngraph/pass/graph_rewrite.hpp> #include "openvino/pass/graph_rewrite.hpp"
#include <transformations_visibility.hpp> #include "openvino/core/visibility.hpp"
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
@ -15,7 +15,7 @@ namespace intel_gpu {
* A clDNN Reduce reorders un-reduced axes of its output tensor to b-f and spatial order when keep_dims is false. * A clDNN Reduce reorders un-reduced axes of its output tensor to b-f and spatial order when keep_dims is false.
* oneDNN reduction does not allow this, and clDNN execution shows a huge performance drop for blocked formats. * oneDNN reduction does not allow this, and clDNN execution shows a huge performance drop for blocked formats.
*/ */
class DecomposeReduceForFalseKeepDims : public ngraph::pass::MatcherPass { class DecomposeReduceForFalseKeepDims : public ov::pass::MatcherPass {
public: public:
// Decompose reduce if keep_dims is false and it reduces batch and spatial axes // Decompose reduce if keep_dims is false and it reduces batch and spatial axes
DecomposeReduceForFalseKeepDims(); DecomposeReduceForFalseKeepDims();

View File

@ -4,11 +4,19 @@
#include "einsum_decomposition.hpp" #include "einsum_decomposition.hpp"
#include <unordered_map> #include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/core/rt_info.hpp"
#include "openvino/op/einsum.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/unsqueeze.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/transpose.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/matmul.hpp"
#include <ngraph/opsets/opset7.hpp> #include <unordered_map>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
namespace ov { namespace ov {
namespace intel_gpu { namespace intel_gpu {
@ -24,7 +32,7 @@ using LabelDimMap = std::unordered_map<std::string, std::vector<size_t>>;
/// \return a vector of pairs with input indices assuming that the intermediate result is /// \return a vector of pairs with input indices assuming that the intermediate result is
/// appended in the tail /// appended in the tail
/// ///
std::vector<std::pair<size_t, size_t>> compute_einsum_path(std::shared_ptr<const ngraph::opset7::Einsum> einsum_node) { std::vector<std::pair<size_t, size_t>> compute_einsum_path(std::shared_ptr<const ov::op::v7::Einsum> einsum_node) {
// TODO: implement algorithm for finding (pseudo-)optimal einsum_path // TODO: implement algorithm for finding (pseudo-)optimal einsum_path
std::vector<std::pair<size_t, size_t>> einsum_path; std::vector<std::pair<size_t, size_t>> einsum_path;
const size_t num_inputs = einsum_node->get_input_size(); const size_t num_inputs = einsum_node->get_input_size();
@ -111,7 +119,7 @@ std::string generate_grouping_subscript(const std::string& input_subscript, cons
return input_subscript; return input_subscript;
} }
auto labels = ngraph::opset7::Einsum::extract_labels(input_subscript); auto labels = ov::op::v7::Einsum::extract_labels(input_subscript);
std::string required_subscript = ""; std::string required_subscript = "";
for (auto index : labels_inds) { for (auto index : labels_inds) {
required_subscript += labels[index]; required_subscript += labels[index];
@ -131,8 +139,8 @@ std::string generate_grouping_subscript(const std::string& input_subscript, cons
/// \param new_node New input node to be inserted in the tail /// \param new_node New input node to be inserted in the tail
/// \param new_subscript New input subscript to be inserted in the tail /// \param new_subscript New input subscript to be inserted in the tail
/// ///
void update_operands(ngraph::OutputVector& input_nodes, std::vector<std::string>& input_subscripts, size_t input_ind1, size_t input_ind2, void update_operands(ov::OutputVector& input_nodes, std::vector<std::string>& input_subscripts, size_t input_ind1, size_t input_ind2,
const ngraph::Output<ngraph::Node>& new_node, const std::string& new_subscript) { const ov::Output<ov::Node>& new_node, const std::string& new_subscript) {
NGRAPH_CHECK(input_ind1 < input_ind2); NGRAPH_CHECK(input_ind1 < input_ind2);
NGRAPH_CHECK(input_ind2 < input_nodes.size()); NGRAPH_CHECK(input_ind2 < input_nodes.size());
NGRAPH_CHECK(input_ind2 < input_subscripts.size()); NGRAPH_CHECK(input_ind2 < input_subscripts.size());
@ -154,12 +162,12 @@ void update_operands(ngraph::OutputVector& input_nodes, std::vector<std::string>
/// ///
/// \return sub-shape /// \return sub-shape
/// ///
ngraph::Shape compute_sub_shape(const ngraph::Shape& input_shape, size_t begin, size_t end, bool is_product = false) { ov::Shape compute_sub_shape(const ov::Shape& input_shape, size_t begin, size_t end, bool is_product = false) {
NGRAPH_CHECK(end <= input_shape.size()); NGRAPH_CHECK(end <= input_shape.size());
if (end <= begin) { if (end <= begin) {
return ngraph::Shape(); return ov::Shape();
} }
ngraph::Shape sub_shape(input_shape.begin() + begin, input_shape.begin() + end); ov::Shape sub_shape(input_shape.begin() + begin, input_shape.begin() + end);
if (is_product) { if (is_product) {
const auto prod = shape_size(sub_shape); const auto prod = shape_size(sub_shape);
@ -179,13 +187,13 @@ ngraph::Shape compute_sub_shape(const ngraph::Shape& input_shape, size_t begin,
/// \return Unsqueezed input node if a vector of unsqueezing dimensions is not empty, /// \return Unsqueezed input node if a vector of unsqueezing dimensions is not empty,
/// otherwise, the original input node /// otherwise, the original input node
/// ///
ngraph::Output<ngraph::Node> unsqueeze_input(const ngraph::Output<ngraph::Node>& input_node, const std::vector<int64_t>& unsqueeze_axes, ov::Output<ov::Node> unsqueeze_input(const ov::Output<ov::Node>& input_node, const std::vector<int64_t>& unsqueeze_axes,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
if (unsqueeze_axes.empty()) { if (unsqueeze_axes.empty()) {
return input_node; return input_node;
} }
auto unsqueeze_axes_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {unsqueeze_axes.size()}, unsqueeze_axes); auto unsqueeze_axes_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {unsqueeze_axes.size()}, unsqueeze_axes);
auto unsqueeze = std::make_shared<ngraph::opset7::Unsqueeze>(input_node, unsqueeze_axes_const); auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(input_node, unsqueeze_axes_const);
subgraph_nodes.insert(subgraph_nodes.end(), {unsqueeze_axes_const, unsqueeze}); subgraph_nodes.insert(subgraph_nodes.end(), {unsqueeze_axes_const, unsqueeze});
return unsqueeze->output(0); return unsqueeze->output(0);
} }
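Worked example with assumed shapes: unsqueezing axes {0, 3} turns a [4, 5] tensor into [1, 4, 5, 1], which lets the later element-wise Multiply broadcast along the inserted dimensions.

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/unsqueeze.hpp"

    void unsqueeze_example() {
        auto x    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 5});
        auto ax   = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 3});
        auto unsq = std::make_shared<ov::op::v0::Unsqueeze>(x, ax);  // -> [1, 4, 5, 1]
    }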
@ -204,16 +212,16 @@ ngraph::Output<ngraph::Node> unsqueeze_input(const ngraph::Output<ngraph::Node>&
/// ///
/// \return Reshaped input node /// \return Reshaped input node
/// ///
ngraph::Output<ngraph::Node> reshape_input_for_matmul(const ngraph::Output<ngraph::Node>& input_node, ov::Output<ov::Node> reshape_input_for_matmul(const ov::Output<ov::Node>& input_node,
const ngraph::Shape& common_sub_shape, const ov::Shape& common_sub_shape,
const ngraph::Shape& separate_sub_shape, const ov::Shape& separate_sub_shape,
const ngraph::Shape& reduced_sub_shape_prod, const ov::Shape& reduced_sub_shape_prod,
bool is_separate_first, bool is_separate_first,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
ngraph::Shape new_shape{common_sub_shape.begin(), common_sub_shape.end()}; ov::Shape new_shape{common_sub_shape.begin(), common_sub_shape.end()};
// compute a product of a sub-shape for separate labels // compute a product of a sub-shape for separate labels
ngraph::Shape separate_sub_shape_prod = separate_sub_shape; ov::Shape separate_sub_shape_prod = separate_sub_shape;
if (!common_sub_shape.empty() && separate_sub_shape_prod.empty()) { if (!common_sub_shape.empty() && separate_sub_shape_prod.empty()) {
// in this case new dimension corresponding to separate labels must be added // in this case new dimension corresponding to separate labels must be added
// since MatMul operation is not possible to do without separate dimensions // since MatMul operation is not possible to do without separate dimensions
@ -241,16 +249,16 @@ ngraph::Output<ngraph::Node> reshape_input_for_matmul(const ngraph::Output<ngrap
return input_node; return input_node;
} }
const auto new_shape_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {new_shape.size()}, new_shape); const auto new_shape_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {new_shape.size()}, new_shape);
const auto reshaped_input_op = std::make_shared<ngraph::opset7::Reshape>(input_node, new_shape_const, false); const auto reshaped_input_op = std::make_shared<ov::op::v1::Reshape>(input_node, new_shape_const, false);
subgraph_nodes.insert(subgraph_nodes.end(), {new_shape_const, reshaped_input_op}); subgraph_nodes.insert(subgraph_nodes.end(), {new_shape_const, reshaped_input_op});
return reshaped_input_op->output(0); return reshaped_input_op->output(0);
} }
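Shape walk-through with assumed sub-shapes: common [2, 3], separate [4, 5] (flattened to their product 20), reduced product [6]; a [2, 3, 4, 5, 6] operand is thus reshaped to [2, 3, 20, 6], so MatMul later treats [2, 3] as batch dimensions and contracts over the trailing 6.

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/reshape.hpp"

    void reshape_for_matmul_example() {
        auto x   = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4, 5, 6});
        auto tgt = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {2, 3, 20, 6});
        auto r   = std::make_shared<ov::op::v1::Reshape>(x, tgt, false);  // -> [2, 3, 20, 6]
    }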
LabelDimMap compute_label_dim_map(const ngraph::Rank& input_rank, LabelDimMap compute_label_dim_map(const ov::Rank& input_rank,
const std::string& input_subscript) { const std::string& input_subscript) {
static const std::string ellipsis = "..."; static const std::string ellipsis = "...";
const auto labels = ngraph::opset7::Einsum::extract_labels(input_subscript); const auto labels = ov::op::v7::Einsum::extract_labels(input_subscript);
const auto static_input_rank = input_rank.is_static(); const auto static_input_rank = input_rank.is_static();
NGRAPH_CHECK(static_input_rank || (std::find(labels.begin(), labels.end(), ellipsis) == labels.end()), NGRAPH_CHECK(static_input_rank || (std::find(labels.begin(), labels.end(), ellipsis) == labels.end()),
"Input rank cannot be dynamic in case of ellipsis in input subscript"); "Input rank cannot be dynamic in case of ellipsis in input subscript");
@ -292,8 +300,8 @@ LabelDimMap compute_label_dim_map(const ngraph::Rank& input_rank,
/// \param subgraph_nodes A vector of operation nodes that is included into /// \param subgraph_nodes A vector of operation nodes that is included into
/// a sub-graph decomposing Einsum that is needed for copy_runtime_info /// a sub-graph decomposing Einsum that is needed for copy_runtime_info
/// ///
void transpose_input(ngraph::OutputVector& input_nodes, std::vector<std::string>& input_subscripts, const std::string& required_subscript, size_t input_ind, void transpose_input(ov::OutputVector& input_nodes, std::vector<std::string>& input_subscripts, const std::string& required_subscript, size_t input_ind,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
// perform sanity check for arguments // perform sanity check for arguments
const auto num_inputs = input_nodes.size(); const auto num_inputs = input_nodes.size();
NGRAPH_CHECK(num_inputs == input_subscripts.size(), "Each input must have own subscript."); NGRAPH_CHECK(num_inputs == input_subscripts.size(), "Each input must have own subscript.");
@ -312,8 +320,8 @@ void transpose_input(ngraph::OutputVector& input_nodes, std::vector<std::string>
// find permutation that establishes bijection between the input subscript // find permutation that establishes bijection between the input subscript
// and the required one // and the required one
const auto& input_node = input_nodes[input_ind]; const auto& input_node = input_nodes[input_ind];
const auto labels = ngraph::opset7::Einsum::extract_labels(input_subscript); const auto labels = ov::op::v7::Einsum::extract_labels(input_subscript);
const auto required_labels = ngraph::opset7::Einsum::extract_labels(required_subscript); const auto required_labels = ov::op::v7::Einsum::extract_labels(required_subscript);
NGRAPH_CHECK(labels.size() == required_labels.size()); NGRAPH_CHECK(labels.size() == required_labels.size());
const auto label_dim_map = compute_label_dim_map(input_node.get_partial_shape().rank(), input_subscript); const auto label_dim_map = compute_label_dim_map(input_node.get_partial_shape().rank(), input_subscript);
for (const auto& required_label : required_labels) { for (const auto& required_label : required_labels) {
@ -324,8 +332,8 @@ void transpose_input(ngraph::OutputVector& input_nodes, std::vector<std::string>
} }
// create a sub-graph for transposing into the required layout // create a sub-graph for transposing into the required layout
const auto permutation_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {permutation.size()}, permutation); const auto permutation_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {permutation.size()}, permutation);
const auto transpose = std::make_shared<ngraph::opset7::Transpose>(input_node, permutation_const); const auto transpose = std::make_shared<ov::op::v1::Transpose>(input_node, permutation_const);
// update a vector of inputs and input subscripts // update a vector of inputs and input subscripts
input_nodes[input_ind] = transpose->output(0); input_nodes[input_ind] = transpose->output(0);
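Worked example with an assumed subscript pair: transposing "abc" into the required "cab" yields the permutation {2, 0, 1}.

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/transpose.hpp"

    void transpose_example() {
        auto x    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});  // "abc"
        auto perm = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 0, 1});
        auto t    = std::make_shared<ov::op::v1::Transpose>(x, perm);  // "cab" -> [4, 2, 3]
    }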
@ -348,8 +356,8 @@ void transpose_input(ngraph::OutputVector& input_nodes, std::vector<std::string>
/// a sub-graph decomposing Einsum that is needed for copy_runtime_info /// a sub-graph decomposing Einsum that is needed for copy_runtime_info
/// ///
void reduce_input(EinsumDecomposition *einsum_decompose_ptr, void reduce_input(EinsumDecomposition *einsum_decompose_ptr,
ngraph::OutputVector& input_nodes, std::vector<std::string>& input_subscripts, ov::OutputVector& input_nodes, std::vector<std::string>& input_subscripts,
const std::string& output_subscript, size_t input_ind, ngraph::NodeVector& subgraph_nodes) { const std::string& output_subscript, size_t input_ind, ov::NodeVector& subgraph_nodes) {
// perform sanity check for arguments // perform sanity check for arguments
const auto num_inputs = input_nodes.size(); const auto num_inputs = input_nodes.size();
NGRAPH_CHECK(num_inputs == input_subscripts.size(), "Each input must have own subscript."); NGRAPH_CHECK(num_inputs == input_subscripts.size(), "Each input must have own subscript.");
@ -360,7 +368,7 @@ void reduce_input(EinsumDecomposition *einsum_decompose_ptr,
// compute output shape and axes to reduce // compute output shape and axes to reduce
std::set<int64_t> reduced_axes; std::set<int64_t> reduced_axes;
const auto labels = ngraph::opset7::Einsum::extract_labels(input_subscripts[input_ind]); const auto labels = ov::op::v7::Einsum::extract_labels(input_subscripts[input_ind]);
auto label_dim_map = compute_label_dim_map(input_node.get_partial_shape().rank(), input_subscript); auto label_dim_map = compute_label_dim_map(input_node.get_partial_shape().rank(), input_subscript);
std::string new_input_subscript = ""; std::string new_input_subscript = "";
@ -387,8 +395,8 @@ void reduce_input(EinsumDecomposition *einsum_decompose_ptr,
// reduce by summing up elements along dimensions whose label occurs just once // reduce by summing up elements along dimensions whose label occurs just once
const std::vector<int64_t> reduced_axes_vec{reduced_axes.cbegin(), reduced_axes.cend()}; const std::vector<int64_t> reduced_axes_vec{reduced_axes.cbegin(), reduced_axes.cend()};
const auto axes_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {reduced_axes.size()}, reduced_axes_vec); const auto axes_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {reduced_axes.size()}, reduced_axes_vec);
const auto reduce_sum = einsum_decompose_ptr->register_new_node<ngraph::opset7::ReduceSum>(input_node, axes_const, false); const auto reduce_sum = einsum_decompose_ptr->register_new_node<ov::op::v1::ReduceSum>(input_node, axes_const, false);
// update a vector of inputs and input subscripts // update a vector of inputs and input subscripts
input_nodes[input_ind] = reduce_sum->output(0); input_nodes[input_ind] = reduce_sum->output(0);
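Worked example for an assumed equation "ij->i": label 'j' occurs once and is absent from the output subscript, so the operand is pre-reduced with ReduceSum over axis 1.

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/reduce_sum.hpp"

    void reduce_input_example() {
        auto x    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 5});  // "ij"
        auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
        auto sum  = std::make_shared<ov::op::v1::ReduceSum>(x, axes, false);  // "i" -> [2]
    }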
@ -401,17 +409,17 @@ void reduce_input(EinsumDecomposition *einsum_decompose_ptr,
/// \brief Broadcast input to a new shape. The MatMul operation requires the /// \brief Broadcast input to a new shape. The MatMul operation requires the
/// same shape of both operands in the common (or batch) dimensions. /// same shape of both operands in the common (or batch) dimensions.
/// ///
void broadcast_input(ngraph::OutputVector& inputs, void broadcast_input(ov::OutputVector& inputs,
size_t input_ind, size_t input_ind,
const ngraph::Shape& new_common_shape, const ov::Shape& new_common_shape,
const ngraph::Shape& separate_shape, const ov::Shape& separate_shape,
const ngraph::Shape& reduced_shape, const ov::Shape& reduced_shape,
bool is_separate_first, bool is_separate_first,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
NGRAPH_CHECK(input_ind < inputs.size()); NGRAPH_CHECK(input_ind < inputs.size());
const auto& input = inputs[input_ind]; const auto& input = inputs[input_ind];
ngraph::Shape new_shape{new_common_shape.begin(), new_common_shape.end()}; ov::Shape new_shape{new_common_shape.begin(), new_common_shape.end()};
if (is_separate_first) { if (is_separate_first) {
new_shape.insert(new_shape.end(), separate_shape.begin(), separate_shape.end()); new_shape.insert(new_shape.end(), separate_shape.begin(), separate_shape.end());
new_shape.insert(new_shape.end(), reduced_shape.begin(), reduced_shape.end()); new_shape.insert(new_shape.end(), reduced_shape.begin(), reduced_shape.end());
@ -420,7 +428,7 @@ void broadcast_input(ngraph::OutputVector& inputs,
new_shape.insert(new_shape.end(), separate_shape.begin(), separate_shape.end()); new_shape.insert(new_shape.end(), separate_shape.begin(), separate_shape.end());
} }
const ngraph::Shape old_shape = input.get_shape(); const ov::Shape old_shape = input.get_shape();
if (old_shape == new_shape) { if (old_shape == new_shape) {
return; return;
} }
@ -428,20 +436,20 @@ void broadcast_input(ngraph::OutputVector& inputs,
const auto new_shape_size = new_shape.size(); const auto new_shape_size = new_shape.size();
NGRAPH_CHECK(old_shape_size <= new_shape_size); NGRAPH_CHECK(old_shape_size <= new_shape_size);
const auto new_shape_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {new_shape.size()}, new_shape); const auto new_shape_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {new_shape.size()}, new_shape);
const auto broadcast = std::make_shared<ngraph::opset7::Broadcast>(input, new_shape_const, ngraph::op::BroadcastType::NUMPY); const auto broadcast = std::make_shared<ov::op::v3::Broadcast>(input, new_shape_const, ov::op::BroadcastType::NUMPY);
inputs[input_ind] = broadcast->output(0); inputs[input_ind] = broadcast->output(0);
subgraph_nodes.insert(subgraph_nodes.end(), {new_shape_const, broadcast}); subgraph_nodes.insert(subgraph_nodes.end(), {new_shape_const, broadcast});
} }
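Worked example of the NUMPY broadcast used here (shapes are assumptions): a [1, 4, 5] operand is expanded to the merged common shape [3, 4, 5].

    #include <memory>
    #include "openvino/op/broadcast.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"

    void broadcast_example() {
        auto x     = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4, 5});
        auto shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {3, 4, 5});
        auto b     = std::make_shared<ov::op::v3::Broadcast>(x, shape, ov::op::BroadcastType::NUMPY);  // -> [3, 4, 5]
    }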
ngraph::Output<ngraph::Node> build_identity(const ngraph::Output<ngraph::Node>& input_node, ov::Output<ov::Node> build_identity(const ov::Output<ov::Node>& input_node,
const std::vector<size_t>& repeated_label_dims, const std::vector<size_t>& repeated_label_dims,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
NGRAPH_CHECK(repeated_label_dims.size() > 1); NGRAPH_CHECK(repeated_label_dims.size() > 1);
const auto input_shape = input_node.get_shape(); const auto input_shape = input_node.get_shape();
ngraph::Shape identity_shape(input_shape.size(), 1); ov::Shape identity_shape(input_shape.size(), 1);
const size_t repeated_label_dim_size = input_shape[repeated_label_dims[0]]; const size_t repeated_label_dim_size = input_shape[repeated_label_dims[0]];
for (const auto dim : repeated_label_dims) { for (const auto dim : repeated_label_dims) {
NGRAPH_CHECK(dim < input_shape.size()); NGRAPH_CHECK(dim < input_shape.size());
@ -463,16 +471,16 @@ ngraph::Output<ngraph::Node> build_identity(const ngraph::Output<ngraph::Node>&
offset += alpha; offset += alpha;
} }
} }
const auto identity = ngraph::opset7::Constant::create(input_node.get_element_type(), identity_shape, identity_values); const auto identity = ov::op::v0::Constant::create(input_node.get_element_type(), identity_shape, identity_values);
subgraph_nodes.insert(subgraph_nodes.end(), {identity}); subgraph_nodes.insert(subgraph_nodes.end(), {identity});
return subgraph_nodes.back(); return subgraph_nodes.back();
} }
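A 2-D analogue of the identity constant built above (the values are illustrative): for a label of size 3 repeated on two dimensions, the constant is 1 where the two indices coincide and 0 elsewhere, so Multiply followed by ReduceSum over one repeat extracts the diagonal, i.e. "ii->i". In the pass itself the shape keeps singleton dimensions between the repeats (e.g. [3, 1, 3]) so the constant broadcasts against the full input.

    #include <vector>
    #include "openvino/op/constant.hpp"

    void identity_example() {
        std::vector<float> vals = {1, 0, 0,
                                   0, 1, 0,
                                   0, 0, 1};
        auto identity = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{3, 3}, vals);
    }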
ngraph::Output<ngraph::Node> build_multi_identity(EinsumDecomposition* einsum_decompose_ptr, ov::Output<ov::Node> build_multi_identity(EinsumDecomposition* einsum_decompose_ptr,
const ngraph::Output<ngraph::Node>& input_node, const ov::Output<ov::Node>& input_node,
const std::vector<std::string>& repeated_labels, const std::vector<std::string>& repeated_labels,
const LabelDimMap& label_dim_map, const LabelDimMap& label_dim_map,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
NGRAPH_CHECK(repeated_labels.size() > 0); NGRAPH_CHECK(repeated_labels.size() > 0);
const auto get_identity = [&](size_t idx) { const auto get_identity = [&](size_t idx) {
@ -486,7 +494,7 @@ ngraph::Output<ngraph::Node> build_multi_identity(EinsumDecomposition* einsum_de
for (size_t label_ind = 1; label_ind < repeated_labels.size(); ++label_ind) { for (size_t label_ind = 1; label_ind < repeated_labels.size(); ++label_ind) {
const auto identity = get_identity(label_ind); const auto identity = get_identity(label_ind);
const auto mul = std::make_shared<ngraph::opset7::Multiply>(multi_identity, identity, ngraph::op::AutoBroadcastType::NUMPY); const auto mul = std::make_shared<ov::op::v1::Multiply>(multi_identity, identity, ov::op::AutoBroadcastType::NUMPY);
subgraph_nodes.insert(subgraph_nodes.end(), {mul}); subgraph_nodes.insert(subgraph_nodes.end(), {mul});
} }
@ -497,16 +505,16 @@ ngraph::Output<ngraph::Node> build_multi_identity(EinsumDecomposition* einsum_de
/// and subscript, repeated labels, axes to reduce. /// and subscript, repeated labels, axes to reduce.
/// ///
void prepare_diagonal_extraction_data( void prepare_diagonal_extraction_data(
const ngraph::Shape& input_shape, const ov::Shape& input_shape,
const std::string& input_subscript, const std::string& input_subscript,
const LabelDimMap& label_dim_map, const LabelDimMap& label_dim_map,
ngraph::Shape& result_shape, ov::Shape& result_shape,
std::string& resultant_subscript, std::string& resultant_subscript,
std::vector<std::string>& repeated_labels, std::vector<std::string>& repeated_labels,
ngraph::AxisSet& reduced_axes ov::AxisSet& reduced_axes
) { ) {
static const std::string ellipsis = "..."; static const std::string ellipsis = "...";
const auto labels = ngraph::opset7::Einsum::extract_labels(input_subscript); const auto labels = ov::op::v7::Einsum::extract_labels(input_subscript);
for (const auto& label : labels) { for (const auto& label : labels) {
if (resultant_subscript.find(label) != std::string::npos) { if (resultant_subscript.find(label) != std::string::npos) {
@ -538,10 +546,10 @@ void prepare_diagonal_extraction_data(
} }
void extract_diagonal(EinsumDecomposition* einsum_decompose_ptr, void extract_diagonal(EinsumDecomposition* einsum_decompose_ptr,
ngraph::OutputVector& inputs, ov::OutputVector& inputs,
std::vector<std::string>& input_subscripts, std::vector<std::string>& input_subscripts,
size_t input_ind, size_t input_ind,
ngraph::NodeVector& subgraph_nodes) { ov::NodeVector& subgraph_nodes) {
// perform sanity check for arguments // perform sanity check for arguments
const auto num_inputs = inputs.size(); const auto num_inputs = inputs.size();
NGRAPH_CHECK(num_inputs == input_subscripts.size(), "Each input must have own subscript."); NGRAPH_CHECK(num_inputs == input_subscripts.size(), "Each input must have own subscript.");
@ -552,10 +560,10 @@ void extract_diagonal(EinsumDecomposition* einsum_decompose_ptr,
const auto input_shape = input_node.get_shape(); const auto input_shape = input_node.get_shape();
const auto label_dim_map = compute_label_dim_map(input_node.get_partial_shape().rank(), input_subscript); const auto label_dim_map = compute_label_dim_map(input_node.get_partial_shape().rank(), input_subscript);
ngraph::Shape result_shape; ov::Shape result_shape;
std::string resultant_subscript; std::string resultant_subscript;
std::vector<std::string> repeated_labels; std::vector<std::string> repeated_labels;
ngraph::AxisSet reduced_axes; ov::AxisSet reduced_axes;
prepare_diagonal_extraction_data(input_shape, input_subscript, label_dim_map, prepare_diagonal_extraction_data(input_shape, input_subscript, label_dim_map,
result_shape, resultant_subscript, repeated_labels, reduced_axes); result_shape, resultant_subscript, repeated_labels, reduced_axes);
@ -567,19 +575,19 @@ void extract_diagonal(EinsumDecomposition* einsum_decompose_ptr,
const auto multi_identity = build_multi_identity(einsum_decompose_ptr, input_node, repeated_labels, label_dim_map, subgraph_nodes); const auto multi_identity = build_multi_identity(einsum_decompose_ptr, input_node, repeated_labels, label_dim_map, subgraph_nodes);
// multiply both operands with broadcasting // multiply both operands with broadcasting
const auto mul = std::make_shared<ngraph::opset7::Multiply>(input_node, multi_identity, ngraph::op::AutoBroadcastType::NUMPY); const auto mul = std::make_shared<ov::op::v1::Multiply>(input_node, multi_identity, ov::op::AutoBroadcastType::NUMPY);
subgraph_nodes.insert(subgraph_nodes.end(), {mul}); subgraph_nodes.insert(subgraph_nodes.end(), {mul});
const std::vector<int64_t> reduced_axes_vec{reduced_axes.cbegin(), reduced_axes.cend()}; const std::vector<int64_t> reduced_axes_vec{reduced_axes.cbegin(), reduced_axes.cend()};
const auto axes_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {reduced_axes.size()}, reduced_axes_vec); const auto axes_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {reduced_axes.size()}, reduced_axes_vec);
const auto reduce_sum = std::make_shared<ngraph::opset7::ReduceSum>(mul->output(0), axes_const, false); const auto reduce_sum = std::make_shared<ov::op::v1::ReduceSum>(mul->output(0), axes_const, false);
subgraph_nodes.insert(subgraph_nodes.end(), {axes_const, reduce_sum}); subgraph_nodes.insert(subgraph_nodes.end(), {axes_const, reduce_sum});
inputs[input_ind] = reduce_sum->output(0); inputs[input_ind] = reduce_sum->output(0);
input_subscripts[input_ind] = resultant_subscript; input_subscripts[input_ind] = resultant_subscript;
} }
void compute_ranges(const ngraph::Rank& input_rank, void compute_ranges(const ov::Rank& input_rank,
const std::string& input_subscript, const std::string& input_subscript,
const std::vector<std::string>& common_labels, const std::vector<std::string>& common_labels,
const std::vector<std::string>& sep_labels, const std::vector<std::string>& sep_labels,
@ -642,9 +650,9 @@ void compute_ranges(const ngraph::Rank& input_rank,
/// sub-graph decomposing Einsum that is needed for copy_runtime_info /// sub-graph decomposing Einsum that is needed for copy_runtime_info
/// ///
void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr, void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
ngraph::OutputVector& input_nodes, std::vector<std::string>& input_subscripts, ov::OutputVector& input_nodes, std::vector<std::string>& input_subscripts,
const std::string& output_subscript, size_t input_ind1, const std::string& output_subscript, size_t input_ind1,
size_t input_ind2, ngraph::NodeVector& subgraph_nodes) { size_t input_ind2, ov::NodeVector& subgraph_nodes) {
// assume that input_ind1 < input_ind2 without loss of generality, otherwise, just swap them // assume that input_ind1 < input_ind2 without loss of generality, otherwise, just swap them
if (input_ind2 < input_ind1) { if (input_ind2 < input_ind1) {
std::swap(input_ind1, input_ind2); std::swap(input_ind1, input_ind2);
@ -676,9 +684,9 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
// neither the output subscript nor the input subscripts for other Einsum inputs excluding // neither the output subscript nor the input subscripts for other Einsum inputs excluding
// two given inputs // two given inputs
auto& input_subscript1 = input_subscripts[input_ind1]; auto& input_subscript1 = input_subscripts[input_ind1];
auto labels1 = ngraph::opset7::Einsum::extract_labels(input_subscript1); auto labels1 = ov::op::v7::Einsum::extract_labels(input_subscript1);
auto& input_subscript2 = input_subscripts[input_ind2]; auto& input_subscript2 = input_subscripts[input_ind2];
auto labels2 = ngraph::opset7::Einsum::extract_labels(input_subscript2); auto labels2 = ov::op::v7::Einsum::extract_labels(input_subscript2);
std::string common_part = ""; std::string common_part = "";
std::string separate_part1 = ""; std::string separate_part1 = "";
std::string separate_part2 = ""; std::string separate_part2 = "";
@ -726,8 +734,8 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
// for further unsqueezing // for further unsqueezing
transpose_input(input_nodes, input_subscripts, convenient_subscript, input_ind2, subgraph_nodes); transpose_input(input_nodes, input_subscripts, convenient_subscript, input_ind2, subgraph_nodes);
const auto separate_labels1 = ngraph::opset7::Einsum::extract_labels(separate_part1); const auto separate_labels1 = ov::op::v7::Einsum::extract_labels(separate_part1);
const auto separate_labels2 = ngraph::opset7::Einsum::extract_labels(separate_part2); const auto separate_labels2 = ov::op::v7::Einsum::extract_labels(separate_part2);
auto label_to_dim_map1 = compute_label_dim_map(input_node1.get_partial_shape().rank(), input_subscript1); auto label_to_dim_map1 = compute_label_dim_map(input_node1.get_partial_shape().rank(), input_subscript1);
auto label_to_dim_map2 = compute_label_dim_map(input_node2.get_partial_shape().rank(), input_subscript2); auto label_to_dim_map2 = compute_label_dim_map(input_node2.get_partial_shape().rank(), input_subscript2);
@ -758,7 +766,7 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
auto unsqueeze_output2 = unsqueeze_input(input_node2, unsqueeze_axis2, subgraph_nodes); auto unsqueeze_output2 = unsqueeze_input(input_node2, unsqueeze_axis2, subgraph_nodes);
// multiply both operands with broadcasting // multiply both operands with broadcasting
auto mul = std::make_shared<ngraph::opset7::Multiply>(unsqueeze_output1, unsqueeze_output2, ngraph::op::AutoBroadcastType::NUMPY); auto mul = std::make_shared<ov::op::v1::Multiply>(unsqueeze_output1, unsqueeze_output2, ov::op::AutoBroadcastType::NUMPY);
// update input operand and input subscript for Einsum operation // update input operand and input subscript for Einsum operation
update_operands(input_nodes, input_subscripts, input_ind1, input_ind2, mul->output(0), resultant_subscript); update_operands(input_nodes, input_subscripts, input_ind1, input_ind2, mul->output(0), resultant_subscript);
@ -819,15 +827,15 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
const auto input_shape1 = input_node1.get_shape(); const auto input_shape1 = input_node1.get_shape();
const auto input_shape2 = input_node2.get_shape(); const auto input_shape2 = input_node2.get_shape();
ngraph::PartialShape common_sub_shape1 = compute_sub_shape(input_shape1, common_dims_begin, common_dims_end); ov::PartialShape common_sub_shape1 = compute_sub_shape(input_shape1, common_dims_begin, common_dims_end);
ngraph::PartialShape common_sub_shape2 = compute_sub_shape(input_shape2, common_dims_begin2, common_dims_end2); ov::PartialShape common_sub_shape2 = compute_sub_shape(input_shape2, common_dims_begin2, common_dims_end2);
const auto reduced_sub_shape_prod = compute_sub_shape(input_shape1, reduced_dims_begin, reduced_dims_end, true); const auto reduced_sub_shape_prod = compute_sub_shape(input_shape1, reduced_dims_begin, reduced_dims_end, true);
const auto reduced_sub_shape = compute_sub_shape(input_shape1, reduced_dims_begin, reduced_dims_end); const auto reduced_sub_shape = compute_sub_shape(input_shape1, reduced_dims_begin, reduced_dims_end);
const auto separate1_sub_shape = compute_sub_shape(input_shape1, separate1_dims_begin, separate1_dims_end); const auto separate1_sub_shape = compute_sub_shape(input_shape1, separate1_dims_begin, separate1_dims_end);
const auto separate2_sub_shape = compute_sub_shape(input_shape2, separate2_dims_begin, separate2_dims_end); const auto separate2_sub_shape = compute_sub_shape(input_shape2, separate2_dims_begin, separate2_dims_end);
// broadcast both inputs to the merged common sub-shape; this is needed when an ellipsis appears among the common labels // broadcast both inputs to the merged common sub-shape; this is needed when an ellipsis appears among the common labels
ngraph::PartialShape::broadcast_merge_into(common_sub_shape1, common_sub_shape2, ngraph::op::AutoBroadcastType::NUMPY); ov::PartialShape::broadcast_merge_into(common_sub_shape1, common_sub_shape2, ov::op::AutoBroadcastType::NUMPY);
const auto common_sub_shape = common_sub_shape1.get_shape(); const auto common_sub_shape = common_sub_shape1.get_shape();
broadcast_input(input_nodes, broadcast_input(input_nodes,
input_ind1, input_ind1,
@ -860,7 +868,7 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
// step 3. apply MatMul operation for formatted inputs // step 3. apply MatMul operation for formatted inputs
const bool transpose_a = (is_separate_first1 ? false : true); const bool transpose_a = (is_separate_first1 ? false : true);
const bool transpose_b = (is_separate_first2 ? true : false); const bool transpose_b = (is_separate_first2 ? true : false);
const auto matmul = std::make_shared<ngraph::opset7::MatMul>(matmul_operand1, matmul_operand2, transpose_a, transpose_b); const auto matmul = std::make_shared<ov::op::v0::MatMul>(matmul_operand1, matmul_operand2, transpose_a, transpose_b);
// step 4. reshape back by unrolling dimensions corresponding to separate labels if needed // step 4. reshape back by unrolling dimensions corresponding to separate labels if needed
// now dimensions corresponding to reduced labels are reduced by the MatMul operation // now dimensions corresponding to reduced labels are reduced by the MatMul operation
@ -869,13 +877,13 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
common_part += common_label; common_part += common_label;
} }
const std::string resultant_subscript = common_part + separate_part1 + separate_part2; const std::string resultant_subscript = common_part + separate_part1 + separate_part2;
ngraph::Shape back_shape; ov::Shape back_shape;
back_shape.insert(back_shape.end(), common_sub_shape.begin(), common_sub_shape.end()); back_shape.insert(back_shape.end(), common_sub_shape.begin(), common_sub_shape.end());
back_shape.insert(back_shape.end(), separate1_sub_shape.begin(), separate1_sub_shape.end()); back_shape.insert(back_shape.end(), separate1_sub_shape.begin(), separate1_sub_shape.end());
back_shape.insert(back_shape.end(), separate2_sub_shape.begin(), separate2_sub_shape.end()); back_shape.insert(back_shape.end(), separate2_sub_shape.begin(), separate2_sub_shape.end());
const auto new_shape_const = ngraph::opset7::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape {back_shape.size()}, back_shape); const auto new_shape_const = ov::op::v0::Constant::create(ov::element::Type_t::i64, ov::Shape {back_shape.size()}, back_shape);
const auto reshape_result_op = std::make_shared<ngraph::opset7::Reshape>(matmul->output(0), new_shape_const, false); const auto reshape_result_op = std::make_shared<ov::op::v1::Reshape>(matmul->output(0), new_shape_const, false);
subgraph_nodes.insert(subgraph_nodes.end(), {new_shape_const, reshape_result_op}); subgraph_nodes.insert(subgraph_nodes.end(), {new_shape_const, reshape_result_op});
// update input operand and input subscript for Einsum operation // update input operand and input subscript for Einsum operation
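Unrolling the separate dimensions is a plain Constant + Reshape pair, mirroring the hunk above; a self-contained sketch with invented sizes:

#include <cstdint>
#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"

std::shared_ptr<ov::op::v1::Reshape> unroll_separate_dims() {
    // Hypothetical MatMul result: [common = 5, separate1 = 2, separate2 = 6].
    auto matmul_result = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{5, 2, 6});
    // Unroll the flattened separate2 dimension (6) back into its labels (2, 3).
    const ov::Shape back_shape{5, 2, 2, 3};
    const auto new_shape_const = ov::op::v0::Constant::create(
        ov::element::i64,
        ov::Shape{back_shape.size()},
        std::vector<int64_t>(back_shape.begin(), back_shape.end()));
    // special_zero = false: every value in the shape constant is taken literally.
    return std::make_shared<ov::op::v1::Reshape>(matmul_result, new_shape_const, false);
}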
@ -887,8 +895,8 @@ void contract_two_inputs(EinsumDecomposition* einsum_decompose_ptr,
} // namespace } // namespace
EinsumDecomposition::EinsumDecomposition() { EinsumDecomposition::EinsumDecomposition() {
ngraph::matcher_pass_callback callback = [this](ngraph::pattern::Matcher& m) { ov::matcher_pass_callback callback = [this](ov::pass::pattern::Matcher& m) {
auto einsum_node = std::dynamic_pointer_cast<ngraph::opset7::Einsum>(m.get_match_root()); auto einsum_node = std::dynamic_pointer_cast<ov::op::v7::Einsum>(m.get_match_root());
if (!einsum_node) { if (!einsum_node) {
return false; return false;
} }
@ -900,12 +908,12 @@ EinsumDecomposition::EinsumDecomposition() {
auto equation = einsum_node->get_equation(); auto equation = einsum_node->get_equation();
std::vector<std::string> input_subscripts; std::vector<std::string> input_subscripts;
std::string output_subscript; std::string output_subscript;
ngraph::opset7::Einsum::parse_equation(equation, input_subscripts, output_subscript); ov::op::v7::Einsum::parse_equation(equation, input_subscripts, output_subscript);
// create a list of input nodes with preserving their order // create a list of input nodes with preserving their order
// and a vector of sub-graph nodes for copy_runtime_info // and a vector of sub-graph nodes for copy_runtime_info
ngraph::OutputVector input_nodes = einsum_node->input_values(); ov::OutputVector input_nodes = einsum_node->input_values();
ngraph::NodeVector subgraph_nodes; ov::NodeVector subgraph_nodes;
// compute einsum path that is used to contract a pair of operands // compute einsum path that is used to contract a pair of operands
// in more optimal order // in more optimal order
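parse_equation is a static helper on the Einsum op that splits the equation string into per-input subscripts plus the output subscript; a quick usage sketch (the equation here is an example, not from the diff):

#include <iostream>
#include <string>
#include <vector>

#include "openvino/op/einsum.hpp"

int main() {
    std::vector<std::string> input_subscripts;
    std::string output_subscript;
    // "ab,bc->ac" is ordinary matrix multiplication in Einstein notation.
    ov::op::v7::Einsum::parse_equation("ab,bc->ac", input_subscripts, output_subscript);
    // input_subscripts now holds {"ab", "bc"}; output_subscript holds "ac".
    for (const auto& subscript : input_subscripts)
        std::cout << subscript << '\n';
    std::cout << "-> " << output_subscript << '\n';
    return 0;
}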
@ -930,13 +938,13 @@ EinsumDecomposition::EinsumDecomposition() {
// preserve the original node name // preserve the original node name
auto last_node = input_nodes[0].get_node_shared_ptr(); auto last_node = input_nodes[0].get_node_shared_ptr();
last_node->set_friendly_name(einsum_node->get_friendly_name()); last_node->set_friendly_name(einsum_node->get_friendly_name());
ngraph::copy_runtime_info(einsum_node, subgraph_nodes); ov::copy_runtime_info(einsum_node, subgraph_nodes);
ngraph::replace_node(einsum_node, last_node); ov::replace_node(einsum_node, last_node);
return true; return true;
}; };
const auto einsum = ngraph::pattern::wrap_type<ngraph::opset7::Einsum>(); const auto einsum = ov::pass::pattern::wrap_type<ov::op::v7::Einsum>();
const auto matcher = std::make_shared<ngraph::pattern::Matcher>(einsum, "EinsumDecompositionGpu"); const auto matcher = std::make_shared<ov::pass::pattern::Matcher>(einsum, "EinsumDecompositionGpu");
register_matcher(matcher, callback); register_matcher(matcher, callback);
} }
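The tail of the pass is the standard OpenVINO matcher-pass idiom: wrap_type builds the pattern, the callback rewrites the matched node, and copy_runtime_info/replace_node transplant metadata and reconnect consumers. A skeletal version of the same registration pattern (the class name and the elided decomposition body are placeholders, not the commit's code):

#include <memory>

#include "openvino/op/einsum.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"

class EinsumDecompositionSketch : public ov::pass::MatcherPass {
public:
    EinsumDecompositionSketch() {
        // Match any ov::op::v7::Einsum node in the graph.
        const auto einsum = ov::pass::pattern::wrap_type<ov::op::v7::Einsum>();
        ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) {
            auto einsum_node = std::dynamic_pointer_cast<ov::op::v7::Einsum>(m.get_match_root());
            if (!einsum_node)
                return false;  // unexpected root; leave the graph untouched
            // ... build the replacement sub-graph here, then hand over the
            // friendly name, runtime info, and users of the original node ...
            return true;  // report that the graph was modified
        };
        register_matcher(std::make_shared<ov::pass::pattern::Matcher>(einsum, "EinsumDecompositionSketch"),
                         callback);
    }
};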
Some files were not shown because too many files have changed in this diff.