[GPU] Use stream executor and exceptions from new api (#18531)

* [Common] Handle ov::Exception too in CompiledModel wrapper

* [GPU] Use new threading and exception interfaces where possible
Vladimir Paramuzov committed 2023-07-14 22:51:40 +04:00 (committed by GitHub)
parent 08cd757ed5
commit 325d02b760
92 changed files with 350 additions and 415 deletions

View File

@@ -44,6 +44,24 @@ ov::Any InferenceEngine::ICompiledModelWrapper::get_property(const std::string&
if (ov::loaded_from_cache == name) {
return m_model->isLoadedFromCache();
}
auto get_supported_properties = [&]() {
auto ro_properties = m_model->GetMetric(METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>();
auto rw_properties = m_model->GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
std::vector<ov::PropertyName> supported_properties;
for (auto&& ro_property : ro_properties) {
if (ro_property != METRIC_KEY(SUPPORTED_METRICS) && ro_property != METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
supported_properties.emplace_back(ro_property, ov::PropertyMutability::RO);
}
}
for (auto&& rw_property : rw_properties) {
supported_properties.emplace_back(rw_property, ov::PropertyMutability::RW);
}
supported_properties.emplace_back(ov::supported_properties.name(), ov::PropertyMutability::RO);
supported_properties.emplace_back(ov::loaded_from_cache.name(), ov::PropertyMutability::RO);
return supported_properties;
};
if (ov::supported_properties == name) {
try {
auto supported_properties = m_model->GetMetric(name).as<std::vector<ov::PropertyName>>();
@@ -55,25 +73,16 @@ ov::Any InferenceEngine::ICompiledModelWrapper::get_property(const std::string&
}),
supported_properties.end());
return supported_properties;
} catch (ov::Exception&) {
return get_supported_properties();
} catch (InferenceEngine::Exception&) {
auto ro_properties = m_model->GetMetric(METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>();
auto rw_properties = m_model->GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
std::vector<ov::PropertyName> supported_properties;
for (auto&& ro_property : ro_properties) {
if (ro_property != METRIC_KEY(SUPPORTED_METRICS) && ro_property != METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
supported_properties.emplace_back(ro_property, ov::PropertyMutability::RO);
}
}
for (auto&& rw_property : rw_properties) {
supported_properties.emplace_back(rw_property, ov::PropertyMutability::RW);
}
supported_properties.emplace_back(ov::supported_properties.name(), ov::PropertyMutability::RO);
supported_properties.emplace_back(ov::loaded_from_cache.name(), ov::PropertyMutability::RO);
return supported_properties;
return get_supported_properties();
}
}
try {
return m_model->GetMetric(name);
} catch (ov::Exception&) {
return m_model->GetConfig(name);
} catch (InferenceEngine::Exception&) {
return m_model->GetConfig(name);
}
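For reference, the control flow the wrapper now implements, as a standalone sketch: query the metric through the model first, and run the same fallback on either exception family, since ov::Exception and InferenceEngine::Exception are unrelated hierarchies during the API transition. The helper name and template shape below are illustrative, not code from this change.

#include <string>

#include "ie_common.h"               // InferenceEngine::Exception (legacy API)
#include "openvino/core/any.hpp"     // ov::Any
#include "openvino/core/except.hpp"  // ov::Exception

// Try the model's own metric first; run `fallback` no matter which
// exception family the wrapped implementation throws.
template <typename Model, typename Fallback>
ov::Any query_metric_or(Model& model, const std::string& name, Fallback fallback) {
    try {
        return model.GetMetric(name);
    } catch (const ov::Exception&) {             // plugins already on the 2.0 API
        return fallback();
    } catch (const InferenceEngine::Exception&) { // legacy plugins
        return fallback();
    }
}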

View File

@@ -4,6 +4,8 @@
#pragma once
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#include "intel_gpu/graph/topology.hpp"
#include "intel_gpu/graph/program.hpp"
#include "intel_gpu/graph/serialization/binary_buffer.hpp"
@@ -81,12 +83,12 @@ public:
const topology& topo,
const ExecutionConfig& config = {},
bool is_internal = false,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor = nullptr);
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor = nullptr);
network(engine& engine,
const std::set<std::shared_ptr<program_node>>& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal);
network(program::ptr program, uint16_t stream_id = 0);
@@ -103,13 +105,13 @@ public:
static ptr build_network(engine& engine,
const topology& topology,
const ExecutionConfig& config = {},
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor = nullptr,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor = nullptr,
bool is_internal = false);
static ptr build_network(engine& engine,
const std::set<std::shared_ptr<program_node>>& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal);
static ptr allocate_network(stream::ptr stream,

View File

@@ -4,6 +4,8 @@
#pragma once
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#include "intel_gpu/runtime/engine.hpp"
#include "intel_gpu/runtime/stream.hpp"
#include "intel_gpu/runtime/lru_cache.hpp"
@@ -125,7 +127,7 @@ public:
program(engine& engine_ref,
topology const& topology,
const ExecutionConfig& config,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal = false,
bool no_optimizations = false,
bool is_body_program = false);
@@ -133,14 +135,14 @@ public:
program(engine& engine_ref,
std::set<std::shared_ptr<program_node>> const& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal);
explicit program(engine& engine);
~program();
engine& get_engine() const { return _engine; }
const ExecutionConfig& get_config() const { return _config; }
InferenceEngine::CPUStreamsExecutor::Ptr get_task_executor() const { return _task_executor; }
std::shared_ptr<ov::threading::IStreamsExecutor> get_task_executor() const { return _task_executor; }
std::list<program_node*>& get_inputs() {
return inputs;
} // ToDo: redesign trim to output pass to make it const as well as get_engine and get options
@@ -240,14 +242,14 @@ public:
static ptr build_program(engine& engine,
const topology& topology,
const ExecutionConfig& config,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal = false,
bool no_optimizations = false,
bool is_body_program = false);
static ptr build_program(engine& engine,
const std::set<std::shared_ptr<program_node>>& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal);
static void init_primitives();
kernels_cache& get_kernels_cache() const;
@@ -261,7 +263,7 @@ public:
ICompilationContext& get_compilation_context() const { return *_compilation_context; }
void cancel_compilation_context();
static std::shared_ptr<InferenceEngine::CPUStreamsExecutor> make_task_executor(const ExecutionConfig& config);
static std::shared_ptr<ov::threading::IStreamsExecutor> make_task_executor(const ExecutionConfig& config);
private:
uint32_t prog_id = 0;
@@ -270,7 +272,7 @@ private:
// TODO: Consider moving it to engine
std::unique_ptr<kernels_cache> _kernels_cache;
ExecutionConfig _config;
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> _task_executor = nullptr;
std::shared_ptr<ov::threading::IStreamsExecutor> _task_executor = nullptr;
std::list<program_node*> inputs;
std::vector<program_node*> outputs;
nodes_ordering processing_order;

View File

@@ -7,11 +7,13 @@
#define RUN_ALL_MODEL_CACHING_TESTS
#include <unordered_map>
#include "openvino/core/deprecated.hpp"
#include "ie/ie_common.h"
namespace cldnn {
class serial_util {
public:
OPENVINO_SUPPRESS_DEPRECATED_START
static InferenceEngine::Layout layout_from_string(const std::string& name) {
static const std::unordered_map<std::string, InferenceEngine::Layout> layouts = {
{ "ANY", InferenceEngine::Layout::ANY },
@@ -36,8 +38,9 @@ public:
if (it != layouts.end()) {
return it->second;
}
IE_THROW(NetworkNotRead) << "Unknown layout with name '" << name << "'";
OPENVINO_THROW("Unknown layout with name '", name, "'");
}
OPENVINO_SUPPRESS_DEPRECATED_END
};
class membuf : public std::streambuf {

View File

@@ -7,6 +7,7 @@
#include <ie_layouts.h>
#include "intel_gpu/runtime/layout.hpp"
#include "openvino/core/layout.hpp"
#include "openvino/core/deprecated.hpp"
#include "ngraph/type/element_type.hpp"
@@ -15,7 +16,7 @@ namespace intel_gpu {
#define TensorValue(val) static_cast<cldnn::tensor::value_type>(val)
inline cldnn::tensor tensor_from_dims(const InferenceEngine::SizeVector& dims, int def = 1) {
inline cldnn::tensor tensor_from_dims(const ov::Shape& dims, int def = 1) {
switch (dims.size()) {
case 0: return cldnn::tensor(cldnn::batch(def), cldnn::feature(def), cldnn::spatial(def, def));
case 1: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(def), cldnn::spatial(def, def));
@@ -24,10 +25,11 @@ inline cldnn::tensor tensor_from_dims(const InferenceEngine::SizeVector& dims, i
case 4: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(dims[1]), cldnn::spatial(dims[3], dims[2]));
case 5: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(dims[1]), cldnn::spatial(dims[4], dims[3], dims[2]));
case 6: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(dims[1]), cldnn::spatial(dims[5], dims[4], dims[3], dims[2]));
default: IE_THROW() << "Invalid dimensions size(" << dims.size() << ") for gpu tensor";
default: OPENVINO_THROW("Invalid dimensions size(", dims.size(), ") for gpu tensor");
}
}
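A quick usage sketch of the updated helper; the call assumes the ov::intel_gpu namespace where the function lives, and the shape values are illustrative:

#include "openvino/core/shape.hpp"

inline cldnn::tensor example_tensor() {
    ov::Shape dims{1, 3, 224, 224};  // NCHW-like shape
    // Per the case 4 branch above: batch=1, feature=3,
    // spatial(x=dims[3]=224, y=dims[2]=224) -- x before y.
    return tensor_from_dims(dims);
}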
OPENVINO_SUPPRESS_DEPRECATED_START
inline cldnn::data_types DataTypeFromPrecision(InferenceEngine::Precision p) {
switch (p) {
case InferenceEngine::Precision::I16:
@@ -74,7 +76,7 @@ inline InferenceEngine::Precision PrecisionFromDataType(cldnn::data_types dt) {
case cldnn::data_types::i64:
return InferenceEngine::Precision::ePrecision::I64;
default:
IE_THROW(ParameterMismatch) << "The plugin does not support " << cldnn::data_type_traits::name(dt) << " data type";
OPENVINO_THROW("The plugin does not support ", cldnn::data_type_traits::name(dt), " data type");
}
}
@@ -140,21 +142,7 @@ inline cldnn::format ImageFormatFromLayout(InferenceEngine::Layout l) {
<< "The plugin does not support " << l << " image layout";
}
}
inline InferenceEngine::Layout InferenceEngineLayoutFromOVLayout(ov::Layout l) {
if (l == ov::Layout("C")) return InferenceEngine::Layout::C;
if (l == ov::Layout("CN")) return InferenceEngine::Layout::CN;
if (l == ov::Layout("HW")) return InferenceEngine::Layout::HW;
if (l == ov::Layout("NC")) return InferenceEngine::Layout::NC;
if (l == ov::Layout("CHW")) return InferenceEngine::Layout::CHW;
if (l == ov::Layout("HWC")) return InferenceEngine::Layout::HWC;
if (l == ov::Layout("NCHW")) return InferenceEngine::Layout::NCHW;
if (l == ov::Layout("NC??")) return InferenceEngine::Layout::NCHW;
if (l == ov::Layout("NHWC")) return InferenceEngine::Layout::NHWC;
if (l == ov::Layout("NCDHW")) return InferenceEngine::Layout::NCDHW;
if (l == ov::Layout("NDHWC")) return InferenceEngine::Layout::NDHWC;
IE_THROW() << "The plugin does not support " << l.to_string() << " layout";
}
OPENVINO_SUPPRESS_DEPRECATED_END
/// WA: Force exit. Any opencl api call can hang after CL_OUT_OF_RESOURCES.
inline void ForceExit() {

View File

@@ -3,6 +3,12 @@
//
#include "intel_gpu/runtime/execution_config.hpp"
#include "ie_metric_helpers.hpp"
#include <ie_ngraph_utils.hpp>
#include "ie_plugin_config.hpp"
#include "gpu/gpu_config.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "ie_icore.hpp"
namespace ov {
namespace intel_gpu {

View File

@@ -31,16 +31,15 @@ enum class reduce_mode : uint16_t;
enum class eltwise_mode : int32_t;
} // namespace cldnn
#define REGISTER_FACTORY_IMPL(op_version, op_name) \
void __register ## _ ## op_name ## _ ## op_version(); \
void __register ## _ ## op_name ## _ ## op_version() { \
Program::RegisterFactory<ov::op::op_version::op_name>( \
[](Program& p, const std::shared_ptr<ov::Node>& op) { \
auto op_casted = std::dynamic_pointer_cast<ov::op::op_version::op_name>(op); \
if (!op_casted) \
IE_THROW() << "Invalid ov Node type passed into " << __PRETTY_FUNCTION__; \
Create##op_name##Op(p, op_casted); \
}); \
#define REGISTER_FACTORY_IMPL(op_version, op_name) \
void __register ## _ ## op_name ## _ ## op_version(); \
void __register ## _ ## op_name ## _ ## op_version() { \
Program::RegisterFactory<ov::op::op_version::op_name>( \
[](Program& p, const std::shared_ptr<ov::Node>& op) { \
auto op_casted = std::dynamic_pointer_cast<ov::op::op_version::op_name>(op); \
OPENVINO_ASSERT(op_casted, "[GPU] Invalid ov Node type passed into ", __PRETTY_FUNCTION__); \
Create##op_name##Op(p, op_casted); \
}); \
}
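For reference, this is roughly what REGISTER_FACTORY_IMPL(v0, Relu) now expands to; Relu is chosen purely for illustration, and the cast-or-assert line is the part this hunk changes:

void __register_Relu_v0();
void __register_Relu_v0() {
    Program::RegisterFactory<ov::op::v0::Relu>(
        [](Program& p, const std::shared_ptr<ov::Node>& op) {
            auto op_casted = std::dynamic_pointer_cast<ov::op::v0::Relu>(op);
            // OPENVINO_ASSERT throws ov::Exception when the cast yields null,
            // replacing the explicit if + IE_THROW pair.
            OPENVINO_ASSERT(op_casted, "[GPU] Invalid ov Node type passed into ", __PRETTY_FUNCTION__);
            CreateReluOp(p, op_casted);
        });
}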
namespace ov {
@@ -84,7 +83,7 @@ public:
Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, const ExecutionConfig& config,
bool createTopologyOnly = false, bool partialBuild = false,
InferenceEngine::InputsDataMap* inputs = nullptr, InferenceEngine::OutputsDataMap* outputs = nullptr,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor = nullptr, bool innerProgram = false);
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor = nullptr, bool innerProgram = false);
Program(cldnn::engine& engine, const ExecutionConfig& config,
InferenceEngine::InputsDataMap* inputs = nullptr, InferenceEngine::OutputsDataMap* outputs = nullptr);
@@ -159,7 +158,7 @@ public:
bool use_new_shape_infer() const { return allow_new_shape_infer; }
bool requires_new_shape_infer(const ngraph::Node& op) const;
InferenceEngine::CPUStreamsExecutor::Ptr get_task_executor() { return m_task_executor; }
std::shared_ptr<ov::threading::IStreamsExecutor> get_task_executor() { return m_task_executor; }
private:
static factories_map_t factories_map;
@@ -177,7 +176,7 @@ private:
bool queryMode;
InferenceEngine::CPUStreamsExecutor::Ptr m_task_executor;
std::shared_ptr<ov::threading::IStreamsExecutor> m_task_executor;
void EnableQueryMode() { queryMode = true; }
void DisableQueryMode() { queryMode = false; }

View File

@@ -11,7 +11,6 @@
#include "layout.hpp"
#include "execution_config.hpp"
#include "engine_configuration.hpp"
#include <threading/ie_cpu_streams_executor.hpp>
#include <memory>
#include <set>
@@ -147,7 +146,6 @@ public:
/// Factory method which creates engine object with impl configured by @p engine_type
/// @param engine_type requested engine type
/// @param task_executor GPU plugin internal task executor
/// @param runtime_type requested execution runtime for the engine. @note some runtime/engine types configurations might be unsupported
/// @param device specifies the device which the engine is created for
/// @param configuration options for the engine
@@ -156,7 +154,6 @@ public:
/// Factory method which creates engine object with impl configured by @p engine_type
/// @param engine_type requested engine type
/// @param runtime_type requested execution runtime for the engine. @note some runtime/engine types configurations might be unsupported
/// @param task_executor GPU plugin internal task executor
/// @param configuration options for the engine
/// @note engine is created for the first device returned by devices query
static std::shared_ptr<cldnn::engine> create(engine_types engine_type, runtime_types runtime_type);

View File

@@ -4,12 +4,7 @@
#pragma once
#include "utils.hpp"
#include <string>
#include <stdexcept>
#include <thread>
#include <threading/ie_cpu_streams_executor.hpp>
namespace cldnn {

View File

@@ -60,7 +60,7 @@ layout arg_max_min_inst::calc_output_layout(arg_max_min_node const& node, kernel
auto format = input_layout.format;
auto sizes = input_layout.get_dims();
if (desc->axis >= static_cast<int64_t>(sizes.size()) || desc->axis < 0) {
IE_THROW() << "Incorrect arg_max_min axis.";
OPENVINO_THROW("Incorrect arg_max_min axis.");
}
sizes[desc->axis] = desc->top_k;
return layout{output_data_type, format, tensor(format::get_default_format(input_layout.get_rank()), sizes)};

View File

@@ -12,9 +12,9 @@
namespace cldnn {
class CompilationContext : public ICompilationContext {
public:
CompilationContext(InferenceEngine::CPUStreamsExecutor::Config task_executor_config) : _task_executor_config(task_executor_config) {
CompilationContext(ov::threading::IStreamsExecutor::Config task_executor_config) : _task_executor_config(task_executor_config) {
_task_executor_config._streams = 4;
_task_executor = std::make_shared<InferenceEngine::CPUStreamsExecutor>(_task_executor_config);
_task_executor = std::make_shared<ov::threading::CPUStreamsExecutor>(_task_executor_config);
}
void push_task(size_t key, Task&& task) override {
@@ -62,14 +62,14 @@ public:
}
private:
InferenceEngine::CPUStreamsExecutor::Config _task_executor_config;
InferenceEngine::CPUStreamsExecutor::Ptr _task_executor;
ov::threading::IStreamsExecutor::Config _task_executor_config;
std::shared_ptr<ov::threading::IStreamsExecutor> _task_executor;
std::mutex _mutex;
std::unordered_set<size_t> _task_keys;
std::atomic_bool _stop_compilation{false};
};
std::unique_ptr<ICompilationContext> ICompilationContext::create(InferenceEngine::CPUStreamsExecutor::Config task_executor_config) {
std::unique_ptr<ICompilationContext> ICompilationContext::create(ov::threading::IStreamsExecutor::Config task_executor_config) {
return cldnn::make_unique<CompilationContext>(task_executor_config);
}
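The new threading types in a standalone sketch, including the snake_case run_and_wait that replaces runAndWait elsewhere in this PR; the task bodies are placeholders:

#include <memory>
#include <vector>

#include "openvino/runtime/threading/cpu_streams_executor.hpp"

int main() {
    ov::threading::IStreamsExecutor::Config config("async compilation", 1);
    config._streams = 4;  // same tweak the CompilationContext constructor applies
    auto executor = std::make_shared<ov::threading::CPUStreamsExecutor>(config);

    executor->run([] { /* compile one kernel asynchronously */ });

    std::vector<ov::threading::Task> tasks;
    tasks.emplace_back([] { /* compile node A */ });
    tasks.emplace_back([] { /* compile node B */ });
    executor->run_and_wait(tasks);  // blocks until every task finishes
    return 0;
}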

View File

@@ -20,7 +20,7 @@
#include <cmath>
#include <iomanip>
#include <threading/ie_cpu_streams_executor.hpp>
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
using namespace cldnn;
@@ -35,7 +35,7 @@ void compile_graph::run(program& p) {
auto task_executor = p.get_task_executor();
auto& proc_order = p.get_processing_order();
std::vector<InferenceEngine::Task> tasks;
std::vector<ov::threading::Task> tasks;
std::exception_ptr exception;
for (size_t idx = 0; idx < proc_order.size(); idx++) {
auto& node = *(std::next(proc_order.begin(), idx));
@@ -97,7 +97,7 @@ void compile_graph::run(program& p) {
}
}
task_executor->runAndWait(tasks);
task_executor->run_and_wait(tasks);
tasks.clear();
if (exception) {

View File

@@ -110,7 +110,7 @@ bool propagate_constants::has_non_const_user(program_node& node) const {
std::list<std::pair<primitive_id, memory::ptr>> propagate_constants::calculate(engine& engine,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor) {
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor) {
if (!has_non_trivial_constants)
return {};

View File

@@ -24,7 +24,7 @@ struct condition_impl : typed_primitive_impl<condition> {
}
void set_node_params(const program_node& arg) override {
IE_ASSERT(arg.is_type<condition>());
OPENVINO_ASSERT(arg.is_type<condition>());
const auto& node = arg.as<condition>();
_node_id = node.id();
}

View File

@@ -35,7 +35,7 @@ struct loop_impl : typed_primitive_impl<loop> {
}
void set_node_params(const program_node& arg) override {
IE_ASSERT(arg.is_type<loop>());
OPENVINO_ASSERT(arg.is_type<loop>());
const auto& node = arg.as<loop>();
_max_iteration = node.get_max_iteration();
_back_edges = node.get_back_edges();

View File

@@ -29,7 +29,7 @@ struct assign_impl : public typed_primitive_impl<assign> {
}
void set_node_params(const program_node& arg) override {
IE_ASSERT(arg.is_type<assign>());
OPENVINO_ASSERT(arg.is_type<assign>());
const auto& node = arg.as<assign>();
variable_id = node.get_primitive()->variable_id;
}

View File

@@ -56,7 +56,7 @@ public:
}
void set_node_params(const program_node& arg) override {
IE_ASSERT(arg.is_type<detection_output>());
OPENVINO_ASSERT(arg.is_type<detection_output>());
const auto& node = arg.as<detection_output>();
nms_type = (node.get_primitive()->decrease_label_id ? NMSType::MXNET : NMSType::CAFFE);
}

View File

@@ -29,7 +29,7 @@ struct read_value_impl : public typed_primitive_impl<read_value> {
}
void set_node_params(const program_node& arg) override {
IE_ASSERT(arg.is_type<read_value>());
OPENVINO_ASSERT(arg.is_type<read_value>());
const auto& node = arg.as<read_value>();
variable_id = node.get_primitive()->variable_id;
}

View File

@@ -29,7 +29,7 @@ static inline kernel_selector::argm_axis GetArgMaxMinAxis(int64_t axis, size_t r
else
return kernel_selector::argm_axis::X;
case 4: return kernel_selector::argm_axis::X;
default: IE_THROW() << "Invalid arg_max_min axis " << axis;
default: OPENVINO_THROW("Invalid arg_max_min axis ", axis);
}
}
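The two error-reporting styles side by side, as converted throughout this PR; the functions and the axis parameter are placeholders:

#include <cstdint>

#include "ie_common.h"                // IE_THROW (legacy)
#include "openvino/core/except.hpp"   // OPENVINO_THROW / OPENVINO_ASSERT

[[noreturn]] void fail_legacy(int64_t axis) {
    IE_THROW() << "Invalid arg_max_min axis " << axis;  // stream-style message
}
[[noreturn]] void fail_new(int64_t axis) {
    OPENVINO_THROW("Invalid arg_max_min axis ", axis);  // variadic message parts
}
void check(int64_t axis) {
    // OPENVINO_ASSERT bundles the condition and the message in one statement.
    OPENVINO_ASSERT(axis >= 0, "Invalid arg_max_min axis ", axis);
}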

View File

@@ -15,7 +15,7 @@ namespace {
kernel_selector::concat_axis convert_axis(int64_t axis, size_t rank) {
auto cldnn_axis = axis >= 0 ? axis : axis + static_cast<int64_t>(rank);
if (cldnn_axis >= static_cast<int64_t>(rank))
IE_THROW() << "Concatenation axis exceeds number of dimensions";
OPENVINO_THROW("Concatenation axis exceeds number of dimensions");
// Difference in dimension ordering between IE and GPU plugin,
// reverse spatial dimensions after batch and feature.
@@ -33,7 +33,7 @@ kernel_selector::concat_axis convert_axis(int64_t axis, size_t rank) {
case 3: return kernel_selector::concat_axis::Y;
case 4: return kernel_selector::concat_axis::Z;
case 5: return kernel_selector::concat_axis::W;
default: IE_THROW() << "Unsupported concatenation axis: " << axis;
default: OPENVINO_THROW("Unsupported concatenation axis: ", axis);
}
return kernel_selector::concat_axis::FEATURE; // shouldn't get here

View File

@@ -24,7 +24,7 @@ static kernel_selector::gather_axis convert_axis(int64_t axis, size_t rank) {
case -1: return kernel_selector::gather_axis::Y;
case -2: return kernel_selector::gather_axis::FEATURE;
case -3: return kernel_selector::gather_axis::BATCH;
default: IE_THROW() << "Unsupported gather axis: " << axis;
default: OPENVINO_THROW("Unsupported gather axis: ", axis);
}
} else if (rank == 5) {
switch (axis) {
@@ -35,7 +35,7 @@ static kernel_selector::gather_axis convert_axis(int64_t axis, size_t rank) {
case -2: return kernel_selector::gather_axis::Z;
case -3: return kernel_selector::gather_axis::FEATURE;
case -4: return kernel_selector::gather_axis::BATCH;
default: IE_THROW() << "Unsupported gather axis: " << axis;
default: OPENVINO_THROW("Unsupported gather axis: ", axis);
}
} else if (rank == 6) {
switch (axis) {
@@ -48,10 +48,10 @@ static kernel_selector::gather_axis convert_axis(int64_t axis, size_t rank) {
case -3: return kernel_selector::gather_axis::W;
case -4: return kernel_selector::gather_axis::FEATURE;
case -5: return kernel_selector::gather_axis::BATCH;
default: IE_THROW() << "Unsupported gather axis: " << axis;
default: OPENVINO_THROW("Unsupported gather axis: ", axis);
}
} else {
IE_THROW() << "Unsupported gather axis: " << axis;
OPENVINO_THROW("Unsupported gather axis: ", axis);
}
}

View File

@@ -38,7 +38,7 @@ static inline kernel_selector::gather_elements_axis convert_axis(int64_t axis, s
else
return kernel_selector::gather_elements_axis::X;
case 5: return kernel_selector::gather_elements_axis::X;
default: IE_THROW() << "Incorrect gather_elements axis.";
default: OPENVINO_THROW("Incorrect gather_elements axis.");
}
}

View File

@@ -29,7 +29,7 @@ static inline kernel_selector::softmax_dim get_softmax_dim(int64_t axis, size_t
else
return kernel_selector::softmax_dim::X;
case 4: return kernel_selector::softmax_dim::X;
default: IE_THROW() << "Invalid softmax axis " << axis;
default: OPENVINO_THROW("Invalid softmax axis ", axis);
}
}

View File

@@ -4,7 +4,7 @@
#pragma once
#include <threading/ie_cpu_streams_executor.hpp>
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#include <functional>
#include <memory>
@@ -19,7 +19,7 @@ public:
virtual bool is_stopped() = 0;
virtual void cancel() = 0;
static std::unique_ptr<ICompilationContext> create(InferenceEngine::CPUStreamsExecutor::Config task_executor_config);
static std::unique_ptr<ICompilationContext> create(ov::threading::IStreamsExecutor::Config task_executor_config);
};
} // namespace cldnn

View File

@@ -254,7 +254,7 @@ private:
void run(program& p) override;
std::list<std::pair<primitive_id, memory::ptr>> calculate(engine& engine,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor);
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor);
bool has_non_const_user(program_node& node) const;
void handle_constant(program& prog, program_node& node);
void add_constant(program& prog, program_node& node);

View File

@@ -353,13 +353,13 @@ network::network(engine& engine,
const topology& topo,
const ExecutionConfig& config,
bool is_internal,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor)
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor)
: network(program::build_program(engine, topo, config, task_executor, is_internal), config, engine.create_stream(config), is_internal) {}
network::network(engine& engine,
const std::set<std::shared_ptr<program_node>>& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal)
: network(program::build_program(engine, nodes, config, task_executor, is_internal), config, engine.create_stream(config), is_internal) {}
@@ -674,7 +674,7 @@ network::ptr network::allocate_network(engine& engine, program::ptr program, boo
network::ptr network::build_network(engine& engine,
const topology& topology,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal) {
return std::make_shared<network>(engine, topology, config, is_internal, task_executor);
}
@@ -682,7 +682,7 @@ network::ptr network::build_network(engine& engine,
network::ptr network::build_network(engine& engine,
const std::set<std::shared_ptr<program_node>>& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal) {
return std::make_shared<network>(engine, nodes, config, task_executor, is_internal);
}
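A hypothetical call site for the updated builder: any std::shared_ptr<ov::threading::IStreamsExecutor> now fits where a CPUStreamsExecutor::Ptr was required before. The include path and executor name below are assumptions of this sketch.

#include <memory>

#include "intel_gpu/graph/network.hpp"
#include "openvino/runtime/threading/cpu_streams_executor.hpp"

cldnn::network::ptr build_with_new_executor(cldnn::engine& engine, const cldnn::topology& topo) {
    // CPUStreamsExecutor implements the IStreamsExecutor interface the
    // builder now accepts; one stream is enough for a sketch.
    auto executor = std::make_shared<ov::threading::CPUStreamsExecutor>(
        ov::threading::IStreamsExecutor::Config{"gpu-build", 1});
    return cldnn::network::build_network(engine, topo, {}, executor);
}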

View File

@@ -1525,7 +1525,7 @@ int32_t primitive_inst::get_index_in_deps(memory::cptr arg) const {
return idx;
}
IE_THROW() << "[get_index_in_deps]: not found in _deps";
OPENVINO_THROW("[get_index_in_deps]: not found in _deps");
}
void primitive_inst::load(cldnn::BinaryInputBuffer& ib) {

View File

@@ -104,7 +104,7 @@
using namespace cldnn;
using namespace ov::intel_gpu;
static void adjust_num_cores(InferenceEngine::CPUStreamsExecutor::Config& config) {
static void adjust_num_cores(ov::threading::IStreamsExecutor::Config& config) {
if (InferenceEngine::getAvailableCoresTypes().size() == 1) {
return;
}
@@ -115,23 +115,23 @@ static void adjust_num_cores(InferenceEngine::CPUStreamsExecutor::Config& config
auto core_type = config._threadPreferredCoreType;
int num_cores = total_num_cores;
if (core_type == InferenceEngine::IStreamsExecutor::Config::BIG) {
if (core_type == ov::threading::IStreamsExecutor::Config::BIG) {
num_cores = total_num_big_cores;
} else if (core_type == InferenceEngine::IStreamsExecutor::Config::LITTLE) {
} else if (core_type == ov::threading::IStreamsExecutor::Config::LITTLE) {
num_cores = total_num_little_cores;
}
config._streams = std::min(config._streams, num_cores);
}
static InferenceEngine::CPUStreamsExecutor::Config make_task_executor_config(const ExecutionConfig& config, std::string tags) {
InferenceEngine::CPUStreamsExecutor::Config task_executor_config(tags, 1);
static ov::threading::IStreamsExecutor::Config make_task_executor_config(const ExecutionConfig& config, std::string tags) {
ov::threading::IStreamsExecutor::Config task_executor_config(tags, 1);
task_executor_config._streams = config.get_property(ov::compilation_num_threads);
auto priority = config.get_property(ov::intel_gpu::hint::host_task_priority);
switch (priority) {
case ov::hint::Priority::LOW: task_executor_config._threadPreferredCoreType = InferenceEngine::IStreamsExecutor::Config::LITTLE; break;
case ov::hint::Priority::MEDIUM: task_executor_config._threadPreferredCoreType = InferenceEngine::IStreamsExecutor::Config::ANY; break;
case ov::hint::Priority::HIGH: task_executor_config._threadPreferredCoreType = InferenceEngine::IStreamsExecutor::Config::BIG; break;
case ov::hint::Priority::LOW: task_executor_config._threadPreferredCoreType = ov::threading::IStreamsExecutor::Config::LITTLE; break;
case ov::hint::Priority::MEDIUM: task_executor_config._threadPreferredCoreType = ov::threading::IStreamsExecutor::Config::ANY; break;
case ov::hint::Priority::HIGH: task_executor_config._threadPreferredCoreType = ov::threading::IStreamsExecutor::Config::BIG; break;
default: OPENVINO_ASSERT(false, "[GPU] Can't create task executor: invalid host task priority value: ", priority);
}
@@ -140,15 +140,15 @@ static InferenceEngine::CPUStreamsExecutor::Config make_task_executor_config(con
return task_executor_config;
}
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> program::make_task_executor(const ExecutionConfig& config) {
InferenceEngine::CPUStreamsExecutor::Config task_executor_config = make_task_executor_config(config, "CPU Tasks executor for GPU plugin");
return std::make_shared<InferenceEngine::CPUStreamsExecutor>(task_executor_config);
std::shared_ptr<ov::threading::IStreamsExecutor> program::make_task_executor(const ExecutionConfig& config) {
ov::threading::IStreamsExecutor::Config task_executor_config = make_task_executor_config(config, "CPU Tasks executor for GPU plugin");
return std::make_shared<ov::threading::CPUStreamsExecutor>(task_executor_config);
}
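A hypothetical configuration that exercises this mapping: host_task_priority picks the preferred core type and compilation_num_threads bounds the stream count. The property setters follow the ExecutionConfig API as used elsewhere in the plugin; the values are illustrative.

ov::intel_gpu::ExecutionConfig config;
config.set_property(ov::compilation_num_threads(8));
config.set_property(ov::intel_gpu::hint::host_task_priority(ov::hint::Priority::HIGH));

// -> _threadPreferredCoreType = BIG, _streams = min(8, number of big cores)
auto executor = cldnn::program::make_task_executor(config);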
program::program(engine& engine_ref,
topology const& topology,
const ExecutionConfig& config,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal,
bool no_optimizations,
bool is_body_program)
@@ -175,7 +175,7 @@ program::program(engine& engine_ref,
program::program(engine& engine_ref,
std::set<std::shared_ptr<program_node>> const& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal)
: _engine(engine_ref),
_stream(_engine.create_stream(config)),
@@ -246,7 +246,7 @@ kernels_cache& program::get_kernels_cache() const {
program::ptr program::build_program(engine& engine,
const topology& topology,
const ExecutionConfig& config,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal,
bool no_optimizations,
bool is_body_program) {
@@ -265,7 +265,7 @@ program::ptr program::build_program(engine& engine,
program::ptr program::build_program(engine& engine,
const std::set<std::shared_ptr<program_node>>& nodes,
const ExecutionConfig& config,
std::shared_ptr<InferenceEngine::CPUStreamsExecutor> task_executor,
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor,
bool is_internal) {
return std::make_shared<program>(engine, nodes, config, task_executor, is_internal);
}

View File

@@ -987,8 +987,8 @@ void program_node::init_onednn_primitive_attributes() {
} else {
dnnl::algorithm alg = onednn::convert_activation_func(fused_desc->activation_function);
if (alg == dnnl::algorithm::undef)
IE_THROW() << "Activations that are undef algorithms must be converted to other activations before "
"pushing to post-op.";
OPENVINO_THROW("Activations that are undef algorithms must be converted to other activations before "
"pushing to post-op.");
// Usage of alpha and beta between cldnn::pow and dnnl::eltwise::pow is different : d = pow(src, a) / d = a * pow(src, b)
if (alg == dnnl::algorithm::eltwise_pow)
post_ops.append_eltwise(alg, 1.0f, fused_desc->additional_params.a);

View File

@@ -1741,8 +1741,7 @@ JitConstants FusedOpsCodeGenerator::MakeOpJitConstants(const FusedOpsConfigurati
if (desc.GetType() == KernelType::ELTWISE) {
auto p = desc.GetOpParams<eltwise_fuse_params>();
if (!p)
IE_THROW() << "[clDNN] Eltwise fuse params can't be nullptr";
OPENVINO_ASSERT(p != nullptr, "[GPU] Eltwise fuse params can't be nullptr");
if (p->mode == kernel_selector::EltwiseMode::DIV) {
if (p->m_pythondiv)

View File

@@ -47,7 +47,7 @@ JitConstants ConvertColorKernelBase::GetJitConstants(const convert_color_params&
jit.AddConstant(MakeJitConstant("CONVERT_FROM_I420", ""));
break;
default:
IE_THROW() << "Not supported input color format";
OPENVINO_THROW("Not supported input color format");
}
switch (params.output_color_format) {
@@ -58,7 +58,7 @@ JitConstants ConvertColorKernelBase::GetJitConstants(const convert_color_params&
jit.AddConstant(MakeJitConstant("CONVERT_TO_BGR", ""));
break;
default:
IE_THROW() << "Not supported output color format";
OPENVINO_THROW("Not supported output color format");
}
switch (params.mem_type) {
@@ -69,7 +69,7 @@ JitConstants ConvertColorKernelBase::GetJitConstants(const convert_color_params&
jit.AddConstant(MakeJitConstant("SURFACE_MEM", ""));
break;
default:
IE_THROW() << "Not supported memory type";
OPENVINO_THROW("Not supported memory type");
}
return jit;
}

View File

@@ -189,7 +189,7 @@ JitConstants EltwiseKernel_blocked_opt::MakeLoadJitConstants(const eltwise_param
else if (DataTensor::ChannelsCount(params.inputs[input_idx].GetLayout()) == 5)
default_indexing_str = "b, (f_block * " + toCodeString(vec_size) +"), z, y, x";
else
IE_ASSERT("MakeLoadJit : Unexpected dimension for eltwise optimized kernel.");
OPENVINO_ASSERT("MakeLoadJit : Unexpected dimension for eltwise optimized kernel.");
// Generate Jit
switch (input.mode) {
@@ -439,7 +439,7 @@ static inline int GetInnerBatchBlockSize(const DataTensor& tensor) {
case DataLayout::bs_fs_zyx_bsv32_fsv16:
return 32;
default:
IE_ASSERT("GetInnerBatchBlockSize : Unexpected format for eltwise_blocked_optimized kernel.");
OPENVINO_ASSERT("GetInnerBatchBlockSize : Unexpected format for eltwise_blocked_optimized kernel.");
}
return 1;
@@ -465,7 +465,7 @@ static inline int GetInnerFeatureBlockSize(const DataTensor& tensor) {
case DataLayout::bs_fs_zyx_bsv16_fsv32:
return 32;
default:
IE_ASSERT("GetInnerFeatureBlockSize : Unexpected format for eltwise_blocked_optimized kernel.");
OPENVINO_ASSERT("GetInnerFeatureBlockSize : Unexpected format for eltwise_blocked_optimized kernel.");
}
return 1;

View File

@@ -105,7 +105,7 @@ static inline Tensor::Dim GetGatherIndexDim(const gather_params& params) {
case GatherAxis::X:
return params.inputs[0].X();
default:
IE_THROW() << "Unknown gather axis=" << static_cast<int>(params.axis);
OPENVINO_THROW("Unknown gather axis=", static_cast<int>(params.axis));
}
}
@@ -124,7 +124,7 @@ static inline int64_t GetGatherAxisIndexInShapeInfo(const gather_params& params)
case GatherAxis::X:
return 7;
default:
IE_THROW() << "Unknown gather axis=" << static_cast<int>(params.axis);
OPENVINO_THROW("Unknown gather axis=", static_cast<int>(params.axis));
}
}
@@ -224,7 +224,7 @@ CommonDispatchData GatherKernelRef::SetDefault(const gather_params& params) cons
{Tensor::DataChannelName::Z, Tensor::DataChannelName::W},
{Tensor::DataChannelName::FEATURE, Tensor::DataChannelName::BATCH}};
} else {
IE_THROW() << "Unknown rank: rank=" << rank;
OPENVINO_THROW("Unknown rank: rank=", rank);
}
dispatchData.lws =

View File

@@ -2,7 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ie_metric_helpers.hpp"
#include "intel_gpu/plugin/legacy_api_helper.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"
#include "intel_gpu/graph/serialization/binary_buffer.hpp"
#include "intel_gpu/graph/serialization/layout_serializer.hpp"
#include "intel_gpu/graph/serialization/string_serializer.hpp"
@@ -14,10 +17,7 @@
#include "intel_gpu/plugin/compiled_model.hpp"
#include "intel_gpu/plugin/async_infer_request.hpp"
#include "intel_gpu/plugin/async_infer_request_legacy.hpp"
#include "intel_gpu/plugin/legacy_api_helper.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"
#include <description_buffer.hpp>
#include <threading/ie_executor_manager.hpp>
#include "threading/ie_cpu_streams_executor.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
@@ -143,16 +143,16 @@ IInferRequestInternal::Ptr CompiledModel::CreateInferRequest() {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "CompiledModel::CreateInferRequest");
InferenceEngine::IInferRequestInternal::Ptr internalRequest;
if (m_graphs.empty()) {
IE_THROW(NetworkNotLoaded);
OPENVINO_THROW("[GPU] Model not loaded");
}
for (auto& graph : m_graphs) {
if (graph == nullptr) {
IE_THROW(NetworkNotLoaded);
OPENVINO_THROW("[GPU] Model not loaded");
}
if (!graph->IsLoaded()) {
IE_THROW(NetworkNotLoaded) << ": no networks created";
OPENVINO_THROW("[GPU] Model not loaded: no networks created");
}
}
@@ -186,7 +186,7 @@ IInferRequestInternal::Ptr CompiledModel::CreateInferRequest() {
void CompiledModel::Export(std::ostream& networkModel) {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "CompiledModel::Export");
if (m_graphs.empty())
IE_THROW(NetworkNotLoaded);
OPENVINO_THROW("[GPU] Model not loaded");
cldnn::BinaryOutputBuffer ob(networkModel);
@@ -286,7 +286,7 @@ void CompiledModel::Export(std::ostream& networkModel) {
std::shared_ptr<ngraph::Function> CompiledModel::GetExecGraphInfo() {
if (m_graphs.empty())
IE_THROW(NetworkNotLoaded);
OPENVINO_THROW("[GPU] Model not loaded");
return m_graphs.front()->GetExecGraphInfo();
}
@@ -331,7 +331,7 @@ InferenceEngine::Parameter CompiledModel::GetMetric(const std::string &name) con
ov::PropertyName{ov::execution_devices.name(), PropertyMutability::RO}
};
} else if (name == ov::model_name) {
IE_ASSERT(!m_graphs.empty());
OPENVINO_ASSERT(!m_graphs.empty());
return decltype(ov::model_name)::value_type {m_graphs[0]->getName()};
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics;
@@ -367,7 +367,7 @@ InferenceEngine::Parameter CompiledModel::GetMetric(const std::string &name) con
} else if (name == ov::execution_devices) {
return decltype(ov::execution_devices)::value_type{m_context->getDeviceName()};
} else {
IE_THROW() << "Unsupported ExecutableNetwork metric: " << name;
OPENVINO_THROW("[GPU] Unsupported CompiledModel property: ", name);
}
}

View File

@@ -237,8 +237,7 @@ void CustomLayer::LoadFromFile(const std::string configFile, CustomLayerMap& cus
// config file might not exist - like global config, for example
return;
} else {
IE_THROW() << "Error loading custom layer configuration file: " << configFile << ", " << res.description()
<< " at offset " << res.offset;
OPENVINO_THROW("Error loading custom layer configuration file: ", configFile, ", ", res.description(), " at offset ", res.offset);
}
}
@@ -252,8 +251,7 @@ void CustomLayer::LoadFromFile(const std::string configFile, CustomLayerMap& cus
#error "Intel GPU plugin: unknown target system"
#endif
if (abs_path_ptr == nullptr) {
IE_THROW() << "Error loading custom layer configuration file: " << configFile << ", "
<< "Can't get canonicalized absolute pathname.";
OPENVINO_THROW("Error loading custom layer configuration file: ", configFile, ", ", "Can't get canonicalized absolute pathname.");
}
std::string abs_file_name(path);
@@ -268,8 +266,7 @@ void CustomLayer::LoadFromFile(const std::string configFile, CustomLayerMap& cus
// path is absolute
dir_path = abs_file_name.substr(0, dir_split_pos);
} else {
IE_THROW() << "Error loading custom layer configuration file: " << configFile << ", "
<< "Path is not valid";
OPENVINO_THROW("Error loading custom layer configuration file: ", configFile, ", ", "Path is not valid");
}
for (auto r = xmlDoc.document_element(); r; r = r.next_sibling()) {
@@ -277,7 +274,7 @@ void CustomLayer::LoadFromFile(const std::string configFile, CustomLayerMap& cus
layer->LoadSingleLayer(r);
if (layer->Error()) {
customLayers.clear();
IE_THROW() << layer->m_ErrorMessage;
OPENVINO_THROW(layer->m_ErrorMessage);
} else {
customLayers[layer->Name()] = layer;
}

View File

@@ -11,17 +11,13 @@
#include "intel_gpu/graph/serialization/vector_serializer.hpp"
#include "intel_gpu/runtime/profiling.hpp"
#include "intel_gpu/runtime/debug_configuration.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/plugin/graph.hpp"
#include "intel_gpu/plugin/simple_math.hpp"
#include "intel_gpu/plugin/infer_request.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include <description_buffer.hpp>
#include <threading/ie_executor_manager.hpp>
#include <exec_graph_info.hpp>
#include <ie_ngraph_utils.hpp>
#include "openvino/runtime/threading/executor_manager.hpp"
#include "openvino/runtime/exec_model_info.hpp"
#include <list>
#include <set>
@@ -34,9 +30,6 @@
#include <utility>
#include <sys/types.h>
#include <sys/stat.h>
#include <exec_graph_info.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
using namespace InferenceEngine;
using namespace InferenceEngine::details;
@@ -74,7 +67,7 @@ Graph::Graph(cldnn::BinaryInputBuffer &ib, RemoteContextImpl::Ptr context, const
#ifdef ENABLE_ONEDNN_FOR_GPU
get_engine().create_onednn_engine(config);
#else
IE_THROW() << "[GPU] Current model cache requires OneDNN, but cannot use it.";
OPENVINO_THROW("[GPU] Current model cache requires OneDNN, but cannot use it.");
#endif // ENABLE_ONEDNN_FOR_GPU
}
@@ -178,7 +171,7 @@ std::shared_ptr<cldnn::network> Graph::BuildNetwork(std::shared_ptr<cldnn::progr
auto externalQueue = m_context->get_external_queue();
if (externalQueue) {
if (m_config.get_property(ov::num_streams) != 1)
IE_THROW(ParameterMismatch) << "Throughput streams can't be used with shared queue!\n";
OPENVINO_THROW("Throughput streams can't be used with shared queue!\n");
auto &engine = m_program->get_engine();
network = std::make_shared<cldnn::network>(program, engine.create_stream(m_config, externalQueue), m_stream_id);
} else {
@@ -375,7 +368,7 @@ std::shared_ptr<ngraph::Function> Graph::GetExecGraphInfoByPrimitivesInfo(std::v
params.push_back(param);
return_node = param;
} else {
return_node = std::make_shared<ExecGraphInfoSerialization::ExecutionNode>(get_inputs(prim_info), output_size);
return_node = std::make_shared<ov::exec_model_info::ExecutionNode>(get_inputs(prim_info), output_size);
if (is_output) { // create additional result node
nodes.push_back(return_node);
@@ -405,12 +398,12 @@ std::shared_ptr<ngraph::Function> Graph::GetExecGraphInfoByPrimitivesInfo(std::v
std::map<std::string, std::string> info;
Precision prec = data_type_to_precision(prim_info.output_layout.data_type);
Precision inference_precision = data_type_to_precision(prim_info.runtime_precision);
info[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = prec.name();
info[ExecGraphInfoSerialization::LAYER_TYPE] = to_IE_type_name(prim_info.type_id);
info[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = prim_info.layout_str;
info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(prim_info.exec_id);
info[ExecGraphInfoSerialization::IMPL_TYPE] = prim_info.kernel_id;
info[ExecGraphInfoSerialization::RUNTIME_PRECISION] = inference_precision.name();
info[ov::exec_model_info::OUTPUT_PRECISIONS] = prec.name();
info[ov::exec_model_info::LAYER_TYPE] = to_IE_type_name(prim_info.type_id);
info[ov::exec_model_info::OUTPUT_LAYOUTS] = prim_info.layout_str;
info[ov::exec_model_info::EXECUTION_ORDER] = std::to_string(prim_info.exec_id);
info[ov::exec_model_info::IMPL_TYPE] = prim_info.kernel_id;
info[ov::exec_model_info::RUNTIME_PRECISION] = inference_precision.name();
std::vector<std::string> originalNames{find_origin_layers(prim_info.original_id)};
for (auto& fused_id : prim_info.c_fused_ids) {
@@ -419,7 +412,7 @@ std::shared_ptr<ngraph::Function> Graph::GetExecGraphInfoByPrimitivesInfo(std::v
originalNames.push_back(origin_id);
}
}
info[ExecGraphInfoSerialization::ORIGINAL_NAMES] = concat_strings(originalNames, ',');
info[ov::exec_model_info::ORIGINAL_NAMES] = concat_strings(originalNames, ',');
std::string exec_time = "not_executed";
if (perfMap.find(prim_info.original_id) != perfMap.end()) {
@@ -428,7 +421,7 @@ std::shared_ptr<ngraph::Function> Graph::GetExecGraphInfoByPrimitivesInfo(std::v
exec_time = std::to_string(perfCounter.realTime_avg());
}
}
info[ExecGraphInfoSerialization::PERF_COUNTER] = exec_time;
info[ov::exec_model_info::PERF_COUNTER] = exec_time;
for (auto&& kvp : info) {
return_node->get_rt_info()[kvp.first] = kvp.second;
@@ -436,7 +429,7 @@ std::shared_ptr<ngraph::Function> Graph::GetExecGraphInfoByPrimitivesInfo(std::v
results.back()->get_rt_info()[kvp.first] = kvp.second;
}
if (is_output)
results.back()->get_rt_info()[ExecGraphInfoSerialization::LAYER_TYPE] = "Result";
results.back()->get_rt_info()[ov::exec_model_info::LAYER_TYPE] = "Result";
nodes.push_back(return_node);
node2layer[prim_info.original_id] = return_node;
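Reading those rt_info entries back on the consumer side; `node` stands for any node of the reported exec graph, and the function is a sketch, not plugin code:

#include <iostream>
#include <memory>
#include <string>

#include "openvino/core/node.hpp"
#include "openvino/runtime/exec_model_info.hpp"

void print_node_info(const std::shared_ptr<ov::Node>& node) {
    const auto& rt = node->get_rt_info();
    // The keys are the same ov::exec_model_info constants the plugin writes above.
    auto layer_type = rt.at(ov::exec_model_info::LAYER_TYPE).as<std::string>();
    auto impl       = rt.at(ov::exec_model_info::IMPL_TYPE).as<std::string>();
    auto perf       = rt.at(ov::exec_model_info::PERF_COUNTER).as<std::string>();
    std::cout << layer_type << " / " << impl << " / " << perf << "\n";
}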
@@ -824,7 +817,7 @@ std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> Graph::GetPer
std::shared_ptr<cldnn::network> Graph::GetNetwork(size_t idx) const {
if (idx >= GetNetworksCount())
IE_THROW() << "Unable to find network with id=" << idx << ". Stored networks count: " << GetNetworksCount();
OPENVINO_THROW("Unable to find network with id=", idx, ". Stored networks count: ", GetNetworksCount());
return m_networks[idx];
}
@@ -836,18 +829,18 @@ std::string Graph::MapOutputName(std::string outName) const {
// Find correct output ID. Start with name stored in IR.
if (primitiveIDs.find(outName) == primitiveIDs.end()) {
IE_THROW() << "output with name " << outName << " was not found in primitiveIDs";
OPENVINO_THROW("output with name ", outName, " was not found in primitiveIDs");
}
std::string outputID = primitiveIDs.at(outName);
while (std::find(networkOutputsIDs.begin(), networkOutputsIDs.end(), outputID) == networkOutputsIDs.end()) {
// If current ID isn't found in cldnn network outputs, get previous primitive id and try again.
auto prim = allPrimitiveIds.find(outputID);
if (prim == allPrimitiveIds.end()) {
IE_THROW() << "Unknown primitive id " << outputID;
OPENVINO_THROW("Unknown primitive id ", outputID);
}
if (prevPrimitiveIDs.at(outputID).size() != 1 || prim->second != "_optimized_") {
IE_THROW() << "Unable to find parent for output primitive " << outputID;
OPENVINO_THROW("Unable to find parent for output primitive ", outputID);
}
outputID = prevPrimitiveIDs.at(outputID)[0];
}

View File

@@ -7,17 +7,14 @@
#include <map>
#include <functional>
#include <utility>
#include <description_buffer.hpp>
#include "intel_gpu/plugin/infer_request.hpp"
#include "intel_gpu/plugin/remote_context.hpp"
#include "intel_gpu/plugin/remote_allocators.hpp"
#include "intel_gpu/plugin/compiled_model.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/plugin/variable_state.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/runtime/debug_configuration.hpp"
#include "openvino/core/preprocess/input_tensor_info.hpp"
#include <ie_algorithm.hpp>
#include "ie_ngraph_utils.hpp"
#include <debug.h>
using namespace InferenceEngine;
@@ -35,13 +32,12 @@ void convertAndCopy(const InferenceEngine::Blob* src, dst_t* dst) {
}
auto t_blob = dynamic_cast<const InferenceEngine::TBlob<src_t>*>(src);
if (!t_blob) {
IE_THROW() << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
<< typeid(src_t).name();
OPENVINO_THROW("input type is ", src->getTensorDesc().getPrecision(), " but input is not ", typeid(src_t).name());
}
const src_t* srcPtr = t_blob->readOnly();
if (!srcPtr) {
IE_THROW(NotAllocated) << str_input_not_allocated;
OPENVINO_THROW(str_input_not_allocated);
}
for (size_t i = 0; i < t_blob->size(); i++)
dst[i] = srcPtr[i];
@@ -88,7 +84,7 @@ inline void checkAlloc(const Blob::Ptr& blob, const std::string& err_str) {
not_allocated = !ov::intel_gpu::getBlobImpl(blob->as<gpu::ClBlob>())->is_allocated();
}
if (not_allocated) {
IE_THROW(NotAllocated) << err_str;
OPENVINO_THROW(err_str);
}
}
@@ -98,7 +94,7 @@ void checkInputBlob(const Blob::Ptr &blob,
const std::string strNotMatched("The input blob size is not equal to the network input size");
if (!blob) {
IE_THROW(NotAllocated) << str_input_not_allocated;
OPENVINO_THROW(str_input_not_allocated);
}
SizeVector dims = foundInput->getTensorDesc().getDims();
@@ -107,7 +103,7 @@ void checkInputBlob(const Blob::Ptr &blob,
: 1;
if (refSize != blob->size()) {
IE_THROW() << strNotMatched + ": got " << blob->size() << " expecting " << refSize;
OPENVINO_THROW(strNotMatched + ": got ", blob->size(), " expecting ", refSize);
}
checkAlloc(blob, str_input_not_allocated);
@@ -119,7 +115,7 @@ void checkOutputBlob(const Blob::Ptr &blob,
const std::string strNotMatched("The output blob size is not equal to the network output size");
if (!blob) {
IE_THROW(NotAllocated) << str_output_not_allocated;
OPENVINO_THROW(str_output_not_allocated);
}
SizeVector dims = foundOutput->getTensorDesc().getDims();
size_t refSize = foundOutput->getTensorDesc().getLayout() != SCALAR
@@ -127,7 +123,7 @@ void checkOutputBlob(const Blob::Ptr &blob,
: 1;
if (refSize != blob->size()) {
IE_THROW() << strNotMatched + ": got " << blob->size() << " expecting " << refSize;
OPENVINO_THROW(strNotMatched + ": got ", blob->size(), " expecting ", refSize);
}
checkAlloc(blob, str_output_not_allocated);
@@ -175,10 +171,10 @@ void InferRequest::SetBlob(const std::string& name, const Blob::Ptr& data) {
// perform all common checks first
if (name.empty()) {
IE_THROW(NotFound) << "Failed to set blob with empty name";
OPENVINO_THROW("Failed to set blob with empty name");
}
if (!data)
IE_THROW(NotAllocated) << "Failed to set empty blob with name: \'" << name << "\'";
OPENVINO_THROW("Failed to set empty blob with name: \'", name, "\'");
if (inputTensorsMap.find(name) != inputTensorsMap.end()) {
inputTensorsMap.erase(name);
@@ -194,8 +190,7 @@ void InferRequest::SetBlob(const std::string& name, const Blob::Ptr& data) {
: foundOutput->getTensorDesc();
if (desc.getPrecision() != blobDesc.getPrecision()) {
IE_THROW(ParameterMismatch) << "Failed to set Blob with precision not corresponding to user "
<< (is_input ? "input" : "output") << " precision";
OPENVINO_THROW("Failed to set Blob with precision not corresponding to user ", (is_input ? "input" : "output"), " precision");
}
size_t netReqBinSize = std::accumulate(desc.getDims().begin(), desc.getDims().end(),
@@ -206,14 +201,14 @@ void InferRequest::SetBlob(const std::string& name, const Blob::Ptr& data) {
size_t dataSize = data->size();
if (0 == dataSize && !isDynamic) {
IE_THROW() << "Input data is empty. Input name: \'" << name << "\'";
OPENVINO_THROW("Input data is empty. Input name: \'", name, "\'");
}
size_t dataBinSize = dataSize * data->element_size();
if (!isDynamic && dataBinSize != netReqBinSize) {
IE_THROW() << "Incorrect binary data size for " << (is_input ? "input" : "output") <<
" blob with name: \'" << name << "\' " <<
"Current: " << dataBinSize << " Required: " << netReqBinSize;
OPENVINO_THROW("Incorrect binary data size for ", (is_input ? "input" : "output"),
" blob with name: \'", name, "\' ",
"Current: ", dataBinSize, " Required: ", netReqBinSize);
}
if (is_input) {
@@ -248,7 +243,7 @@ void InferRequest::set_output(const std::string& name, const Blob::Ptr& data) {
} else {
if (!isDynamic) {
if (data->buffer() == nullptr)
IE_THROW(NotAllocated) << str_output_not_allocated << " Output name: \'" << name << "\'";
OPENVINO_THROW(str_output_not_allocated, " Output name: \'", name, "\'");
}
}
_outputs[name] = data;
@@ -261,16 +256,16 @@ void InferRequest::SetBlobs(const std::string& name, const std::vector<Blob::Ptr
}
if (name.empty()) {
IE_THROW(NotFound) << "Failed to set blobs with empty name";
OPENVINO_THROW("Failed to set blobs with empty name");
}
if (blobs.empty()) {
IE_THROW(NotAllocated) << "Failed to set empty blobs with name: \'" << name << "\'";
OPENVINO_THROW("Failed to set empty blobs with name: \'", name, "\'");
}
bool empty_data = std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
return blob->size() == 0;
});
if (empty_data) {
IE_THROW() << "At least one of the input blobs is empty. Input name: \'" << name << "\'";
OPENVINO_THROW("At least one of the input blobs is empty. Input name: \'", name, "\'");
}
bool is_buffer = std::all_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
@@ -287,7 +282,7 @@ void InferRequest::SetBlobs(const std::string& name, const std::vector<Blob::Ptr
is_host &= !is_remote;
if (!is_host && !is_remote) {
IE_THROW() << "Incorrect input blobs. All blobs must be of the same type";
OPENVINO_THROW("Incorrect input blobs. All blobs must be of the same type");
}
InputInfo::Ptr foundInput;
@@ -295,7 +290,7 @@ void InferRequest::SetBlobs(const std::string& name, const std::vector<Blob::Ptr
bool is_input = findInputAndOutputBlobByName(name, foundInput, foundOutput);
if (!is_input) {
IE_THROW() << "SetBlobs method doesn't support outputs";
OPENVINO_THROW("SetBlobs method doesn't support outputs");
}
const TensorDesc& desc = foundInput->getTensorDesc();
@@ -305,8 +300,7 @@ void InferRequest::SetBlobs(const std::string& name, const std::vector<Blob::Ptr
desc.getPrecision().size(),
std::multiplies<size_t>());
if (dataBinSize != netReqBinSize) {
IE_THROW() << "Incorrect binary data size for input blobs with name: \'" << name << "\' " <<
"Current: " << dataBinSize << " Required: " << netReqBinSize;
OPENVINO_THROW("Incorrect binary data size for input blobs with name: \'", name, "\' ", "Current: ", dataBinSize, " Required: ", netReqBinSize);
}
if (is_surface) {
@@ -337,7 +331,7 @@ void InferRequest::checkBlobs() {
if (foundInputPair != std::end(_networkInputs)) {
foundInput = foundInputPair->second;
} else {
IE_THROW(NotFound) << "Failed to find input with name: \'" << input.first << "\'";
OPENVINO_THROW("Failed to find input with name: \'", input.first, "\'");
}
auto node = findInputByNodeName(input.first);
bool is_dynamic = (node && node->get_output_partial_shape(0).is_dynamic());
@@ -353,7 +347,7 @@ void InferRequest::checkBlobs() {
if (foundOutputPair != std::end(_networkOutputs)) {
foundOutput = foundOutputPair->second;
} else {
IE_THROW(NotFound) << "Failed to find output with name: \'" << output.first << "\'";
OPENVINO_THROW("Failed to find output with name: \'", output.first, "\'");
}
auto node = findOutputByNodeName(output.first);
bool is_dynamic = (node && node->get_output_partial_shape(0).is_dynamic());
@@ -366,9 +360,7 @@ void InferRequest::SetGraph(std::shared_ptr<Graph> graph) {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "InferRequest::SetGraph");
m_graph = graph;
if (m_graph == nullptr) {
IE_THROW(NetworkNotLoaded);
}
OPENVINO_ASSERT(m_graph != nullptr, "[GPU] Model not loaded");
allocate_inputs();
allocate_outputs();
@@ -378,7 +370,7 @@ void InferRequest::SetGraph(std::shared_ptr<Graph> graph) {
InferRequest::InferRequest(InputsDataMap networkInputs, OutputsDataMap networkOutputs,
const CompiledModel::Ptr& execNetwork)
: IInferRequestInternal(networkInputs, networkOutputs) {
IE_ASSERT(nullptr != execNetwork);
OPENVINO_ASSERT(nullptr != execNetwork);
streamExecutor = dynamic_cast<InferenceEngine::IStreamsExecutor*>(execNetwork->m_taskExecutor.get());
m_context = std::dynamic_pointer_cast<InferenceEngine::gpu::ClContext>(execNetwork->GetContext());
OPENVINO_ASSERT(m_context != nullptr, "[GPU] Can't initialize context of InferRequest: wrong context type");
@@ -388,7 +380,7 @@ InferRequest::InferRequest(const std::vector<std::shared_ptr<const ov::Node>>& i
const std::vector<std::shared_ptr<const ov::Node>>& outputs,
const CompiledModel::Ptr& execNetwork)
: IInferRequestInternal(inputs, outputs) {
IE_ASSERT(nullptr != execNetwork);
OPENVINO_ASSERT(nullptr != execNetwork);
streamExecutor = dynamic_cast<InferenceEngine::IStreamsExecutor*>(execNetwork->m_taskExecutor.get());
m_context = std::dynamic_pointer_cast<InferenceEngine::gpu::ClContext>(execNetwork->GetContext());
OPENVINO_ASSERT(m_context != nullptr, "[GPU] Can't initialize context of InferRequest: wrong context type");
@@ -490,7 +482,7 @@ void InferRequest::wait_notify() {
void InferRequest::wait() {
if (internal_outputs.empty()) {
IE_THROW() << "Inference was not started!\n";
OPENVINO_THROW("Inference was not started!\n");
}
// wait for completion & collect outputs as requested by the model
for (auto& no : _networkOutputs) {
@@ -519,7 +511,7 @@ void InferRequest::wait() {
if (static_cast<int32_t>(out_rank) < static_cast<int32_t>(dims.size())) {
for (size_t i = out_rank; i < dims.size(); i++) {
if (dims[i] != 1)
IE_THROW() << "[GPU] Unexpected out shape";
OPENVINO_THROW("[GPU] Unexpected out shape");
}
dims.resize(out_rank);
}
@@ -715,7 +707,7 @@ void InferRequest::copy_input_data(std::shared_ptr<cldnn::network> network,
break;
}
default:
IE_THROW() << "The plugin does not support input " << inputBlob.getTensorDesc().getPrecision() << " precision";
OPENVINO_THROW("The plugin does not support input ", inputBlob.getTensorDesc().getPrecision(), " precision");
}
}
@@ -820,7 +812,7 @@ void InferRequest::InferImpl() {
std::map<std::string, InferenceEngineProfileInfo> InferRequest::GetPerformanceCounts() const {
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "InferRequest::GetPerformanceCounts");
if (!m_useProfiling) {
IE_THROW() << "Performance counters were not enabled";
OPENVINO_THROW("Performance counters were not enabled");
} else {
return m_graph->GetPerformanceCounts();
}
@@ -956,7 +948,7 @@ void InferRequest::prepare_input(const cldnn::primitive_id& inputName, Blob::Ptr
remote_ptr :
reqBlob);
if (!impl->is_allocated()) {
IE_THROW() << str_input_not_allocated;
OPENVINO_THROW(str_input_not_allocated);
}
auto inputMem = impl->get_memory();
@@ -1001,7 +993,7 @@ void InferRequest::prepare_input(const cldnn::primitive_id& inputName, Blob::Ptr
break;
}
default:
IE_THROW() << "Unsupported input precision " << prec;
OPENVINO_THROW("Unsupported input precision ", prec);
}
}
@@ -1032,7 +1024,7 @@ void InferRequest::prepare_output(const cldnn::primitive_id& outputName, Blob::P
: reqBlob->as<gpu::ClBlob>();
auto impl = getBlobImpl(output_blob_ptr);
if (!impl->is_allocated()) {
IE_THROW(NotAllocated) << str_output_not_allocated;
OPENVINO_THROW(str_output_not_allocated);
}
auto outputMem = impl->get_memory();
_nw_ptr->set_output_memory(internalName, outputMem);
@@ -1080,7 +1072,7 @@ Blob::Ptr InferRequest::reinterpret_device_blob(Blob::Ptr data, const TensorDesc
auto remote_blob = data->as<gpu::ClBlob>();
if (!remote_blob)
IE_THROW() << "Invalid blob used for reinterpretation";
OPENVINO_THROW("Invalid blob used for reinterpretation");
remote_blob->setShape(new_desc.getDims());

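Across these hunks the conversion is mechanical: the stream-style IE_THROW() << a << b becomes the variadic OPENVINO_THROW(a, b, ...), which concatenates its arguments into an ov::Exception message. A minimal standalone sketch of the rule, assuming only "openvino/core/except.hpp" (the header that defines the macro):

#include <string>
#include "openvino/core/except.hpp"

void check_output_found(bool found, const std::string& name) {
    // Old: IE_THROW(NotFound) << "Failed to find output with name: '" << name << "'";
    if (!found)
        OPENVINO_THROW("Failed to find output with name: '", name, "'");
}

Note that the typed legacy variants (NotFound, NotAllocated, NetworkNotLoaded) all collapse into plain ov::Exception here; where the distinction matters, it survives only in the message text.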

@@ -3,9 +3,6 @@
//
#include "intel_gpu/plugin/legacy_api_helper.hpp"
#include "ie_plugin_config.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "gpu/gpu_config.hpp"
namespace ov {
namespace intel_gpu {
@@ -247,6 +244,7 @@ std::vector<std::string> LegacyAPIHelper::get_supported_configs() {
}
std::vector<std::string> LegacyAPIHelper::get_supported_metrics() {
OPENVINO_SUPPRESS_DEPRECATED_START
std::vector<std::string> supported_metrics = {
METRIC_KEY(AVAILABLE_DEVICES),
METRIC_KEY(SUPPORTED_METRICS),
@@ -265,6 +263,7 @@ std::vector<std::string> LegacyAPIHelper::get_supported_metrics() {
GPU_METRIC_KEY(EXECUTION_UNITS_COUNT),
GPU_METRIC_KEY(MEMORY_STATISTICS),
};
OPENVINO_SUPPRESS_DEPRECATED_END
return supported_metrics;
}


@@ -26,8 +26,7 @@ static void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v
for (size_t i = 1; i < 4; ++i) {
auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(i));
if (!inConst)
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(inConst != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
std::vector<int32_t> sizes = inConst->cast_vector<int32_t>();
int32_t default_size = i == 1 ? 1 : 0;


@@ -97,8 +97,7 @@ static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v1::
validate_inputs_count(op, {2, 3});
if (op->get_broadcast_spec().m_type == ngraph::op::AutoBroadcastType::NONE && op->get_input_size() == 3) {
auto axis_mapping_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!axis_mapping_node)
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(axis_mapping_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
auto axis_mapping = axis_mapping_node->get_axis_set_val();
CreateCommonBroadcastOp(p, op, axis_mapping);
@@ -113,8 +112,7 @@ static void CreateBroadcastOp(Program& p, const std::shared_ptr<ngraph::op::v3::
ngraph::AxisSet axis_mapping;
if (op->get_input_size() == 3) {
auto axis_mapping_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!axis_mapping_node)
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(axis_mapping_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
axis_mapping = axis_mapping_node->get_axis_set_val();
}


@@ -55,7 +55,7 @@ static cldnn::tensor getConstTensor(const ngraph::Shape constDims) {
break;
case 0: constTensor = cldnn::tensor(1, 1, 1, 1);
break;
default: IE_THROW() << "Invalid constant blob dimensions";
default: OPENVINO_THROW("Invalid constant blob dimensions");
}
return constTensor;
}
@@ -182,7 +182,7 @@ void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std::
if (props.swapOI) {
size_t expected_min_rank = 2 + (props.hasGroupDimension ? 1 : 0);
if (expected_min_rank > constDims.size())
IE_THROW() << "Invalid constant properties or shape";
OPENVINO_THROW("Invalid constant properties or shape");
if (props.hasGroupDimension) {
std::swap(newDims[2], newDims[1]);


@@ -106,7 +106,7 @@ static void CreateConvolutionBackpropDataOp(Program& p, const std::shared_ptr<ng
auto dilations = op->get_dilations();
for (auto d : dilations) {
if (d != 1) {
IE_THROW() << "Unsupported dilation in ConvolutionBackpropData " << op->get_friendly_name();
OPENVINO_THROW("Unsupported dilation in ConvolutionBackpropData ", op->get_friendly_name());
}
}
@@ -194,7 +194,7 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
auto dilations = op->get_dilations();
for (auto d : dilations) {
if (d != 1) {
IE_THROW() << "Unsupported dilation in GroupConvolutionBackpropData " << op->get_friendly_name();
OPENVINO_THROW("Unsupported dilation in GroupConvolutionBackpropData ", op->get_friendly_name());
}
}


@@ -47,11 +47,11 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
if (reordered_inputs.size() == 3) {
auto blank_index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!blank_index_node) {
IE_THROW() << "Unsupported blank_index node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported blank_index node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
float val;
if (ngraph::shape_size(blank_index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(blank_index_node, val)) {
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
blank_index = static_cast<uint32_t>(val);
reordered_inputs.pop_back();


@@ -24,9 +24,7 @@ static void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cum
int64_t axis = 0;
if (op->get_input_size() == 2) {
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!axes_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
axis = axes_constant->cast_vector<int64_t>()[0];
}
OPENVINO_SUPPRESS_DEPRECATED_START


@@ -44,7 +44,7 @@ public:
CustomLayerAttributeVisitor() : m_values({}) { }
void on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) override {
IE_THROW() << "Attribute " << name << " can't be processed\n";
OPENVINO_THROW("Attribute ", name, " can't be processed\n");
}
// The remaining adapter methods fall back on the void adapter if not implemented
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& adapter) override {
@@ -165,7 +165,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
break;
}
default:
IE_THROW() << "Invalid custom layer param type: " << param.type << " in operation: " << op->get_friendly_name();
OPENVINO_THROW("Invalid custom layer param type: ", param.type, " in operation: ", op->get_friendly_name());
}
}
const std::string layerTitle("\n// Layer " + op->get_friendly_name() + " using Custom Layer " + customLayer->Name() + "\n");
@@ -194,7 +194,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
// if input index is greater than -1, take dimension from input
if (iidx >= 0) {
if (static_cast<size_t>(iidx) >= op->get_input_size())
IE_THROW() << "Invalid input tensor for index: " << iidx;
OPENVINO_THROW("Invalid input tensor for index: ", iidx);
auto inputDims = op->get_input_shape(iidx);
xDim = static_cast<int>(inputDims[inputDims.size() - 1]);


@@ -18,7 +18,7 @@ static cldnn::depth_to_space_mode GetDepthMode(ngraph::op::v0::DepthToSpace::Dep
return cldnn::depth_to_space_mode::blocks_first;
case ngraph::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST:
return cldnn::depth_to_space_mode::depth_first;
default: IE_THROW() << "Unsupported DepthToSpaceMode value: " << static_cast<int>(mode);
default: OPENVINO_THROW("Unsupported DepthToSpaceMode value: ", static_cast<int>(mode));
}
return cldnn::depth_to_space_mode::blocks_first;
}


@@ -22,7 +22,7 @@ static cldnn::prior_box_code_type PriorBoxCodeFromString(const std::string& str)
if (it != CodeNameToType.end()) {
return it->second;
} else {
IE_THROW() << "Unknown Prior-Box code type: " << str;
OPENVINO_THROW("Unknown Prior-Box code type: ", str);
}
return cldnn::prior_box_code_type::corner;
}


@@ -25,9 +25,7 @@ void createDft(Program& p,
const auto& out_shape = op->get_output_shape(0);
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!axes_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << friendly_name << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", friendly_name, " (", op->get_type_name(), ")");
auto axes = axes_constant->cast_vector<int64_t>();
uint8_t axis_correction = static_cast<uint8_t>(op->get_input_shape(0).size());
if (direction != cldnn::dft_direction::forward || mode != cldnn::dft_mode::real) {
@@ -40,9 +38,7 @@ void createDft(Program& p,
std::vector<int64_t> signal_size;
if (op->get_input_size() == 3) {
auto signal_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
if (!signal_size_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << friendly_name << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(signal_size_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", friendly_name, " (", op->get_type_name(), ")");
signal_size = signal_size_constant->cast_vector<int64_t>();
}


@@ -162,7 +162,7 @@ static void CreatePowerOp(Program& p, const std::shared_ptr<ngraph::op::v1::Powe
if (ngraph::shape_size(power_node->get_output_shape(0)) == 1) {
float pow;
if (!ov::op::util::get_single_value(power_node, pow))
IE_THROW() << "Invalid parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Invalid parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::pow, {pow});
return;
}


@@ -25,13 +25,11 @@ static void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngr
int32_t defaultIndex = -1;
if (inputs.size() > 3) {
auto index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(3));
if (!index_node) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(index_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
float val;
if (ngraph::shape_size(index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(index_node, val))
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
defaultIndex = static_cast<int32_t>(val);
inputs.erase(inputs.begin() + 3); // Remove "default_index"
@@ -113,13 +111,11 @@ static void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngrap
// port of default_index is 4 by default, but we removed "num_segments" above, so now it's equal to 3
if (inputs.size() > 3) {
auto index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(4));
if (!index_node) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(index_node != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
float val;
if (ngraph::shape_size(index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(index_node, val))
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
defaultIndex = static_cast<int32_t>(val);
inputs.erase(inputs.begin() + 3); // Remove "default_index"


@@ -18,7 +18,7 @@ static void CreateExperimentalDetectronDetectionOutputOp(
validate_inputs_count(op, {4});
if (op->get_output_size() != 3) {
IE_THROW() << "ExperimentalDetectronDetectionOutput requires 3 outputs";
OPENVINO_THROW("ExperimentalDetectronDetectionOutput requires 3 outputs");
}
auto inputs = p.GetInputInfo(op);
@@ -54,7 +54,7 @@ static void CreateExperimentalDetectronDetectionOutputOp(
const auto expectedPrimInputCount = 4 + 2; // 4 operation inputs plus 2 input-outputs
if (inputs.size() != expectedPrimInputCount) {
IE_THROW() << "experimental_detectron_detection_output primitive requires 6 inputs";
OPENVINO_THROW("experimental_detectron_detection_output primitive requires 6 inputs");
}
const cldnn::experimental_detectron_detection_output prim{layer_name,


@@ -18,7 +18,7 @@ static void CreateExperimentalDetectronGenerateProposalsSingleImageOp(
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>& op) {
validate_inputs_count(op, {4});
if (op->get_output_size() != 2) {
IE_THROW() << "ExperimentalDetectronGenerateProposalsSingleImage requires 2 outputs";
OPENVINO_THROW("ExperimentalDetectronGenerateProposalsSingleImage requires 2 outputs");
}
auto inputs = p.GetInputInfo(op);


@@ -17,7 +17,7 @@ static inline std::string PadToString(ngraph::op::PadType pad) {
case ngraph::op::PadType::SAME_UPPER: return "same_upper";
case ngraph::op::PadType::SAME_LOWER: return "same_lower";
case ngraph::op::PadType::VALID: return "valid";
default: IE_THROW() << "Unsupported pad type in ExtractImagePatches primitive " << pad;
default: OPENVINO_THROW("Unsupported pad type in ExtractImagePatches primitive ", pad);
}
return "";


@@ -18,7 +18,7 @@ static void CreateGenerateProposalsIEInternalOp(
const std::shared_ptr<ov::op::internal::GenerateProposalsIEInternal>& op) {
validate_inputs_count(op, {4});
if (op->get_output_size() != 3) {
IE_THROW() << "GenerateProposals requires 3 outputs";
OPENVINO_THROW("GenerateProposals requires 3 outputs");
}
auto inputs = p.GetInputInfo(op);


@@ -135,7 +135,7 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
const cldnn::primitive_id execution_condition_id = layer_type_name_ID(op->get_input_node_shared_ptr(1));
const int64_t num_iterations = op->get_num_iterations();
if (num_iterations < 0) {
IE_THROW() << "loop's num_iteration cannot be negative";
OPENVINO_THROW("loop's num_iteration cannot be negative");
}
const cldnn::primitive_id num_iteration_id = layerName + "_numIteration";
{


@@ -27,9 +27,7 @@ static void CreateLRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::LRN>&
std::string layerName = layer_type_name_ID(op);
auto axis_const = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
if (!axis_const) {
IE_THROW() << "Unsupported axes node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(axis_const != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
auto axis_value = axis_const->cast_vector<int64_t>();
auto localSize = static_cast<uint32_t>(op->get_nsize());


@@ -62,7 +62,7 @@ static std::tuple<bool, PartialShape, PartialShape> get_aligned_shapes(const Par
}
} else {
if (a_dim != b_dim && a_dim.get_length() > 1 && b_dim.get_length() > 1) {
IE_THROW() << "Shapes can't be aligned: " << shape_a_aligned << " " << shape_b_aligned;
OPENVINO_THROW("Shapes can't be aligned: ", shape_a_aligned, " ", shape_b_aligned);
}
auto max_value = std::max(a_dim.get_length(), b_dim.get_length());
shape_a_aligned[i] = shape_b_aligned[i] = max_value;
@@ -96,7 +96,7 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
if (is_fc) {
if (shape_a_aligned.size() < 2 || shape_b_aligned.size() < 2) {
IE_THROW() << "MatMul " << op->get_friendly_name() << " shapes are inconsistent.";
OPENVINO_THROW("MatMul ", op->get_friendly_name(), " shapes are inconsistent.");
}
auto inputName = inputs[0].pid;


@@ -53,7 +53,7 @@ static void CreateMulticlassNmsIEInternalOp(Program& p, const std::shared_ptr<op
constexpr auto expected_inputs_count = 3 + 2; // 3 operation inputs plus 2 additional outputs
if (inputs.size() != expected_inputs_count) {
IE_THROW() << "multiclass_nms primitive requires 5 inputs";
OPENVINO_THROW("multiclass_nms primitive requires 5 inputs");
}
const cldnn::multiclass_nms prim{layer_name,


@@ -52,8 +52,7 @@ static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>&
validate_inputs_count(op, {2});
auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!inConst)
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(inConst != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
std::vector<int64_t> axes = inConst->cast_vector<int64_t>();
OPENVINO_SUPPRESS_DEPRECATED_START


@@ -83,7 +83,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
case 4: prim.iou_threshold = reordered_inputs[3].pid;
case 3: prim.num_select_per_class = reordered_inputs[2].pid;
case 2: break;
default: IE_THROW() << "Incorrect number of input primitives for layer: " << op->get_friendly_name();
default: OPENVINO_THROW("Incorrect number of input primitives for layer: ", op->get_friendly_name());
}
p.add_primitive(*op, prim);
@@ -128,7 +128,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
inputs.push_back(cldnn::input_info(non_max_suppression_mutable_id_w_first));
}
case 1: break;
default: IE_THROW() << "Incorrect number of output for layer: " << op->get_friendly_name();
default: OPENVINO_THROW("Incorrect number of output for layer: ", op->get_friendly_name());
}
auto nonMaxSuppressionLayerName = num_outputs > 1 ? layer_type_name_ID(op) + ".out0" : layer_type_name_ID(op);
@@ -150,7 +150,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
case 4: prim.iou_threshold = reordered_inputs[3].pid;
case 3: prim.num_select_per_class = reordered_inputs[2].pid;
case 2: break;
default: IE_THROW() << "Incorrect number of input primitives for layer: " << op->get_friendly_name();
default: OPENVINO_THROW("Incorrect number of input primitives for layer: ", op->get_friendly_name());
}
switch (num_outputs) {


@@ -21,8 +21,7 @@ static void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0
// params
auto const_axis = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
if (!const_axis)
IE_THROW() << "Unsupported axis node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(const_axis != nullptr, "[GPU] Unsupported axis node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
auto axis = const_axis->cast_vector<size_t>();
bool across_spatial = !(axis.size() == 1 && axis[0] == 1);
@@ -42,7 +41,7 @@ static void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0
auto bufSize = scale->get_output_tensor(0).size();
if (bufSize != constLayout.bytes_count())
IE_THROW() << "Invalid scales buffer in NormalizeL2 op " << op->get_friendly_name();
OPENVINO_THROW("Invalid scales buffer in NormalizeL2 op ", op->get_friendly_name());
std::memcpy(&buf[0], scale->get_data_ptr(), bufSize);
auto scalesName = layerName + "_cldnn_input_scales";


@@ -23,21 +23,21 @@ static void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::One
auto on_value_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
auto off_value_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(3));
if (on_value_node == nullptr || off_value_node == nullptr || depth_value_node == nullptr)
IE_THROW() << "Unsupported on/off/depth node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(on_value_node != nullptr && off_value_node != nullptr && depth_value_node != nullptr,
"[GPU] Unsupported on/off/depth nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
float on_value;
float off_value;
if (!ov::op::util::get_single_value(on_value_node, on_value) ||
!ov::op::util::get_single_value(off_value_node, off_value)) {
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
auto dims = op->get_input_partial_shape(0);
if (axis < -1 || axis > static_cast<int16_t>(dims.size()))
IE_THROW() << op->get_friendly_name() << " Incorrect OneHot axis value: " << axis << ". Should be between -1 and " << dims.size();
OPENVINO_THROW(op->get_friendly_name(), " Incorrect OneHot axis value: ", axis, ". Should be between -1 and ", dims.size());
if (axis == -1) {
axis = dims.size();

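Guards of the form "if (!x) throw" become OPENVINO_ASSERT, which throws when its first argument is false, so the assertion states the positive condition. With several pointers this flips De Morgan-style: throwing when any node is null means asserting that all of them are non-null, hence the &&-joined condition in the OneHot check above. A minimal sketch with placeholder parameter names:

#include "openvino/core/except.hpp"

void require_all_constants(const void* on, const void* off, const void* depth) {
    // Throws ov::Exception unless every node was successfully cast to Constant.
    OPENVINO_ASSERT(on != nullptr && off != nullptr && depth != nullptr,
                    "[GPU] Unsupported on/off/depth nodes type");
}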

@@ -21,9 +21,8 @@ namespace intel_gpu {
static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::Parameter>& op) {
auto networkInputs = p.GetNetworkInputs();
if (networkInputs.find(op->get_friendly_name()) == networkInputs.end()) {
IE_THROW() << "Can't find input " << op->get_friendly_name() << " in InputsDataMap";
}
OPENVINO_ASSERT(networkInputs.find(op->get_friendly_name()) != networkInputs.end(),
"[GPU] Can't find input ", op->get_friendly_name(), " in InputsDataMap");
auto inputInfo = networkInputs.at(op->get_friendly_name());
// first create and add the input layout
@@ -61,7 +60,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
if ((meanChannels > 0) &&
(meanChannels != static_cast<size_t>(networkInputLayout.feature()))) {
IE_THROW() << "Mismatched mean values channels in input " << inputName;
OPENVINO_THROW("Mismatched mean values channels in input ", inputName);
}
switch (preProcess.getMeanVariant()) {
@@ -70,14 +69,14 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
if (meanChannels > 0) {
for (size_t c = 0; c < meanChannels; c++) {
if (fabs(preProcess[c]->stdScale - 1.0f) > 1e-10)
IE_THROW() << "not supporting stdScale yet in input " << inputName;
OPENVINO_THROW("not supporting stdScale yet in input ", inputName);
meanValues.push_back(preProcess[c]->meanValue);
}
}
break;
}
case MEAN_IMAGE: {
IE_ASSERT(meanChannels);
OPENVINO_ASSERT(meanChannels);
// first merge all mean values to a single blob
// todo make sure mean blob precision is the same as the input precision
auto meanDims = input_pshape;
@@ -86,7 +85,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
case 4: meanDims[0] = 1;
break;
default:
IE_THROW() << "Missing batch dimensions in input image";
OPENVINO_THROW("Missing batch dimensions in input image");
}
const TensorDesc desc(Precision::FP32, meanDims.to_shape(), TensorDesc::getLayoutByDims(meanDims.to_shape()));
TBlob<float> meanBlob(desc);
@@ -94,7 +93,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
auto meanBlobData = meanBlob.data();
for (size_t c = 0; c < meanChannels; c++) {
if (fabs(preProcess[c]->stdScale - 1.0f) > 1e-10)
IE_THROW() << "not supporting stdScale yet in input " << inputName;
OPENVINO_THROW("not supporting stdScale yet in input ", inputName);
auto channelMeanBlob = std::dynamic_pointer_cast<TBlob<float>>(preProcess[c]->meanData);
auto channelSize = channelMeanBlob->size();
auto channelBlobData = channelMeanBlob->data();
@@ -128,7 +127,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
}
break;
}
default: IE_THROW() << "Invalid mean variant in input " << inputName;
default: OPENVINO_THROW("Invalid mean variant in input ", inputName);
break;
}
@@ -230,7 +229,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
cldnn::reorder_mean_mode::subtract), {inputName});
break;
}
default: IE_THROW() << "Invalid mean variant in input " << inputName;
default: OPENVINO_THROW("Invalid mean variant in input ", inputName);
break;
}
}


@@ -79,7 +79,7 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Ma
static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::MaxPool>& op) {
validate_inputs_count(op, {1});
if (op->get_output_size() != 2) {
IE_THROW() << "MaxPool opset 8 requires 2 outputs";
OPENVINO_THROW("[GPU] MaxPool opset 8 requires 2 outputs");
}
auto inputs = p.GetInputInfo(op);
const auto layer_type_name = layer_type_name_ID(op);


@@ -125,9 +125,8 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v8::P
const auto output_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(0));
const auto image_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!(output_size_constant && image_size_constant)) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(output_size_constant && image_size_constant,
"[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
const auto output_size = output_size_constant->cast_vector<int64_t>();
const auto width = output_size[0];


@@ -31,9 +31,7 @@ static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
int64_t rank = input_pshape.size();
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!axes_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
std::vector<int64_t> axes = axes_constant->cast_vector<int64_t>();
for (size_t i = 0; i < axes.size(); i++) {
@@ -41,7 +39,7 @@ static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
axes[i] += rank;
if (axes[i] >= static_cast<int64_t>(rank) || axes[i] < 0)
IE_THROW() << "Unsupported axis value in " << op->get_friendly_name() << " (" << axes[i] << ")";
OPENVINO_THROW("[GPU] Unsupported axis value in ", op->get_friendly_name(), " (", axes[i], ")");
}
auto reducePrim = cldnn::reduce(layerName,


@@ -29,9 +29,7 @@ static void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Res
}
}
auto it = networkOutputs.find(inputID);
if (it == networkOutputs.end()) {
IE_THROW() << "Can't find output " << inputID << " in OutputsDataMap";
}
OPENVINO_ASSERT(it != networkOutputs.end(), "[GPU] Can't find output ", inputID, " in OutputsDataMap");
std::string originalOutName = it->first;
DataPtr outputData = it->second;
@@ -56,7 +54,7 @@ static void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Res
outputlayout != NC &&
outputlayout != C &&
outputlayout != SCALAR) {
IE_THROW() << "Unsupported layout (" << outputlayout << ") in output: " << originalOutName;
OPENVINO_THROW("[GPU] Unsupported layout (", outputlayout, ") in output: ", originalOutName);
}
auto out_rank = op->get_output_partial_shape(0).size();
auto out_format = cldnn::format::get_default_format(out_rank);


@@ -41,12 +41,11 @@ void GetLSTMActivationParams(const std::shared_ptr<T>& op,
auto op_activations = op->get_activations();
if (!op_activations.empty()) {
if (op_activations.size() != 3)
IE_THROW() << "Wrong number of activations for LSTMCell op " << op->get_friendly_name();
OPENVINO_THROW("Wrong number of activations for LSTMCell op ", op->get_friendly_name());
for (int i = 0; i < 3; i++) {
auto af = GetActivationFunc(op_activations[i]);
if (af == cldnn::activation_func::none)
IE_THROW() << "Wrong or unsupported activation type " << op_activations[i]
<< " for LSTMCell op " << op->get_friendly_name();
OPENVINO_THROW("Wrong or unsupported activation type ", op_activations[i], " for LSTMCell op ", op->get_friendly_name());
activations[i] = af;
}
}
@@ -54,7 +53,7 @@ void GetLSTMActivationParams(const std::shared_ptr<T>& op,
auto op_b = op->get_activations_beta();
if (!op_a.empty()) {
if (op_a.size() != 3 || op_b.size() != 3)
IE_THROW() << "Wrong number of activation parameters for LSTMCell op " << op->get_friendly_name();
OPENVINO_THROW("Wrong number of activation parameters for LSTMCell op ", op->get_friendly_name());
for (int i = 0; i < 3; i++) {
cldnn::activation_additional_params params = { op_a[i], op_b[i] };
activation_params.push_back(cldnn::activation_additional_params(params));
@@ -80,7 +79,7 @@ static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::L
if (in_dims0.size() != 2 ||
op->get_input_shape(1).size() != 2 ||
op->get_input_shape(2).size() != 2)
IE_THROW() << "Wrong input shapes for LSTMCell op " << op->get_friendly_name();
OPENVINO_THROW("Wrong input shapes for LSTMCell op ", op->get_friendly_name());
lstm_input_size = static_cast<int>(in_dims0.back());
lstm_batch_size = static_cast<int>(in_dims0.at(in_dims0.size()-2));
@@ -175,7 +174,7 @@ static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v
if (in_dims0.size() != 3 ||
op->get_input_shape(1).size() != 3 ||
op->get_input_shape(2).size() != 3)
IE_THROW() << "Wrong input shapes for LSTMSequence op " << op->get_friendly_name();
OPENVINO_THROW("Wrong input shapes for LSTMSequence op ", op->get_friendly_name());
lstm_input_size = static_cast<int>(in_dims0.back());
lstm_sequence_len = static_cast<int>(in_dims0.at(in_dims0.size() - 2));


@@ -28,15 +28,11 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
const auto default_rank = format.dimension();
auto shift_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!shift_constant) {
IE_THROW() << "Unsupported parameter node type in " << op_friendly_name << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(shift_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op_friendly_name, " (", op->get_type_name(), ")");
const auto shift_raw = shift_constant->cast_vector<int32_t>();
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2));
if (!axes_constant) {
IE_THROW() << "Unsupported parameter node type in " << op_friendly_name << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op_friendly_name, " (", op->get_type_name(), ")");
auto axes_raw = axes_constant->cast_vector<int32_t>();
// Normalize axes and sum shift
@@ -47,7 +43,7 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
axis += rank;
}
if (axis < 0 || axis >= rank) {
IE_THROW() << op_friendly_name << " Incorrect axis value: " << axis;
OPENVINO_THROW(op_friendly_name, " Incorrect axis value: ", axis);
}
shift[axis] += shift_raw[a];
}

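The Roll guard above applies the usual negative-axis convention: an axis in [-rank, -1] is shifted up by rank, and anything still outside [0, rank) is rejected. A self-contained sketch of just that step (the function name is illustrative):

#include "openvino/core/except.hpp"

int normalize_axis(int axis, int rank) {
    if (axis < 0)
        axis += rank;  // e.g. axis = -1 with rank = 4 becomes 3
    OPENVINO_ASSERT(axis >= 0 && axis < rank, "Incorrect axis value: ", axis);
    return axis;
}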

@@ -19,9 +19,7 @@ static void CreateScatterUpdateOp(Program& p, const std::shared_ptr<ngraph::op::
std::string layerName = layer_type_name_ID(op);
auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(3));
if (!axes_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(axes_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
int64_t axis = axes_constant->cast_vector<int64_t>()[0];
auto primitive = cldnn::scatter_update(layerName,
inputs[0],


@@ -26,7 +26,7 @@ static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Sel
if (broadcast_type.m_type != ngraph::op::AutoBroadcastType::NONE &&
broadcast_type.m_type != ngraph::op::AutoBroadcastType::NUMPY) {
IE_THROW() << "Unsupported broadcast type (" << broadcast_type.m_type << ") in layer " + op->get_friendly_name();
OPENVINO_THROW("[GPU] Unsupported broadcast type (", broadcast_type.m_type, ") in layer " + op->get_friendly_name());
}
if (broadcast_type.m_type == ngraph::op::AutoBroadcastType::NUMPY) {


@@ -26,8 +26,7 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v
for (size_t i = 1; i < 4; ++i) {
auto inConst = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(i));
if (!inConst)
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(inConst != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
std::vector<int32_t> sizes = inConst->cast_vector<int32_t>();
int32_t default_size = i == 1 ? 1 : 0;


@@ -16,7 +16,7 @@ static cldnn::space_to_depth::depth_mode GetDepthMode(ngraph::op::v0::SpaceToDep
switch (mode) {
case ngraph::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST: return cldnn::space_to_depth::blocks_first;
case ngraph::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST: return cldnn::space_to_depth::depth_first;
default: IE_THROW() << "Unsupported SpaceToDepthMode value: " << static_cast<int>(mode);
default: OPENVINO_THROW("[GPU] Unsupported SpaceToDepthMode value: ", static_cast<int>(mode));
}
return cldnn::space_to_depth::blocks_first;
}


@@ -38,13 +38,13 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
const auto outPartialShape = op->get_output_partial_shape(i);
NGRAPH_SUPPRESS_DEPRECATED_START
if (outPartialShape.size() != start_offset.size()) {
IE_THROW() << "Invalid dimesions in split layer: " << op->get_friendly_name()
<< " output: " << ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(i));
OPENVINO_THROW("Invalid dimesions in split layer: ", op->get_friendly_name(),
" output: ", ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(i)));
}
for (size_t idx = 0; idx < input_pshape.size(); idx++) {
if ((outPartialShape[idx].get_length() + static_cast<ov::Dimension::value_type>(start_offset[idx])) > input_pshape[idx].get_length()) {
IE_THROW() << "Invalid dimesions in split layer: " << op->get_friendly_name()
<< " output: " << ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(idx));
OPENVINO_THROW("Invalid dimesions in split layer: ", op->get_friendly_name(),
" output: ", ov::descriptor::get_ov_tensor_legacy_name(op->get_output_tensor(idx)));
}
}
NGRAPH_SUPPRESS_DEPRECATED_END


@@ -108,7 +108,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>
p.add_primitive(*op, argmaxPrim);
} else {
IE_THROW() << op->get_friendly_name() << " Incorrect TopK outputs number";
OPENVINO_THROW(op->get_friendly_name(), " Incorrect TopK outputs number");
}
}
}


@@ -22,9 +22,7 @@ static void CreateTransposeOp(Program& p, const std::shared_ptr<ngraph::op::v1::
std::vector<uint16_t> order;
if (op->get_input_size() == 2) {
auto order_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!order_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
OPENVINO_ASSERT(order_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
order = order_constant->cast_vector<uint16_t>();
}


@@ -80,8 +80,8 @@ static void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRel
if (slope_node && ngraph::shape_size(slope_shape.to_shape()) == 1) {
float slope;
if (!ov::op::util::get_single_value(slope_node, slope))
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_ASSERT(ov::op::util::get_single_value(slope_node, slope),
"[GPU] Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::relu_negative_slope, {slope});
} else if (out_shape.size() >= 2) {
auto inputs = p.GetInputInfo(op);
@@ -166,14 +166,14 @@ static void CreateHardSigmoidOp(Program& p, const std::shared_ptr<ngraph::op::v0
auto alpha_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto beta_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!alpha_node || !beta_node) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
if (ngraph::shape_size(alpha_node->get_output_shape(0)) == 1 &&
ngraph::shape_size(beta_node->get_output_shape(0)) == 1) {
float alpha, beta;
if (!ov::op::util::get_single_value(alpha_node, alpha) || !ov::op::util::get_single_value(beta_node, beta)) {
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::hard_sigmoid, {alpha, beta});
}
@@ -192,18 +192,18 @@ static void CreateSeluOp(Program& p, const std::shared_ptr<ngraph::op::v0::Selu>
auto alpha_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto lambda_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!alpha_node || !lambda_node) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
if (ngraph::shape_size(alpha_node->get_output_shape(0)) == 1 &&
ngraph::shape_size(lambda_node->get_output_shape(0)) == 1) {
float alpha, lambda;
if (!ov::op::util::get_single_value(alpha_node, alpha) || !ov::op::util::get_single_value(lambda_node, lambda)) {
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::selu, {alpha, lambda});
} else {
IE_THROW() << "Unsupported shapes of parameter nodes in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported shapes of parameter nodes in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
}
@@ -239,14 +239,14 @@ static void CreateSwishOp(Program& p, const std::shared_ptr<ngraph::op::v4::Swis
if (ngraph::shape_size(beta_node->get_output_shape(0)) == 1) {
float beta;
if (!ov::op::util::get_single_value(beta_node, beta)) {
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::swish, {beta});
} else {
IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
} else {
IE_THROW() << "Unsupported parameter type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
OPENVINO_THROW("Unsupported parameter type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
}
} else {
CreateUnaryEltwiseOp(p, op, cldnn::activation_func::swish, {1.0f});
@@ -289,7 +289,7 @@ static void CreateRoundOp(Program& p, const std::shared_ptr<ngraph::op::v5::Roun
switch (op->get_mode()) {
case ngraph::op::v5::Round::RoundMode::HALF_TO_EVEN : func = cldnn::activation_func::round_half_to_even; break;
case ngraph::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO : func = cldnn::activation_func::round_half_away_from_zero; break;
default: IE_THROW() << "Unsupported round mode in " << op->get_friendly_name() << ": " << static_cast<int>(op->get_mode());
default: OPENVINO_THROW("Unsupported round mode in ", op->get_friendly_name(), ": ", static_cast<int>(op->get_mode()));
}
CreateUnaryEltwiseOp(p, op, func, {});
}


@@ -19,10 +19,7 @@ void CreateUniqueOp(Program& p, const std::shared_ptr<ngraph::op::v10::Unique>&
int64_t axis{};
if (op->get_input_size() == 2) {
auto axis_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
if (!axis_constant) {
IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " ("
<< op->get_type_name() << ")";
}
OPENVINO_ASSERT(axis_constant != nullptr, "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
axis = axis_constant->cast_vector<int64_t>().at(0);
axis = ov::normalize_axis(op.get(), axis, op->get_input_partial_shape(0).rank());
flattened = false;


@@ -12,11 +12,15 @@
#include <tuple>
#include <cctype>
#include <memory>
#include "ie_metric_helpers.hpp"
#include <ie_ngraph_utils.hpp>
#include <ie_algorithm.hpp>
#include "intel_gpu/plugin/legacy_api_helper.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/util/common_util.hpp"
#include "intel_gpu/graph/serialization/layout_serializer.hpp"
#include "intel_gpu/graph/serialization/string_serializer.hpp"
#include "intel_gpu/graph/serialization/utils.hpp"
@@ -25,25 +29,14 @@
#include "intel_gpu/plugin/compiled_model.hpp"
#include "intel_gpu/plugin/transformations_pipeline.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/plugin/legacy_api_helper.hpp"
#include "intel_gpu/runtime/execution_config.hpp"
#include "intel_gpu/runtime/device_query.hpp"
#include "intel_gpu/runtime/debug_configuration.hpp"
#include "ie_plugin_config.hpp"
#include "gpu/gpu_config.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "ie_icore.hpp"
#include "openvino/core/dimension_tracker.hpp"
#include "transformations/init_node_info.hpp"
#include "transformations/common_optimizations/dimension_tracking.hpp"
#include <transformations/rt_info/fused_names_attribute.hpp>
#include <transformations/utils/utils.hpp>
#include <openvino/pass/manager.hpp>
#include <openvino/util/common_util.hpp>
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/utils/utils.hpp"
#include <performance_heuristics.hpp>
@@ -180,8 +173,7 @@ auto check_inputs = [](InferenceEngine::InputsDataMap _networkInputs) {
input_precision != InferenceEngine::Precision::I64 &&
input_precision != InferenceEngine::Precision::U64 &&
input_precision != InferenceEngine::Precision::BOOL) {
IE_THROW(NotImplemented)
<< "Input image format " << input_precision << " is not supported yet...";
OPENVINO_THROW("Input image format ", input_precision, " is not supported yet...");
}
}
};
@@ -314,9 +306,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork& network,
bool dyn_shape_batch_found = false;
auto model = network.getFunction();
if (model == nullptr) {
IE_THROW() << "Only ngraph-based models are supported!";
}
OPENVINO_ASSERT(model != nullptr, "[GPU] Only ngraph-based models are supported!");
auto supported = GetSupportedNodes(model,
[&](std::shared_ptr<ov::Model>& model) {
@@ -601,6 +591,7 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Plugin::GetMetric");
GPU_DEBUG_GET_INSTANCE(debug_config);
OPENVINO_SUPPRESS_DEPRECATED_START
// The metrics below don't depend on the device ID, so we should handle those
// earlier than querying the actual ID to avoid exceptions when no devices are found
if (name == ov::supported_properties) {
@@ -721,8 +712,10 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
}
return decltype(ov::device::architecture)::value_type {s.str()};
} else {
IE_THROW() << "Unsupported metric key " << name;
OPENVINO_THROW("Unsupported metric key ", name);
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
std::vector<ov::PropertyName> Plugin::get_supported_properties() const {
@@ -827,11 +820,11 @@ uint32_t Plugin::get_max_batch_size(const std::map<std::string, Parameter>& opti
auto n_streams_str = it_streams->second.as<std::string>();
if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO) &&
n_streams_str != util::to_string(ov::streams::AUTO)) {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\"";
OPENVINO_THROW("[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\"");
}
n_streams = std::max(/* config.GetDefaultNStreamsForThroughputMode() */2u, device_info.num_ccs);
} else {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\"";
OPENVINO_THROW("[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\"");
}
}
@@ -843,10 +836,10 @@ uint32_t Plugin::get_max_batch_size(const std::map<std::string, Parameter>& opti
available_device_mem = std::min(static_cast<int64_t>(available_device_mem), available_device_mem_it->second.as<int64_t>());
GPU_DEBUG_LOG << "[GPU_MAX_BATCH_SIZE] available memory is reset by user " << available_device_mem << std::endl;
} else {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: ov::intel_gpu::hint::available_device_mem should be int64_t type";
OPENVINO_THROW("[GPU_MAX_BATCH_SIZE] bad casting: ov::intel_gpu::hint::available_device_mem should be int64_t type");
}
if (available_device_mem < 0) {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] ov::intel_gpu::hint::available_device_mem value should be greater than 0 for max batch size calculation";
OPENVINO_THROW("[GPU_MAX_BATCH_SIZE] ov::intel_gpu::hint::available_device_mem value should be greater than 0 for max batch size calculation");
}
}
@@ -855,7 +848,7 @@ uint32_t Plugin::get_max_batch_size(const std::map<std::string, Parameter>& opti
if (model_param.is<std::shared_ptr<ngraph::Function>>()) {
model = model_param.as<std::shared_ptr<ngraph::Function>>();
} else {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] ov::hint::model should be std::shared_ptr<ov::Model> type";
OPENVINO_THROW("[GPU_MAX_BATCH_SIZE] ov::hint::model should be std::shared_ptr<ov::Model> type");
}
InferenceEngine::CNNNetwork network(model);
@@ -960,7 +953,7 @@ uint32_t Plugin::get_optimal_batch_size(const std::map<std::string, Parameter>&
try {
model = model_param->second.as<std::shared_ptr<ngraph::Function>>();
} catch (...) {
IE_THROW() << "[OPTIMAL_BATCH_SIZE] ov::hint::model should be std::shared_ptr<ov::Model> type";
OPENVINO_THROW("[OPTIMAL_BATCH_SIZE] ov::hint::model should be std::shared_ptr<ov::Model> type");
}
GPU_DEBUG_INFO << "DEVICE_INFO:"
<< "gfx_version.major, " << device_info.gfx_ver.major

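GetMetric above stays on the legacy metric keys for now, so the whole body is fenced with the deprecation-suppression macros to keep builds that treat deprecation warnings as errors green. The pattern, sketched with a hypothetical deprecated call (the macros come from "openvino/core/deprecated.hpp"):

#include "openvino/core/deprecated.hpp"

int query_legacy_metric() {
    OPENVINO_SUPPRESS_DEPRECATED_START
    // return legacy::GetMetric(...);  // hypothetical call into deprecated API
    OPENVINO_SUPPRESS_DEPRECATED_END
    return 0;  // placeholder so the sketch compiles
}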

@@ -6,18 +6,16 @@
#include <malloc.h>
#endif
#include "intel_gpu/plugin/program.hpp"
#include "ngraph/ops.hpp"
#include "ov_ops/nms_ie_internal.hpp"
#include "openvino/core/graph_util.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "openvino/runtime/system_conf.hpp"
#include "intel_gpu/plugin/program.hpp"
#include "intel_gpu/plugin/transformations_pipeline.hpp"
#include "intel_gpu/runtime/itt.hpp"
#include "intel_gpu/runtime/debug_configuration.hpp"
#include "intel_gpu/primitives/mutable_data.hpp"
#include "intel_gpu/primitives/data.hpp"
#include <ie_system_conf.h>
#ifdef __linux__
# include <dlfcn.h>
#endif
@@ -125,7 +123,7 @@ bool Program::IsDynBatchModel(const std::shared_ptr<ov::Model>& model,
Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, const ExecutionConfig& config,
bool createTopologyOnly, bool partialBuild,
InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor, bool innerProgram)
std::shared_ptr<ov::threading::IStreamsExecutor> task_executor, bool innerProgram)
: m_curBatch(-1)
, m_config(config)
, m_engine(engine)
@@ -139,7 +137,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co
auto func = network.getFunction();
if (!func) {
IE_THROW() << "Function pointer inside CNNNetwork is nullptr";
OPENVINO_THROW("Function pointer inside CNNNetwork is nullptr");
}
// locate global custom kernel config
@@ -337,7 +335,7 @@ int Program::GetMaxBatchSizeForSingleProgram() {
std::shared_ptr<cldnn::program> Program::GetCompiledProgram(int program_id) {
if (program_id >= static_cast<int32_t>(m_programs.size()))
IE_THROW() << "Invalid program ID";
OPENVINO_THROW("Invalid program ID");
return m_programs[program_id];
}
@@ -460,9 +458,9 @@ void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, const std::s
}
if (!is_created) {
IE_THROW() << "Operation: " << op->get_friendly_name()
<< " of type " << op->get_type_name()
<< "(op::v" << op->get_type_info().version_id << ") is not supported";
OPENVINO_THROW("Operation: ", op->get_friendly_name(),
" of type ", op->get_type_name(),
"(", op->get_type_info().version_id, ") is not supported");
}
}
@@ -487,7 +485,7 @@ std::vector<cldnn::input_info> Program::GetInputInfo(const std::shared_ptr<ngrap
if (!queryMode) {
if (primitive_ids.find(prevName) == primitive_ids.end()) {
IE_THROW() << "Input " << prevName << " hasn't been found in primitive_ids map";
OPENVINO_THROW("Input ", prevName, " hasn't been found in primitive_ids map");
}
inputInfo.push_back(
cldnn::input_info(primitive_ids.at(prevName), is_legacy_multiple_outputs ? 0: static_cast<int>(op->get_input_source_output(i).get_index())));
@@ -596,9 +594,9 @@ void validate_inputs_count(const std::shared_ptr<ngraph::Node>& op, std::vector<
}
}
IE_THROW() << "Invalid inputs count (" << op->get_input_size() << ") in "
<< op->get_friendly_name() << " (" << op->get_type_name()
<< " op::v" << op->get_type_info().version_id << ")";
OPENVINO_THROW("Invalid inputs count (", op->get_input_size(), ") in )",
op->get_friendly_name(), " (", op->get_type_name(),
" ", op->get_type_info().version_id, ")");
}
} // namespace intel_gpu

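With the signature change above, callers hand Program a shared ov::threading::IStreamsExecutor instead of an InferenceEngine::CPUStreamsExecutor. A hedged sketch of constructing one; the Config(name, streams) constructor and the CPUStreamsExecutor type match the test updates later in this diff, while the executor name is an arbitrary placeholder:

#include <memory>
#include "openvino/runtime/threading/cpu_streams_executor.hpp"

std::shared_ptr<ov::threading::IStreamsExecutor> make_task_executor() {
    ov::threading::IStreamsExecutor::Config config("GPUPluginExecutor", 1 /*streams*/);
    return std::make_shared<ov::threading::CPUStreamsExecutor>(config);
}
// The result is passed as the task_executor argument of the Program constructor above.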

@@ -104,7 +104,7 @@ AnyMap RemoteBlobImpl::getParams() const {
{ GPU_PARAM_KEY(VA_PLANE), params.plane }
};
default:
IE_THROW() << "Unsupported shared object type " << static_cast<int>(m_mem_type);
OPENVINO_THROW("Unsupported shared object type ", static_cast<int>(m_mem_type));
}
}
@@ -112,7 +112,7 @@ void RemoteBlobImpl::setShape(const SizeVector& dims) {
if (ov::shape_size(dims) > m_memory_object->count()) {
OPENVINO_ASSERT(!is_shared(), "Cannot call setShape for Blobs created on top of preallocated memory if shape was increased.");
if (!deallocate()) {
IE_THROW() << "Cannot deallocate blob while an attempt to enlarge blob area in setShape.";
OPENVINO_THROW("Cannot deallocate blob while an attempt to enlarge blob area in setShape.");
}
m_layout.set_partial_shape(ov::PartialShape{dims});
@@ -217,7 +217,7 @@ void RemoteBlobImpl::reinterpret(const cldnn::layout& new_layout) {
void RemoteBlobImpl::lock() const {
if (!is_allocated()) {
IE_THROW(NotAllocated) << "[GPU] Remote blob can't be locked as it's not allocated";
OPENVINO_THROW("[GPU] Remote blob can't be locked as it's not allocated");
}
std::lock_guard<std::mutex> locker(lockedMutex);


@@ -58,7 +58,7 @@ RemoteContextImpl::RemoteContextImpl(const std::vector<RemoteContextImpl::Ptr>&
m_va_display = _va_device = extract_object<gpu_handle_param>(params, GPU_PARAM_KEY(VA_DEVICE));
m_type = ContextType::DEV_SHARED;
} else {
IE_THROW() << "Invalid execution context type" << contextTypeStr;
OPENVINO_THROW("Invalid execution context type", contextTypeStr);
}
auto tile_id_itr = params.find(GPU_PARAM_KEY(TILE_ID));
if (tile_id_itr != params.end()) {
@@ -94,7 +94,7 @@ AnyMap RemoteContextImpl::get_params() const {
ret[GPU_PARAM_KEY(VA_DEVICE)] = m_va_display;
break;
default:
IE_THROW() << "Unsupported shared context type " << m_type;
OPENVINO_THROW("Unsupported shared context type ", m_type);
}
return ret;


@@ -13,9 +13,10 @@
#include <memory>
#include "intel_gpu/plugin/transformations_pipeline.hpp"
#include "intel_gpu/plugin/legacy_api_helper.hpp"
#include <ie_ngraph_utils.hpp>
#include "ie_metric_helpers.hpp"
#include "ie_plugin_config.hpp"
#include <ngraph/opsets/opset2.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset4.hpp>
@@ -23,8 +24,6 @@
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ie_ngraph_utils.hpp>
#include <ie_algorithm.hpp>
#include "transformations/einsum_decomposition.hpp"
#include "transformations/convert_pooling_to_reduce.hpp"


@@ -170,7 +170,7 @@ void kernels_cache::get_program_source(const kernels_code& kernels_source_code,
kernels_cache::kernels_cache(engine& engine,
const ExecutionConfig& config,
uint32_t prog_id,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor,
std::shared_ptr<ov::threading::ITaskExecutor> task_executor,
const std::vector<std::string>& batch_header_str)
: _engine(engine)
, _task_executor(task_executor)
@@ -426,7 +426,7 @@ void kernels_cache::build_all() {
if (_task_executor && use_threads) {
std::exception_ptr exception;
std::vector<InferenceEngine::Task> tasks;
std::vector<ov::threading::Task> tasks;
for (size_t idx = 0; idx < batches.size(); idx++) {
auto& batch = batches[idx];
tasks.push_back([this, &_build_engine, &batch, &exception] {
@@ -437,7 +437,7 @@ void kernels_cache::build_all() {
}
});
}
_task_executor->runAndWait(tasks);
_task_executor->run_and_wait(tasks);
tasks.clear();
if (exception) {
@@ -583,7 +583,7 @@ void kernels_cache::load(BinaryInputBuffer& ib) {
for (auto& p : err.getBuildLog()) {
err_log += p.second + '\n';
}
IE_THROW() << err_log;
OPENVINO_THROW(err_log);
}
}

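The build_all() change above is the heart of the threading migration: InferenceEngine::Task becomes ov::threading::Task and runAndWait becomes run_and_wait, with worker-side exceptions captured and rethrown on the calling thread. A compilable sketch of the same pattern; build_batch stands in for the real per-batch kernel compilation:

#include <exception>
#include <vector>
#include "openvino/runtime/threading/itask_executor.hpp"

void run_batches(ov::threading::ITaskExecutor& executor, size_t num_batches) {
    std::exception_ptr exception;
    std::vector<ov::threading::Task> tasks;
    for (size_t idx = 0; idx < num_batches; ++idx) {
        tasks.push_back([idx, &exception] {
            try {
                (void)idx;  // build_batch(idx); -- placeholder for the real work
            } catch (...) {
                exception = std::current_exception();  // record the failure, as above
            }
        });
    }
    executor.run_and_wait(tasks);  // blocks until every task has finished
    if (exception)
        std::rethrow_exception(exception);
}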

@@ -18,7 +18,7 @@
#include <string>
#include <set>
#include <threading/ie_cpu_streams_executor.hpp>
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#include "kernels_factory.hpp"
#include "ocl/ocl_engine.hpp"
@@ -75,7 +75,7 @@ public:
private:
static std::mutex _mutex;
engine& _engine;
InferenceEngine::CPUStreamsExecutor::Ptr _task_executor;
std::shared_ptr<ov::threading::ITaskExecutor> _task_executor;
ExecutionConfig _config;
uint32_t _prog_id = 0;
kernels_code _kernels_code;
@@ -96,7 +96,7 @@ public:
explicit kernels_cache(engine& engine,
const ExecutionConfig& config,
uint32_t prog_id,
InferenceEngine::CPUStreamsExecutor::Ptr task_executor = nullptr,
std::shared_ptr<ov::threading::ITaskExecutor> task_executor = nullptr,
const std::vector<std::string>& batch_header_str = {});
kernel::ptr get_kernel_from_cached_kernels(std::string id) const;
std::vector<kernel::ptr> get_kernels(kernel_impl_params params) const;


@@ -144,7 +144,7 @@ protected:
break;
}
default:
IE_THROW() << "roi_pooling. Unsupported precision";
OPENVINO_THROW("roi_pooling. Unsupported precision");
}
} else {
switch (funcInput.get_element_type()) {
@@ -157,7 +157,7 @@ protected:
break;
}
default:
IE_THROW() << "roi_pooling. Unsupported precision";
OPENVINO_THROW("roi_pooling. Unsupported precision");
}
}
} else {


@@ -81,7 +81,7 @@ protected:
data[i] = indicesVals[i];
}
} else {
IE_THROW() << "GatherNDUpdate. Unsupported indices precision: " << inputPrecision;
OPENVINO_THROW("GatherNDUpdate. Unsupported indices precision: ", inputPrecision);
}
} else {
if (inputPrecision.is_real()) {

View File

@@ -94,9 +94,9 @@ TEST(kernels_cache, reuse_kernel_for_static_model_01) {
TEST(kernels_cache, sub_kernel_ordering_test) {
auto& engine = get_test_engine();
ExecutionConfig config = get_test_default_config(engine);
InferenceEngine::CPUStreamsExecutor::Config task_executor_config("sub_kernel_ordering_test", 1);
ov::threading::IStreamsExecutor::Config task_executor_config("sub_kernel_ordering_test", 1);
task_executor_config._streams = 2;
auto executor = std::make_shared<InferenceEngine::CPUStreamsExecutor>(task_executor_config);
auto executor = std::make_shared<ov::threading::CPUStreamsExecutor>(task_executor_config);
const size_t num_kernels = 9;
auto _kernels_cache = std::unique_ptr<kernels_cache>(new kernels_cache(engine, config, 0, executor));
std::vector<std::string> entry_point_list;

View File

@@ -21,9 +21,9 @@ using namespace ::tests;
TEST(multistream_gpu, basic) {
const int num_streams = 2;
auto task_config = InferenceEngine::CPUStreamsExecutor::Config();
auto task_config = ov::threading::IStreamsExecutor::Config();
task_config._streams = num_streams;
auto task_executor = std::make_shared<InferenceEngine::CPUStreamsExecutor>(task_config);
auto task_executor = std::make_shared<ov::threading::CPUStreamsExecutor>(task_config);
auto& engine = get_test_engine();
ExecutionConfig config = get_test_default_config(engine);
@@ -53,7 +53,7 @@ TEST(multistream_gpu, basic) {
streams.push_back(networks[i]->get_stream_ptr());
}
std::vector<InferenceEngine::Task> tasks;
std::vector<ov::threading::Task> tasks;
for (size_t i = 0; i < num_streams; i++) {
tasks.push_back([&networks, &streams, i, &engine] {
auto cfg = get_test_default_config(engine);
@@ -85,7 +85,7 @@ TEST(multistream_gpu, basic) {
});
}
task_executor->runAndWait(tasks);
task_executor->run_and_wait(tasks);
tasks.clear();
networks.clear();
}
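Taken together, this test captures the whole rename map applied across the commit: InferenceEngine::CPUStreamsExecutor::Config becomes ov::threading::IStreamsExecutor::Config, InferenceEngine::CPUStreamsExecutor becomes ov::threading::CPUStreamsExecutor, InferenceEngine::Task becomes ov::threading::Task, and runAndWait() becomes run_and_wait().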

View File

@@ -254,7 +254,7 @@ TEST_P(scatter_nd_update_random_test, random)
else if (param.input_type == data_types::f32)
this->execute<float>(param, false);
else
IE_THROW() << "unidentified data type";
OPENVINO_THROW("unidentified data type");
}
INSTANTIATE_TEST_SUITE_P(scatter_nd_update_gpu_random_test_fp32_bsv32_fsv16_4d_rank_1,
@@ -4588,7 +4588,7 @@ TEST_P(scatter_nd_update_random_test, random_cached)
else if (param.input_type == data_types::f32)
this->execute<float>(param, true);
else
IE_THROW() << "unidentified data type";
OPENVINO_THROW("unidentified data type");
}
#endif
TEST(scatter_nd_update_gpu_fp16, d222222_i211111_cached) {

View File

@@ -399,9 +399,9 @@ double default_tolerance(data_types dt) {
case data_types::u8:
return 1.5;
default:
IE_THROW() << "Unknown";
OPENVINO_THROW("Unknown");
}
IE_THROW() << "Unknown";
OPENVINO_THROW("Unknown");
}
cldnn::format generic_test::get_plain_format_for(const cldnn::format input) {
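Both throw sites here replace IE_THROW() one-for-one: the default label covers unexpected enum values, while the unreachable trailing throw after the switch keeps compilers from warning that a non-void function might fall through without a return.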

View File

@@ -528,8 +528,8 @@ std::vector<float> get_output_values_to_float(cldnn::network& net, const cldnn::
auto ptr = output.get_memory();
cldnn::mem_lock<T, cldnn::mem_lock_type::read> mem(ptr, net.get_stream());
if (ptr->get_layout().data_type != cldnn::type_to_data_type<T>::value)
IE_THROW() << "target type " << cldnn::data_type_traits::name(cldnn::type_to_data_type<T>::value)
<< " mismatched with actual type " << cldnn::data_type_traits::name(ptr->get_layout().data_type);
OPENVINO_THROW("target type ", cldnn::data_type_traits::name(cldnn::type_to_data_type<T>::value),
" mismatched with actual type ", cldnn::data_type_traits::name(ptr->get_layout().data_type));
for (size_t i = 0; i < std::min(max_cnt, ptr->get_layout().count()); i++)
ret.push_back(mem[i]);
return ret;
@@ -550,7 +550,7 @@ inline std::vector<float> get_output_values_to_float(cldnn::network& net, const
case cldnn::data_types::i64:
return get_output_values_to_float<int64_t>(net, output, max_cnt);
default:
IE_THROW() << "Unknown output data_type";
OPENVINO_THROW("Unknown output data_type");
}
}