[Coverity] fix coverity (#18784)
* fix CVS-112758
* fix CVS-114053
* fix CVS-116203
* fix CVS-112785 part 0
* fix CVS-112785 part 1
* fix CVS-112785 part 2
* fix CVS-112785 part 2+
* Update src/plugins/intel_gpu/src/plugin/ops/gather.cpp

  Co-authored-by: Roman Lyamin <Roman.Lyamin@intel.com>

* fix CVS-112785 OPENVINO_ASSERT
* add suggestion
* fix CVS-112785

---------

Co-authored-by: Roman Lyamin <Roman.Lyamin@intel.com>
Parent: b76fc24824
Commit: 36309938d9
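Most of the changes below follow two recurring Coverity patterns: values that were being copied (COPY_INSTEAD_OF_MOVE) are now either taken by const reference or moved into their destination, range-based for loops bind by reference instead of copying each element, and previously ignored return values (CHECKED_RETURN) are now wrapped in OPENVINO_ASSERT. A minimal standalone sketch of the move/reference pattern follows; the names are generic illustrations, not taken from the OpenVINO sources:

```cpp
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct data_node {
    // Sink parameters are taken by value and moved into the members,
    // so a caller passing a temporary pays for no extra copy.
    data_node(std::string id, std::shared_ptr<int> mem)
        : id(std::move(id)), mem(std::move(mem)) {}

    std::string id;
    std::shared_ptr<int> mem;
};

// A shared_ptr that is only read is passed by const reference,
// avoiding an atomic refcount increment on every call.
inline bool has_value(const std::shared_ptr<int>& mem) { return mem != nullptr; }

inline std::size_t count_set(const std::vector<data_node>& nodes) {
    std::size_t n = 0;
    for (const auto& node : nodes)  // bind by reference: no per-element copy
        n += has_value(node.mem) ? 1 : 0;
    return n;
}
```

The CHECKED_RETURN fixes (the crop `evaluate()` call and the gather `get_single_value()` call) simply pass the boolean result through OPENVINO_ASSERT with a diagnostic message, as shown in the hunks below.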
@@ -21,7 +21,7 @@ private:
 };
 
 struct fused_primitive_desc {
-    explicit fused_primitive_desc(std::shared_ptr<const primitive> prim) : desc(prim) {}
+    explicit fused_primitive_desc(const std::shared_ptr<const primitive>& prim) : desc(prim) {}
 
     template <class PType>
     bool is_type() const {
@@ -74,8 +74,8 @@ struct kernel_impl_params {
                        const std::vector<cldnn::fused_primitive_desc>& _fused_descs)
         : has_runtime_layouts(true)
         , prog(&_prog)
-        , strm(_strm)
-        , desc(_desc)
+        , strm(std::move(_strm))
+        , desc(std::move(_desc))
         , unique_id(_uid)
         , input_layouts(_in_layouts)
         , output_layouts(_out_layouts)
@@ -114,10 +114,10 @@ struct kernel_impl_params {
     }
 
     bool is_dynamic() const {
-        for (auto i : input_layouts)
+        for (auto& i : input_layouts)
             if (i.is_dynamic())
                 return true;
-        for (auto i : output_layouts)
+        for (auto& i : output_layouts)
             if (i.is_dynamic())
                 return true;
         return false;
@@ -70,7 +70,7 @@ public:
     using Ptr = std::shared_ptr<VariableState>;

    VariableState(cldnn::memory_ptr mem = nullptr) :
-        memory { mem }, is_set { false } {
+        memory { std::move(mem) }, is_set { false } {
    }
    void set_memory(cldnn::memory_ptr new_mem) {
        memory = new_mem;
@@ -41,13 +41,13 @@ public:
     using variable_states_map = std::map<std::string, std::vector<cldnn::network::VariableState::Ptr>>;
 
     Graph(InferenceEngine::CNNNetwork& network,
-          RemoteContextImpl::Ptr context,
+          const RemoteContextImpl::Ptr& context,
           const ExecutionConfig& config,
           uint16_t stream_id = 0,
           InferenceEngine::InputsDataMap* inputs = nullptr,
           InferenceEngine::OutputsDataMap* outputs = nullptr);
     Graph(cldnn::BinaryInputBuffer& ib,
-          RemoteContextImpl::Ptr context,
+          const RemoteContextImpl::Ptr& context,
           const ExecutionConfig& config,
           uint16_t stream_id = 0,
           InferenceEngine::InputsDataMap* inputs = nullptr,
@@ -142,7 +142,7 @@ public:
 
     template<typename PType, typename = typename std::enable_if<!is_smart_pointer<PType>::value>::type>
     void add_primitive(const ngraph::Node& op, PType prim, std::vector<std::string> aliases = {}) {
-        add_primitive(op, std::static_pointer_cast<cldnn::primitive>(std::make_shared<PType>(prim)), aliases);
+        add_primitive(op, std::static_pointer_cast<cldnn::primitive>(std::make_shared<PType>(prim)), std::move(aliases));
     }
 
     void add_primitive(const ngraph::Node& op, std::shared_ptr<cldnn::primitive> prim, std::vector<std::string> aliases = {});
@@ -24,7 +24,7 @@ struct data : public primitive_base<data> {
     /// @param mem @ref memory object which contains data.
     /// @note If memory is attached by memory::attach(), the attached buffer should be valid till network build.
     data(const primitive_id& id, memory::ptr mem)
-        : primitive_base(id, {}, {padding()}), mem(mem) {}
+        : primitive_base(id, {}, {padding()}), mem(std::move(mem)) {}
 
     /// @brief @ref memory object which contains data.
     /// @note If memory is attached by memory::attach(), the attached buffer should be valid till network build.
@@ -68,8 +68,8 @@ struct loop : public primitive_base<loop> {
     /// @param stride Step of iteration. Negative value means backward iteration. Applies only when axis >=0.
     io_primitive_map(primitive_id external_id = "", primitive_id internal_id = "",
                      int64_t axis = -1, int64_t start = 0, int64_t end = -1, int64_t stride = 1) :
-        external_id(external_id),
-        internal_id(internal_id),
+        external_id(std::move(external_id)),
+        internal_id(std::move(internal_id)),
         axis(axis),
         start(start),
         end(end),
@@ -43,7 +43,7 @@ struct mutable_data : public primitive_base<mutable_data> {
                  const std::vector<input_info>& inputs,
                  memory::ptr mem,
                  filler_type fill_type = filler_type::no_fill)
-        : primitive_base(id, inputs, {padding()}), mem(mem), fill_type(fill_type) {}
+        : primitive_base(id, inputs, {padding()}), mem(std::move(mem)), fill_type(fill_type) {}
 
     /// @brief @ref memory object which contains data.
     /// @note If memory is attached by memory::attach(), the attached buffer should be valid till network build.
@@ -35,8 +35,8 @@ struct primitive_info;
 /// @details Contains infomation about id and output index of input primitive.
 struct input_info {
     input_info() : pid(""), idx(0) {}
-    input_info(primitive_id pid) : pid(pid), idx(0) {}
-    input_info(primitive_id pid, int idx) : pid(pid), idx(idx) {}
+    input_info(primitive_id pid) : pid(std::move(pid)), idx(0) {}
+    input_info(primitive_id pid, int idx) : pid(std::move(pid)), idx(idx) {}
 
     /// @brief Copy assignment.
     input_info& operator=(const input_info& other) {
@@ -208,7 +208,7 @@ public:
     }
 
     void set_remove_item_callback(FuncRemoveItem callback) {
-        _remove_popped_item = callback;
+        _remove_popped_item = std::move(callback);
     }
 
 private:
@@ -123,7 +123,8 @@ private:
 
 template <class T, mem_lock_type lock_type = mem_lock_type::read_write>
 struct mem_lock {
-    explicit mem_lock(memory::ptr mem, const stream& stream) : _mem(mem), _stream(stream), _ptr(reinterpret_cast<T*>(_mem->lock(_stream, lock_type))) {}
+    explicit mem_lock(memory::ptr mem, const stream& stream) : _mem(std::move(mem)), _stream(stream),
+        _ptr(reinterpret_cast<T*>(_mem->lock(_stream, lock_type))) {}
 
     ~mem_lock() {
         _ptr = nullptr;
@@ -154,7 +154,7 @@ concatenation_inst::typed_primitive_inst(network& network, concatenation_node co
         auto nodes_list = stack.front();
         stack.pop_front();
 
-        for (auto processed_nodes : *nodes_list) {
+        for (const auto& processed_nodes : *nodes_list) {
             auto processed_node = processed_nodes.first;
             processed_node->_outputs = _outputs;
             if (processed_node->type() == concatenation::type_id() && processed_node->can_be_optimized()) {
@@ -198,8 +198,8 @@ layout gemm_inst::transform_output_layout(const std::shared_ptr<const gemm> prim
             return idx;
         };
 
-        output_pshape[get_spatial_idx(updated_output_layout.format, 0)] = N;
-        output_pshape[get_spatial_idx(updated_output_layout.format, 1)] = M;
+        output_pshape[get_spatial_idx(updated_output_layout.format, 0)] = std::move(N);
+        output_pshape[get_spatial_idx(updated_output_layout.format, 1)] = std::move(M);
         updated_output_layout.set_partial_shape(output_pshape);
     }
     return updated_output_layout;
@@ -72,7 +72,7 @@ struct loop_impl : typed_primitive_impl<loop> {
         // read trip_count from outer network
         bool update_num_iterations = false;
         memory::ptr trip_count_mem = outer_network.get_primitive(primitive->trip_count_id)->output_memory_ptr();
-        int64_t trip_count = loop_node::read_scalar_value(trip_count_mem, stream);
+        int64_t trip_count = loop_node::read_scalar_value(std::move(trip_count_mem), stream);
         if (trip_count < 0) {
             trip_count = _max_iteration;
             update_num_iterations = true;
@@ -94,7 +94,7 @@ struct loop_impl : typed_primitive_impl<loop> {
         // If there are concatenated_output_mem_mappings or backedge_memory_mappings we need to wait for
        // previous tasks before accessing memory in get_sliced_mem() and setup_iteration() functions
        if (!concatenated_input_mem_mappings.empty() || !instance.backedge_memory_mappings.empty()) {
-            for (auto e : events) {
+            for (auto& e : events) {
                e->wait();
            }
        }
@@ -95,7 +95,8 @@ struct crop_impl : public typed_primitive_impl<crop> {
         if (!op)
             op = std::make_shared<ov::op::v8::Slice>();
 
-        op->evaluate(output_host_tensors, input_host_tensors);
+        OPENVINO_ASSERT(op->evaluate(output_host_tensors, input_host_tensors),
+                        "[GPU] Couldn't execute crop primitive with id ", instance.id());
 
         ev->set();
 
@@ -499,7 +499,7 @@ public:
                            std::vector<std::array<float, PRIOR_BOX_SIZE>>& prior_variances) {
         auto input_prior_box = instance.prior_box_memory();
         const int num_of_priors = static_cast<int>(prior_bboxes.size()) / images_count;
-        mem_lock<dtype, mem_lock_type::read> lock{input_prior_box, stream};
+        mem_lock<dtype, mem_lock_type::read> lock{std::move(input_prior_box), stream};
         for (int i = 0; i < images_count; i++) {
             auto prior_box_data =
                 lock.begin() + i * num_of_priors * prior_info_size * (variance_encoded_in_target ? 1 : 2);
@@ -309,7 +309,7 @@ void store_second_output(stream& stream, memory::ptr mem, const std::vector<resu
 }
 
 template <typename T>
-void store_third_output_impl(stream& stream, memory::ptr mem, const std::vector<result_indices>& result) {
+void store_third_output_impl(stream& stream, const memory::ptr& mem, const std::vector<result_indices>& result) {
     mem_lock<T, mem_lock_type::write> lock(mem, stream);
     auto ptr = lock.data();
     ptr[0] = static_cast<T>(result.size());
@@ -284,7 +284,7 @@ struct proposal_impl : typed_primitive_impl<proposal> {
         int fm_sz = fm_w * fm_h;
 
         mem_lock<dtype, mem_lock_type::read> cls_scores_ptr{cls_scores, stream};
-        mem_lock<dtype, mem_lock_type::read> bbox_pred_ptr{bbox_pred, stream};
+        mem_lock<dtype, mem_lock_type::read> bbox_pred_ptr{std::move(bbox_pred), stream};
         const dtype* cls_scores_mem = cls_scores_ptr.data();
         const dtype* bbox_pred_mem = bbox_pred_ptr.data();
 
@@ -160,7 +160,7 @@ struct resample_impl : typed_primitive_impl_ocl<resample> {
         bool scales_calc_mod = primitive->shape_calc_mode == resample::InterpolateOp::ShapeCalcMode::SCALES;
         if (scales_calc_mod && impl_param.input_layouts.size() > 1 && scales.empty()) {
             auto mem = impl_param.memory_deps.at(2);
-            scales = read_vector<float>(mem, impl_param.get_stream());
+            scales = read_vector<float>(std::move(mem), impl_param.get_stream());
         }
 
         params.scales = scales;
@@ -118,9 +118,9 @@ public:
         std::vector<uint8_t> end_mask(end_mask_.begin(), end_mask_.end());
         std::vector<uint8_t> new_axis_mask(new_axis_mask_.begin(), new_axis_mask_.end());
         std::vector<uint8_t> shrink_axis_mask(shrink_axis_mask_.begin(), shrink_axis_mask_.end());
-        params.end_mask = end_mask;
+        params.end_mask = std::move(end_mask);
         pad_vector_to_size(params.end_mask, dims_num, 0);
-        params.begin_mask = begin_mask;
+        params.begin_mask = std::move(begin_mask);
         pad_vector_to_size(params.begin_mask, dims_num, 0);
 
         params.new_axis_mask = new_axis_mask;
@@ -13,7 +13,7 @@ namespace cldnn {
 
 class EltwiseFuseParams : public NodeFuseParams {
 public:
-    EltwiseFuseParams(std::shared_ptr<eltwise> desc) : NodeFuseParams(eltwise::type_id()), _desc(desc) {}
+    EltwiseFuseParams(std::shared_ptr<eltwise> desc) : NodeFuseParams(eltwise::type_id()), _desc(std::move(desc)) {}
     size_t ops_count() const override { return 1; }
 
     std::shared_ptr<eltwise> _desc;
@@ -123,22 +123,22 @@ public:
 
     static void add(impl_types impl_type, shape_types shape_type, factory_type factory,
                     const std::vector<data_types>& types, const std::vector<format::type>& formats) {
-        add(impl_type, shape_type, factory, combine(types, formats));
+        add(impl_type, shape_type, std::move(factory), combine(types, formats));
     }
 
     static void add(impl_types impl_type, factory_type factory,
                     const std::vector<data_types>& types, const std::vector<format::type>& formats) {
-        add(impl_type, factory, combine(types, formats));
+        add(impl_type, std::move(factory), combine(types, formats));
     }
 
     static void add(impl_types impl_type, factory_type factory, std::set<key_type> keys) {
         OPENVINO_ASSERT(impl_type != impl_types::any, "[GPU] Can't register impl with type any");
-        add(impl_type, shape_types::static_shape, factory, keys);
+        add(impl_type, shape_types::static_shape, std::move(factory), keys);
     }
 
     static void add(impl_types impl_type, shape_types shape_type, factory_type factory, std::set<key_type> keys) {
         OPENVINO_ASSERT(impl_type != impl_types::any, "[GPU] Can't register impl with type any");
-        list_type::instance().push_back({impl_type, shape_type, keys, factory});
+        list_type::instance().push_back({impl_type, shape_type, keys, std::move(factory)});
     }
 
     static std::set<key_type> combine(const std::vector<data_types>& types, const std::vector<format::type>& formats) {
@@ -254,7 +254,7 @@ public:
             } else {
                 auto body_output_prim = body.at(body_output->first);
                 auto mem = get_program().get_engine().allocate_memory(body_output_layout);
-                body_output_prim.reset(new mutable_data(body_output->first, mem));
+                body_output_prim.reset(new mutable_data(body_output->first, std::move(mem)));
             }
         }
     }
@@ -351,9 +351,9 @@ public:
             std::shared_ptr<primitive_inst> _from_primitive, std::shared_ptr<primitive_inst> _to_primitive,
             std::vector<memory::ptr> _from_mems, memory::ptr _initial_mem, cldnn::stream& _stream, backedge_type _type = CONCAT_OUTPUT):
             from_primitive(_from_primitive),
-            to_primitive(_to_primitive),
+            to_primitive(std::move(_to_primitive)),
             from_mems(_from_mems),
-            initial_mem(_initial_mem),
+            initial_mem(std::move(_initial_mem)),
             stream(_stream),
             type(_type),
             total_bytes(initial_mem->get_layout().bytes_count()) {
@@ -364,9 +364,9 @@ public:
             std::shared_ptr<primitive_inst> _from_primitive, std::shared_ptr<primitive_inst> _to_primitive,
             memory::ptr _from_mem, memory::ptr _initial_mem, cldnn::stream& _stream, backedge_type _type = SINGLE_SHARED):
             from_primitive(_from_primitive),
-            to_primitive(_to_primitive),
-            from_mems{_from_mem},
-            initial_mem(_initial_mem),
+            to_primitive(std::move(_to_primitive)),
+            from_mems{std::move(_from_mem)},
+            initial_mem(std::move(_initial_mem)),
             stream(_stream),
             type(_type),
             total_bytes(initial_mem->get_layout().bytes_count()) {
@@ -377,8 +377,8 @@ public:
             std::shared_ptr<primitive_inst> _from_primitive, std::shared_ptr<primitive_inst> _to_primitive,
             memory::ptr _initial_mem, cldnn::stream& _stream, backedge_type _type = SINGLE):
             from_primitive(_from_primitive),
-            to_primitive(_to_primitive),
-            initial_mem(_initial_mem),
+            to_primitive(std::move(_to_primitive)),
+            initial_mem(std::move(_initial_mem)),
             stream(_stream),
             type(_type),
             total_bytes(initial_mem->get_layout().bytes_count()) {
@@ -402,7 +402,7 @@ public:
                 mem1->copy_from(stream, *initial_mem);
             } else {
                 memory::ptr mem2 = from_primitive->output_memory_ptr();
-                to_primitive->set_output_memory(mem2);
+                to_primitive->set_output_memory(std::move(mem2));
                 from_primitive->set_output_memory(mem1);
             }
         }
@@ -27,7 +27,7 @@ private:
 
 public:
     typed_program_node(std::shared_ptr<primitive> prim, program& prog)
-        : parent(prim, prog) {
+        : parent(std::move(prim), prog) {
         init_params_list();
         can_share_buffer(false);
     }
@@ -44,11 +44,11 @@ class typed_primitive_inst;
 */
 struct primitive_impl {
     primitive_impl() = default;
-    explicit primitive_impl(std::shared_ptr<WeightsReorderParams> params, std::string kernel_name = "", bool is_dynamic = false)
+    explicit primitive_impl(const std::shared_ptr<WeightsReorderParams>& params, std::string kernel_name = "", bool is_dynamic = false)
         : _weights_reorder_params(params), _kernel_name(kernel_name), _is_dynamic(is_dynamic) {
     }
     explicit primitive_impl(std::string kernel_name, bool is_dynamic = false) :
-        primitive_impl(nullptr, kernel_name, is_dynamic) {}
+        primitive_impl(nullptr, std::move(kernel_name), is_dynamic) {}
     virtual ~primitive_impl() = default;
 
     virtual std::vector<layout> get_internal_buffer_layouts() const = 0;
@@ -502,7 +502,7 @@ protected:
 
     typed_primitive_inst_base(network& network, typed_node const& node, memory::ptr buffer)
         : typed_primitive_inst_base(network, node, false) {
-        _outputs[0] = buffer;
+        _outputs[0] = std::move(buffer);
     }
 
 private:
@@ -103,7 +103,7 @@ public:
     bool is_fused_dep(size_t dep_idx) const;
 
     bool has_fused_dep() const {
-        for (auto fused : get_fused_primitives()) {
+        for (auto& fused : get_fused_primitives()) {
             if (fused.has_outer_dep())
                 return true;
         }
@@ -113,7 +113,7 @@ public:
     int32_t get_first_fused_dep_idx() const {
         if (!has_fused_dep())
             return -1;
-        for (auto fused : get_fused_primitives()) {
+        for (auto& fused : get_fused_primitives()) {
             if (fused.has_outer_dep())
                 return fused.outer_dep_start_idx;
         }
@@ -350,7 +350,7 @@ void loop_inst::preprocess_output_memory() {
         const int64_t num_elements_iteration = sliced_layout.count() / num_elements_batch;
         const int64_t start = output_mapping.start < 0? _max_iteration - 1: output_mapping.start;
         concatenated_memory_mapping memory_mapping_info(
-            output_mapping.axis, to_mem, sliced_mems, _network.get_stream(),
+            output_mapping.axis, std::move(to_mem), sliced_mems, _network.get_stream(),
             num_elements_iteration, output_mapping.stride, start);
         memory_mapping_info.sliced_data_prim = body_network->get_primitive(internal_id);
         memory_mapping_info.concat_data_prim = get_network().get_primitive(external_id);
@@ -47,10 +47,10 @@ std::string lstm_inst::to_string(lstm_node const& node) {
     json_composite lstm_info;
     lstm_info.add("weights id", weights_id);
     lstm_info.add("recurrent id", recurrent_id);
-    lstm_info.add("bias id", bias_id);
-    lstm_info.add("peepholes id", peepholes_id);
-    lstm_info.add("initial_hidden id", initial_hidden_id);
-    lstm_info.add("initial_cell id", initial_cell_id);
+    lstm_info.add("bias id", std::move(bias_id));
+    lstm_info.add("peepholes id", std::move(peepholes_id));
+    lstm_info.add("initial_hidden id", std::move(initial_hidden_id));
+    lstm_info.add("initial_cell id", std::move(initial_cell_id));
     node_info->add("lstm info", lstm_info);
     node_info->dump(primitive_description);
 
@@ -37,10 +37,10 @@ std::string lstm_dynamic_inst::to_string(lstm_dynamic_node const& node) {
     std::stringstream primitive_description;
     json_composite lstm_dynamic_info;
     lstm_dynamic_info.add("dyn_length id", desc->dyn_length);
-    lstm_dynamic_info.add("weights id", weights_id);
+    lstm_dynamic_info.add("weights id", std::move(weights_id));
     lstm_dynamic_info.add("recurrent id", recurrent_id);
     lstm_dynamic_info.add("bias id", bias_id);
-    lstm_dynamic_info.add("initial_hidden id", initial_hidden_id);
+    lstm_dynamic_info.add("initial_hidden id", std::move(initial_hidden_id));
     lstm_dynamic_info.add("initial_cell id", initial_cell_id);
     node_info->add("lstm_dynamic info", lstm_dynamic_info);
     node_info->dump(primitive_description);
@@ -104,10 +104,10 @@ std::string lstm_dynamic_timeloop_inst::to_string(lstm_dynamic_timeloop_node con
     json_composite lstm_dynamic_input_info;
     lstm_dynamic_input_info.add("dyn_length id", desc->dyn_length);
     lstm_dynamic_input_info.add("recurrent id", desc->recurrent);
-    lstm_dynamic_input_info.add("initial cell id", initial_cell_id);
+    lstm_dynamic_input_info.add("initial cell id", std::move(initial_cell_id));
     lstm_dynamic_input_info.add("initial hidden id", initial_hidden_id);
     lstm_dynamic_input_info.add("last cell id", last_cell_id);
-    lstm_dynamic_input_info.add("last hidden id", last_hidden_id);
+    lstm_dynamic_input_info.add("last hidden id", std::move(last_hidden_id));
     lstm_dynamic_input_info.add("max seq len", node.input().get_output_layout().feature());
     lstm_dynamic_input_info.add("hidden size", node.recurrent().get_output_layout().spatial(0));
     lstm_dynamic_input_info.add("direction", node.recurrent().get_output_layout().feature());
@@ -42,7 +42,7 @@ std::string lstm_gemm_inst::to_string(lstm_gemm_node const& node) {
     json_composite lstm_gemm_info;
     lstm_gemm_info.add("weights id", weights_id);
     lstm_gemm_info.add("recurrent id", recurrent_id);
-    lstm_gemm_info.add("bias id", bias_id);
+    lstm_gemm_info.add("bias id", std::move(bias_id));
     lstm_gemm_info.add("hidden id", hidden_id);
     node_info->add("lstm gemm info", lstm_gemm_info);
     node_info->dump(primitive_description);
@@ -37,7 +37,7 @@ mutable_data_node::typed_program_node(const std::shared_ptr<mutable_data> dprim,
 }
 
 void mutable_data_node::attach_memory(memory::ptr new_mem, bool invalidate_users_if_changed) {
-    mem = new_mem;
+    mem = std::move(new_mem);
     recalc_output_layout(invalidate_users_if_changed);
 }
 
@@ -37,7 +37,7 @@ std::string mvn_inst::to_string(mvn_node const& node) {
     json_composite mvn_info;
     mvn_info.add("input id", input.id());
     mvn_info.add("epsilon", epsilon);
-    mvn_info.add("reduction axes", axes);
+    mvn_info.add("reduction axes", std::move(axes));
     mvn_info.add("normalize_variance region", normalize_variance);
     mvn_info.add("eps_inside_sqrt region", eps_inside_sqrt);
 
@@ -60,7 +60,7 @@ layout gather_nonzero_inst::calc_output_layout(gather_nonzero_node const& node,
         auto out_size = read_vector<int64_t>(impl_param.memory_deps.at(1), impl_param.get_stream());
         ov::Shape output_shape(out_size.begin(), out_size.end());
         ov::PartialShape output_pshape(output_shape);
-        return layout{output_pshape, cldnn::data_types::i32, cldnn::format::bfyx};
+        return layout{std::move(output_pshape), cldnn::data_types::i32, cldnn::format::bfyx};
     } else {
         return layout{ov::PartialShape({ov::Dimension::dynamic(), ov::Dimension::dynamic(), 1, 1}), cldnn::data_types::i32, cldnn::format::bfyx};
     }
@@ -297,7 +297,7 @@ void primitive_inst::update_shape() {
             if (_deps[i].first->get_node().is_in_shape_of_subgraph()) {
                 bool can_skip = true;
                 const auto& insts = _deps[i].first->dependant_shape_of_insts;
-                for (auto inst : insts) {
+                for (auto& inst : insts) {
                     can_skip &= !inst->shape_changed();
                 }
                 if (can_skip)
@@ -631,7 +631,7 @@ bool primitive_inst::update_impl() {
             if (!can_be_optimized()) {
                 auto& kernels_cache = get_network().get_program()->get_kernels_cache();
                 auto kernels = kernels_cache.compile(updated_params_no_dyn_pad, _impl->get_kernels_source());
-                _impl->set_kernels(kernels);
+                _impl->set_kernels(std::move(kernels));
                 cache.add(updated_params_no_dyn_pad, _impl->clone());
             }
             auto new_impl_str = _impl != nullptr ? _impl->get_kernel_name() : "nullptr";
@@ -720,7 +720,7 @@ void primitive_inst::do_runtime_in_place_concat() {
 
     std::vector<kernel_impl_params> pred_params;
     std::vector<layout> preds_layouts;
-    for (auto pred : concat_inst->_deps) {
+    for (auto& pred : concat_inst->_deps) {
         pred_params.push_back(*pred.first->_impl_params);
         preds_layouts.push_back(pred.first->_impl_params->get_output_layout());
     }
@@ -784,7 +784,7 @@ event::ptr primitive_inst::execute(const std::vector<event::ptr>& events) {
             auto& engine = _network.get_engine();
             // Need to use actual layout, not the fake aligned memory layout
             auto actual_mem = engine.reinterpret_buffer(*allocated_mem, actual_input_layout);
-            subgraph->set_input_data(d.first->id(), actual_mem);
+            subgraph->set_input_data(d.first->id(), std::move(actual_mem));
         }
     }
     GPU_DEBUG_TRACE_DETAIL << "[Start] Executing unfused subgraph of " << id() << std::endl;
@@ -993,11 +993,11 @@ primitive_inst::primitive_inst(network& network, program_node const& node, bool
         // input_0 -> input_1, ..., fused_dep_0, fused_dep1, ..., output_0, output_1, ...
         // For each tensor we save max_rank dimensions in [bfvuwzyx] order
         size_t num_dynamic_pads = 0;
-        for (auto in : _node->get_dependencies()) {
+        for (auto& in : _node->get_dependencies()) {
             const auto& dyn_pad_dims = in.first->get_output_layout(false).data_padding.get_dynamic_pad_dims().sizes();
             num_dynamic_pads += std::accumulate(dyn_pad_dims.begin(), dyn_pad_dims.end(), static_cast<int32_t>(0));
         }
-        for (auto o : _node->get_output_layouts()) {
+        for (auto& o : _node->get_output_layouts()) {
             const auto& dyn_pad_dims = o.data_padding.get_dynamic_pad_dims().sizes();
             num_dynamic_pads += std::accumulate(dyn_pad_dims.begin(), dyn_pad_dims.end(), static_cast<int32_t>(0));
         }
@@ -542,18 +542,18 @@ std::string prior_box_inst::to_string(prior_box_node const& node) {
     json_composite prior_info;
     prior_info.add("input id", node.input().id());
     prior_info.add("iamge size", desc->img_size);
-    prior_info.add("variance", str_variance);
+    prior_info.add("variance", std::move(str_variance));
 
     json_composite box_sizes_info;
-    box_sizes_info.add("min sizes", str_min_sizes);
-    box_sizes_info.add("max sizes", str_max_sizes);
+    box_sizes_info.add("min sizes", std::move(str_min_sizes));
+    box_sizes_info.add("max sizes", std::move(str_max_sizes));
     prior_info.add("box sizes", box_sizes_info);
 
     prior_info.add("aspect_ratio", str_aspect_ratio);
     prior_info.add("flip", flip);
     prior_info.add("clip", clip);
     prior_info.add("scale all sizes", scale_all_sizes);
-    prior_info.add("fixed size", str_fixed_size);
+    prior_info.add("fixed size", std::move(str_fixed_size));
     prior_info.add("fixed ratio", str_fixed_ratio);
     prior_info.add("density", str_density);
 
@@ -155,7 +155,7 @@ program::program(engine& engine_ref,
     : _engine(engine_ref),
       _stream(_engine.create_stream(config)),
       _config(config),
-      _task_executor(task_executor),
+      _task_executor(std::move(task_executor)),
      processing_order(),
      is_internal(is_internal),
      is_body_program(is_body_program) {
@@ -181,7 +181,7 @@ program::program(engine& engine_ref,
     : _engine(engine_ref),
       _stream(_engine.create_stream(config)),
       _config(config),
-      _task_executor(task_executor),
+      _task_executor(std::move(task_executor)),
      processing_order(),
      is_internal(is_internal) {
     _config.apply_user_properties(_engine.get_device_info());
@@ -822,7 +822,7 @@ void program::add_intermediate(std::shared_ptr<primitive> prim,
                                size_t prev_idx,
                                bool connect_int_node_with_old_dep,
                                bool move_usrs_of_prev_to_node) {
-    add_intermediate(get_or_create(prim), next, prev_idx, connect_int_node_with_old_dep, move_usrs_of_prev_to_node);
+    add_intermediate(get_or_create(std::move(prim)), next, prev_idx, connect_int_node_with_old_dep, move_usrs_of_prev_to_node);
 }
 
 void program::add_intermediate(program_node& node,
@@ -33,7 +33,7 @@ std::string pyramid_roi_align_inst::to_string(pyramid_roi_align_node const& node
     auto node_info = node.desc_to_json();
     std::stringstream primitive_description;
     json_composite pyramid_roi_align_info;
-    node_info->add("pyramid_roi_align_info", pyramid_roi_align_info);
+    node_info->add("pyramid_roi_align_info", std::move(pyramid_roi_align_info));
     node_info->dump(primitive_description);
     return primitive_description.str();
 }
@@ -85,7 +85,7 @@ std::string space_to_depth_inst::to_string(space_to_depth_node const& node) {
 
     json_composite space_to_depth_info;
     space_to_depth_info.add("input id", input.id());
-    space_to_depth_info.add("mode", depth_mode);
+    space_to_depth_info.add("mode", std::move(depth_mode));
     space_to_depth_info.add("block size", desc->block_size);
 
     node_info->add("space_to_depth info", space_to_depth_info);
@@ -55,7 +55,7 @@ std::vector<layout> strided_slice_inst::calc_output_layouts(strided_slice_node c
 
     std::vector<ShapeType> output_shapes;
     std::vector<ShapeType> input_shapes = {
-        input0_shape,
+        std::move(input0_shape),
         begin_shape,
         end_shape,
         strides_shape
@@ -687,7 +687,7 @@ struct base_params : public Params {
             out.SetDynamicShapeOffset(offset);
             if (out.is_dynamic()) {
                 offset += DataTensor::max_rank();
-                for (auto dim : out.GetDims()) {
+                for (auto& dim : out.GetDims()) {
                     if (dim.pad.is_dynamic)
                         offset += Tensor::Pad::NumPadOffsetsPerDim();
                 }
@@ -37,7 +37,7 @@ using namespace InferenceEngine::details;
 namespace ov {
 namespace intel_gpu {
 
-Graph::Graph(InferenceEngine::CNNNetwork& network, RemoteContextImpl::Ptr context, const ExecutionConfig& config, uint16_t stream_id,
+Graph::Graph(InferenceEngine::CNNNetwork& network, const RemoteContextImpl::Ptr& context, const ExecutionConfig& config, uint16_t stream_id,
              InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs)
     : m_context(context)
     , m_networkName(network.getName())
@@ -50,7 +50,7 @@ Graph::Graph(InferenceEngine::CNNNetwork& network, RemoteContextImpl::Ptr contex
     Build();
 }
 
-Graph::Graph(cldnn::BinaryInputBuffer &ib, RemoteContextImpl::Ptr context, const ExecutionConfig& config, uint16_t stream_id,
+Graph::Graph(cldnn::BinaryInputBuffer &ib, const RemoteContextImpl::Ptr& context, const ExecutionConfig& config, uint16_t stream_id,
              InferenceEngine::InputsDataMap* inputs, InferenceEngine::OutputsDataMap* outputs)
     : m_context(context)
     , m_config(config)
@@ -1034,7 +1034,7 @@ Blob::Ptr InferRequest::reinterpret_device_blob(Blob::Ptr data, const TensorDesc
     auto dt = DataTypeFromPrecision(new_desc.getPrecision());
     ov::PartialShape shape(new_desc.getDims());
 
-    auto l = cldnn::layout(shape, dt, format);
+    auto l = cldnn::layout(std::move(shape), dt, format);
 
     auto remote_blob = data->as<gpu::ClBlob>();
     if (!remote_blob)
@@ -780,7 +780,7 @@ void InferRequestLegacy::wait_dynamic() {
                 auto outputMemory = internal_outputs_dynamic[nb].at(outputID).get_memory();
                 Blob::Ptr bptr = _outputs[no.first];
 
-                copy_output_data(outputMemory, bptr, &batchOutputs[no.first][nb]);
+                copy_output_data(outputMemory, std::move(bptr), &batchOutputs[no.first][nb]);
             }
         }
     }
@@ -945,7 +945,7 @@ void InferRequestLegacy::allocate_inputs() {
             TensorDesc desc_fp32 = desc;
             desc_fp32.setPrecision(Precision::FP32);
             auto blobPtr = create_device_blob(desc_fp32, litr->second);
-            _deviceInputs[name] = blobPtr;
+            _deviceInputs[name] = std::move(blobPtr);
             Blob::Ptr inputBlob = create_host_blob(desc);
             _inputs[name] = inputBlob;
         } else {
@@ -1008,7 +1008,7 @@ void InferRequestLegacy::allocate_outputs() {
             device_blob_desc.setPrecision(Precision::FP32);
 
             auto host_blob = create_host_blob(desc);
-            _outputs[no.first] = host_blob;
+            _outputs[no.first] = std::move(host_blob);
             auto device_blob = create_device_blob(device_blob_desc, output_layout);
             _deviceOutputs[no.first] = device_blob;
         } else {
@@ -1036,7 +1036,7 @@ void InferRequestLegacy::allocate_outputs_dynamic() {
 
         Blob::Ptr outputBlob = create_host_blob(desc);
         _outputs[no.first] = outputBlob;
-        outputsMap[no.first] = outputID;
+        outputsMap[no.first] = std::move(outputID);
     }
 }
 
@@ -1151,7 +1151,7 @@ void InferRequestLegacy::prepare_output(const cldnn::primitive_id& outputName, B
         IE_THROW(NotAllocated) << str_output_not_allocated;
     }
     auto outputMem = impl->get_memory();
-    _nw_ptr->set_output_memory(internalName, outputMem);
+    _nw_ptr->set_output_memory(internalName, std::move(outputMem));
 }
 
 InferenceEngine::Blob::Ptr InferRequestLegacy::create_device_blob(const InferenceEngine::TensorDesc& desc, const cldnn::layout& layout) {
@@ -102,7 +102,8 @@ void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t
         const auto& indices_node = indices.get_node_shared_ptr();
         auto indices_constant = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(indices_node);
         float result = 0.f;
-        ov::op::util::get_single_value(indices_constant, result);
+        OPENVINO_ASSERT(ov::op::util::get_single_value(indices_constant, result),
+                        "Unsupported indices node in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
 
         // Set tensors for crop shape and offset
         InferenceEngine::SizeVector start_offset(input_shape.size());
@@ -140,7 +140,7 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
     const cldnn::primitive_id num_iteration_id = layerName + "_numIteration";
     {
         cldnn::mutable_data num_iteration = CreateScalarData<cldnn::mutable_data>(p, num_iteration_id, 0);
-        p.add_primitive(*op, num_iteration);
+        p.add_primitive(*op, std::move(num_iteration));
     }
 
     // set output mapping
@@ -154,7 +154,7 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
         std::string external_id;
         if (output_idx > 0) {
             cldnn::mutable_data output_data = CreateAdditionalOutputData(p, op, layerNameWithIndex, layerName, output_idx);
-            p.add_primitive(*op, output_data);
+            p.add_primitive(*op, std::move(output_data));
             external_id = layerNameWithIndex;
         } else {
             external_id = layerName;
@@ -42,7 +42,7 @@ static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::sha
     const auto tensor = tensor_from_dims(op->get_output_shape(output_idx));
     cldnn::layout output_layout = cldnn::layout(precision, format, tensor);
     auto mem = p.get_engine().allocate_memory(output_layout);
-    auto md = cldnn::mutable_data(id, {cldnn::input_info(input)}, mem); // cldnn::data cannot set dependency
+    auto md = cldnn::mutable_data(id, {cldnn::input_info(input)}, std::move(mem)); // cldnn::data cannot set dependency
     return md;
 }
 
@@ -119,7 +119,7 @@ static void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorItera
     const cldnn::primitive_id execution_condition_id = layerName + "_initialExecutionCondition";
     {
         cldnn::mutable_data execution_condition = CreateScalarData<cldnn::mutable_data>(p, execution_condition_id, 1);
-        p.add_primitive(*op, execution_condition);
+        p.add_primitive(*op, std::move(execution_condition));
     }
     const cldnn::primitive_id num_iteration_id = layerName + "_numIteration";
     {
@@ -138,7 +138,7 @@ static void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorItera
         std::string external_id;
         if (output_idx > 0) {
             cldnn::mutable_data output_data = CreateAdditionalOutputData(p, op, layerNameWithIndex, layerName, output_idx);
-            p.add_primitive(*op, output_data);
+            p.add_primitive(*op, std::move(output_data));
             external_id = layerNameWithIndex;
         } else {
             p.primitive_ids[layerNameWithIndex] = layerName;
@@ -20,7 +20,7 @@ RemoteContextImpl::RemoteContextImpl(std::string device_name, std::vector<cldnn:
     : m_va_display(nullptr)
     , m_external_queue(nullptr)
     , m_type(ContextType::OCL)
-    , m_device_name(device_name)
+    , m_device_name(std::move(device_name))
     , m_memory_cache(cache_capacity) {
     OPENVINO_ASSERT(devices.size() == 1, "[GPU] Currently context can be created for single device only");
     // TODO: Parameterize this based on plugin config and compilation options
@@ -121,7 +121,7 @@ public:
     std::vector<std::string> get_cached_kernel_ids(const std::vector<kernel::ptr>& kernels) const;
     void add_to_cached_kernels(const std::vector<kernel::ptr>& kernels);
 
-    size_t get_kernel_batch_hash(const kernel_impl_params params) const {
+    size_t get_kernel_batch_hash(const kernel_impl_params& params) const {
        if (_kernel_batch_hash.find(params) != _kernel_batch_hash.end())
            return _kernel_batch_hash.at(params);
        return 0;