[Fixing]Fix some Coverity scan issues (#21378)

* Fix some Coverity scan issues

* Fix an error
This commit is contained in:
River Li 2023-11-30 18:39:55 +08:00 committed by GitHub
parent 405d97e4a5
commit ec239b3d5f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 13 additions and 13 deletions

View File

@ -477,7 +477,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model_impl(const std::string
auto& rt_info = input.get_rt_info();
auto it = rt_info.find("ie_legacy_td");
if (it != rt_info.end()) {
auto td = it->second.as<InferenceEngine::TensorDesc>();
auto& td = it->second.as<InferenceEngine::TensorDesc>();
auto element_type = InferenceEngine::details::convertPrecision(td.getPrecision());
if (element_type != input.get_element_type()) {
preproc.input(i).tensor().set_element_type(element_type);
@ -499,7 +499,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model_impl(const std::string
auto& rt_info = output.get_rt_info();
auto it = rt_info.find("ie_legacy_td");
if (it != rt_info.end()) {
auto td = it->second.as<InferenceEngine::TensorDesc>();
auto& td = it->second.as<InferenceEngine::TensorDesc>();
auto element_type = InferenceEngine::details::convertPrecision(td.getPrecision());
if (element_type != output.get_element_type()) {
preproc.output(i).tensor().set_element_type(element_type);
@ -545,8 +545,8 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model_impl(const std::string
auto_s_context->m_model = cloned_model;
auto_s_context->m_model_path = model_path;
auto_s_context->m_device_priorities = support_devices;
auto_s_context->m_device_priorities_initial = support_devices;
auto_s_context->m_str_devices = str_devices;
auto_s_context->m_device_priorities_initial = std::move(support_devices);
auto_s_context->m_str_devices = std::move(str_devices);
auto_s_context->m_plugin = shared_from_this();
auto_s_context->m_ov_core = get_core();
OPENVINO_ASSERT(auto_s_context->m_ov_core);
@ -602,7 +602,7 @@ ov::SupportedOpsMap Plugin::query_model(const std::shared_ptr<const ov::Model>&
device_supported_layers.emplace(layer_qm.first);
}
supported_layers = supported_layers.empty()
? device_supported_layers : (device_supported_layers.empty()
? std::move(device_supported_layers) : (device_supported_layers.empty()
? supported_layers : inter_section(supported_layers, device_supported_layers));
}
for (auto&& iter : supported_layers) {

View File

@ -77,7 +77,7 @@ ov::SoPtr<ov::IRemoteContext> Plugin::create_context(const ov::AnyMap& remote_pr
if (it == full_properties.end())
OPENVINO_THROW("Value for ov::device::priorities is not set");
auto val = it->second.as<std::string>();
auto& val = it->second.as<std::string>();
auto metaDevice = parse_meta_device(val, ov::AnyMap());
full_properties.erase(it);
return get_core()->create_context(metaDevice.device_name, full_properties);
@ -308,7 +308,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model(const std::shared_ptr<
auto& rt_info = input.get_rt_info();
auto it = rt_info.find("ie_legacy_td");
if (it != rt_info.end()) {
auto td = it->second.as<InferenceEngine::TensorDesc>();
auto& td = it->second.as<InferenceEngine::TensorDesc>();
rt_info["ie_legacy_td"] =
InferenceEngine::TensorDesc(td.getPrecision(), input.get_shape(), td.getLayout());
}
@ -318,7 +318,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model(const std::shared_ptr<
auto& rt_info = output.get_rt_info();
auto it = rt_info.find("ie_legacy_td");
if (it != rt_info.end()) {
auto td = it->second.as<InferenceEngine::TensorDesc>();
auto& td = it->second.as<InferenceEngine::TensorDesc>();
rt_info["ie_legacy_td"] =
InferenceEngine::TensorDesc(td.getPrecision(), output.get_shape(), td.getLayout());
}

View File

@ -275,8 +275,8 @@ void Engine::apply_performance_hints(ov::AnyMap& config, const std::shared_ptr<o
const auto& perf_hint = config.find(ov::hint::performance_mode.name());
/* performance hints set for the network have higher priority than engine ones.
* This applies for all the configuration parameters */
const auto perf_hint_name =
(perf_hint != config.end()) ? perf_hint->second.as<std::string>() : ov::util::to_string(engConfig.hintPerfMode);
const auto& perf_hint_name = (perf_hint != config.end()) ? perf_hint->second.as<std::string>()
: ov::util::to_string(engConfig.hintPerfMode);
return perf_hint_name;
};
@ -790,7 +790,7 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio
supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
return decltype(ov::supported_properties)::value_type(supportedProperties);
return decltype(ov::supported_properties)::value_type(std::move(supportedProperties));
} else if (ov::internal::supported_properties == name) {
return decltype(ov::internal::supported_properties)::value_type{
ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO},
@ -811,7 +811,7 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio
capabilities.push_back(ov::device::capability::INT8);
capabilities.push_back(ov::device::capability::BIN);
capabilities.push_back(ov::device::capability::EXPORT_IMPORT);
return decltype(ov::device::capabilities)::value_type(capabilities);
return decltype(ov::device::capabilities)::value_type(std::move(capabilities));
} else if (name == ov::range_for_async_infer_requests) {
const std::tuple<unsigned int, unsigned int, unsigned int> range = std::make_tuple(1, 1, 1);
return decltype(ov::range_for_async_infer_requests)::value_type(range);
@ -820,7 +820,7 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio
return decltype(ov::range_for_streams)::value_type(range);
} else if (name == ov::internal::caching_properties) {
std::vector<ov::PropertyName> cachingProperties = { ov::device::full_name };
return decltype(ov::internal::caching_properties)::value_type(cachingProperties);
return decltype(ov::internal::caching_properties)::value_type(std::move(cachingProperties));
} else if (name == ov::intel_cpu::denormals_optimization) {
return decltype(ov::intel_cpu::denormals_optimization)::value_type(engConfig.denormalsOptMode == Config::DenormalsOptMode::DO_On);
} else if (name == ov::intel_cpu::sparse_weights_decompression_rate) {