[AUTO] Fix static code scan issue (#19295)
* fix scan issue
  Signed-off-by: fishbell <bell.song@intel.com>
* clang
  Signed-off-by: fishbell <bell.song@intel.com>
* clang
  Signed-off-by: fishbell <bell.song@intel.com>
---------
Signed-off-by: fishbell <bell.song@intel.com>
Co-authored-by: Chen Peter <peter.chen@intel.com>
parent 6deca48413
commit 498731f8fd
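All four hunks below apply the same class of fix flagged by the scanner: values that can be moved (an AutoImmediateExecutor::Ptr, two std::exception_ptr callback arguments, and a std::string name) were being copied at their last use and now go through std::move; the longer lines are also rewrapped to satisfy clang-format.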
@@ -74,7 +74,9 @@ struct WorkerInferRequest {
 };
 struct ThisRequestExecutor : public ov::threading::ITaskExecutor {
-    explicit ThisRequestExecutor(WorkerInferRequest** ptr, AutoImmediateExecutor::Ptr executor = nullptr): m_workptrptr{ptr}, m_fallback_exec(executor) {}
+    explicit ThisRequestExecutor(WorkerInferRequest** ptr, AutoImmediateExecutor::Ptr executor = nullptr):
+        m_workptrptr{ptr},
+        m_fallback_exec(std::move(executor)) {}
     void run(ov::threading::Task task) override {
         (*m_workptrptr)->m_task = std::move(task);
         (*m_workptrptr)->m_fallback_exec = m_fallback_exec;
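For context (not part of the commit), the constructor change is the standard sink-parameter idiom: the smart pointer is taken by value and then moved into the member, so the only reference-count bump happens at the call site. A minimal standalone sketch, using a hypothetical Executor type rather than the real AutoImmediateExecutor:

    #include <memory>
    #include <utility>

    struct Executor {};  // hypothetical stand-in for AutoImmediateExecutor

    struct RequestExecutor {
        // Sink parameter: accept by value, then move into the member.
        explicit RequestExecutor(std::shared_ptr<Executor> executor = nullptr)
            : m_executor(std::move(executor)) {}  // move instead of copy
        std::shared_ptr<Executor> m_executor;
    };

    int main() {
        auto exec = std::make_shared<Executor>();
        RequestExecutor r{std::move(exec)};  // moving a shared_ptr never touches the refcount
    }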
@@ -102,7 +102,7 @@ void Schedule::generate_workers(const std::string& device, const SoCompiledModel
     worker_request.m_inferrequest->set_callback(
         [worker_request_ptr, this, device, idle_workerrequests_ptr](std::exception_ptr exception_ptr) mutable {
             IdleGuard<NotBusyPriorityWorkerRequests> idleGuard{worker_request_ptr, *idle_workerrequests_ptr};
-            worker_request_ptr->m_exception_ptr = exception_ptr;
+            worker_request_ptr->m_exception_ptr = std::move(exception_ptr);
             {
                 auto stop_retry_and_continue = [worker_request_ptr]() {
                     auto captured_task = std::move(worker_request_ptr->m_task);
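A std::exception_ptr typically behaves like a shared-ownership handle to the in-flight exception, so copying it can touch an atomic reference count; since exception_ptr is not read again inside the callback, moving it is free. A minimal standalone sketch of the same pattern (g_last_error and on_done are hypothetical names, not the scheduler code):

    #include <exception>
    #include <stdexcept>
    #include <utility>

    std::exception_ptr g_last_error;  // hypothetical storage slot

    // Callback-style handler: the parameter arrives by value and is consumed,
    // so moving it into storage avoids an extra refcount increment/decrement.
    void on_done(std::exception_ptr eptr) {
        g_last_error = std::move(eptr);
    }

    int main() {
        try {
            throw std::runtime_error("boom");
        } catch (...) {
            on_done(std::current_exception());  // capture the in-flight exception
        }
    }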
@@ -154,7 +154,7 @@ Pipeline Schedule::get_async_pipeline(const ISyncInferPtr& infer_request, Worker
         : m_inferrequest(infer_request),
           m_worker(worker) {
         m_inferrequest->set_callback([this](std::exception_ptr exceptionPtr) mutable {
-            m_exceptionptr = exceptionPtr;
+            m_exceptionptr = std::move(exceptionPtr);
             auto capturedTask = std::move(m_task);
             capturedTask();
             INFO_RUN([&]() {
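Same one-line fix as the previous hunk, applied to the per-request callback in get_async_pipeline: exceptionPtr is not used again after the assignment, so the copy can safely become a move.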
@@ -52,7 +52,11 @@ void SyncInferRequest::share_tensors_with_batched_req(const std::set<std::string
         auto batched_tensor = m_batched_request_wrapper->_infer_request_batched->get_tensor(it);
         if (!batched_tensor._so)
             batched_tensor._so = m_batched_request_wrapper->_infer_request_batched._so;
-        res = create_shared_tensor_on_batched_tensor(batched_tensor, name, batched_inputs, m_batch_id, m_batch_size);
+        res = create_shared_tensor_on_batched_tensor(batched_tensor,
+                                                     std::move(name),
+                                                     batched_inputs,
+                                                     m_batch_id,
+                                                     m_batch_size);
         set_tensor(it, res);
     }
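Here the flagged copy is the std::string argument: passing name by copy costs an allocation, while std::move hands the buffer over (safe on the assumption, which the new code implies, that name is not read again after the call). A minimal sketch of the idiom with a hypothetical make_key helper:

    #include <string>
    #include <utility>

    // Hypothetical helper: takes the name by value (a sink parameter).
    std::string make_key(std::string name, std::size_t batch_id) {
        return std::move(name) + "#" + std::to_string(batch_id);
    }

    int main() {
        std::string name = "input_0";
        auto key = make_key(std::move(name), 3);  // buffer is moved, not copied
        // 'name' is now in a valid but unspecified state and must not be read.
    }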
@@ -62,7 +66,11 @@ void SyncInferRequest::share_tensors_with_batched_req(const std::set<std::string
         auto batched_tensor = m_batched_request_wrapper->_infer_request_batched->get_tensor(it);
         if (!batched_tensor._so)
             batched_tensor._so = m_batched_request_wrapper->_infer_request_batched._so;
-        res = create_shared_tensor_on_batched_tensor(batched_tensor, name, batched_outputs, m_batch_id, m_batch_size);
+        res = create_shared_tensor_on_batched_tensor(batched_tensor,
+                                                     std::move(name),
+                                                     batched_outputs,
+                                                     m_batch_id,
+                                                     m_batch_size);
         set_tensor(it, res);
     }
 }
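This hunk is identical to the inputs hunk above, applied to the output path (batched_outputs instead of batched_inputs).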