[C API] Add more C API for infer request (#12636)

* [C API] Add more C API for infer request

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* [Code Style] Fix code style issue

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* Fix review comments

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* [C API] Add more detailed descriptions for infer request interfaces

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* [C API] Modify the structure of tensor list

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* [C API] Fix code style issue

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* [C API] Remove the tensor list definition

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>

* Update documents for infer request

Change-Id: I1acbae95425cfe4aad85da48d290bafe5bb7d319

* Update documents for ov_infer_request

Change-Id: Ib68530c49cf439c7b570ee63bd3ee8ea85018d51

Signed-off-by: xuejun <Xuejun.Zhai@intel.com>
Co-authored-by: River,Li <river.li@intel.com>
Xuejun Zhai 2022-09-09 15:47:46 +08:00 committed by GitHub
parent e7fe00f5f2
commit 349f3e02e0
5 changed files with 374 additions and 36 deletions


@@ -207,13 +207,13 @@ int main(int argc, char** argv) {
CHECK_STATUS(ov_compiled_model_create_infer_request(compiled_model, &infer_request));
// -------- Step 7. Prepare input --------
CHECK_STATUS(ov_infer_request_set_input_tensor(infer_request, 0, tensor));
CHECK_STATUS(ov_infer_request_set_input_tensor_by_index(infer_request, 0, tensor));
// -------- Step 8. Do inference synchronously --------
CHECK_STATUS(ov_infer_request_infer(infer_request));
// -------- Step 9. Process output
CHECK_STATUS(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
CHECK_STATUS(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
// Print classification results
size_t results_num;
results = tensor_to_infer_result(output_tensor, &results_num);


@@ -307,7 +307,7 @@ int main(int argc, char** argv) {
CHECK_STATUS(ov_infer_request_infer(infer_request));
// -------- Step 8. Process output --------
CHECK_STATUS(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
CHECK_STATUS(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
// Print classification results
size_t results_num = 0;
results = tensor_to_infer_result(output_tensor, &results_num);
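
Both samples lean on a small status-checking wrapper around every C API call. A minimal sketch of that pattern follows; the actual CHECK_STATUS macro in the samples may differ in its cleanup handling, and the err label is assumed to exist in the calling scope:

#include <stdio.h>

// Minimal sketch of the samples' status-checking pattern. OK is the success
// value of ov_status_e; the err cleanup label is assumed to exist in the
// calling scope.
#define CHECK_STATUS(call)                                                    \
    do {                                                                      \
        ov_status_e status_ = (call);                                         \
        if (status_ != OK) {                                                  \
            fprintf(stderr, "[ERROR] %s returned %d\n", #call, (int)status_); \
            goto err;                                                         \
        }                                                                     \
    } while (0)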


@@ -11,6 +11,7 @@
#pragma once
#include "openvino/c/ov_common.h"
#include "openvino/c/ov_node.h"
#include "openvino/c/ov_tensor.h"
typedef struct ov_infer_request ov_infer_request_t;
@@ -59,7 +60,7 @@ typedef struct {
*/
/**
* @brief Sets an input/output tensor to infer on.
* @brief Set an input/output tensor to infer on, by tensor name.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor_name Name of the input or output tensor.
@@ -70,21 +71,86 @@ OPENVINO_C_API(ov_status_e)
ov_infer_request_set_tensor(ov_infer_request_t* infer_request, const char* tensor_name, const ov_tensor_t* tensor);
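For instance, binding a tensor by name looks like this (a sketch; "data" is a placeholder for a tensor name that actually exists in your model):

// Bind an already-created tensor to the input named "data".
// "data" is a hypothetical name; substitute a real input/output tensor name.
ov_status_e status = ov_infer_request_set_tensor(infer_request, "data", tensor);
if (status != OK) {
    // handle the error
}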
/**
* @brief Sets an input tensor to infer on.
* @brief Set an input/output tensor on the infer request for the given port.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param idx Index of the input tensor. If @p idx is greater than the number of model inputs, an exception is thrown.
* @param port Port of the input or output tensor, which can be obtained from the ov_model_t/ov_compiled_model_t interfaces.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request, size_t idx, const ov_tensor_t* tensor);
ov_infer_request_set_tensor_by_port(ov_infer_request_t* infer_request,
const ov_output_node_t* port,
const ov_tensor_t* tensor);
/**
* @brief Gets an input/output tensor to infer on.
* @brief Set an input/output tensor on the infer request for the given const port.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor_name Name of the input or output tensor.
* @param port Const port of the input or output tensor, which can be obtained from the
* ov_model_t/ov_compiled_model_t interfaces.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_set_tensor_by_const_port(ov_infer_request_t* infer_request,
const ov_output_const_node_t* port,
const ov_tensor_t* tensor);
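A sketch of the port-based variant, assuming model is an ov_model_t* obtained earlier and its first input port is fetched with ov_model_const_input (verify the accessor name and the matching port-free function in ov_model.h/ov_node.h for your version):

// Fetch the first input port of the model and bind the tensor through it.
// ov_model_const_input is an assumption; check ov_model.h for the exact API.
ov_output_const_node_t* input_port = NULL;
CHECK_STATUS(ov_model_const_input(model, &input_port));
CHECK_STATUS(ov_infer_request_set_tensor_by_const_port(infer_request, input_port, tensor));
// Release input_port with the matching free function from ov_node.h when done.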
/**
* @brief Set an input tensor to infer on, by the index of the input.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param idx Index of the input port. If @p idx is greater than the number of model inputs, an error will be returned.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_set_input_tensor_by_index(ov_infer_request_t* infer_request,
const size_t idx,
const ov_tensor_t* tensor);
/**
* @brief Set the input tensor for a model with a single input.
* @note If the model has several inputs, an error will be returned.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor);
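The two input-setting variants side by side, sketched with the CHECK_STATUS pattern from the samples:

// Multi-input model: address the input explicitly by its index.
CHECK_STATUS(ov_infer_request_set_input_tensor_by_index(infer_request, 0, tensor));

// Single-input model: no index needed; errors out if there are several inputs.
CHECK_STATUS(ov_infer_request_set_input_tensor(infer_request, tensor));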
/**
* @brief Set an output tensor to infer on, by the index of the output.
* @note The index of the output is preserved across ov_model_t and ov_compiled_model_t.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param idx Index of the output tensor.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_set_output_tensor_by_index(ov_infer_request_t* infer_request,
const size_t idx,
const ov_tensor_t* tensor);
/**
* @brief Set the output tensor for a model with a single output.
* @note If the model has several outputs, an error will be returned.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_set_output_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor);
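Pre-binding an output tensor makes inference write results into caller-owned memory. A sketch; the dims, element type, and the ov_shape_create/ov_tensor_create signatures are assumptions, so check ov_shape.h and ov_tensor.h in your version:

// Pre-allocate the output tensor and hand it to the request, so inference
// writes into it directly. Element type and shape must match the model output.
int64_t dims[2] = {1, 1000};  // hypothetical output dimensions
ov_shape_t output_shape;
CHECK_STATUS(ov_shape_create(2, dims, &output_shape));
ov_tensor_t* output_tensor = NULL;
CHECK_STATUS(ov_tensor_create(F32, output_shape, &output_tensor));
CHECK_STATUS(ov_infer_request_set_output_tensor_by_index(infer_request, 0, output_tensor));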
/**
* @brief Get an input/output tensor by tensor name.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor_name Name of the input or output tensor to get.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
@@ -92,18 +158,82 @@ OPENVINO_C_API(ov_status_e)
ov_infer_request_get_tensor(const ov_infer_request_t* infer_request, const char* tensor_name, ov_tensor_t** tensor);
/**
* @brief Gets an output tensor to infer on.
* @brief Get an input/output tensor by const port.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param idx Index of the tensor to get.
* @param port Port of the tensor to get. If @p port is not found, an error will be returned.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_get_output_tensor(const ov_infer_request_t* infer_request, size_t idx, ov_tensor_t** tensor);
ov_infer_request_get_tensor_by_const_port(const ov_infer_request_t* infer_request,
const ov_output_const_node_t* port,
ov_tensor_t** tensor);
/**
* @brief Infers specified input(s) in synchronous mode.
* @brief Get an input/output tensor by port.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param port Port of the tensor to get. If @p port is not found, an error will be returned.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_get_tensor_by_port(const ov_infer_request_t* infer_request,
const ov_output_node_t* port,
ov_tensor_t** tensor);
/**
* @brief Get an input tensor by the index of the input.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param idx Index of the tensor to get. If the tensor with the specified @p idx is not found, an error
* will be returned.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_get_input_tensor_by_index(const ov_infer_request_t* infer_request,
const size_t idx,
ov_tensor_t** tensor);
/**
* @brief Get the input tensor from a model with a single input.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_get_input_tensor(const ov_infer_request_t* infer_request, ov_tensor_t** tensor);
/**
* @brief Get an output tensor by the index of the output.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param idx Index of the tensor to get. If the tensor with the specified @p idx is not found, an error
* will be returned.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_get_output_tensor_by_index(const ov_infer_request_t* infer_request,
const size_t idx,
ov_tensor_t** tensor);
/**
* @brief Get the output tensor from a model with a single output.
* @note If the model has several outputs, an error will be returned.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param tensor Reference to the tensor.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e)
ov_infer_request_get_output_tensor(const ov_infer_request_t* infer_request, ov_tensor_t** tensor);
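After inference, the raw buffer behind a returned tensor can be read through ov_tensor_data (a sketch; the float cast is an assumption about the model's output element type):

// Retrieve output 0 and read its raw buffer. The float cast assumes an f32
// output; adapt to your model's actual element type.
ov_tensor_t* output_tensor = NULL;
void* data = NULL;
CHECK_STATUS(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
CHECK_STATUS(ov_tensor_data(output_tensor, &data));
const float* scores = (const float*)data;
// ... consume scores ...
ov_tensor_free(output_tensor);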
/**
* @brief Infer specified input(s) in synchronous mode.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @return Status code of the operation: OK(0) for success.
@@ -111,7 +241,7 @@ ov_infer_request_get_output_tensor(const ov_infer_request_t* infer_request, siz
OPENVINO_C_API(ov_status_e) ov_infer_request_infer(ov_infer_request_t* infer_request);
/**
* @brief Cancels inference request.
* @brief Cancel inference request.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @return Status code of the operation: OK(0) for success.
@@ -119,7 +249,7 @@ OPENVINO_C_API(ov_status_e) ov_infer_request_infer(ov_infer_request_t* infer_req
OPENVINO_C_API(ov_status_e) ov_infer_request_cancel(ov_infer_request_t* infer_request);
/**
* @brief Starts inference of specified input(s) in asynchronous mode.
* @brief Start inference of specified input(s) in asynchronous mode.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @return Status code of the operation: OK(0) for success.
@@ -127,7 +257,7 @@ OPENVINO_C_API(ov_status_e) ov_infer_request_cancel(ov_infer_request_t* infer_re
OPENVINO_C_API(ov_status_e) ov_infer_request_start_async(ov_infer_request_t* infer_request);
/**
* @brief Waits for the result to become available. Blocks until the result
* @brief Wait for the result to become available.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @return Status code of the operation: OK(0) for success.
@@ -135,7 +265,17 @@ OPENVINO_C_API(ov_status_e) ov_infer_request_start_async(ov_infer_request_t* inf
OPENVINO_C_API(ov_status_e) ov_infer_request_wait(ov_infer_request_t* infer_request);
/**
* @brief Waits for the result to become available. Blocks until the result
* @brief Waits for the result to become available. Blocks until the specified timeout has elapsed or the result
* becomes available, whichever comes first.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param timeout Maximum duration, in milliseconds, to block for.
* @return Status code of the operation: OK(0) for success.
*/
OPENVINO_C_API(ov_status_e) ov_infer_request_wait_for(ov_infer_request_t* infer_request, const int64_t timeout);
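A sketch of the asynchronous round trip with a bounded wait; note that the implementation below maps a timeout to OK as well, so a timed-out wait is not distinguishable through the status code alone:

// Start inference asynchronously and block for at most 1000 ms.
CHECK_STATUS(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor));
CHECK_STATUS(ov_infer_request_start_async(infer_request));
CHECK_STATUS(ov_infer_request_wait_for(infer_request, 1000));
CHECK_STATUS(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));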
/**
* @brief Set a callback function, which will be called when inference is done.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param callback A function to be called.
@@ -152,7 +292,7 @@ ov_infer_request_set_callback(ov_infer_request_t* infer_request, const ov_callba
OPENVINO_C_API(void) ov_infer_request_free(ov_infer_request_t* infer_request);
/**
* @brief Queries performance measures per layer to identify the most time consuming operation.
* @brief Query performance measures per layer to identify the most time consuming operation.
* @ingroup infer_request
* @param infer_request A pointer to the ov_infer_request_t.
* @param profiling_infos Vector of profiling information for operations in a model.
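Completion callbacks follow the pattern used in the tests further down: fill an ov_callback_t with a function pointer and an args pointer, register it, then start the request asynchronously. A sketch; the callback_func/args field names follow the ov_callback_t definition in your headers:

// Callback invoked when inference completes; args carries the request itself.
void on_infer_done(void* args) {
    ov_infer_request_t* req = (ov_infer_request_t*)args;
    ov_tensor_t* out = NULL;
    if (ov_infer_request_get_output_tensor_by_index(req, 0, &out) == OK) {
        // ... consume the output ...
        ov_tensor_free(out);
    }
}

// Registration, mirroring the test code below.
ov_callback_t callback;
callback.callback_func = on_infer_done;
callback.args = infer_request;
CHECK_STATUS(ov_infer_request_set_callback(infer_request, &callback));
CHECK_STATUS(ov_infer_request_start_async(infer_request));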


@@ -25,9 +25,39 @@ ov_status_e ov_infer_request_set_tensor(ov_infer_request_t* infer_request,
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request,
size_t idx,
const ov_tensor_t* tensor) {
ov_status_e ov_infer_request_set_tensor_by_port(ov_infer_request_t* infer_request,
const ov_output_node_t* port,
const ov_tensor_t* tensor) {
if (!infer_request || !port || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
infer_request->object->set_tensor(*port->object, *tensor->object);
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_tensor_by_const_port(ov_infer_request_t* infer_request,
const ov_output_const_node_t* port,
const ov_tensor_t* tensor) {
if (!infer_request || !port || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
infer_request->object->set_tensor(*port->object, *tensor->object);
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_input_tensor_by_index(ov_infer_request_t* infer_request,
const size_t idx,
const ov_tensor_t* tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
@@ -40,6 +70,47 @@ ov_status_e ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request,
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_input_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
infer_request->object->set_input_tensor(*tensor->object);
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_output_tensor_by_index(ov_infer_request_t* infer_request,
const size_t idx,
const ov_tensor_t* tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
infer_request->object->set_output_tensor(idx, *tensor->object);
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_output_tensor(ov_infer_request_t* infer_request, const ov_tensor_t* tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
infer_request->object->set_output_tensor(*tensor->object);
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_tensor(const ov_infer_request_t* infer_request,
const char* tensor_name,
ov_tensor_t** tensor) {
@@ -58,9 +129,79 @@ ov_status_e ov_infer_request_get_tensor(const ov_infer_request_t* infer_request,
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_output_tensor(const ov_infer_request_t* infer_request,
size_t idx,
ov_tensor_t** tensor) {
ov_status_e ov_infer_request_get_tensor_by_const_port(const ov_infer_request_t* infer_request,
const ov_output_const_node_t* port,
ov_tensor_t** tensor) {
if (!infer_request || !port || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
std::unique_ptr<ov_tensor_t> _tensor(new ov_tensor_t);
ov::Tensor tensor_get = infer_request->object->get_tensor(*port->object);
_tensor->object = std::make_shared<ov::Tensor>(std::move(tensor_get));
*tensor = _tensor.release();
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_tensor_by_port(const ov_infer_request_t* infer_request,
const ov_output_node_t* port,
ov_tensor_t** tensor) {
if (!infer_request || !port || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
std::unique_ptr<ov_tensor_t> _tensor(new ov_tensor_t);
ov::Tensor tensor_get = infer_request->object->get_tensor(*port->object);
_tensor->object = std::make_shared<ov::Tensor>(std::move(tensor_get));
*tensor = _tensor.release();
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_input_tensor_by_index(const ov_infer_request_t* infer_request,
const size_t idx,
ov_tensor_t** tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
std::unique_ptr<ov_tensor_t> _tensor(new ov_tensor_t);
ov::Tensor tensor_get = infer_request->object->get_input_tensor(idx);
_tensor->object = std::make_shared<ov::Tensor>(std::move(tensor_get));
*tensor = _tensor.release();
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_input_tensor(const ov_infer_request_t* infer_request, ov_tensor_t** tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
std::unique_ptr<ov_tensor_t> _tensor(new ov_tensor_t);
ov::Tensor tensor_get = infer_request->object->get_input_tensor();
_tensor->object = std::make_shared<ov::Tensor>(std::move(tensor_get));
*tensor = _tensor.release();
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_output_tensor_by_index(const ov_infer_request_t* infer_request,
const size_t idx,
ov_tensor_t** tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
@@ -76,6 +217,22 @@ ov_status_e ov_infer_request_get_output_tensor(const ov_infer_request_t* infer_r
return ov_status_e::OK;
}
ov_status_e ov_infer_request_get_output_tensor(const ov_infer_request_t* infer_request, ov_tensor_t** tensor) {
if (!infer_request || !tensor) {
return ov_status_e::INVALID_C_PARAM;
}
try {
std::unique_ptr<ov_tensor_t> _tensor(new ov_tensor_t);
ov::Tensor tensor_get = infer_request->object->get_output_tensor();
_tensor->object = std::make_shared<ov::Tensor>(std::move(tensor_get));
*tensor = _tensor.release();
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_infer(ov_infer_request_t* infer_request) {
if (!infer_request) {
return ov_status_e::INVALID_C_PARAM;
@@ -128,6 +285,19 @@ ov_status_e ov_infer_request_wait(ov_infer_request_t* infer_request) {
return ov_status_e::OK;
}
ov_status_e ov_infer_request_wait_for(ov_infer_request_t* infer_request, const int64_t timeout) {
if (!infer_request) {
return ov_status_e::INVALID_C_PARAM;
}
try {
infer_request->object->wait_for(std::chrono::milliseconds(timeout));
}
CATCH_OV_EXCEPTIONS
return ov_status_e::OK;
}
ov_status_e ov_infer_request_set_callback(ov_infer_request_t* infer_request, const ov_callback_t* callback) {
if (!infer_request || !callback) {
return ov_status_e::INVALID_C_PARAM;


@@ -197,8 +197,24 @@ TEST_P(ov_infer_request, set_tensor) {
OV_EXPECT_OK(ov_infer_request_set_tensor(infer_request, in_tensor_name, input_tensor));
}
TEST_P(ov_infer_request, set_input_tensor_by_index) {
OV_EXPECT_OK(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor));
}
TEST_P(ov_infer_request, set_input_tensor) {
OV_EXPECT_OK(ov_infer_request_set_input_tensor(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_set_input_tensor(infer_request, input_tensor));
}
TEST_P(ov_infer_request, set_output_tensor_by_index) {
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
EXPECT_NE(nullptr, output_tensor);
OV_EXPECT_OK(ov_infer_request_set_output_tensor_by_index(infer_request, 0, output_tensor));
}
TEST_P(ov_infer_request, set_output_tensor) {
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
EXPECT_NE(nullptr, output_tensor);
OV_EXPECT_OK(ov_infer_request_set_output_tensor(infer_request, output_tensor));
}
TEST_P(ov_infer_request, set_tensor_error_handling) {
@@ -212,8 +228,20 @@ TEST_P(ov_infer_request, get_tensor) {
EXPECT_NE(nullptr, input_tensor);
}
TEST_P(ov_infer_request, get_out_tensor) {
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
TEST_P(ov_infer_request, get_input_tensor_by_index) {
OV_EXPECT_OK(ov_infer_request_get_input_tensor_by_index(infer_request, 0, &output_tensor));
}
TEST_P(ov_infer_request, get_input_tensor) {
OV_EXPECT_OK(ov_infer_request_get_input_tensor(infer_request, &output_tensor));
}
TEST_P(ov_infer_request, get_output_tensor_by_index) {
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
}
TEST_P(ov_infer_request, get_output_tensor) {
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, &output_tensor));
}
TEST_P(ov_infer_request, get_tensor_error_handling) {
@@ -246,11 +274,11 @@ TEST_P(ov_infer_request, cancel) {
}
TEST_P(ov_infer_request_ppp, infer_ppp) {
OV_EXPECT_OK(ov_infer_request_set_input_tensor(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_infer(infer_request));
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
EXPECT_NE(nullptr, output_tensor);
}
@@ -259,27 +287,27 @@ TEST(ov_infer_request, infer_error_handling) {
}
TEST_P(ov_infer_request, infer_async) {
OV_EXPECT_OK(ov_infer_request_set_input_tensor(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_start_async(infer_request));
if (!HasFatalFailure()) {
OV_EXPECT_OK(ov_infer_request_wait(infer_request));
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
EXPECT_NE(nullptr, output_tensor);
}
}
TEST_P(ov_infer_request_ppp, infer_async_ppp) {
OV_EXPECT_OK(ov_infer_request_set_input_tensor(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_start_async(infer_request));
if (!HasFatalFailure()) {
OV_EXPECT_OK(ov_infer_request_wait(infer_request));
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
EXPECT_NE(nullptr, output_tensor);
}
}
@@ -288,7 +316,7 @@ inline void infer_request_callback(void* args) {
ov_infer_request_t* infer_request = (ov_infer_request_t*)args;
ov_tensor_t* out_tensor = nullptr;
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, 0, &out_tensor));
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &out_tensor));
EXPECT_NE(nullptr, out_tensor);
ov_tensor_free(out_tensor);
@@ -299,7 +327,7 @@ inline void infer_request_callback(void* args) {
}
TEST_P(ov_infer_request, infer_request_set_callback) {
OV_EXPECT_OK(ov_infer_request_set_input_tensor(infer_request, 0, input_tensor));
OV_EXPECT_OK(ov_infer_request_set_input_tensor_by_index(infer_request, 0, input_tensor));
ov_callback_t callback;
callback.callback_func = infer_request_callback;
@@ -323,7 +351,7 @@ TEST_P(ov_infer_request, get_profiling_info) {
OV_EXPECT_OK(ov_infer_request_infer(infer_request));
OV_EXPECT_OK(ov_infer_request_get_output_tensor(infer_request, 0, &output_tensor));
OV_EXPECT_OK(ov_infer_request_get_output_tensor_by_index(infer_request, 0, &output_tensor));
EXPECT_NE(nullptr, output_tensor);
ov_profiling_info_list_t profiling_infos;