// OpenVINO Inference Engine documentation snippet: asynchronous inference
// with multiple infer requests (extraction/blame residue removed).
#include <vector>

#include <inference_engine.hpp>
int main() {
|
2020-10-10 11:19:16 +03:00
|
|
|
|
InferenceEngine::Core core;
|
|
|
|
|
|
InferenceEngine::IInferRequest::CompletionCallback callback;
|
2020-09-22 18:01:48 +03:00
|
|
|
|
int numRequests = 42;
|
|
|
|
|
|
int i = 1;
|
|
|
|
|
|
auto network = core.ReadNetwork("sample.xml");
|
|
|
|
|
|
auto executable_network = core.LoadNetwork(network, "CPU");
|
|
|
|
|
|
//! [part0]
|
|
|
|
|
|
struct Request {
|
|
|
|
|
|
InferenceEngine::InferRequest::Ptr inferRequest;
|
|
|
|
|
|
int frameidx;
|
|
|
|
|
|
};
|
|
|
|
|
|
//! [part0]
|
|
|
|
|
|
|
|
|
|
|
|
//! [part1]
|
|
|
|
|
|
// numRequests is the number of frames (max size, equal to the number of VPUs in use)
|
|
|
|
|
|
std::vector<Request> request(numRequests);
|
|
|
|
|
|
//! [part1]
|
|
|
|
|
|
|
|
|
|
|
|
//! [part2]
|
|
|
|
|
|
// initialize infer request pointer – Consult IE API for more detail.
|
|
|
|
|
|
request[i].inferRequest = executable_network.CreateInferRequestPtr();
|
|
|
|
|
|
//! [part2]
|
|
|
|
|
|
|
|
|
|
|
|
//! [part3]
|
|
|
|
|
|
// Run inference
|
|
|
|
|
|
request[i].inferRequest->StartAsync();
|
|
|
|
|
|
//! [part3]
|
|
|
|
|
|
|
|
|
|
|
|
//! [part4]
|
2020-10-10 11:19:16 +03:00
|
|
|
|
request[i].inferRequest->SetCompletionCallback(callback);
|
2020-09-22 18:01:48 +03:00
|
|
|
|
//! [part4]
|
|
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
}