[IE PYTHON] FIX InputInfoCPtr, DataPtr and CDataPtr deallocation (#5730)

* Link ExecutableNetwork to InputInfo and Data

* Add tests

* Skip test_exec_graph_info_deallocation on ARM plugin
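
The bug being fixed: Python wrapper objects such as InputInfoCPtr, DataPtr and CDataPtr could outlive the ExecutableNetwork (and the plugin) whose data they point into, so deleting the core and the executable network first and the wrapper last caused a segfault (issue 28996). A minimal repro mirroring the new tests in this commit; the model paths and device name are placeholders:

from openvino.inference_engine import IECore

ie_core = IECore()
net = ie_core.read_network(model="model.xml", weights="model.bin")  # placeholder paths
exec_net = ie_core.load_network(net, "CPU")                         # placeholder device
input_info = exec_net.input_info["data"]  # wrapper over plugin-owned InputInfo

# Before this fix, releasing the core and the executable network first left
# `input_info` pointing into an unloaded plugin, so this `del` could segfault.
del ie_core
del exec_net
del input_info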
Alexey Lebedev authored on 2021-05-31 12:25:13 +03:00, committed by GitHub
parent 090dde93b8
commit 69b052c8ca
7 changed files with 87 additions and 14 deletions


@@ -2,7 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
from .cimport ie_api_impl_defs as C
from .ie_api_impl_defs cimport CBlob, CTensorDesc, InputInfo, CPreProcessChannel, CPreProcessInfo
from .ie_api_impl_defs cimport CBlob, CTensorDesc, InputInfo, CPreProcessChannel, CPreProcessInfo, CExecutableNetwork
import os
@@ -43,6 +43,7 @@ cdef class InferRequest:
cdef class IENetwork:
cdef C.IENetwork impl
cdef shared_ptr[CExecutableNetwork] _ptr_plugin
cdef class ExecutableNetwork:
cdef unique_ptr[C.IEExecNetwork] impl
@@ -64,9 +65,11 @@ cdef class IECore:
cdef class DataPtr:
cdef C.DataPtr _ptr
cdef C.IENetwork * _ptr_network
cdef shared_ptr[CExecutableNetwork] _ptr_plugin
cdef class CDataPtr:
cdef C.CDataPtr _ptr
cdef shared_ptr[CExecutableNetwork] _ptr_plugin
cdef class TensorDesc:
cdef C.CTensorDesc impl
@@ -77,6 +80,7 @@ cdef class InputInfoPtr:
cdef class InputInfoCPtr:
cdef InputInfo.CPtr _ptr
cdef shared_ptr[CExecutableNetwork] _ptr_plugin
cdef class PreProcessInfo:
cdef CPreProcessInfo* _ptr


@@ -751,6 +751,7 @@ cdef class InputInfoCPtr:
cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData()
data_ptr = DataPtr()
data_ptr._ptr = c_data_ptr
data_ptr._ptr_plugin = self._ptr_plugin
return data_ptr
## tensor_desc of this input
@@ -918,6 +919,7 @@ cdef class ExecutableNetwork:
for in_ in c_inputs:
input_info_ptr = InputInfoCPtr()
input_info_ptr._ptr = in_.second
input_info_ptr._ptr_plugin = deref(self.impl).getPluginLink()
inputs[in_.first.decode()] = input_info_ptr
return inputs
@@ -937,6 +939,7 @@ cdef class ExecutableNetwork:
for in_ in c_inputs:
data_ptr = DataPtr()
data_ptr._ptr = in_.second
data_ptr._ptr_plugin = deref(self.impl).getPluginLink()
inputs[in_.first.decode()] = data_ptr
return inputs
@@ -949,6 +952,7 @@ cdef class ExecutableNetwork:
for in_ in c_outputs:
data_ptr = CDataPtr()
data_ptr._ptr = in_.second
data_ptr._ptr_plugin = deref(self.impl).getPluginLink()
outputs[in_.first.decode()] = data_ptr
return outputs
@@ -965,6 +969,7 @@ cdef class ExecutableNetwork:
def get_exec_graph_info(self):
ie_network = IENetwork()
ie_network.impl = deref(self.impl).GetExecGraphInfo()
ie_network._ptr_plugin = deref(self.impl).getPluginLink()
return ie_network
## Gets general runtime metric for an executable network. It can be network name, actual device ID on


@@ -324,23 +324,23 @@ void InferenceEnginePython::IEExecNetwork::infer() {
}
InferenceEnginePython::IENetwork InferenceEnginePython::IEExecNetwork::GetExecGraphInfo() {
return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(actual.GetExecGraphInfo()));
return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(actual->GetExecGraphInfo()));
}
PyObject* InferenceEnginePython::IEExecNetwork::getMetric(const std::string& metric_name) {
return parse_parameter(actual.GetMetric(metric_name));
return parse_parameter(actual->GetMetric(metric_name));
}
PyObject* InferenceEnginePython::IEExecNetwork::getConfig(const std::string& name) {
return parse_parameter(actual.GetConfig(name));
return parse_parameter(actual->GetConfig(name));
}
void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string& model_file) {
actual.Export(model_file);
actual->Export(model_file);
}
std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IEExecNetwork::getInputs() {
InferenceEngine::ConstInputsDataMap inputsDataMap = actual.GetInputsInfo();
InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo();
std::map<std::string, InferenceEngine::DataPtr> pyInputs;
for (const auto& item : inputsDataMap) {
pyInputs[item.first] = item.second->getInputData();
@@ -349,7 +349,7 @@ std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IEExecNet
}
std::map<std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::IEExecNetwork::getInputsInfo() {
InferenceEngine::ConstInputsDataMap inputsDataMap = actual.GetInputsInfo();
InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo();
std::map<std::string, InferenceEngine::InputInfo::CPtr> pyInputs;
for (const auto& item : inputsDataMap) {
pyInputs[item.first] = item.second;
@@ -358,7 +358,7 @@ std::map<std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::I
}
std::map<std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecNetwork::getOutputs() {
InferenceEngine::ConstOutputsDataMap outputsDataMap = actual.GetOutputsInfo();
InferenceEngine::ConstOutputsDataMap outputsDataMap = actual->GetOutputsInfo();
std::map<std::string, InferenceEngine::CDataPtr> pyOutputs;
for (const auto& item : outputsDataMap) {
pyOutputs[item.first] = item.second;
@@ -366,6 +366,10 @@ std::map<std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecNe
return pyOutputs;
}
std::shared_ptr<InferenceEngine::ExecutableNetwork> InferenceEnginePython::IEExecNetwork::getPluginLink(){
return actual;
}
void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name,
const InferenceEngine::Blob::Ptr& blob_ptr) {
request_ptr.SetBlob(blob_name.c_str(), blob_ptr);
@@ -512,7 +516,7 @@ int InferenceEnginePython::IdleInferRequestQueue::getIdleRequestId() {
void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) {
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(actual);
num_requests = getOptimalNumberOfRequests(*actual);
}
infer_requests.resize(num_requests);
@@ -521,7 +525,7 @@ void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests)
infer_request.index = i;
request_queue_ptr->setRequestIdle(i);
infer_request.request_queue_ptr = request_queue_ptr;
infer_request.request_ptr = actual.CreateInferRequest();
infer_request.request_ptr = actual->CreateInferRequest();
infer_request.request_ptr.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest r,
InferenceEngine::StatusCode)>>(
@@ -564,7 +568,7 @@ std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IEC
const std::map<std::string, std::string>& config,
int num_requests) {
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name, num_requests);
exec_network->actual = actual.LoadNetwork(*network.actual, deviceName, config);
exec_network->actual = std::make_shared<InferenceEngine::ExecutableNetwork>(actual.LoadNetwork(*network.actual, deviceName, config));
exec_network->createInferRequests(num_requests);
return exec_network;
@@ -575,7 +579,7 @@ std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IEC
const std::map<std::string, std::string>& config,
int num_requests) {
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(modelPath, num_requests);
exec_network->actual = actual.LoadNetwork(modelPath, deviceName, config);
exec_network->actual = std::make_shared<InferenceEngine::ExecutableNetwork>(actual.LoadNetwork(modelPath, deviceName, config));
exec_network->createInferRequests(num_requests);
return exec_network;
@@ -585,7 +589,7 @@ std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IEC
const std::map<std::string, std::string>& config,
int num_requests) {
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(EXPORTED_NETWORK_NAME, num_requests);
exec_network->actual = actual.ImportNetwork(modelFIle, deviceName, config);
exec_network->actual = std::make_shared<InferenceEngine::ExecutableNetwork>(actual.ImportNetwork(modelFIle, deviceName, config));
exec_network->createInferRequests(num_requests);
return exec_network;


@@ -134,8 +134,9 @@ struct InferRequestWrap {
std::vector<InferenceEnginePython::CVariableState> queryState();
};
struct IEExecNetwork {
InferenceEngine::ExecutableNetwork actual;
std::shared_ptr<InferenceEngine::ExecutableNetwork> actual;
std::vector<InferRequestWrap> infer_requests;
std::string name;
IdleInferRequestQueue::Ptr request_queue_ptr;
@@ -158,6 +159,9 @@ struct IEExecNetwork {
int getIdleRequestId();
void createInferRequests(int num_requests);
// Binds the plugin to InputInfo and Data, so that they can be destroyed before the plugin (issue 28996)
std::shared_ptr<InferenceEngine::ExecutableNetwork> getPluginLink();
};
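
The lifetime link works because IEExecNetwork::actual is now a std::shared_ptr<InferenceEngine::ExecutableNetwork> and getPluginLink() hands a copy of it to each Python-facing wrapper via _ptr_plugin. A rough keep-alive sketch in plain Python, with purely illustrative class names rather than the real bindings:

class Plugin:
    def __del__(self):
        print("plugin released")  # must not happen while wrappers still exist

class ExecNetwork:
    def __init__(self):
        self.plugin = Plugin()

class InputInfoWrapper:
    def __init__(self, exec_network):
        # Holding a strong reference to the network (and thus its plugin)
        # guarantees the plugin outlives this wrapper regardless of the
        # order of `del` statements in user code.
        self._ptr_plugin = exec_network

exec_net = ExecNetwork()
info = InputInfoWrapper(exec_net)
del exec_net  # plugin stays alive: `info` still references it
del info      # only now is the plugin released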
struct IECore {


@@ -13,6 +13,8 @@ from libc.stdint cimport int64_t, uint8_t
cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
ctypedef vector[size_t] SizeVector
cdef cppclass CExecutableNetwork "InferenceEngine::ExecutableNetwork"
cdef cppclass TBlob[T]:
ctypedef shared_ptr[TBlob[T]] Ptr
@@ -160,6 +162,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
object getConfig(const string & metric_name) except +
int wait(int num_requests, int64_t timeout)
int getIdleRequestId()
shared_ptr[CExecutableNetwork] getPluginLink() except +
cdef cppclass IENetwork:
IENetwork() except +


@@ -304,3 +304,38 @@ def test_get_config(device):
exec_net = ie_core.load_network(net, device)
config = exec_net.get_config("PERF_COUNT")
assert config == "NO"
# issue 28996
# checks that objects can be deallocated in this order; if not, a segfault happens
def test_input_info_deallocation(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
input_info = exec_net.input_info["data"]
del ie_core
del exec_net
del input_info
def test_outputs_deallocation(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
output = exec_net.outputs["fc_out"]
del ie_core
del exec_net
del output
def test_exec_graph_info_deallocation(device):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin because the get_exec_graph_info method isn't implemented")
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
exec_graph_info = exec_net.get_exec_graph_info()
del ie_core
del exec_net
del exec_graph_info


@@ -16,6 +16,7 @@ def test_name(device):
exec_net = ie.load_network(net, device, num_requests=5)
assert isinstance(exec_net.input_info['data'], InputInfoCPtr)
assert exec_net.input_info['data'].name == "data", "Incorrect name"
del ie
del exec_net
@@ -25,6 +26,7 @@ def test_precision(device):
exec_net = ie.load_network(net, device, num_requests=5)
assert isinstance(exec_net.input_info['data'], InputInfoCPtr)
assert exec_net.input_info['data'].precision == "FP32", "Incorrect precision"
del ie
del exec_net
@@ -36,6 +38,7 @@ def test_no_precision_setter(device):
exec_net.input_info['data'].precision = "I8"
assert "attribute 'precision' of 'openvino.inference_engine.ie_api.InputInfoCPtr' " \
"objects is not writable" in str(e.value)
del ie
del exec_net
@@ -45,9 +48,24 @@ def test_input_data(device):
exec_net = ie.load_network(net, device, num_requests=5)
assert isinstance(exec_net.input_info['data'], InputInfoCPtr)
assert isinstance(exec_net.input_info['data'].input_data, DataPtr), "Incorrect precision for layer 'fc_out'"
del ie
del exec_net
# issue 28996
# checks that objects can be deallocated in this order; if not, a segfault happens
def test_input_data_deallocation(device):
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie.load_network(net, device)
input_info = exec_net.input_info['data']
input_data = input_info.input_data
del ie
del exec_net
del input_info
del input_data
def test_tensor_desc(device):
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)