[Python API] setting and getting preprocessing info through InferRequest (#1009)
This commit is contained in:
parent 8ff7e3381d
commit b553b6ea17
@@ -1,4 +1,4 @@
from .ie_api import *
__all__ = ['IENetwork', "TensorDesc", "IECore", "Blob", "get_version"]
__all__ = ['IENetwork', "TensorDesc", "IECore", "Blob", "PreProcessInfo", "get_version"]
__version__ = get_version()
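With this change `PreProcessInfo` becomes part of the package's public surface. A minimal sketch of what that enables, assuming the `openvino.inference_engine` package layout used by the tests further down (where it is imported as `ie`):

```python
# Sketch only: PreProcessInfo is now exported alongside the existing classes.
from openvino.inference_engine import IECore, TensorDesc, Blob, PreProcessInfo

preprocess_info = PreProcessInfo()  # standalone object created from Python
```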
@@ -75,6 +75,7 @@ cdef class InputInfoCPtr:

cdef class PreProcessInfo:
    cdef CPreProcessInfo* _ptr
    cpdef object _user_data

cdef class PreProcessChannel:
    cdef CPreProcessChannel.Ptr _ptr
@@ -503,6 +503,14 @@ cdef class PreProcessChannel:

## This class stores pre-process information for the input
cdef class PreProcessInfo:
    def __cinit__(self):
        self._ptr = new CPreProcessInfo()
        self._user_data = True

    def __dealloc__(self):
        if self._user_data:
            del self._ptr

    def __getitem__(self, size_t index):
        cdef CPreProcessChannel.Ptr c_channel = deref(self._ptr)[index]
        channel = PreProcessChannel()
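The `_user_data` flag above encodes ownership of the underlying C++ object: a `PreProcessInfo` constructed from Python owns its pointer and deletes it in `__dealloc__`, while the instances handed out by `InputInfoPtr.preprocess_info` and `InferRequest.preprocess_info` later in this diff set `_user_data = False` and act as non-owning views. A short sketch of the two cases (file paths and the input name "data" are assumptions mirroring the tests):

```python
from openvino.inference_engine import IECore, PreProcessInfo, MeanVariant

# Case 1: user-created object. __cinit__ allocates the C++ struct and leaves
# _user_data == True, so __dealloc__ frees it when the Python object goes away.
owned = PreProcessInfo()
owned.mean_variant = MeanVariant.MEAN_IMAGE

# Case 2: non-owning view onto a structure that belongs to the network.
# The wrapper never deletes memory it does not own; keep `net` alive while using it.
ie_core = IECore()
net = ie_core.read_network(model="model.xml", weights="model.bin")  # hypothetical paths
view = net.input_info["data"].preprocess_info
```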
@@ -544,10 +552,18 @@ cdef class PreProcessInfo:

    ## Resize algorithm to be applied to the input before inference, if needed.
    #
    # \note For the resize algorithm to be applied, the input must be set via the set_blob method.
    #
    # Usage example:\n
    # ```python
    # net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
    # net.input_info['data'].preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR
    # exec_net = ie_core.load_network(net, 'CPU')
    # tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW")
    # img_blob = ie.Blob(tensor_desc, image)
    # request = exec_net.requests[0]
    # request.set_blob('data', img_blob)
    # request.infer()
    # ```
    @property
    def resize_algorithm(self):
@@ -615,6 +631,8 @@ cdef class InputInfoPtr:
    def preprocess_info(self):
        cdef CPreProcessInfo* c_preprocess_info = &deref(self._ptr).getPreProcess()
        preprocess_info = PreProcessInfo()
        del preprocess_info._ptr
        preprocess_info._user_data = False
        preprocess_info._ptr = c_preprocess_info
        return preprocess_info
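Because this property returns a non-owning view onto the network's own pre-process structure, mutations made through it change what gets loaded onto the device. A condensed sketch, along the lines of the doc-comment example above and test_resize_algorithm_set below (model paths are hypothetical):

```python
from openvino.inference_engine import IECore, ResizeAlgorithm

ie_core = IECore()
net = ie_core.read_network(model="model.xml", weights="model.bin")  # hypothetical paths

# Mutating the returned view edits the network's preprocessing in place.
net.input_info["data"].preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR

exec_net = ie_core.load_network(network=net, device_name="CPU")
```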
@@ -1028,9 +1046,24 @@ cdef class InferRequest:
            output_blobs[output] = deepcopy(blob)
        return output_blobs

    ## Dictionary that maps input layer names to the corresponding preprocessing information
    @property
    def preprocess_info(self):
        preprocess_info = {}
        cdef const CPreProcessInfo** c_preprocess_info
        for input_blob in self.input_blobs.keys():
            preprocess = PreProcessInfo()
            del preprocess._ptr
            preprocess._user_data = False
            c_preprocess_info = <const CPreProcessInfo**>(&preprocess._ptr)
            deref(self.impl).getPreProcess(input_blob.encode(), c_preprocess_info)
            preprocess_info[input_blob] = preprocess
        return preprocess_info

    ## Sets a user-defined Blob for the infer request
    # @param blob_name: Name of the input blob
    # @param blob: Blob object to set for the infer request
    # @param preprocess_info: PreProcessInfo object to set for the infer request
    # @return None
    #
    # Usage example:\n
@@ -1043,8 +1076,11 @@ cdef class InferRequest:
    # blob = Blob(td, blob_data)
    # exec_net.requests[0].set_blob(blob_name="input_blob_name", blob=blob)
    # ```
    def set_blob(self, blob_name : str, blob : Blob):
        deref(self.impl).setBlob(blob_name.encode(), blob._ptr)
    def set_blob(self, blob_name : str, blob : Blob, preprocess_info: PreProcessInfo = None):
        if preprocess_info:
            deref(self.impl).setBlob(blob_name.encode(), blob._ptr, deref(preprocess_info._ptr))
        else:
            deref(self.impl).setBlob(blob_name.encode(), blob._ptr)
        self._user_blobs[blob_name] = blob
    ## Starts synchronous inference of the infer request and fills the outputs array
    #
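Taken together, the two additions above let a caller attach per-request preprocessing when setting a blob and read it back through the new `preprocess_info` dictionary. A condensed sketch in the spirit of test_blob_setter_with_preprocess below (model paths, the "data" input name, and the 1x3x32x32 FP32 shape are assumptions taken from the tests):

```python
import numpy as np
from openvino.inference_engine import IECore, TensorDesc, Blob, PreProcessInfo, MeanVariant

ie_core = IECore()
net = ie_core.read_network(model="model.xml", weights="model.bin")  # hypothetical paths
exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=1)

# Assumed input named 'data' with a 1x3x32x32 FP32 NCHW layout, as in the tests.
image = np.zeros((1, 3, 32, 32), dtype=np.float32)
blob = Blob(TensorDesc("FP32", [1, 3, 32, 32], "NCHW"), image)

preprocess_info = PreProcessInfo()
preprocess_info.mean_variant = MeanVariant.MEAN_IMAGE

request = exec_net.requests[0]
request.set_blob('data', blob, preprocess_info)  # new optional third argument

# The new property returns a dict keyed by input name.
assert request.preprocess_info['data'].mean_variant == MeanVariant.MEAN_IMAGE
```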
@@ -362,6 +362,19 @@ void InferenceEnginePython::InferRequestWrap::setBlob(const std::string &blob_na
    IE_CHECK_CALL(request_ptr->SetBlob(blob_name.c_str(), blob_ptr, &response));
}

void InferenceEnginePython::InferRequestWrap::setBlob(const std::string &blob_name,
                                                      const InferenceEngine::Blob::Ptr &blob_ptr,
                                                      const InferenceEngine::PreProcessInfo& info) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->SetBlob(blob_name.c_str(), blob_ptr, info, &response));
}

void InferenceEnginePython::InferRequestWrap::getPreProcess(const std::string& blob_name,
                                                            const InferenceEngine::PreProcessInfo** info) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->GetPreProcess(blob_name.c_str(), info, &response));
}

void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string &blob_name,
                                                         InferenceEngine::Blob::Ptr &blob_ptr) {
    InferenceEngine::ResponseDesc response;
@@ -114,8 +114,13 @@ struct InferRequestWrap {

    void setBlob(const std::string &blob_name, const InferenceEngine::Blob::Ptr &blob_ptr);

    void setBlob(const std::string &name, const InferenceEngine::Blob::Ptr &data,
                 const InferenceEngine::PreProcessInfo& info);

    void setBatch(int size);

    void getPreProcess(const std::string& blob_name, const InferenceEngine::PreProcessInfo** info);

    std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
};
@@ -200,6 +200,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
        int index;
        void getBlobPtr(const string & blob_name, CBlob.Ptr & blob_ptr) except +
        void setBlob(const string & blob_name, const CBlob.Ptr & blob_ptr) except +
        void setBlob(const string &blob_name, const CBlob.Ptr &blob_ptr, CPreProcessInfo& info) except +
        void getPreProcess(const string& blob_name, const CPreProcessInfo** info) except +
        map[string, ProfileInfo] getPerformanceCounts() except +
        void infer() except +
        void infer_async() except +
@@ -122,11 +122,11 @@ def test_incompatible_array_and_td():
def test_incompatible_input_precision():
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(path_to_image) / 255
    image = cv2.imread(path_to_image)
    if image is None:
        raise FileNotFoundError("Input image not found")

    image = cv2.resize(image, (h, w))
    image = cv2.resize(image, (h, w)) / 255
    image = image.transpose((2, 0, 1))
    image = image.reshape((n, c, h, w))
    tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
@@ -16,11 +16,11 @@ path_to_img = image_path()
def read_image():
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(path_to_img) / 255
    image = cv2.imread(path_to_img)
    if image is None:
        raise FileNotFoundError("Input image not found")

    image = cv2.resize(image, (h, w))
    image = cv2.resize(image, (h, w)) / 255
    image = image.transpose((2, 0, 1)).astype(np.float32)
    image = image.reshape((n, c, h, w))
    return image
@@ -452,3 +452,63 @@ def test_blob_setter(device):
    request.infer()
    res_2 = np.sort(request.output_blobs['fc_out'].buffer)
    assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)


def test_blob_setter_with_preprocess(device):
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)

    img = read_image()
    tensor_desc = ie.TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
    img_blob = ie.Blob(tensor_desc, img)
    preprocess_info = ie.PreProcessInfo()
    preprocess_info.mean_variant = ie.MeanVariant.MEAN_IMAGE

    request = exec_net.requests[0]
    request.set_blob('data', img_blob, preprocess_info)
    pp = request.preprocess_info["data"]
    assert pp.mean_variant == ie.MeanVariant.MEAN_IMAGE


def test_getting_preprocess(device):
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
    request = exec_net.requests[0]
    preprocess_info = request.preprocess_info["data"]
    assert isinstance(preprocess_info, ie.PreProcessInfo)
    assert preprocess_info.mean_variant == ie.MeanVariant.NONE


def test_resize_algorithm_work(device):
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)

    img = read_image()
    res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])

    net.input_info['data'].preprocess_info.resize_algorithm = ie.ResizeAlgorithm.RESIZE_BILINEAR

    exec_net_2 = ie_core.load_network(net, 'CPU')

    import cv2

    image = cv2.imread(path_to_img)
    if image is None:
        raise FileNotFoundError("Input image not found")

    image = image / 255
    image = image.transpose((2, 0, 1)).astype(np.float32)
    image = np.expand_dims(image, 0)

    tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW")
    img_blob = ie.Blob(tensor_desc, image)
    request = exec_net_2.requests[0]
    assert request.preprocess_info["data"].resize_algorithm == ie.ResizeAlgorithm.RESIZE_BILINEAR
    request.set_blob('data', img_blob)
    request.infer()
    res_2 = np.sort(request.output_blobs['fc_out'].buffer)

    assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
@@ -103,3 +103,14 @@ def test_set_mean_image_for_channel():
    assert isinstance(pre_process_channel.mean_data, Blob)
    assert pre_process_channel.mean_data.tensor_desc.dims == [127, 127]
    assert preprocess_info.mean_variant == MeanVariant.MEAN_IMAGE


def test_resize_algorithm_set(device):
    ie_core = IECore()
    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
    preprocess_info = net.input_info["data"].preprocess_info
    preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR
    exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
    request = exec_net.requests[0]
    pp = request.preprocess_info["data"]
    assert pp.resize_algorithm == ResizeAlgorithm.RESIZE_BILINEAR