[IE PYTHON] optimize blob creation with numpy (#7813)

* Optimize blob constructor and remove user blobs

* restore user_blobs

* Rename helper function

* add test

* Remove empty line

* Fix code style

* add const

* Add test for SCALAR layout
This commit is contained in:
Alexey Lebedev 2021-10-21 12:03:23 +03:00 committed by GitHub
parent 2a20d9a6e7
commit 0793290762
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 21 additions and 7 deletions

View File

@@ -206,10 +206,9 @@ cdef class Blob:
elif tensor_desc is not None and self._array_data is not None:
c_tensor_desc = tensor_desc.impl
precision = tensor_desc.precision
size_arr = np.prod(array.shape)
size_td = np.prod(tensor_desc.dims)
if size_arr != size_td:
raise AttributeError(f"Number of elements in provided numpy array {size_arr} and "
size_td = C.product(c_tensor_desc.getDims())
if array.size != size_td:
raise AttributeError(f"Number of elements in provided numpy array {array.size} and "
f"required by TensorDesc {size_td} are not equal")
if self._array_data.dtype != format_map[precision]:
raise ValueError(f"Data type {self._array_data.dtype} of provided numpy array "

View File

@@ -682,3 +682,7 @@ InferenceEngine::Blob::Ptr InferenceEnginePython::CVariableState::getState() {
// Replace the variable's state blob with the one supplied by the caller.
// `state` is a shared_ptr taken by value (a sink parameter), so move it into
// SetState instead of copying: this avoids a second atomic refcount
// increment/decrement pair on the shared control block.
void InferenceEnginePython::CVariableState::setState(InferenceEngine::Blob::Ptr state) {
    variableState.SetState(std::move(state));
}
// Total number of tensor elements for the given dimension vector.
// An empty `dims` (SCALAR layout) yields 1, matching numpy's convention
// that a 0-d array holds exactly one element.
const size_t InferenceEnginePython::product(const InferenceEngine::SizeVector& dims) {
    // Seed with size_t{1}, not the int literal 1: std::accumulate deduces its
    // accumulation type from the init argument, so an int seed would perform
    // the running multiplication in int and could overflow/truncate for large
    // shapes before the result is widened to size_t on return.
    return std::accumulate(dims.begin(), dims.end(), size_t{1}, std::multiplies<size_t>{});
}

View File

@@ -7,11 +7,13 @@
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <iterator>
#include <list>
#include <map>
#include <mutex>
#include <numeric>
#include <queue>
#include <set>
#include <sstream>
@@ -203,4 +205,6 @@ InferenceEnginePython::IENetwork read_network(std::string path_to_xml, std::stri
PyObject* getPartialShape_capsule(InferenceEngine::CDataPtr data);
const size_t product(const InferenceEngine::SizeVector& dims);
}; // namespace InferenceEnginePython

View File

@@ -230,3 +230,5 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
cdef IENetwork read_network(string path_to_xml, string path_to_bin)
cdef object getPartialShape_capsule(DataPtr)
cdef const size_t product(const SizeVector& dims)

View File

@@ -21,11 +21,16 @@ def test_init_with_tensor_desc():
assert blob.tensor_desc == tensor_desc
def test_init_with_numpy():
tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NCHW")
array = np.ones(shape=(1, 3, 127, 127), dtype=np.float32)
@pytest.mark.parametrize("shape, layout", [
    ([1, 3, 127, 127], "NCHW"),
    ([], "SCALAR"),
])
def test_init_with_numpy(shape, layout):
    """A Blob built from a numpy array must wrap it zero-copy, for both
    ordinary N-d layouts and the 0-d SCALAR layout."""
    desc = TensorDesc("FP32", shape, layout)
    data = np.ones(shape=shape, dtype=np.float32)
    blob = Blob(desc, data)
    # The exposed buffer is a real ndarray sharing memory with the source
    # array (no copy was made), and the descriptor round-trips unchanged.
    assert isinstance(blob.buffer, np.ndarray)
    assert np.shares_memory(blob.buffer, data)
    assert blob.tensor_desc == desc