[Python API] fix memleak in read_network from buffer (#3345)

* [Python API] fix memleak in read_network from buffer

* use memblob
Anastasia Kuporosova 2020-11-27 16:52:20 +03:00 committed by GitHub
parent 6534d315a5
commit 5ecb4a3960
3 changed files with 7 additions and 7 deletions
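
For context, the user-facing path this commit touches is IECore.read_network called with init_from_buffer=True, which hands the raw weight bytes to the C++ bridge. Before this fix, the Cython layer malloc'd a copy of the weights on every such call and never freed it, so repeated calls grew the process footprint by roughly len(weights) bytes each time. Below is a minimal leak-reproduction sketch, assuming the 2020-era openvino.inference_engine package; the model.xml/model.bin file names are hypothetical.

import resource

from openvino.inference_engine import IECore

ie = IECore()
with open("model.xml", "rb") as f:   # hypothetical IR description
    xml_bytes = f.read()
with open("model.bin", "rb") as f:   # hypothetical IR weights
    bin_bytes = f.read()

for i in range(100):
    # Pre-fix: each call copied bin_bytes into a malloc'd buffer that was
    # never freed, leaking roughly len(bin_bytes) bytes per iteration.
    net = ie.read_network(model=xml_bytes, weights=bin_bytes, init_from_buffer=True)
    if i % 10 == 0:
        # ru_maxrss is reported in KiB on Linux.
        print("max RSS:", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss, "KiB")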


@@ -264,10 +264,8 @@ cdef class IECore:
         cdef string model_
         cdef IENetwork net = IENetwork()
         if init_from_buffer:
-            bin_buffer = <uint8_t *> malloc(len(weights))
-            memcpy(bin_buffer, <uint8_t *> weights, len(weights))
             model_ = bytes(model)
-            net.impl = self.impl.readNetwork(model_, bin_buffer, len(weights))
+            net.impl = self.impl.readNetwork(model_, weights, len(weights))
         else:
             weights_ = "".encode()


@@ -530,12 +530,14 @@ InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, const s
 }
 InferenceEnginePython::IENetwork
-InferenceEnginePython::IECore::readNetwork(const std::string& model, uint8_t *bin, size_t bin_size) {
-    InferenceEngine::Blob::Ptr weights_blob;
+InferenceEnginePython::IECore::readNetwork(const std::string& model, const uint8_t *bin, size_t bin_size) {
+    InferenceEngine::MemoryBlob::Ptr weights_blob;
     if(bin_size!=0)
     {
         InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bin_size }, InferenceEngine::Layout::C);
-        weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, bin, bin_size);
+        weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc);
+        weights_blob->allocate();
+        memcpy(weights_blob->rwmap().as<uint8_t*>(), bin, bin_size);
     }
     InferenceEngine::CNNNetwork net = actual.ReadNetwork(model, weights_blob);
     return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
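
The fix gives the weights copy a clear owner: readNetwork now allocates a MemoryBlob, memcpy's the buffer into it, and the blob's shared_ptr releases the memory once nothing references it anymore, where the old malloc'd copy was simply dropped. Taking const uint8_t* also lets the Cython layer pass Python's bytes buffer straight through instead of making its own intermediate copy. A sketch of the resulting lifetime, under the same assumptions as above (hypothetical file names, 2020-era openvino.inference_engine):

from openvino.inference_engine import IECore

ie = IECore()
with open("model.xml", "rb") as f:
    xml_bytes = f.read()
with open("model.bin", "rb") as f:
    bin_bytes = f.read()

net = ie.read_network(model=xml_bytes, weights=bin_bytes, init_from_buffer=True)
del bin_bytes  # fine either way: the bridge keeps its own copy of the weights
del net        # post-fix, the shared_ptr-owned copy is freed once the last
               # reference to it goes away, instead of leaking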


@@ -156,7 +156,7 @@ struct IECore {
     explicit IECore(const std::string & xmlConfigFile = std::string());
     std::map<std::string, InferenceEngine::Version> getVersions(const std::string & deviceName);
     InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath);
-    InferenceEnginePython::IENetwork readNetwork(const std::string& model, uint8_t *bin, size_t bin_size);
+    InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t *bin, size_t bin_size);
     std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network, const std::string & deviceName,
                                                                       const std::map<std::string, std::string> & config, int num_requests);
     std::unique_ptr<InferenceEnginePython::IEExecNetwork> importNetwork(const std::string & modelFIle, const std::string & deviceName,