feat: clang-style for cython api (#5354)
parent 39e1a21c42
commit add383c566
inference-engine/ie_bridges/python/src/.clang-format (new file, 25 lines)
@@ -0,0 +1,25 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never

Language: Cpp
Standard: Cpp11

AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
ColumnLimit: 160
# Specialize this comment pragma in order to avoid changes in SEA copyrights
CommentPragmas: '^#'
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
IndentPPDirectives: BeforeHash
SpaceBeforeCpp11BracedList: true
SpaceBeforeCtorInitializerColon: false
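As a quick illustration (not part of the commit), here is a small hypothetical C++ fragment laid out the way this configuration asks for: Google base style with 4-space indentation, pointers and references bound to the type (DerivePointerAlignment is off), access modifiers out-dented by 4, only empty function bodies kept on one line, and no single-line if statements.

    // Hypothetical example only; these names do not exist in the repository.
    #include <string>

    class NamedCounter {
    public:
        explicit NamedCounter(const std::string& name): _name(name) {}  // no space before the ctor-initializer colon

        void reset() {}  // empty body, so it may stay on a single line

        void add(const int* value) {
            if (value != nullptr)
                _count += *value;  // short ifs are never collapsed onto one line
        }

    private:
        std::string _name;
        int _count = 0;
    };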
@@ -11,7 +11,8 @@ set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engin

file(GLOB SOURCE
     ${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx
     ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
     ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)

set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX ON)

@@ -84,3 +85,5 @@ install(TARGETS ${INSTALLED_TARGETS}
install(PROGRAMS __init__.py
        DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine
        COMPONENT ${PYTHON_VERSION})

add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})
@@ -3,46 +3,41 @@
//

#include "ie_api_impl.hpp"

#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"

const std::string EXPORTED_NETWORK_NAME = "undefined";
std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
    {"FP64", InferenceEngine::Precision::FP64},
    {"FP16", InferenceEngine::Precision::FP16},
    {"I8", InferenceEngine::Precision::I8},
    {"I16", InferenceEngine::Precision::I16},
    {"I32", InferenceEngine::Precision::I32},
    {"I64", InferenceEngine::Precision::I64},
    {"U8", InferenceEngine::Precision::U8},
    {"U16", InferenceEngine::Precision::U16},
    {"U32", InferenceEngine::Precision::U32},
    {"U64", InferenceEngine::Precision::U64}};
std::map<std::string, InferenceEngine::Precision> precision_map = {
    {"FP32", InferenceEngine::Precision::FP32}, {"FP64", InferenceEngine::Precision::FP64}, {"FP16", InferenceEngine::Precision::FP16},
    {"I8", InferenceEngine::Precision::I8}, {"I16", InferenceEngine::Precision::I16}, {"I32", InferenceEngine::Precision::I32},
    {"I64", InferenceEngine::Precision::I64}, {"U8", InferenceEngine::Precision::U8}, {"U16", InferenceEngine::Precision::U16},
    {"U32", InferenceEngine::Precision::U32}, {"U64", InferenceEngine::Precision::U64}};

std::map <std::string, InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
    {"NCHW", InferenceEngine::Layout::NCHW},
    {"NHWC", InferenceEngine::Layout::NHWC},
    {"OIHW", InferenceEngine::Layout::OIHW},
    {"C", InferenceEngine::Layout::C},
    {"CHW", InferenceEngine::Layout::CHW},
    {"HW", InferenceEngine::Layout::HW},
    {"NC", InferenceEngine::Layout::NC},
    {"CN", InferenceEngine::Layout::CN},
    {"NCDHW", InferenceEngine::Layout::NCDHW},
    {"BLOCKED", InferenceEngine::Layout::BLOCKED}};
#define stringify(name) # name
#define IE_CHECK_CALL(expr) { \
    auto ret = (expr); \
    if (ret != InferenceEngine::StatusCode::OK) { \
        IE_THROW() << response.msg; \
    } \
} \
std::map<std::string, InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
    {"NCHW", InferenceEngine::Layout::NCHW},
    {"NHWC", InferenceEngine::Layout::NHWC},
    {"OIHW", InferenceEngine::Layout::OIHW},
    {"C", InferenceEngine::Layout::C},
    {"CHW", InferenceEngine::Layout::CHW},
    {"HW", InferenceEngine::Layout::HW},
    {"NC", InferenceEngine::Layout::NC},
    {"CN", InferenceEngine::Layout::CN},
    {"NCDHW", InferenceEngine::Layout::NCDHW},
    {"BLOCKED", InferenceEngine::Layout::BLOCKED}};
#define stringify(name) #name
#define IE_CHECK_CALL(expr) \
    { \
        auto ret = (expr); \
        if (ret != InferenceEngine::StatusCode::OK) { \
            IE_THROW() << response.msg; \
        } \
    }
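A brief usage note (mine, not from the diff): because the macro body is wrapped in braces and the reformatted version drops the stray trailing backslash, IE_CHECK_CALL can be used as an ordinary statement; it expects a local InferenceEngine::ResponseDesc named response to be in scope, exactly as the request wrappers later in this file provide. A minimal sketch:

    // Sketch of typical usage, mirroring the SetBlob/GetBlob wrappers further down.
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->SetBlob(blob_name.c_str(), blob_ptr, &response));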

uint32_t getOptimalNumberOfRequests(const InferenceEngine::ExecutableNetwork & actual) {
uint32_t getOptimalNumberOfRequests(const InferenceEngine::ExecutableNetwork& actual) {
    try {
        auto parameter_value = actual.GetMetric(METRIC_KEY(SUPPORTED_METRICS));
        auto supported_metrics = parameter_value.as < std::vector < std::string >> ();
        auto supported_metrics = parameter_value.as<std::vector<std::string>>();
        const std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS);
        if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) {
            parameter_value = actual.GetMetric(key);
@@ -50,121 +45,119 @@ uint32_t getOptimalNumberOfRequests(const InferenceEngine::ExecutableNetwork & a
                return parameter_value.as<unsigned int>();
            else
                IE_THROW() << "Unsupported format for " << key << "!"
                    << " Please specify number of infer requests directly!";
                    << " Please specify number of infer requests directly!";
        } else {
            IE_THROW() << "Can't load network: " << key << " is not supported!"
                << " Please specify number of infer requests directly!";
                << " Please specify number of infer requests directly!";
        }
    } catch (const std::exception &ex) {
        IE_THROW() << "Can't load network: " << ex.what()
            << " Please specify number of infer requests directly!";
    } catch (const std::exception& ex) {
        IE_THROW() << "Can't load network: " << ex.what() << " Please specify number of infer requests directly!";
    }
}
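A hypothetical call site (not shown in this diff) would use the helper above to size the request pool when the caller does not pass an explicit count, for example:

    // Assumption: exec_net is an already-loaded InferenceEnginePython::IEExecNetwork.
    uint32_t num_requests = getOptimalNumberOfRequests(exec_net->actual);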

PyObject *parse_parameter(const InferenceEngine::Parameter &param) {
PyObject* parse_parameter(const InferenceEngine::Parameter& param) {
    // Check for std::string
    if (param.is<std::string>()) {
        return PyUnicode_FromString(param.as<std::string>().c_str());
    }
    // Check for int
    // Check for int
    else if (param.is<int>()) {
        auto val = param.as<int>();
        return PyLong_FromLong((long)val);
    }
    // Check for unsigned int
    // Check for unsigned int
    else if (param.is<unsigned int>()) {
        auto val = param.as<unsigned int>();
        return PyLong_FromLong((unsigned long)val);
    }
    // Check for float
    // Check for float
    else if (param.is<float>()) {
        auto val = param.as<float>();
        return PyFloat_FromDouble((double)val);
    }
    // Check for bool
    // Check for bool
    else if (param.is<bool>()) {
        auto val = param.as<bool>();
        return val ? Py_True : Py_False;
    }
    // Check for std::vector<std::string>
    // Check for std::vector<std::string>
    else if (param.is<std::vector<std::string>>()) {
        auto val = param.as<std::vector<std::string>>();
        PyObject *list = PyList_New(0);
        for (const auto & it : val){
            PyObject *str_val = PyUnicode_FromString(it.c_str());
        PyObject* list = PyList_New(0);
        for (const auto& it : val) {
            PyObject* str_val = PyUnicode_FromString(it.c_str());
            PyList_Append(list, str_val);
        }
        return list;
    }
    // Check for std::vector<int>
    else if (param.is<std::vector<int>>()){
    // Check for std::vector<int>
    else if (param.is<std::vector<int>>()) {
        auto val = param.as<std::vector<int>>();
        PyObject *list = PyList_New(0);
        for (const auto & it : val){
        PyObject* list = PyList_New(0);
        for (const auto& it : val) {
            PyList_Append(list, PyLong_FromLong(it));
        }
        return list;
    }
    // Check for std::vector<unsigned int>
    else if (param.is<std::vector<unsigned int>>()){
    // Check for std::vector<unsigned int>
    else if (param.is<std::vector<unsigned int>>()) {
        auto val = param.as<std::vector<unsigned int>>();
        PyObject *list = PyList_New(0);
        for (const auto &it : val) {
        PyObject* list = PyList_New(0);
        for (const auto& it : val) {
            PyList_Append(list, PyLong_FromLong(it));
        }
        return list;
    }
    // Check for std::vector<float>
    else if (param.is<std::vector<float>>()){
    // Check for std::vector<float>
    else if (param.is<std::vector<float>>()) {
        auto val = param.as<std::vector<float>>();
        PyObject *list = PyList_New(0);
        for (const auto &it : val) {
            PyList_Append(list, PyFloat_FromDouble((double) it));
        PyObject* list = PyList_New(0);
        for (const auto& it : val) {
            PyList_Append(list, PyFloat_FromDouble((double)it));
        }
        return list;
    }
    // Check for std::tuple<unsigned int, unsigned int>
    else if (param.is<std::tuple<unsigned int, unsigned int >>()) {
        auto val = param.as<std::tuple<unsigned int, unsigned int >>();
        PyObject *tuple = PyTuple_New(2);
    // Check for std::tuple<unsigned int, unsigned int>
    else if (param.is<std::tuple<unsigned int, unsigned int>>()) {
        auto val = param.as<std::tuple<unsigned int, unsigned int>>();
        PyObject* tuple = PyTuple_New(2);
        PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val)));
        PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val)));
        return tuple;
    }
    // Check for std::tuple<unsigned int, unsigned int, unsigned int>
    else if (param.is<std::tuple<unsigned int, unsigned int, unsigned int >>()) {
        auto val = param.as<std::tuple<unsigned int, unsigned int, unsigned int >>();
        PyObject *tuple = PyTuple_New(3);
    // Check for std::tuple<unsigned int, unsigned int, unsigned int>
    else if (param.is<std::tuple<unsigned int, unsigned int, unsigned int>>()) {
        auto val = param.as<std::tuple<unsigned int, unsigned int, unsigned int>>();
        PyObject* tuple = PyTuple_New(3);
        PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val)));
        PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val)));
        PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long)std::get<2>(val)));
        return tuple;
    }
    // Check for std::map<std::string, std::string>
    // Check for std::map<std::string, std::string>
    else if (param.is<std::map<std::string, std::string>>()) {
        auto val = param.as<std::map<std::string, std::string>>();
        PyObject *dict = PyDict_New();
        for (const auto &it : val){
        PyObject* dict = PyDict_New();
        for (const auto& it : val) {
            PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str()));
        }
        return dict;
    }
    // Check for std::map<std::string, int>
    // Check for std::map<std::string, int>
    else if (param.is<std::map<std::string, int>>()) {
        auto val = param.as<std::map<std::string, int>>();
        PyObject *dict = PyDict_New();
        for (const auto &it : val){
        PyObject* dict = PyDict_New();
        for (const auto& it : val) {
            PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second));
        }
        return dict;
    }
    else {
    } else {
        PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!");
        return (PyObject *) NULL;
        return (PyObject*)NULL;
    }
}
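parse_parameter is the bridge between InferenceEngine::Parameter values and Python objects; the getMetric and getConfig wrappers later in this diff simply forward to it. A minimal sketch of such a caller, assuming a hypothetical IECore instance named core:

    // Assumption: "core" is an InferenceEnginePython::IECore; the real wrappers below do the same thing.
    PyObject* supported = parse_parameter(core.actual.GetMetric("CPU", METRIC_KEY(SUPPORTED_METRICS)));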

InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std::string &weights) {
InferenceEnginePython::IENetwork::IENetwork(const std::string& model, const std::string& weights) {
    InferenceEngine::Core reader;
    auto net = reader.ReadNetwork(model, weights);
    actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
@@ -172,9 +165,9 @@ InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std:
    batch_size = actual->getBatchSize();
}

InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork> &cnn_network)
        : actual(cnn_network) {
    if (actual == nullptr) IE_THROW() << "IENetwork was not initialized.";
InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network): actual(cnn_network) {
    if (actual == nullptr)
        IE_THROW() << "IENetwork was not initialized.";
    name = actual->getName();
    batch_size = actual->getBatchSize();
}
@@ -183,7 +176,8 @@ InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
    auto* capsule_ptr = PyCapsule_GetPointer(network, "ngraph_function");
    auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
    if (function_sp == nullptr)
        IE_THROW() << "Cannot create CNNNetwork from capsule! Capsule doesn't contain nGraph function!";
        IE_THROW() << "Cannot create CNNNetwork from capsule! Capsule doesn't "
                      "contain nGraph function!";

    InferenceEngine::CNNNetwork cnnNetwork(*function_sp);
    actual = std::make_shared<InferenceEngine::CNNNetwork>(cnnNetwork);
@@ -191,10 +185,9 @@ InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
    batch_size = actual->getBatchSize();
}

void
InferenceEnginePython::IENetwork::load_from_buffer(const char *xml, size_t xml_size, uint8_t *bin, size_t bin_size) {
void InferenceEnginePython::IENetwork::load_from_buffer(const char* xml, size_t xml_size, uint8_t* bin, size_t bin_size) {
    InferenceEngine::Core reader;
    InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bin_size }, InferenceEngine::Layout::C);
    InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C);
    auto weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, bin, bin_size);
    auto net = reader.ReadNetwork(std::string(xml, xml + xml_size), weights_blob);
    name = net.getName();
@@ -202,13 +195,12 @@ InferenceEnginePython::IENetwork::load_from_buffer(const char *xml, size_t xml_s
    batch_size = actual->getBatchSize();
}

void InferenceEnginePython::IENetwork::serialize(const std::string &path_to_xml, const std::string &path_to_bin) {
void InferenceEnginePython::IENetwork::serialize(const std::string& path_to_xml, const std::string& path_to_bin) {
    actual->serialize(path_to_xml, path_to_bin);
}


PyObject* InferenceEnginePython::IENetwork::getFunction() {
    const char * py_capsule_name = "ngraph_function";
    const char* py_capsule_name = "ngraph_function";
    auto ngraph_func_ptr = actual->getFunction();
    // create a shared pointer on the heap before putting it in the capsule
    // this secures the lifetime of the object transferred by the capsule
@@ -224,35 +216,35 @@ PyObject* InferenceEnginePython::IENetwork::getFunction() {
        }
    };
    if (ngraph_func_ptr) {
        //return PyCapsule_New(&ngraph_func_ptr, py_capsule_name, NULL);
        // return PyCapsule_New(&ngraph_func_ptr, py_capsule_name, NULL);
        return PyCapsule_New(sp_copy, py_capsule_name, sp_deleter);
    } else {
        return nullptr;
    }
}

const std::map <std::string, InferenceEngine::InputInfo::Ptr> InferenceEnginePython::IENetwork::getInputsInfo() {
    std::map <std::string, InferenceEngine::InputInfo::Ptr> inputs;
    const InferenceEngine::InputsDataMap &inputsInfo = actual->getInputsInfo();
    for (auto &in : inputsInfo) {
const std::map<std::string, InferenceEngine::InputInfo::Ptr> InferenceEnginePython::IENetwork::getInputsInfo() {
    std::map<std::string, InferenceEngine::InputInfo::Ptr> inputs;
    const InferenceEngine::InputsDataMap& inputsInfo = actual->getInputsInfo();
    for (auto& in : inputsInfo) {
        inputs[in.first] = in.second;
    }
    return inputs;
}

const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getInputs() {
    std::map <std::string, InferenceEngine::DataPtr> inputs;
    const InferenceEngine::InputsDataMap &inputsInfo = actual->getInputsInfo();
    for (auto &in : inputsInfo) {
const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getInputs() {
    std::map<std::string, InferenceEngine::DataPtr> inputs;
    const InferenceEngine::InputsDataMap& inputsInfo = actual->getInputsInfo();
    for (auto& in : inputsInfo) {
        inputs[in.first] = in.second->getInputData();
    }
    return inputs;
}

const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
    std::map <std::string, InferenceEngine::DataPtr> outputs;
    const InferenceEngine::OutputsDataMap &outputsInfo = actual->getOutputsInfo();
    for (auto &out : outputsInfo) {
const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
    std::map<std::string, InferenceEngine::DataPtr> outputs;
    const InferenceEngine::OutputsDataMap& outputsInfo = actual->getOutputsInfo();
    for (auto& out : outputsInfo) {
        outputs[out.first] = out.second;
    }
    return outputs;
@@ -262,8 +254,7 @@ std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::stri
    return actual->getOVNameForTensor(orig_name);
}

void
InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) {
void InferenceEnginePython::IENetwork::addOutput(const std::string& out_layer, size_t port_id) {
    actual->addOutput(out_layer, port_id);
}

@@ -275,17 +266,16 @@ size_t InferenceEnginePython::IENetwork::getBatch() {
    return actual->getBatchSize();
}

void InferenceEnginePython::IENetwork::reshape(const std::map <std::string, std::vector<size_t>> &input_shapes) {
void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>>& input_shapes) {
    actual->reshape(input_shapes);
}

InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, size_t num_requests) :
        infer_requests(num_requests), name(name) {
InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string& name, size_t num_requests): infer_requests(num_requests), name(name) {
    request_queue_ptr = std::make_shared<IdleInferRequestQueue>();
}

void InferenceEnginePython::IEExecNetwork::infer() {
    InferRequestWrap &request = infer_requests[0];
    InferRequestWrap& request = infer_requests[0];
    request.infer();
}

@@ -293,71 +283,66 @@ InferenceEnginePython::IENetwork InferenceEnginePython::IEExecNetwork::GetExecGr
    return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(actual.GetExecGraphInfo()));
}

PyObject *InferenceEnginePython::IEExecNetwork::getMetric(const std::string &metric_name) {
PyObject* InferenceEnginePython::IEExecNetwork::getMetric(const std::string& metric_name) {
    return parse_parameter(actual.GetMetric(metric_name));
}

PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &name) {
PyObject* InferenceEnginePython::IEExecNetwork::getConfig(const std::string& name) {
    return parse_parameter(actual.GetConfig(name));
}

void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string &model_file) {
void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string& model_file) {
    actual.Export(model_file);
}

std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IEExecNetwork::getInputs() {
std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IEExecNetwork::getInputs() {
    InferenceEngine::ConstInputsDataMap inputsDataMap = actual.GetInputsInfo();
    std::map <std::string, InferenceEngine::DataPtr> pyInputs;
    for (const auto &item : inputsDataMap) {
    std::map<std::string, InferenceEngine::DataPtr> pyInputs;
    for (const auto& item : inputsDataMap) {
        pyInputs[item.first] = item.second->getInputData();
    }
    return pyInputs;
}

std::map <std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::IEExecNetwork::getInputsInfo() {
std::map<std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::IEExecNetwork::getInputsInfo() {
    InferenceEngine::ConstInputsDataMap inputsDataMap = actual.GetInputsInfo();
    std::map <std::string, InferenceEngine::InputInfo::CPtr> pyInputs;
    for (const auto &item : inputsDataMap) {
    std::map<std::string, InferenceEngine::InputInfo::CPtr> pyInputs;
    for (const auto& item : inputsDataMap) {
        pyInputs[item.first] = item.second;
    }
    return pyInputs;
}

std::map <std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecNetwork::getOutputs() {
std::map<std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecNetwork::getOutputs() {
    InferenceEngine::ConstOutputsDataMap outputsDataMap = actual.GetOutputsInfo();
    std::map <std::string, InferenceEngine::CDataPtr> pyOutputs;
    for (const auto &item : outputsDataMap) {
    std::map<std::string, InferenceEngine::CDataPtr> pyOutputs;
    for (const auto& item : outputsDataMap) {
        pyOutputs[item.first] = item.second;
    }
    return pyOutputs;
}

void InferenceEnginePython::InferRequestWrap::setBlob(const std::string &blob_name,
        const InferenceEngine::Blob::Ptr &blob_ptr) {
void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->SetBlob(blob_name.c_str(), blob_ptr, &response));
}

void InferenceEnginePython::InferRequestWrap::setBlob(const std::string &blob_name,
        const InferenceEngine::Blob::Ptr &blob_ptr,
void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr,
        const InferenceEngine::PreProcessInfo& info) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->SetBlob(blob_name.c_str(), blob_ptr, info, &response));
}

void InferenceEnginePython::InferRequestWrap::getPreProcess(const std::string& blob_name,
        const InferenceEngine::PreProcessInfo** info) {
void InferenceEnginePython::InferRequestWrap::getPreProcess(const std::string& blob_name, const InferenceEngine::PreProcessInfo** info) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->GetPreProcess(blob_name.c_str(), info, &response));
}

void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string &blob_name,
        InferenceEngine::Blob::Ptr &blob_ptr) {
void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string& blob_name, InferenceEngine::Blob::Ptr& blob_ptr) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->GetBlob(blob_name.c_str(), blob_ptr, &response));
}


void InferenceEnginePython::InferRequestWrap::setBatch(int size) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->SetBatch(size, &response));
@@ -366,13 +351,12 @@ void InferenceEnginePython::InferRequestWrap::setBatch(int size) {
void latency_callback(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) {
    if (code != InferenceEngine::StatusCode::OK) {
        IE_EXCEPTION_SWITCH(code, ExceptionType,
            InferenceEngine::details::ThrowNow<ExceptionType>{}
                <<= std::stringstream{} << IE_LOCATION
                << InferenceEngine::details::ExceptionTraits<ExceptionType>::string());
        IE_EXCEPTION_SWITCH(code, ExceptionType,
            InferenceEngine::details::ThrowNow<ExceptionType> {} <<=
                std::stringstream {} << IE_LOCATION << InferenceEngine::details::ExceptionTraits<ExceptionType>::string());
    }
    InferenceEnginePython::InferRequestWrap *requestWrap;
    InferenceEnginePython::InferRequestWrap* requestWrap;
    InferenceEngine::ResponseDesc dsc;
    request->GetUserData(reinterpret_cast<void **>(&requestWrap), &dsc);
    request->GetUserData(reinterpret_cast<void**>(&requestWrap), &dsc);
    auto end_time = Time::now();
    auto execTime = std::chrono::duration_cast<ns>(end_time - requestWrap->start_time);
    requestWrap->exec_time = static_cast<double>(execTime.count()) * 0.000001;
@@ -382,7 +366,7 @@ void latency_callback(InferenceEngine::IInferRequest::Ptr request, InferenceEngi
    }
}

void InferenceEnginePython::InferRequestWrap::setCyCallback(cy_callback callback, void *data) {
void InferenceEnginePython::InferRequestWrap::setCyCallback(cy_callback callback, void* data) {
    user_callback = callback;
    user_data = data;
}
@@ -412,27 +396,26 @@ int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
    return static_cast<int>(code);
}

std::map <std::string, InferenceEnginePython::ProfileInfo>
InferenceEnginePython::InferRequestWrap::getPerformanceCounts() {
    std::map <std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
std::map<std::string, InferenceEnginePython::ProfileInfo> InferenceEnginePython::InferRequestWrap::getPerformanceCounts() {
    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
    InferenceEngine::ResponseDesc response;
    request_ptr->GetPerformanceCounts(perf_counts, &response);
    std::map <std::string, InferenceEnginePython::ProfileInfo> perf_map;
    std::map<std::string, InferenceEnginePython::ProfileInfo> perf_map;

    for (auto it : perf_counts) {
        InferenceEnginePython::ProfileInfo profile_info;
        switch (it.second.status) {
            case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
                profile_info.status = "EXECUTED";
                break;
            case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
                profile_info.status = "NOT_RUN";
                break;
            case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
                profile_info.status = "OPTIMIZED_OUT";
                break;
            default:
                profile_info.status = "UNKNOWN";
        case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
            profile_info.status = "EXECUTED";
            break;
        case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
            profile_info.status = "NOT_RUN";
            break;
        case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
            profile_info.status = "OPTIMIZED_OUT";
            break;
        default:
            profile_info.status = "UNKNOWN";
        }
        profile_info.exec_type = it.second.exec_type;
        profile_info.layer_type = it.second.layer_type;
@@ -452,13 +435,11 @@ std::string InferenceEnginePython::get_version() {
    return version_str;
}


InferenceEnginePython::IECore::IECore(const std::string &xmlConfigFile) {
InferenceEnginePython::IECore::IECore(const std::string& xmlConfigFile) {
    actual = InferenceEngine::Core(xmlConfigFile);
}

std::map <std::string, InferenceEngine::Version>
InferenceEnginePython::IECore::getVersions(const std::string &deviceName) {
std::map<std::string, InferenceEngine::Version> InferenceEnginePython::IECore::getVersions(const std::string& deviceName) {
    return actual.GetVersions(deviceName);
}

@@ -473,22 +454,26 @@ int InferenceEnginePython::IEExecNetwork::getIdleRequestId() {
int InferenceEnginePython::IdleInferRequestQueue::wait(int num_requests, int64_t timeout) {
    std::unique_lock<std::mutex> lock(mutex);
    if (timeout > 0) {
        if (!cv.wait_for(lock, std::chrono::milliseconds(timeout), [this, num_requests](){return idle_ids.size() >= num_requests;}))
        if (!cv.wait_for(lock, std::chrono::milliseconds(timeout), [this, num_requests]() {
                return idle_ids.size() >= num_requests;
            }))
            return static_cast<int>(InferenceEngine::StatusCode::RESULT_NOT_READY);
    } else
        cv.wait(lock, [this, num_requests](){return idle_ids.size() >= num_requests;});
        cv.wait(lock, [this, num_requests]() {
            return idle_ids.size() >= num_requests;
        });
    return static_cast<int>(InferenceEngine::StatusCode::OK);
}

void InferenceEnginePython::IdleInferRequestQueue::setRequestIdle(int index) {
        std::unique_lock<std::mutex> lock(mutex);
        idle_ids.emplace_back(index);
        cv.notify_all();
    std::unique_lock<std::mutex> lock(mutex);
    idle_ids.emplace_back(index);
    cv.notify_all();
}

void InferenceEnginePython::IdleInferRequestQueue::setRequestBusy(int index) {
        std::lock_guard<std::mutex> lock(mutex);
        idle_ids.remove(index);
    std::lock_guard<std::mutex> lock(mutex);
    idle_ids.remove(index);
}

int InferenceEnginePython::IdleInferRequestQueue::getIdleRequestId() {
@@ -503,7 +488,7 @@ void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests)
    infer_requests.resize(num_requests);
    InferenceEngine::ResponseDesc response;
    for (size_t i = 0; i < num_requests; ++i) {
        InferRequestWrap &infer_request = infer_requests[i];
        InferRequestWrap& infer_request = infer_requests[i];
        infer_request.index = i;
        request_queue_ptr->setRequestIdle(i);
        infer_request.request_queue_ptr = request_queue_ptr;
@@ -513,18 +498,15 @@ void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests)
    }
}

InferenceEnginePython::IENetwork
InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, const std::string& binPath) {
InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, const std::string& binPath) {
    InferenceEngine::CNNNetwork net = actual.ReadNetwork(modelPath, binPath);
    return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
}

InferenceEnginePython::IENetwork
InferenceEnginePython::IECore::readNetwork(const std::string& model, const uint8_t *bin, size_t bin_size) {
InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& model, const uint8_t* bin, size_t bin_size) {
    InferenceEngine::MemoryBlob::Ptr weights_blob;
    if(bin_size!=0)
    {
        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bin_size }, InferenceEngine::Layout::C);
    if (bin_size != 0) {
        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C);
        weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc);
        weights_blob->allocate();
        memcpy(weights_blob->rwmap().as<uint8_t*>(), bin, bin_size);
@@ -533,83 +515,75 @@ InferenceEnginePython::IECore::readNetwork(const std::string& model, const uint8
    return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
}

std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(IENetwork network,
        const std::string &deviceName,
        const std::map <std::string, std::string> &config,
        int num_requests) {

    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name,
            num_requests);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(IENetwork network, const std::string& deviceName,
        const std::map<std::string, std::string>& config,
        int num_requests) {
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name, num_requests);
    exec_network->actual = actual.LoadNetwork(*network.actual, deviceName, config);
    exec_network->createInferRequests(num_requests);

    return exec_network;
}

std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetworkFromFile(
        const std::string &modelPath, const std::string &deviceName, const std::map<std::string,
        std::string> &config, int num_requests) {
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(modelPath,
            num_requests);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetworkFromFile(const std::string& modelPath,
        const std::string& deviceName,
        const std::map<std::string, std::string>& config,
        int num_requests) {
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(modelPath, num_requests);
    exec_network->actual = actual.LoadNetwork(modelPath, deviceName, config);
    exec_network->createInferRequests(num_requests);

    return exec_network;
}

std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::importNetwork(
        const std::string &modelFIle, const std::string &deviceName, const std::map <std::string, std::string> &config,
        int num_requests) {
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(EXPORTED_NETWORK_NAME,
            num_requests);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::importNetwork(const std::string& modelFIle, const std::string& deviceName,
        const std::map<std::string, std::string>& config,
        int num_requests) {
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(EXPORTED_NETWORK_NAME, num_requests);
    exec_network->actual = actual.ImportNetwork(modelFIle, deviceName, config);
    exec_network->createInferRequests(num_requests);

    return exec_network;

}

std::map <std::string, std::string>
InferenceEnginePython::IECore::queryNetwork(InferenceEnginePython::IENetwork network,
        const std::string &deviceName,
        const std::map <std::string, std::string> &config) {
std::map<std::string, std::string> InferenceEnginePython::IECore::queryNetwork(InferenceEnginePython::IENetwork network, const std::string& deviceName,
        const std::map<std::string, std::string>& config) {
    auto res = actual.QueryNetwork(*network.actual, deviceName, config);
    return res.supportedLayersMap;
}

void InferenceEnginePython::IECore::setConfig(const std::map <std::string, std::string> &config,
        const std::string &deviceName) {
void InferenceEnginePython::IECore::setConfig(const std::map<std::string, std::string>& config, const std::string& deviceName) {
    actual.SetConfig(config, deviceName);
}

void InferenceEnginePython::IECore::registerPlugin(const std::string &pluginName, const std::string &deviceName) {
void InferenceEnginePython::IECore::registerPlugin(const std::string& pluginName, const std::string& deviceName) {
    actual.RegisterPlugin(pluginName, deviceName);
}

void InferenceEnginePython::IECore::unregisterPlugin(const std::string &deviceName) {
void InferenceEnginePython::IECore::unregisterPlugin(const std::string& deviceName) {
    actual.UnregisterPlugin(deviceName);
}

void InferenceEnginePython::IECore::registerPlugins(const std::string &xmlConfigFile) {
void InferenceEnginePython::IECore::registerPlugins(const std::string& xmlConfigFile) {
    actual.RegisterPlugins(xmlConfigFile);
}

void InferenceEnginePython::IECore::addExtension(const std::string &ext_lib_path, const std::string &deviceName) {
void InferenceEnginePython::IECore::addExtension(const std::string& ext_lib_path, const std::string& deviceName) {
    auto extension_ptr = std::make_shared<InferenceEngine::Extension>(ext_lib_path);
    auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
    actual.AddExtension(extension, deviceName);
}

std::vector <std::string> InferenceEnginePython::IECore::getAvailableDevices() {
std::vector<std::string> InferenceEnginePython::IECore::getAvailableDevices() {
    return actual.GetAvailableDevices();
}

PyObject *InferenceEnginePython::IECore::getMetric(const std::string &deviceName, const std::string &name) {
PyObject* InferenceEnginePython::IECore::getMetric(const std::string& deviceName, const std::string& name) {
    InferenceEngine::Parameter param = actual.GetMetric(deviceName, name);
    return parse_parameter(param);
}

PyObject *InferenceEnginePython::IECore::getConfig(const std::string &deviceName, const std::string &name) {
PyObject* InferenceEnginePython::IECore::getConfig(const std::string& deviceName, const std::string& name) {
    InferenceEngine::Parameter param = actual.GetConfig(deviceName, name);
    return parse_parameter(param);
}

@@ -4,25 +4,25 @@

#pragma once

#include "Python.h"
#include <ie_extension.h>

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <ie_core.hpp>
#include <iostream>
#include <iterator>
#include <list>
#include <map>
#include <mutex>
#include <queue>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <map>
#include <vector>
#include <set>
#include <list>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <chrono>
#include <queue>
#include <condition_variable>
#include <mutex>

#include <ie_extension.h>
#include <ie_core.hpp>
#include "Python.h"

typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;
@@ -48,7 +48,7 @@ struct IENetwork {

    size_t getBatch();

    void addOutput(const std::string &out_layer, size_t port_id);
    void addOutput(const std::string& out_layer, size_t port_id);

    const std::map<std::string, InferenceEngine::InputInfo::Ptr> getInputsInfo();

@@ -56,15 +56,15 @@ struct IENetwork {

    const std::map<std::string, InferenceEngine::DataPtr> getOutputs();

    void reshape(const std::map<std::string, std::vector<size_t>> &input_shapes);
    void reshape(const std::map<std::string, std::vector<size_t>>& input_shapes);

    void serialize(const std::string &path_to_xml, const std::string &path_to_bin);
    void serialize(const std::string& path_to_xml, const std::string& path_to_bin);

    void load_from_buffer(const char* xml, size_t xml_size, uint8_t* bin, size_t bin_size);

    IENetwork(const std::string &model, const std::string &weights);
    IENetwork(const std::string& model, const std::string& weights);

    IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork> &cnn_network);
    IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network);

    IENetwork(PyObject* network);

@@ -75,7 +75,6 @@ struct IENetwork {
    std::string getOVNameForTensor(const std::string& orig_name);
};


struct IdleInferRequestQueue {
    std::list<size_t> idle_ids;
    std::mutex mutex;
@@ -91,7 +90,6 @@ struct IdleInferRequestQueue {
    using Ptr = std::shared_ptr<IdleInferRequestQueue>;
};


struct InferRequestWrap {
    int index;
    using cy_callback = void (*)(void*, int);
@@ -100,23 +98,22 @@ struct InferRequestWrap {
    Time::time_point start_time;
    double exec_time;
    cy_callback user_callback;
    void *user_data;
    IdleInferRequestQueue::Ptr request_queue_ptr;
    void* user_data;
    IdleInferRequestQueue::Ptr request_queue_ptr;

    void infer();

    void infer_async();

    int wait(int64_t timeout);
    int wait(int64_t timeout);

    void setCyCallback(cy_callback callback, void *data);
    void setCyCallback(cy_callback callback, void* data);

    void getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr);
    void getBlobPtr(const std::string& blob_name, InferenceEngine::Blob::Ptr& blob_ptr);

    void setBlob(const std::string &blob_name, const InferenceEngine::Blob::Ptr &blob_ptr);
    void setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr);

    void setBlob(const std::string &name, const InferenceEngine::Blob::Ptr &data,
        const InferenceEngine::PreProcessInfo& info);
    void setBlob(const std::string& name, const InferenceEngine::Blob::Ptr& data, const InferenceEngine::PreProcessInfo& info);

    void setBatch(int size);

@@ -125,26 +122,25 @@ struct InferRequestWrap {
    std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
};


struct IEExecNetwork {
    InferenceEngine::ExecutableNetwork actual;
    std::vector<InferRequestWrap> infer_requests;
    std::string name;
    IdleInferRequestQueue::Ptr request_queue_ptr;
    IdleInferRequestQueue::Ptr request_queue_ptr;

    IEExecNetwork(const std::string &name, size_t num_requests);
    IEExecNetwork(const std::string& name, size_t num_requests);

    IENetwork GetExecGraphInfo();

    void infer();
    void exportNetwork(const std::string & model_file);
    void exportNetwork(const std::string& model_file);

    std::map<std::string, InferenceEngine::InputInfo::CPtr> getInputsInfo();
    std::map<std::string, InferenceEngine::DataPtr> getInputs();
    std::map<std::string, InferenceEngine::CDataPtr> getOutputs();

    PyObject* getMetric(const std::string & metric_name);
    PyObject* getConfig(const std::string & name);
    PyObject* getMetric(const std::string& metric_name);
    PyObject* getConfig(const std::string& name);

    int wait(int num_requests, int64_t timeout);
    int getIdleRequestId();
@@ -152,38 +148,36 @@ struct IEExecNetwork {
    void createInferRequests(int num_requests);
};


struct IECore {
    InferenceEngine::Core actual;
    explicit IECore(const std::string & xmlConfigFile = std::string());
    std::map<std::string, InferenceEngine::Version> getVersions(const std::string & deviceName);
    explicit IECore(const std::string& xmlConfigFile = std::string());
    std::map<std::string, InferenceEngine::Version> getVersions(const std::string& deviceName);
    InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath);
    InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t *bin, size_t bin_size);
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network, const std::string & deviceName,
        const std::map<std::string, std::string> & config, int num_requests);
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetworkFromFile(const std::string & modelPath,
        const std::string & deviceName, const std::map<std::string, std::string> & config, int num_requests);
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> importNetwork(const std::string & modelFIle, const std::string & deviceName,
        const std::map<std::string, std::string> & config, int num_requests);
    std::map<std::string, std::string> queryNetwork(IENetwork network, const std::string & deviceName,
        const std::map<std::string, std::string> & config);
    void setConfig(const std::map<std::string, std::string> &config, const std::string & deviceName = std::string());
    void registerPlugin(const std::string & pluginName, const std::string & deviceName);
    void unregisterPlugin(const std::string & deviceName);
    void registerPlugins(const std::string & xmlConfigFile);
    void addExtension(const std::string & ext_lib_path, const std::string & deviceName);
    InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t* bin, size_t bin_size);
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network, const std::string& deviceName,
        const std::map<std::string, std::string>& config, int num_requests);
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetworkFromFile(const std::string& modelPath, const std::string& deviceName,
        const std::map<std::string, std::string>& config, int num_requests);
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> importNetwork(const std::string& modelFIle, const std::string& deviceName,
        const std::map<std::string, std::string>& config, int num_requests);
    std::map<std::string, std::string> queryNetwork(IENetwork network, const std::string& deviceName, const std::map<std::string, std::string>& config);
    void setConfig(const std::map<std::string, std::string>& config, const std::string& deviceName = std::string());
    void registerPlugin(const std::string& pluginName, const std::string& deviceName);
    void unregisterPlugin(const std::string& deviceName);
    void registerPlugins(const std::string& xmlConfigFile);
    void addExtension(const std::string& ext_lib_path, const std::string& deviceName);
    std::vector<std::string> getAvailableDevices();
    PyObject* getMetric(const std::string & deviceName, const std::string & name);
    PyObject* getConfig(const std::string & deviceName, const std::string & name);
    PyObject* getMetric(const std::string& deviceName, const std::string& name);
    PyObject* getConfig(const std::string& deviceName, const std::string& name);
};

template<class T>
T *get_buffer(InferenceEngine::Blob &blob) {
    return blob.buffer().as<T *>();
template <class T>
T* get_buffer(InferenceEngine::Blob& blob) {
    return blob.buffer().as<T*>();
}

template<class T, class... Args>
std::unique_ptr<T> make_unique(Args &&... args) {
template <class T, class... Args>
std::unique_ptr<T> make_unique(Args&&... args) {
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
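The two helper templates at the end of the header are used throughout the .cpp above: loadNetwork and importNetwork build their result with make_unique, and get_buffer is presumably the typed accessor the Cython layer goes through to reach a blob's memory. A short hypothetical sketch:

    // Hypothetical usage, assuming "blob" (an InferenceEngine::Blob::Ptr) and
    // "num_requests" exist in the calling code.
    float* raw = InferenceEnginePython::get_buffer<float>(*blob);
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>("my_net", num_requests);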

@@ -11,7 +11,8 @@ set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/offline_transfo

file(GLOB SOURCE
     ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api.pyx
     ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api_impl.cpp)
     ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api_impl.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)

set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX ON)

@@ -59,3 +60,5 @@ install(TARGETS ${INSTALLED_TARGETS}
install(PROGRAMS __init__.py
        DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations
        COMPONENT ${PYTHON_VERSION})

add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

@@ -5,16 +5,13 @@
#include "offline_transformations_api_impl.hpp"

#include <moc_transformations.hpp>
#include <pot_transformations.hpp>
#include <pruning.hpp>

#include <transformations/control_flow/unroll_tensor_iterator.hpp>

#include <ngraph/opsets/opset6.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/low_latency.hpp>
#include <ngraph/pass/manager.hpp>

#include <ngraph/opsets/opset6.hpp>
#include <pot_transformations.hpp>
#include <pruning.hpp>
#include <transformations/control_flow/unroll_tensor_iterator.hpp>

void InferenceEnginePython::ApplyMOCTransformations(InferenceEnginePython::IENetwork network, bool cf) {
    ngraph::pass::Manager manager;
@@ -34,7 +31,7 @@ void InferenceEnginePython::ApplyLowLatencyTransformation(InferenceEnginePython:
    manager.register_pass<ngraph::pass::UnrollTensorIterator>();

    auto pass_config = manager.get_pass_config();
    pass_config->set_callback<ngraph::pass::UnrollTensorIterator>([](const std::shared_ptr<const ngraph::Node> &node) -> bool {
    pass_config->set_callback<ngraph::pass::UnrollTensorIterator>([](const std::shared_ptr<const ngraph::Node>& node) -> bool {
        return node->get_rt_info().count("UNROLL_TI") == 0;
    });
    manager.run_passes(network.actual->getFunction());
@@ -46,13 +43,12 @@ void InferenceEnginePython::ApplyPruningTransformation(InferenceEnginePython::IE
    manager.run_passes(network.actual->getFunction());
}


void InferenceEnginePython::CheckAPI() {
    std::shared_ptr<ngraph::Function> f;
    {
        auto input = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
        auto input = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::f32, ngraph::Shape {1, 1000, 4});
        auto reshape = std::make_shared<ngraph::opset6::Reshape>(input, std::make_shared<ngraph::opset6::ShapeOf>(input), true);
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input});
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector {reshape}, ngraph::ParameterVector {input});
    }
    ngraph::pass::Manager m;
    m.register_pass<ngraph::pass::ConstantFolding>();

@@ -11,7 +11,8 @@ set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/test_utils)

file(GLOB SOURCE
     ${CMAKE_CURRENT_SOURCE_DIR}/test_utils_api.pyx
     ${CMAKE_CURRENT_SOURCE_DIR}/test_utils_api_impl.cpp)
     ${CMAKE_CURRENT_SOURCE_DIR}/test_utils_api_impl.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)

set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX ON)

@@ -48,3 +49,5 @@ add_custom_command(TARGET ${TARGET_NAME}
                   POST_BUILD
                   COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/test_utils/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py
)

add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})