// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#ifndef INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
#define INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP

#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include <inference_engine.hpp>
#include "ie_extension.h"

namespace InferenceEnginePython {

// Python-facing view of a single network layer.
struct IENetLayer {
    InferenceEngine::CNNLayerPtr layer_ptr;
    std::string name;
    std::string type;
    std::string precision;
    std::string affinity;
    std::map<std::string, std::string> params;

    void setAffinity(const std::string &target_affinity);
    void setParams(const std::map<std::string, std::string> &params_map);
    std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
    void setPrecision(std::string precision);
};

// Shape, precision, and layout of a network input.
struct InputInfo {
    InferenceEngine::InputInfo actual;
    std::vector<size_t> dims;
    std::string precision;
    std::string layout;

    void setPrecision(std::string precision);
    void setLayout(std::string layout);
};

// Shape, precision, and layout of a network output.
struct OutputInfo {
    InferenceEngine::DataPtr actual;
    std::vector<size_t> dims;
    std::string precision;
    std::string layout;

    void setPrecision(std::string precision);
};

// Per-layer performance counters reported by getPerformanceCounts().
struct ProfileInfo {
    std::string status;
    std::string exec_type;
    std::string layer_type;
    long long real_time;
    long long cpu_time;
    unsigned execution_index;
};

// Wrapper over InferenceEngine::CNNNetwork.
struct IENetwork {
    InferenceEngine::CNNNetwork actual;
    std::string name;
    std::size_t batch_size;

    void setBatch(const size_t size);
    void addOutputs(const std::vector<std::string> &out_layers, const std::string &precision);
    std::map<std::string, InferenceEnginePython::IENetLayer> getLayers();
    std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
    std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
    void reshape(const std::map<std::string, std::vector<size_t>> &input_shapes);
};

// Reads an IR model (.xml topology plus .bin weights) into an IENetwork.
struct IENetReader {
    static IENetwork read(std::string const &model, std::string const &weights);
    std::vector<std::pair<std::string, std::string>> getLayers();
};

// A single inference request: infer() is synchronous; infer_async() plus
// wait() drive the asynchronous path.
struct InferRequestWrap {
    InferenceEngine::IInferRequest::Ptr request_ptr;
    InferenceEngine::BlobMap inputs;
    InferenceEngine::BlobMap outputs;

    void infer();
    void infer_async();
    int wait(int64_t timeout);

    InferenceEngine::Blob::Ptr &getInputBlob(const std::string &blob_name);
    InferenceEngine::Blob::Ptr &getOutputBlob(const std::string &blob_name);
    std::vector<std::string> getInputsList();
    std::vector<std::string> getOutputsList();
    std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
};

// A network compiled for a device, together with its pool of infer requests.
struct IEExecNetwork {
    InferenceEngine::IExecutableNetwork::Ptr actual;
    std::vector<InferRequestWrap> infer_requests;

    IEExecNetwork(const std::string &name, size_t num_requests);

    std::string name;
    int next_req_index = 0;
    bool async;

    void infer();
};

// Device plugin wrapper; load() compiles an IENetwork into an IEExecNetwork.
struct IEPlugin {
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> load(InferenceEnginePython::IENetwork &net,
                                                               int num_requests,
                                                               const std::map<std::string, std::string> &config);
    std::string device_name;
    std::string version;

    void setConfig(const std::map<std::string, std::string> &);
    void addCpuExtension(const std::string &extension_path);
    void setInitialAffinity(InferenceEnginePython::IENetwork &net);
    IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs);
    IEPlugin() = default;
    std::set<std::string> queryNetwork(InferenceEnginePython::IENetwork &net);

    InferenceEngine::InferenceEnginePluginPtr actual;
};

// Typed access to a blob's raw buffer.
template<class T>
T *get_buffer(InferenceEngine::Blob &blob) {
    return blob.buffer().as<T *>();
}

// C++11 stand-in for std::make_unique.
template<class T, class... Args>
std::unique_ptr<T> make_unique(Args &&... args) {
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

std::string get_version();

}  // namespace InferenceEnginePython

#endif  // INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
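
// A minimal usage sketch (illustrative only, not part of this header),
// showing how the declarations above fit together. The plugin name "CPU",
// the IR file names model.xml/model.bin, and the blob names "data" and
// "prob" are assumptions; real blob names come from the model itself.
//
//   using namespace InferenceEnginePython;
//   IENetwork net = IENetReader::read("model.xml", "model.bin");
//   IEPlugin plugin("CPU", {});
//   auto exec_net = plugin.load(net, /*num_requests=*/1, {});
//   InferRequestWrap &request = exec_net->infer_requests[0];
//   float *in = get_buffer<float>(*request.getInputBlob("data"));
//   // ... fill `in` with preprocessed input data ...
//   request.infer();  // blocking; use infer_async() + wait() for async
//   float *out = get_buffer<float>(*request.getOutputBlob("prob"));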