* Docs: Model caching feature overview * Update docs/IE_DG/Intro_to_Performance.md Co-authored-by: Anastasiya Ageeva <anastasiya.ageeva@intel.com> * Apply suggestions from code review Co-authored-by: Anastasiya Ageeva <anastasiya.ageeva@intel.com> * Review comments - Moved code examples to snippets - Added link to Model Caching overview from "Inference Engine Developer Guide" - Few minor changes * Update docs/IE_DG/Intro_to_Performance.md Co-authored-by: Anastasiya Ageeva <anastasiya.ageeva@intel.com> Co-authored-by: Anastasiya Ageeva <anastasiya.ageeva@intel.com>
21 lines
736 B
C++
21 lines
736 B
C++
#include <algorithm>
#include <map>
#include <string>
#include <vector>

#include <ie_core.hpp>
int main() {
|
|
using namespace InferenceEngine;
|
|
std::string modelPath = "/tmp/myModel.xml";
|
|
std::string deviceName = "GNA";
|
|
std::map<std::string, std::string> deviceConfig;
|
|
InferenceEngine::Core ie;
|
|
//! [part3]
|
|
// Get list of supported metrics
|
|
std::vector<std::string> keys = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS));
|
|
|
|
// Find 'IMPORT_EXPORT_SUPPORT' metric in supported metrics
|
|
auto it = std::find(keys.begin(), keys.end(), METRIC_KEY(IMPORT_EXPORT_SUPPORT));
|
|
|
|
// If metric 'IMPORT_EXPORT_SUPPORT' exists, check it's value
|
|
bool cachingSupported = (it != keys.end()) && ie.GetMetric(deviceName, METRIC_KEY(IMPORT_EXPORT_SUPPORT));
|
|
//! [part3]
|
|
return 0;
|
|
}
|