Updated multi code snippets (#11037)

This commit is contained in:
Ilya Lavrenov
2022-03-20 10:44:33 +03:00
committed by GitHub
parent 72e8661157
commit 5390aa7ebc
7 changed files with 29 additions and 34 deletions

View File

@@ -45,7 +45,7 @@ Basically, there are three ways to specify the devices to be used by the "MULTI":
@endsphinxdirective
Notice that the priorities of the devices can be changed in real time for the executable network:
Notice that the priorities of the devices can be changed in real time for the compiled model:
@sphinxdirective
@@ -222,7 +222,7 @@ You can set the configuration directly as a string, or use the metric key `MULTI
@endsphinxdirective
* Option 2 - Pass a List as a Parameter, and Dynamically Change Priorities during Execution
Notice that the priorities of the devices can be changed in real time for the executable network:
Notice that the priorities of the devices can be changed in real time for the compiled model:
@sphinxdirective

View File

@@ -4,14 +4,15 @@ int main() {
//! [part0]
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
// the "MULTI" plugin is (globally) pre-configured with the explicit option:
// the "MULTI" device is (globally) pre-configured with the explicit option
core.set_property("MULTI", ov::device::priorities("HDDL,GPU"));
ov::CompiledModel compileModel0 = core.compile_model(model, "MULTI");
// configuration of the "MULTI" is part of the network configuration (and hence specific to the network):
// configuration of the "MULTI" is part of the compile configuration (and hence specific to the model):
ov::CompiledModel compileModel1 = core.compile_model(model, "MULTI", ov::device::priorities("HDDL,GPU"));
// same as previous, but configuration of the "MULTI" is part of the name (so config is empty), also network-specific:
// same as previous, but configuration of the "MULTI" is part
// of the name (so config is empty), also model-specific:
ov::CompiledModel compileModel2 = core.compile_model(model, "MULTI:HDDL,GPU");
//! [part0]
return 0;

View File

@@ -5,16 +5,17 @@ int main() {
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
ov::CompiledModel compileModel = core.compile_model(model, "MULTI:HDDL,GPU");
//...
// reverse the order of priorities
compileModel.set_property(ov::device::priorities("GPU,HDDL"));
// you can even exclude some device
// you can even exclude some device (HDDL here)
compileModel.set_property(ov::device::priorities("GPU"));
//...
// and then return it back
compileModel.set_property(ov::device::priorities("GPU,HDDL"));
//but you cannot add new devices on the fly, the next line will trigger the following exception:
//[ ERROR ] [NOT_FOUND] You can only change device priorities but not add new devices with the Network's SetConfig(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES.
//CPU device was not in the original device list!
// but you cannot add new devices on the fly,
// the next line will trigger the following exception:
// [ ERROR ] [NOT_FOUND] You can only change device
// priorities but not add new devices with the model's
// ov::device::priorities. CPU device was not in the original device list!
compileModel.set_property(ov::device::priorities("CPU,GPU,HDDL"));
//! [part1]
return 0;

View File

@@ -4,13 +4,14 @@ int main() {
//! [part2]
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
std::string allDevices = "MULTI:";
std::vector<std::string> availableDevices = core.get_available_devices();
std::string all_devices;
for (auto && device : availableDevices) {
allDevices += device;
allDevices += ((device == availableDevices[availableDevices.size()-1]) ? "" : ",");
all_devices += device;
all_devices += ((device == availableDevices[availableDevices.size()-1]) ? "" : ",");
}
ov::CompiledModel compileModel = core.compile_model(model, allDevices);
ov::CompiledModel compileModel = core.compile_model(model, "MULTI",
ov::device::priorities(all_devices));
//! [part2]
return 0;
}

View File

@@ -3,16 +3,15 @@
int main() {
//! [part3]
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
std::string allDevices = "MULTI:";
std::vector<std::string> myriadDevices = core.get_property("MYRIAD", ov::available_devices);
std::string all_devices;
for (size_t i = 0; i < myriadDevices.size(); ++i) {
allDevices += std::string("MYRIAD.")
all_devices += std::string("MYRIAD.")
+ myriadDevices[i]
+ std::string(i < (myriadDevices.size() -1) ? "," : "");
}
ov::CompiledModel compileModel = core.compile_model(model, allDevices);
ov::CompiledModel compileModel = core.compile_model("sample.xml", "MULTI",
ov::device::priorities(all_devices));
//! [part3]
return 0;
}

View File

@@ -1,23 +1,20 @@
#include <openvino/openvino.hpp>
int main() {
ov::AnyMap hddl_config = {{ov::enable_profiling(true)}};
ov::AnyMap gpu_config = {{ov::enable_profiling(true)}};
ov::AnyMap hddl_config, gpu_config;
//! [part4]
// configure the HDDL device first
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
core.set_property({ov::device::properties("HDDL", hddl_config),
ov::device::properties("GPU", gpu_config)});
// compile the model on the multi-device, while specifying the configuration (devices along with priorities
// compile the model on the multi-device,
// while specifying the configuration (devices along with priorities
// and the configuration of devices):
ov::CompiledModel compileModel = core.compile_model(model, "MULTI",
ov::device::priorities("HDDL,GPU"),
ov::device::priorities("HDDL", "GPU"),
ov::device::properties("HDDL", hddl_config),
ov::device::properties("GPU", gpu_config));
// query the optimal number of requests:
// query the optimal number of requests:
uint32_t nireq = compileModel.get_property(ov::optimal_number_of_infer_requests);
//! [part4]
return 0;

View File

@@ -1,14 +1,10 @@
#include <openvino/openvino.hpp>
int main() {
const ov::AnyMap full_config = {};
//! [part5]
ov::Core core;
std::string device_name = "MULTI:HDDL,GPU";
std::shared_ptr<ov::Model> model = core.read_model("sample.xml");
// 'device_name' can be "MULTI:HDDL,GPU" to configure the multi-device to use HDDL and GPU
ov::CompiledModel compileModel = core.compile_model(model, device_name, full_config);
// query the optimal number of requests:
ov::CompiledModel compileModel = core.compile_model("sample.xml", "MULTI:HDDL,GPU");
// query the optimal number of requests
uint32_t nireq = compileModel.get_property(ov::optimal_number_of_infer_requests);
//! [part5]
return 0;