diff --git a/inference-engine/tests/functional/plugin/shared/include/auto_batching/auto_batching_tests.hpp b/inference-engine/tests/functional/plugin/shared/include/auto_batching/auto_batching_tests.hpp
index d1e25bde4ca..9c917bd180c 100644
--- a/inference-engine/tests/functional/plugin/shared/include/auto_batching/auto_batching_tests.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/auto_batching/auto_batching_tests.hpp
@@ -65,14 +65,16 @@ TEST_P(AutoBatching_Test, compareAutoBatchingToBatch1) {
     for (size_t i = 0; i < nets.size(); ++i) {
         auto net = nets[i];
-        net.getInputsInfo().begin()->second->setLayout(Layout::NCHW);
-        net.getInputsInfo().begin()->second->setPrecision(Precision::FP32);
+        // we test single inputs networks only
+        auto inp = net.getInputsInfo().begin()->second;
+        inp->setLayout(Layout::NCHW);
+        inp->setPrecision(Precision::FP32);
         std::map<std::string, std::string> config;
         if (device_name.find("GPU") != std::string::npos)
             config[CONFIG_KEY(GPU_THROUGHPUT_STREAMS)] = std::to_string(num_streams);
         if (device_name.find("CPU") != std::string::npos)
             config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = std::to_string(num_streams);
-        auto exec_net_ref = ie.LoadNetwork(net, std::string("BATCH:") +
+        auto exec_net_ref = ie.LoadNetwork(net, std::string(CommonTestUtils::DEVICE_BATCH) +
                                                 device_name + "(" + std::to_string(num_batch) + ")",
                                                 config);
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
index 7d8087fb925..158352a72b4 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
@@ -10,6 +10,7 @@ const char DEVICE_AUTO[] = "AUTO";
 const char DEVICE_CPU[] = "CPU";
 const char DEVICE_GNA[] = "GNA";
 const char DEVICE_GPU[] = "GPU";
+const char DEVICE_BATCH[] = "BATCH";
 const char DEVICE_HDDL[] = "HDDL";
 const char DEVICE_MYRIAD[] = "MYRIAD";
 const char DEVICE_KEEMBAY[] = "VPUX";