[UT][AUTO_BATCH]auto batch plugin unit test (#15211)

* Init auto_batch plugin unit test

* Add more mock test

* Add to ci yml file

* Fix clang issue

* Resolve compilation issue

* Fix symbol multiple definition in static build

* Add test cases for AutoBatchInferRequest

* Add test cases for AutoBatchAsyncInferRequest

* Fixed build error after PR-15229

* Resolve blocked issue when call StartAsync test cases

* add more test for auto batch async inference

---------

Co-authored-by: Chen Peter <peter.chen@intel.com>
This commit is contained in:
River Li
2023-03-07 11:55:26 +08:00
committed by GitHub
parent 83b57e2a64
commit 4d7bffa593
12 changed files with 1556 additions and 3 deletions

View File

@@ -431,6 +431,9 @@ jobs:
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ieMultiPluginUnitTests.xml
displayName: 'MULTI UT'
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_batch_unit_tests.xml
displayName: 'AutoBatch UT'
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_template_func_tests --gtest_filter=*smoke* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-templateFuncTests.xml
displayName: 'TEMPLATE FuncTests'

View File

@@ -302,6 +302,9 @@ jobs:
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieMultiPluginUnitTests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ieMultiPluginUnitTests.xml
displayName: 'MULTI UT'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_batch_unit_tests.xml
displayName: 'AutoBatch UT'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_template_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-templateFuncTests.xml
displayName: 'TEMPLATE FuncTests'

View File

@@ -8,15 +8,15 @@ endif()
set(TARGET_NAME "openvino_auto_batch_plugin")
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp)
ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "BATCH"
PSEUDO_DEVICE
SOURCES ${SOURCES} ${HEADERS}
VERSION_DEFINES_FOR auto_batch.cpp ADD_CLANG_FORMAT)
VERSION_DEFINES_FOR src/auto_batch.cpp ADD_CLANG_FORMAT)
target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads)
@@ -24,3 +24,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads)
ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
if(ENABLE_TESTS)
add_subdirectory(tests/unit)
endif()

View File

@@ -19,6 +19,10 @@
#include "ie_metric_helpers.hpp"
#include "threading/ie_thread_safe_containers.hpp"
#ifdef AUTOBATCH_UNITTEST
# define AutoBatchPlugin MockAutoBatchPlugin
#endif
namespace AutoBatchPlugin {
using DeviceName = std::string;
@@ -164,8 +168,13 @@ public:
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::RemoteContext::Ptr CreateContext(const InferenceEngine::ParamMap&) override;
#ifdef AUTOBATCH_UNITTEST
public:
#else
protected:
#endif
DeviceInformation ParseMetaDevice(const std::string& devicesBatchCfg,
const std::map<std::string, std::string>& config) const;

View File

@@ -0,0 +1,56 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Unit-test target for the auto-batch plugin; added via add_subdirectory(tests/unit)
# from the plugin's CMakeLists.txt when ENABLE_TESTS is on.
set(TARGET_NAME ov_auto_batch_unit_tests)
# Stub CI build number so addVersionDefines() can stamp version macros into
# auto_batch.cpp, which is compiled directly into this test binary.
set(CI_BUILD_NUMBER "unittest")
addVersionDefines(${OpenVINO_SOURCE_DIR}/src/plugins/auto_batch/src/auto_batch.cpp CI_BUILD_NUMBER)
# AUTOBATCH_UNITTEST switches the plugin headers into test mode (renames
# AutoBatchPlugin to MockAutoBatchPlugin and widens member visibility).
# NOTE(review): directory-scoped add_definitions() is intentional here — the
# define must also apply to the plugin sources pulled in through
# ADDITIONAL_SOURCE_DIRS below, which addIeTargetTest compiles into this target.
add_definitions(-DAUTOBATCH_UNITTEST)
# Silence MSVC warnings triggered by compiling the plugin sources under the
# unit-test define.
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ie_add_compiler_flags(/wd4244)
ie_add_compiler_flags(/wd4267)
ie_add_compiler_flags(/wd4305)
ie_add_compiler_flags(/wd4018)
ie_add_compiler_flags(/wd4050)
ie_add_compiler_flags(/wd4250)
ie_add_compiler_flags(/wd4334)
ie_add_compiler_flags(/wd4661)
ie_add_compiler_flags(/wd4273)
ie_add_compiler_flags(/wd4309)
ie_add_compiler_flags(/wd4804)
endif()
# GCC/Clang: the plugin sources exist both in the plugin library and in this
# test binary, so relax declaration/ODR-related warnings.
if(NOT MSVC)
ie_add_compiler_flags(-Wno-missing-declarations)
ie_add_compiler_flags(-Wno-sign-compare)
ie_add_compiler_flags(-Wno-odr)
ie_add_compiler_flags(-Wno-all)
endif()
set(SHARED_HEADERS_DIR "${OpenVINO_SOURCE_DIR}/src/tests/ie_test_util")
# addIeTargetTest is an OpenVINO helper: creates the gtest executable from the
# listed roots, adds includes, links libraries, and registers CTest labels.
addIeTargetTest(
NAME
${TARGET_NAME}
ROOT
${CMAKE_CURRENT_SOURCE_DIR}
ADDITIONAL_SOURCE_DIRS
${OpenVINO_SOURCE_DIR}/src/plugins/auto_batch/src
INCLUDES
${CMAKE_CURRENT_SOURCE_DIR}
${OpenVINO_SOURCE_DIR}/src/plugins/auto_batch/src
${SHARED_HEADERS_DIR}
LINK_LIBRARIES
unitTestUtils
openvino::runtime
openvino::runtime::dev
ngraphFunctions
DEPENDENCIES
mock_engine
ngraphFunctions
ADD_CPPLINT
LABELS
Auto_Batch
)
# Select the threading backend (TBB/OMP/sequential) for the test target.
set_ie_threading_interface_for(${TARGET_NAME})

View File

@@ -0,0 +1,396 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "ie_ngraph_utils.hpp"
#include "mock_auto_batch_plugin.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "transformations/utils/utils.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/mock_task_executor.hpp"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Eq;
using ::testing::MatcherCast;
using ::testing::Matches;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrEq;
using ::testing::StrNe;
using ::testing::Throw;
using namespace MockAutoBatchPlugin;
using namespace MockAutoBatchDevice;
using namespace InferenceEngine;
// Parameter tuple shared by the sync/async request test fixtures below.
using AutoBatchRequestTestParams = std::tuple<int, // batch_size
ngraph::element::Type_t, // data type
int>; // inference interval
// Fixture for AutoBatchInferRequest tests: mocks the underlying batched infer
// request and builds a WorkerInferRequest so per-slot requests can be created
// and inspected without a real device.
class AutoBatchRequestTest : public ::testing::TestWithParam<AutoBatchRequestTestParams> {
public:
// Mock inferRequest
std::shared_ptr<NiceMock<MockIInferRequestInternal>> mockInferRequestBatched;
// Per-slot requests created by the tests; released in TearDown().
std::vector<std::shared_ptr<AutoBatchInferRequest>> autoBatchInferRequests;
// Blobs lazily allocated by the mocked GetBlob(), keyed by blob name.
std::map<std::string, InferenceEngine::Blob::Ptr> blobMap;
// Input/output nodes of the test model; filled by prepare_input().
std::vector<std::shared_ptr<const ov::Node>> inputs, outputs;
// IE output names of the model's batched inputs/outputs; filled by prepare_input().
std::set<std::string> batchedInputs, batchedOutputs;
// Shared worker the per-slot requests attach to; built by create_worker().
std::shared_ptr<AutoBatchExecutableNetwork::WorkerInferRequest> workerRequestPtr;
public:
// Encodes (batch_size, element_type, infer_interval) into the test name.
static std::string getTestCaseName(testing::TestParamInfo<AutoBatchRequestTestParams> obj) {
int batch_size, infer_interval;
ngraph::element::Type_t element_type;
std::tie(batch_size, element_type, infer_interval) = obj.param;
std::string res;
res = "batch_size_" + std::to_string(batch_size);
res += "_element_type_" + std::to_string(static_cast<int>(element_type));
if (infer_interval > 0)
res += "_infer_interval_" + std::to_string(infer_interval);
return res;
}
void TearDown() override {
mockInferRequestBatched = {};
autoBatchInferRequests.clear();
blobMap.clear();
inputs.clear();
outputs.clear();
batchedInputs.clear();
batchedOutputs.clear();
// NOTE(review): clear_worker() joins the worker thread, so every test in
// this fixture is expected to have called create_worker() — confirm when
// adding new tests.
clear_worker();
}
void SetUp() override {
mockInferRequestBatched = std::make_shared<NiceMock<MockIInferRequestInternal>>();
}
// Builds the shared WorkerInferRequest around the mocked batched request.
// The worker thread in this base fixture only sleeps briefly and exits;
// real scheduling is emulated by the override in AutoBatchAsyncInferRequestTest.
void create_worker(int batch_size) {
workerRequestPtr = std::make_shared<AutoBatchExecutableNetwork::WorkerInferRequest>();
workerRequestPtr->_inferRequestBatched = {mockInferRequestBatched, {}};
workerRequestPtr->_batchSize = batch_size;
workerRequestPtr->_completionTasks.resize(workerRequestPtr->_batchSize);
// Forward any exception raised by the batched request to the worker.
workerRequestPtr->_inferRequestBatched->SetCallback([this](std::exception_ptr exceptionPtr) mutable {
if (exceptionPtr)
workerRequestPtr->_exceptionPtr = exceptionPtr;
});
workerRequestPtr->_thread = std::thread([this] {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
});
return;
}
// Releases the worker's resources and joins its thread.
void clear_worker() {
workerRequestPtr->_inferRequestBatched = {};
workerRequestPtr->_completionTasks.clear();
workerRequestPtr->_thread.join();
}
// Collects the model's I/O nodes and batched blob names, then stubs the
// batched request's GetBlob() for the first input and first output name to
// lazily allocate a blob whose leading dimension is batch_size.
void prepare_input(std::shared_ptr<ov::Model>& function, int batch_size) {
for (auto& input : function->inputs()) {
std::shared_ptr<const ov::Node> n = input.get_node_shared_ptr();
inputs.emplace_back(n);
}
for (auto& output : function->outputs()) {
std::shared_ptr<const ov::Node> n = output.get_node_shared_ptr();
outputs.emplace_back(n);
}
const auto& params = function->get_parameters();
for (size_t i = 0; i < params.size(); i++) {
batchedInputs.insert(ov::op::util::get_ie_output_name(params[i]->output(0)));
}
const auto& results = function->get_results();
for (size_t i = 0; i < results.size(); i++) {
const auto& output = results[i];
const auto& node = output->input_value(0);
batchedOutputs.insert(
ov::op::util::get_ie_output_name(ov::Output<const ov::Node>(node.get_node(), node.get_index())));
}
ON_CALL(*mockInferRequestBatched, GetBlob(StrEq(*batchedInputs.begin())))
.WillByDefault([this, batch_size](const std::string& name) {
// Return the cached blob, or allocate a batch-sized input blob once.
auto item = blobMap.find(name);
if (item != blobMap.end()) {
return item->second;
}
auto shape = inputs[0]->get_shape();
shape[0] = batch_size;
auto element_type = inputs[0]->get_element_type();
InferenceEngine::TensorDesc tensorDesc = {InferenceEngine::details::convertPrecision(element_type),
shape,
InferenceEngine::TensorDesc::getLayoutByRank(shape.size())};
auto blob = make_blob_with_precision(tensorDesc);
blob->allocate();
blobMap[name] = blob;
return blob;
});
ON_CALL(*mockInferRequestBatched, GetBlob(StrEq(*batchedOutputs.begin())))
.WillByDefault([this, batch_size](const std::string& name) {
// Same lazy allocation for the batched output blob.
auto item = blobMap.find(name);
if (item != blobMap.end()) {
return item->second;
}
auto shape = outputs[0]->get_shape();
shape[0] = batch_size;
auto element_type = outputs[0]->get_element_type();
InferenceEngine::TensorDesc tensorDesc = {InferenceEngine::details::convertPrecision(element_type),
shape,
InferenceEngine::TensorDesc::getLayoutByRank(shape.size())};
auto blob = make_blob_with_precision(tensorDesc);
blob->allocate();
blobMap[name] = blob;
return blob;
});
}
};
// Verifies that each per-slot AutoBatchInferRequest maps its input/output
// blobs onto the matching slice of the batched request's blob
// (expected offset = batch_id * per-request byte size).
TEST_P(AutoBatchRequestTest, AutoBatchRequestCreateTestCase) {
    int batch_size, infer_interval;
    ngraph::element::Type_t element_type;
    std::tie(batch_size, element_type, infer_interval) = this->GetParam();

    std::vector<size_t> shape = {1, 3, 24, 24};
    auto model = ngraph::builder::subgraph::makeMultiSingleConv(shape, element_type);
    prepare_input(model, batch_size);
    create_worker(batch_size);

    for (int batch_id = 0; batch_id < batch_size; batch_id++) {
        auto request = std::make_shared<AutoBatchInferRequest>(inputs,
                                                               outputs,
                                                               *workerRequestPtr,
                                                               batch_id,
                                                               batch_size,
                                                               batchedInputs,
                                                               batchedOutputs);
        EXPECT_NE(request, nullptr);
        autoBatchInferRequests.emplace_back(request);
        const std::vector<std::string> blob_names = {*batchedInputs.begin(), *batchedOutputs.begin()};
        for (const auto& blob_name : blob_names) {
            auto slice_blob = request->GetBlob(blob_name);
            auto slice_ptr = slice_blob->buffer().as<char*>();
            auto slice_bytes = slice_blob->byteSize();
            auto whole_blob = mockInferRequestBatched->GetBlob(blob_name);
            auto whole_ptr = whole_blob->buffer().as<char*>();
            EXPECT_EQ(slice_ptr, whole_ptr + slice_bytes * batch_id);
        }
    }
}
// Verifies CopyInputsIfNeeded()/CopyOutputsIfNeeded() run without throwing
// for every per-slot request attached to the shared worker.
TEST_P(AutoBatchRequestTest, AutoBatchRequestCopyBlobTestCase) {
    int batch_size, infer_interval;
    ngraph::element::Type_t element_type;
    std::tie(batch_size, element_type, infer_interval) = this->GetParam();

    std::vector<size_t> shape = {1, 3, 24, 24};
    auto model = ngraph::builder::subgraph::makeMultiSingleConv(shape, element_type);
    prepare_input(model, batch_size);
    create_worker(batch_size);

    for (int slot = 0; slot < batch_size; slot++) {
        auto request = std::make_shared<AutoBatchInferRequest>(inputs,
                                                               outputs,
                                                               *workerRequestPtr,
                                                               slot,
                                                               batch_size,
                                                               batchedInputs,
                                                               batchedOutputs);
        EXPECT_NE(request, nullptr);
        autoBatchInferRequests.emplace_back(request);
        EXPECT_NO_THROW(request->CopyInputsIfNeeded());
        EXPECT_NO_THROW(request->CopyOutputsIfNeeded());
    }
}
// Extends the sync fixture with a non-batched fallback request and a worker
// thread that emulates AutoBatch scheduling: run the batched request when all
// slots are filled, or fall back to per-request execution on timeout.
class AutoBatchAsyncInferRequestTest : public AutoBatchRequestTest {
public:
// Fallback request used when a partial batch times out.
std::shared_ptr<NiceMock<MockIInferRequestInternal>> mockInferRequestWithoutBatched;
MockTaskExecutor::Ptr mockTaskExecutor;
std::vector<AutoBatchAsyncInferRequest::Ptr> autoBatchAsyncInferRequestVec;
// Signals the scheduling thread to exit.
// NOTE(review): read/written from two threads without synchronization;
// works in practice because TearDown() joins the thread — confirm.
bool terminate;
public:
void TearDown() override {
// Flip the flag first so the worker loop exits on its next wake-up; the
// base TearDown() then joins the thread via clear_worker().
terminate = true;
autoBatchAsyncInferRequestVec.clear();
AutoBatchRequestTest::TearDown();
mockInferRequestWithoutBatched = {};
}
void SetUp() override {
AutoBatchRequestTest::SetUp();
mockInferRequestWithoutBatched = std::make_shared<NiceMock<MockIInferRequestInternal>>();
terminate = false;
mockTaskExecutor = std::make_shared<MockTaskExecutor>();
}
// Overrides the base helper: additionally stubs StartAsync() on the batched
// request to fire every completion task, and starts a scheduling thread
// that replicates the plugin's batch/timeout loop.
void create_worker(int batch_size) {
workerRequestPtr = std::make_shared<AutoBatchExecutableNetwork::WorkerInferRequest>();
workerRequestPtr->_inferRequestBatched = {mockInferRequestBatched, {}};
workerRequestPtr->_batchSize = batch_size;
workerRequestPtr->_completionTasks.resize(workerRequestPtr->_batchSize);
workerRequestPtr->_inferRequestBatched->SetCallback([this](std::exception_ptr exceptionPtr) mutable {
if (exceptionPtr)
workerRequestPtr->_exceptionPtr = exceptionPtr;
});
// Batched StartAsync() immediately "completes" every slot and wakes the worker.
ON_CALL(*mockInferRequestBatched, StartAsync()).WillByDefault([this]() {
IE_ASSERT(workerRequestPtr->_completionTasks.size() == (size_t)workerRequestPtr->_batchSize);
for (int c = 0; c < workerRequestPtr->_batchSize; c++) {
workerRequestPtr->_completionTasks[c]();
}
workerRequestPtr->_cond.notify_one();
});
workerRequestPtr->_thread = std::thread([this] {
while (1) {
std::cv_status status;
{
std::unique_lock<std::mutex> lock(workerRequestPtr->_mutex);
status = workerRequestPtr->_cond.wait_for(lock, std::chrono::milliseconds(10));
}
if (terminate) {
break;
} else {
const int sz = static_cast<int>(workerRequestPtr->_tasks.size());
if (sz == workerRequestPtr->_batchSize) {
// Batch is full: collect the completion task of every slot,
// mark them BATCH_EXECUTED and run the batched request.
std::pair<AutoBatchAsyncInferRequest*, InferenceEngine::Task> t;
for (int n = 0; n < sz; n++) {
IE_ASSERT(workerRequestPtr->_tasks.try_pop(t));
workerRequestPtr->_completionTasks[n] = std::move(t.second);
t.first->_inferRequest->_wasBatchedRequestUsed =
AutoBatchInferRequest::eExecutionFlavor::BATCH_EXECUTED;
}
workerRequestPtr->_inferRequestBatched->StartAsync();
} else if ((status == std::cv_status::timeout) && sz) {
// Timed out with a partial batch: run each pending request on
// the non-batched fallback and complete its task directly.
std::pair<AutoBatchAsyncInferRequest*, InferenceEngine::Task> t;
for (int n = 0; n < sz; n++) {
IE_ASSERT(workerRequestPtr->_tasks.try_pop(t));
t.first->_inferRequest->_wasBatchedRequestUsed =
AutoBatchInferRequest::eExecutionFlavor::TIMEOUT_EXECUTED;
t.first->_inferRequestWithoutBatch->StartAsync();
t.second();
}
}
}
}
});
return;
}
};
// Checks that an AutoBatchAsyncInferRequest can be constructed for every
// batch slot on top of a sync request plus the non-batched fallback request.
TEST_P(AutoBatchAsyncInferRequestTest, AutoBatchAsyncInferRequestCreateTest) {
    int batch_size, infer_interval;
    ngraph::element::Type_t element_type;
    std::tie(batch_size, element_type, infer_interval) = this->GetParam();

    std::vector<size_t> shape = {1, 3, 24, 24};
    auto model = ngraph::builder::subgraph::makeMultiSingleConv(shape, element_type);
    prepare_input(model, batch_size);
    create_worker(batch_size);

    for (int slot = 0; slot < batch_size; slot++) {
        auto syncRequest = std::make_shared<AutoBatchInferRequest>(inputs,
                                                                   outputs,
                                                                   *workerRequestPtr,
                                                                   slot,
                                                                   batch_size,
                                                                   batchedInputs,
                                                                   batchedOutputs);
        EXPECT_NE(syncRequest, nullptr);
        autoBatchInferRequests.emplace_back(syncRequest);

        InferenceEngine::SoIInferRequestInternal fallbackRequest = {mockInferRequestWithoutBatched, {}};
        auto asyncRequest =
            std::make_shared<AutoBatchAsyncInferRequest>(syncRequest, fallbackRequest, nullptr);
        EXPECT_NE(asyncRequest, nullptr);
        autoBatchAsyncInferRequestVec.emplace_back(asyncRequest);
    }
}
// Creates one async request per batch slot, kicks them all off (optionally
// pacing submissions with infer_interval so the worker's wait can time out),
// then waits for every result.
TEST_P(AutoBatchAsyncInferRequestTest, AutoBatchAsyncInferRequestStartAsyncTest) {
    int batch_size, infer_interval;
    ngraph::element::Type_t element_type;
    std::tie(batch_size, element_type, infer_interval) = this->GetParam();

    std::vector<size_t> shape = {1, 3, 24, 24};
    auto model = ngraph::builder::subgraph::makeMultiSingleConv(shape, element_type);
    prepare_input(model, batch_size);
    create_worker(batch_size);

    for (int slot = 0; slot < batch_size; slot++) {
        auto syncRequest = std::make_shared<AutoBatchInferRequest>(inputs,
                                                                   outputs,
                                                                   *workerRequestPtr,
                                                                   slot,
                                                                   batch_size,
                                                                   batchedInputs,
                                                                   batchedOutputs);
        EXPECT_NE(syncRequest, nullptr);
        autoBatchInferRequests.emplace_back(syncRequest);

        InferenceEngine::SoIInferRequestInternal fallbackRequest = {mockInferRequestWithoutBatched, {}};
        auto asyncRequest =
            std::make_shared<AutoBatchAsyncInferRequest>(syncRequest, fallbackRequest, nullptr);
        EXPECT_NE(asyncRequest, nullptr);
        autoBatchAsyncInferRequestVec.emplace_back(asyncRequest);
    }

    for (auto& request : autoBatchAsyncInferRequestVec) {
        if (infer_interval > 0)
            std::this_thread::sleep_for(std::chrono::milliseconds(infer_interval));
        EXPECT_NO_THROW(request->StartAsync());
    }
    for (auto& request : autoBatchAsyncInferRequestVec)
        EXPECT_NO_THROW(request->Wait(InferRequest::WaitMode::RESULT_READY));
}
// Element types covered by the parameterized request tests.
const std::vector<ngraph::element::Type_t> element_type{ngraph::element::Type_t::f16,
ngraph::element::Type_t::f32,
ngraph::element::Type_t::f64,
ngraph::element::Type_t::i8,
ngraph::element::Type_t::i16,
ngraph::element::Type_t::i32,
ngraph::element::Type_t::i64,
ngraph::element::Type_t::u8,
ngraph::element::Type_t::u16,
ngraph::element::Type_t::u32,
ngraph::element::Type_t::u64};
// Batch sizes under test.
const std::vector<int> batch_size{1, 8, 16, 32, 64, 128};
// Sync tests submit without any delay between requests.
const std::vector<int> infer_interval{0};
// Async tests also cover a 10 ms gap between StartAsync() calls, which lets
// the worker thread's 10 ms wait time out with a partial batch.
const std::vector<int> infer_interval_timeout{0, 10};
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
AutoBatchRequestTest,
::testing::Combine(::testing::ValuesIn(batch_size),
::testing::ValuesIn(element_type),
::testing::ValuesIn(infer_interval)),
AutoBatchRequestTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
AutoBatchAsyncInferRequestTest,
::testing::Combine(::testing::ValuesIn(batch_size),
::testing::ValuesIn(element_type),
::testing::ValuesIn(infer_interval_timeout)),
AutoBatchAsyncInferRequestTest::getTestCaseName);

View File

@@ -0,0 +1,134 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "mock_auto_batch_plugin.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Eq;
using ::testing::MatcherCast;
using ::testing::Matches;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrEq;
using ::testing::StrNe;
using ::testing::Throw;
using namespace MockAutoBatchPlugin;
using namespace MockAutoBatchDevice;
using namespace InferenceEngine;
// Parameter tuple for CreateInferRequestTest.
using CreateInferRequestTestParams = std::tuple<int, // batch_size
int>; // inferReq number
// Fixture that builds an AutoBatchExecutableNetwork over mocked core/plugin
// objects and records every mock infer request the mocked network hands out.
class CreateInferRequestTest : public ::testing::TestWithParam<CreateInferRequestTestParams> {
public:
std::shared_ptr<NiceMock<MockICore>> core;
std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;
// Mock execNetwork
std::shared_ptr<NiceMock<MockIExecutableNetworkInternal>> mockIExecNet;
ov::SoPtr<IExecutableNetworkInternal> mockExecNetwork;
std::shared_ptr<NiceMock<MockIInferencePlugin>> mockIPlugin;
std::shared_ptr<InferenceEngine::IInferencePlugin> mockPlugin;
// Batched network; left empty unless batch_size > 1
// (see createAutoBatchExecutableNetwork()).
ov::SoPtr<IExecutableNetworkInternal> batchedExecNetwork;
std::shared_ptr<AutoBatchExecutableNetwork> actualExecNet;
// Every mock request created through the mocked CreateInferRequest().
std::vector<std::shared_ptr<NiceMock<MockIInferRequestInternal>>> inferRequestVec;
public:
// Encodes (batch_size, infer_num) into the test name.
static std::string getTestCaseName(testing::TestParamInfo<CreateInferRequestTestParams> obj) {
int batch_size;
int infer_num;
std::tie(batch_size, infer_num) = obj.param;
std::string res;
res = "batch_size_" + std::to_string(batch_size);
res += "_infer_num_" + std::to_string(infer_num);
return res;
}
void TearDown() override {
core.reset();
plugin.reset();
mockIExecNet.reset();
mockExecNetwork = {};
batchedExecNetwork = {};
mockPlugin = {};
actualExecNet.reset();
inferRequestVec.clear();
}
void SetUp() override {
mockIExecNet = std::make_shared<NiceMock<MockIExecutableNetworkInternal>>();
mockIPlugin = std::make_shared<NiceMock<MockIInferencePlugin>>();
// Any LoadNetwork on the mock plugin yields the mock executable network.
ON_CALL(*mockIPlugin, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).WillByDefault(Return(mockIExecNet));
mockPlugin = mockIPlugin;
mockExecNetwork = ov::SoPtr<InferenceEngine::IExecutableNetworkInternal>(mockPlugin->LoadNetwork(CNNNetwork{}, {}), {});
batchedExecNetwork = {};
core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
plugin->SetCore(core);
// Create inferRequest
ON_CALL(*mockIExecNet.get(), CreateInferRequest()).WillByDefault([this]() {
auto inferReq = std::make_shared<NiceMock<MockIInferRequestInternal>>();
inferRequestVec.push_back(inferReq);
return inferReq;
});
}
// Builds the network under test for a "CPU" pseudo device; a separate
// batched mock network is supplied only when batch_size > 1.
AutoBatchExecutableNetwork::Ptr createAutoBatchExecutableNetwork(int batch_size) {
DeviceInformation metaDevice = {"CPU", {}, batch_size};
std::unordered_map<std::string, InferenceEngine::Parameter> config = {{CONFIG_KEY(AUTO_BATCH_TIMEOUT), "200"}};
std::set<std::string> batched_inputs = {"Parameter_0"};
std::set<std::string> batched_outputs = {"Convolution_20"};
if (batch_size > 1)
batchedExecNetwork = ov::SoPtr<InferenceEngine::IExecutableNetworkInternal>(mockPlugin->LoadNetwork(CNNNetwork{}, {}), {});
return std::make_shared<AutoBatchExecutableNetwork>(batchedExecNetwork,
mockExecNetwork,
metaDevice,
config,
batched_inputs,
batched_outputs);
}
};
// Creates infer_num infer requests from an AutoBatchExecutableNetwork built
// with the given batch size and checks each creation yields a valid request.
TEST_P(CreateInferRequestTest, CreateInferRequestTestCases) {
    int batch_size;
    int infer_num;
    std::tie(batch_size, infer_num) = this->GetParam();

    actualExecNet = createAutoBatchExecutableNetwork(batch_size);

    std::vector<InferenceEngine::IInferRequestInternal::Ptr> createdRequests;
    for (int i = 0; i < infer_num; i++) {
        InferenceEngine::IInferRequestInternal::Ptr request;
        EXPECT_NO_THROW(request = actualExecNet->CreateInferRequest());
        EXPECT_NE(request, nullptr);
        createdRequests.push_back(request);
    }
    createdRequests.clear();
}
// Number of infer requests to create per test case.
const std::vector<int> requests_num{1, 8, 16, 64};
// Batch sizes used to build the network (1 = no separate batched network).
const std::vector<int> batch_size{1, 8, 16, 32, 128, 256};
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
CreateInferRequestTest,
::testing::Combine(
::testing::ValuesIn(batch_size),
::testing::ValuesIn(requests_num)),
CreateInferRequestTest::getTestCaseName);

View File

@@ -0,0 +1,201 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "mock_auto_batch_plugin.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Eq;
using ::testing::MatcherCast;
using ::testing::Matches;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrEq;
using ::testing::StrNe;
using ::testing::Throw;
using namespace MockAutoBatchPlugin;
using namespace MockAutoBatchDevice;
using namespace InferenceEngine;
// Parameter tuple for ExecNetworkTest: the int selects the exercised call.
using ExecNetworkParams = std::tuple<std::string, // Key name
int, // GetMetric(0) or GetConfig(1) or SetConfig(3)
bool>; // Throw exception
// Fixture that loads a network through the auto-batch plugin against mocked
// core/plugin/executable-network objects, then lets tests probe
// GetMetric/GetConfig/SetConfig on the resulting executable network.
class ExecNetworkTest : public ::testing::TestWithParam<ExecNetworkParams> {
public:
std::shared_ptr<NiceMock<MockICore>> core;
std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;
// Mock execNetwork
std::shared_ptr<NiceMock<MockIExecutableNetworkInternal>> mockIExecNet;
ov::SoPtr<IExecutableNetworkInternal> mockExecNetwork;
std::shared_ptr<NiceMock<MockIInferencePlugin>> mockIPlugin;
std::shared_ptr<InferenceEngine::IInferencePlugin> mockPlugin;
// Network produced by plugin->LoadNetworkImpl() in SetUp(); the object under test.
InferenceEngine::IExecutableNetworkInternal::Ptr actualExecNet;
public:
// Builds the test name from the key, the action code (0/1/3) and the
// expected-throw flag; unknown action codes are named "error_<key>".
static std::string getTestCaseName(testing::TestParamInfo<ExecNetworkParams> obj) {
std::string name;
bool throw_exception;
int action;
std::tie(name, action, throw_exception) = obj.param;
std::string res;
switch (action) {
case 0:
res += "GetMetric_" + name;
break;
case 1:
res += "GetConfig_" + name;
break;
case 3:
res += "SetConfig_" + name;
break;
default:
res += "error_" + name;
}
if (throw_exception)
res += "throw";
return res;
}
void TearDown() override {
core.reset();
plugin.reset();
mockIExecNet.reset();
mockExecNetwork = {};
mockPlugin = {};
actualExecNet.reset();
}
void SetUp() override {
mockIExecNet = std::make_shared<NiceMock<MockIExecutableNetworkInternal>>();
auto mockIPluginPtr = std::make_shared<NiceMock<MockIInferencePlugin>>();
ON_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).WillByDefault(Return(mockIExecNet));
mockPlugin = mockIPluginPtr;
EXPECT_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).Times(1);
mockExecNetwork = ov::SoPtr<InferenceEngine::IExecutableNetworkInternal>(mockPlugin->LoadNetwork(CNNNetwork{}, {}), {});
core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
plugin->SetCore(core);
// Delegate device-string parsing to the real plugin implementation.
ON_CALL(*plugin, ParseBatchDevice).WillByDefault([this](const std::string& batchDevice) {
return plugin->AutoBatchInferencePlugin::ParseBatchDevice(batchDevice);
});
// Any LoadNetwork on the mocked core returns the mocked executable network.
ON_CALL(*core, LoadNetwork(MatcherCast<const CNNNetwork&>(_), MatcherCast<const std::string&>(_), _))
.WillByDefault(Return(mockExecNetwork));
ON_CALL(*core,
LoadNetwork(MatcherCast<const CNNNetwork&>(_),
MatcherCast<const std::shared_ptr<InferenceEngine::RemoteContext>&>(_),
_))
.WillByDefault(Return(mockExecNetwork));
// Device-side properties queried by the plugin while deciding the batch size.
ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT"))).WillByDefault(Return("THROUGHPUT"));
ON_CALL(*core, GetMetric(_, StrEq("OPTIMAL_BATCH_SIZE"), _)).WillByDefault(Return("16"));
ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT_NUM_REQUESTS"))).WillByDefault(Return("12"));
ON_CALL(*core, GetMetric(_, StrEq("GPU_MEMORY_STATISTICS"), _))
.WillByDefault([](const std::string& device, const std::string& key, const ov::AnyMap& options) {
std::map<std::string, uint64_t> ret = {{"xyz", 1024}};
return ret;
});
ON_CALL(*core, GetMetric(_, StrEq("GPU_DEVICE_TOTAL_MEM_SIZE"), _)).WillByDefault(Return("10240"));
auto graph = ngraph::builder::subgraph::makeMultiSingleConv();
auto net = CNNNetwork(graph);
const std::map<std::string, std::string> configs = {{"AUTO_BATCH_TIMEOUT", "200"},
{"AUTO_BATCH_DEVICE_CONFIG", "CPU(16)"}};
ASSERT_NO_THROW(actualExecNet = plugin->LoadNetworkImpl(net, {}, configs));
// Defaults forwarded from the wrapped (mocked) executable network.
ON_CALL(*mockIExecNet, GetConfig(StrEq("PERFORMANCE_HINT_NUM_REQUESTS"))).WillByDefault(Return("0"));
ON_CALL(*mockIExecNet, GetMetric(StrEq("OPTIMAL_NUMBER_OF_INFER_REQUESTS"))).WillByDefault(Return("12"));
ON_CALL(*mockIExecNet, GetMetric(StrEq("NETWORK_NAME"))).WillByDefault(Return("network_name"));
ON_CALL(*mockIExecNet, GetMetric(StrEq("EXECUTION_DEVICES"))).WillByDefault(Return("CPU"));
// NOTE(review): this default for SUPPORTED_CONFIG_KEYS is immediately
// superseded by the next ON_CALL for the same matcher (later ON_CALLs
// take precedence in gMock) — consider removing one of the two.
ON_CALL(*mockIExecNet, GetMetric(StrEq("SUPPORTED_CONFIG_KEYS"))).WillByDefault(Return("CPU"));
ON_CALL(*mockIExecNet, GetMetric(StrEq("SUPPORTED_CONFIG_KEYS"))).WillByDefault([](const std::string& name) {
std::vector<std::string> res_config;
res_config.emplace_back("CACHE_DIR");
res_config.emplace_back("OPTIMAL_BATCH_SIZE");
return res_config;
});
ON_CALL(*mockIExecNet, GetConfig(StrEq("CACHE_DIR"))).WillByDefault(Return("./abc"));
ON_CALL(*mockIExecNet, GetConfig(StrEq("OPTIMAL_BATCH_SIZE"))).WillByDefault(Return("16"));
}
};
// Exercises GetMetric / GetConfig / SetConfig on the executable network built
// in SetUp(), expecting either success or an exception per the parameters.
// Action codes: 0 = GetMetric, 1 = GetConfig, 3 = SetConfig.
TEST_P(ExecNetworkTest, ExecNetworkGetConfigMetricTestCase) {
    std::string key;
    bool expect_throw;
    int action;
    std::tie(key, action, expect_throw) = this->GetParam();
    if (action == 0) {  // GetMetric
        if (expect_throw) {
            ASSERT_ANY_THROW(actualExecNet->GetMetric(key));
        } else {
            ASSERT_NO_THROW(actualExecNet->GetMetric(key));
        }
    } else if (action == 1) {  // GetConfig
        if (expect_throw) {
            ASSERT_ANY_THROW(actualExecNet->GetConfig(key));
        } else {
            ASSERT_NO_THROW(actualExecNet->GetConfig(key));
        }
    } else if (action == 3) {  // SetConfig
        std::map<std::string, InferenceEngine::Parameter> settings;
        settings[key] = InferenceEngine::Parameter(100);
        if (expect_throw) {
            ASSERT_ANY_THROW(actualExecNet->SetConfig(settings));
        } else {
            ASSERT_NO_THROW(actualExecNet->SetConfig(settings));
        }
    }
    // Any other action code is a deliberate no-op (matches the original switch default).
}
// Parameter table: {key, action, expect_throw}.
// Action codes must match ExecNetworkGetConfigMetricTestCase and
// getTestCaseName: 0 = GetMetric, 1 = GetConfig, 3 = SetConfig.
const std::vector<ExecNetworkParams> testConfigs = {
    // Metric
    ExecNetworkParams{METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS), 0, false},
    ExecNetworkParams{METRIC_KEY(NETWORK_NAME), 0, false},
    ExecNetworkParams{METRIC_KEY(SUPPORTED_METRICS), 0, false},
    ExecNetworkParams{METRIC_KEY(SUPPORTED_CONFIG_KEYS), 0, false},
    ExecNetworkParams{ov::execution_devices.name(), 0, false},
    // Config in autobatch
    ExecNetworkParams{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), 1, false},
    ExecNetworkParams{CONFIG_KEY(AUTO_BATCH_TIMEOUT), 1, false},
    ExecNetworkParams{CONFIG_KEY(CACHE_DIR), 1, false},
    // Config in dependent plugin
    ExecNetworkParams{"OPTIMAL_BATCH_SIZE", 1, false},
    // Incorrect Metric
    ExecNetworkParams{"INCORRECT_METRIC", 0, true},
    // Incorrect config
    ExecNetworkParams{"INCORRECT_CONFIG", 1, true},
    // Set Config
    // BUGFIX: these entries previously used action code 2, which neither the
    // test body's switch nor getTestCaseName handles (SetConfig is 3), so
    // the SetConfig cases ran as no-ops and asserted nothing. Use 3.
    ExecNetworkParams{CONFIG_KEY(AUTO_BATCH_TIMEOUT), 3, false},
    ExecNetworkParams{"INCORRECT_CONFIG", 3, true},
};
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
                         ExecNetworkTest,
                         ::testing::ValuesIn(testConfigs),
                         ExecNetworkTest::getTestCaseName);

View File

@@ -0,0 +1,313 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <dimension_tracker.hpp>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "mock_auto_batch_plugin.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Eq;
using ::testing::MatcherCast;
using ::testing::Matches;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrEq;
using ::testing::StrNe;
using ::testing::Throw;
using namespace MockAutoBatchPlugin;
using namespace MockAutoBatchDevice;
using namespace InferenceEngine;
// Parameter tuple for PluginLoadNetworkTest.
using PluginLoadNetworkParams = std::tuple<std::map<std::string, std::string>, // Parameters
std::map<std::string, std::string>, // Config
int>; // Batch Size
// Fixture for AutoBatchInferencePlugin::LoadNetworkImpl tests.
// SetUp() wires a mock ICore whose LoadNetwork overloads return a mocked
// "CPU" executable network, plus a mock AutoBatch plugin whose
// ParseBatchDevice delegates to the real implementation.
class PluginLoadNetworkTest : public ::testing::TestWithParam<PluginLoadNetworkParams> {
public:
    std::shared_ptr<NiceMock<MockICore>> core;
    std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;

    // Mock CPU execNetwork
    std::shared_ptr<NiceMock<MockIExecutableNetworkInternal>> cpuMockIExecNet;
    ov::SoPtr<IExecutableNetworkInternal> cpuMockExecNetwork;
    // NOTE(review): cpuMockIPlugin is declared but never assigned in SetUp();
    // the mock is kept alive via cpuMockPlugin instead — confirm it can be removed.
    std::shared_ptr<NiceMock<MockIInferencePlugin>> cpuMockIPlugin;
    std::shared_ptr<InferenceEngine::IInferencePlugin> cpuMockPlugin;

public:
    // Encodes all parameters, configs and the expected batch size into the
    // generated test-case name.
    static std::string getTestCaseName(testing::TestParamInfo<PluginLoadNetworkParams> obj) {
        std::map<std::string, std::string> params;
        std::map<std::string, std::string> configs;
        int batch_size;
        std::tie(params, configs, batch_size) = obj.param;
        std::string res;
        for (auto& c : params) {
            res += "_" + c.first + "_" + c.second;
        }
        for (auto& c : configs) {
            res += "_" + c.first + "_" + c.second;
        }
        res += "_" + std::to_string(batch_size);
        return res;
    }

    void TearDown() override {
        core.reset();
        plugin.reset();
        cpuMockIExecNet.reset();
        cpuMockExecNetwork = {};
        cpuMockPlugin = {};
    }

    void SetUp() override {
        // The mocked executable network that every LoadNetwork call returns.
        cpuMockIExecNet = std::make_shared<NiceMock<MockIExecutableNetworkInternal>>();
        auto cpuMockIPluginPtr = std::make_shared<NiceMock<MockIInferencePlugin>>();
        ON_CALL(*cpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _))
            .WillByDefault(Return(cpuMockIExecNet));
        cpuMockPlugin = cpuMockIPluginPtr;
        // Materialize the SoPtr wrapper via one (expected) LoadNetwork call.
        EXPECT_CALL(*cpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).Times(1);
        cpuMockExecNetwork = ov::SoPtr<InferenceEngine::IExecutableNetworkInternal>(cpuMockPlugin->LoadNetwork(CNNNetwork{}, {}), {});

        core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
        plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
        plugin->SetCore(core);
        // Delegate ParseBatchDevice to the real plugin implementation.
        ON_CALL(*plugin, ParseBatchDevice).WillByDefault([this](const std::string& batchDevice) {
            return plugin->AutoBatchInferencePlugin::ParseBatchDevice(batchDevice);
        });
        // Both LoadNetwork overloads (by device name and by remote context)
        // hand back the mocked CPU network.
        ON_CALL(*core, LoadNetwork(MatcherCast<const CNNNetwork&>(_), MatcherCast<const std::string&>(_), _))
            .WillByDefault(Return(cpuMockExecNetwork));
        ON_CALL(*core,
                LoadNetwork(MatcherCast<const CNNNetwork&>(_),
                            MatcherCast<const std::shared_ptr<InferenceEngine::RemoteContext>&>(_),
                            _))
            .WillByDefault(Return(cpuMockExecNetwork));
    }
};
// LoadNetworkImpl must succeed on a plain (non-batched) network for every
// parameterized metric/config combination.
TEST_P(PluginLoadNetworkTest, PluginLoadNetworkTestCase) {
    std::map<std::string, std::string> params;
    std::map<std::string, std::string> configs;
    int batch_size;
    std::tie(params, configs, batch_size) = this->GetParam();

    // Feed the parameterized metric/config values through the mocked core.
    ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT"))).WillByDefault(Return(params["PERFORMANCE_HINT"]));
    ON_CALL(*core, GetMetric(_, StrEq("OPTIMAL_BATCH_SIZE"), _)).WillByDefault(Return(params["OPTIMAL_BATCH_SIZE"]));
    ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT_NUM_REQUESTS")))
        .WillByDefault(Return(params["PERFORMANCE_HINT_NUM_REQUESTS"]));
    ON_CALL(*core, GetMetric(_, StrEq("GPU_MEMORY_STATISTICS"), _))
        .WillByDefault([this, &params](const std::string& device, const std::string& key, const ov::AnyMap& options) {
            // Alternates 0 / configured footprint on successive calls,
            // presumably to simulate memory growth after a load — TODO confirm.
            static int flag = 0;
            ov::Any value = params[key];
            uint64_t data = flag * value.as<uint64_t>();
            std::map<std::string, uint64_t> ret = {{"xyz", data}};
            flag = flag ? 0 : 1;
            return ret;
        });
    ON_CALL(*core, GetMetric(_, StrEq("GPU_DEVICE_TOTAL_MEM_SIZE"), _))
        .WillByDefault(Return(params["GPU_DEVICE_TOTAL_MEM_SIZE"]));

    auto graph = ngraph::builder::subgraph::makeMultiSingleConv();
    auto net = CNNNetwork(graph);
    ASSERT_NO_THROW(plugin->LoadNetworkImpl(net, {}, configs));
}
// A network whose first dimension carries a dimension-tracker label is loaded
// as a batched network; the returned exec-network then reports the mocked
// OPTIMAL_NUMBER_OF_INFER_REQUESTS value.
TEST_P(PluginLoadNetworkTest, PluginLoadBatchedNetworkTestCase) {
    std::map<std::string, std::string> params;
    std::map<std::string, std::string> configs;
    int batch_size;
    std::tie(params, configs, batch_size) = this->GetParam();

    // Feed the parameterized metric/config values through the mocked core.
    ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT"))).WillByDefault(Return(params["PERFORMANCE_HINT"]));
    ON_CALL(*core, GetMetric(_, StrEq("OPTIMAL_BATCH_SIZE"), _)).WillByDefault(Return(params["OPTIMAL_BATCH_SIZE"]));
    ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT_NUM_REQUESTS")))
        .WillByDefault(Return(params["PERFORMANCE_HINT_NUM_REQUESTS"]));
    ON_CALL(*core, GetMetric(_, StrEq("GPU_MEMORY_STATISTICS"), _))
        .WillByDefault([this, &params](const std::string& device, const std::string& key, const ov::AnyMap& options) {
            // Alternates 0 / configured footprint on successive calls.
            static int flag = 0;
            ov::Any value = params[key];
            uint64_t data = flag * value.as<uint64_t>();
            std::map<std::string, uint64_t> ret = {{"xyz", data}};
            flag = flag ? 0 : 1;
            return ret;
        });
    ON_CALL(*core, GetMetric(_, StrEq("GPU_DEVICE_TOTAL_MEM_SIZE"), _))
        .WillByDefault(Return(params["GPU_DEVICE_TOTAL_MEM_SIZE"]));

    // Label the first dimension (label 11) so it is recognized as the batch
    // dimension, then reshape the graph to a dynamic batch of 5.
    auto graph = ngraph::builder::subgraph::makeConvPoolReluNonZero({1, 1, 32, 32});
    auto batch = ov::Dimension(5);
    ov::DimensionTracker::set_label(batch, 11);
    auto p_shape = ov::PartialShape{batch, 1, 32, 32};
    graph->reshape(p_shape);
    auto net = CNNNetwork(graph);

    InferenceEngine::IExecutableNetworkInternal::Ptr execNet;
    ASSERT_NO_THROW(execNet = plugin->LoadNetworkImpl(net, {}, configs));

    // The underlying mocked network reports 1 optimal request; the AutoBatch
    // wrapper should forward that value here.
    ON_CALL(*cpuMockIExecNet, GetConfig(StrEq("PERFORMANCE_HINT_NUM_REQUESTS"))).WillByDefault(Return("0"));
    ON_CALL(*cpuMockIExecNet, GetMetric(StrEq("OPTIMAL_NUMBER_OF_INFER_REQUESTS"))).WillByDefault(Return("1"));
    InferenceEngine::Parameter res;
    ASSERT_NO_THROW(res = execNet->GetMetric("OPTIMAL_NUMBER_OF_INFER_REQUESTS"));
    EXPECT_EQ(1, std::atoi(res.as<std::string>().c_str()));
}
// After LoadNetworkImpl, the exec-network metrics must hold:
// OPTIMAL_NUMBER_OF_INFER_REQUESTS equals the expected batch size,
// NETWORK_NAME and EXECUTION_DEVICES are forwarded from the underlying
// network, and an unknown metric key throws.
TEST_P(PluginLoadNetworkTest, PluginLoadNetworkGetMetricTestCase) {
    std::map<std::string, std::string> params;
    std::map<std::string, std::string> configs;
    int batch_size;
    std::tie(params, configs, batch_size) = this->GetParam();

    // Feed the parameterized metric/config values through the mocked core.
    ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT"))).WillByDefault(Return(params["PERFORMANCE_HINT"]));
    ON_CALL(*core, GetMetric(_, StrEq("OPTIMAL_BATCH_SIZE"), _)).WillByDefault(Return(params["OPTIMAL_BATCH_SIZE"]));
    ON_CALL(*core, GetConfig(_, StrEq("PERFORMANCE_HINT_NUM_REQUESTS")))
        .WillByDefault(Return(params["PERFORMANCE_HINT_NUM_REQUESTS"]));
    ON_CALL(*core, GetMetric(_, StrEq("GPU_MEMORY_STATISTICS"), _))
        .WillByDefault([this, &params](const std::string& device, const std::string& key, const ov::AnyMap& options) {
            // Alternates 0 / configured footprint on successive calls.
            static int flag = 0;
            ov::Any value = params[key];
            uint64_t data = flag * value.as<uint64_t>();
            std::map<std::string, uint64_t> ret = {{"xyz", data}};
            flag = flag ? 0 : 1;
            return ret;
        });
    ON_CALL(*core, GetMetric(_, StrEq("GPU_DEVICE_TOTAL_MEM_SIZE"), _))
        .WillByDefault(Return(params["GPU_DEVICE_TOTAL_MEM_SIZE"]));

    auto graph = ngraph::builder::subgraph::makeMultiSingleConv();
    auto net = CNNNetwork(graph);
    InferenceEngine::IExecutableNetworkInternal::Ptr execNet;
    ASSERT_NO_THROW(execNet = plugin->LoadNetworkImpl(net, {}, configs));

    // Stub the underlying network's metrics that the AutoBatch wrapper forwards.
    std::string network_name = graph.get()->get_name();
    ON_CALL(*cpuMockIExecNet, GetConfig(StrEq("PERFORMANCE_HINT_NUM_REQUESTS"))).WillByDefault(Return("0"));
    ON_CALL(*cpuMockIExecNet, GetMetric(StrEq("OPTIMAL_NUMBER_OF_INFER_REQUESTS"))).WillByDefault(Return("1"));
    ON_CALL(*cpuMockIExecNet, GetMetric(StrEq("NETWORK_NAME"))).WillByDefault(Return(network_name.c_str()));
    ON_CALL(*cpuMockIExecNet, GetMetric(StrEq("EXECUTION_DEVICES"))).WillByDefault(Return("CPU"));

    InferenceEngine::Parameter res;
    ASSERT_NO_THROW(res = execNet->GetMetric("OPTIMAL_NUMBER_OF_INFER_REQUESTS"));
    EXPECT_EQ(batch_size, std::atoi(res.as<std::string>().c_str()));
    ASSERT_NO_THROW(res = execNet->GetMetric("NETWORK_NAME"));
    EXPECT_EQ(network_name, res.as<std::string>());
    ASSERT_NO_THROW(res = execNet->GetMetric("SUPPORTED_METRICS"));
    ASSERT_NO_THROW(res = execNet->GetMetric("EXECUTION_DEVICES"));
    EXPECT_STREQ("CPU", res.as<std::string>().c_str());
    // Unknown metric keys must be rejected.
    ASSERT_ANY_THROW(execNet->GetMetric("XYZ"));
}
// Parameter sets: {core metrics/configs} -> {plugin configs} -> expected batch size.
const std::vector<PluginLoadNetworkParams> testConfigs = {
    // Case 1: batch size applied explicitly via AUTO_BATCH_DEVICE_CONFIG "DEV(n)".
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "16"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "12"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(32)"}},
                            32},
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "16"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "12"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU(32)"}},
                            32},
    // Case 2: CPU batch size is figured out as min(OPTIMAL_BATCH_SIZE,
    // PERFORMANCE_HINT_NUM_REQUESTS); the request count comes from the config
    // if present, otherwise from core->GetConfig.
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "16"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "12"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}},
                            12},
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "8"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "16"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}},
                            8},
    // When the request count is too small for batching, batch size falls to 1.
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "8"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "2"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}},
                            1},
    // Disabled case (PERFORMANCE_HINT_NUM_REQUESTS passed via the plugin
    // config instead of the core) — kept for reference:
    // PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
    //                          {"OPTIMAL_BATCH_SIZE", "32"},
    //                          {"PERFORMANCE_HINT_NUM_REQUESTS", "16"},
    //                          {"GPU_MEMORY_STATISTICS", "1024000"},
    //                          {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
    //                         {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU"}, {"PERFORMANCE_HINT_NUM_REQUESTS", "12"}},
    //                         12},
    //
    // Case 3: GPU batch size is figured out by
    // 1) min of opt_batch_size and infReq_num
    // 2) available_mem/one_graph_mem_footprint rounded down to a power of 2
    // Final batch_size is the min of 1) and 2)
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "16"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "12"},
                             {"GPU_MEMORY_STATISTICS", "1000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "5000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}},
                            4},
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "16"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "12"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "40960000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}},
                            12},
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "32"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "24"},
                             {"GPU_MEMORY_STATISTICS", "1000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "18000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}},
                            16},
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "THROUGHPUT"},
                             {"OPTIMAL_BATCH_SIZE", "32"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "48"},
                             {"GPU_MEMORY_STATISTICS", "1000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "180000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "GPU"}},
                            32},
    // Case 4: explicit device batch config wins even under the LATENCY hint.
    PluginLoadNetworkParams{{{"PERFORMANCE_HINT", "LATENCY"},
                             {"OPTIMAL_BATCH_SIZE", "16"},
                             {"PERFORMANCE_HINT_NUM_REQUESTS", "12"},
                             {"GPU_MEMORY_STATISTICS", "1024000"},
                             {"GPU_DEVICE_TOTAL_MEM_SIZE", "4096000000"}},
                            {{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(32)"}},
                            32},
};

// Register PluginLoadNetworkTest with all parameter sets above.
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
                         PluginLoadNetworkTest,
                         ::testing::ValuesIn(testConfigs),
                         PluginLoadNetworkTest::getTestCaseName);

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <iostream>
#include "auto_batch.hpp"
#include "ie_icore.hpp"
using namespace MockAutoBatchPlugin;
namespace MockAutoBatchDevice {
// gMock of the AutoBatch plugin. ParseMetaDevices, ParseBatchDevice and
// GetMetric are mockable; tests typically delegate ParseBatchDevice to the
// real AutoBatchInferencePlugin implementation via ON_CALL.
class MockAutoBatchInferencePlugin : public AutoBatchInferencePlugin {
public:
    MOCK_METHOD((DeviceInformation),
                ParseMetaDevices,
                (const std::string&, (const std::map<std::string, std::string>&)),
                (const));
    MOCK_METHOD((DeviceInformation), ParseBatchDevice, (const std::string&), ());
    MOCK_METHOD((InferenceEngine::Parameter),
                GetMetric,
                (const std::string&, (const std::map<std::string, InferenceEngine::Parameter>&)),
                (const, override));
};
// gMock of the AutoBatch executable network with mockable GetConfig/GetMetric.
class MockAutoBatchExecutableNetwork : public AutoBatchExecutableNetwork {
public:
    MOCK_METHOD((InferenceEngine::Parameter), GetConfig, (const std::string&), (const, override));
    MOCK_METHOD((InferenceEngine::Parameter), GetMetric, (const std::string&), (const, override));
};
} // namespace MockAutoBatchDevice

View File

@@ -0,0 +1,398 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mock_auto_batch_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Eq;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrEq;
using ::testing::StrNe;
using ::testing::Throw;
using namespace MockAutoBatchPlugin;
using namespace MockAutoBatchDevice;
using BatchDeviceConfigParams = std::tuple<std::string, // Batch devices
std::string, // Expected device name
int, // Expected batch size
bool // Throw exception
>;
using MetricConfigParams = std::tuple<std::string, std::string, bool>;
using MetaDeviceParams = std::tuple<std::string, // Device batch cfg
std::map<std::string, std::string>, // Config
DeviceInformation, // Expected result
bool>; // Throw exception
using SetGetConfigParams = std::tuple<std::map<std::string, std::string>, // Set Config
std::string, // Get Config
bool>; // Throw exception
const std::vector<std::string> cpu_supported_properties = {
"CACHE_DIR",
};
const std::vector<std::string> gpu_supported_properties = {
"CACHE_DIR",
"OPTIMAL_BATCH_SIZE",
};
// Fixture for AutoBatchInferencePlugin::SetConfig / GetConfig tests.
// SetUp() creates a mock core and a mock plugin whose ParseBatchDevice
// delegates to the real implementation.
class SetGetConfigTest : public ::testing::TestWithParam<SetGetConfigParams> {
public:
    std::shared_ptr<NiceMock<MockICore>> core;
    std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;

public:
    // Builds a readable test-case name from the set/get parameters.
    static std::string getTestCaseName(testing::TestParamInfo<SetGetConfigParams> obj) {
        std::map<std::string, std::string> set_config;
        std::string get_config;
        bool throw_exception;
        std::tie(set_config, get_config, throw_exception) = obj.param;
        std::string res = "";
        if (set_config.size() > 0) {
            // Fixed: this prefix previously read "GetConfig_" (copy-paste),
            // mislabeling the SetConfig part of the generated test name.
            res += "SetConfig_";
            for (auto& it : set_config) {
                res += it.first + "_" + it.second + "_";
            }
        }
        if (!get_config.empty()) {
            res += "GetConfig_" + get_config;
        }
        if (throw_exception)
            res += "_throw";
        return res;
    }

    void TearDown() override {
        core.reset();
        plugin.reset();
    }

    void SetUp() override {
        core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
        plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
        plugin->SetCore(core);
        // Delegate ParseBatchDevice to the real plugin implementation.
        ON_CALL(*plugin, ParseBatchDevice).WillByDefault([this](const std::string& batchDevice) {
            return plugin->AutoBatchInferencePlugin::ParseBatchDevice(batchDevice);
        });
    }
};
// SetConfig accepts supported keys (and an empty map) and throws for
// unsupported ones, as dictated by the test parameter.
TEST_P(SetGetConfigTest, SetConfigTestCase) {
    std::map<std::string, std::string> config_to_set;
    std::string unused_get_key;
    bool expect_throw;
    std::tie(config_to_set, unused_get_key, expect_throw) = this->GetParam();

    // An empty configuration map is always accepted.
    if (config_to_set.empty()) {
        ASSERT_NO_THROW(plugin->SetConfig(config_to_set));
        return;
    }
    if (expect_throw) {
        ASSERT_ANY_THROW(plugin->SetConfig(config_to_set));
    } else {
        ASSERT_NO_THROW(plugin->SetConfig(config_to_set));
    }
}
// GetConfig on a freshly created plugin returns a value for supported keys
// and throws for keys that have no default, per the test parameter.
TEST_P(SetGetConfigTest, GetConfigTestCase) {
    std::map<std::string, std::string> unused_set_config;
    std::string key_to_query;
    bool expect_throw;
    std::tie(unused_set_config, key_to_query, expect_throw) = this->GetParam();

    // Only pure "get" parameters (no key to query -> skip; set part present -> skip).
    if (key_to_query.empty() || !unused_set_config.empty())
        return;

    std::map<std::string, InferenceEngine::Parameter> options = {};
    if (expect_throw) {
        ASSERT_ANY_THROW(plugin->GetConfig(key_to_query, options));
    } else {
        ASSERT_NO_THROW(plugin->GetConfig(key_to_query, options));
    }
}
// Round-trip: a value stored via SetConfig must be returned verbatim by a
// subsequent GetConfig for the same key.
TEST_P(SetGetConfigTest, SetGetConfigTestCase) {
    std::map<std::string, std::string> set_config;
    std::string get_config;
    bool throw_exception;
    std::tie(set_config, get_config, throw_exception) = this->GetParam();
    // Only parameters that both set and query a key are relevant here.
    if (get_config.empty() || set_config.size() == 0) {
        return;
    }
    std::map<std::string, InferenceEngine::Parameter> options = {};
    ASSERT_NO_THROW(plugin->SetConfig(set_config));
    InferenceEngine::Parameter result;
    ASSERT_NO_THROW(result = plugin->GetConfig(get_config, options));
    EXPECT_EQ(result.as<std::string>(), set_config[get_config]);
}
// Fixture for AutoBatchInferencePlugin::ParseMetaDevice tests.
// The mocked core filters configs down to the per-device supported-property
// lists (cpu_supported_properties / gpu_supported_properties) to emulate
// GetSupportedConfig.
class ParseMetaDeviceTest : public ::testing::TestWithParam<MetaDeviceParams> {
public:
    std::shared_ptr<NiceMock<MockICore>> core;
    std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;

public:
    // Encodes device batch config and all config entries into the test name.
    static std::string getTestCaseName(testing::TestParamInfo<MetaDeviceParams> obj) {
        std::string batch_cfg;
        std::map<std::string, std::string> config;
        DeviceInformation info;
        bool throw_exception;
        std::tie(batch_cfg, config, info, throw_exception) = obj.param;
        std::string res = batch_cfg;
        for (auto& c : config) {
            res += "_" + c.first + "_" + c.second;
        }
        if (throw_exception)
            res += "_throw";
        return res;
    }

    void TearDown() override {
        core.reset();
        plugin.reset();
    }

    void SetUp() override {
        core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
        plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
        plugin->SetCore(core);
        // Emulate ICore::GetSupportedConfig: keep only the keys each device
        // declares as supported.
        ON_CALL(*core, GetSupportedConfig)
            .WillByDefault([](const std::string& device, const std::map<std::string, std::string>& configs) {
                std::map<std::string, std::string> res_config;
                if (device == "CPU") {
                    for (auto& c : configs) {
                        if (std::find(begin(cpu_supported_properties), end(cpu_supported_properties), c.first) !=
                            cpu_supported_properties.end())
                            res_config[c.first] = c.second;
                    }
                } else if (device == "GPU") {
                    for (auto& c : configs) {
                        if (std::find(begin(gpu_supported_properties), end(gpu_supported_properties), c.first) !=
                            gpu_supported_properties.end())
                            res_config[c.first] = c.second;
                    }
                }
                return res_config;
            });
        // Delegate ParseBatchDevice to the real plugin implementation.
        ON_CALL(*plugin, ParseBatchDevice).WillByDefault([this](const std::string& batchDevice) {
            return plugin->AutoBatchInferencePlugin::ParseBatchDevice(batchDevice);
        });
    }

    // Deep equality of two string maps (same size, same key/value pairs).
    // Fixed: previously took both maps by value, copying them on every call;
    // const references preserve the exact behavior without the copies.
    bool compare(const std::map<std::string, std::string>& a, const std::map<std::string, std::string>& b) {
        if (a.size() != b.size())
            return false;
        for (auto& it : a) {
            auto item = b.find(it.first);
            if (item == b.end())
                return false;
            if (it.second != item->second)
                return false;
        }
        return true;
    }
};
// ParseMetaDevice either throws (when the config holds keys the device does
// not support) or yields the expected device name, batch size and filtered
// config map.
TEST_P(ParseMetaDeviceTest, ParseMetaDeviceTestCase) {
    std::string batch_cfg;
    std::map<std::string, std::string> config;
    DeviceInformation expected;
    bool throw_exception;
    std::tie(batch_cfg, config, expected, throw_exception) = this->GetParam();
    if (throw_exception) {
        ASSERT_ANY_THROW(plugin->ParseMetaDevice(batch_cfg, config));
    } else {
        auto result = plugin->ParseMetaDevice(batch_cfg, config);
        EXPECT_EQ(result.deviceName, expected.deviceName);
        EXPECT_EQ(result.batchForDevice, expected.batchForDevice);
        EXPECT_TRUE(compare(result.config, expected.config));
    }
}
// Fixture for AutoBatchInferencePlugin::ParseBatchDevice tests.
class ParseBatchDeviceTest : public ::testing::TestWithParam<BatchDeviceConfigParams> {
public:
    std::shared_ptr<NiceMock<MockICore>> core;
    std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;

public:
    // Derives the test-case name from the raw batch-device string.
    static std::string getTestCaseName(testing::TestParamInfo<BatchDeviceConfigParams> obj) {
        std::string batchDevice;
        std::string deviceName;
        int batchSize;
        bool throw_exception;
        std::tie(batchDevice, deviceName, batchSize, throw_exception) = obj.param;
        // Fixed: gtest requires parameterized test names to contain only
        // alphanumerics/underscores, but raw configs such as "CPU(4)" or
        // "GPU(-1)" contain '(' ')' '-'. Replace invalid characters with '_'.
        std::string res = batchDevice;
        for (auto& ch : res) {
            const bool valid = (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z');
            if (!valid)
                ch = '_';
        }
        return res;
    }

    void TearDown() override {
        core.reset();
        plugin.reset();
    }

    void SetUp() override {
        core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
        plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
        plugin->SetCore(core);
        // Delegate ParseBatchDevice to the real plugin implementation.
        ON_CALL(*plugin, ParseBatchDevice).WillByDefault([this](const std::string& batchDevice) {
            return plugin->AutoBatchInferencePlugin::ParseBatchDevice(batchDevice);
        });
    }
};
// ParseBatchDevice splits "DEV(n)" into device name and batch size; invalid
// batch values (0 or negative) must throw.
TEST_P(ParseBatchDeviceTest, ParseBatchDeviceTestCase) {
    std::string batchDevice;
    std::string deviceName;
    int batchSize;
    bool throw_exception;
    std::tie(batchDevice, deviceName, batchSize, throw_exception) = this->GetParam();
    if (throw_exception) {
        ASSERT_ANY_THROW(plugin->ParseBatchDevice(batchDevice));
    } else {
        auto result = plugin->ParseBatchDevice(batchDevice);
        EXPECT_EQ(result.deviceName, deviceName);
        EXPECT_EQ(result.batchForDevice, batchSize);
    }
}
// Fixture for AutoBatchInferencePlugin::GetMetric tests; the mocked GetMetric
// delegates to the real implementation.
class PluginMetricTest : public ::testing::TestWithParam<MetricConfigParams> {
public:
    std::shared_ptr<NiceMock<MockICore>> core;
    std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>> plugin;

public:
    // Names the test case after the queried metric key.
    static std::string getTestCaseName(testing::TestParamInfo<MetricConfigParams> obj) {
        std::string metricName;
        std::string value;
        bool throw_exception;
        std::tie(metricName, value, throw_exception) = obj.param;
        return "Metric_" + metricName;
    }

    void TearDown() override {
        core.reset();
        plugin.reset();
    }

    void SetUp() override {
        core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
        plugin = std::shared_ptr<NiceMock<MockAutoBatchInferencePlugin>>(new NiceMock<MockAutoBatchInferencePlugin>());
        plugin->SetCore(core);
        // Delegate GetMetric to the real plugin implementation.
        ON_CALL(*plugin, GetMetric)
            .WillByDefault(
                [this](const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) {
                    return plugin->AutoBatchInferencePlugin::GetMetric(name, options);
                });
    }
};
// Plugin-level GetMetric returns the expected string for supported metrics
// and throws for unsupported keys.
TEST_P(PluginMetricTest, GetPluginMetricTest) {
    std::string metric_key;
    std::string expected_value;
    bool expect_throw;
    std::tie(metric_key, expected_value, expect_throw) = this->GetParam();

    if (expect_throw) {
        ASSERT_ANY_THROW(plugin->GetMetric(metric_key, {}));
        return;
    }
    auto metric = plugin->GetMetric(metric_key, {});
    EXPECT_EQ(metric.as<std::string>(), expected_value);
}
// Expected space-separated value lists returned by the plugin's
// SUPPORTED_METRICS / SUPPORTED_CONFIG_KEYS metrics.
const char supported_metric[] = "SUPPORTED_METRICS FULL_DEVICE_NAME SUPPORTED_CONFIG_KEYS";
const char supported_config_keys[] = "AUTO_BATCH_DEVICE_CONFIG AUTO_BATCH_TIMEOUT CACHE_DIR";

// "DEV(n)" strings -> expected device/batch; 0 or negative batch must throw.
const std::vector<BatchDeviceConfigParams> batchDeviceTestConfigs = {
    BatchDeviceConfigParams{"CPU(4)", "CPU", 4, false},
    BatchDeviceConfigParams{"GPU(8)", "GPU", 8, false},
    BatchDeviceConfigParams{"CPU(0)", "CPU", 0, true},
    BatchDeviceConfigParams{"GPU(-1)", "GPU", 0, true},
};

// Metric key -> expected value; device-specific keys must throw on BATCH.
const std::vector<MetricConfigParams> metricTestConfigs = {
    MetricConfigParams{METRIC_KEY(SUPPORTED_METRICS), supported_metric, false},
    MetricConfigParams{METRIC_KEY(FULL_DEVICE_NAME), "BATCH", false},
    MetricConfigParams{METRIC_KEY(SUPPORTED_CONFIG_KEYS), supported_config_keys, false},
    MetricConfigParams{"CPU_THREADS_NUM", "16", true},
    MetricConfigParams{"PERFORMANCE_HINT", "LATENCY", true},
};

// Batch cfg + raw config -> expected DeviceInformation; configs carrying keys
// the device does not support are expected to throw.
const std::vector<MetaDeviceParams> testMetaDeviceConfigs = {
    MetaDeviceParams{"CPU(4)", {}, DeviceInformation{"CPU", {}, 4}, false},
    MetaDeviceParams{"CPU(4)", {{}}, DeviceInformation{"CPU", {{}}, 4}, true},
    MetaDeviceParams{"CPU(4)", {{"CACHE_DIR", "./"}}, DeviceInformation{"CPU", {{"CACHE_DIR", "./"}}, 4}, false},
    MetaDeviceParams{"GPU(4)", {{"CACHE_DIR", "./"}}, DeviceInformation{"GPU", {{"CACHE_DIR", "./"}}, 4}, false},
    MetaDeviceParams{"GPU(8)",
                     {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}},
                     DeviceInformation{"GPU", {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}}, 8},
                     false},
    // OPTIMAL_BATCH_SIZE is not a CPU-supported property -> throw.
    MetaDeviceParams{"CPU(4)", {{"OPTIMAL_BATCH_SIZE", "16"}}, DeviceInformation{"CPU", {{}}, 4}, true},
    MetaDeviceParams{"CPU(4)",
                     {{"CACHE_DIR", "./"}, {"OPTIMAL_BATCH_SIZE", "16"}},
                     DeviceInformation{"CPU", {{"CACHE_DIR", "./"}}, 4},
                     true},
};

// SetConfig / GetConfig / round-trip parameter sets.
const std::vector<SetGetConfigParams> testSetGetConfigParams = {
    // Set Config
    SetGetConfigParams{{{"AUTO_BATCH_TIMEOUT", "200"}}, {}, false},
    SetGetConfigParams{{{"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, {}, false},
    SetGetConfigParams{{{"CACHE_DIR", "./xyz"}}, {}, false},
    SetGetConfigParams{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, {}, false},
    SetGetConfigParams{{{"AUTO_BATCH_TIMEOUT", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}, {"CACHE_DIR", "./xyz"}},
                       {},
                       false},
    SetGetConfigParams{{{"XYZ", "200"}}, {}, true},
    SetGetConfigParams{{{"XYZ", "200"}, {"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}, {"CACHE_DIR", "./xyz"}}, {}, true},
    // Get Config
    SetGetConfigParams{{}, "AUTO_BATCH_TIMEOUT", false},
    SetGetConfigParams{{}, "AUTO_BATCH_DEVICE_CONFIG", true},
    SetGetConfigParams{{}, "CACHE_DIR", true},
    // Set and get Config
    SetGetConfigParams{{{"AUTO_BATCH_TIMEOUT", "200"}}, "AUTO_BATCH_TIMEOUT", false},
    SetGetConfigParams{{{"AUTO_BATCH_DEVICE_CONFIG", "CPU(4)"}}, "AUTO_BATCH_DEVICE_CONFIG", false},
    SetGetConfigParams{{{"CACHE_DIR", "./abc"}}, "CACHE_DIR", false},
};

// Register each fixture with its parameter list.
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
                         SetGetConfigTest,
                         ::testing::ValuesIn(testSetGetConfigParams),
                         SetGetConfigTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
                         ParseBatchDeviceTest,
                         ::testing::ValuesIn(batchDeviceTestConfigs),
                         ParseBatchDeviceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
                         PluginMetricTest,
                         ::testing::ValuesIn(metricTestConfigs),
                         PluginMetricTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
                         ParseMetaDeviceTest,
                         ::testing::ValuesIn(testMetaDeviceConfigs),
                         ParseMetaDeviceTest::getTestCaseName);