rework auto test cases (#19862)

* initial commit

Signed-off-by: fishbell <bell.song@intel.com>

* clean up

Signed-off-by: fishbell <bell.song@intel.com>

* fix windows build failure

Signed-off-by: fishbell <bell.song@intel.com>

* enable auto func tests

Signed-off-by: fishbell <bell.song@intel.com>

* enable auto_func_test to ci

Signed-off-by: fishbell <bell.song@intel.com>

* some clean up in gpu case

Signed-off-by: fishbell <bell.song@intel.com>

* clang

Signed-off-by: fishbell <bell.song@intel.com>

* fix build warning

Signed-off-by: fishbell <bell.song@intel.com>

* enable new tests

Signed-off-by: fishbell <bell.song@intel.com>

* fix build warning

Signed-off-by: fishbell <bell.song@intel.com>

* enable consistency test

Signed-off-by: fishbell <bell.song@intel.com>

* try fix build error on manylinux

Signed-off-by: fishbell <bell.song@intel.com>

* enable cpplint

Signed-off-by: fishbell <bell.song@intel.com>

* enable clang-format

Signed-off-by: fishbell <bell.song@intel.com>

* enable some tests

Signed-off-by: fishbell <bell.song@intel.com>

* fix typo

Signed-off-by: fishbell <bell.song@intel.com>

* clang for unit tests

Signed-off-by: fishbell <bell.song@intel.com>

* fix merge conflict

Signed-off-by: fishbell <bell.song@intel.com>

---------

Signed-off-by: fishbell <bell.song@intel.com>
This commit is contained in:
yanlan song 2023-10-07 18:44:25 +08:00 committed by Alexander Nesterov
parent 166d6743e7
commit 4e20dda85e
143 changed files with 5068 additions and 3763 deletions

View File

@ -411,6 +411,9 @@ jobs:
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_unit_tests.xml
displayName: 'AUTO UT'
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_func_tests.xml
displayName: 'AUTO FuncTests'
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ov_auto_batch_unit_tests.xml
displayName: 'AutoBatch UT'

View File

@ -305,6 +305,9 @@ jobs:
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_unit_tests.xml
displayName: 'AUTO UT'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_func_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_func_tests.xml
displayName: 'AUTO FuncTests'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_auto_batch_unit_tests --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ov_auto_batch_unit_tests.xml
displayName: 'AutoBatch UT'

View File

@ -733,6 +733,12 @@ jobs:
${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml
- name: AUTO func Tests
run: |
source ${{ env.INSTALL_DIR }}/setupvars.sh
${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
- name: Template plugin func tests
run: |
source ${INSTALL_DIR}/setupvars.sh

View File

@ -595,6 +595,11 @@ jobs:
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml
- name: AUTO FuncTests
shell: cmd
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
- name: Template plugin tests
shell: cmd
run: |

View File

@ -30,6 +30,8 @@ OPENVINO_SUPPRESS_DEPRECATED_START
__VA_ARGS__; \
} catch (const ::InferenceEngine::RequestBusy& ex) { \
ov::Busy::create(ex.what()); \
} catch (const ov::Busy&) { \
throw; \
} catch (const std::exception& ex) { \
OPENVINO_THROW(ex.what()); \
} catch (...) { \

View File

@ -415,8 +415,9 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model_impl(const std::string
load_config.set_user_property(pre_process_config(properties));
load_config.apply_user_properties();
if (!work_mode_auto) {
if (iter_config != properties.end() && iter_config->second != "THROUGHPUT") {
LOG_WARNING_TAG("User set perf_hint:%s, but MULTI supports THROUGHPUT only", iter_config->second.as<std::string>().c_str());
if (iter_config != properties.end() && iter_config->second.as<std::string>() != "THROUGHPUT") {
LOG_WARNING_TAG("User set perf_hint:%s, but MULTI supports THROUGHPUT only",
iter_config->second.as<std::string>().c_str());
}
load_config.set_property(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
}

View File

@ -8,4 +8,5 @@ if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
endif()
add_subdirectory(functional)
add_subdirectory(unit)

View File

@ -0,0 +1,34 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME ov_auto_func_tests)
if(ENABLE_AUTO_BATCH)
list(APPEND DEPENDENCIES openvino_auto_batch_plugin)
list(APPEND COMPILE_DEFINITIONS ENABLE_AUTO_BATCH)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ie_add_compiler_flags(/wd4305)
endif()
ov_add_test_target(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
LINK_LIBRARIES
openvino::runtime::dev
gtest
gtest_main
openvino::funcSharedTests
INCLUDES
${CMAKE_CURRENT_SOURCE_DIR}
${TEST_COMMON_INCLUDE_DIR}
ADD_CLANG_FORMAT
LABELS
Multi
Auto
)
target_compile_definitions(${TARGET_NAME} PRIVATE ${COMPILE_DEFINITIONS})
set_ie_threading_interface_for(${TARGET_NAME})

View File

@ -0,0 +1,97 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_func_test.hpp"
#ifdef __GLIBC__
# include <gnu/libc-version.h>
# if __GLIBC_MINOR__ >= 34
# define ENABLETESTTHREADING
# endif
#endif
using namespace ov::auto_plugin::tests;
#ifdef ENABLETESTTHREADING
// Compiling for AUTO must succeed when several (mock) devices are listed,
// both with the default hint and with CUMULATIVE_THROUGHPUT.
TEST_F(AutoFuncTests, can_compile_with_multiple_devices) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
    // Re-compile with an explicit cumulative-throughput hint; must not throw either.
    compiled_model = core.compile_model(model_can_batch,
                                        "AUTO",
                                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
}
// Stress-compile the model for AUTO from many threads to surface races in
// device selection; both the default and the cumulative-throughput paths.
TEST_F(AutoFuncTests, threading_test) {
    auto compile_default = [&]() {
        (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")});
    };
    ThreadingTest::runParallel(compile_default, 10, 10);

    auto compile_ctput = [&]() {
        (void)core.compile_model(model_can_batch,
                                 "AUTO",
                                 {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                  ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    };
    ThreadingTest::runParallel(compile_ctput, 10, 10);
}
// Same parallel-compile stress as threading_test, but with the model cache
// enabled so the cached-blob path is exercised concurrently as well.
TEST_F(AutoFuncTests, threading_test_cache_enabled) {
    core.set_property(ov::cache_dir(cache_path));
    ThreadingTest::runParallel(
        [&]() {
            (void)core.compile_model(model_can_batch,
                                     "AUTO",
                                     {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                      ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
        },
        10,
        10);
    // Restore: disable caching again so later tests are unaffected.
    core.set_property(ov::cache_dir(""));
}
// Querying AUTO plugin versions concurrently must always yield at least one
// version entry and never crash.
TEST_F(AutoFuncTests, threading_test_get_version) {
    auto query_versions = [&]() {
        const auto versions = core.get_versions("AUTO");
        ASSERT_LE(1u, versions.size());
    };
    ThreadingTest::runParallel(query_versions);
}
// Parallel compile exercising the CPU-as-startup-helper path of AUTO.
// NOTE(review): test name is missing an 'r' ("theading"); renaming would
// change the registered gtest name (and any CI filters), so it is only
// flagged here rather than fixed.
TEST_F(AutoFuncTests, theading_compiled_with_cpu_help) {
    ThreadingTest::runParallel(
        [&]() {
            (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")});
        },
        10,
        10);
}
// Parallel compile where the GPU mock compiles slower than the CPU helper;
// the two warm-up compile_model calls fix the plugin creation order inside
// mock_engine before the slow variant is registered.
TEST_F(AutoFuncTests, threading_test_hardware_slower) {
    core.compile_model(model_cannot_batch, "MOCK_CPU");
    core.compile_model(model_cannot_batch, "MOCK_GPU");  // need to initialize the order of plugins in mock_engine
    register_plugin_mock_gpu_compile_slower(core, "MOCK_GPU_SLOWER", {});
    ThreadingTest::runParallel(
        [&]() {
            (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU_SLOWER", "MOCK_CPU")});
        },
        10,
        10);
}
// Mirror of threading_test_hardware_slower: here the CPU helper is the slow
// device, so AUTO should end up on the (faster) GPU mock.
TEST_F(AutoFuncTests, threading_test_cpu_help_slower) {
    core.compile_model(model_cannot_batch, "MOCK_CPU");
    core.compile_model(model_cannot_batch, "MOCK_GPU");  // need to initialize the order of plugins in mock_engine
    register_plugin_mock_cpu_compile_slower(core, "MOCK_CPU_SLOWER", {});
    ThreadingTest::runParallel(
        [&]() {
            (void)core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU_SLOWER")});
        },
        10,
        10);
}
#endif

View File

@ -0,0 +1,790 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_func_test.hpp"
#include <chrono>
#include <memory>
#include <string>
#include "common_test_utils/file_utils.hpp"
#include "ie_plugin_config.hpp"
#include "openvino/core/any.hpp"
#include "openvino/core/except.hpp"
#include "openvino/opsets/opset11.hpp"
#include "openvino/pass/serialize.hpp"
#include "openvino/runtime/auto/properties.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"
#include "openvino/runtime/internal_properties.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/util/shared_object.hpp"
namespace {
// Path of the mock plugin library located next to the test binary.
std::string get_mock_engine_path() {
    std::string mockEngineName("mock_engine");
    return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(),
                                              mockEngineName + IE_BUILD_POSTFIX);
}

// Resolve a symbol from the shared object into a std::function.
// Takes the shared_ptr by const reference (was by value) to avoid an
// atomic ref-count bump per lookup; the caller keeps the library alive.
template <class T>
std::function<T> make_std_function(const std::shared_ptr<void>& so, const std::string& functionName) {
    std::function<T> ptr(reinterpret_cast<T*>(ov::util::get_symbol(so, functionName.c_str())));
    return ptr;
}

// True when every op of the model appears in the supported-ops map
// (keyed by friendly name, as filled by query_model()).
bool support_model(const std::shared_ptr<const ov::Model>& model, const ov::SupportedOpsMap& supported_ops) {
    for (const auto& op : model->get_ops()) {
        if (supported_ops.find(op->get_friendly_name()) == supported_ops.end())
            return false;
    }
    return true;
}

// Shorthand builders for read-only / read-write property names.
ov::PropertyName RO_property(const std::string& propertyName) {
    return ov::PropertyName(propertyName, ov::PropertyMutability::RO);
}

ov::PropertyName RW_property(const std::string& propertyName) {
    return ov::PropertyName(propertyName, ov::PropertyMutability::RW);
}
}  // namespace
// Registers the mock CPU/GPU plugins (once per fixture) and prepares the
// test models plus a unique model-cache directory for the current test.
void ov::auto_plugin::tests::AutoFuncTests::SetUp() {
    if (m_mock_plugins.empty()) {
        register_plugin_mock_cpu(core, "MOCK_CPU", {});
        register_plugin_mock_gpu(core, "MOCK_GPU", {});
    }
    model_can_batch = create_model_with_batch_possible();
    model_cannot_batch = create_model_with_reshape();
    // Cache path is unique per test name, thread id and timestamp so that
    // parallel runs do not share (or clobber) each other's model cache.
    auto hash = std::hash<std::string>()(::testing::UnitTest::GetInstance()->current_test_info()->name());
    std::stringstream ss;
    ss << std::this_thread::get_id();
    cache_path =
        "threading_test" + std::to_string(hash) + "_" + ss.str() + "_" + ov::test::utils::GetTimestamp() + "_cache";
}
// Removes the cached blobs and the per-test cache directory created in SetUp().
void ov::auto_plugin::tests::AutoFuncTests::TearDown() {
    ov::test::utils::removeFilesWithExt(cache_path, "blob");
    ov::test::utils::removeDir(cache_path);
}
// Creates a tensor of the given element type and shape filled with test data.
// Only i64 is currently supported; any other element type throws.
ov::Tensor ov::auto_plugin::tests::AutoFuncTests::create_and_fill_tensor(const ov::element::Type& type,
                                                                         const ov::Shape& shape) {
    switch (type) {
    case ov::element::Type_t::i64:
        return create_tensor<ov::element_type_traits<ov::element::Type_t::i64>::value_type>(type, shape);
    default:
        break;
    }
    OPENVINO_THROW("Cannot generate tensor. Unsupported element type.");
}
// Builds a minimal batchable graph: Parameter -> Add(+1 constant) -> Result.
std::shared_ptr<ov::Model> ov::auto_plugin::tests::AutoFuncTests::create_model_with_batch_possible() {
    auto input = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
    input->set_friendly_name("input");

    auto one = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1});
    one->set_friendly_name("const_val");

    auto sum = std::make_shared<ov::opset11::Add>(input, one);
    sum->set_friendly_name("add");

    auto res = std::make_shared<ov::opset11::Result>(sum);
    res->set_friendly_name("res");

    return std::make_shared<ov::Model>(ov::ResultVector{res}, ov::ParameterVector{input});
}
// Builds a graph that cannot be auto-batched because of the flattening
// Reshape: Parameter -> Add(+1 constant) -> Reshape(-1) -> Result.
std::shared_ptr<ov::Model> ov::auto_plugin::tests::AutoFuncTests::create_model_with_reshape() {
    auto input = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
    input->set_friendly_name("input");

    auto one = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1});
    one->set_friendly_name("const_val");

    auto sum = std::make_shared<ov::opset11::Add>(input, one);
    sum->set_friendly_name("add");

    auto flatten_pattern = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1}, {-1});
    flatten_pattern->set_friendly_name("reshape_val");

    auto flatten = std::make_shared<ov::opset11::Reshape>(sum, flatten_pattern, true);
    flatten->set_friendly_name("reshape");

    auto res = std::make_shared<ov::opset11::Result>(flatten);
    res->set_friendly_name("res");

    return std::make_shared<ov::Model>(ov::ResultVector{res}, ov::ParameterVector{input});
}
// Mock plugins
// Minimal ov::ICompiledModel used by the mock plugins below.  Keeps the
// original ov::Model so inference can be emulated with Model::evaluate().
class MockCompiledModel : public ov::ICompiledModel {
public:
    // Compile without an explicit remote context: try to grab the plugin's
    // default context and remember whether that succeeded.
    MockCompiledModel(const std::shared_ptr<const ov::Model>& model,
                      const std::shared_ptr<const ov::IPlugin>& plugin,
                      const ov::AnyMap& config)
        : ov::ICompiledModel(model, plugin),
          m_config(config),
          m_model(model),
          m_has_context(false) {
        try {
            m_context = plugin->get_default_context(config);
        } catch (ov::Exception&) {
            // Plugin has no default context — run without one.
        }
    }

    // Compile with a caller-provided remote context.
    MockCompiledModel(const std::shared_ptr<const ov::Model>& model,
                      const std::shared_ptr<const ov::IPlugin>& plugin,
                      const ov::AnyMap& config,
                      const ov::SoPtr<ov::IRemoteContext>& context)
        : ov::ICompiledModel(model, plugin),
          m_config(config),
          m_model(model),
          m_has_context(true),
          m_context(context) {}

    // Methods from a base class ov::ICompiledModel
    // Serialize the kept model so MockPluginBase::import_model() can restore it.
    void export_model(std::ostream& model) const override {
        ov::pass::StreamSerialize(model, std::function<void(std::ostream&)>())
            .run_on_model(std::const_pointer_cast<ov::Model>(m_model));
    }

    std::shared_ptr<const ov::Model> get_runtime_model() const override {
        return m_model;
    }

    void set_property(const ov::AnyMap& properties) override {
        OPENVINO_NOT_IMPLEMENTED;
    }

    // Compile-time config entries win; otherwise answer a small fixed set of
    // compiled-model properties.
    ov::Any get_property(const std::string& name) const override {
        auto prop = m_config.find(name);
        if (prop != m_config.end())
            return prop->second;
        if (name == ov::supported_properties) {
            std::vector<ov::PropertyName> supportedProperties{ov::optimal_number_of_infer_requests,
                                                              ov::hint::performance_mode};
            return decltype(ov::supported_properties)::value_type(supportedProperties);
        } else if (name == ov::optimal_number_of_infer_requests.name()) {
            return decltype(ov::optimal_number_of_infer_requests)::value_type(2);
        } else if (name == ov::model_name) {
            return decltype(ov::model_name)::value_type(m_model->get_name());
        } else if (name == ov::execution_devices) {
            return decltype(ov::execution_devices)::value_type({get_plugin()->get_device_name()});
        }
        OPENVINO_NOT_IMPLEMENTED;
    }

    std::shared_ptr<ov::ISyncInferRequest> create_sync_infer_request() const override;

    const std::shared_ptr<const ov::Model>& get_model() const {
        return m_model;
    }

    ov::SoPtr<ov::IRemoteContext> get_context() const {
        return m_context;
    }

    // True only when a context was passed in explicitly (not the default one).
    bool has_context() const {
        return m_has_context;
    }

private:
    ov::AnyMap m_config;
    std::shared_ptr<const ov::Model> m_model;
    bool m_has_context;
    ov::SoPtr<ov::IRemoteContext> m_context;
};
// Sync infer request for MockCompiledModel: validates remote tensors against
// the compiled model's context and emulates execution on host tensors with
// ov::Model::evaluate() (plus an artificial delay for the threading tests).
class MockInferRequest : public ov::ISyncInferRequest {
public:
    MockInferRequest(const std::shared_ptr<const MockCompiledModel>& compiled_model)
        : ov::ISyncInferRequest(compiled_model) {
        OPENVINO_ASSERT(compiled_model);
        m_model = compiled_model->get_model();
        m_has_context = compiled_model->get_context() != nullptr;
        // Allocate input/output tensors
        for (const auto& input : get_inputs()) {
            allocate_tensor(input, [this, input, compiled_model](ov::SoPtr<ov::ITensor>& tensor) {
                // Can add a check to avoid double work in case of shared tensors
                allocate_tensor_impl(tensor,
                                     input.get_element_type(),
                                     input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape(),
                                     compiled_model->has_context(),
                                     compiled_model->get_context());
            });
        }
        for (const auto& output : get_outputs()) {
            allocate_tensor(output, [this, output, compiled_model](ov::SoPtr<ov::ITensor>& tensor) {
                // Can add a check to avoid double work in case of shared tensors
                allocate_tensor_impl(tensor,
                                     output.get_element_type(),
                                     output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape(),
                                     compiled_model->has_context(),
                                     compiled_model->get_context());
            });
        }
    }
    ~MockInferRequest() = default;

    // Gather I/O tensors, verify any remote tensors belong to our context,
    // then (for host tensors only) run the model via evaluate().
    void infer() override {
        ov::TensorVector input_tensors;
        bool evaluate_flag = true;  // fixed misspelling: was 'evaludate_flag'
        for (const auto& input : get_inputs()) {
            auto tensor = get_tensor(input);
            // Remote tensors cannot be evaluated on host; only validate them.
            if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) && m_has_context) {
                evaluate_flag = false;
                auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr);
                if (remote_tensor->get_device_name() != get_compiled_model()->get_context()->get_device_name())
                    OPENVINO_THROW("cannot consume the buffer!");
            }
            input_tensors.emplace_back(ov::make_tensor(tensor));
        }
        ov::TensorVector output_tensors;
        for (const auto& output : get_outputs()) {
            auto tensor = get_tensor(output);
            // Same device-ownership check for remote output tensors.
            if (std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) && m_has_context) {
                evaluate_flag = false;
                auto remote_tensor = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr);
                if (remote_tensor->get_device_name() != get_compiled_model()->get_context()->get_device_name())
                    OPENVINO_THROW("cannot consume the buffer!");
            }
            output_tensors.emplace_back(ov::make_tensor(tensor));
        }
        if (evaluate_flag) {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));  // add delay for test
            m_model->evaluate(output_tensors, input_tensors);
        }
    }

    std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override {
        OPENVINO_NOT_IMPLEMENTED;
    }

    std::vector<ov::ProfilingInfo> get_profiling_info() const override {
        OPENVINO_NOT_IMPLEMENTED;
    }

private:
    // Allocate (or reshape) a tensor: a remote tensor when a context exists,
    // otherwise a plain host tensor.
    void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
                              const ov::element::Type& element_type,
                              const ov::Shape& shape,
                              bool has_context,
                              ov::SoPtr<ov::IRemoteContext> context) {
        if (!tensor || tensor->get_element_type() != element_type) {
            if (has_context) {
                tensor = context->create_tensor(element_type, shape, {});
            } else {
                tensor = ov::SoPtr<ov::ITensor>(ov::make_tensor(element_type, shape), nullptr);
            }
        } else {
            tensor->set_shape(shape);
        }
    }

    std::shared_ptr<const ov::Model> m_model;
    bool m_has_context;
};
// Factory hook of ov::ICompiledModel: hand the request a const view of
// this compiled model.
std::shared_ptr<ov::ISyncInferRequest> MockCompiledModel::create_sync_infer_request() const {
    auto self = std::dynamic_pointer_cast<const MockCompiledModel>(shared_from_this());
    return std::make_shared<MockInferRequest>(self);
}
// Fake remote (device) tensor: records device name, context properties,
// element type and shape, but owns no real device memory.  Reshape and
// strides are unsupported.
class MockRemoteTensor : public ov::IRemoteTensor {
    ov::AnyMap m_properties;
    std::string m_dev_name;
    ov::element::Type m_element_type;
    ov::Shape m_shape;

public:
    MockRemoteTensor(const std::string& name,
                     const ov::AnyMap& props,
                     const ov::element::Type& type,
                     const ov::Shape& shape)
        : m_properties(props),
          m_dev_name(name),
          m_element_type(type),
          m_shape(shape) {}

    const ov::AnyMap& get_properties() const override {
        return m_properties;
    }

    // Used by MockInferRequest to verify the tensor belongs to the right device.
    const std::string& get_device_name() const override {
        return m_dev_name;
    }

    void set_shape(ov::Shape shape) override {
        OPENVINO_NOT_IMPLEMENTED;
    }

    const ov::element::Type& get_element_type() const override {
        return m_element_type;
    }

    const ov::Shape& get_shape() const override {
        return m_shape;
    }

    const ov::Strides& get_strides() const override {
        OPENVINO_NOT_IMPLEMENTED;
    }
};
// Default remote context of a mock device ("IS_DEFAULT" = true);
// hands out MockRemoteTensor instances carrying its device name.
class MockRemoteContext : public ov::IRemoteContext {
    ov::AnyMap m_property = {{"IS_DEFAULT", true}};
    std::string m_dev_name;

public:
    MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {}

    const std::string& get_device_name() const override {
        return m_dev_name;
    }

    const ov::AnyMap& get_property() const override {
        return m_property;
    }

    // Created tensors inherit this context's device name and properties.
    ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                               const ov::Shape& shape,
                                               const ov::AnyMap& params = {}) override {
        auto remote_tensor = std::make_shared<MockRemoteTensor>(m_dev_name, m_property, type, shape);
        return {remote_tensor, nullptr};
    }
};
// User-created (non-default) remote context ("IS_DEFAULT" = false); returned
// by create_context() when the "CUSTOM_CTX" property is present.
class MockCustomRemoteContext : public ov::IRemoteContext {
    ov::AnyMap m_property = {{"IS_DEFAULT", false}};
    std::string m_dev_name;

public:
    MockCustomRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {}

    const std::string& get_device_name() const override {
        return m_dev_name;
    }

    const ov::AnyMap& get_property() const override {
        return m_property;
    }

    // Created tensors inherit this context's device name and properties.
    ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                               const ov::Shape& shape,
                                               const ov::AnyMap& params = {}) override {
        auto remote_tensor = std::make_shared<MockRemoteTensor>(m_dev_name, m_property, type, shape);
        return {remote_tensor, nullptr};
    }
};
class MockPluginBase : public ov::IPlugin {
public:
std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties) const override {
OPENVINO_ASSERT(model);
if (!support_model(model, query_model(model, properties)))
OPENVINO_THROW("Unsupported model");
return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties);
}
std::shared_ptr<ov::ICompiledModel> compile_model(const std::string& model_path,
const ov::AnyMap& properties) const override {
OPENVINO_NOT_IMPLEMENTED;
}
std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties,
const ov::SoPtr<ov::IRemoteContext>& context) const override {
if (!support_model(model, query_model(model, properties)))
OPENVINO_THROW("Unsupported model");
return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties, context);
}
void set_property(const ov::AnyMap& properties) override {
OPENVINO_NOT_IMPLEMENTED;
}
ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override {
OPENVINO_NOT_IMPLEMENTED;
}
ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
OPENVINO_NOT_IMPLEMENTED;
}
ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
OPENVINO_NOT_IMPLEMENTED;
}
std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const override {
std::string xmlString, xmlInOutString;
ov::Tensor weights;
ov::pass::StreamSerialize::DataHeader hdr = {};
model.read(reinterpret_cast<char*>(&hdr), sizeof hdr);
// read CNNNetwork input/output precisions
model.seekg(hdr.custom_data_offset);
xmlInOutString.resize(hdr.custom_data_size);
model.read(const_cast<char*>(xmlInOutString.c_str()), hdr.custom_data_size);
// read blob content
model.seekg(hdr.consts_offset);
if (hdr.consts_size) {
weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size});
char* data = static_cast<char*>(weights.data());
model.read(data, hdr.consts_size);
}
// read XML content
model.seekg(hdr.model_offset);
xmlString.resize(hdr.model_size);
model.read(const_cast<char*>(xmlString.c_str()), hdr.model_size);
ov::Core core;
auto ov_model = core.read_model(xmlString, weights);
return compile_model(ov_model, properties);
}
std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
const ov::SoPtr<ov::IRemoteContext>& context,
const ov::AnyMap& properties) const override {
std::string xmlString, xmlInOutString;
ov::Tensor weights;
ov::pass::StreamSerialize::DataHeader hdr = {};
model.read(reinterpret_cast<char*>(&hdr), sizeof hdr);
// read CNNNetwork input/output precisions
model.seekg(hdr.custom_data_offset);
xmlInOutString.resize(hdr.custom_data_size);
model.read(const_cast<char*>(xmlInOutString.c_str()), hdr.custom_data_size);
// read blob content
model.seekg(hdr.consts_offset);
if (hdr.consts_size) {
weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size});
char* data = static_cast<char*>(weights.data());
model.read(data, hdr.consts_size);
}
// read XML content
model.seekg(hdr.model_offset);
xmlString.resize(hdr.model_size);
model.read(const_cast<char*>(xmlString.c_str()), hdr.model_size);
ov::Core core;
auto ov_model = core.read_model(xmlString, weights);
return compile_model(ov_model, properties, context);
}
ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties) const override {
OPENVINO_NOT_IMPLEMENTED;
}
};
// GPU-flavored mock plugin: reports batching support, remote contexts and a
// GPU-like property set so AUTO treats it as an accelerator device.
class MockPluginSupportBatchAndContext : public MockPluginBase {
public:
    // Marks the basic ops used by the test models as supported.
    ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                    const ov::AnyMap& properties) const override {
        OPENVINO_ASSERT(model);
        std::unordered_set<std::string> supported_ops = {"Parameter", "Result", "Add", "Constant", "Reshape"};
        ov::SupportedOpsMap res;
        for (const auto& op : model->get_ordered_ops()) {
            if (supported_ops.find(op->get_type_info().name) == supported_ops.end())
                continue;
            res.emplace(op->get_friendly_name(), get_device_name());
        }
        return res;
    }

    // "CUSTOM_CTX" in the properties selects the non-default context flavor.
    ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
        if (remote_properties.find("CUSTOM_CTX") == remote_properties.end())
            return std::make_shared<MockRemoteContext>(get_device_name());
        return std::make_shared<MockCustomRemoteContext>(get_device_name());
    }

    // Default context; an optional device id is appended as "<name>.<id>".
    ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
        std::string device_name = get_device_name();
        if (remote_properties.find(ov::device::id.name()) != remote_properties.end())
            device_name = device_name + "." + remote_properties.at(ov::device::id.name()).as<std::string>();
        return std::make_shared<MockRemoteContext>(device_name);
    }

    // Accepts the handful of RW properties AUTO forwards; anything else throws.
    void set_property(const ov::AnyMap& properties) override {
        for (const auto& it : properties) {
            if (it.first == ov::num_streams.name())
                num_streams = it.second.as<int32_t>();
            else if (it.first == ov::enable_profiling.name())
                m_profiling = it.second.as<bool>();
            else if (it.first == ov::hint::performance_mode.name())
                m_perf_hint = it.second.as<ov::hint::PerformanceMode>();
            else if (it.first == ov::hint::num_requests.name())
                m_request = it.second.as<uint32_t>();
            else if (it.first == ov::device::id.name())
                m_id = it.second.as<std::string>();
            else if (it.first == ov::cache_dir.name())
                continue;
            else
                OPENVINO_THROW(get_device_name(), " set config: " + it.first);
        }
    }

    ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override {
        const std::vector<ov::PropertyName> roProperties{RO_property(ov::supported_properties.name()),
                                                         RO_property(ov::optimal_batch_size.name()),
                                                         RO_property(ov::device::capabilities.name()),
                                                         RO_property(ov::device::type.name()),
                                                         RO_property(ov::device::uuid.name()),
                                                         RO_property(ov::device::id.name()),
                                                         RO_property(ov::intel_gpu::memory_statistics.name())};
        // the whole config is RW before network is loaded.
        const std::vector<ov::PropertyName> rwProperties{RW_property(ov::num_streams.name()),
                                                         RW_property(ov::enable_profiling.name()),
                                                         RW_property(ov::compilation_num_threads.name()),
                                                         RW_property(ov::hint::performance_mode.name()),
                                                         RW_property(ov::hint::num_requests.name())};
        if (name == ov::supported_properties) {
            std::vector<ov::PropertyName> supportedProperties;
            supportedProperties.reserve(roProperties.size() + rwProperties.size());
            supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
            supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
            return decltype(ov::supported_properties)::value_type(supportedProperties);
        } else if (name == ov::hint::num_requests.name()) {
            return decltype(ov::hint::num_requests)::value_type(1);
        } else if (name == ov::hint::performance_mode.name()) {
            return decltype(ov::hint::performance_mode)::value_type(ov::hint::PerformanceMode::LATENCY);
        } else if (name == ov::optimal_batch_size.name()) {
            return decltype(ov::optimal_batch_size)::value_type(4);
        } else if (name == ov::device::capabilities.name()) {
            return decltype(ov::device::capabilities)::value_type(
                {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8", ov::device::capability::EXPORT_IMPORT});
        } else if (name == ov::device::type.name()) {
            return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED);
        } else if (name == ov::loaded_from_cache.name()) {
            // Fix: this branch used to return a literal `false`, which made a
            // second, identical branch further down (returning the member)
            // unreachable dead code.  Report the member instead; it is
            // initialized to false, so default behavior is unchanged.
            return m_loaded_from_cache;
        } else if (name == ov::enable_profiling.name()) {
            return decltype(ov::enable_profiling)::value_type{false};
        } else if (name == ov::streams::num.name()) {
            return decltype(ov::streams::num)::value_type{2};
        } else if (name == ov::compilation_num_threads.name()) {
            return decltype(ov::compilation_num_threads)::value_type{4};
        } else if (name == "SUPPORTED_CONFIG_KEYS") {  // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : rwProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (name == "SUPPORTED_METRICS") {  // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : roProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (name == ov::internal::supported_properties) {
            return decltype(ov::internal::supported_properties)::value_type(
                {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}});
        } else if (ov::internal::caching_properties == name) {
            std::vector<ov::PropertyName> caching_properties = {ov::device::uuid, ov::device::id};
            return decltype(ov::internal::caching_properties)::value_type(caching_properties);
        } else if (name == ov::device::uuid) {
            ov::device::UUID uuid = {};
            return decltype(ov::device::uuid)::value_type{uuid};
        } else if (name == ov::device::id) {
            return decltype(ov::device::id)::value_type{m_id};
        } else if (name == ov::intel_gpu::memory_statistics) {
            return decltype(ov::intel_gpu::memory_statistics)::value_type{{}};
        }
        OPENVINO_NOT_IMPLEMENTED;
    }

private:
    int32_t num_streams{0};
    bool m_profiling = false;
    bool m_loaded_from_cache{false};
    ov::hint::PerformanceMode m_perf_hint = ov::hint::PerformanceMode::THROUGHPUT;
    uint32_t m_request = 0;
    std::string m_id;
};
// Injects the given plugin object into the mock_engine shared library and
// registers that library in the core under device_name.  The plugin object
// is kept alive in m_mock_plugins for the lifetime of the fixture.
void ov::auto_plugin::tests::AutoFuncTests::reg_plugin(ov::Core& core,
                                                       std::shared_ptr<ov::IPlugin>& plugin,
                                                       const std::string& device_name,
                                                       const ov::AnyMap& properties) {
    std::string libraryPath = get_mock_engine_path();
    if (!m_so)
        m_so = ov::util::load_shared_object(libraryPath.c_str());
    plugin->set_device_name(device_name);
    // mock_engine exports InjectPlugin(): the next plugin instance the
    // library hands to the core will be this object.
    std::function<void(ov::IPlugin*)> injectProxyEngine = make_std_function<void(ov::IPlugin*)>(m_so, "InjectPlugin");
    injectProxyEngine(plugin.get());
    core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(),
                                                            std::string("mock_engine") + IE_BUILD_POSTFIX),
                         device_name,
                         properties);
    m_mock_plugins.emplace_back(plugin);
}
// test
// Registers a GPU-like mock device (batching + remote-context support)
// under the given name.
void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_gpu(ov::Core& core,
                                                                     const std::string& device_name,
                                                                     const ov::AnyMap& properties) {
    std::shared_ptr<ov::IPlugin> gpu_like_plugin = std::make_shared<MockPluginSupportBatchAndContext>();
    reg_plugin(core, gpu_like_plugin, device_name, properties);
}
// Mock CPU-like plugin: supports a small fixed op set, plain (non-batched,
// non-remote-context) execution, and a minimal property surface.
class MockPlugin : public MockPluginBase {
public:
    // Reports every op whose type name is in the supported set as runnable on
    // this device; unsupported ops are simply left out of the result map.
    ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                    const ov::AnyMap& properties) const override {
        OPENVINO_ASSERT(model);
        std::unordered_set<std::string> supported_ops = {"Parameter", "Result", "Add", "Constant", "Reshape"};
        ov::SupportedOpsMap res;
        for (const auto& op : model->get_ordered_ops()) {
            if (supported_ops.find(op->get_type_info().name) == supported_ops.end())
                continue;
            res.emplace(op->get_friendly_name(), get_device_name());
        }
        return res;
    }

    // Accepts the RW properties this mock advertises (device::id and cache_dir
    // are silently ignored); any other key throws so tests can detect
    // unexpected properties being forwarded to the device plugin.
    void set_property(const ov::AnyMap& properties) override {
        for (const auto& it : properties) {
            if (it.first == ov::num_streams.name())
                num_streams = it.second.as<int32_t>();
            else if (it.first == ov::enable_profiling.name())
                m_profiling = it.second.as<bool>();
            else if (it.first == ov::device::id.name())
                continue;
            else if (it.first == ov::cache_dir.name())
                continue;
            else
                OPENVINO_THROW(get_device_name(), " set config: " + it.first);
        }
    }

    // Returns (mostly fixed) values for the metrics and configs this mock supports.
    ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override {
        const std::vector<ov::PropertyName> roProperties{RO_property(ov::supported_properties.name()),
                                                         RO_property(ov::device::uuid.name()),
                                                         RO_property(ov::device::capabilities.name())};
        // the whole config is RW before network is loaded.
        const std::vector<ov::PropertyName> rwProperties{RW_property(ov::num_streams.name()),
                                                         RW_property(ov::enable_profiling.name()),
                                                         RW_property(ov::hint::performance_mode.name())};
        if (name == ov::supported_properties) {
            std::vector<ov::PropertyName> supportedProperties;
            supportedProperties.reserve(roProperties.size() + rwProperties.size());
            supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
            supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
            return decltype(ov::supported_properties)::value_type(supportedProperties);
        } else if (name == ov::loaded_from_cache.name()) {
            // Report the actual flag. A second, unreachable branch for this key
            // (shadowed by this one) used to return the member while this one
            // returned a hardcoded `false`; they are merged here.
            return m_loaded_from_cache;
        } else if (name == ov::enable_profiling.name()) {
            return decltype(ov::enable_profiling)::value_type{false};
        } else if (name == ov::streams::num.name()) {
            return decltype(ov::streams::num)::value_type{2};
        } else if (name == "SUPPORTED_CONFIG_KEYS") {  // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : rwProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (name == "SUPPORTED_METRICS") {  // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : roProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (name == ov::internal::supported_properties) {
            return decltype(ov::internal::supported_properties)::value_type(
                {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}});
        } else if (name == ov::device::capabilities) {
            std::vector<std::string> capabilities;
            capabilities.push_back(ov::device::capability::EXPORT_IMPORT);
            return decltype(ov::device::capabilities)::value_type(capabilities);
        } else if (ov::internal::caching_properties == name) {
            std::vector<ov::PropertyName> caching_properties = {ov::device::uuid};
            return decltype(ov::internal::caching_properties)::value_type(caching_properties);
        } else if (name == ov::device::uuid) {
            ov::device::UUID uuid = {};
            return decltype(ov::device::uuid)::value_type{uuid};
        }
        OPENVINO_NOT_IMPLEMENTED;
    }

private:
    // Written by set_property; note get_property(streams::num) returns a fixed 2,
    // independent of this member (kept as-is, existing tests rely on it).
    int32_t num_streams{0};
    bool m_profiling = false;
    bool m_loaded_from_cache{false};
};
// Registers a CPU-like mock device (no batching / remote-context support).
void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_cpu(ov::Core& core,
                                                                     const std::string& device_name,
                                                                     const ov::AnyMap& properties) {
    std::shared_ptr<ov::IPlugin> mock_plugin = std::make_shared<MockPlugin>();
    reg_plugin(core, mock_plugin, device_name, properties);
}
// Registers a GPU-like mock whose compile_model() is artificially delayed, so
// tests can observe AUTO serving inference from the faster device first.
void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_gpu_compile_slower(ov::Core& core,
                                                                                    const std::string& device_name,
                                                                                    const ov::AnyMap& properties) {
    class MockPluginCompileSlower : public MockPluginSupportBatchAndContext {
    public:
        std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                          const ov::AnyMap& properties) const override {
            OPENVINO_ASSERT(model);
            if (!support_model(model, query_model(model, properties)))
                OPENVINO_THROW("Unsupported model");
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));  // simulate slow compilation
            return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties);
        }
        std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                          const ov::AnyMap& properties,
                                                          const ov::SoPtr<ov::IRemoteContext>& context) const override {
            OPENVINO_ASSERT(model);  // added: keep both overloads consistent
            if (!support_model(model, query_model(model, properties)))
                OPENVINO_THROW("Unsupported model");
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));  // simulate slow compilation
            return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties, context);
        }
    };
    std::shared_ptr<ov::IPlugin> base_plugin = std::make_shared<MockPluginCompileSlower>();
    reg_plugin(core, base_plugin, device_name, properties);
}
// Registers a CPU-like mock whose compile_model() is artificially delayed —
// counterpart of register_plugin_mock_gpu_compile_slower for the CPU mock.
void ov::auto_plugin::tests::AutoFuncTests::register_plugin_mock_cpu_compile_slower(ov::Core& core,
                                                                                    const std::string& device_name,
                                                                                    const ov::AnyMap& properties) {
    class MockCPUPluginCompileSlower : public MockPlugin {
    public:
        std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                          const ov::AnyMap& properties) const override {
            OPENVINO_ASSERT(model);
            if (!support_model(model, query_model(model, properties)))
                OPENVINO_THROW("Unsupported model");
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));  // simulate slow compilation
            return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties);
        }
        std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                          const ov::AnyMap& properties,
                                                          const ov::SoPtr<ov::IRemoteContext>& context) const override {
            OPENVINO_ASSERT(model);  // added: keep both overloads consistent
            if (!support_model(model, query_model(model, properties)))
                OPENVINO_THROW("Unsupported model");
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));  // simulate slow compilation
            return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties, context);
        }
    };
    std::shared_ptr<ov::IPlugin> base_plugin = std::make_shared<MockCPUPluginCompileSlower>();
    reg_plugin(core, base_plugin, device_name, properties);
}

View File

@ -0,0 +1,133 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <memory>
#include <thread>
#include "openvino/runtime/auto/properties.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/iplugin.hpp"
namespace ov {
namespace auto_plugin {
namespace tests {
// Runs `code`, expecting it to throw `expected_exception` whose what() message
// contains `expected_message`; fails on no exception, a different exception
// type, or a non-matching message. The do { ... } while (0) wrapper carries no
// trailing semicolon so the macro behaves like a single statement and stays
// safe inside unbraced if/else (the original trailing `;` broke that).
#define ASSERT_THROW_WITH_MESSAGE(code, expected_exception, expected_message)        \
    do {                                                                             \
        try {                                                                        \
            { code; }                                                                \
            FAIL() << "no exception occurred" << std::endl;                          \
        } catch (const expected_exception& e) {                                      \
            EXPECT_THAT(e.what(), testing::HasSubstr(expected_message));             \
        } catch (const std::exception& e) {                                          \
            FAIL() << "an unexpected exception occurred: " << e.what() << std::endl; \
        } catch (...) {                                                              \
            FAIL() << "an unknown exception occurred" << std::endl;                  \
        }                                                                            \
    } while (0)
class PluginRemoteTensor : public ov::RemoteTensor {
public:
/**
* @brief Checks that type defined runtime parameters are presented in remote object
* @param tensor a tensor to check
*/
static void type_check(const Tensor& tensor) {
RemoteTensor::type_check(tensor, {{"IS_DEFAULT", {}}});
}
bool is_default() {
return get_params().at("IS_DEFAULT").as<bool>();
}
};
class PluginRemoteContext : public ov::RemoteContext {
public:
// Needed to make create_tensor overloads from base class visible for user
using RemoteContext::create_host_tensor;
using RemoteContext::create_tensor;
/**
* @brief Checks that type defined runtime parameters are presented in remote object
* @param remote_context A remote context to check
*/
static void type_check(const RemoteContext& remote_context) {
RemoteContext::type_check(remote_context, {{"IS_DEFAULT", {}}});
}
bool is_default() {
return get_params().at("IS_DEFAULT").as<bool>();
}
};
// Base fixture for AUTO plugin functional tests: owns an ov::Core with the
// mock CPU/GPU plugins registered (via register_plugin_mock_*) and the shared
// test models used by the individual test cases.
class AutoFuncTests : public ::testing::Test {
public:
    ov::Core core;  // core instance the mock devices are registered into

    void SetUp() override;
    void TearDown() override;

    // Creates a tensor of the given type/shape filled with an increasing
    // sequence (0, 1, 2, ...) — dispatches to the typed create_tensor<T>.
    ov::Tensor create_and_fill_tensor(const ov::element::Type& type, const ov::Shape& shape);

protected:
    // Register mock plugins in `core` under `device_name`; the *_compile_slower
    // variants add an artificial delay to compile_model().
    void register_plugin_mock_cpu(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties);
    void register_plugin_mock_cpu_compile_slower(ov::Core& core,
                                                 const std::string& device_name,
                                                 const ov::AnyMap& properties);
    void register_plugin_mock_gpu(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties);
    void register_plugin_mock_gpu_compile_slower(ov::Core& core,
                                                 const std::string& device_name,
                                                 const ov::AnyMap& properties);

    std::shared_ptr<ov::Model> model_can_batch;     // model auto-batching is expected to apply to
    std::shared_ptr<ov::Model> model_cannot_batch;  // model auto-batching is not expected to apply to
    std::string cache_path;                         // directory used by the model-caching tests

private:
    // Fills a typed tensor with static_cast<T>(i) for each flat index i.
    template <class T>
    ov::Tensor create_tensor(const ov::element::Type& type, const ov::Shape& shape) {
        ov::Tensor tensor(type, shape);
        T* data = tensor.data<T>();
        for (size_t i = 0; i < tensor.get_size(); i++) {
            data[i] = static_cast<T>(i);
        }
        return tensor;
    }

    std::vector<std::shared_ptr<ov::IPlugin>> m_mock_plugins;  // keeps injected mock plugins alive
    std::shared_ptr<void> m_so;                                // handle of the loaded mock engine library

    // Injects `plugin` into the mock engine library and registers it in `core`.
    void reg_plugin(ov::Core& core,
                    std::shared_ptr<ov::IPlugin>& plugin,
                    const std::string& device_name,
                    const ov::AnyMap& properties);
    std::shared_ptr<ov::Model> create_model_with_batch_possible();
    std::shared_ptr<ov::Model> create_model_with_reshape();
};
// Helper that hammers a callable from several threads concurrently, to expose
// thread-safety issues in the code under test. Each of `threadsNum` threads
// invokes `func` `iterations` times; all threads are joined before returning.
class ThreadingTest {
public:
    static void runParallel(std::function<void(void)> func,
                            const unsigned int iterations = 100,
                            const unsigned int threadsNum = 8) {
        std::vector<std::thread> workers;
        workers.reserve(threadsNum);
        for (unsigned int t = 0; t < threadsNum; ++t) {
            workers.emplace_back([&func, iterations]() {
                for (unsigned int i = 0; i < iterations; ++i) {
                    func();
                }
            });
        }
        for (auto& worker : workers) {
            if (worker.joinable())
                worker.join();
        }
    }
};
} // namespace tests
} // namespace auto_plugin
} // namespace ov

View File

@ -0,0 +1,60 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_func_test.hpp"
#include "common_test_utils/include/common_test_utils/file_utils.hpp"
using namespace ov::auto_plugin::tests;
// Caching: compiling the same model twice with an unchanged cache key must
// reuse the blobs on disk; changing the MOCK_GPU device id (part of the cache
// key) must regenerate that device's blob.
TEST_F(AutoFuncTests, compiled_with_cache_enabled) {
    core.set_property(ov::cache_dir(cache_path));
    core.set_property("MOCK_GPU", ov::device::id("test"));  // device id for cache property distinguish with MOCK_CPU
    auto compiled_model =
        core.compile_model(model_cannot_batch,
                           "AUTO",
                           {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    // 2 blobs expected — presumably one per device (MOCK_GPU + MOCK_CPU)
    ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 2);
    compiled_model = core.compile_model(model_cannot_batch,
                                        "AUTO",
                                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    // can reuse the cache, no extra cache generated
    ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 2);
    core.set_property("MOCK_GPU", ov::device::id("test_regenerate"));
    compiled_model = core.compile_model(model_cannot_batch,
                                        "AUTO",
                                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    // new cache file expected
    ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3);
    core.set_property(ov::cache_dir(""));  // reset so later tests are unaffected
}

// Same scenario with a batchable model when auto-batching is built in: the
// expected blob counts are one higher per MOCK_GPU compilation (3, then 5
// after the device-id change) — presumably due to the extra batched model.
TEST_F(AutoFuncTests, compiled_with_cache_enabled_batch_enabled) {
#ifdef ENABLE_AUTO_BATCH
    core.set_property(ov::cache_dir(cache_path));
    core.set_property("MOCK_GPU", ov::device::id("test"));  // device id for cache property distinguish with MOCK_CPU
    auto compiled_model =
        core.compile_model(model_can_batch,
                           "AUTO",
                           {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3);
    compiled_model = core.compile_model(model_can_batch,
                                        "AUTO",
                                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    // can reuse the cache, no extra cache generated
    ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 3);
    core.set_property("MOCK_GPU", ov::device::id("test_regenerate"));
    compiled_model = core.compile_model(model_can_batch,
                                        "AUTO",
                                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    // new cache file expected
    ASSERT_EQ(ov::test::utils::listFilesWithExt(cache_path, "blob").size(), 5);
    core.set_property(ov::cache_dir(""));
#endif
}

View File

@ -0,0 +1,116 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <future>
#include <thread>
#include "auto_func_test.hpp"
using namespace ov::auto_plugin::tests;
// A basic async infer through AUTO (GPU + CPU helper) must complete and invoke
// the user callback exactly once with no pending exception.
TEST_F(AutoFuncTests, can_infer_with_cpu_help) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
    auto req = compiled_model.create_infer_request();
    bool is_called = false;
    ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
        // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
        ASSERT_EQ(exception_ptr, nullptr);
        is_called = true;
    }));
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_TRUE(is_called);
}

// The implementation must not keep an extra copy of the user callback: after
// the local shared_ptr copy leaves scope, the capture inside the callback must
// be the sole owner (use_count == 1) when the callback finally runs.
TEST_F(AutoFuncTests, impl_does_not_copy_callback) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
    ov::InferRequest req;
    ASSERT_NO_THROW(req = compiled_model.create_infer_request());
    {
        auto somePtr = std::make_shared<int>(42);
        ASSERT_NO_THROW(req.set_callback([somePtr](std::exception_ptr exception_ptr) {
            ASSERT_EQ(nullptr, exception_ptr);
            ASSERT_EQ(1, somePtr.use_count());
        }));
    }
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
}

// wait_for({}) is a pure status query: if it returns before the completion
// callback fired, it must report "not ready" rather than blocking or lying.
TEST_F(AutoFuncTests, return_result_not_ready) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
    ov::InferRequest req;
    ASSERT_NO_THROW(req = compiled_model.create_infer_request());
    std::promise<std::chrono::system_clock::time_point> callbackTimeStamp;
    auto callbackTimeStampFuture = callbackTimeStamp.get_future();
    // add a callback to the request and capture the timestamp
    ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
        if (exception_ptr) {
            callbackTimeStamp.set_exception(exception_ptr);
        } else {
            callbackTimeStamp.set_value(std::chrono::system_clock::now());
        }
    }));
    ASSERT_NO_THROW(req.start_async());
    bool ready = false;
    ASSERT_NO_THROW(ready = req.wait_for({}));
    // get timestamp taken AFTER return from the wait(STATUS_ONLY)
    const auto afterWaitTimeStamp = std::chrono::system_clock::now();
    if (afterWaitTimeStamp < callbackTimeStampFuture.get()) {
        ASSERT_FALSE(ready);
    }
    ASSERT_NO_THROW(req.wait());
}

// An exception thrown inside the user callback must be rethrown from wait().
TEST_F(AutoFuncTests, rethrow_if_callback_throw) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
    ov::InferRequest req;
    ASSERT_NO_THROW(req = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req.set_callback([](std::exception_ptr) {
        OPENVINO_THROW("Throw");
    }));
    ASSERT_NO_THROW(req.start_async());
    ASSERT_THROW(req.wait(), ov::Exception);
}

// Restarting the same request from inside its completion callback must work
// repeatedly (NUM_ITER chained restarts) and shut down cleanly afterwards.
TEST_F(AutoFuncTests, can_start_several_async_inside_completion_callback_with_safedtor) {
    const int NUM_ITER = 10;
    struct TestUserData {
        std::atomic<int> numIter = {0};   // completed-run counter
        std::promise<bool> promise;       // fulfilled after the final run
    };
    TestUserData data;
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
    ov::InferRequest req;
    ASSERT_NO_THROW(req = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
        if (exception_ptr) {
            data.promise.set_exception(exception_ptr);
        } else {
            // re-arm the request until NUM_ITER restarts have happened
            if (data.numIter.fetch_add(1) != NUM_ITER) {
                req.start_async();
            } else {
                data.promise.set_value(true);
            }
        }
    }));
    auto future = data.promise.get_future();
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    future.wait();
    auto callbackStatus = future.get();
    ASSERT_TRUE(callbackStatus);
    auto dataNumIter = data.numIter - 1;
    ASSERT_EQ(NUM_ITER, dataNumIter);
}

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "infer_consistency_test.hpp"
using namespace ov::auto_plugin::tests;
namespace {
// Device-priority combinations exercised: both mock devices in either order,
// and MOCK_GPU alone.
auto props = []() {
    return std::vector<ov::AnyMap>{{ov::device::priorities("MOCK_GPU", "MOCK_CPU")},
                                   {ov::device::priorities("MOCK_GPU")},
                                   {ov::device::priorities("MOCK_CPU", "MOCK_GPU")}};
};
const std::vector<bool> get_vs_set{true, false};  // exercise both get_tensor and set_tensor input paths
const std::vector<std::string> target_device{"AUTO", "MULTI"};
INSTANTIATE_TEST_SUITE_P(AutoFuncTests,
                         Consistency_Test,
                         ::testing::Combine(::testing::ValuesIn(target_device),
                                            ::testing::ValuesIn(get_vs_set),
                                            ::testing::ValuesIn(props())),
                         Consistency_Test::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,105 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "auto_func_test.hpp"
#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "ov_models/subgraph_builders.hpp"
namespace ov {
namespace auto_plugin {
namespace tests {

using consistency_test_param = std::tuple<std::string,  // device name
                                          bool,         // get or set blob
                                          ov::AnyMap>;  // property

// Verifies that repeated async inference through AUTO/MULTI produces outputs
// identical to a reference interpretation of the same model and inputs.
class Consistency_Test : public AutoFuncTests, public testing::WithParamInterface<consistency_test_param> {
    void SetUp() override {
        AutoFuncTests::SetUp();
        std::tie(target_device, use_get_tensor, property) = this->GetParam();
    };

public:
    // Encodes device, tensor-feeding mode, and properties into the test name.
    static std::string getTestCaseName(const testing::TestParamInfo<consistency_test_param>& obj) {
        ov::AnyMap property;
        bool use_get_tensor;
        std::string target_device;
        std::tie(target_device, use_get_tensor, property) = obj.param;
        std::ostringstream result;
        result << "target_device=" << target_device << "_";
        result << std::string(use_get_tensor ? "_get_blob" : "_set_blob") << "_";
        if (!property.empty()) {
            for (auto& iter : property) {
                result << "priority=" << iter.first << "_" << iter.second.as<std::string>();
            }
        }
        return result.str();
    }

protected:
    bool use_get_tensor;        // true: fill the request's own tensor; false: set a user tensor
    ov::AnyMap property;        // compile-time properties (device priorities)
    std::string target_device;  // "AUTO" or "MULTI"

    // Creates the optimal number of requests, feeds random-ish inputs, runs 50
    // rounds of async inference, then compares every request's output with the
    // interpreter reference computed for its inputs.
    void run() {
        std::vector<ov::InferRequest> irs;
        std::vector<std::vector<ov::Tensor>> ref;
        std::map<std::shared_ptr<ov::Node>, ov::Tensor> input_data;

        auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
        auto inputs = compiled_model.inputs();
        auto outputs = compiled_model.outputs();
        auto num_requests = compiled_model.get_property(ov::optimal_number_of_infer_requests);

        for (size_t j = 0; j < num_requests; j++) {
            auto inf_req = compiled_model.create_infer_request();
            irs.push_back(inf_req);
            for (auto& iter : inputs) {
                auto tensor = ov::test::utils::create_and_fill_tensor(iter.get_element_type(), iter.get_shape());
                if (use_get_tensor)
                    // copy into the tensor the request already owns
                    memcpy(reinterpret_cast<uint8_t*>(inf_req.get_tensor(iter).data()),
                           reinterpret_cast<const uint8_t*>(tensor.data()),
                           tensor.get_byte_size());
                else
                    inf_req.set_tensor(iter, tensor);
                auto node_ptr = iter.get_node_shared_ptr();
                input_data.insert({std::const_pointer_cast<ov::Node>(node_ptr), tensor});
            }
            for (auto& iter : outputs) {
                if (!use_get_tensor) {
                    auto tensor = ov::Tensor(iter.get_element_type(), iter.get_shape());
                    inf_req.set_tensor(iter, tensor);
                }
            }
            // reference result for this request's inputs
            auto refOutData = ngraph::helpers::interpretFunction(model_cannot_batch, input_data);
            ref.push_back(refOutData);
        }
        // stress the requests: 50 rounds of start_async + wait on all of them
        for (size_t i = 0; i < 50; i++) {
            for (auto ir : irs) {
                ir.start_async();
            }
            for (auto ir : irs) {
                ir.wait();
            }
        }
        for (size_t i = 0; i < irs.size(); ++i) {
            for (auto& iter : outputs) {
                // NOTE(review): always compares against ref[i][0]; correct while
                // the model has a single output — confirm if multi-output models
                // are ever used here.
                ov::test::utils::compare(irs[i].get_tensor(iter), ref[i][0]);
            }
        }
    }
};

TEST_P(Consistency_Test, infer_consistency_test) {
    run();
}

}  // namespace tests
}  // namespace auto_plugin
}  // namespace ov

View File

@ -0,0 +1,114 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <future>
#include "auto_func_test.hpp"
using namespace ov::auto_plugin::tests;
// Three requests from the same compiled model must each complete a synchronous
// infer() when driven from three concurrent threads.
TEST_F(AutoFuncTests, can_run_3syncrequests_consistently_from_threads) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    ov::InferRequest req1, req2, req3;
    ASSERT_NO_THROW(req1 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req2 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req3 = compiled_model.create_infer_request());
    auto f1 = std::async(std::launch::async, [&] {
        req1.infer();
    });
    auto f2 = std::async(std::launch::async, [&] {
        req2.infer();
    });
    auto f3 = std::async(std::launch::async, [&] {
        req3.infer();
    });

    f1.wait();
    f2.wait();
    f3.wait();

    // get() rethrows any exception escaping the worker threads
    ASSERT_NO_THROW(f1.get());
    ASSERT_NO_THROW(f2.get());
    ASSERT_NO_THROW(f3.get());
}

// start_async() may be issued from different threads without a matching wait();
// none of the calls may throw.
TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_from_threads_without_wait) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    ov::InferRequest req1, req2, req3;
    ASSERT_NO_THROW(req1 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req2 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req3 = compiled_model.create_infer_request());
    // warm up each request with a synchronous infer first
    ASSERT_NO_THROW(req1.infer());
    ASSERT_NO_THROW(req2.infer());
    ASSERT_NO_THROW(req3.infer());

    auto f1 = std::async(std::launch::async, [&] {
        req1.start_async();
    });
    auto f2 = std::async(std::launch::async, [&] {
        req2.start_async();
    });
    auto f3 = std::async(std::launch::async, [&] {
        req3.start_async();
    });

    f1.wait();
    f2.wait();
    f3.wait();

    ASSERT_NO_THROW(f1.get());
    ASSERT_NO_THROW(f2.get());
    ASSERT_NO_THROW(f3.get());
}

// Sequential start_async()/wait() pairs on three requests must all succeed.
TEST_F(AutoFuncTests, can_run_3asyncrequests_consistently_with_wait) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    ov::InferRequest req1, req2, req3;
    ASSERT_NO_THROW(req1 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req2 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req3 = compiled_model.create_infer_request());

    req1.start_async();
    ASSERT_NO_THROW(req1.wait());

    req2.start_async();
    ASSERT_NO_THROW(req2.wait());

    req3.start_async();
    ASSERT_NO_THROW(req3.wait());
}

// Three requests started back-to-back and waited on out of order must all
// complete without throwing.
TEST_F(AutoFuncTests, can_run_3asyncrequests_parallel_with_wait) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    ov::InferRequest req1, req2, req3;
    ASSERT_NO_THROW(req1 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req2 = compiled_model.create_infer_request());
    ASSERT_NO_THROW(req3 = compiled_model.create_infer_request());

    req1.start_async();
    req2.start_async();
    req3.start_async();

    ASSERT_NO_THROW(req2.wait());
    ASSERT_NO_THROW(req1.wait());
    ASSERT_NO_THROW(req3.wait());
}

View File

@ -0,0 +1,172 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "io_tensor.hpp"
#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp"
using namespace ov::auto_plugin::tests;
// Caches the single input/output ports of the shared test model. Fixed to
// compile with the parameterized device and properties (the original hardcoded
// "AUTO" with fixed priorities, ignoring the MULTI instantiation's parameters).
void InferRequest_IOTensor_Test::SetUp() {
    AutoFuncTests::SetUp();
    std::tie(target_device, property) = this->GetParam();
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    input = compiled_model.input();
    output = compiled_model.output();
}
void InferRequest_IOTensor_Test::TearDown() {
    // Drop the cached ports before the base fixture tears the core down.
    input = {};
    output = {};
    AutoFuncTests::TearDown();
}
// Setting an empty tensor for an input must be rejected.
TEST_P(InferRequest_IOTensor_Test, fail_to_set_nullptr_for_input) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ASSERT_THROW(req.set_tensor(input, {}), ov::Exception);
}

// Setting an empty tensor for an output must be rejected.
TEST_P(InferRequest_IOTensor_Test, fail_to_set_nullptr_for_output) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ASSERT_THROW(req.set_tensor(output, {}), ov::Exception);
}

// A tensor set by the user must be returned as-is by get_tensor: same data
// pointer, element type and shape.
TEST_P(InferRequest_IOTensor_Test, can_set_and_get_input) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape());
    ASSERT_NO_THROW(req.set_tensor(input, tensor));
    ov::Tensor actual_tensor;
    ASSERT_NO_THROW(actual_tensor = req.get_tensor(input));

    ASSERT_TRUE(actual_tensor);
    ASSERT_NE(nullptr, actual_tensor.data());
    ASSERT_EQ(tensor.data(), actual_tensor.data());
    ASSERT_EQ(input.get_element_type(), actual_tensor.get_element_type());
    ASSERT_EQ(input.get_shape(), actual_tensor.get_shape());
}

// set_tensor by a name that does not exist in the model must throw.
TEST_P(InferRequest_IOTensor_Test, fail_to_set_tensor_with_incorrect_name) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape());
    ASSERT_THROW(req.set_tensor("incorrect_input", tensor), ov::Exception);
}

// An input tensor with a mismatched shape (doubled batch) must be rejected.
TEST_P(InferRequest_IOTensor_Test, fail_input_set_size_incorrect) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    auto shape = input.get_shape();
    shape[0] *= 2;
    auto tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), shape);
    ASSERT_THROW(req.set_tensor(input, tensor), ov::Exception);
}

// An output tensor with a mismatched shape (doubled batch) must be rejected.
TEST_P(InferRequest_IOTensor_Test, fail_output_set_size_incorrect) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    auto shape = output.get_shape();
    shape[0] *= 2;
    auto tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), shape);
    ASSERT_THROW(req.set_tensor(output, tensor), ov::Exception);
}

// Two consecutive get_tensor calls for the same input must return the same
// underlying buffer.
TEST_P(InferRequest_IOTensor_Test, second_call_get_input) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ov::Tensor tensor1, tensor2;
    ASSERT_NO_THROW(tensor1 = req.get_tensor(input));
    ASSERT_NO_THROW(tensor2 = req.get_tensor(input));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

// Two consecutive get_tensor calls for the same output must return the same
// underlying buffer.
TEST_P(InferRequest_IOTensor_Test, second_call_get_output) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ov::Tensor tensor1, tensor2;
    ASSERT_NO_THROW(tensor1 = req.get_tensor(output));
    ASSERT_NO_THROW(tensor2 = req.get_tensor(output));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

// The input buffer must stay stable across sync and async inference.
TEST_P(InferRequest_IOTensor_Test, second_call_get_input_after_async) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ov::Tensor tensor1, tensor2;
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(tensor1 = req.get_tensor(input));
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_NO_THROW(tensor2 = req.get_tensor(input));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

// The output buffer must stay stable across sync and async inference.
TEST_P(InferRequest_IOTensor_Test, second_call_get_output_after_async) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ov::Tensor tensor1, tensor2;
    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(tensor1 = req.get_tensor(output));
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_NO_THROW(tensor2 = req.get_tensor(output));
    ASSERT_EQ(tensor1.data(), tensor2.data());
}

// Inference must run through user-provided input AND output tensors, and the
// request must keep using exactly those buffers.
TEST_P(InferRequest_IOTensor_Test, can_infer_with_set_tensor) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    auto input_tensor = ov::test::utils::create_and_fill_tensor(input.get_element_type(), input.get_shape());
    ASSERT_NO_THROW(req.set_tensor(input, input_tensor));
    auto output_tensor = ov::test::utils::create_and_fill_tensor(output.get_element_type(), output.get_shape());
    ASSERT_NO_THROW(req.set_tensor(output, output_tensor));
    ASSERT_NO_THROW(req.infer());

    auto actual_input_tensor = req.get_tensor(input);
    ASSERT_EQ(actual_input_tensor.data(), input_tensor.data());
    auto actual_output_tensor = req.get_tensor(output);
    ASSERT_EQ(actual_output_tensor.data(), output_tensor.data());
}

// Inference must still work after the request's tensors are reshaped away and
// back (imitates buffer reallocation by the user).
TEST_P(InferRequest_IOTensor_Test, can_infer_after_io_realloc) {
    auto compiled_model = core.compile_model(model_cannot_batch, target_device, property);
    req = compiled_model.create_infer_request();
    ov::Tensor input_tensor, output_tensor;
    auto in_shape = input.get_shape();
    auto out_shape = output.get_shape();

    // imitates blob reallocation
    ASSERT_NO_THROW(input_tensor = req.get_tensor(input));
    ASSERT_NO_THROW(input_tensor.set_shape({5, 5, 5, 5}));
    ASSERT_NO_THROW(input_tensor.set_shape(in_shape));

    ASSERT_NO_THROW(output_tensor = req.get_tensor(output));
    ASSERT_NO_THROW(output_tensor.set_shape({20, 20, 20, 20}));
    ASSERT_NO_THROW(output_tensor.set_shape(out_shape));

    ASSERT_NO_THROW(req.infer());
    ASSERT_NO_THROW(req.start_async());
    ASSERT_NO_THROW(req.wait());
    ASSERT_NO_THROW(req.get_tensor(output));
}
namespace {
// Device-priority combinations exercised: both mock devices in either order,
// and MOCK_GPU alone.
auto props = []() {
    return std::vector<ov::AnyMap>{{ov::device::priorities("MOCK_GPU", "MOCK_CPU")},
                                   {ov::device::priorities("MOCK_GPU")},
                                   {ov::device::priorities("MOCK_CPU", "MOCK_GPU")}};
};

INSTANTIATE_TEST_SUITE_P(AutoFuncTests,
                         InferRequest_IOTensor_Test,
                         ::testing::Combine(::testing::Values("AUTO"), ::testing::ValuesIn(props())),
                         InferRequest_IOTensor_Test::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(AutoFuncTestsCumu,
                         InferRequest_IOTensor_Test,
                         ::testing::Combine(::testing::Values("MULTI"), ::testing::ValuesIn(props())),
                         InferRequest_IOTensor_Test::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,51 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <memory>
#include <thread>
#include "auto_func_test.hpp"
#include "openvino/runtime/auto/properties.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/iplugin.hpp"
namespace ov {
namespace auto_plugin {
namespace tests {

// <target device name, compile-time properties>
using test_params = std::tuple<std::string, ov::AnyMap>;

// Parameterized fixture for input/output tensor handling tests; caches the
// single input/output ports of the shared test model (see SetUp in the .cpp).
class InferRequest_IOTensor_Test : public AutoFuncTests, public ::testing::WithParamInterface<test_params> {
public:
    // Encodes the device name and property set into the generated test name.
    static std::string getTestCaseName(testing::TestParamInfo<test_params> obj) {
        std::string target_device;
        ov::AnyMap configuration;
        std::tie(target_device, configuration) = obj.param;
        std::ostringstream result;
        result << "target_device=" << target_device << "_";
        if (!configuration.empty()) {
            for (auto& iter : configuration) {
                result << "priority=" << iter.first << "_" << iter.second.as<std::string>();
            }
        }
        return result.str();
    }

    void SetUp() override;
    void TearDown() override;

protected:
    std::string target_device;         // device under test ("AUTO" / "MULTI")
    ov::InferRequest req;
    ov::Output<const ov::Node> input;  // cached input port of the test model
    ov::Output<const ov::Node> output; // cached output port of the test model
    ov::AnyMap property;               // compile-time properties from the parameter
};

}  // namespace tests
}  // namespace auto_plugin
}  // namespace ov

View File

@ -0,0 +1,57 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_func_test.hpp"
#include "common_test_utils/include/common_test_utils/file_utils.hpp"
#include "openvino/runtime/make_tensor.hpp"
using namespace ov::auto_plugin::tests;
TEST_F(AutoFuncTests, tensor_life_time_with_batch_model) {
auto gpu_compiled_model = core.compile_model(model_can_batch, "MOCK_GPU");
auto gpu_request = gpu_compiled_model.create_infer_request();
auto input = gpu_compiled_model.input();
auto gpu_tensor = gpu_request.get_tensor(input);
auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor);
auto compiled_model = core.compile_model(
model_can_batch,
"AUTO",
{ov::device::priorities("MOCK_GPU"), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)});
auto request = compiled_model.create_infer_request();
auto tensor = request.get_tensor(input);
auto tensor_detail = ov::get_tensor_impl(gpu_tensor);
ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so);
}
// Same lifetime check as above, but with the default (latency) hint so no
// batching wrapper is inserted between AUTO and MOCK_GPU.
TEST_F(AutoFuncTests, tensor_life_time_with_batch_model_latency_hint) {
    auto gpu_compiled_model = core.compile_model(model_can_batch, "MOCK_GPU");
    auto gpu_request = gpu_compiled_model.create_infer_request();
    auto input = gpu_compiled_model.input();
    auto gpu_tensor = gpu_request.get_tensor(input);
    auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor);
    auto compiled_model = core.compile_model(model_can_batch, "AUTO", {ov::device::priorities("MOCK_GPU")});
    auto request = compiled_model.create_infer_request();
    auto tensor = request.get_tensor(input);
    // Fix: use the AUTO request's tensor — the original re-read gpu_tensor,
    // making the assertion vacuous and `tensor` unused.
    auto tensor_detail = ov::get_tensor_impl(tensor);
    ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so);
}
// Lifetime check for a model where batching cannot be applied: the tensor
// handed out by AUTO must still share its shared object with MOCK_GPU's.
TEST_F(AutoFuncTests, tensor_life_time_with_batch_not_applicable_model) {
    auto gpu_compiled_model = core.compile_model(model_cannot_batch, "MOCK_GPU");
    auto gpu_request = gpu_compiled_model.create_infer_request();
    auto input = gpu_compiled_model.input();
    auto gpu_tensor = gpu_request.get_tensor(input);
    auto gpu_tensor_detail = ov::get_tensor_impl(gpu_tensor);
    auto compiled_model = core.compile_model(
        model_cannot_batch,
        "AUTO",
        {ov::device::priorities("MOCK_GPU"), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)});
    auto request = compiled_model.create_infer_request();
    auto tensor = request.get_tensor(input);
    // Fix: inspect the AUTO tensor instead of gpu_tensor (self-comparison in
    // the original; `tensor` was unused).
    auto tensor_detail = ov::get_tensor_impl(tensor);
    ASSERT_EQ(tensor_detail._so, gpu_tensor_detail._so);
}

View File

@ -0,0 +1,99 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_func_test.hpp"
using namespace ov::auto_plugin::tests;
// MULTI with no explicit hint must default to THROUGHPUT.
TEST_F(AutoFuncTests, default_perfmode_for_multi) {
    const ov::AnyMap config{ov::device::priorities("MOCK_GPU", "MOCK_CPU")};
    auto compiled_model = core.compile_model(model_cannot_batch, "MULTI", config);
    const auto reported_mode = compiled_model.get_property(ov::hint::performance_mode);
    EXPECT_EQ(reported_mode, ov::hint::PerformanceMode::THROUGHPUT);
}
// Per-device secondary properties must survive MULTI compilation unchanged.
TEST_F(AutoFuncTests, respect_secondary_property_for_multi) {
    auto compiled_model = core.compile_model(
        model_cannot_batch,
        "MULTI",
        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
         ov::device::properties("MOCK_GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
         ov::device::properties("MOCK_CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))});
    EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT);
    // Expected per-device hint, exactly as configured above.
    const auto expected_mode = [](const std::string& device) {
        return device == "MOCK_CPU" ? ov::hint::PerformanceMode::LATENCY : ov::hint::PerformanceMode::THROUGHPUT;
    };
    auto device_properties = compiled_model.get_property(ov::device::properties.name()).as<ov::AnyMap>();
    for (const auto& device_entry : device_properties) {
        const auto& device = device_entry.first;
        if (device != "MOCK_CPU" && device != "MOCK_GPU")
            continue;
        for (const auto& prop : device_entry.second.as<ov::AnyMap>()) {
            if (prop.first == ov::hint::performance_mode) {
                EXPECT_EQ(prop.second, expected_mode(device));
            }
        }
    }
}
// Under CUMULATIVE_THROUGHPUT both hardware devices run in THROUGHPUT mode.
TEST_F(AutoFuncTests, default_perfmode_for_auto_ctput) {
    auto compiled_model =
        core.compile_model(model_cannot_batch,
                           "AUTO",
                           {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)});
    EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode),
              ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT);
    auto device_properties = compiled_model.get_property(ov::device::properties.name()).as<ov::AnyMap>();
    for (const auto& device_entry : device_properties) {
        const bool hw_device = device_entry.first == "MOCK_CPU" || device_entry.first == "MOCK_GPU";
        for (const auto& prop : device_entry.second.as<ov::AnyMap>()) {
            if (prop.first == ov::hint::performance_mode && hw_device) {
                EXPECT_EQ(prop.second, ov::hint::PerformanceMode::THROUGHPUT);
            }
        }
    }
}
// Plain AUTO defaults to LATENCY and both devices inherit that hint.
TEST_F(AutoFuncTests, default_perfmode_for_auto) {
    auto compiled_model =
        core.compile_model(model_cannot_batch, "AUTO", {ov::device::priorities("MOCK_GPU", "MOCK_CPU")});
    EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::LATENCY);
    auto device_properties = compiled_model.get_property(ov::device::properties.name()).as<ov::AnyMap>();
    for (const auto& device_entry : device_properties) {
        const bool hw_device = device_entry.first == "MOCK_CPU" || device_entry.first == "MOCK_GPU";
        for (const auto& prop : device_entry.second.as<ov::AnyMap>()) {
            if (prop.first == ov::hint::performance_mode && hw_device) {
                EXPECT_EQ(prop.second, ov::hint::PerformanceMode::LATENCY);
            }
        }
    }
}
// Explicit per-device hints take precedence over the CTPUT default.
TEST_F(AutoFuncTests, respect_secondary_property_auto_ctput) {
    auto compiled_model = core.compile_model(
        model_cannot_batch,
        "AUTO",
        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
         ov::device::properties("MOCK_GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
         ov::device::properties("MOCK_CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))});
    EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode),
              ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT);
    // Expected per-device hint, exactly as configured above.
    const auto expected_mode = [](const std::string& device) {
        return device == "MOCK_CPU" ? ov::hint::PerformanceMode::LATENCY : ov::hint::PerformanceMode::THROUGHPUT;
    };
    auto device_properties = compiled_model.get_property(ov::device::properties.name()).as<ov::AnyMap>();
    for (const auto& device_entry : device_properties) {
        const auto& device = device_entry.first;
        if (device != "MOCK_CPU" && device != "MOCK_GPU")
            continue;
        for (const auto& prop : device_entry.second.as<ov::AnyMap>()) {
            if (prop.first == ov::hint::performance_mode) {
                EXPECT_EQ(prop.second, expected_mode(device));
            }
        }
    }
}

View File

@ -0,0 +1,104 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_func_test.hpp"
using namespace ov::auto_plugin::tests;
// Runs one regular (host-memory) inference and one with a remote tensor
// allocated on the MOCK_GPU context through the same MULTI compiled model.
TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity) {
    auto input = model_cannot_batch->get_parameters().at(0);
    auto output = model_cannot_batch->get_results().at(0);
    ov::CompiledModel compiled_model =
        core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_GPU")});
    // Host-memory path first.
    auto host_tensor = ov::Tensor(input->get_element_type(), input->get_shape());
    auto inf_req_regular = compiled_model.create_infer_request();
    inf_req_regular.set_tensor(input, host_tensor);
    ASSERT_NO_THROW(inf_req_regular.infer());
    auto output_tensor_regular = inf_req_regular.get_tensor(output);
    // Remote-tensor path.
    auto gpu_context = core.get_default_context("MOCK_GPU");
    auto remote_tensor = gpu_context.create_tensor(input->get_element_type(), input->get_shape());
    auto infer_req_remote = compiled_model.create_infer_request();
    infer_req_remote.set_tensor(input, remote_tensor);
    ASSERT_NO_THROW(infer_req_remote.start_async());
    // No actual inference for the remote tensor: its data cannot be mapped.
    infer_req_remote.wait();
}
// A remote tensor created on MOCK_GPU cannot be consumed by a compiled model
// that only targets another device (MOCK_3): wait() must throw.
TEST_F(AutoFuncTests, cannot_infer_remote_if_not_initialized_for_device) {
    // Compile once per hardware plugin so mock_engine fixes the plugin order.
    core.compile_model(model_cannot_batch, "MOCK_CPU");
    core.compile_model(model_cannot_batch, "MOCK_GPU");
    register_plugin_mock_gpu(core, "MOCK_3", {});  // simulate a second GPU-like device
    auto input = model_cannot_batch->get_parameters().at(0);
    auto gpu_context = core.get_default_context("MOCK_GPU");
    auto remote_tensor = gpu_context.create_tensor(input->get_element_type(), input->get_shape());
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model =
                        core.compile_model(model_cannot_batch, "MULTI", {ov::device::priorities("MOCK_3")}));
    auto infer_req_remote = compiled_model.create_infer_request();
    infer_req_remote.set_tensor(input, remote_tensor);
    ASSERT_NO_THROW(infer_req_remote.start_async());
    ASSERT_THROW(infer_req_remote.wait(), ov::Exception);
}
// Two devices, one remote tensor per device: each request infers with the
// tensor that is affine to one of the compiled model's devices.
TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices) {
    // Fix the plugin order in mock_engine, then register an extra GPU-like device.
    core.compile_model(model_cannot_batch, "MOCK_CPU");
    core.compile_model(model_cannot_batch, "MOCK_GPU");
    register_plugin_mock_gpu(core, "MOCK_3", {});
    auto input = model_cannot_batch->get_parameters().at(0);
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch,
                                                        "MULTI",
                                                        {ov::device::priorities("MOCK_GPU", "MOCK_3")}));
    auto gpu_context = core.get_default_context("MOCK_GPU");
    auto remote_tensor = gpu_context.create_tensor(input->get_element_type(), input->get_shape());
    ASSERT_EQ(remote_tensor.get_device_name(), "MOCK_GPU");
    auto gpu_context_2 = core.get_default_context("MOCK_3");
    auto remote_tensor_2 = gpu_context_2.create_tensor(input->get_element_type(), input->get_shape());
    ASSERT_EQ(remote_tensor_2.get_device_name(), "MOCK_3");
    auto request = compiled_model.create_infer_request();
    request.set_tensor(input, remote_tensor);
    auto request_2 = compiled_model.create_infer_request();
    request_2.set_tensor(input, remote_tensor_2);
    // Both async inferences with device-affine remote tensors must succeed.
    ASSERT_NO_THROW(request.start_async());
    ASSERT_NO_THROW(request_2.start_async());
    ASSERT_NO_THROW(request.wait());
    ASSERT_NO_THROW(request_2.wait());
}
// The compiled model targets "MOCK_GPU.1"; a remote tensor from plain
// "MOCK_GPU" belongs to a different device id, so wait() must report it.
TEST_F(AutoFuncTests, can_create_remotetensor_then_infer_with_affinity_2_devices_device_id) {
    auto input = model_cannot_batch->get_parameters().at(0);
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch,
                                                        "MULTI",
                                                        {ov::device::priorities("MOCK_GPU.1", "MOCK_CPU")}));
    auto gpu_context = core.get_default_context("MOCK_GPU");
    auto remote_tensor = gpu_context.create_tensor(input->get_element_type(), input->get_shape());
    ASSERT_EQ(remote_tensor.get_device_name(), "MOCK_GPU");
    auto request = compiled_model.create_infer_request();
    request.set_tensor(input, remote_tensor);
    ASSERT_NO_THROW(request.start_async());
    ASSERT_THROW_WITH_MESSAGE(request.wait(),
                              ov::Exception,
                              "None of the devices supports a remote tensor created on the device named MOCK_GPU");
}
// With device_bind_buffer each request pins device buffers, so creating more
// requests than the reported optimal number must fail.
TEST_F(AutoFuncTests, can_throw_if_oversubsciption_of_inferrequest) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_cannot_batch,
                        "MULTI",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"), ov::intel_auto::device_bind_buffer(true)}));
    const auto optimal_num = compiled_model.get_property(ov::optimal_number_of_infer_requests);
    // Creating up to the optimal number succeeds...
    for (size_t created = 0; created < optimal_num; ++created) {
        compiled_model.create_infer_request();
    }
    // ...one more must throw.
    ASSERT_THROW(compiled_model.create_infer_request(), ov::Exception);
}

View File

@ -0,0 +1,73 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <thread>
#include "auto_func_test.hpp"
#include "openvino/runtime/exception.hpp"
using namespace ov::auto_plugin::tests;
// Sync infer followed by async infer + wait must all complete cleanly and the
// output tensor must be retrievable afterwards.
TEST_F(AutoFuncTests, can_infer_and_wait_for_result) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    auto request = compiled_model.create_infer_request();
    auto input = compiled_model.input();
    auto output = compiled_model.output();
    ov::Tensor tensor;
    ASSERT_NO_THROW(tensor = request.get_tensor(input));
    ASSERT_NO_THROW(request.infer());
    ASSERT_NO_THROW(request.start_async());
    ASSERT_NO_THROW(request.wait());
    ASSERT_NO_THROW(tensor = request.get_tensor(output));
}
// wait()/wait_for() on an idle request must be harmless no-ops.
TEST_F(AutoFuncTests, can_wait_without_startasync) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    auto request = compiled_model.create_infer_request();
    ASSERT_NO_THROW(request.wait());
    ASSERT_NO_THROW(request.wait_for({}));
    ASSERT_NO_THROW(request.wait_for(std::chrono::milliseconds{1}));
}
// While an async inference is in flight, set_tensor() may only fail with
// ov::Busy; any other exception escapes the try/catch and fails the test.
TEST_F(AutoFuncTests, can_throw_if_request_busy) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    auto request = compiled_model.create_infer_request();
    auto input = compiled_model.input();
    auto output = compiled_model.output();
    auto input_tensor = request.get_tensor(input);
    ASSERT_NO_THROW(request.wait_for({}));
    ASSERT_NO_THROW(request.start_async());
    ASSERT_NO_THROW(try { request.set_tensor(input, input_tensor); } catch (const ov::Busy&) {});
    ASSERT_NO_THROW(request.wait_for({}));
    ASSERT_NO_THROW(request.wait());
}
// get_tensor() on a running request may only throw ov::Busy.
TEST_F(AutoFuncTests, can_throw_on_get_tensor_if_request_busy) {
    ov::CompiledModel compiled_model;
    ASSERT_NO_THROW(compiled_model = core.compile_model(
                        model_can_batch,
                        "AUTO",
                        {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
                         ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
    auto request = compiled_model.create_infer_request();
    auto input = compiled_model.input();
    ASSERT_NO_THROW(request.start_async());
    ASSERT_NO_THROW(try { request.get_tensor(input); } catch (const ov::Busy&) {});
    ASSERT_NO_THROW(request.wait());
}

View File

@ -0,0 +1,44 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/executable_network/exec_network_base.hpp"
#include "ie_plugin_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// AUTO/MULTI delegate to the TEMPLATE plugin in these behavior runs.
const std::vector<std::map<std::string, std::string>> auto_configs = {
    {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}};

// Basic executable-network behavior checks for MULTI and AUTO.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         ExecutableNetworkBaseTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(auto_configs)),
                         ExecutableNetworkBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         ExecutableNetworkBaseTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(auto_configs)),
                         ExecutableNetworkBaseTest::getTestCaseName);

// Precisions exercised by the set-precision checks below.
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                               InferenceEngine::Precision::U8,
                                                               InferenceEngine::Precision::I16,
                                                               InferenceEngine::Precision::U16};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         ExecNetSetPrecision,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(auto_configs)),
                         ExecNetSetPrecision::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         ExecNetSetPrecision,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(auto_configs)),
                         ExecNetSetPrecision::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/executable_network/get_metric.hpp"
using namespace BehaviorTestsDefinitions;
using namespace InferenceEngine::PluginConfigParams;
namespace {
//
// Executable Network GetMetric
//
// Each metric suite runs against AUTO and MULTI layered over TEMPLATE.
INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest,
                         IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
                         ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest,
                         IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
                         ::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest,
                         IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
                         ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest,
                         IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
                         ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassExecutableNetworkGetMetricTest,
                         IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
                         ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"));
}  // namespace

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request/callback.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// Route AUTO/MULTI to the TEMPLATE plugin for the callback behavior suite.
const std::vector<std::map<std::string, std::string>> multiConfigs = {
    {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestCallbackTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multiConfigs)),
                         InferRequestCallbackTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestCallbackTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(multiConfigs)),
                         InferRequestCallbackTests::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,29 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request/io_blob.hpp"
#include <vector>
#include "ie_plugin_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// Route AUTO/MULTI to the TEMPLATE plugin for the IO-blob behavior suite.
const std::vector<std::map<std::string, std::string>> Autoconfigs = {
    {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}},
};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestIOBBlobTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Autoconfigs)),
                         InferRequestIOBBlobTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestIOBBlobTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Autoconfigs)),
                         InferRequestIOBBlobTest::getTestCaseName);
}  // namespace

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <common_test_utils/test_constants.hpp>
#include "behavior/infer_request/memory_states.hpp"
#include <common_test_utils/test_constants.hpp>
#include "functional_test_utils/plugin_cache.hpp"
#include "ov_models/builders.hpp"
@ -11,17 +13,17 @@ using namespace BehaviorTestsDefinitions;
namespace {
std::vector<memoryStateParams> memoryStateTestCases = {
#ifdef ENABLE_INTEL_CPU
memoryStateParams(InferRequestVariableStateTest::getNetwork(),
{"c_1-3", "r_1-3"},
ov::test::utils::DEVICE_AUTO,
{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}),
memoryStateParams(InferRequestVariableStateTest::getNetwork(),
{"c_1-3", "r_1-3"},
ov::test::utils::DEVICE_MULTI,
{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES),
ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU}})
#endif
};
{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}})};
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
InferRequestQueryStateExceptionTest,
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
InferRequestVariableStateTest,
::testing::ValuesIn(memoryStateTestCases),
InferRequestQueryStateExceptionTest::getTestCaseName);
InferRequestVariableStateTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,27 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request/multithreading.hpp"
#include "ie_plugin_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// Route AUTO/MULTI to the TEMPLATE plugin for the multithreading suite.
const std::vector<std::map<std::string, std::string>> Multiconfigs = {
    {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestMultithreadingTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Multiconfigs)),
                         InferRequestMultithreadingTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestMultithreadingTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Multiconfigs)),
                         InferRequestMultithreadingTests::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,24 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request/perf_counters.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// Route AUTO/MULTI to the TEMPLATE plugin for the perf-counters suite.
const std::vector<std::map<std::string, std::string>> Autoconfigs = {
    {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestPerfCountersTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Autoconfigs)),
                         InferRequestPerfCountersTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestPerfCountersTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Autoconfigs)),
                         InferRequestPerfCountersTest::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request/set_blob_by_type.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace BehaviorTestsDefinitions;
using namespace InferenceEngine;
// Blob kinds exercised by the set-blob-by-type suite.
const std::vector<FuncTestUtils::BlobType> BlobTypes = {
    FuncTestUtils::BlobType::Compound,
    FuncTestUtils::BlobType::Batched,
    FuncTestUtils::BlobType::Memory,
};

// Route AUTO/MULTI to the TEMPLATE plugin.
const std::map<std::string, std::string> autoConfig{
    {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}};

INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi,
                         InferRequestSetBlobByType,
                         ::testing::Combine(::testing::ValuesIn(BlobTypes),
                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::Values(autoConfig)),
                         InferRequestSetBlobByType::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto,
                         InferRequestSetBlobByType,
                         ::testing::Combine(::testing::ValuesIn(BlobTypes),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::Values(autoConfig)),
                         InferRequestSetBlobByType::getTestCaseName);

View File

@ -0,0 +1,28 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/infer_request/wait.hpp"
#include <vector>
#include "ie_plugin_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// Route AUTO/MULTI to the TEMPLATE plugin for the wait() behavior suite.
const std::vector<std::map<std::string, std::string>> Autoconfigs = {
    {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestWaitTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Autoconfigs)),
                         InferRequestWaitTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestWaitTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Autoconfigs)),
                         InferRequestWaitTests::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,30 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/compiled_model/properties.hpp"
#include "openvino/runtime/core.hpp"
using namespace ov::test::behavior;
using namespace InferenceEngine::PluginConfigParams;
namespace {
//
// Executable Network GetMetric
//
// Property getters on compiled models, for MULTI and AUTO over TEMPLATE.
INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest,
                         OVClassCompiledModelGetPropertyTest,
                         ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"));

//
// Executable Network GetConfig / SetConfig
//
INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetIncorrectPropertyTest,
                         OVClassCompiledModelGetIncorrectPropertyTest,
                         ::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"));
//////////////////////////////////////////////////////////////////////////////////////////
}  // namespace

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/compiled_model/compiled_model_base.hpp"
#include "ie_plugin_config.hpp"
using namespace ov::test::behavior;
namespace {
// AUTO/MULTI delegate to the TEMPLATE plugin.
const std::vector<ov::AnyMap> multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVCompiledModelBaseTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multiConfigs)),
                         OVCompiledModelBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVCompiledModelBaseTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(multiConfigs)),
                         OVCompiledModelBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVCompiledModelBaseTestOptional,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multiConfigs)),
                         OVCompiledModelBaseTestOptional::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVCompiledModelBaseTestOptional,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(multiConfigs)),
                         OVCompiledModelBaseTestOptional::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <common_test_utils/test_constants.hpp>
#include "behavior/compiled_model/import_export.hpp"
#include "ie_plugin_config.hpp"
using namespace ov::test::behavior;
namespace {
// Element types exercised by the import/export round-trip.
const std::vector<ov::element::Type_t> netPrecisions = {
    ov::element::i8,
    ov::element::i16,
    ov::element::i32,
    ov::element::i64,
    ov::element::u8,
    ov::element::u16,
    ov::element::u32,
    ov::element::u64,
    ov::element::f16,
    ov::element::f32,
};

// AUTO delegates to the TEMPLATE plugin.
const std::vector<ov::AnyMap> auto_configs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVCompiledGraphImportExportTest,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(auto_configs)),
                         OVCompiledGraphImportExportTest::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,144 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/compiled_model/properties.hpp"
#include "ie_system_conf.h"
#include "openvino/runtime/properties.hpp"
using namespace ov::test::behavior;
namespace {
// Properties no device should accept — used for the negative checks.
const std::vector<ov::AnyMap> inproperties = {
    {ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")},
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
                         OVClassCompiledModelPropertiesIncorrectTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI, "AUTO:TEMPLATE"),
                                            ::testing::ValuesIn(inproperties)),
                         OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName);

// Platform-dependent default affinity, mirroring the runtime's own selection
// logic (NUMA info is only consulted on Apple/Windows here).
#if (defined(__APPLE__) || defined(_WIN32))
auto default_affinity = [] {
    auto numaNodes = InferenceEngine::getAvailableNUMANodes();
    auto coreTypes = InferenceEngine::getAvailableCoresTypes();
    if (coreTypes.size() > 1) {
        return ov::Affinity::HYBRID_AWARE;
    } else if (numaNodes.size() > 1) {
        return ov::Affinity::NUMA;
    } else {
        return ov::Affinity::NONE;
    }
}();
#else
auto default_affinity = [] {
    auto coreTypes = InferenceEngine::getAvailableCoresTypes();
    if (coreTypes.size() > 1) {
        return ov::Affinity::HYBRID_AWARE;
    } else {
        return ov::Affinity::CORE;
    }
}();
#endif

// Valid property sets for MULTI over the TEMPLATE plugin.
const std::vector<ov::AnyMap> multi_properties = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::num_streams(ov::streams::AUTO)},
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
     {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
      InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVClassCompiledModelPropertiesTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multi_properties)),
                         OVClassCompiledModelPropertiesTests::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice,
                         OVCompiledModelIncorrectDevice,
                         ::testing::Values("TEMPLATE"));

// Secondary (per-device) properties passed through AUTO/MULTI; the three
// entries cover the map-free, variadic and explicit-AnyMap spellings.
const std::vector<ov::AnyMap> auto_multi_device_properties = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::device::properties("TEMPLATE", ov::num_streams(4))},
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
     ov::device::properties("TEMPLATE", ov::num_streams(4), ov::enable_profiling(true))},
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
     ov::device::properties(ov::AnyMap{{"TEMPLATE", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})}};

INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow,
                         OVClassCompiledModelPropertiesTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
                                                              ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(auto_multi_device_properties)),
                         OVClassCompiledModelPropertiesTests::getTestCaseName);

const std::vector<ov::AnyMap> configsWithSecondaryProperties = {
    {ov::device::properties("TEMPLATE", ov::num_streams(4))},
    {ov::device::properties("TEMPLATE",
                            ov::num_streams(4),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}};

const std::vector<ov::AnyMap> autoConfigsWithSecondaryProperties = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
     ov::device::properties("AUTO",
                            ov::enable_profiling(false),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
     ov::device::properties("TEMPLATE",
                            ov::num_streams(4),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
     ov::device::properties("AUTO",
                            ov::enable_profiling(false),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)),
     ov::device::properties("TEMPLATE",
                            ov::num_streams(4),
                            ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}};

// IE Class Load network
INSTANTIATE_TEST_SUITE_P(smoke_CPUOVClassCompileModelWithCorrectPropertiesTest,
                         OVClassCompileModelWithCorrectPropertiesTest,
                         ::testing::Combine(::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"),
                                            ::testing::ValuesIn(configsWithSecondaryProperties)));
INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassCompileModelWithCorrectPropertiesTest,
                         OVClassCompileModelWithCorrectPropertiesTest,
                         ::testing::Combine(::testing::Values("MULTI"),
                                            ::testing::ValuesIn(autoConfigsWithSecondaryProperties)));
INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectPropertiesTest,
                         OVClassCompileModelWithCorrectPropertiesTest,
                         ::testing::Combine(::testing::Values("AUTO"),
                                            ::testing::ValuesIn(autoConfigsWithSecondaryProperties)));

// Pair of (config, expected execution device) for the exec-device query.
const std::vector<std::pair<ov::AnyMap, std::string>> automultiExeDeviceConfigs = {
    std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}}, "TEMPLATE")};

INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests,
                         OVCompileModelGetExecutionDeviceTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
                                                              ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(automultiExeDeviceConfigs)),
                         OVCompileModelGetExecutionDeviceTests::getTestCaseName);

const std::vector<ov::AnyMap> multiDevicePriorityConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest,
                         OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY,
                         ::testing::Combine(::testing::Values("MULTI", "AUTO"),
                                            ::testing::ValuesIn(multiDevicePriorityConfigs)),
                         OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY::getTestCaseName);

// All supported model-priority levels.
const std::vector<ov::AnyMap> multiModelPriorityConfigs = {{ov::hint::model_priority(ov::hint::Priority::HIGH)},
                                                           {ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
                                                           {ov::hint::model_priority(ov::hint::Priority::LOW)},
                                                           {ov::hint::model_priority(ov::hint::Priority::DEFAULT)}};

INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest,
                         OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY,
                         ::testing::Combine(::testing::Values("AUTO:TEMPLATE"),
                                            ::testing::ValuesIn(multiModelPriorityConfigs)));
}  // namespace

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/callback.hpp"
#include <vector>
using namespace ov::test::behavior;
namespace {
// AUTO/MULTI delegate to the TEMPLATE plugin for the OV 2.0 callback suite.
const std::vector<ov::AnyMap> multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVInferRequestCallbackTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multiConfigs)),
                         OVInferRequestCallbackTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferRequestCallbackTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(multiConfigs)),
                         OVInferRequestCallbackTests::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,47 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/infer_request_dynamic.hpp"
#include <vector>
using namespace ov::test::behavior;
namespace {
// AUTO delegates to the TEMPLATE plugin for the dynamic-shape tests below.
const std::vector<ov::AnyMap> AutoConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};
// Builds a small test model:
//   Param "input_tensor" (1x4x20x20) -> Split(axis=1, 2 outputs)
//   -> branch A: Add(const) -> Relu
//   -> branch B: Multiply(const) -> Relu
//   -> Concat(axis=3), output named "concat".
// With the instantiation below the input is later made dynamic; e.g.
// {1,4,20,20} is expected to produce a {1,2,20,40} output.
// NOTE(review): makeConstant is called with an empty value vector and a
// trailing `true` — presumably generating the constant data internally;
// verify against ngraph::builder::makeConstant before reordering these calls.
std::shared_ptr<ngraph::Function> getFunction2() {
const std::vector<size_t> inputShape = {1, 4, 20, 20};
const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32;
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
params.front()->set_friendly_name("Param_1");
params.front()->get_output_tensor(0).set_names({"input_tensor"});
auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1);
auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector<float>{}, true);
auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD);
auto relu1 = std::make_shared<ngraph::opset1::Relu>(add);
auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector<float>{}, true);
auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY);
auto relu2 = std::make_shared<ngraph::opset1::Relu>(mult);
auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3);
concat->get_output_tensor(0).set_names({"concat"});
return std::make_shared<ngraph::Function>(concat, params, "SplitAddConcat");
}
// Run the dynamic-shape infer-request tests on AUTO with two
// (input shape -> expected output shape) pairs.
INSTANTIATE_TEST_SUITE_P(
smoke_Auto_BehaviorTests,
OVInferRequestDynamicTests,
::testing::Combine(::testing::Values(getFunction2()),
::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
{{1, 4, 20, 20}, {1, 2, 20, 40}},
{{2, 4, 20, 20}, {2, 2, 20, 40}}}),
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigs)),
OVInferRequestDynamicTests::getTestCaseName);
} // namespace

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/inference_chaining.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace ov::test::behavior;
namespace {
// AUTO is backed by the TEMPLATE plugin for the chaining scenarios.
const std::vector<ov::AnyMap> auto_template_configs = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

// Chained inference with dynamic models on AUTO.
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferenceChaining,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(auto_template_configs)),
                         OVInferenceChaining::getTestCaseName);

// The static-shape variant of the same chaining scenario.
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferenceChainingStatic,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(auto_template_configs)),
                         OVInferenceChainingStatic::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,74 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/io_tensor.hpp"
#include <vector>
using namespace ov::test::behavior;
namespace {
// Shared device config: AUTO/MULTI delegate to the TEMPLATE plugin.
// (An unused `emptyConfigs` vector was removed — nothing in this file
// referenced it.)
const std::vector<ov::AnyMap> Autoconfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

// Set/get IO tensor behavior through MULTI.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVInferRequestIOTensorTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Autoconfigs)),
                         OVInferRequestIOTensorTest::getTestCaseName);
// Set/get IO tensor behavior through AUTO.
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferRequestIOTensorTest,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Autoconfigs)),
                         OVInferRequestIOTensorTest::getTestCaseName);

// Element types exercised by the precision-related IO tensor tests.
std::vector<ov::element::Type> prcs = {
    ov::element::boolean,
    ov::element::bf16,
    ov::element::f16,
    ov::element::f32,
    ov::element::f64,
    ov::element::i4,
    ov::element::i8,
    ov::element::i16,
    ov::element::i32,
    ov::element::i64,
    ov::element::u1,
    ov::element::u4,
    ov::element::u8,
    ov::element::u16,
    ov::element::u32,
    ov::element::u64,
};
// Setting tensors of each precision on MULTI / AUTO.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVInferRequestIOTensorSetPrecisionTest,
                         ::testing::Combine(::testing::ValuesIn(prcs),
                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Autoconfigs)),
                         OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferRequestIOTensorSetPrecisionTest,
                         ::testing::Combine(::testing::ValuesIn(prcs),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Autoconfigs)),
                         OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
// Checking reported tensor precision for each element type on MULTI / AUTO.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVInferRequestCheckTensorPrecision,
                         ::testing::Combine(::testing::ValuesIn(prcs),
                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(Autoconfigs)),
                         OVInferRequestCheckTensorPrecision::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferRequestCheckTensorPrecision,
                         ::testing::Combine(::testing::ValuesIn(prcs),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(Autoconfigs)),
                         OVInferRequestCheckTensorPrecision::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/multithreading.hpp"
#include <vector>
using namespace ov::test::behavior;
namespace {
// Both virtual devices are backed by the TEMPLATE plugin in these runs.
const std::vector<ov::AnyMap> template_backed_configs = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

// Concurrent infer-request stress tests through the MULTI virtual device.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVInferRequestMultithreadingTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(template_backed_configs)),
                         OVInferRequestMultithreadingTests::getTestCaseName);

// The same stress tests through the AUTO virtual device.
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferRequestMultithreadingTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(template_backed_configs)),
                         OVInferRequestMultithreadingTests::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/perf_counters.hpp"
using namespace ov::test::behavior;
namespace {
const std::vector<ov::AnyMap> Autoconfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::enable_profiling(true)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer(true)}};
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
OVInferRequestPerfCountersTest,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Autoconfigs)),
OVInferRequestPerfCountersTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
OVInferRequestPerfCountersTest,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
OVInferRequestPerfCountersTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_infer_request/wait.hpp"
#include <vector>
using namespace ov::test::behavior;
namespace {
// TEMPLATE backs both virtual devices for the wait()-semantics tests.
const std::vector<ov::AnyMap> wait_test_configs = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};

// wait()/wait_for() behavior on infer requests created through MULTI.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         OVInferRequestWaitTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(wait_test_configs)),
                         OVInferRequestWaitTests::getTestCaseName);

// The same wait semantics through AUTO.
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         OVInferRequestWaitTests,
                         ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(wait_test_configs)),
                         OVInferRequestWaitTests::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,47 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_plugin/caching_tests.hpp"
#include <ov_ops/multiclass_nms_ie_internal.hpp>
#include <ov_ops/nms_ie_internal.hpp>
#include <ov_ops/nms_static_shape_ie.hpp>
using namespace ov::test::behavior;
using namespace ngraph;
namespace {
// Model precision(s) exercised by the model-cache tests.
static const std::vector<ov::element::Type> cached_precisions = {
    ov::element::f32,
};
// Batch sizes applied to the standard cached-model functions.
static const std::vector<std::size_t> cached_batch_sizes = {1, 2};

// AUTO delegates to the TEMPLATE plugin while caching compiled models.
const std::vector<ov::AnyMap> cache_auto_configs = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase,
                         CompileModelCacheTestBase,
                         ::testing::Combine(::testing::ValuesIn(CompileModelCacheTestBase::getStandardFunctions()),
                                            ::testing::ValuesIn(cached_precisions),
                                            ::testing::ValuesIn(cached_batch_sizes),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(cache_auto_configs)),
                         CompileModelCacheTestBase::getTestCaseName);

// Loading cached blobs from file and from memory on both virtual devices.
const std::vector<ov::AnyMap> cache_load_configs = {
    {ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};
const std::vector<std::string> virtual_device_targets = {
    ov::test::utils::DEVICE_AUTO,
    ov::test::utils::DEVICE_MULTI,
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase,
                         CompileModelLoadFromFileTestBase,
                         ::testing::Combine(::testing::ValuesIn(virtual_device_targets),
                                            ::testing::ValuesIn(cache_load_configs)),
                         CompileModelLoadFromFileTestBase::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_CachingSupportCase,
                         CompileModelLoadFromMemoryTestBase,
                         ::testing::Combine(::testing::ValuesIn(virtual_device_targets),
                                            ::testing::ValuesIn(cache_load_configs)),
                         CompileModelLoadFromMemoryTestBase::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,60 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_plugin/core_integration.hpp"
#include <openvino/runtime/properties.hpp>
#include "behavior/ov_plugin/core_integration_sw.hpp"
#include "behavior/ov_plugin/query_model.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/core.hpp"
using namespace ov::test::behavior;
using namespace InferenceEngine::PluginConfigParams;
// defined in plugin_name.cpp
extern const char* cpu_plugin_file_name;
namespace {
//
// IE Class Common tests with <pluginName, deviceName params>
//
// Empty config vs configs whose device::priorities again name the meta
// plugins: both must make the meta plugin reject loading itself
// (directly or through the candidate device list).
const std::vector<ov::AnyMap> configsWithEmpty = {{}};
const std::vector<ov::AnyMap> configsWithMetaPlugin = {{ov::device::priorities("AUTO")},
{ov::device::priorities("MULTI")},
{ov::device::priorities("AUTO", "MULTI")},
{ov::device::priorities("AUTO", "TEMPLATE")},
{ov::device::priorities("MULTI", "TEMPLATE")}};
// Meta plugin nested in the device name itself (e.g. "MULTI:AUTO").
// NOTE(review): "Condidate" is a typo in the externally-declared test class
// name and must be kept as-is to match its declaration.
INSTANTIATE_TEST_SUITE_P(
smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyWithEmptyConfigTest,
OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest,
::testing::Combine(::testing::Values("MULTI:AUTO", "AUTO:MULTI", "MULTI:AUTO,TEMPLATE", "AUTO:TEMPLATE,MULTI"),
::testing::ValuesIn(configsWithEmpty)),
::testing::PrintToStringParamName());
// Meta plugin nested through ov::device::priorities in the config.
INSTANTIATE_TEST_SUITE_P(smoke_MULTI_AUTO_DoNotSupportMetaPluginLoadingItselfRepeatedlyTest,
OVClassCompileModelWithCondidateDeviceListContainedMetaPluginTest,
::testing::Combine(::testing::Values("MULTI", "AUTO"),
::testing::ValuesIn(configsWithMetaPlugin)),
::testing::PrintToStringParamName());
// Several devices case
/* enable below in nightly tests*/
/*
INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest,
OVClassSeveralDevicesTestCompileModel,
::testing::Values(std::vector<std::string>({"GPU.0", "GPU.1"})));
INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest,
OVClassSeveralDevicesTestQueryModel,
::testing::Values(std::vector<std::string>({"GPU.0", "GPU.1"})));
INSTANTIATE_TEST_SUITE_P(nightly_OVClassSeveralDevicesTest,
OVClassSeveralDevicesTestDefaultCore,
::testing::Values(std::vector<std::string>({"GPU.0", "GPU.1"})));
*/
} // namespace

View File

@ -0,0 +1,22 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_plugin/life_time.hpp"
using namespace ov::test::behavior;
namespace {
// Holder/lifetime checks for compiled models created through the virtual
// AUTO and MULTI plugins, both backed by the TEMPLATE plugin.
INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests,
OVHoldersTest,
::testing::Values("AUTO:TEMPLATE", "MULTI:TEMPLATE"),
OVHoldersTest::getTestCaseName);
// Device strings with an explicit hardware priority list.
const std::vector<std::string> device_names_and_priorities = {
"MULTI:TEMPLATE", // TEMPLATE via MULTI
"AUTO:TEMPLATE", // TEMPLATE via AUTO
};
INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests,
OVHoldersTestWithConfig,
::testing::ValuesIn(device_names_and_priorities),
OVHoldersTestWithConfig::getTestCaseName);
} // namespace

View File

@ -0,0 +1,165 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_plugin/properties_tests.hpp"
#include <openvino/runtime/auto/properties.hpp>
using namespace ov::test::behavior;
using namespace InferenceEngine::PluginConfigParams;
namespace {
// RW properties accepted by AUTO/MULTI at set_property time; every entry
// routes the virtual device to TEMPLATE and toggles one hint/option.
// Boolean AUTO-specific options are deliberately fed as "YES"/"NO" strings.
const std::vector<ov::AnyMap> multi_Auto_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer("YES")},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::device_bind_buffer("NO")},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::enable_startup_fallback("YES")},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE), ov::intel_auto::enable_startup_fallback("NO")},
};
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiBehaviorTests,
OVPropertiesTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multi_Auto_properties)),
OVPropertiesTests::getTestCaseName);
// Core-level properties set before compile vs properties passed at
// compile_model time — compile-time values must win.
// NOTE(review): "ComplieModle" is a typo in the externally-declared test
// class name; it is kept as-is to match the declaration.
const std::vector<ov::AnyMap> multi_setcore_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
ov::hint::model_priority(ov::hint::Priority::HIGH)}};
const std::vector<ov::AnyMap> multi_compileModel_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::model_priority(ov::hint::Priority::MEDIUM)}};
INSTANTIATE_TEST_SUITE_P(smoke_MultiCompileModelBehaviorTests,
OVSetPropComplieModleGetPropTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multi_setcore_properties),
::testing::ValuesIn(multi_compileModel_properties)),
OVSetPropComplieModleGetPropTests::getTestCaseName);
// Same set-then-compile override check on AUTO with all three perf modes.
const std::vector<ov::AnyMap> auto_setcore_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::model_priority(ov::hint::Priority::HIGH)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
ov::hint::model_priority(ov::hint::Priority::HIGH)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
ov::hint::model_priority(ov::hint::Priority::HIGH)}};
const std::vector<ov::AnyMap> auto_compileModel_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY),
ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::model_priority(ov::hint::Priority::MEDIUM)}};
INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests,
OVSetPropComplieModleGetPropTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(auto_setcore_properties),
::testing::ValuesIn(auto_compileModel_properties)),
OVSetPropComplieModleGetPropTests::getTestCaseName);
// Expected out-of-the-box defaults reported by the AUTO plugin.
const std::vector<ov::AnyMap> default_properties = {{ov::enable_profiling(false)},
{ov::log::level("LOG_NONE")},
{ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
{ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)},
{ov::intel_auto::device_bind_buffer(false)},
{ov::intel_auto::enable_startup_fallback(true)},
{ov::device::priorities("")}};
INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests,
OVPropertiesDefaultTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(default_properties)),
OVPropertiesDefaultTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
OVPropertiesDefaultSupportedTests,
::testing::Values(ov::test::utils::DEVICE_TEMPLATE, ov::test::utils::DEVICE_AUTO));
// Configs that pass hardware-specific options (num_streams) to the virtual
// device without a matching compile-time config — compile must throw.
const std::vector<ov::AnyMap> auto_multi_incorrect_device_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::num_streams(4),
ov::device::properties("TEMPLATE", ov::num_streams(4))},
{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE),
ov::num_streams(4),
ov::device::properties("TEMPLATE", ov::num_streams(4), ov::enable_profiling(true))}};
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsThrow,
OVSetUnsupportPropCompileModelWithoutConfigTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(auto_multi_incorrect_device_properties)),
OVSetUnsupportPropCompileModelWithoutConfigTests::getTestCaseName);
//
// IE Class GetMetric
//
INSTANTIATE_TEST_SUITE_P(smoke_AutoOVGetMetricPropsTest, OVGetMetricPropsTest, ::testing::Values("MULTI", "AUTO"))_;
INSTANTIATE_TEST_SUITE_P(
smoke_AutoOVCheckGetSupportedROMetricsPropsTests,
OVCheckGetSupportedROMetricsPropsTests,
::testing::Combine(::testing::Values("MULTI", "AUTO"),
::testing::ValuesIn(OVCheckGetSupportedROMetricsPropsTests::configureProperties(
{ov::device::full_name.name()}))),
OVCheckGetSupportedROMetricsPropsTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
OVCheckSetSupportedRWMandatoryMetricsPropsTests,
OVCheckSetSupportedRWMetricsPropsTests,
::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"),
::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues(
{ov::hint::model_priority.name(), ov::log::level.name()}))),
OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName);
const std::vector<ov::AnyMap> multiConfigs = {{ov::device::priorities(ov::test::utils::DEVICE_TEMPLATE)}};
INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetDevicePriorityConfigPropsTest,
OVClassSetDevicePriorityConfigPropsTest,
::testing::Combine(::testing::Values("MULTI", "AUTO"), ::testing::ValuesIn(multiConfigs)));
// Priority lists may carry per-device request counts, e.g. "TEMPLATE(1)".
const std::vector<ov::AnyMap> auto_properties = {{ov::device::priorities("TEMPLATE")},
{ov::device::priorities("TEMPLATE(1)")}};
INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests,
OVPropertiesTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(auto_properties)),
OVPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorTests,
OVPropertiesTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(auto_properties)),
OVPropertiesTests::getTestCaseName);
// NOTE(review): this instantiation duplicates the
// OVCheckSetSupportedRWMetricsPropsTests instantiation above with identical
// parameters under a different prefix; consider removing one of the two.
INSTANTIATE_TEST_SUITE_P(
smoke_MultiAutoOVCheckSetSupportedRWMetricsPropsTests,
OVCheckSetSupportedRWMetricsPropsTests,
::testing::Combine(::testing::Values("MULTI:TEMPLATE", "AUTO:TEMPLATE"),
::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWMandatoryPropertiesValues(
{ov::hint::model_priority.name(), ov::log::level.name()}))),
OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName);
} // namespace

View File

@ -0,0 +1,191 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/plugin/configuration_tests.hpp"
#include "ie_plugin_config.hpp"
#include "ie_system_conf.h"
using namespace BehaviorTestsDefinitions;
namespace {
// Platform-dependent default for the (legacy) CPU bind-thread option:
// hybrid-aware on multi-core-type machines, NUMA/NO on Apple/Windows,
// YES elsewhere.
// NOTE(review): not referenced by any instantiation in this file —
// candidate for removal.
#if (defined(__APPLE__) || defined(_WIN32))
auto defaultBindThreadParameter = InferenceEngine::Parameter{[] {
auto numaNodes = InferenceEngine::getAvailableNUMANodes();
auto coreTypes = InferenceEngine::getAvailableCoresTypes();
if (coreTypes.size() > 1) {
return std::string{CONFIG_VALUE(HYBRID_AWARE)};
} else if (numaNodes.size() > 1) {
return std::string{CONFIG_VALUE(NUMA)};
} else {
return std::string{CONFIG_VALUE(NO)};
}
}()};
#else
auto defaultBindThreadParameter = InferenceEngine::Parameter{[] {
auto coreTypes = InferenceEngine::getAvailableCoresTypes();
if (coreTypes.size() > 1) {
return std::string{CONFIG_VALUE(HYBRID_AWARE)};
} else {
return std::string{CONFIG_VALUE(YES)};
}
}()};
#endif
// NOTE(review): netPrecisions and conf are not referenced by any
// instantiation in this file — candidates for removal.
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> conf = {{}};
// Legacy (IE 1.0) key/value configs accepted by MULTI.
const std::vector<std::map<std::string, std::string>> MultiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}}};
// Legacy configs accepted by AUTO: perf hints, log levels, model priority.
const std::vector<std::map<std::string, std::string>> AutoConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_NONE}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_ERROR}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_WARNING}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_INFO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_DEBUG}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, InferenceEngine::PluginConfigParams::LOG_TRACE}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY,
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY,
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY,
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_LOW}}};
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
CorrectConfigTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(MultiConfigs)),
CorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
CorrectConfigTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigs)),
CorrectConfigTests::getTestCaseName);
// Invalid configs: the plugin must reject these at SetConfig/LoadNetwork.
const std::vector<std::map<std::string, std::string>> multiinconfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "OFF"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF"}},
};
// AUTO-specific invalid configs (bad hint values, bad model priority,
// unknown log level, malformed stream/bind-thread values).
const std::vector<std::map<std::string, std::string>> autoinconfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, "DOESN'T EXIST"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "-1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "should be int"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "-1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY, "ABC"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_LOG_LEVEL, "NAN"}}};
// NOTE(review): multiconf is not referenced by any instantiation in this
// file — candidate for removal.
const std::vector<std::map<std::string, std::string>> multiconf = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}};
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
IncorrectConfigTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiinconfigs)),
IncorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
IncorrectConfigTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(autoinconfigs)),
IncorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
IncorrectConfigAPITests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiinconfigs)),
IncorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
IncorrectConfigAPITests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(autoinconfigs)),
IncorrectConfigAPITests::getTestCaseName);
// Core-level config vs LoadNetwork-time config: the LoadNetwork value
// (MODEL_PRIORITY_HIGH) must override the earlier SetConfig value (MED).
const std::vector<std::map<std::string, std::string>> auto_multi_prop_config = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY,
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_MED}}};
const std::vector<std::map<std::string, std::string>> auto_multi_loadNetWork_config = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE},
{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
{InferenceEngine::PluginConfigParams::KEY_MODEL_PRIORITY,
InferenceEngine::PluginConfigParams::MODEL_PRIORITY_HIGH}}};
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
SetPropLoadNetWorkGetPropTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(auto_multi_prop_config),
::testing::ValuesIn(auto_multi_loadNetWork_config)),
SetPropLoadNetWorkGetPropTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
SetPropLoadNetWorkGetPropTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(auto_multi_prop_config),
::testing::ValuesIn(auto_multi_loadNetWork_config)),
SetPropLoadNetWorkGetPropTests::getTestCaseName);
} // namespace

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/plugin/core_integration.hpp"
using namespace BehaviorTestsDefinitions;
using namespace InferenceEngine::PluginConfigParams;
// defined in plugin_name.cpp
extern const char* cpu_plugin_file_name;
namespace {
//
// IE Class Common tests with <pluginName, deviceName params>
//
//
// IE Class GetMetric
//
// Each suite below queries one metric through the legacy IE Core API on the
// MULTI and AUTO virtual devices.
INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest,
                         IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
                         ::testing::Values("MULTI", "AUTO"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest,
                         IEClassGetMetricTest_SUPPORTED_METRICS,
                         ::testing::Values("MULTI", "AUTO"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest,
                         IEClassGetMetricTest_FULL_DEVICE_NAME,
                         ::testing::Values("MULTI", "AUTO"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest,
                         IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
                         ::testing::Values("MULTI", "AUTO"));
// Unsupported metric / config queries are expected to throw.
INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetMetricTest,
                         IEClassGetMetricTest_ThrowUnsupported,
                         ::testing::Values("MULTI", "AUTO"));
INSTANTIATE_TEST_SUITE_P(smoke_IEClassGetConfigTest,
                         IEClassGetConfigTest_ThrowUnsupported,
                         ::testing::Values("MULTI", "AUTO"));
//////////////////////////////////////////////////////////////////////////////////////////
}  // namespace

View File

@ -0,0 +1,37 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <behavior/plugin/core_threading.hpp>
#ifdef __GLIBC__
# include <gnu/libc-version.h>
# if __GLIBC_MINOR__ >= 34
# define ENABLETESTMULTI
# endif
#else
# define ENABLETESTMULTI
#endif
namespace {
// (Device, Config) pairs for the core-threading suites: plain TEMPLATE plus —
// when ENABLETESTMULTI is defined (non-glibc, or glibc >= 2.34) — MULTI and
// AUTO delegating to TEMPLATE via DEVICE_PRIORITIES.
const Params params[] = {
    std::tuple<Device, Config>{ov::test::utils::DEVICE_TEMPLATE, {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}},
#ifdef ENABLETESTMULTI
    std::tuple<Device, Config>{ov::test::utils::DEVICE_MULTI,
                               {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}},
    std::tuple<Device, Config>{ov::test::utils::DEVICE_AUTO,
                               {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_TEMPLATE}}},
#endif
};
}  // namespace
/*
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTests, testing::ValuesIn(params),
CoreThreadingTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, CoreThreadingTestsWithIterations,
testing::Combine(testing::ValuesIn(params),
testing::Values(4),
testing::Values(50),
testing::Values(ModelClass::Default)),
CoreThreadingTestsWithIterations::getTestCaseName);
*/

View File

@ -0,0 +1,84 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/plugin/set_preprocess.hpp"
#ifdef ENABLE_GAPI_PREPROCESSING
using namespace BehaviorTestsDefinitions;
namespace {
// Network-level precisions under test.
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                               InferenceEngine::Precision::FP16};
// MULTI/AUTO are pointed at the TEMPLATE device for all pre-processing suites.
const std::vector<std::map<std::string, std::string>> multiConfigs = {
    {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, ov::test::utils::DEVICE_TEMPLATE}}};
// Input/output blob precisions.
const std::vector<InferenceEngine::Precision> ioPrecisions = {InferenceEngine::Precision::FP32,
                                                              InferenceEngine::Precision::U8};
const std::vector<InferenceEngine::Layout> netLayouts = {
    InferenceEngine::Layout::NCHW,
    // InferenceEngine::Layout::NHWC
};
const std::vector<InferenceEngine::Layout> ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC};
// Precision/layout conversion during pre-processing, MULTI device.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestPreprocessConversionTest,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::ValuesIn(ioPrecisions),
                                            ::testing::ValuesIn(ioPrecisions),
                                            ::testing::ValuesIn(netLayouts),
                                            ::testing::ValuesIn(ioLayouts),
                                            ::testing::ValuesIn(ioLayouts),
                                            ::testing::Bool(),
                                            ::testing::Bool(),
                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multiConfigs)),
                         InferRequestPreprocessConversionTest::getTestCaseName);
// Dynamic pre-processing changes via SetBlob, MULTI device.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         InferRequestPreprocessDynamicallyInSetBlobTest,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::Bool(),
                                            ::testing::Bool(),
                                            ::testing::ValuesIn(netLayouts),
                                            ::testing::Bool(),
                                            ::testing::Bool(),
                                            ::testing::Values(true),  // only SetBlob
                                            ::testing::Values(true),  // only SetBlob
                                            ::testing::Values(ov::test::utils::DEVICE_MULTI),
                                            ::testing::ValuesIn(multiConfigs)),
                         InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName);
// Same two suites on the AUTO device.
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestPreprocessConversionTest,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::ValuesIn(ioPrecisions),
                                            ::testing::ValuesIn(ioPrecisions),
                                            ::testing::ValuesIn(netLayouts),
                                            ::testing::ValuesIn(ioLayouts),
                                            ::testing::ValuesIn(ioLayouts),
                                            ::testing::Bool(),
                                            ::testing::Bool(),
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(multiConfigs)),
                         InferRequestPreprocessConversionTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         InferRequestPreprocessDynamicallyInSetBlobTest,
                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
                                            ::testing::Bool(),
                                            ::testing::Bool(),
                                            ::testing::ValuesIn(netLayouts),
                                            ::testing::Bool(),
                                            ::testing::Bool(),
                                            ::testing::Values(true),  // only SetBlob
                                            ::testing::Values(true),  // only SetBlob
                                            ::testing::Values(ov::test::utils::DEVICE_AUTO),
                                            ::testing::ValuesIn(multiConfigs)),
                         InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName);
}  // namespace
#endif // ENABLE_GAPI_PREPROCESSING

View File

@ -0,0 +1,18 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/plugin/version.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
// Plugin-version query smoke tests for the MULTI and AUTO virtual devices.
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
                         VersionTest,
                         ::testing::Values(ov::test::utils::DEVICE_MULTI),
                         VersionTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                         VersionTest,
                         ::testing::Values(ov::test::utils::DEVICE_AUTO),
                         VersionTest::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,17 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
// Legacy-API hook invoked by the shared test infrastructure before each test;
// intentionally a no-op for this test target.
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
namespace ov {
namespace test {
// 2.0-API counterpart of the hook above; likewise a no-op here.
void core_configuration(ov::test::SubgraphBaseTest* test) {}
}  // namespace test
}  // namespace ov

View File

@ -0,0 +1,17 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "set_device_name.hpp"
#include <stdexcept>
#include <string>
namespace ov {
namespace test {
// Rejects any device-name suffix: this test target has no notion of device
// indices, so only an empty suffix is accepted.
// @throws std::runtime_error when a non-empty suffix is passed.
void set_device_suffix(const std::string& suffix) {
    if (suffix.empty())
        return;
    throw std::runtime_error("The suffix can't be used for CPU device!");
}
}  // namespace test
}  // namespace ov

View File

@ -0,0 +1,80 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "functional_test_utils/skip_tests_config.hpp"
#include <ie_system_conf.h>
#include <string>
#include <vector>
#include "ie_parallel.hpp"
// Returns the list of regex patterns matched against full gtest names;
// any test whose name matches one of them is skipped for this target.
std::vector<std::string> disabledTestPatterns() {
    std::vector<std::string> patterns{
        // TODO: Issue: 43793
        R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*iPRC=0.*_iLT=1.*)",
        R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*_oLT=1.*)",
        // Not expected behavior
        R"(.*Behavior.*InferRequestSetBlobByType.*Batched.*)",
        R"(.*Auto.*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableWithIncorrectConfig.*)",
        // Not implemented yet:
        R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)",
        R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModel.*)",
        R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)",
        R"(.*Behavior.*OVCompiledModelBaseTest.*canExportModel.*)",
        R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNetWithIncorrectConfig.*)",
        R"(.*Behavior.*OVCompiledModelBaseTest.*canSetConfigToCompiledModelWithIncorrectConfig.*)",
        // TODO: CVS-104942
        R"(.*(Auto|Multi).*Behavior.*ExecutableNetworkBaseTest.*canLoadCorrectNetworkToGetExecutableAndCheckConfig.*)",
        R"(.*(Auto|Multi).*SetPropLoadNetWorkGetPropTests.*)",
        // CPU does not support dynamic rank
        // Issue: CVS-66778
        R"(.*smoke_Auto_BehaviorTests.*InferFullyDynamicNetworkWith(S|G)etTensor.*)",
        R"(.*smoke_Auto_BehaviorTests.*DynamicOutputToDynamicInput.*)",
        R"(.*smoke_Auto_BehaviorTests.*DynamicInputToDynamicOutput.*)",
        // unsupported metrics
        R"(.*smoke_AutoOVGetMetricPropsTest.*OVGetMetricPropsTest.*(AVAILABLE_DEVICES|OPTIMIZATION_CAPABILITIES|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)",
        // Issue:
        // New API tensor tests
        R"(.*OVInferRequestCheckTensorPrecision.*type=i4.*)",
        R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)",
        R"(.*OVInferRequestCheckTensorPrecision.*type=u4.*)",
        // AUTO does not support import / export
        R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)",
        // New plugin API doesn't support changes of pre-processing
        R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInputInfo.*)",
        R"(.*(Auto|Multi).*InferRequestPreprocessTest.*SetPreProcessToInferRequest.*)",
        // New plugin work with tensors, so it means that blob in old API can have different pointers
        R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)",
        R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)",
        R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetInputAfterInferSync.*)",
        R"(.*(Auto|Multi).*InferRequestIOBBlobTest.*secondCallGetOutputAfterInferSync.*)",
        // TODO Issue 100145
        R"(.*Behavior.*InferRequestIOBBlobTest.*canReallocateExternalBlobViaGet.*)",
        R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)",
        R"(.*Behavior.*OVInferRequestDynamicTests.*InferUpperBoundNetworkAfterIOTensorsReshaping.*)",
        // Not expected behavior
        R"(.*Behavior.*(Multi|Auto).*InferRequestSetBlobByType.*Batched.*)",
        R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)",
        // template plugin doesn't support this case
        R"(.*OVInferRequestPerfCountersTest.*CheckOperationInProfilingInfo.*)"};
#if !defined(OPENVINO_ARCH_X86_64)
    // very time-consuming test
    patterns.push_back(R"(.*OVInferConsistencyTest.*)");
#endif
#if defined(_WIN32)
    patterns.push_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)");
#endif
    return patterns;
}

View File

@ -3,11 +3,22 @@
//
#include "include/auto_unit_test.hpp"
#include "common_test_utils/file_utils.hpp"
#include "openvino/core/any.hpp"
#include "openvino/opsets/opset11.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/util/shared_object.hpp"
namespace testing {
namespace internal {
// gtest value-printer specialization for ov::Any: emits a fixed marker rather
// than attempting to stream the contained value.
template <>
void PrintTo<ov::Any>(const ov::Any& /*value*/, std::ostream* os) {
    *os << "using custom PrintTo ov::Any";
}
}  // namespace internal
}  // namespace testing
std::shared_ptr<ov::Model> ov::mock_auto_plugin::tests::BaseTest::create_model() {
auto param = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
@ -30,52 +41,56 @@ ov::mock_auto_plugin::tests::BaseTest::BaseTest() {
NiceMock<MockAutoPlugin>* mock_auto = new NiceMock<MockAutoPlugin>();
plugin.reset(mock_auto);
// construct mock plugin
mock_plugin_cpu = std::make_shared<NiceMock<ov::MockPluginBase>>();
mock_plugin_gpu = std::make_shared<NiceMock<ov::MockPluginBase>>();
mock_plugin_cpu = std::make_shared<NiceMock<ov::MockIPlugin>>();
mock_plugin_gpu = std::make_shared<NiceMock<ov::MockIPlugin>>();
// prepare mockExeNetwork
mockIExeNet = std::make_shared<NiceMock<ov::MockCompiledModel>>(model, mock_plugin_cpu);
mockIExeNet = std::make_shared<NiceMock<ov::MockICompiledModel>>(model, mock_plugin_cpu);
mockExeNetwork = {mockIExeNet, {}};
mockIExeNetActual = std::make_shared<NiceMock<ov::MockCompiledModel>>(model, mock_plugin_gpu);
mockIExeNetActual = std::make_shared<NiceMock<ov::MockICompiledModel>>(model, mock_plugin_gpu);
mockExeNetworkActual = {mockIExeNetActual, {}};
inferReqInternal = std::make_shared<ov::MockSyncInferRequest>(mockIExeNet);
ON_CALL(*mockIExeNet.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs()));
ON_CALL(*mockIExeNet.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs()));
ON_CALL(*mockIExeNetActual.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs()));
ON_CALL(*mockIExeNetActual.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs()));
inferReqInternal = std::make_shared<ov::mock_auto_plugin::MockISyncInferRequest>(mockIExeNet);
ON_CALL(*mockIExeNet.get(), create_sync_infer_request()).WillByDefault(Return(inferReqInternal));
optimalNum = (uint32_t)1;
ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(Return(optimalNum));
inferReqInternalActual = std::make_shared<ov::MockSyncInferRequest>(mockIExeNetActual);
inferReqInternalActual = std::make_shared<ov::mock_auto_plugin::MockISyncInferRequest>(mockIExeNetActual);
ON_CALL(*mockIExeNetActual.get(), create_sync_infer_request()).WillByDefault(Return(inferReqInternalActual));
ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(Return(optimalNum));
ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault([this]() {
return mockIExeNet->ICompiledModel::create_infer_request();
});
return mockIExeNet->ICompiledModel::create_infer_request();
});
ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault([this]() {
return mockIExeNetActual->ICompiledModel::create_infer_request();
});
return mockIExeNetActual->ICompiledModel::create_infer_request();
});
std::vector<ov::PropertyName> supported_props = {ov::hint::num_requests};
ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::supported_properties.name())))
.WillByDefault(Return(ov::Any(supported_props)));
ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::supported_properties.name())))
.WillByDefault(Return(ov::Any(supported_props)));
unsigned int num = 1;
ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::hint::num_requests.name())))
.WillByDefault(Return(ov::Any(num)));
ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::hint::num_requests.name()))).WillByDefault(Return(ov::Any(num)));
ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::hint::num_requests.name())))
.WillByDefault(Return(ov::Any(num)));
ON_CALL(*plugin, get_device_list).WillByDefault([this](const ov::AnyMap& config) {
return plugin->Plugin::get_device_list(config);
});
ON_CALL(*plugin, parse_meta_devices)
.WillByDefault(
[this](const std::string& priorityDevices, const ov::AnyMap& config) {
.WillByDefault([this](const std::string& priorityDevices, const ov::AnyMap& config) {
return plugin->Plugin::parse_meta_devices(priorityDevices, config);
});
ON_CALL(*plugin, select_device)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices,
const std::string& netPrecision,
unsigned int priority) {
const std::string& netPrecision,
unsigned int priority) {
return plugin->Plugin::select_device(metaDevices, netPrecision, priority);
});
@ -115,47 +130,46 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() {
ON_CALL(*core, get_property(_, StrEq(ov::supported_properties.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(supportedProps));
ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(12));
std::vector<std::string> cpuCability = {"FP32", "FP16", "INT8", "BIN"};
std::vector<std::string> gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"};
std::vector<std::string> othersCability = {"FP32", "FP16"};
std::vector<std::string> cpuCability = {"FP32", "FP16", "INT8", "BIN"};
std::vector<std::string> gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"};
std::vector<std::string> othersCability = {"FP32", "FP16"};
std::string igpuArchitecture = "GPU: vendor=0x8086 arch=0";
std::string dgpuArchitecture = "GPU: vendor=0x8086 arch=1";
auto iGpuType = ov::device::Type::INTEGRATED;
auto dGpuType = ov::device::Type::DISCRETE;
ON_CALL(*core, get_property(StrEq(ov::test::utils::DEVICE_CPU),
StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(cpuCability));
ON_CALL(*core, get_property(HasSubstr("GPU"),
StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability));
ON_CALL(*core, get_property(StrEq("OTHERS"),
StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(othersCability));
ON_CALL(*core, get_property(StrEq("GPU"),
StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU.0"),
StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU.1"),
StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU"),
StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(iGpuType));
ON_CALL(*core, get_property(StrEq("GPU.0"),
StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(iGpuType));
ON_CALL(*core, get_property(StrEq("GPU.1"),
StrEq(ov::device::type.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dGpuType));
const std::vector<std::string> metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS), ov::device::full_name.name(), ov::device::id.name()};
ON_CALL(*core, get_property(StrEq(ov::test::utils::DEVICE_CPU), StrEq(ov::device::capabilities.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(cpuCability));
ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(gpuCability));
ON_CALL(*core, get_property(StrEq("OTHERS"), StrEq(ov::device::capabilities.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(othersCability));
ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::architecture.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::architecture.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(igpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::architecture.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::type.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(iGpuType));
ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::type.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(iGpuType));
ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::type.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(dGpuType));
const std::vector<std::string> metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS),
ov::device::full_name.name(),
ov::device::id.name()};
const char igpuFullDeviceName[] = "Intel(R) Gen9 HD Graphics (iGPU)";
const char dgpuFullDeviceName[] = "Intel(R) Iris(R) Xe MAX Graphics (dGPU)";
ON_CALL(*core, get_property(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _))
.WillByDefault(RETURN_MOCK_VALUE(metrics));
ON_CALL(*core, get_property(_, ov::supported_properties.name(), _))
.WillByDefault(Return(ov::Any(supportedProps)));
ON_CALL(*core, get_property(StrEq("GPU"),
StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName));
ON_CALL(*core, get_property(StrEq("GPU"),
StrEq(ov::device::id.name()), _)).WillByDefault(Return(ov::Any("0")));
ON_CALL(*core, get_property(StrEq("GPU.0"),
StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName));
ON_CALL(*core, get_property(StrEq("GPU.1"),
StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName));
const std::vector<std::string> availableDevs = {"CPU", "GPU.0", "GPU.1"};
ON_CALL(*core, get_property(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _)).WillByDefault(RETURN_MOCK_VALUE(metrics));
ON_CALL(*core, get_property(_, ov::supported_properties.name(), _)).WillByDefault(Return(ov::Any(supportedProps)));
ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::full_name.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName));
ON_CALL(*core, get_property(StrEq("GPU"), StrEq(ov::device::id.name()), _)).WillByDefault(Return(ov::Any("0")));
ON_CALL(*core, get_property(StrEq("GPU.0"), StrEq(ov::device::full_name.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName));
ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::full_name.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName));
const std::vector<std::string> availableDevs = {"CPU", "GPU.0", "GPU.1"};
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
ON_CALL(*core, get_supported_property).WillByDefault([](const std::string& device, const ov::AnyMap& fullConfigs) {
auto item = fullConfigs.find(ov::device::properties.name());
@ -186,180 +200,35 @@ ov::mock_auto_plugin::tests::AutoTest::~AutoTest() {
core.reset();
}
namespace {
std::string get_mock_engine_path() {
std::string mockEngineName("mock_engine");
return ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(),
mockEngineName + IE_BUILD_POSTFIX);
// Ensures `tensor` exists with the requested element type: a matching tensor is
// merely reshaped, anything else is replaced by a freshly allocated one.
void ov::mock_auto_plugin::MockISyncInferRequest::allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
                                                                       const element::Type& element_type,
                                                                       const Shape& shape) {
    const bool reusable = tensor && tensor->get_element_type() == element_type;
    if (reusable) {
        tensor->set_shape(shape);
    } else {
        tensor = ov::make_tensor(element_type, shape);
    }
}
// Resolves the symbol `functionName` from the loaded shared object `so` and
// wraps it into a std::function with signature T.
template <class T>
std::function<T> make_std_function(const std::shared_ptr<void> so, const std::string& functionName) {
    std::function<T> ptr(reinterpret_cast<T*>(ov::util::get_symbol(so, functionName.c_str())));
    return ptr;
}
// Shorthand for declaring a read-only property name.
ov::PropertyName RO_property(const std::string& propertyName) {
    return ov::PropertyName(propertyName, ov::PropertyMutability::RO);
}
// Shorthand for declaring a read-write property name.
ov::PropertyName RW_property(const std::string& propertyName) {
    return ov::PropertyName(propertyName, ov::PropertyMutability::RW);
}
} // namespace
// Registers the mock CPU and GPU plugins into a real ov::Core and installs the
// default compile_model stubs (CPU -> mockIExeNet, GPU -> mockIExeNetActual).
ov::mock_auto_plugin::tests::AutoTestWithRealCore::AutoTestWithRealCore() {
    register_plugin_simple(core, "MOCK_CPU", {});
    // validate the mock plugin, to ensure the order as well
    core.get_property("MOCK_CPU", ov::supported_properties);
    register_plugin_support_batch_and_context(core, "MOCK_GPU", {});
    // validate the mock plugin
    core.get_property("MOCK_GPU", ov::supported_properties);
    ON_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_), _))
        .WillByDefault(Return(mockIExeNet));
    // Fix: the second default action must be installed on the GPU mock. The original
    // code stubbed mock_plugin_cpu twice (copy-paste), which overrode the CPU default
    // with mockIExeNetActual and left mock_plugin_gpu without any compile_model default.
    ON_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_), _))
        .WillByDefault(Return(mockIExeNetActual));
    // Note: the unused local `ov::Any optimalNum` was removed; it was never read.
}
// Registers `plugin` into `core` under `device_name` by injecting it into the
// mock_engine shared library and then registering that library with the Core.
// The shared object is loaded lazily and cached in m_so across registrations.
void ov::mock_auto_plugin::tests::AutoTestWithRealCore::reg_plugin(ov::Core& core,
                                                                   std::shared_ptr<ov::IPlugin> plugin,
                                                                   const std::string& device_name,
                                                                   const ov::AnyMap& properties) {
    std::string libraryPath = get_mock_engine_path();
    if (!m_so)
        m_so = ov::util::load_shared_object(libraryPath.c_str());
    // MULTI/AUTO manage their own device naming; only leaf devices get it set here.
    if (device_name.find("MULTI") == std::string::npos && device_name.find("AUTO") == std::string::npos)
        plugin->set_device_name(device_name);
    // Hand the mock plugin instance to mock_engine so the Core picks it up on load.
    std::function<void(ov::IPlugin*)> inject_mock_plugin = make_std_function<void(ov::IPlugin*)>(m_so, "InjectPlugin");
    inject_mock_plugin(plugin.get());
    core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(),
                                                            std::string("mock_engine") + IE_BUILD_POSTFIX),
                         device_name,
                         properties);
}
// test
// Registers the GPU-like mock plugin: it advertises batching support and remote
// context, and answers the full set of property queries the AUTO plugin makes.
void ov::mock_auto_plugin::tests::AutoTestWithRealCore::register_plugin_support_batch_and_context(
    ov::Core& core,
    const std::string& device_name,
    const ov::AnyMap& properties) {
    // Keep the context alive for the lifetime of the fixture.
    auto remote_context = std::make_shared<ov::MockRemoteContext>(mock_plugin_gpu->get_device_name());
    m_mock_contexts.push_back(remote_context);
    ON_CALL(*mock_plugin_gpu, compile_model(_, _)).WillByDefault(Return(mockIExeNetActual));
    ON_CALL(*mock_plugin_gpu, create_context).WillByDefault(Return(ov::SoPtr<ov::IRemoteContext>(remote_context, nullptr)));
    ON_CALL(*mock_plugin_gpu, get_default_context).WillByDefault(Return(ov::SoPtr<ov::IRemoteContext>(remote_context, nullptr)));
    // Property dispatcher emulating a GPU device; unknown names fall through to
    // OPENVINO_NOT_IMPLEMENTED so unexpected queries fail loudly.
    ON_CALL(*mock_plugin_gpu, get_property).WillByDefault([](const std::string& name, const ov::AnyMap& property) -> ov::Any {
        const std::vector<ov::PropertyName> roProperties{
            RO_property(ov::supported_properties.name()),
            RO_property(ov::optimal_batch_size.name()),
            RO_property(ov::optimal_number_of_infer_requests.name()),
            RO_property(ov::device::capabilities.name()),
            RO_property(ov::device::type.name()),
            RO_property(ov::device::uuid.name()),
        };
        // the whole config is RW before network is loaded.
        const std::vector<ov::PropertyName> rwProperties{
            RW_property(ov::num_streams.name()),
            RW_property(ov::enable_profiling.name()),
            RW_property(ov::compilation_num_threads.name()),
            RW_property(ov::hint::performance_mode.name()),
            RW_property(ov::hint::num_requests.name())
        };
        if (name == ov::supported_properties) {
            // RO followed by RW, concatenated into a single list.
            std::vector<ov::PropertyName> supportedProperties;
            supportedProperties.reserve(roProperties.size() + rwProperties.size());
            supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
            supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
            return decltype(ov::supported_properties)::value_type(supportedProperties);
        } else if (name == ov::optimal_number_of_infer_requests.name()) {
            return decltype(ov::optimal_number_of_infer_requests)::value_type(1);
        } else if (name == ov::optimal_batch_size.name()) {
            return decltype(ov::optimal_batch_size)::value_type(4);
        } else if (name == ov::device::capabilities.name()) {
            return decltype(ov::device::capabilities)::value_type({"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"});
        } else if (name == ov::device::type.name()) {
            return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED);
        } else if (name == ov::loaded_from_cache.name()) {
            return false;
        } else if (name == ov::enable_profiling.name()) {
            return decltype(ov::enable_profiling)::value_type{false};
        } else if (name == ov::streams::num.name()) {
            return decltype(ov::streams::num)::value_type{2};
        } else if (name == ov::compilation_num_threads.name()) {
            return decltype(ov::compilation_num_threads)::value_type{4};
        } else if (name == "SUPPORTED_CONFIG_KEYS") {  // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : rwProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (name == "SUPPORTED_METRICS") {  // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : roProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (name == ov::internal::supported_properties) {
            return decltype(ov::internal::supported_properties)::value_type({});
        }
        OPENVINO_NOT_IMPLEMENTED;
    });
    std::shared_ptr<ov::IPlugin> base_plugin = mock_plugin_gpu;
    reg_plugin(core, base_plugin, device_name, properties);
}
void ov::mock_auto_plugin::tests::AutoTestWithRealCore::register_plugin_simple(ov::Core& core,
const std::string& device_name,
const ov::AnyMap& properties) {
ON_CALL(*mock_plugin_cpu, compile_model(_, _)).WillByDefault(Return(mockIExeNet));
ON_CALL(*mock_plugin_cpu, create_context).WillByDefault(Throw(ov::Exception{"NotImplemented"}));
ON_CALL(*mock_plugin_cpu, get_default_context).WillByDefault(Throw(ov::Exception{"NotImplemented"}));
ON_CALL(*mock_plugin_cpu, get_property).WillByDefault([](const std::string& name, const ov::AnyMap& property) -> ov::Any {
const std::vector<ov::PropertyName> roProperties{
RO_property(ov::supported_properties.name()),
RO_property(ov::device::uuid.name()),
};
// the whole config is RW before network is loaded.
const std::vector<ov::PropertyName> rwProperties{
RW_property(ov::num_streams.name()),
RW_property(ov::enable_profiling.name()),
RW_property(ov::hint::performance_mode.name())
};
if (name == ov::supported_properties) {
std::vector<ov::PropertyName> supportedProperties;
supportedProperties.reserve(roProperties.size() + rwProperties.size());
supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
return decltype(ov::supported_properties)::value_type(supportedProperties);
} else if (name == ov::loaded_from_cache.name()) {
return false;
} else if (name == ov::enable_profiling.name()) {
return decltype(ov::enable_profiling)::value_type{false};
} else if (name == ov::streams::num.name()) {
return decltype(ov::streams::num)::value_type{2};
} else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key
std::vector<std::string> configs;
for (const auto& property : rwProperties) {
configs.emplace_back(property);
}
return configs;
} else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key
std::vector<std::string> configs;
for (const auto& property : roProperties) {
configs.emplace_back(property);
}
return configs;
} else if (name == ov::internal::supported_properties) {
return decltype(ov::internal::supported_properties)::value_type({});
}
OPENVINO_NOT_IMPLEMENTED;
});
std::shared_ptr<ov::IPlugin> base_plugin = mock_plugin_cpu;
reg_plugin(core, base_plugin, device_name, properties);
// Builds a sync infer-request over `compiled_model` and pre-allocates one tensor
// per model input/output. Dynamic ports get an empty Shape{0} placeholder since
// their concrete shape is unknown until inference time.
ov::mock_auto_plugin::MockISyncInferRequest::MockISyncInferRequest(
    const std::shared_ptr<const ov::ICompiledModel>& compiled_model)
    : ov::ISyncInferRequest(compiled_model) {
    OPENVINO_ASSERT(compiled_model);
    // Allocate input/output tensors
    for (const auto& input : get_inputs()) {
        allocate_tensor(input, [this, input](ov::SoPtr<ov::ITensor>& tensor) {
            // Can add a check to avoid double work in case of shared tensors
            allocate_tensor_impl(tensor,
                                 input.get_element_type(),
                                 input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape());
        });
    }
    for (const auto& output : get_outputs()) {
        allocate_tensor(output, [this, output](ov::SoPtr<ov::ITensor>& tensor) {
            // Can add a check to avoid double work in case of shared tensors
            allocate_tensor_impl(tensor,
                                 output.get_element_type(),
                                 output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape());
        });
    }
}

View File

@ -97,11 +97,11 @@ public:
}
};
using modelPrioPerfHintTestParams = std::tuple<bool, // is New API
bool, // if Actual device sleep, cpu device will load slow
std::string, // Actual Device Name
std::string, // performance mode
ov::Any // model Priority
using modelPrioPerfHintTestParams = std::tuple<bool, // is New API
bool, // if Actual device sleep, cpu device will load slow
std::string, // Actual Device Name
std::string, // performance mode
ov::Any // model Priority
>;
class ExecNetworkget_propertyOtherTest : public tests::AutoTest,
@ -113,11 +113,7 @@ public:
std::string actualDeviceName;
std::string performanceMode;
ov::Any modelPriority;
std::tie(isNewAPI,
actualSleep,
actualDeviceName,
performanceMode,
modelPriority) = obj.param;
std::tie(isNewAPI, actualSleep, actualDeviceName, performanceMode, modelPriority) = obj.param;
std::ostringstream result;
if (isNewAPI) {
result << "_isNewAPI_"
@ -227,47 +223,60 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES
EXPECT_CALL(*plugin, select_device(_, _, _)).Times(1);
if (cpuSleep) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
return mockExeNetwork;
}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
return mockExeNetwork;
}));
} else {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetwork));
}
if (actualSleep) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)), _))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
return mockExeNetworkActual;
}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
return mockExeNetworkActual;
}));
} else {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)), _))
.WillByDefault(Return(mockExeNetworkActual));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)),
_))
.WillByDefault(Return(mockExeNetworkActual));
}
ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum));
.WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum));
ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum));
.WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum));
EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.Times(AtLeast(1));
EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name()))).Times(AtLeast(1));
EXPECT_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.Times(AtLeast(1));
.Times(AtLeast(1));
EXPECT_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _)).Times(1);
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.Times(1);
EXPECT_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)), _)).Times(1);
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)),
_))
.Times(1);
if (cpuCustomerNum == -1) {
EXPECT_CALL(*mockIExeNet.get(), create_sync_infer_request()).Times(cpuOptimalNum);
@ -281,7 +290,7 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES
EXPECT_CALL(*mockIExeNetActual.get(), create_sync_infer_request()).Times(actualCustomerNum);
}
auto AutoExecNetwork = plugin->compile_model(model, config);
auto AutoExecNetwork = plugin->compile_model(model, config);
auto result = AutoExecNetwork->get_property(ov::optimal_number_of_infer_requests.name()).as<unsigned int>();
EXPECT_EQ(result, expectOptimalNum);
}
@ -292,57 +301,58 @@ TEST_P(ExecNetworkget_propertyOptimalNumInferReq, OPTIMAL_NUMBER_OF_INFER_REQUES
// every element for ConfigParams
// {is throughput mode, cpuOptimalNum, customer hope for cpu infer requset num, if cpu sleep when load,
// actualOptimalNum, customer hope for actual infer requset num, if actual sleep when load, actual device Name
// expectOptimalNum of Auto ExecNetwork, gpu Number of requests, if actual supported OptimalNum, default Value of OptimalNum}
// expectOptimalNum of Auto ExecNetwork, gpu Number of requests, if actual supported OptimalNum, default Value of
// OptimalNum}
//
const std::vector<ConfigParams> testConfigs = {
ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true},
ConfigParams {false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams {true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams {false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true},
ConfigParams {false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams {true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true},
ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, true},
ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true},
ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, true, true},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, false},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 10, false, false},
ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, false},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 6, true, false},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, false, false},
ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, true, false},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, false},
ConfigParams {true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true},
ConfigParams {false, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 1, 6, true, true},
ConfigParams {true, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true},
ConfigParams {false, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true},
ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, false},
ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, false},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, false},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 1, true, false},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, false, false},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, false, false},
ConfigParams {true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 4, 6, true, true},
ConfigParams {true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 2, true, true},
ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, true, true},
ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, true, true},
ConfigParams {false, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, true, true},
ConfigParams {true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, true},
ConfigParams {false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true},
ConfigParams {true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true},
ConfigParams {false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
ConfigParams {true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
ConfigParams {false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true},
ConfigParams {true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true},
ConfigParams {false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
ConfigParams {true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
};
ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true},
ConfigParams{false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams{true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 1, 0, false, true},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true},
ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_GPU, 2, 0, false, true},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 48, 48, false, true},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true},
ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, true},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, true},
ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, false, true},
ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 48, 0, true, true},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, false, false},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 10, false, false},
ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, false},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 6, true, false},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, false, false},
ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 8, 0, true, false},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_GPU, 2, 0, true, false},
ConfigParams{true, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true},
ConfigParams{false, 3, 5, false, 0, 5, true, ov::test::utils::DEVICE_GPU, 1, 6, true, true},
ConfigParams{true, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true},
ConfigParams{false, 3, 5, false, 6, 5, true, ov::test::utils::DEVICE_GPU, 6, 6, true, true},
ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, false},
ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, false},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, false},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 1, true, false},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, false, false},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, false, false},
ConfigParams{true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 4, 6, true, true},
ConfigParams{true, 3, -1, false, 4, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 2, true, true},
ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 10, true, true},
ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 6, 6, true, true},
ConfigParams{false, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, true, true},
ConfigParams{true, 3, -1, false, 0, -1, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, true, true},
ConfigParams{false, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true},
ConfigParams{true, 3, -1, false, 2, -1, true, ov::test::utils::DEVICE_KEEMBAY, 2, 0, true, true},
ConfigParams{false, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
ConfigParams{true, 3, -1, true, 2, -1, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
ConfigParams{false, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 1, 0, false, true},
ConfigParams{true, 3, 5, false, 2, 5, true, ov::test::utils::DEVICE_KEEMBAY, 8, 0, false, true},
ConfigParams{false, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
ConfigParams{true, 3, 5, true, 2, 5, false, ov::test::utils::DEVICE_KEEMBAY, 2, 0, false, true},
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
ExecNetworkget_propertyOptimalNumInferReq,
@ -357,11 +367,7 @@ public:
std::string actualDeviceName;
std::string performanceMode;
ov::Any modelPriority;
std::tie(isNewAPI,
actualSleep,
actualDeviceName,
performanceMode,
modelPriority) = obj.param;
std::tie(isNewAPI, actualSleep, actualDeviceName, performanceMode, modelPriority) = obj.param;
std::ostringstream result;
if (isNewAPI) {
result << "_isNewAPI_"
@ -392,11 +398,7 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t
std::string actualDeviceName;
std::string performanceHint;
ov::Any modelPriority;
std::tie(isNewAPI,
actualSleep,
actualDeviceName,
performanceHint,
modelPriority) = this->GetParam();
std::tie(isNewAPI, actualSleep, actualDeviceName, performanceHint, modelPriority) = this->GetParam();
config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + actualDeviceName));
config.insert(ov::hint::performance_mode(performanceHint));
config.insert({ov::hint::model_priority.name(), modelPriority.as<std::string>()});
@ -418,14 +420,17 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t
EXPECT_CALL(*plugin, select_device(_, _, _)).Times(1);
ON_CALL(*core, get_property(_, StrEq(ov::compilation_num_threads.name()), _)).WillByDefault(Return(8));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetwork));
if (actualSleep) {
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)), _))
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(5000));
return mockExeNetworkActual;
@ -433,14 +438,15 @@ TEST_P(ExecNetworkGetMetricOtherTest, modelPriority_perfHint_exclusiveAsyncReq_t
} else {
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)), _))
::testing::Matcher<const std::string&>(StrEq(actualDeviceName)),
_))
.WillByDefault(Return(mockExeNetworkActual));
}
ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum));
.WillByDefault(RETURN_MOCK_VALUE(cpuOptimalNum));
ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum));
.WillByDefault(RETURN_MOCK_VALUE(actualOptimalNum));
auto AutoExecNetwork = plugin->compile_model(model, config);
auto result = AutoExecNetwork->get_property(ov::hint::performance_mode.name()).as<std::string>();
@ -455,61 +461,25 @@ const std::vector<modelPrioPerfHintTestParams> modelPrioPerfHintConfig = {
ov::test::utils::DEVICE_GPU,
"THROUGHPUT",
CONFIG_VALUE(MODEL_PRIORITY_LOW)},
modelPrioPerfHintTestParams{false,
true,
ov::test::utils::DEVICE_GPU,
"LATENCY",
CONFIG_VALUE(MODEL_PRIORITY_LOW)},
modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_LOW)},
modelPrioPerfHintTestParams{false,
true,
ov::test::utils::DEVICE_GPU,
"THROUGHPUT",
CONFIG_VALUE(MODEL_PRIORITY_MED)},
modelPrioPerfHintTestParams{false,
true,
ov::test::utils::DEVICE_GPU,
"LATENCY",
CONFIG_VALUE(MODEL_PRIORITY_MED)},
modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_MED)},
modelPrioPerfHintTestParams{false,
true,
ov::test::utils::DEVICE_GPU,
CONFIG_VALUE(THROUGHPUT),
CONFIG_VALUE(MODEL_PRIORITY_HIGH)},
modelPrioPerfHintTestParams{false,
true,
ov::test::utils::DEVICE_GPU,
"LATENCY",
CONFIG_VALUE(MODEL_PRIORITY_HIGH)},
modelPrioPerfHintTestParams{true,
true,
ov::test::utils::DEVICE_GPU,
"THROUGHPUT",
"LOW"},
modelPrioPerfHintTestParams{true,
true,
ov::test::utils::DEVICE_GPU,
"LATENCY",
"LOW"},
modelPrioPerfHintTestParams{true,
true,
ov::test::utils::DEVICE_GPU,
"THROUGHPUT",
"MEDIUM"},
modelPrioPerfHintTestParams{true,
true,
ov::test::utils::DEVICE_GPU,
"LATENCY",
"MEDIUM"},
modelPrioPerfHintTestParams{true,
true,
ov::test::utils::DEVICE_GPU,
"THROUGHPUT",
"HIGH"},
modelPrioPerfHintTestParams{true,
true,
ov::test::utils::DEVICE_GPU,
"LATENCY",
"HIGH"}};
modelPrioPerfHintTestParams{false, true, ov::test::utils::DEVICE_GPU, "LATENCY", CONFIG_VALUE(MODEL_PRIORITY_HIGH)},
modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "LOW"},
modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "LOW"},
modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "MEDIUM"},
modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "MEDIUM"},
modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "THROUGHPUT", "HIGH"},
modelPrioPerfHintTestParams{true, true, ov::test::utils::DEVICE_GPU, "LATENCY", "HIGH"}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
ExecNetworkGetMetricOtherTest,

View File

@ -24,7 +24,7 @@ using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<std::string, // virtual device name to load network
std::vector<std::string>, // hardware device name to expect loading network on
ov::AnyMap>; // secondary property setting to device
ov::AnyMap>; // secondary property setting to device
static std::vector<ConfigParams> testConfigs;
@ -52,51 +52,72 @@ public:
static std::vector<ConfigParams> CreateConfigs() {
testConfigs.clear();
testConfigs.push_back(
ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"NUM_STREAMS", "12"},
{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"NUM_STREAMS", "15"},
{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}});
ConfigParams{"AUTO:CPU",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:CPU,GPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
ConfigParams{"AUTO:CPU,GPU",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}});
testConfigs.push_back(ConfigParams{"AUTO:GPU,CPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
ConfigParams{"AUTO:GPU",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:GPU,CPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
testConfigs.push_back(
ConfigParams{"MULTI:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}});
testConfigs.push_back(ConfigParams{"MULTI:CPU,GPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
ConfigParams{"MULTI:CPU",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}});
testConfigs.push_back(
ConfigParams{"MULTI:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}});
testConfigs.push_back(ConfigParams{"MULTI:GPU,CPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
ConfigParams{"MULTI:CPU,GPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(
ConfigParams{"MULTI:GPU",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}});
testConfigs.push_back(
ConfigParams{"MULTI:GPU,CPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
return testConfigs;
}
void SetUp() override {
std::vector<std::string> availableDevs = {"CPU", "GPU"};
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrNe(ov::test::utils::DEVICE_CPU)), _))
.WillByDefault(Return(mockExeNetworkActual));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrNe(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetworkActual));
}
};
@ -123,11 +144,10 @@ TEST_P(LoadNetworkWithSecondaryConfigsMockTest, LoadNetworkWithSecondaryConfigsT
ov::util::Read<ov::AnyMap>{}(strConfigs, deviceConfigs);
}
}
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const ov::AnyMap&>(MapContains(deviceConfigs))))
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const ov::AnyMap&>(MapContains(deviceConfigs))))
.Times(1);
}
@ -144,32 +164,40 @@ TEST_P(AutoLoadExeNetworkFailedTest, checkLoadFailMassage) {
if (device.find("MULTI") != std::string::npos)
plugin->set_device_name("MULTI");
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
::testing::Matcher<const ov::AnyMap&>(_)))
.WillByDefault(Throw(ov::Exception{"Mock GPU Load Failed"}));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
::testing::Matcher<const ov::AnyMap&>(_)))
.WillByDefault(Throw(ov::Exception{"Mock CPU Load Failed"}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
::testing::Matcher<const ov::AnyMap&>(_)))
.WillByDefault(Throw(ov::Exception{"Mock GPU Load Failed"}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
::testing::Matcher<const ov::AnyMap&>(_)))
.WillByDefault(Throw(ov::Exception{"Mock CPU Load Failed"}));
if (device == "AUTO") {
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception,
"[AUTO] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed");
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config),
ov::Exception,
"[AUTO] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed");
} else if (device == "AUTO:CPU") {
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception,
"[AUTO] compile model failed, CPU:Mock CPU Load Failed");
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config),
ov::Exception,
"[AUTO] compile model failed, CPU:Mock CPU Load Failed");
} else if (device == "AUTO:GPU") {
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception,
"[AUTO] compile model failed, GPU:Mock GPU Load Failed");
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config),
ov::Exception,
"[AUTO] compile model failed, GPU:Mock GPU Load Failed");
} else if (device == "MULTI") {
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception,
"[MULTI] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed");
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config),
ov::Exception,
"[MULTI] compile model failed, GPU:Mock GPU Load Failed; CPU:Mock CPU Load Failed");
} else if (device == "MULTI:CPU") {
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception,
"[MULTI] compile model failed, CPU:Mock CPU Load Failed");
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config),
ov::Exception,
"[MULTI] compile model failed, CPU:Mock CPU Load Failed");
} else if (device == "MULTI:GPU") {
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config), ov::Exception,
"[MULTI] compile model failed, GPU:Mock GPU Load Failed");
EXPECT_THROW_WITH_MESSAGE(plugin->compile_model(model, config),
ov::Exception,
"[MULTI] compile model failed, GPU:Mock GPU Load Failed");
}
}
@ -184,9 +212,9 @@ const std::vector<ConfigParams> testConfigsAutoLoadFailed = {
ConfigParams{"AUTO:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}},
ConfigParams{"MULTI", {"CPU", "GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}},
ConfigParams{"MULTI:CPU", {"CPU"}, {{"MULTI_DEVICE_PRIORITIES", "CPU"}}},
ConfigParams{"MULTI:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}
};
ConfigParams{"MULTI:GPU", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}};
INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest, AutoLoadExeNetworkFailedTest,
::testing::ValuesIn(testConfigsAutoLoadFailed),
AutoLoadExeNetworkFailedTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest,
AutoLoadExeNetworkFailedTest,
::testing::ValuesIn(testConfigsAutoLoadFailed),
AutoLoadExeNetworkFailedTest::getTestCaseName);

View File

@ -8,8 +8,7 @@ using namespace ov::mock_auto_plugin;
using Config = std::map<std::string, std::string>;
using ConfigParams = std::tuple<std::vector<std::string>>;
class LoadNetworkWithCTPUTMockTest : public tests::AutoTest,
public ::testing::TestWithParam<ConfigParams> {
class LoadNetworkWithCTPUTMockTest : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
std::vector<std::string> targetDevices;
@ -29,12 +28,16 @@ public:
void SetUp() override {
std::vector<std::string> availableDevs = {"CPU", "GPU"};
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)), _))
.WillByDefault(Return(mockExeNetworkActual));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
_))
.WillByDefault(Return(mockExeNetworkActual));
}
};
@ -51,17 +54,15 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) {
// Call single device logic and performance hint is THROUGHPUT
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(targetDevice),
::testing::Matcher<const ov::AnyMap&>(
ComparePerfHint("THROUGHPUT"))))
::testing::Matcher<const std::string&>(targetDevice),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT"))))
.Times(1);
// if target device only has GPU, no CPU helper to be called
if (targetDevice.find("GPU") != std::string::npos) {
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(
ComparePerfHint("LATENCY"))))
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY"))))
.Times(0);
}
} else {
@ -71,18 +72,16 @@ TEST_P(LoadNetworkWithCTPUTMockTest, CTPUTSingleDevLogicTest) {
targetDev += ((deviceName == targetDevices.back()) ? "" : ",");
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const ov::AnyMap&>(
ComparePerfHint("THROUGHPUT"))))
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT"))))
.Times(1);
}
config.insert(ov::device::priorities(targetDev));
// no CPU helper to be called
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(
ComparePerfHint("LATENCY"))))
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY"))))
.Times(0);
}
@ -150,12 +149,16 @@ public:
void SetUp() override {
std::vector<std::string> availableDevs = {"CPU", "GPU"};
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)), _))
.WillByDefault(Return(mockExeNetworkActual));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
_))
.WillByDefault(Return(mockExeNetworkActual));
}
};
@ -176,21 +179,21 @@ TEST_P(AutoCTPUTCallMulti, CTPUTDeviceLoadFailedNoExceptionThrowTest) {
config.insert(ov::device::priorities(targetDev));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(loadFailedDevice)),
::testing::Matcher<const ov::AnyMap&>(_)))
::testing::Matcher<const std::string&>(StrEq(loadFailedDevice)),
::testing::Matcher<const ov::AnyMap&>(_)))
.WillByDefault(Throw(InferenceEngine::GeneralError{""}));
if (loadFailedDevice != ov::test::utils::DEVICE_CPU) {
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(_)))
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(1);
}
if (loadFailedDevice != ov::test::utils::DEVICE_GPU) {
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_GPU),
::testing::Matcher<const ov::AnyMap&>(_)))
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_GPU),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(1);
}
ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config));

View File

@ -7,12 +7,11 @@ using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<std::string, // virtual device name to load network
std::vector<std::string>, // hardware device name to expect loading network on
ov::AnyMap>; // secondary property setting to device
ov::AnyMap>; // secondary property setting to device
static std::vector<ConfigParams> testConfigs;
class AutoDefaultPerfHintTest : public tests::AutoTest,
public ::testing::TestWithParam<ConfigParams> {
class AutoDefaultPerfHintTest : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
std::string deviceName;
@ -37,35 +36,36 @@ public:
testConfigs.clear();
testConfigs.push_back(
ConfigParams{"AUTO", {"CPU"}, {{"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get default_hint:lantency
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency
testConfigs.push_back(ConfigParams{
"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency
testConfigs.push_back(ConfigParams{
"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:no perf_hint
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint
testConfigs.push_back(
ConfigParams{"AUTO", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get default_hint:lantency
testConfigs.push_back(
ConfigParams{"AUTO",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint
testConfigs.push_back(ConfigParams{"AUTO",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint
testConfigs.push_back(ConfigParams{
"MULTI:CPU,GPU",
@ -91,30 +91,30 @@ public:
static std::vector<ConfigParams> CreatePerfHintAndDefaultPerfHintTestConfigs() {
testConfigs.clear();
testConfigs.push_back(ConfigParams{
"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"},
{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"},
{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput
testConfigs.push_back(ConfigParams{
"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"},
{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get perf_hint:tput
testConfigs.push_back(ConfigParams{
"AUTO",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput
testConfigs.push_back(ConfigParams{"AUTO",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput
testConfigs.push_back(ConfigParams{
"MULTI:CPU,GPU",
@ -136,30 +136,29 @@ public:
static std::vector<ConfigParams> CreateSecPropAndDefaultPerfHintTestConfigs() {
testConfigs.clear();
testConfigs.push_back(ConfigParams{
"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"},
{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency
testConfigs.push_back(
ConfigParams{"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"},
{"MULTI_DEVICE_PRIORITIES",
"GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint
testConfigs.push_back(ConfigParams{
"AUTO",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint
testConfigs.push_back(ConfigParams{"AUTO",
{"CPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:FALSE}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint
testConfigs.push_back(ConfigParams{
"AUTO",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint
testConfigs.push_back(ConfigParams{"AUTO",
{"GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"},
{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint
testConfigs.push_back(ConfigParams{
"MULTI:CPU,GPU",
@ -171,11 +170,11 @@ public:
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{GPU:{ALLOW_AUTO_BATCHING:FALSE}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get default_hint:tput GPU: get default_hint:tput
testConfigs.push_back(ConfigParams{
"MULTI:CPU,GPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput
testConfigs.push_back(
ConfigParams{"MULTI:CPU,GPU",
{"CPU", "GPU"},
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"},
{"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput
return testConfigs;
}
@ -183,13 +182,17 @@ public:
std::vector<std::string> availableDevs = {"CPU", "GPU"};
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("CPU")), _))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("CPU")),
_))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU")), _))
.WillByDefault(Return(mockExeNetworkActual));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU")),
_))
.WillByDefault(Return(mockExeNetworkActual));
}
};
@ -246,21 +249,21 @@ TEST_P(NumStreamsAndDefaultPerfHintMockTest, NumStreamsAndDefaultPerfHintTest) {
// do not pass default perf_hint to HW
HW_PerfHint = "No PERFORMANCE_HINT";
}
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint(HW_PerfHint))))
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint(HW_PerfHint))))
.Times(1);
}
ASSERT_NO_THROW(plugin->compile_model(model, config));
}
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest,
NumStreamsAndDefaultPerfHintMockTest,
::testing::ValuesIn(NumStreamsAndDefaultPerfHintMockTest::CreateNumStreamsAndDefaultPerfHintTestConfigs()),
NumStreamsAndDefaultPerfHintMockTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest,
NumStreamsAndDefaultPerfHintMockTest,
::testing::ValuesIn(NumStreamsAndDefaultPerfHintMockTest::CreateNumStreamsAndDefaultPerfHintTestConfigs()),
NumStreamsAndDefaultPerfHintMockTest::getTestCaseName);
TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) {
std::string device;
@ -309,21 +312,21 @@ TEST_P(PerHintAndDefaultPerfHintMockTest, PerfHintAndDefaultPerfHintTest) {
if (itor != deviceConfigs.end() && !isCPUHelper) {
HW_PerfHint = itor->second.as<std::string>();
}
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint(HW_PerfHint))))
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint(HW_PerfHint))))
.Times(1);
}
ASSERT_NO_THROW(plugin->compile_model(model, config));
}
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_PerHintAndDefaultPerfHintToHWTest,
PerHintAndDefaultPerfHintMockTest,
::testing::ValuesIn(PerHintAndDefaultPerfHintMockTest::CreatePerfHintAndDefaultPerfHintTestConfigs()),
PerHintAndDefaultPerfHintMockTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_AutoMultiMock_PerHintAndDefaultPerfHintToHWTest,
PerHintAndDefaultPerfHintMockTest,
::testing::ValuesIn(PerHintAndDefaultPerfHintMockTest::CreatePerfHintAndDefaultPerfHintTestConfigs()),
PerHintAndDefaultPerfHintMockTest::getTestCaseName);
TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) {
std::string device;
@ -372,18 +375,18 @@ TEST_P(SecPropAndDefaultPerfHintMockTest, SecPropAndDefaultPerfHintTest) {
}
}
}
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint(HW_PerfHint))))
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint(HW_PerfHint))))
.Times(1);
}
ASSERT_NO_THROW(plugin->compile_model(model, config));
}
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiMock_SecPropAndDefaultPerfHintToHWTest,
SecPropAndDefaultPerfHintMockTest,
::testing::ValuesIn(SecPropAndDefaultPerfHintMockTest::CreateSecPropAndDefaultPerfHintTestConfigs()),
SecPropAndDefaultPerfHintMockTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(
smoke_AutoMultiMock_SecPropAndDefaultPerfHintToHWTest,
SecPropAndDefaultPerfHintMockTest,
::testing::ValuesIn(SecPropAndDefaultPerfHintMockTest::CreateSecPropAndDefaultPerfHintTestConfigs()),
SecPropAndDefaultPerfHintMockTest::getTestCaseName);

View File

@ -2,8 +2,9 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <thread>
#include <common_test_utils/common_utils.hpp>
#include <thread>
#include "include/auto_unit_test.hpp"
using DynamicOutputConfigParams = std::tuple<
@ -11,8 +12,7 @@ using DynamicOutputConfigParams = std::tuple<
ov::Any // expected device to run inference on
>;
class DynamicOutputInferenceTest : public tests::AutoTest,
public ::testing::TestWithParam<DynamicOutputConfigParams> {
class DynamicOutputInferenceTest : public tests::AutoTest, public ::testing::TestWithParam<DynamicOutputConfigParams> {
public:
std::shared_ptr<ov::Model> create_dynamic_output_model();
static std::string getTestCaseName(testing::TestParamInfo<DynamicOutputConfigParams> obj);
@ -45,27 +45,35 @@ std::shared_ptr<ov::Model> DynamicOutputInferenceTest::create_dynamic_output_mod
auto scores = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 2});
scores->set_friendly_name("param_2");
scores->get_output_tensor(0).set_names({"input_tensor_2"});
auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10});
auto max_output_boxes_per_class = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10});
auto iou_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75});
auto score_threshold = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.7});
auto nms = std::make_shared<ov::op::v9::NonMaxSuppression>(boxes, scores, max_output_boxes_per_class,
iou_threshold, score_threshold);
auto nms = std::make_shared<ov::op::v9::NonMaxSuppression>(boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold);
auto res = std::make_shared<ov::op::v0::Result>(nms);
res->set_friendly_name("output_dynamic");
return std::make_shared<ov::Model>(ov::NodeVector{nms}, ov::ParameterVector{boxes, scores});
}
void DynamicOutputInferenceTest::SetUp() {
model = create_dynamic_output_model();
std::tie(priorityList, targetList) = GetParam();
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)), _))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkActual; }));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
(_))).WillByDefault(Return(mockExeNetwork));
model = create_dynamic_output_model();
std::tie(priorityList, targetList) = GetParam();
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkActual;
}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
(_)))
.WillByDefault(Return(mockExeNetwork));
}
TEST_P(DynamicOutputInferenceTest, CanSelectCorrectTargetDeviceandInitizeBlobWithCorrectSize) {
@ -74,27 +82,26 @@ TEST_P(DynamicOutputInferenceTest, CanSelectCorrectTargetDeviceandInitizeBlobWit
config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
std::shared_ptr<ov::ICompiledModel> exeNetwork;
for (auto& iter : targets) {
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(HasSubstr(iter)),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(1);
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(HasSubstr(iter)),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(1);
}
EXPECT_CALL(
*core,
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(HasSubstr("GPU")),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(0);
::testing::Matcher<const std::string&>(HasSubstr("GPU")),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(0);
ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config));
}
const std::vector<DynamicOutputConfigParams> testConfigs = {
DynamicOutputConfigParams {"CPU,GPU", std::vector<std::string>{"CPU"}},
DynamicOutputConfigParams {"GPU,CPU", std::vector<std::string>{"CPU"}},
DynamicOutputConfigParams{"CPU,GPU", std::vector<std::string>{"CPU"}},
DynamicOutputConfigParams{"GPU,CPU", std::vector<std::string>{"CPU"}},
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, DynamicOutputInferenceTest,
::testing::ValuesIn(testConfigs),
DynamicOutputInferenceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
DynamicOutputInferenceTest,
::testing::ValuesIn(testConfigs),
DynamicOutputInferenceTest::getTestCaseName);

View File

@ -10,10 +10,9 @@ using namespace ov::mock_auto_plugin;
const std::vector<std::string> availableDevs = {"CPU", "GPU", "NPU"};
const std::vector<std::string> availableDevsWithId = {"CPU", "GPU.0", "GPU.1", "NPU"};
using Params = std::tuple<std::string, std::string>;
using ConfigParams = std::tuple<
std::vector<std::string>, // Available devices retrieved from Core
Params // Params {devicePriority, expect metaDevices}
>;
using ConfigParams = std::tuple<std::vector<std::string>, // Available devices retrieved from Core
Params // Params {devicePriority, expect metaDevices}
>;
class GetDeviceListTest : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
@ -36,10 +35,9 @@ public:
}
void SetUp() override {
ON_CALL(*plugin, get_device_list).WillByDefault([this](
const ov::AnyMap& config) {
return plugin->Plugin::get_device_list(config);
});
ON_CALL(*plugin, get_device_list).WillByDefault([this](const ov::AnyMap& config) {
return plugin->Plugin::get_device_list(config);
});
}
};
@ -76,8 +74,8 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) {
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
std::string dgpuArchitecture = "GPU: vendor=0x10DE arch=0";
ON_CALL(*core, get_property(StrEq("GPU.1"),
StrEq(ov::device::architecture.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture));
ON_CALL(*core, get_property(StrEq("GPU.1"), StrEq(ov::device::architecture.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(dgpuArchitecture));
EXPECT_CALL(*core, get_available_devices()).Times(1);
if (metaDevices == "") {
EXPECT_THROW(plugin->get_device_list({ov::device::priorities(priorityDevices)}), ov::Exception);
@ -88,29 +86,30 @@ TEST_P(GetDeviceListTestWithNotInteldGPU, GetDeviceListTestWithExcludeList) {
}
}
const std::vector<Params> testConfigsWithId = {Params{" ", " "},
Params{"", "CPU,GPU.0,GPU.1"},
Params{"CPU, ", "CPU, "},
Params{" ,CPU", " ,CPU"},
Params{"CPU,", "CPU"},
Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"},
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"},
Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"},
Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"},
Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"},
Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"},
Params{"-GPU.0,GPU", "GPU.1"},
Params{"-GPU,GPU.0", "GPU.0"},
Params{"-GPU.0", "CPU,GPU.1"},
Params{"-GPU.0,-GPU.1", "CPU"},
Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"},
Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"},
Params{"-GPU.0,-GPU.1,-CPU", ""},
Params{"GPU,-GPU.0", "GPU.1"},
Params{"-GPU,CPU", "CPU"},
Params{"-GPU,-CPU", ""},
Params{"GPU.0,-GPU", "GPU.0"},
Params{"-GPU.0,-CPU", "GPU.1"}};
const std::vector<Params> testConfigsWithId = {
Params{" ", " "},
Params{"", "CPU,GPU.0,GPU.1"},
Params{"CPU, ", "CPU, "},
Params{" ,CPU", " ,CPU"},
Params{"CPU,", "CPU"},
Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"},
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"},
Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"},
Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"},
Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"},
Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"},
Params{"-GPU.0,GPU", "GPU.1"},
Params{"-GPU,GPU.0", "GPU.0"},
Params{"-GPU.0", "CPU,GPU.1"},
Params{"-GPU.0,-GPU.1", "CPU"},
Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"},
Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"},
Params{"-GPU.0,-GPU.1,-CPU", ""},
Params{"GPU,-GPU.0", "GPU.1"},
Params{"-GPU,CPU", "CPU"},
Params{"-GPU,-CPU", ""},
Params{"GPU.0,-GPU", "GPU.0"},
Params{"-GPU.0,-CPU", "GPU.1"}};
const std::vector<Params> testConfigs = {Params{" ", " "},
Params{"", "CPU,GPU"},
@ -139,35 +138,36 @@ const std::vector<Params> testConfigs = {Params{" ", " "},
Params{"-CPU,INVALID_DEVICE", "INVALID_DEVICE"},
Params{"CPU,GPU,NPU", "CPU,GPU,NPU"}};
const std::vector<Params> testConfigsWithIdNotInteldGPU = {Params{" ", " "},
Params{"", "CPU,GPU.0"},
Params{"CPU, ", "CPU, "},
Params{" ,CPU", " ,CPU"},
Params{"CPU,", "CPU"},
Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"},
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"},
Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"},
Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"},
Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"},
Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"},
Params{"-GPU.0,GPU", "GPU.1"},
Params{"-GPU,GPU.0", "GPU.0"},
Params{"-GPU.0", "CPU"},
Params{"-GPU.0,-GPU.1", "CPU"},
Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"},
Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"},
Params{"-GPU.0,-GPU.1,-CPU", ""},
Params{"GPU,-GPU.0", "GPU.1"},
Params{"GPU.0,-GPU", "GPU.0"},
Params{"GPU", "GPU.0,GPU.1"},
Params{"GPU.0", "GPU.0"},
Params{"GPU.1", "GPU.1"},
Params{"-CPU", "GPU.0"},
Params{"-CPU,-GPU", ""},
Params{"-CPU,-GPU.0", ""},
Params{"-CPU,-GPU.1", "GPU.0"},
Params{"-GPU,CPU", "CPU"},
Params{"-GPU.0,-CPU", ""}};
const std::vector<Params> testConfigsWithIdNotInteldGPU = {
Params{" ", " "},
Params{"", "CPU,GPU.0"},
Params{"CPU, ", "CPU, "},
Params{" ,CPU", " ,CPU"},
Params{"CPU,", "CPU"},
Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"},
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"},
Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"},
Params{"CPU,GPU,NPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,NPU,INVALID_DEVICE"},
Params{"NPU,GPU,CPU,-GPU.0", "NPU,GPU.1,CPU"},
Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"},
Params{"-GPU.0,GPU", "GPU.1"},
Params{"-GPU,GPU.0", "GPU.0"},
Params{"-GPU.0", "CPU"},
Params{"-GPU.0,-GPU.1", "CPU"},
Params{"-GPU.0,-GPU.1,INVALID_DEVICE", "INVALID_DEVICE"},
Params{"-GPU.0,-GPU.1,-INVALID_DEVICE", "CPU"},
Params{"-GPU.0,-GPU.1,-CPU", ""},
Params{"GPU,-GPU.0", "GPU.1"},
Params{"GPU.0,-GPU", "GPU.0"},
Params{"GPU", "GPU.0,GPU.1"},
Params{"GPU.0", "GPU.0"},
Params{"GPU.1", "GPU.1"},
Params{"-CPU", "GPU.0"},
Params{"-CPU,-GPU", ""},
Params{"-CPU,-GPU.0", ""},
Params{"-CPU,-GPU.1", "GPU.0"},
Params{"-GPU,CPU", "CPU"},
Params{"-GPU.0,-CPU", ""}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceListWithID,
GetDeviceListTest,
@ -182,8 +182,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceList,
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests_GetDeviceListNotInteldGPU,
GetDeviceListTestWithNotInteldGPU,
::testing::Combine(::testing::Values(availableDevsWithId), ::testing::ValuesIn(testConfigsWithIdNotInteldGPU)),
::testing::Combine(::testing::Values(availableDevsWithId),
::testing::ValuesIn(testConfigsWithIdNotInteldGPU)),
GetDeviceListTestWithNotInteldGPU::getTestCaseName);
//toDo need add test for ParseMetaDevices(_, config) to check device config of
//return metaDevices
// toDo need add test for ParseMetaDevices(_, config) to check device config of
// return metaDevices

View File

@ -3,43 +3,31 @@
//
#pragma once
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <memory>
#include "plugin.hpp"
#include "openvino/runtime/core.hpp"
#include "gmock_plugin.hpp"
#include "mock_common.hpp"
#include <common_test_utils/test_constants.hpp>
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include <gtest/gtest.h>
using ::testing::MatcherCast;
using ::testing::AllOf;
using ::testing::Throw;
using ::testing::Matches;
using ::testing::_;
using ::testing::StrEq;
using ::testing::StrNe;
using ::testing::Return;
using ::testing::Property;
using ::testing::Eq;
using ::testing::ReturnRef;
using ::testing::AtLeast;
using ::testing::AnyNumber;
using ::testing::InvokeWithoutArgs;
using ::testing::HasSubstr;
using ::testing::NiceMock;
#include <common_test_utils/test_constants.hpp>
#include <memory>
#include "gmock_plugin.hpp"
#include "openvino/runtime/core.hpp"
#include "plugin.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp"
using namespace ::testing;
using namespace ov::mock_auto_plugin;
#define EXPECT_THROW_WITH_MESSAGE(stmt, etype, whatstring) EXPECT_THROW( \
try { \
stmt; \
} catch (const etype& ex) { \
#define EXPECT_THROW_WITH_MESSAGE(stmt, etype, whatstring) \
EXPECT_THROW( \
try { stmt; } catch (const etype& ex) { \
EXPECT_THAT(std::string(ex.what()), HasSubstr(whatstring)); \
throw; \
} \
, etype)
throw; \
}, \
etype)
// define a matcher to check if perf hint expects
MATCHER_P(ComparePerfHint, perfHint, "Check if perf hint expects.") {
@ -51,28 +39,51 @@ MATCHER_P(ComparePerfHint, perfHint, "Check if perf hint expects.") {
return perfHint == arg_perfHint.as<std::string>();
}
#define IE_SET_METRIC(key, name, ...) \
typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = __VA_ARGS__;
#define RETURN_MOCK_VALUE(value) \
InvokeWithoutArgs([value]() { \
return ov::Any(value); \
})
// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any
// it will cause core dump, so add this special implemented
namespace testing {
namespace internal {
template <>
void PrintTo<ov::Any>(const ov::Any& a, std::ostream* os);
}
} // namespace testing
#define ENABLE_LOG_IN_MOCK() \
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { \
std::cout << stream.str() << std::endl; \
});
namespace ov {
namespace mock_auto_plugin {
namespace tests {
class BaseTest {
public:
std::shared_ptr<ov::Model> model;
std::shared_ptr<NiceMock<MockPluginBase>> mock_plugin_cpu;
std::shared_ptr<NiceMock<MockPluginBase>> mock_plugin_gpu;
std::shared_ptr<NiceMock<MockAutoPlugin>> plugin;
//mock exeNetwork helper
ov::SoPtr<ov::MockCompiledModel> mockExeNetwork;
std::shared_ptr<ov::MockCompiledModel> mockIExeNet;
//mock exeNetwork actual
ov::SoPtr<ov::MockCompiledModel> mockExeNetworkActual;
std::shared_ptr<ov::MockCompiledModel> mockIExeNetActual;
std::shared_ptr<const ov::Model> model;
std::shared_ptr<ov::Model> model_can_batch;
std::shared_ptr<NiceMock<ov::MockIPlugin>> mock_plugin_cpu;
std::shared_ptr<NiceMock<ov::MockIPlugin>> mock_plugin_gpu;
std::shared_ptr<NiceMock<MockAutoPlugin>> plugin;
// mock exeNetwork helper
ov::SoPtr<ov::MockICompiledModel> mockExeNetwork;
std::shared_ptr<ov::MockICompiledModel> mockIExeNet;
// mock exeNetwork actual
ov::SoPtr<ov::MockICompiledModel> mockExeNetworkActual;
std::shared_ptr<ov::MockICompiledModel> mockIExeNetActual;
// config for Auto device
ov::AnyMap config;
std::vector<DeviceInformation> metaDevices;
std::shared_ptr<ov::MockSyncInferRequest> inferReqInternal;
std::shared_ptr<ov::MockSyncInferRequest> inferReqInternalActual;
ov::AnyMap config;
std::vector<DeviceInformation> metaDevices;
std::shared_ptr<ov::mock_auto_plugin::MockISyncInferRequest> inferReqInternal;
std::shared_ptr<ov::mock_auto_plugin::MockISyncInferRequest> inferReqInternalActual;
ov::Any optimalNum;
virtual ~BaseTest();
@ -84,32 +95,10 @@ protected:
// for auto unit tests which can covered by mock core, or need to test with gmock icore
class AutoTest : public BaseTest {
public:
std::shared_ptr<NiceMock<ov::MockICore>> core;
std::shared_ptr<NiceMock<ov::MockICore>> core;
AutoTest();
~AutoTest();
};
// for unit tests which requires real core, batch support or remote context
// mock plugin name: MOCK_CPU,MOCK_HARDWARE
// please extend as needed
class AutoTestWithRealCore : public BaseTest {
public:
AutoTestWithRealCore();
~AutoTestWithRealCore() = default;
ov::Core core;
protected:
void register_plugin_simple(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties);
void register_plugin_support_batch_and_context(ov::Core& core, const std::string& device_name, const ov::AnyMap& properties);
std::vector<std::shared_ptr<ov::IRemoteContext>> m_mock_contexts;
std::shared_ptr<void> m_so;
std::shared_ptr<ov::ICompiledModel> compiled_model;
void reg_plugin(ov::Core& core,
std::shared_ptr<ov::IPlugin> plugin,
const std::string& device_name,
const ov::AnyMap& properties);
};
} // namespace tests
} // namespace mock_auto_plugin
} // namespace ov

View File

@ -4,9 +4,11 @@
#pragma once
#include <gmock/gmock.h>
#include <iostream>
#include "openvino/runtime/core.hpp"
#include "plugin.hpp"
#include <iostream>
using namespace ov::mock_auto_plugin;
namespace ov {
@ -19,10 +21,47 @@ public:
get_valid_device,
((const std::vector<DeviceInformation>&), const std::string&),
(const, override));
MOCK_METHOD(DeviceInformation, select_device, ((const std::vector<DeviceInformation>&),
const std::string&, unsigned int), (override));
MOCK_METHOD((std::vector<DeviceInformation>), parse_meta_devices,
(const std::string&, const ov::AnyMap&), (const, override));
MOCK_METHOD(DeviceInformation,
select_device,
((const std::vector<DeviceInformation>&), const std::string&, unsigned int),
(override));
MOCK_METHOD((std::vector<DeviceInformation>),
parse_meta_devices,
(const std::string&, const ov::AnyMap&),
(const, override));
};
} // namespace mock_auto_plugin
} // namespace ov
// Mock of a synchronous infer request; lets tests observe/script infer(),
// profiling and state queries issued through the AUTO plugin.
class MockISyncInferRequest : public ISyncInferRequest {
public:
// Constructs the request for the given compiled model (implementation not visible here).
MockISyncInferRequest(const std::shared_ptr<const ov::ICompiledModel>& compiled_model);
MOCK_METHOD(std::vector<ov::ProfilingInfo>, get_profiling_info, (), (const, override));
MOCK_METHOD(void, infer, (), (override));
MOCK_METHOD(std::vector<ov::SoPtr<IVariableState>>, query_state, (), (const, override));
~MockISyncInferRequest() = default;
private:
// Helper used to (re)allocate an I/O tensor with the requested element type
// and shape; defined outside this header.
void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
const ov::element::Type& element_type,
const ov::Shape& shape);
};
class MockAsyncInferRequest : public IAsyncInferRequest {
public:
MockAsyncInferRequest(const std::shared_ptr<IInferRequest>& request,
const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
const std::shared_ptr<ov::threading::ITaskExecutor>& callback_executor,
bool ifThrow)
: IAsyncInferRequest(request, task_executor, callback_executor),
m_throw(ifThrow) {
m_pipeline = {};
m_pipeline.push_back({task_executor, [this] {
if (m_throw)
OPENVINO_THROW("runtime inference failure");
}});
}
private:
bool m_throw;
};
} // namespace mock_auto_plugin
} // namespace ov

View File

@ -1,145 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <ie_metric_helpers.hpp>
#include "openvino/runtime/iplugin.hpp"
#include "openvino/opsets/opset11.hpp"
#include "openvino/runtime/iasync_infer_request.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
// Declares a local variable `name` with the static type of the legacy
// Inference Engine metric `key`, initialized from __VA_ARGS__.
#define IE_SET_METRIC(key, name, ...) \
typename ::InferenceEngine::Metrics::MetricType<::InferenceEngine::Metrics::key>::type name = \
__VA_ARGS__;
// gmock action: produce a fresh ov::Any wrapping `value` each time the
// mocked call fires (value is captured by copy).
#define RETURN_MOCK_VALUE(value) \
InvokeWithoutArgs([value](){return ov::Any(value);})
// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any
// it will cause core dump, so add this special implemented
namespace testing {
namespace internal {
template<>
void PrintTo<ov::Any>(const ov::Any& a, std::ostream* os);
}
}
// Routes the mocked logger's captured stream to stdout so log output stays
// visible while the Log mock is active.
#define ENABLE_LOG_IN_MOCK() \
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) { \
std::cout << stream.str() << std::endl; \
});
namespace ov {
// gmock stand-in for a full ov::IPlugin: every plugin API entry point is a
// MOCK_METHOD so tests can script per-device compile/import/query behavior.
class MockPluginBase : public ov::IPlugin {
public:
// compile_model: plain overload and remote-context overload.
MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, compile_model, ((const std::shared_ptr<const ov::Model>&), (const ov::AnyMap&)), (const, override));
MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, compile_model,
((const std::shared_ptr<const ov::Model>&), (const ov::AnyMap&), (const ov::SoPtr<ov::IRemoteContext>&)), (const, override));
MOCK_METHOD(void, set_property, (const AnyMap&), (override));
MOCK_METHOD(ov::Any, get_property, ((const std::string&), (const ov::AnyMap&)), (const, override));
MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, create_context, (const ov::AnyMap&), (const, override));
MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_default_context, (const ov::AnyMap&), (const, override));
// import_model: stream-only overload and stream+context overload.
MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, import_model, ((std::istream&), (const ov::AnyMap&)), (const, override));
MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, import_model,
((std::istream&), (const ov::SoPtr<ov::IRemoteContext>&), (const ov::AnyMap&)), (const, override));
MOCK_METHOD(ov::SupportedOpsMap, query_model, ((const std::shared_ptr<const ov::Model>&), (const ov::AnyMap&)), (const, override));
};
// Mock compiled model: forwards the model/plugin pair to ICompiledModel and
// mocks every virtual so tests control request creation and properties.
class MockCompiledModel : public ICompiledModel {
public:
MockCompiledModel(const std::shared_ptr<const ov::Model>& model, const std::shared_ptr<const ov::IPlugin>& plugin)
: ICompiledModel(model, plugin) {}
MOCK_METHOD(std::shared_ptr<ISyncInferRequest>, create_sync_infer_request, (), (const, override));
MOCK_METHOD(Any, get_property, (const std::string&), (const, override));
MOCK_METHOD(void, set_property, (const AnyMap&), (override));
MOCK_METHOD(void, export_model, (std::ostream&), (const, override));
MOCK_METHOD(std::shared_ptr<const Model>, get_runtime_model, (), (const, override));
MOCK_METHOD(std::shared_ptr<IAsyncInferRequest>, create_infer_request, (), (const, override));
};
// Declaration-only async request mock; when ifThrow is set its pipeline is
// expected to fail, simulating a device-side inference error (constructor
// implementation not visible in this header).
class MockAsyncInferRequest : public IAsyncInferRequest {
public:
MockAsyncInferRequest(const std::shared_ptr<IInferRequest>& request,
const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
const std::shared_ptr<ov::threading::ITaskExecutor>& callback_executor,
bool ifThrow);
private:
// Failure switch captured at construction time.
bool m_throw;
};
// Mock sync infer request bound to a MockCompiledModel; only the members the
// tests actually exercise are mocked, the rest stay commented out below.
class MockSyncInferRequest : public ISyncInferRequest {
public:
// Constructs the request for the given mock compiled model (implementation
// not visible in this header).
MockSyncInferRequest(const std::shared_ptr<const MockCompiledModel>& compiled_model);
MOCK_METHOD(std::vector<ov::ProfilingInfo>, get_profiling_info, (), (const, override));
//MOCK_METHOD(Tensor, get_tensor, (const Output<const Node>&), (const, override));
//MOCK_METHOD(void, set_tensor, (const Output<const Node>&, const Tensor&), (override));
//MOCK_METHOD(std::vector<Tensor>, get_tensors, (const Output<const Node>&), (const, override));
//MOCK_METHOD(void, set_tensors, (const Output<const Node>&, const std::vector<Tensor>&), (override));
MOCK_METHOD(void, infer, (), (override));
MOCK_METHOD(std::vector<ov::SoPtr<IVariableState>>, query_state, (), (const, override));
//MOCK_METHOD(const std::shared_ptr<const ICompiledModel>&, get_compiled_model, (), (const, override));
//MOCK_METHOD(const std::vector<Output<const Node>>&, get_inputs, (), (const, override));
//MOCK_METHOD(const std::vector<Output<const Node>>&, get_outputs, (), (const, override));
//MOCK_METHOD(void, check_tensors, (), (const, override));
~MockSyncInferRequest() = default;
private:
// Helper that (re)allocates a tensor of the requested type/shape.
void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor, const ov::element::Type& element_type, const ov::Shape& shape);
};
// Remote-tensor stub: carries only a device name and property map. All
// shape/type/stride accessors are unimplemented because tests never call them.
class MockRemoteTensor : public ov::IRemoteTensor {
ov::AnyMap m_properties;
std::string m_dev_name;
public:
MockRemoteTensor(const std::string& name, const ov::AnyMap& props) : m_properties(props), m_dev_name(name) {}
const ov::AnyMap& get_properties() const override {
return m_properties;
}
const std::string& get_device_name() const override {
return m_dev_name;
}
// Not needed by the tests -- deliberately unimplemented.
void set_shape(ov::Shape shape) override {
OPENVINO_NOT_IMPLEMENTED;
}
const ov::element::Type& get_element_type() const override {
OPENVINO_NOT_IMPLEMENTED;
}
const ov::Shape& get_shape() const override {
OPENVINO_NOT_IMPLEMENTED;
}
const ov::Strides& get_strides() const override {
OPENVINO_NOT_IMPLEMENTED;
}
};
// Minimal remote-context stub: reports a fixed device name and hands out
// MockRemoteTensor instances carrying this context's default property map.
class MockRemoteContext : public ov::IRemoteContext {
    ov::AnyMap m_property = {{"IS_DEFAULT", true}};
    std::string m_dev_name;

public:
    MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {}

    const std::string& get_device_name() const override {
        return m_dev_name;
    }

    // Property introspection is not exercised by the tests.
    const ov::AnyMap& get_property() const override {
        OPENVINO_NOT_IMPLEMENTED;
    }

    // type/shape/params are ignored: the produced tensor only echoes the
    // context's device name and default properties.
    ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                               const ov::Shape& shape,
                                               const ov::AnyMap& params = {}) override {
        return {std::make_shared<MockRemoteTensor>(m_dev_name, m_property), nullptr};
    }
};
} // namespace ov

View File

@ -4,15 +4,15 @@
#pragma once
#include <gmock/gmock.h>
#include "utils/log.hpp"
namespace ov {
namespace mock_auto_plugin {
class MockLog : public Log {
public:
MOCK_METHOD(void, print, (std::stringstream& stream), (override));
MockLog(std::string unittest):Log(unittest) {
}
MOCK_METHOD(void, print, (std::stringstream & stream), (override));
MockLog(std::string unittest) : Log(unittest) {}
static MockLog* get_instance() {
if (m_mocklog == NULL) {
m_mocklog = new MockLog("unittest");
@ -27,5 +27,5 @@ public:
}
static MockLog* m_mocklog;
};
}// namespace mock_auto_plugin
} //namespace ov
} // namespace mock_auto_plugin
} // namespace ov

View File

@ -7,16 +7,15 @@
using Config = std::map<std::string, std::string>;
using namespace ov::mock_auto_plugin;
using PriorityParams = std::tuple<unsigned int, std::string>; //{modelpriority, deviceUniquName}
using PriorityParams = std::tuple<unsigned int, std::string>; //{modelpriority, deviceUniquName}
using ConfigParams = std::tuple<
std::string, // netPrecision
bool, // enable device priority
std::vector<PriorityParams> // {{modelpriority, expect device unique_name}}
>;
using ConfigParams = std::tuple<std::string, // netPrecision
bool, // enable device priority
std::vector<PriorityParams> // {{modelpriority, expect device unique_name}}
>;
class KeyNetworkPriorityTest : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
std::vector<DeviceInformation> metaDevices;
std::vector<DeviceInformation> metaDevices;
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
@ -31,8 +30,8 @@ public:
result << "_enableDevicePriority_false";
}
for (auto& item : PriorityConfigs) {
result << "_priority_" << std::get<0>(item);
result << "_return_" << std::get<1>(item);
result << "_priority_" << std::get<0>(item);
result << "_return_" << std::get<1>(item);
}
result << "netPrecision_" << netPrecision;
return result.str();
@ -45,9 +44,9 @@ public:
void SetUp() override {
std::tie(netPrecision, enableDevicePriority, PriorityConfigs) = GetParam();
sizeOfConfigs = static_cast<int>(PriorityConfigs.size());
std::vector<std::string> gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"};
ON_CALL(*core, get_property(HasSubstr("GPU"),
StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability));
std::vector<std::string> gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN"};
ON_CALL(*core, get_property(HasSubstr("GPU"), StrEq(ov::device::capabilities.name()), _))
.WillByDefault(RETURN_MOCK_VALUE(gpuCability));
std::vector<std::string> otherCability = {"INT8"};
ON_CALL(*core, get_property(HasSubstr("OTHER"), StrEq(ov::device::capabilities.name()), _))
@ -69,14 +68,14 @@ TEST_P(KeyNetworkPriorityTest, SelectDevice) {
std::vector<DeviceInformation> resDevInfo;
if (enableDevicePriority) {
metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0},
{"GPU.0", {}, 2, "01", "iGPU_01", 1},
{"GPU.1", {}, 2, "01", "dGPU_01", 2},
{"OTHER", {}, 2, "01", "OTHER_01", 3}};
{"GPU.0", {}, 2, "01", "iGPU_01", 1},
{"GPU.1", {}, 2, "01", "dGPU_01", 2},
{"OTHER", {}, 2, "01", "OTHER_01", 3}};
} else {
metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0},
{"GPU.0", {}, 2, "01", "iGPU_01", 0},
{"GPU.1", {}, 2, "01", "dGPU_01", 0},
{"OTHER", {}, 2, "01", "OTHER_01", 0}};
{"GPU.0", {}, 2, "01", "iGPU_01", 0},
{"GPU.1", {}, 2, "01", "dGPU_01", 0},
{"OTHER", {}, 2, "01", "OTHER_01", 0}};
}
EXPECT_CALL(*plugin, select_device(_, _, _)).Times(sizeOfConfigs);
@ -110,16 +109,16 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) {
// selectdevice in multi threads, and UnregisterPriority them all, should not affect the
// Priority Map
for (auto& item : PriorityConfigs) {
unsigned int priority = std::get<0>(item);
auto future = std::async(std::launch::async, [this, priority] {
auto deviceInfo = plugin->select_device(metaDevices, netPrecision, priority);
plugin->unregister_priority(priority, deviceInfo.unique_name);
});
futureVect.push_back(std::move(future));
unsigned int priority = std::get<0>(item);
auto future = std::async(std::launch::async, [this, priority] {
auto deviceInfo = plugin->select_device(metaDevices, netPrecision, priority);
plugin->unregister_priority(priority, deviceInfo.unique_name);
});
futureVect.push_back(std::move(future));
}
for (auto& item : futureVect) {
item.get();
item.get();
}
for (auto& item : PriorityConfigs) {
@ -138,152 +137,206 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) {
// {netPrecision, enableDevicePriority, PriorityParamsVector{{modelpriority, expect device unique_name}}}
const std::vector<ConfigParams> testConfigs = {
ConfigParams {"FP32", false, {PriorityParams {0, "dGPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {2, "CPU_01"}}},
ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"},
PriorityParams {3, "iGPU_01"},
PriorityParams {4, "CPU_01"}}},
ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"},
PriorityParams {0, "dGPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {2, "iGPU_01"}}},
ConfigParams {"FP32", false, {PriorityParams {2, "dGPU_01"},
PriorityParams {0, "dGPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {3, "CPU_01"}}},
ConfigParams {"FP32", false, {PriorityParams {0, "dGPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {0, "dGPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"}}},
ConfigParams {"INT8", false, {PriorityParams {0, "OTHER_01"},
PriorityParams {1, "CPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {2, "CPU_01"}}},
ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"},
PriorityParams {3, "CPU_01"},
PriorityParams {4, "CPU_01"},
PriorityParams {5, "CPU_01"}}},
ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"},
PriorityParams {0, "OTHER_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {2, "CPU_01"}}},
ConfigParams {"INT8", false, {PriorityParams {2, "OTHER_01"},
PriorityParams {0, "OTHER_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"}}},
ConfigParams {"INT8", false, {PriorityParams {0, "OTHER_01"},
PriorityParams {1, "CPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"},
PriorityParams {0, "OTHER_01"},
PriorityParams {1, "CPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"}}},
ConfigParams {"BIN", false, {PriorityParams {0, "dGPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {2, "CPU_01"}}},
ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"},
PriorityParams {3, "iGPU_01"},
PriorityParams {4, "CPU_01"},
PriorityParams {5, "CPU_01"}}},
ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"},
PriorityParams {0, "dGPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {2, "iGPU_01"}}},
ConfigParams {"BIN", false, {PriorityParams {2, "dGPU_01"},
PriorityParams {0, "dGPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {3, "CPU_01"}}},
ConfigParams {"BIN", false, {PriorityParams {0, "dGPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"},
PriorityParams {0, "dGPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"}}},
ConfigParams{"FP32",
false,
{PriorityParams{0, "dGPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{2, "CPU_01"}}},
ConfigParams{"FP32",
false,
{PriorityParams{2, "dGPU_01"}, PriorityParams{3, "iGPU_01"}, PriorityParams{4, "CPU_01"}}},
ConfigParams{"FP32",
false,
{PriorityParams{2, "dGPU_01"},
PriorityParams{0, "dGPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{2, "iGPU_01"}}},
ConfigParams{"FP32",
false,
{PriorityParams{2, "dGPU_01"},
PriorityParams{0, "dGPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{3, "CPU_01"}}},
ConfigParams{"FP32",
false,
{PriorityParams{0, "dGPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{0, "dGPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "CPU_01"}}},
ConfigParams{"INT8",
false,
{PriorityParams{0, "OTHER_01"},
PriorityParams{1, "CPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{2, "CPU_01"}}},
ConfigParams{"INT8",
false,
{PriorityParams{2, "OTHER_01"},
PriorityParams{3, "CPU_01"},
PriorityParams{4, "CPU_01"},
PriorityParams{5, "CPU_01"}}},
ConfigParams{"INT8",
false,
{PriorityParams{2, "OTHER_01"},
PriorityParams{0, "OTHER_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{2, "CPU_01"}}},
ConfigParams{"INT8",
false,
{PriorityParams{2, "OTHER_01"},
PriorityParams{0, "OTHER_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{3, "CPU_01"}}},
ConfigParams{"INT8",
false,
{PriorityParams{0, "OTHER_01"},
PriorityParams{1, "CPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{3, "CPU_01"},
PriorityParams{0, "OTHER_01"},
PriorityParams{1, "CPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{3, "CPU_01"}}},
ConfigParams{"BIN",
false,
{PriorityParams{0, "dGPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{2, "CPU_01"}}},
ConfigParams{"BIN",
false,
{PriorityParams{2, "dGPU_01"},
PriorityParams{3, "iGPU_01"},
PriorityParams{4, "CPU_01"},
PriorityParams{5, "CPU_01"}}},
ConfigParams{"BIN",
false,
{PriorityParams{2, "dGPU_01"},
PriorityParams{0, "dGPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{2, "iGPU_01"}}},
ConfigParams{"BIN",
false,
{PriorityParams{2, "dGPU_01"},
PriorityParams{0, "dGPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{3, "CPU_01"}}},
ConfigParams{"BIN",
false,
{PriorityParams{0, "dGPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{3, "CPU_01"},
PriorityParams{0, "dGPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "CPU_01"},
PriorityParams{3, "CPU_01"}}},
// metaDevices = {{ov::test::utils::DEVICE_CPU, {}, 2, "", "CPU_01", 0},
// {ov::test::utils::DEVICE_GPU, {}, 2, "01", "iGPU_01", 1},
// {ov::test::utils::DEVICE_GPU, {}, 2, "01", "dGPU_01", 2},
// cpu > igpu > dgpu > OTHER
ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
PriorityParams {2, "dGPU_01"}}},
ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"},
PriorityParams {3, "iGPU_01"},
PriorityParams {4, "dGPU_01"}}},
ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {2, "iGPU_01"}}},
ConfigParams {"FP32", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {3, "dGPU_01"}}},
ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"}}},
ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "OTHER_01"},
PriorityParams {2, "OTHER_01"},
PriorityParams {2, "OTHER_01"}}},
ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"},
PriorityParams {3, "OTHER_01"},
PriorityParams {4, "OTHER_01"},
PriorityParams {5, "OTHER_01"}}},
ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {2, "OTHER_01"},
PriorityParams {2, "OTHER_01"}}},
ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {2, "OTHER_01"},
PriorityParams {3, "OTHER_01"}}},
ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "OTHER_01"},
PriorityParams {2, "OTHER_01"},
PriorityParams {3, "OTHER_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {1, "OTHER_01"},
PriorityParams {2, "OTHER_01"},
PriorityParams {3, "OTHER_01"}}},
ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
PriorityParams {2, "dGPU_01"}}},
ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"},
PriorityParams {3, "iGPU_01"},
PriorityParams {4, "dGPU_01"},
PriorityParams {5, "dGPU_01"}}},
ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {2, "iGPU_01"}}},
ConfigParams {"BIN", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {2, "iGPU_01"},
PriorityParams {3, "dGPU_01"}}},
ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
PriorityParams {3, "dGPU_01"},
PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
PriorityParams {3, "dGPU_01"}}}
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, KeyNetworkPriorityTest,
::testing::ValuesIn(testConfigs),
KeyNetworkPriorityTest::getTestCaseName);
ConfigParams{"FP32",
true,
{PriorityParams{0, "CPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "dGPU_01"},
PriorityParams{2, "dGPU_01"}}},
ConfigParams{"FP32",
true,
{PriorityParams{2, "CPU_01"}, PriorityParams{3, "iGPU_01"}, PriorityParams{4, "dGPU_01"}}},
ConfigParams{"FP32",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{2, "iGPU_01"}}},
ConfigParams{"FP32",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{3, "dGPU_01"}}},
ConfigParams{"FP32",
true,
{PriorityParams{0, "CPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "dGPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "dGPU_01"}}},
ConfigParams{"INT8",
true,
{PriorityParams{0, "CPU_01"},
PriorityParams{1, "OTHER_01"},
PriorityParams{2, "OTHER_01"},
PriorityParams{2, "OTHER_01"}}},
ConfigParams{"INT8",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{3, "OTHER_01"},
PriorityParams{4, "OTHER_01"},
PriorityParams{5, "OTHER_01"}}},
ConfigParams{"INT8",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{2, "OTHER_01"},
PriorityParams{2, "OTHER_01"}}},
ConfigParams{"INT8",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{2, "OTHER_01"},
PriorityParams{3, "OTHER_01"}}},
ConfigParams{"INT8",
true,
{PriorityParams{0, "CPU_01"},
PriorityParams{1, "OTHER_01"},
PriorityParams{2, "OTHER_01"},
PriorityParams{3, "OTHER_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{1, "OTHER_01"},
PriorityParams{2, "OTHER_01"},
PriorityParams{3, "OTHER_01"}}},
ConfigParams{"BIN",
true,
{PriorityParams{0, "CPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "dGPU_01"},
PriorityParams{2, "dGPU_01"}}},
ConfigParams{"BIN",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{3, "iGPU_01"},
PriorityParams{4, "dGPU_01"},
PriorityParams{5, "dGPU_01"}}},
ConfigParams{"BIN",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{2, "iGPU_01"}}},
ConfigParams{"BIN",
true,
{PriorityParams{2, "CPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{2, "iGPU_01"},
PriorityParams{3, "dGPU_01"}}},
ConfigParams{"BIN",
true,
{PriorityParams{0, "CPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "dGPU_01"},
PriorityParams{3, "dGPU_01"},
PriorityParams{0, "CPU_01"},
PriorityParams{1, "iGPU_01"},
PriorityParams{2, "dGPU_01"},
PriorityParams{3, "dGPU_01"}}}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
KeyNetworkPriorityTest,
::testing::ValuesIn(testConfigs),
KeyNetworkPriorityTest::getTestCaseName);

View File

@ -0,0 +1,84 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "include/auto_unit_test.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp"
using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<bool, ov::AnyMap>;
// Fixture checking that objects handed out by AUTO/MULTI compiled models
// (input tensors, variable states) share the SoPtr shared-object handle of
// the underlying device objects, i.e. keep the device library alive.
class AutoLifeTimeTest : public tests::AutoTest, public ::testing::Test {
public:
void SetUp() override {
plugin->set_device_name("AUTO");
// Tag the actual-device compiled model with a distinct shared-object handle
// so tests can verify returned objects reference exactly this handle.
mock_compiled_model = {mockIExeNetActual, std::make_shared<std::string>("for test")};
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>("GPU.0"),
_))
.WillByDefault(Return(mock_compiled_model));
// One variable state, also carrying its own shared-object handle.
mock_states = {ov::SoPtr<ov::IVariableState>(std::make_shared<NiceMock<ov::MockIVariableState>>(),
std::make_shared<std::string>("for test"))};
EXPECT_CALL(*inferReqInternalActual, query_state()).WillRepeatedly(Return(mock_states));
}
void TearDown() override {
// The SoPtr handles above intentionally outlive the test body; silence
// gmock's leak detector for the mocks they hold.
testing::Mock::AllowLeak(mock_states.front()._ptr.get());
testing::Mock::AllowLeak(inferReqInternalActual.get());
}
protected:
// Compiled model returned for device "GPU.0", wrapped with its own so handle.
ov::SoPtr<ov::MockICompiledModel> mock_compiled_model;
// Variable states reported by the device infer request.
std::vector<ov::SoPtr<ov::IVariableState>> mock_states;
};
// Every input tensor obtained from an AUTO request must share the
// shared-object handle of the underlying device compiled model.
TEST_F(AutoLifeTimeTest, loaded_tensor) {
    config.insert(ov::device::priorities("GPU.0"));
    std::shared_ptr<ov::ICompiledModel> compiled_model;
    ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config));
    auto request = compiled_model->create_infer_request();
    const auto& expected_so = mock_compiled_model._so;
    for (const auto& input : request->get_inputs()) {
        ASSERT_EQ(request->get_tensor(input)._so, expected_so);
    }
}
// Variable states surfaced through an AUTO request must share the
// shared-object handle of the device request that produced them.
TEST_F(AutoLifeTimeTest, loaded_states) {
    config.insert(ov::device::priorities("GPU.0"));
    std::shared_ptr<ov::ICompiledModel> compiled_model;
    ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config));
    auto request = compiled_model->create_infer_request();
    const auto expected_so = mock_states.front()._so;
    for (const auto& state : request->query_state()) {
        ASSERT_EQ(state._so, expected_so);
    }
}
// Same tensor-lifetime guarantee as loaded_tensor, but with the plugin
// operating under the MULTI device name.
TEST_F(AutoLifeTimeTest, loaded_tensor_multi) {
    plugin->set_device_name("MULTI");
    config.insert(ov::device::priorities("GPU.0"));
    std::shared_ptr<ov::ICompiledModel> compiled_model;
    ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config));
    auto request = compiled_model->create_infer_request();
    const auto& expected_so = mock_compiled_model._so;
    for (const auto& input : request->get_inputs()) {
        ASSERT_EQ(request->get_tensor(input)._so, expected_so);
    }
}
// State-lifetime guarantee must also hold in cumulative-throughput mode with
// device buffer binding enabled.
TEST_F(AutoLifeTimeTest, loaded_states_bind_buffer) {
    config.insert(ov::device::priorities("GPU.0"));
    config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
    config.insert(ov::intel_auto::device_bind_buffer(true));
    std::shared_ptr<ov::ICompiledModel> compiled_model;
    ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, config));
    auto request = compiled_model->create_infer_request();
    const auto expected_so = mock_states.front()._so;
    for (const auto& state : request->query_state()) {
        ASSERT_EQ(state._so, expected_so);
    }
}

View File

@ -2,11 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "utils/log_util.hpp"
#include <gtest/gtest.h>
#include <regex>
#include "utils/log_util.hpp"
using namespace ov::mock_auto_plugin;
using ::testing::_;
class LogUtilsFormatTest : public ::testing::Test {
@ -19,7 +20,7 @@ public:
MockLog::release();
}
void traceCallStacksTest(){
void traceCallStacksTest() {
TraceCallStacks("test");
}
};
@ -34,8 +35,8 @@ TEST_F(LogUtilsFormatTest, format_s) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%sabc", "DEBUG");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -45,8 +46,8 @@ TEST_F(LogUtilsFormatTest, format_d) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%dabc", -1);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -57,8 +58,8 @@ TEST_F(LogUtilsFormatTest, format_ld) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%ldabc", -3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -69,8 +70,8 @@ TEST_F(LogUtilsFormatTest, format_u) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%uabc", 1);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -81,8 +82,8 @@ TEST_F(LogUtilsFormatTest, format_lu) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%luabc", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -93,8 +94,8 @@ TEST_F(LogUtilsFormatTest, format_s_d_ld_u_lu) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%s,%d,%ld,%u,%lu,abc", "DEBUG", -1, -3, 1, 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -105,8 +106,8 @@ TEST_F(LogUtilsFormatTest, format_s_d_ld_u_lu2) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%s%d%ld%u%luabc", "DEBUG", -1, -3, 1, 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -117,8 +118,8 @@ TEST_F(LogUtilsFormatTest, format_lf) {
std::string pattern{"abc"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%lfabc", 1.33);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -129,8 +130,8 @@ TEST_F(LogUtilsFormatTest, format_p) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%p", MockLog::m_mocklog);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -141,8 +142,8 @@ TEST_F(LogUtilsFormatTest, format_x) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%x", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -153,8 +154,8 @@ TEST_F(LogUtilsFormatTest, format_X) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%X", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -165,8 +166,8 @@ TEST_F(LogUtilsFormatTest, format_o) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%o", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -177,8 +178,8 @@ TEST_F(LogUtilsFormatTest, format_e) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%e", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -189,8 +190,8 @@ TEST_F(LogUtilsFormatTest, format_E) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%E", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -201,8 +202,8 @@ TEST_F(LogUtilsFormatTest, format_f) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%f", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -213,8 +214,8 @@ TEST_F(LogUtilsFormatTest, format_F) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%F", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -225,8 +226,8 @@ TEST_F(LogUtilsFormatTest, format_g) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%g", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -237,21 +238,20 @@ TEST_F(LogUtilsFormatTest, format_G) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%G", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
}
TEST_F(LogUtilsFormatTest, format_a) {
std::string printResult = "";
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%a", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -262,8 +262,8 @@ TEST_F(LogUtilsFormatTest, format_A) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%A", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -274,8 +274,8 @@ TEST_F(LogUtilsFormatTest, format_c) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%c", 3);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -287,8 +287,8 @@ TEST_F(LogUtilsFormatTest, format_n) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%n", &num);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -299,8 +299,8 @@ TEST_F(LogUtilsFormatTest, format__) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%%");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -311,8 +311,8 @@ TEST_F(LogUtilsFormatTest, format_s__) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%s%%", "DEBUG");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -324,8 +324,8 @@ TEST_F(LogUtilsFormatTest, format_dn) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("%d%n", num, &num);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -337,8 +337,8 @@ TEST_F(LogUtilsFormatTest, format_ccccdn) {
std::string pattern{"not valid"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("cccc%d%n", num, &num);
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -349,8 +349,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_error) {
std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]ERROR\\[.+:[0-9]+\\].*"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_ERROR("test");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -361,8 +361,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_warning) {
std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]W\\[.+:[0-9]+\\].*"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_WARNING("test");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -373,8 +373,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_info) {
std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]I\\[.+:[0-9]+\\].*"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_INFO("test");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -385,8 +385,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_debug) {
std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]D\\[.+:[0-9]+\\].*"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_DEBUG("test");
EXPECT_TRUE(std::regex_search(printResult, regex));
@ -398,8 +398,8 @@ TEST_F(LogUtilsFormatTest, logPrintFormat_trace) {
std::string pattern{"\\[[0-9]+:[0-9]+:[0-9]+\\.[0-9]+\\]T\\[.+:[0-9]+\\].*"};
std::regex regex(pattern);
ON_CALL(*(HLogger), print(_)).WillByDefault([&](std::stringstream& stream) {
printResult = stream.str();
});
printResult = stream.str();
});
EXPECT_CALL(*(HLogger), print(_)).Times(1);
LOG_TRACE(true, "test", "TRACE");
EXPECT_TRUE(std::regex_search(printResult, regex));

View File

@ -2,31 +2,31 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "utils/log_util.hpp"
#include <gtest/gtest.h>
#include <future>
#include "utils/log_util.hpp"
using ::testing::_;
using namespace ov::mock_auto_plugin;
// disable using windows.h
#if 0
#if defined(_WIN32)
#include <windows.h>
#elif defined(__linux__)
#include <stdlib.h>
#elif defined(__APPLE__)
#include <stdlib.h>
#else
#endif
# if defined(_WIN32)
# include <windows.h>
# elif defined(__linux__)
# include <stdlib.h>
# elif defined(__APPLE__)
# include <stdlib.h>
# else
# endif
#endif
MockLog* MockLog::m_mocklog = NULL;
using ConfigParams = std::tuple<
std::string, // logLevel
std::string, // envlogLevel
int // expectCallNum
>;
using ConfigParams = std::tuple<std::string, // logLevel
std::string, // envlogLevel
int // expectCallNum
>;
class LogUtilsTest : public ::testing::TestWithParam<ConfigParams> {
public:
std::string _logLevel;
@ -40,21 +40,20 @@ public:
int expectCallNum;
std::tie(logLevel, envLogLevel, expectCallNum) = obj.param;
std::ostringstream result;
result << "logLevel_" << logLevel << "_expectCallNum_" << expectCallNum
<< "envlogLevel" << envLogLevel;
result << "logLevel_" << logLevel << "_expectCallNum_" << expectCallNum << "envlogLevel" << envLogLevel;
return result.str();
}
#if 0
void SetTestEnv(std::string key, std::string value) {
#ifdef WIN32
# ifdef WIN32
SetEnvironmentVariable(key.c_str(), value.c_str());
#elif defined(__linux__)
# elif defined(__linux__)
::setenv(key.c_str(), value.c_str(), true);
#elif defined(__APPLE__)
# elif defined(__APPLE__)
::setenv(key.c_str(), value.c_str(), true);
#else
#endif
# else
# endif
}
#endif
void SetUp() override {
@ -88,9 +87,10 @@ TEST_P(LogUtilsTest, set_log_level) {
TEST_P(LogUtilsTest, INFO_RUN) {
set_log_level(_logLevel);
int a = 0;
INFO_RUN([&a](){a++;});
if (_logLevel == "LOG_INFO" || _logLevel == "LOG_DEBUG" ||
_logLevel == "LOG_TRACE") {
INFO_RUN([&a]() {
a++;
});
if (_logLevel == "LOG_INFO" || _logLevel == "LOG_DEBUG" || _logLevel == "LOG_TRACE") {
EXPECT_EQ(a, 1);
} else {
EXPECT_EQ(a, 0);
@ -100,7 +100,9 @@ TEST_P(LogUtilsTest, INFO_RUN) {
TEST_P(LogUtilsTest, DEBUG_RUN) {
set_log_level(_logLevel);
int a = 0;
DEBUG_RUN([&a](){a++;});
DEBUG_RUN([&a]() {
a++;
});
if (_logLevel == "LOG_DEBUG" || _logLevel == "LOG_TRACE") {
EXPECT_EQ(a, 1);
} else {
@ -117,10 +119,10 @@ TEST_P(LogUtilsTest, setEnvNotAffectset_log_level) {
}
#endif
//can not test ENV case. because of the ENV variable is readed at the
//beginning of test application and modify it in runtime is not valid
//still need to test it in different platform manully
//TEST_P(LogUtilsTest, setEnvLogLevel) {
// can not test ENV case. because of the ENV variable is readed at the
// beginning of test application and modify it in runtime is not valid
// still need to test it in different platform manully
// TEST_P(LogUtilsTest, setEnvLogLevel) {
// SetTestEnv("AUTO_LOG_LEVEL", _envLogLevel);
// EXPECT_CALL(*(HLogger), print(_)).Times(_expectCallNum);
// printLog();
@ -132,8 +134,8 @@ TEST(smoke_Auto_BehaviorTests, LogUtilsSingleton) {
std::shared_ptr<Log> instanceVector[20];
for (unsigned int i = 0; i < 20; i++) {
auto future = std::async(std::launch::async, [&instanceVector, i] {
instanceVector[i] = Log::instance();
});
instanceVector[i] = Log::instance();
});
futureVect.push_back(std::move(future));
}
@ -143,20 +145,19 @@ TEST(smoke_Auto_BehaviorTests, LogUtilsSingleton) {
for (unsigned int i = 0; i < 19; i++) {
EXPECT_NE(instanceVector[i].get(), nullptr);
EXPECT_EQ(instanceVector[i].get(), instanceVector[i+1].get());
EXPECT_EQ(instanceVector[i].get(), instanceVector[i + 1].get());
}
}
const std::vector<ConfigParams> testConfigs =
{ConfigParams {"LOG_NONE", "0", 0},
ConfigParams {"LOG_NONE", "1", 0},
ConfigParams {"LOG_ERROR", "2", 2},
ConfigParams {"LOG_WARNING", "3", 4},
ConfigParams {"LOG_INFO", "4", 6},
ConfigParams {"LOG_DEBUG", "5", 8},
ConfigParams {"LOG_TRACE", "6", 10}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, LogUtilsTest,
::testing::ValuesIn(testConfigs),
LogUtilsTest::getTestCaseName);
const std::vector<ConfigParams> testConfigs = {ConfigParams{"LOG_NONE", "0", 0},
ConfigParams{"LOG_NONE", "1", 0},
ConfigParams{"LOG_ERROR", "2", 2},
ConfigParams{"LOG_WARNING", "3", 4},
ConfigParams{"LOG_INFO", "4", 6},
ConfigParams{"LOG_DEBUG", "5", 8},
ConfigParams{"LOG_TRACE", "6", 10}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
LogUtilsTest,
::testing::ValuesIn(testConfigs),
LogUtilsTest::getTestCaseName);

View File

@ -1,62 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "include/mock_common.hpp"
#include "openvino/runtime/make_tensor.hpp"
// getMetric will return a fake ov::Any, gmock will call ostreamer << ov::Any
// it will cause core dump, so add this special implemented
namespace testing {
namespace internal {
template<>
void PrintTo<ov::Any>(const ov::Any& a, std::ostream* os) {
*os << "using custom PrintTo ov::Any";
}
}
}
namespace ov {
MockAsyncInferRequest::MockAsyncInferRequest(const std::shared_ptr<IInferRequest>& request,
const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
const std::shared_ptr<ov::threading::ITaskExecutor>& callback_executor,
bool ifThrow)
: IAsyncInferRequest(request, task_executor, callback_executor), m_throw(ifThrow) {
m_pipeline = {};
m_pipeline.push_back({task_executor,
[this] {
if (m_throw)
OPENVINO_THROW("runtime inference failure");
} });
}
void MockSyncInferRequest::allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor, const element::Type& element_type, const Shape& shape) {
if (!tensor || tensor->get_element_type() != element_type) {
tensor = ov::make_tensor(element_type, shape);
} else {
tensor->set_shape(shape);
}
}
MockSyncInferRequest::MockSyncInferRequest(const std::shared_ptr<const MockCompiledModel>& compiled_model)
: ov::ISyncInferRequest(compiled_model) {
OPENVINO_ASSERT(compiled_model);
// Allocate input/output tensors
for (const auto& input : get_inputs()) {
allocate_tensor(input, [this, input](ov::SoPtr<ov::ITensor>& tensor) {
// Can add a check to avoid double work in case of shared tensors
allocate_tensor_impl(tensor,
input.get_element_type(),
input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape());
});
}
for (const auto& output : get_outputs()) {
allocate_tensor(output, [this, output](ov::SoPtr<ov::ITensor>& tensor) {
// Can add a check to avoid double work in case of shared tensors
allocate_tensor_impl(tensor,
output.get_element_type(),
output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape());
});
}
}
} //namespace ov

View File

@ -38,18 +38,18 @@ public:
void SetUp() override {
ON_CALL(*core, get_supported_property(StrEq("INVALID_DEVICE"), _)).WillByDefault(Throw(ov::Exception("")));
ON_CALL(*core, get_property(StrEq("GPU.2"), ov::supported_properties.name(), _))
.WillByDefault(Throw(ov::Exception("")));
ON_CALL(*plugin, parse_meta_devices).WillByDefault([this](const std::string& priorityDevices,
const ov::AnyMap& config) {
return plugin->Plugin::parse_meta_devices(priorityDevices, config);
});
std::tie(priorityDevices, metaDevices, throwException, expectedTimes) = GetParam();
.WillByDefault(Throw(ov::Exception("")));
ON_CALL(*plugin, parse_meta_devices)
.WillByDefault([this](const std::string& priorityDevices, const ov::AnyMap& config) {
return plugin->Plugin::parse_meta_devices(priorityDevices, config);
});
std::tie(priorityDevices, metaDevices, throwException, expectedTimes) = GetParam();
}
void compare(std::vector<DeviceInformation>& result, std::vector<DeviceInformation>& expect) {
EXPECT_EQ(result.size(), expect.size());
if (result.size() == expect.size()) {
for (unsigned int i = 0 ; i < result.size(); i++) {
for (unsigned int i = 0; i < result.size(); i++) {
EXPECT_EQ(result[i].device_name, expect[i].device_name);
EXPECT_EQ(result[i].unique_name, expect[i].unique_name);
EXPECT_EQ(result[i].num_requests_per_devices, expect[i].num_requests_per_devices);
@ -61,7 +61,7 @@ public:
void compareDevicePriority(std::vector<DeviceInformation>& result, std::vector<DeviceInformation>& expect) {
EXPECT_EQ(result.size(), expect.size());
if (result.size() == expect.size()) {
for (unsigned int i = 0 ; i < result.size(); i++) {
for (unsigned int i = 0; i < result.size(); i++) {
EXPECT_EQ(result[i].device_priority, expect[i].device_priority);
}
}
@ -84,9 +84,9 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesWithPriority) {
if (throwException) {
ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {}));
} else {
auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)});
compare(result, metaDevices);
compareDevicePriority(result, metaDevices);
auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)});
compare(result, metaDevices);
compareDevicePriority(result, metaDevices);
}
}
@ -97,16 +97,16 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesNotWithPriority) {
if (throwException) {
ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {}));
} else {
auto result = plugin->parse_meta_devices(priorityDevices, {});
compare(result, metaDevices);
for (unsigned int i = 0 ; i < result.size(); i++) {
EXPECT_EQ(result[i].device_priority, 0);
}
auto result2 = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities("")});
compare(result2, metaDevices);
for (unsigned int i = 0 ; i < result.size(); i++) {
EXPECT_EQ(result2[i].device_priority, 0);
}
auto result = plugin->parse_meta_devices(priorityDevices, {});
compare(result, metaDevices);
for (unsigned int i = 0; i < result.size(); i++) {
EXPECT_EQ(result[i].device_priority, 0);
}
auto result2 = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities("")});
compare(result2, metaDevices);
for (unsigned int i = 0; i < result.size(); i++) {
EXPECT_EQ(result2[i].device_priority, 0);
}
}
}
@ -119,9 +119,9 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) {
if (throwException) {
ASSERT_ANY_THROW(plugin->parse_meta_devices(priorityDevices, {}));
} else {
auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)});
compare(result, metaDevices);
compareDevicePriority(result, metaDevices);
auto result = plugin->parse_meta_devices(priorityDevices, {ov::device::priorities(priorityDevices)});
compare(result, metaDevices);
compareDevicePriority(result, metaDevices);
}
}
// ConfigParams details
@ -129,11 +129,7 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) {
// ConfigParams {devicePriority, expect metaDevices, ifThrowException}
const std::vector<ConfigParams> testConfigs = {
ConfigParams{"CPU,GPU.2,OTHER",
{{"CPU", {}, -1, "", "CPU_", 0},
{"OTHER", {}, -1, "", "OTHER_", 2}},
false,
3},
ConfigParams{"CPU,GPU.2,OTHER", {{"CPU", {}, -1, "", "CPU_", 0}, {"OTHER", {}, -1, "", "OTHER_", 2}}, false, 3},
ConfigParams{"CPU,GPU,OTHER",
{{"CPU", {}, -1, "", "CPU_", 0},
{"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1},
@ -189,13 +185,15 @@ const std::vector<ConfigParams> testConfigsNoID = {
3},
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceTest,
::testing::ValuesIn(testConfigs),
ParseMetaDeviceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
ParseMetaDeviceTest,
::testing::ValuesIn(testConfigs),
ParseMetaDeviceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceNoIDTest,
::testing::ValuesIn(testConfigsNoID),
ParseMetaDeviceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
ParseMetaDeviceNoIDTest,
::testing::ValuesIn(testConfigsNoID),
ParseMetaDeviceTest::getTestCaseName);
//toDo need add test for ParseMetaDevices(_, config) to check device config of
//return metaDevices
// toDo need add test for ParseMetaDevices(_, config) to check device config of
// return metaDevices

View File

@ -1,100 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "include/auto_unit_test.hpp"
using namespace ov::mock_auto_plugin::tests;
class MultiPropertyTest : public tests::AutoTestWithRealCore, public ::testing::Test {
public:
void SetUp() override {
plugin->set_device_name("MULTI");
std::shared_ptr<ov::IPlugin> base_plugin = plugin;
reg_plugin(core, base_plugin, "MOCK_MULTI", {});
// validate mock plugin
core.get_property("MOCK_MULTI", ov::supported_properties);
}
};
class AutoPropertyTest : public tests::AutoTestWithRealCore, public ::testing::Test {
public:
void SetUp() override {
plugin->set_device_name("AUTO");
std::shared_ptr<ov::IPlugin> base_plugin = plugin;
reg_plugin(core, base_plugin, "MOCK_AUTO", {});
core.get_property("MOCK_AUTO", ov::supported_properties);
}
};
/* to be enabled if expect multi throw for latency mode
TEST_F(PropertyTest, tputmodeonly_for_multi) {
EXPECT_THROW_WITH_MESSAGE(core.compile_model(model, "MULTI", ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::Exception,
"MULTI does not support perf mode");
ASSERT_NO_THROW(compiled_model = core.compile_model(model, "MULTI", ov::device::priorities("MOCK_GPU", "MOCK_CPU")));
EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::THROUGHPUT);
}
TEST_F(PropertyTest, tputmodeonly_for_multi_propertyset) {
ASSERT_NO_THROW(core.get_property("MULTI", ov::supported_properties));
EXPECT_THROW_WITH_MESSAGE(core.set_property("MULTI", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::Exception,
"MULTI does not support perf mode");
}
*/
/*
TEST_F(PropertyTest, default_perfmode_for_auto) {
ov::CompiledModel compiled_model;
EXPECT_NO_THROW(compiled_model = core.compile_model(model, "AUTO", ov::device::priorities("MOCK_GPU", "MOCK_CPU")));
EXPECT_EQ(compiled_model.get_property(ov::hint::performance_mode), ov::hint::PerformanceMode::LATENCY);
}
*/
TEST_F(MultiPropertyTest, default_perfmode_for_multi) {
EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT")))).Times(1);
EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT")))).Times(1);
ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU")}));
EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::THROUGHPUT);
}
TEST_F(MultiPropertyTest, respect_secondary_property) {
EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY")))).Times(1);
EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY")))).Times(1);
ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
{"DEVICE_PROPERTIES", "{MOCK_CPU:{PERFORMANCE_HINT:LATENCY},MOCK_GPU:{PERFORMANCE_HINT:LATENCY}"}}));
EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::THROUGHPUT);
}
TEST_F(AutoPropertyTest, default_perfmode_for_auto_ctput) {
EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT")))).Times(1);
EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT")))).Times(1);
ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}));
EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT);
}
TEST_F(AutoPropertyTest, default_perfmode_for_auto) {
EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY")))).Times(1);
EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY")))).Times(1);
compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU")});
EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::LATENCY);
}
TEST_F(AutoPropertyTest, respect_secondary_property_auto_ctput) {
EXPECT_CALL(*mock_plugin_cpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("LATENCY")))).Times(1);
EXPECT_CALL(*mock_plugin_gpu.get(), compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const ov::AnyMap&>(ComparePerfHint("THROUGHPUT")))).Times(1);
ASSERT_NO_THROW(compiled_model = plugin->compile_model(model, {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
{"DEVICE_PROPERTIES", "{MOCK_CPU:{PERFORMANCE_HINT:LATENCY},MOCK_GPU:{PERFORMANCE_HINT:THROUGHPUT}"}}));
EXPECT_EQ(compiled_model->get_property(ov::hint::performance_mode.name()), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT);
}

View File

@ -2,18 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <common_test_utils/test_constants.hpp>
#include <thread>
#include "common_test_utils/test_constants.hpp"
#include "include/auto_unit_test.hpp"
using Config = std::map<std::string, std::string>;
using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<
bool, // cpu load success
bool // hw device load success
>;
using ConfigParams = std::tuple<bool, // cpu load success
bool // hw device load success
>;
class AutoReleaseHelperTest : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
@ -21,7 +20,7 @@ public:
bool accSuccess;
std::tie(cpuSuccess, accSuccess) = obj.param;
std::ostringstream result;
if (!cpuSuccess) {
if (!cpuSuccess) {
result << "cpuLoadFailure_";
} else {
result << "cpuLoadSuccess_";
@ -43,33 +42,42 @@ TEST_P(AutoReleaseHelperTest, releaseResource) {
size_t decreaseCount = 0;
// test auto plugin
plugin->set_device_name("AUTO");
const std::string strDevices = ov::test::utils::DEVICE_GPU + std::string(",") +
ov::test::utils::DEVICE_CPU;
const std::string strDevices = ov::test::utils::DEVICE_GPU + std::string(",") + ov::test::utils::DEVICE_CPU;
if (accSuccess) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)), _))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkActual; }));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkActual;
}));
} else {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)), _))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
OPENVINO_THROW("");
return mockExeNetworkActual; }));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_GPU)),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
OPENVINO_THROW("");
return mockExeNetworkActual;
}));
}
if (cpuSuccess) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
.WillByDefault(Return(mockExeNetwork));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Return(mockExeNetwork));
if (accSuccess)
decreaseCount++;
} else {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)), _))
.WillByDefault(Throw(InferenceEngine::GeneralError{""}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
_))
.WillByDefault(Throw(InferenceEngine::GeneralError{""}));
}
metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}};
DeviceInformation devInfo;
@ -80,15 +88,16 @@ TEST_P(AutoReleaseHelperTest, releaseResource) {
return devices;
});
ON_CALL(*plugin, select_device(Property(&std::vector<DeviceInformation>::size, Eq(2)), _, _))
.WillByDefault(Return(metaDevices[1]));
.WillByDefault(Return(metaDevices[1]));
ON_CALL(*plugin, select_device(Property(&std::vector<DeviceInformation>::size, Eq(1)), _, _))
.WillByDefault(Return(metaDevices[0]));
.WillByDefault(Return(metaDevices[0]));
config.insert(ov::device::priorities(ov::test::utils::DEVICE_CPU + std::string(",") + ov::test::utils::DEVICE_GPU));
std::shared_ptr<ov::ICompiledModel> exeNetwork;
if (cpuSuccess || accSuccess) {
ASSERT_NO_THROW(exeNetwork = plugin->compile_model(model, config));
if (!cpuSuccess)
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(), ov::test::utils::DEVICE_GPU);
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(),
ov::test::utils::DEVICE_GPU);
else
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(), "(CPU)");
} else {
@ -101,19 +110,21 @@ TEST_P(AutoReleaseHelperTest, releaseResource) {
EXPECT_EQ(inferReqInternal.use_count(), requestsharedcount - decreaseCount);
if (cpuSuccess || accSuccess) {
if (accSuccess)
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(), ov::test::utils::DEVICE_GPU);
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(),
ov::test::utils::DEVICE_GPU);
else
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(), ov::test::utils::DEVICE_CPU);
EXPECT_EQ(exeNetwork->get_property(ov::execution_devices.name()).as<std::string>(),
ov::test::utils::DEVICE_CPU);
}
}
//
const std::vector<ConfigParams> testConfigs = {ConfigParams {true, true},
ConfigParams {true, false},
ConfigParams {false, true},
ConfigParams {false, false}
};
const std::vector<ConfigParams> testConfigs = {ConfigParams{true, true},
ConfigParams{true, false},
ConfigParams{false, true},
ConfigParams{false, false}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoReleaseHelperTest,
::testing::ValuesIn(testConfigs),
AutoReleaseHelperTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
AutoReleaseHelperTest,
::testing::ValuesIn(testConfigs),
AutoReleaseHelperTest::getTestCaseName);

View File

@ -5,29 +5,28 @@
#include <thread>
#include "include/auto_unit_test.hpp"
#include "openvino/runtime/threading/immediate_executor.hpp"
#include "openvino/runtime/auto/properties.hpp"
#include "openvino/runtime/threading/immediate_executor.hpp"
using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<std::vector<std::tuple<std::string, bool>>, int, bool, bool, bool, bool>;
class AutoRuntimeFallback : public tests::AutoTest,
public ::testing::TestWithParam<ConfigParams> {
class AutoRuntimeFallback : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
ov::SoPtr<ov::MockCompiledModel> mockExeNetworkGPU_1;
ov::SoPtr<ov::MockCompiledModel> mockExeNetworkOTHER;
ov::SoPtr<ov::MockICompiledModel> mockExeNetworkGPU_1;
ov::SoPtr<ov::MockICompiledModel> mockExeNetworkOTHER;
std::shared_ptr<NiceMock<ov::MockSyncInferRequest>> inferReqInternalGPU_1;
std::shared_ptr<NiceMock<ov::MockSyncInferRequest>> inferReqInternalOTHER;
std::shared_ptr<NiceMock<ov::mock_auto_plugin::MockISyncInferRequest>> inferReqInternalGPU_1;
std::shared_ptr<NiceMock<ov::mock_auto_plugin::MockISyncInferRequest>> inferReqInternalOTHER;
std::shared_ptr<NiceMock<ov::MockCompiledModel>> mockIExeNetGPU_1;
std::shared_ptr<NiceMock<ov::MockCompiledModel>> mockIExeNetOTHER;
std::shared_ptr<NiceMock<ov::MockICompiledModel>> mockIExeNetGPU_1;
std::shared_ptr<NiceMock<ov::MockICompiledModel>> mockIExeNetOTHER;
std::shared_ptr<ov::MockAsyncInferRequest> mockInferrequest;
std::shared_ptr<ov::MockAsyncInferRequest> mockInferrequestGPU_0;
std::shared_ptr<ov::MockAsyncInferRequest> mockInferrequestGPU_1;
std::shared_ptr<ov::MockAsyncInferRequest> mockInferrequestOTHER;
std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequest;
std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequestGPU_0;
std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequestGPU_1;
std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequestOTHER;
std::shared_ptr<ov::threading::ImmediateExecutor> mockExecutor;
std::shared_ptr<ov::threading::ImmediateExecutor> mockExecutorGPU_0;
@ -42,7 +41,12 @@ public:
bool expectThrow;
bool loadNetworkFail;
bool generateWorkersFail;
std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = obj.param;
std::tie(targetDevices,
loadNetworkNum,
enableRumtimeFallback,
expectThrow,
loadNetworkFail,
generateWorkersFail) = obj.param;
std::ostringstream result;
result << "auto_runtime_fallback_";
for (auto deviceInfo : targetDevices) {
@ -82,40 +86,59 @@ public:
void SetUp() override {
// prepare extra mockExeNetwork
mockIExeNetGPU_1 = std::make_shared<NiceMock<ov::MockCompiledModel>>(model, plugin);
mockIExeNetGPU_1 = std::make_shared<NiceMock<ov::MockICompiledModel>>(model, plugin);
mockExeNetworkGPU_1 = {mockIExeNetGPU_1, {}};
mockIExeNetOTHER = std::make_shared<NiceMock<ov::MockCompiledModel>>(model, plugin);
mockIExeNetOTHER = std::make_shared<NiceMock<ov::MockICompiledModel>>(model, plugin);
mockExeNetworkOTHER = {mockIExeNetOTHER, {}};
ON_CALL(*mockIExeNetGPU_1.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs()));
ON_CALL(*mockIExeNetGPU_1.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs()));
ON_CALL(*mockIExeNetOTHER.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs()));
ON_CALL(*mockIExeNetOTHER.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs()));
// prepare mockicore and cnnNetwork for loading
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.0")), _)).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkActual; }));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.1")), _)).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkGPU_1; }));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("OTHER")), _)).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkOTHER; }));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.0")),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkActual;
}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.1")),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkGPU_1;
}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("OTHER")),
_))
.WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
return mockExeNetworkOTHER;
}));
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
(_))).WillByDefault(Return(mockExeNetwork));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
(_)))
.WillByDefault(Return(mockExeNetwork));
mockExecutor = std::make_shared<ov::threading::ImmediateExecutor>();
mockExecutorGPU_0 = std::make_shared<ov::threading::ImmediateExecutor>();
inferReqInternalGPU_1 = std::make_shared<NiceMock<ov::MockSyncInferRequest>>(mockIExeNetGPU_1);
inferReqInternalGPU_1 =
std::make_shared<NiceMock<ov::mock_auto_plugin::MockISyncInferRequest>>(mockIExeNetGPU_1);
mockExecutorGPU_1 = std::make_shared<ov::threading::ImmediateExecutor>();
ON_CALL(*mockIExeNetGPU_1, get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(Return(optimalNum));
.WillByDefault(Return(optimalNum));
inferReqInternalOTHER = std::make_shared<NiceMock<ov::MockSyncInferRequest>>(mockIExeNetOTHER);
inferReqInternalOTHER =
std::make_shared<NiceMock<ov::mock_auto_plugin::MockISyncInferRequest>>(mockIExeNetOTHER);
mockExecutorOTHER = std::make_shared<ov::threading::ImmediateExecutor>();
ON_CALL(*mockIExeNetOTHER, get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(Return(optimalNum));
@ -132,11 +155,14 @@ TEST_P(AutoRuntimeFallback, releaseResource) {
bool expectThrow;
bool loadNetworkFail;
bool generateWorkersFail;
std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = this->GetParam();
std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) =
this->GetParam();
if (loadNetworkFail) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.1")),
_)).WillByDefault(Throw(ov::Exception{"compile model error"}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.1")),
_))
.WillByDefault(Throw(ov::Exception{"compile model error"}));
}
for (auto& deviceInfo : targetDevices) {
std::string deviceName;
@ -145,30 +171,45 @@ TEST_P(AutoRuntimeFallback, releaseResource) {
targetDev += deviceName;
targetDev += ((deviceInfo == targetDevices.back()) ? "" : ",");
if (deviceName == "CPU") {
mockInferrequest = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternal, mockExecutor, nullptr, ifThrow);
mockInferrequest = std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternal,
mockExecutor,
nullptr,
ifThrow);
ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest));
} else if (deviceName == "GPU.0") {
mockInferrequestGPU_0 = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternalActual, mockExecutorGPU_0, nullptr, ifThrow);
mockInferrequestGPU_0 =
std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalActual,
mockExecutorGPU_0,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_0; }));
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_0;
}));
} else if (deviceName == "GPU.1") {
if (generateWorkersFail) {
mockInferrequestGPU_1 = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow);
mockInferrequestGPU_1 =
std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalGPU_1,
mockExecutorGPU_1,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(Throw(ov::Exception{"error"}));
} else {
mockInferrequestGPU_1 = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow);
mockInferrequestGPU_1 =
std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalGPU_1,
mockExecutorGPU_1,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_1; }));
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_1;
}));
}
} else if (deviceName == "OTHER") {
mockInferrequestOTHER =
std::make_shared<ov::MockAsyncInferRequest>(inferReqInternalOTHER, mockExecutorOTHER, nullptr, ifThrow);
mockInferrequestOTHER = std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalOTHER,
mockExecutorOTHER,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetOTHER.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestOTHER;
@ -185,8 +226,8 @@ TEST_P(AutoRuntimeFallback, releaseResource) {
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(loadNetworkNum);
std::shared_ptr<ov::ICompiledModel> exeNetwork;
@ -206,10 +247,10 @@ const std::vector<ConfigParams> testConfigs = {
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", false}, {"GPU.1", true}}, 1, true, false, false, false},
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}}, 1, true, false, false, false},
//CPU_HELP does not throw
// CPU_HELP does not throw
ConfigParams{{{"GPU.0", false}, {"CPU", false}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"CPU", false}}, 2, true, false, false, false},
//CPU_HELP throw
// CPU_HELP throw
ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, true, true, false, false},
// 3 devices
@ -217,11 +258,11 @@ const std::vector<ConfigParams> testConfigs = {
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", false}}, 3, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", true}}, 3, true, true, false, false},
//CPU_HELP does not throw
// CPU_HELP does not throw
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", false}}, 2, true, false, false, false},
//CPU_HELP throw
// CPU_HELP throw
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", true}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 3, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 3, true, true, false, false},
@ -230,10 +271,10 @@ const std::vector<ConfigParams> testConfigs = {
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}}, 1, false, true, false, false},
ConfigParams{{{"GPU.0", false}, {"GPU.1", true}}, 1, false, false, false, false},
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}}, 1, false, false, false, false},
//CPU_HELP does not throw
// CPU_HELP does not throw
ConfigParams{{{"GPU.0", false}, {"CPU", false}}, 2, false, false, false, false},
ConfigParams{{{"GPU.0", true}, {"CPU", false}}, 2, false, false, false, false},
//CPU_HELP throw
// CPU_HELP throw
ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, false, true, false, false},
// 3 devices
@ -241,11 +282,11 @@ const std::vector<ConfigParams> testConfigs = {
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 1, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", false}}, 1, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"OTHER", true}}, 1, false, true, false, false},
//CPU_HELP does not throw
// CPU_HELP does not throw
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", false}}, 2, false, false, false, false},
//CPU_HELP throw
// CPU_HELP throw
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 2, false, true, false, false},
@ -254,23 +295,27 @@ const std::vector<ConfigParams> testConfigs = {
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"OTHER", false}}, 3, true, false, false, true},
};
INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback, AutoRuntimeFallback,
::testing::ValuesIn(testConfigs),
AutoRuntimeFallback::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback,
AutoRuntimeFallback,
::testing::ValuesIn(testConfigs),
AutoRuntimeFallback::getTestCaseName);
TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) {
std::string targetDev;
std::vector<std::tuple<std::string, bool>> targetDevices; //std::tuple<deviceName, will infer throw exception>
std::vector<std::tuple<std::string, bool>> targetDevices; // std::tuple<deviceName, will infer throw exception>
int loadNetworkNum;
bool enableRumtimeFallback;
bool expectThrow;
bool loadNetworkFail;
bool generateWorkersFail;
std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) = this->GetParam();
std::tie(targetDevices, loadNetworkNum, enableRumtimeFallback, expectThrow, loadNetworkFail, generateWorkersFail) =
this->GetParam();
if (loadNetworkFail) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.1")),
_)).WillByDefault(Throw(ov::Exception{"compile model error"}));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU.1")),
_))
.WillByDefault(Throw(ov::Exception{"compile model error"}));
}
for (auto& deviceInfo : targetDevices) {
std::string deviceName;
@ -279,26 +324,39 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) {
targetDev += deviceName;
targetDev += ((deviceInfo == targetDevices.back()) ? "" : ",");
if (deviceName == "CPU") {
mockInferrequest = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternal, mockExecutor, nullptr, ifThrow);
mockInferrequest = std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternal,
mockExecutor,
nullptr,
ifThrow);
ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest));
} else if (deviceName == "GPU.0") {
mockInferrequestGPU_0 = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternalActual, mockExecutorGPU_0, nullptr, ifThrow);
mockInferrequestGPU_0 =
std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalActual,
mockExecutorGPU_0,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_0; }));
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_0;
}));
} else if (deviceName == "GPU.1") {
if (generateWorkersFail) {
mockInferrequestGPU_1 = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow);
mockInferrequestGPU_1 =
std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalGPU_1,
mockExecutorGPU_1,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(Throw(ov::Exception{"error"}));
} else {
mockInferrequestGPU_1 = std::make_shared<ov::MockAsyncInferRequest>(
inferReqInternalGPU_1, mockExecutorGPU_1, nullptr, ifThrow);
mockInferrequestGPU_1 =
std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalGPU_1,
mockExecutorGPU_1,
nullptr,
ifThrow);
ON_CALL(*mockIExeNetGPU_1.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_1; }));
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_1;
}));
}
}
}
@ -311,8 +369,8 @@ TEST_P(AutoCTPUTRuntimeFallback, ctputDeviceInferFailTest) {
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(loadNetworkNum);
std::shared_ptr<ov::ICompiledModel> exeNetwork;

View File

@ -15,18 +15,16 @@ enum MODEL {
THROUGHPUT = 2,
};
using ConfigParams = std::tuple<
bool, // if can continue to run
bool, // if select throw exception
MODEL, // config model general, latency, throughput
std::vector<DeviceParams>, // {device, loadSuccess}
unsigned int, // select count
unsigned int, // load count
unsigned int // load device success count
>;
using ConfigParams = std::tuple<bool, // if can continue to run
bool, // if select throw exception
MODEL, // config model general, latency, throughput
std::vector<DeviceParams>, // {device, loadSuccess}
unsigned int, // select count
unsigned int, // load count
unsigned int // load device success count
>;
class AutoLoadFailedTest : public tests::AutoTest,
public ::testing::TestWithParam<ConfigParams> {
class AutoLoadFailedTest : public tests::AutoTest, public ::testing::TestWithParam<ConfigParams> {
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
unsigned int selectCount;
@ -36,8 +34,8 @@ public:
bool continueRun;
bool thrExcWheSelect;
MODEL configModel;
std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs,
selectCount, loadCount, loadSuccessCount) = obj.param;
std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, loadCount, loadSuccessCount) =
obj.param;
std::ostringstream result;
for (auto& item : deviceConfigs) {
if (std::get<1>(item)) {
@ -53,22 +51,21 @@ public:
}
switch (configModel) {
case GENERAL:
result << "GENERAL";
break;
case LATENCY:
result << "LATENCY";
break;
case THROUGHPUT:
result << "THROUGHPUT";
break;
default:
LOG_ERROR("should not come here");
break;
case GENERAL:
result << "GENERAL";
break;
case LATENCY:
result << "LATENCY";
break;
case THROUGHPUT:
result << "THROUGHPUT";
break;
default:
LOG_ERROR("should not come here");
break;
}
result << "select_" << selectCount << "_loadCount_"
<< loadCount << "_loadSuccessCount_" << loadSuccessCount;
result << "select_" << selectCount << "_loadCount_" << loadCount << "_loadSuccessCount_" << loadSuccessCount;
return result.str();
}
void SetUp() override {
@ -87,8 +84,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
bool continueRun;
bool thrExcWheSelect;
MODEL configModel;
std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount,
loadCount, loadSuccessCount) = this->GetParam();
std::tie(continueRun, thrExcWheSelect, configModel, deviceConfigs, selectCount, loadCount, loadSuccessCount) =
this->GetParam();
// test auto plugin
plugin->set_device_name("AUTO");
@ -99,30 +96,37 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
bool loadSuccess = std::get<1>(*iter);
// accoding to device loading config, set if the loading will successful or throw exception.
if (loadSuccess) {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
(_))).WillByDefault(Return(mockExeNetwork));
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
(_)))
.WillByDefault(Return(mockExeNetwork));
} else {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
(_)))
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(StrEq(deviceName)),
(_)))
.WillByDefault(Throw(ov::Exception{"compile error"}));
}
DeviceInformation devInfo;
switch (configModel) {
case GENERAL:
devInfo = {deviceName, {}, 2, ""};
break;
case LATENCY:
devInfo = {deviceName, {ov::hint::performance_mode("LATENCY"), ov::hint::allow_auto_batching(true), ov::auto_batch_timeout(1000)},
2, ""};
break;
case THROUGHPUT:
devInfo = {deviceName, {ov::hint::performance_mode("THROUGHPUT")}, 2, ""};
break;
default:
LOG_ERROR("should not come here");
break;
case GENERAL:
devInfo = {deviceName, {}, 2, ""};
break;
case LATENCY:
devInfo = {deviceName,
{ov::hint::performance_mode("LATENCY"),
ov::hint::allow_auto_batching(true),
ov::auto_batch_timeout(1000)},
2,
""};
break;
case THROUGHPUT:
devInfo = {deviceName, {ov::hint::performance_mode("THROUGHPUT")}, 2, ""};
break;
default:
LOG_ERROR("should not come here");
break;
}
metaDevices.push_back(std::move(devInfo));
@ -156,9 +160,11 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
EXPECT_CALL(*plugin, parse_meta_devices(_, _)).Times(AtLeast(1));
EXPECT_CALL(*plugin, select_device(_, _, _)).Times(selectCount);
EXPECT_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_))).Times(loadCount);
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
.Times(loadCount);
// if loadSuccess will get the optimalNum requset of per device, in this test is 2;
EXPECT_CALL(*mockIExeNet.get(), get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
@ -177,8 +183,8 @@ TEST_P(AutoLoadFailedTest, LoadCNNetWork) {
// DeviceParams {ov::test::utils::DEVICE_CPU, true}}, 2, 3, 2},
//
// every element for ConfigParams
// {continueRun, selectThrowException, config model, deviceLoadsuccessVector, selectCount, loadCount, loadSuccessCount}
// { true, false, GENERAL, 3 device, 2, 3, 2}
// {continueRun, selectThrowException, config model, deviceLoadsuccessVector, selectCount, loadCount,
// loadSuccessCount} { true, false, GENERAL, 3 device, 2, 3, 2}
//
// there are three devices for loading
// CPU load for accelerator success, but GPU will load faild and then select NPU and load again
@ -353,7 +359,7 @@ const std::vector<ConfigParams> testConfigs = {
3,
2}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, AutoLoadFailedTest,
::testing::ValuesIn(testConfigs),
AutoLoadFailedTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
AutoLoadFailedTest,
::testing::ValuesIn(testConfigs),
AutoLoadFailedTest::getTestCaseName);

View File

@ -5,30 +5,28 @@
#include "include/auto_unit_test.hpp"
using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<
std::string, // netPrecision
std::vector<DeviceInformation>, // metaDevices for select
DeviceInformation, // expect DeviceInformation
bool, // throw exception
bool, // enabledevice_priority
bool // reverse total device
>;
using ConfigParams = std::tuple<std::string, // netPrecision
std::vector<DeviceInformation>, // metaDevices for select
DeviceInformation, // expect DeviceInformation
bool, // throw exception
bool, // enabledevice_priority
bool // reverse total device
>;
const DeviceInformation CPU_INFO = {ov::test::utils::DEVICE_CPU, {}, 2, "01", "CPU_01"};
const DeviceInformation IGPU_INFO = {"GPU.0", {}, 2, "01", "iGPU_01"};
const DeviceInformation DGPU_INFO = {"GPU.1", {}, 2, "01", "dGPU_01"};
const DeviceInformation OTHERS_INFO = {"OTHERS", {}, 2, "01", "OTHERS" };
const DeviceInformation OTHERS_INFO = {"OTHERS", {}, 2, "01", "OTHERS"};
const std::vector<DeviceInformation> fp32DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO};
const std::vector<DeviceInformation> fp16DeviceVector = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO};
const std::vector<DeviceInformation> int8DeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO};
const std::vector<DeviceInformation> binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO};
const std::vector<DeviceInformation> batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO};
const std::vector<DeviceInformation> binDeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO};
const std::vector<DeviceInformation> batchedblobDeviceVector = {DGPU_INFO, IGPU_INFO};
std::map<std::string, const std::vector<DeviceInformation>> devicesMap = {{"FP32", fp32DeviceVector},
{"FP16", fp16DeviceVector},
{"INT8", int8DeviceVector},
{"BIN", binDeviceVector},
{"BATCHED_BLOB", batchedblobDeviceVector}
};
{"FP16", fp16DeviceVector},
{"INT8", int8DeviceVector},
{"BIN", binDeviceVector},
{"BATCHED_BLOB", batchedblobDeviceVector}};
const std::vector<DeviceInformation> totalDevices = {DGPU_INFO, IGPU_INFO, OTHERS_INFO, CPU_INFO};
const std::vector<DeviceInformation> reverseTotalDevices = {CPU_INFO, OTHERS_INFO, IGPU_INFO, DGPU_INFO};
const std::vector<std::string> netPrecisions = {"FP32", "FP16", "INT8", "BIN", "BATCHED_BLOB"};
@ -47,7 +45,7 @@ public:
std::ostringstream result;
result << "_netPrecision_" << netPrecision;
for (auto& item : devices) {
result << "_device_" << item.unique_name;
result << "_device_" << item.unique_name;
}
result << "_expect_" << expect.unique_name;
if (throwExcept) {
@ -72,9 +70,14 @@ public:
}
// combine select_num devices from devices and make them to ConfigParams
// insert the ConfigParams into testConfigs
static void combine_device(const std::vector<DeviceInformation>& devices, size_t start,
size_t* result, size_t result_index, const size_t select_num, std::string& netPrecision,
bool enabledevice_priority, bool reverse) {
static void combine_device(const std::vector<DeviceInformation>& devices,
size_t start,
size_t* result,
size_t result_index,
const size_t select_num,
std::string& netPrecision,
bool enabledevice_priority,
bool reverse) {
for (size_t i = start; i < devices.size() + 1 - result_index; i++) {
result[result_index - 1] = i;
if (result_index - 1 == 0) {
@ -100,8 +103,11 @@ public:
if (enabledevice_priority) {
std::vector<DeviceInformation> validDevices;
for (auto& item : devicesInfo) {
auto device = std::find_if(metaDevices.begin(), metaDevices.end(),
[&item](const DeviceInformation& d)->bool{return d.unique_name == item.unique_name;});
auto device = std::find_if(metaDevices.begin(),
metaDevices.end(),
[&item](const DeviceInformation& d) -> bool {
return d.unique_name == item.unique_name;
});
if (device != metaDevices.end()) {
validDevices.push_back(*device);
}
@ -118,8 +124,11 @@ public:
}
} else {
for (auto& item : devicesInfo) {
auto device = std::find_if(metaDevices.begin(), metaDevices.end(),
[&item](const DeviceInformation& d)->bool{return d.unique_name == item.unique_name;});
auto device = std::find_if(metaDevices.begin(),
metaDevices.end(),
[&item](const DeviceInformation& d) -> bool {
return d.unique_name == item.unique_name;
});
if (device != metaDevices.end()) {
find = true;
expect = item;
@ -133,11 +142,17 @@ public:
} else {
find = false;
}
testConfigs.push_back(std::make_tuple(netPrecision, metaDevices,
expect, !find, enabledevice_priority, reverse));
testConfigs.push_back(
std::make_tuple(netPrecision, metaDevices, expect, !find, enabledevice_priority, reverse));
} else {
combine_device(devices, i + 1, result, result_index - 1,
select_num, netPrecision, enabledevice_priority, reverse);
combine_device(devices,
i + 1,
result,
result_index - 1,
select_num,
netPrecision,
enabledevice_priority,
reverse);
}
}
}
@ -178,7 +193,7 @@ public:
combine_device(reverseTotalDevices, 0, result, i, i, netPrecision, true, true);
}
}
delete []result;
delete[] result;
return testConfigs;
}
@ -189,14 +204,16 @@ public:
}
void SetUp() override {
ON_CALL(*plugin, select_device).WillByDefault([this](const std::vector<DeviceInformation>& metaDevices,
const std::string& netPrecision, unsigned int priority) {
return plugin->Plugin::select_device(metaDevices, netPrecision, priority);
});
ON_CALL(*plugin, get_valid_device)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices, const std::string& netPrecision) {
return plugin->Plugin::get_valid_device(metaDevices, netPrecision);
});
ON_CALL(*plugin, select_device)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices,
const std::string& netPrecision,
unsigned int priority) {
return plugin->Plugin::select_device(metaDevices, netPrecision, priority);
});
ON_CALL(*plugin, get_valid_device)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices, const std::string& netPrecision) {
return plugin->Plugin::get_valid_device(metaDevices, netPrecision);
});
}
};
@ -220,13 +237,12 @@ TEST_P(SelectDeviceTest, SelectDevice) {
if (throwExcept) {
ASSERT_THROW(plugin->select_device(devices, netPrecision, 0), ov::Exception);
} else {
auto result = plugin->select_device(devices, netPrecision, 0);
auto result = plugin->select_device(devices, netPrecision, 0);
compare(result, expect);
}
}
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, SelectDeviceTest,
::testing::ValuesIn(SelectDeviceTest::CreateConfigs()),
SelectDeviceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
SelectDeviceTest,
::testing::ValuesIn(SelectDeviceTest::CreateConfigs()),
SelectDeviceTest::getTestCaseName);

View File

@ -4,14 +4,14 @@
#include "include/auto_unit_test.hpp"
namespace {
void custom_unsetenv(const char *name) {
void custom_unsetenv(const char* name) {
#ifdef _WIN32
_putenv((std::string(name) + "=").c_str());
#else
::unsetenv(name);
#endif
}
} // namespace
} // namespace
using ConfigParams = std::tuple<std::string, ov::AnyMap>;
using namespace ov::mock_auto_plugin;
@ -28,9 +28,10 @@ public:
}
void SetUp() override {
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
::testing::Matcher<const ov::AnyMap&>(_)))
.WillByDefault(Return(mockExeNetwork));
metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}};
@ -57,8 +58,12 @@ TEST_P(AutoSetLogLevel, setLogLevelFromConfig) {
plugin->set_device_name("AUTO");
plugin->compile_model(model, config);
int a = 0;
DEBUG_RUN([&a](){a++;});
INFO_RUN([&a](){a++;});
DEBUG_RUN([&a]() {
a++;
});
INFO_RUN([&a]() {
a++;
});
if (log_level == "LOG_DEBUG" || log_level == "LOG_TRACE") {
EXPECT_EQ(a, 2);
} else if (log_level == "LOG_INFO") {

View File

@ -5,8 +5,7 @@
using namespace ov::mock_auto_plugin;
using ConfigParams = std::tuple<bool,
ov::AnyMap>;
using ConfigParams = std::tuple<bool, ov::AnyMap>;
// define a matcher if all the elements of subMap are contained in the map.
MATCHER_P(MapContains, subMap, "Check if all the elements of the subMap are contained in the map.") {
@ -32,17 +31,19 @@ class AutoStartupFallback : public tests::AutoTest, public ::testing::TestWithPa
public:
void SetUp() override {
plugin->set_device_name("AUTO");
ON_CALL(*core, compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_), _))
ON_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(_),
_))
.WillByDefault(Return(mockExeNetwork));
metaDevices = {{ov::test::utils::DEVICE_CPU, {}, -1}, {ov::test::utils::DEVICE_GPU, {}, -1}};
ON_CALL(*plugin, parse_meta_devices(_, _)).WillByDefault(Return(metaDevices));
ON_CALL(*plugin, get_valid_device)
.WillByDefault([](const std::vector<DeviceInformation>& metaDevices, const std::string& netPrecision) {
std::list<DeviceInformation> devices(metaDevices.begin(), metaDevices.end());
return devices;
});
ON_CALL(*plugin, select_device(_, _, _)).WillByDefault(Return(metaDevices[1]));
.WillByDefault([](const std::vector<DeviceInformation>& metaDevices, const std::string& netPrecision) {
std::list<DeviceInformation> devices(metaDevices.begin(), metaDevices.end());
return devices;
});
ON_CALL(*plugin, select_device(_, _, _)).WillByDefault(Return(metaDevices[1]));
}
};
@ -52,30 +53,24 @@ TEST_P(AutoStartupFallback, propertytest) {
ov::AnyMap config;
std::tie(startup_fallback, config) = this->GetParam();
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_GPU), _))
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_GPU),
_))
.Times(1);
if (startup_fallback) {
std::map<std::string, std::string> test_map = {{"PERFORMANCE_HINT", "LATENCY"}};
EXPECT_CALL(
*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(MapContains(test_map))))
EXPECT_CALL(*core,
compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
::testing::Matcher<const std::string&>(ov::test::utils::DEVICE_CPU),
::testing::Matcher<const ov::AnyMap&>(MapContains(test_map))))
.Times(1);
}
ASSERT_NO_THROW(plugin->compile_model(model, config));
}
const std::vector<ConfigParams> testConfigs = {ConfigParams {true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}},
ConfigParams {false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}}
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback,
AutoStartupFallback,
::testing::ValuesIn(testConfigs));
const std::vector<ConfigParams> testConfigs = {ConfigParams{true, {{"ENABLE_STARTUP_FALLBACK", "YES"}}},
ConfigParams{false, {{"ENABLE_STARTUP_FALLBACK", "NO"}}}};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_StartupFallback, AutoStartupFallback, ::testing::ValuesIn(testConfigs));

View File

@ -86,7 +86,9 @@ void SyncInferRequest::set_tensors_to_another_request(ov::SoPtr<ov::IAsyncInferR
auto tensor = get_tensor(it);
OPENVINO_ASSERT(tensor != nullptr, "The tensor is empty!");
auto type = tensor->get_element_type();
if (req->get_tensor(it)->data(type) != tensor->data(type)) {
bool is_remote = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) ||
std::dynamic_pointer_cast<ov::IRemoteTensor>(req->get_tensor(it)._ptr);
if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type)) {
req->set_tensor(it, tensor);
}
}
@ -95,7 +97,9 @@ void SyncInferRequest::set_tensors_to_another_request(ov::SoPtr<ov::IAsyncInferR
auto tensor = get_tensor(it);
OPENVINO_ASSERT(tensor != nullptr, "The tensor is empty!");
auto type = tensor->get_element_type();
if (req->get_tensor(it)->data(type) != tensor->data(type)) {
bool is_remote = std::dynamic_pointer_cast<ov::IRemoteTensor>(tensor._ptr) ||
std::dynamic_pointer_cast<ov::IRemoteTensor>(req->get_tensor(it)._ptr);
if (is_remote || req->get_tensor(it)->data(type) != tensor->data(type)) {
req->set_tensor(it, tensor);
}
}

View File

@ -11,9 +11,6 @@ namespace {
const std::vector<std::map<std::string, std::string>> configs = {
{},
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> heteroConfigs = {
{{"TARGET_FALLBACK", ov::test::utils::DEVICE_CPU}}};
@ -24,18 +21,6 @@ namespace {
::testing::ValuesIn(configs)),
ExecutableNetworkBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecutableNetworkBaseTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
ExecutableNetworkBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecutableNetworkBaseTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
ExecutableNetworkBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, ExecutableNetworkBaseTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_HETERO),
@ -54,34 +39,10 @@ namespace {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> AutoConfigsSetPrc = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}},
};
const std::vector<std::map<std::string, std::string>> MultiConfigsSetPrc = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ExecNetSetPrecision,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configSetPrc)),
ExecNetSetPrecision::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ExecNetSetPrecision,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(MultiConfigsSetPrc)),
ExecNetSetPrecision::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ExecNetSetPrecision,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigsSetPrc)),
ExecNetSetPrecision::getTestCaseName);
} // namespace

View File

@ -19,23 +19,23 @@ INSTANTIATE_TEST_SUITE_P(
INSTANTIATE_TEST_SUITE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
INSTANTIATE_TEST_SUITE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
INSTANTIATE_TEST_SUITE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
INSTANTIATE_TEST_SUITE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
INSTANTIATE_TEST_SUITE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
//
// Executable Network GetConfig / SetConfig

View File

@ -12,25 +12,9 @@ const std::vector<std::map<std::string, std::string>> configs = {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestCallbackTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestCallbackTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestCallbackTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestCallbackTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
InferRequestCallbackTests::getTestCaseName);
} // namespace

View File

@ -10,10 +10,6 @@ namespace {
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> InConfigs = {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
@ -23,41 +19,10 @@ namespace {
{{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}
};
const std::vector<std::map<std::string, std::string>> MultiInConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , ov::test::utils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest,
::testing::Combine(
::testing::Values(1u),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestConfigTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestConfigTest,
::testing::Combine(
::testing::Values(1u),
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
InferRequestConfigTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests_, InferRequestConfigTest,
::testing::Combine(
::testing::Values(1u),
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(MultiInConfigs)),
InferRequestConfigTest::getTestCaseName);
} // namespace

View File

@ -15,30 +15,9 @@ namespace {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<std::map<std::string, std::string>> Multiconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> Autoconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestIOBBlobTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestIOBBlobTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestIOBBlobTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
InferRequestIOBBlobTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestIOBBlobTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
InferRequestIOBBlobTest::getTestCaseName);
} // namespace

View File

@ -17,30 +17,8 @@ std::vector<memoryStateParams> memoryStateTestCases = {
ov::test::utils::DEVICE_HETERO,
{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})};
std::vector<memoryStateParams> memoryStateAutoTestCases = {
memoryStateParams(InferRequestVariableStateTest::getNetwork(),
{"c_1-3", "r_1-3"},
ov::test::utils::DEVICE_AUTO,
{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})};
std::vector<memoryStateParams> memoryStateMultiTestCases = {
memoryStateParams(InferRequestVariableStateTest::getNetwork(),
{"c_1-3", "r_1-3"},
ov::test::utils::DEVICE_MULTI,
{{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::utils::DEVICE_CPU}})};
INSTANTIATE_TEST_SUITE_P(smoke_VariableStateBasic,
InferRequestVariableStateTest,
::testing::ValuesIn(memoryStateTestCases),
InferRequestVariableStateTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
InferRequestVariableStateTest,
::testing::ValuesIn(memoryStateAutoTestCases),
InferRequestVariableStateTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
InferRequestVariableStateTest,
::testing::ValuesIn(memoryStateMultiTestCases),
InferRequestVariableStateTest::getTestCaseName);
} // namespace

View File

@ -15,26 +15,9 @@ namespace {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<std::map<std::string, std::string>> Multiconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestMultithreadingTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestMultithreadingTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
InferRequestMultithreadingTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestMultithreadingTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Multiconfigs)),
InferRequestMultithreadingTests::getTestCaseName);
} // namespace

View File

@ -35,30 +35,9 @@ const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> Multiconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> Autoconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestPerfCountersTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPerfCountersTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
InferRequestPerfCountersTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPerfCountersTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
InferRequestPerfCountersTest::getTestCaseName);
} // namespace

View File

@ -15,8 +15,6 @@ const std::vector<FuncTestUtils::BlobType> BlobTypes = {
};
const std::map<std::string, std::string> cpuConfig{}; //nothing special
const std::map<std::string, std::string> autoConfig{};
const std::map<std::string, std::string> multiConfig{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}};
const std::map<std::string, std::string> heteroConfig{{ "TARGET_FALLBACK", ov::test::utils::DEVICE_CPU }};
INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType,
@ -25,19 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Behavior, InferRequestSetBlobByType,
::testing::Values(cpuConfig)),
InferRequestSetBlobByType::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Multi, InferRequestSetBlobByType,
::testing::Combine(::testing::ValuesIn(BlobTypes),
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::Values(multiConfig)),
InferRequestSetBlobByType::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Auto, InferRequestSetBlobByType,
::testing::Combine(::testing::ValuesIn(BlobTypes),
::testing::Values(ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU),
::testing::Values(autoConfig)),
InferRequestSetBlobByType::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Behavior_Hetero, InferRequestSetBlobByType,
::testing::Combine(::testing::ValuesIn(BlobTypes),
::testing::Values(ov::test::utils::DEVICE_HETERO),

View File

@ -15,30 +15,9 @@ namespace {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<std::map<std::string, std::string>> Multiconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> Autoconfigs = {
{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , ov::test::utils::DEVICE_CPU}}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestWaitTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
InferRequestWaitTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestWaitTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
InferRequestWaitTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestWaitTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
InferRequestWaitTests::getTestCaseName);
} // namespace

View File

@ -17,7 +17,7 @@ namespace {
INSTANTIATE_TEST_SUITE_P(
smoke_OVClassCompiledModelGetPropertyTest, OVClassCompiledModelGetPropertyTest,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
const std::vector<std::tuple<std::string, std::pair<ov::AnyMap, std::string>>> GetMetricTest_ExecutionDevice_CPU = {
{"CPU", std::make_pair(ov::AnyMap{}, "CPU")}};
@ -32,7 +32,7 @@ INSTANTIATE_TEST_SUITE_P(
INSTANTIATE_TEST_SUITE_P(
smoke_OVClassCompiledModelGetIncorrectPropertyTest, OVClassCompiledModelGetIncorrectPropertyTest,
::testing::Values("CPU", "MULTI:CPU", "HETERO:CPU", "AUTO:CPU"));
::testing::Values("CPU", "HETERO:CPU"));
INSTANTIATE_TEST_SUITE_P(
smoke_OVClassCompiledModelGetConfigTest, OVClassCompiledModelGetConfigTest,

View File

@ -11,9 +11,6 @@ namespace {
const std::vector<ov::AnyMap> configs = {
{},
};
const std::vector<ov::AnyMap> multiConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> heteroConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}};
@ -24,18 +21,6 @@ namespace {
::testing::ValuesIn(configs)),
OVCompiledModelBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelBaseTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
OVCompiledModelBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelBaseTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
OVCompiledModelBaseTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_HETERO),
@ -48,18 +33,6 @@ namespace {
::testing::ValuesIn(configs)),
OVCompiledModelBaseTestOptional::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelBaseTestOptional,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
OVCompiledModelBaseTestOptional::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelBaseTestOptional,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
OVCompiledModelBaseTestOptional::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelBaseTestOptional,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_HETERO),
@ -77,14 +50,4 @@ namespace {
{},
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
const std::vector<ov::AnyMap> AutoConfigsSetPrc = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)},
};
const std::vector<ov::AnyMap> MultiConfigsSetPrc = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};
} // namespace

View File

@ -23,8 +23,6 @@ const std::vector<ov::element::Type_t> netPrecisions = {
const std::vector<ov::AnyMap> configs = {
{},
};
const std::vector<ov::AnyMap> multiConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}};
const std::vector<ov::AnyMap> heteroConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}};
@ -37,14 +35,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
::testing::ValuesIn(configs)),
OVCompiledGraphImportExportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
OVCompiledGraphImportExportTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
OVCompiledGraphImportExportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests,
OVCompiledGraphImportExportTest,
::testing::Combine(::testing::ValuesIn(netPrecisions),

View File

@ -21,8 +21,7 @@ const std::vector<ov::AnyMap> auto_batch_inproperties = {
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
OVClassCompiledModelPropertiesIncorrectTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_CPU,
ov::test::utils::DEVICE_HETERO,
ov::test::utils::DEVICE_MULTI, "AUTO:CPU"),
ov::test::utils::DEVICE_HETERO),
::testing::ValuesIn(inproperties)),
OVClassCompiledModelPropertiesIncorrectTests::getTestCaseName);
@ -84,13 +83,6 @@ const std::vector<ov::AnyMap> hetero_properties = {
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
};
const std::vector<ov::AnyMap> multi_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::num_streams(ov::streams::AUTO)},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
};
const std::vector<ov::AnyMap> auto_batch_properties = {
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"}},
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), std::string(ov::test::utils::DEVICE_CPU) + "(4)"},
@ -111,12 +103,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests,
::testing::ValuesIn(hetero_properties)),
OVClassCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests,
OVClassCompiledModelPropertiesTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multi_properties)),
OVClassCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
OVClassCompiledModelPropertiesTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_BATCH),
@ -125,22 +111,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests,
INSTANTIATE_TEST_SUITE_P(smoke_OVCompiledModelIncorrectDevice, OVCompiledModelIncorrectDevice, ::testing::Values("CPU"));
const std::vector<ov::AnyMap> auto_multi_device_properties = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU), ov::device::properties("CPU", ov::num_streams(4))},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})}};
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow,
OVClassCompiledModelPropertiesTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
ov::test::utils::DEVICE_MULTI,
ov::test::utils::DEVICE_HETERO),
::testing::ValuesIn(auto_multi_device_properties)),
OVClassCompiledModelPropertiesTests::getTestCaseName);
const std::vector<ov::AnyMap> configsWithSecondaryProperties = {
{ov::device::properties("CPU", ov::num_streams(4))},
{ov::device::properties("CPU",
@ -151,48 +121,6 @@ const std::vector<ov::AnyMap> configsWithSecondaryProperties = {
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}};
const std::vector<ov::AnyMap> multiConfigsWithSecondaryProperties = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("CPU",
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("CPU",
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}};
const std::vector<ov::AnyMap> autoConfigsWithSecondaryProperties = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("AUTO",
ov::enable_profiling(false),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("CPU",
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("CPU",
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))},
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("AUTO",
ov::enable_profiling(false),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)),
ov::device::properties("CPU",
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(ov::test::utils::DEVICE_GPU),
ov::device::properties("AUTO",
ov::enable_profiling(false),
ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)),
ov::device::properties("CPU",
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}};
const std::vector<ov::AnyMap> heteroConfigsWithSecondaryProperties = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU),
ov::device::properties("HETERO",
@ -227,52 +155,11 @@ const std::vector<ov::AnyMap> heteroConfigsWithSecondaryProperties = {
// IE Class Load network
INSTANTIATE_TEST_SUITE_P(smoke_CPUOVClassCompileModelWithCorrectPropertiesTest,
OVClassCompileModelWithCorrectPropertiesTest,
::testing::Combine(::testing::Values("CPU", "AUTO:CPU", "MULTI:CPU", "HETERO:CPU"),
::testing::Combine(::testing::Values("CPU", "HETERO:CPU"),
::testing::ValuesIn(configsWithSecondaryProperties)));
INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassCompileModelWithCorrectPropertiesTest,
OVClassCompileModelWithCorrectPropertiesTest,
::testing::Combine(::testing::Values("MULTI"),
::testing::ValuesIn(multiConfigsWithSecondaryProperties)));
INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassCompileModelWithCorrectPropertiesTest,
OVClassCompileModelWithCorrectPropertiesTest,
::testing::Combine(::testing::Values("AUTO"),
::testing::ValuesIn(autoConfigsWithSecondaryProperties)));
INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassCompileModelWithCorrectPropertiesTest,
OVClassCompileModelWithCorrectPropertiesTest,
::testing::Combine(::testing::Values("HETERO"),
::testing::ValuesIn(heteroConfigsWithSecondaryProperties)));
const std::vector<std::pair<ov::AnyMap, std::string>> automultiExeDeviceConfigs = {
std::make_pair(ov::AnyMap{{ov::device::priorities(ov::test::utils::DEVICE_CPU)}}, "CPU")};
INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests,
OVCompileModelGetExecutionDeviceTests,
::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO,
ov::test::utils::DEVICE_MULTI,
ov::test::utils::DEVICE_HETERO),
::testing::ValuesIn(automultiExeDeviceConfigs)),
OVCompileModelGetExecutionDeviceTests::getTestCaseName);
const std::vector<ov::AnyMap> multiDevicePriorityConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}};
INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest,
OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY,
::testing::Combine(::testing::Values("MULTI", "AUTO"),
::testing::ValuesIn(multiDevicePriorityConfigs)),
OVClassCompiledModelGetPropertyTest_DEVICE_PRIORITY::getTestCaseName);
const std::vector<ov::AnyMap> multiModelPriorityConfigs = {
{ov::hint::model_priority(ov::hint::Priority::HIGH)},
{ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
{ov::hint::model_priority(ov::hint::Priority::LOW)},
{ov::hint::model_priority(ov::hint::Priority::DEFAULT)}};
INSTANTIATE_TEST_SUITE_P(smoke_OVClassCompiledModelGetPropertyTest,
OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY,
::testing::Combine(::testing::Values("AUTO:CPU"),
::testing::ValuesIn(multiModelPriorityConfigs)));
} // namespace

View File

@ -15,25 +15,9 @@ const std::vector<ov::AnyMap> configs = {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<ov::AnyMap> multiConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
OVInferRequestCallbackTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
OVInferRequestCallbackTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
OVInferRequestCallbackTests::getTestCaseName);
} // namespace

View File

@ -20,41 +20,10 @@ std::vector<Configs> configs = {
{{ov::test::utils::DEVICE_CPU, {}}, {ov::test::utils::DEVICE_CPU, {}}}
};
std::vector<Configs> AutoConfigs = {
{
{
ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU,
{ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}
},
{ov::test::utils::DEVICE_CPU, {}}
},
{
{
ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU,
{ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}
},
{ov::test::utils::DEVICE_CPU, {}}
},
{
{
ov::test::utils::DEVICE_AUTO + std::string(":") + ov::test::utils::DEVICE_CPU,
{ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}
},
{ov::test::utils::DEVICE_CPU, {}}
}
};
INSTANTIATE_TEST_SUITE_P(BehaviorTests, OVInferConsistencyTest,
::testing::Combine(
::testing::Values(10),// inferRequest num
::testing::Values(10),// infer counts
::testing::ValuesIn(configs)),
OVInferConsistencyTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(Auto_BehaviorTests, OVInferConsistencyTest,
::testing::Combine(
::testing::Values(10),// inferRequest num
::testing::Values(10),// infer counts
::testing::ValuesIn(AutoConfigs)),
OVInferConsistencyTest::getTestCaseName);
} // namespace

View File

@ -18,10 +18,6 @@ const std::vector<ov::AnyMap> HeteroConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> AutoConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
std::shared_ptr<ngraph::Function> getFunction1() {
const std::vector<size_t> inputShape = {1, 4, 20, 20};
const ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32;
@ -93,15 +89,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests,
::testing::Values(ov::test::utils::DEVICE_HETERO),
::testing::ValuesIn(HeteroConfigs)),
OVInferRequestDynamicTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests,
::testing::Combine(
::testing::Values(getFunction2()),
::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
{{1, 4, 20, 20}, {1, 2, 20, 40}},
{{2, 4, 20, 20}, {2, 2, 20, 40}}}),
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigs)),
OVInferRequestDynamicTests::getTestCaseName);
} // namespace

View File

@ -17,10 +17,6 @@ const std::vector<ov::AnyMap> HeteroConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> AutoConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChaining,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
@ -33,12 +29,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChaining,
::testing::ValuesIn(HeteroConfigs)),
OVInferenceChaining::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChaining,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigs)),
OVInferenceChaining::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChainingStatic,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
@ -50,10 +40,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChainingStatic,
::testing::Values(ov::test::utils::DEVICE_HETERO),
::testing::ValuesIn(HeteroConfigs)),
OVInferenceChainingStatic::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChainingStatic,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigs)),
OVInferenceChainingStatic::getTestCaseName);
} // namespace

View File

@ -15,14 +15,6 @@ const std::vector<ov::AnyMap> configs = {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<ov::AnyMap> Multiconfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> Autoconfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> emptyConfigs = {{}};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest,
@ -31,18 +23,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest,
::testing::ValuesIn(configs)),
OVInferRequestIOTensorTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
OVInferRequestIOTensorTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
OVInferRequestIOTensorTest::getTestCaseName);
std::vector<ov::element::Type> prcs = {
ov::element::boolean,
ov::element::bf16,
@ -69,38 +49,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecision
::testing::ValuesIn(configs)),
OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest,
::testing::Combine(
::testing::ValuesIn(prcs),
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest,
::testing::Combine(
::testing::ValuesIn(prcs),
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
OVInferRequestIOTensorSetPrecisionTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCheckTensorPrecision,
::testing::Combine(
::testing::ValuesIn(prcs),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(emptyConfigs)),
OVInferRequestCheckTensorPrecision::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCheckTensorPrecision,
::testing::Combine(
::testing::ValuesIn(prcs),
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
OVInferRequestCheckTensorPrecision::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCheckTensorPrecision,
::testing::Combine(
::testing::ValuesIn(prcs),
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Autoconfigs)),
OVInferRequestCheckTensorPrecision::getTestCaseName);
} // namespace

View File

@ -17,10 +17,6 @@ const std::vector<ov::AnyMap> HeteroConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> AutoConfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVIterationChaining,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
@ -32,11 +28,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVIterationChaining,
::testing::Values(ov::test::utils::DEVICE_HETERO),
::testing::ValuesIn(HeteroConfigs)),
OVIterationChaining::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVIterationChaining,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(AutoConfigs)),
OVIterationChaining::getTestCaseName);
} // namespace

View File

@ -16,26 +16,9 @@ const std::vector<ov::AnyMap> configs = {
{{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}}
};
const std::vector<ov::AnyMap> Multiconfigs = {
{ov::device::priorities(ov::test::utils::DEVICE_CPU)}
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::ValuesIn(configs)),
OVInferRequestMultithreadingTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_MULTI),
::testing::ValuesIn(Multiconfigs)),
OVInferRequestMultithreadingTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests,
::testing::Combine(
::testing::Values(ov::test::utils::DEVICE_AUTO),
::testing::ValuesIn(Multiconfigs)),
OVInferRequestMultithreadingTests::getTestCaseName);
} // namespace

Some files were not shown because too many files have changed in this diff Show More