Improve test coverage for auto_executable_network.cpp (#14693)
* Add a test case for device_bind_buffer
* Correct the path to the header file properties.hpp
* Rename the remote blob test cases with the Multi prefix
* Add a test case for remote blob with device bind buffer
* Add logs for debugging
* Disable the RemoteBlobInitializedWithoutGPU test case
* Add a property to the remote blob test case
* Remove the debug logs from bind_multi_schedule.cpp
* Fix the MultiDeviceMultipleGPU_Test failure
* Add a test case for oversubscription of infer requests
* Get the optimal number of infer requests to create
* Use the ENABLE_INTEL_CPU macro to guard test cases that need CPU
* Fix the issue that the canCreateRemoteTensorThenInferWithAffinity test case fails to run
* Remove ov::hint::PerformanceMode::UNDEFINED from MultiDeviceMultipleGPU_Test
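The test cases added here revolve around ov::intel_auto::device_bind_buffer (declared in openvino/runtime/auto/properties.hpp), ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT, and the optimal_number_of_infer_requests property that the oversubscription test checks against. Below is a minimal usage sketch, not part of this patch; the model path "model.xml" and the device list "AUTO:GPU,CPU" are placeholder assumptions.

// Illustrative sketch only (not from this patch): compiling with the
// properties that the new tests exercise. "model.xml" and the device
// list "AUTO:GPU,CPU" are placeholder assumptions.
#include <openvino/openvino.hpp>
#include <openvino/runtime/auto/properties.hpp>

#include <iostream>
#include <vector>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // placeholder model

    auto compiled = core.compile_model(
        model,
        "AUTO:GPU,CPU",
        ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
        ov::intel_auto::device_bind_buffer(true));

    // When the request-number test parameter is 0, the tests query this
    // property and create exactly that many infer requests.
    uint32_t optimal = compiled.get_property(ov::optimal_number_of_infer_requests);
    std::cout << "optimal number of infer requests: " << optimal << std::endl;

    std::vector<ov::InferRequest> requests;
    for (uint32_t i = 0; i < optimal; ++i)
        requests.push_back(compiled.create_infer_request());
    return 0;
}

As exercised by the oversubscription test added further down, the bind-buffer schedule is expected to reject creating more infer requests than the reported optimal number.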
@@ -7,9 +7,9 @@
#include "multi/multi_remote_blob_tests.hpp"
#include "common_test_utils/test_constants.hpp"

const std::vector<DevicesNamesAndSupportPair> device_names_and_support_for_remote_blobs {
    {{CPU}, false}, // CPU via MULTI
const std::vector<DevicesNamesAndSupportTuple> device_names_and_support_for_remote_blobs {
    {{CPU}, false, {}}, // CPU via MULTI
};

INSTANTIATE_TEST_SUITE_P(smoke_RemoteBlobMultiCPU, MultiDevice_SupportTest,
INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobCPU, MultiDevice_SupportTest,
    ::testing::ValuesIn(device_names_and_support_for_remote_blobs), MultiDevice_SupportTest::getTestCaseName);
@@ -49,6 +49,18 @@ auto AutoConfigs = []() {
                                      CommonTestUtils::DEVICE_GPU,
                                  {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}},
                                 {CommonTestUtils::DEVICE_CPU, {}},
                                 {CommonTestUtils::DEVICE_GPU, {}}},
                                {{CommonTestUtils::DEVICE_AUTO + std::string(":") + CommonTestUtils::DEVICE_GPU + "," +
                                      CommonTestUtils::DEVICE_CPU,
                                  {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
                                   ov::intel_auto::device_bind_buffer(true)}},
                                 {CommonTestUtils::DEVICE_GPU, {}},
                                 {CommonTestUtils::DEVICE_CPU, {}}},
                                {{CommonTestUtils::DEVICE_AUTO + std::string(":") + CommonTestUtils::DEVICE_CPU + "," +
                                      CommonTestUtils::DEVICE_GPU,
                                  {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
                                   ov::intel_auto::device_bind_buffer(true)}},
                                 {CommonTestUtils::DEVICE_CPU, {}},
                                 {CommonTestUtils::DEVICE_GPU, {}}}};
};

@@ -71,6 +83,29 @@ auto MultiConfigs = []() {
                                 {CommonTestUtils::DEVICE_CPU, {}}}};
};

auto MultiBindConfigs = []() {
    return std::vector<Configs>{{{CommonTestUtils::DEVICE_MULTI + std::string(":") + CommonTestUtils::DEVICE_GPU + "," +
                                      CommonTestUtils::DEVICE_CPU,
                                  {ov::intel_auto::device_bind_buffer(true)}},
                                 {CommonTestUtils::DEVICE_GPU, {}},
                                 {CommonTestUtils::DEVICE_CPU, {}}}};
};

auto AutoBindConfigs = []() {
    return std::vector<Configs>{{{CommonTestUtils::DEVICE_AUTO + std::string(":") + CommonTestUtils::DEVICE_GPU + "," +
                                      CommonTestUtils::DEVICE_CPU,
                                  {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
                                   ov::intel_auto::device_bind_buffer(true)}},
                                 {CommonTestUtils::DEVICE_GPU, {}},
                                 {CommonTestUtils::DEVICE_CPU, {}}},
                                {{CommonTestUtils::DEVICE_AUTO + std::string(":") + CommonTestUtils::DEVICE_CPU + "," +
                                      CommonTestUtils::DEVICE_GPU,
                                  {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
                                   ov::intel_auto::device_bind_buffer(true)}},
                                 {CommonTestUtils::DEVICE_CPU, {}},
                                 {CommonTestUtils::DEVICE_GPU, {}}}};
};

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferConsistencyTest,
    ::testing::Combine(
        ::testing::Values(10),// inferRequest num

@@ -91,4 +126,19 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferConsistencyTest,
        ::testing::Values(50),// infer counts
        ::testing::ValuesIn(MultiConfigs())),
    OVInferConsistencyTest::getTestCaseName);
} // namespace

INSTANTIATE_TEST_SUITE_P(smoke_Auto_Bind_BehaviorTests, OVInferConsistencyTest,
    ::testing::Combine(
        ::testing::Values(0),// inferRequest num, will use optimal request number if set 0
        ::testing::Values(100),// infer counts
        ::testing::ValuesIn(AutoBindConfigs())),
    OVInferConsistencyTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_Multi_Bind_BehaviorTests, OVInferConsistencyTest,
    ::testing::Combine(
        ::testing::Values(0),// inferRequest num, will use optimal request number if set 0
        ::testing::Values(100),// infer counts
        ::testing::ValuesIn(MultiBindConfigs())),
    OVInferConsistencyTest::getTestCaseName);

} // namespace
@@ -34,11 +34,24 @@ auto configs = []() {
};

auto Multiconfigs = []() {
    return std::vector<ov::AnyMap>{{ov::device::priorities(CommonTestUtils::DEVICE_GPU)}};
    return std::vector<ov::AnyMap>{{ov::device::priorities(CommonTestUtils::DEVICE_GPU)},
#ifdef ENABLE_INTEL_CPU
                                   {ov::device::priorities(CommonTestUtils::DEVICE_GPU, CommonTestUtils::DEVICE_CPU),
                                    ov::intel_auto::device_bind_buffer(false)},
                                   {ov::device::priorities(CommonTestUtils::DEVICE_GPU, CommonTestUtils::DEVICE_CPU),
                                    ov::intel_auto::device_bind_buffer(true)}
#endif
    };
};

auto Autoconfigs = []() {
    return std::vector<ov::AnyMap>{{ov::device::priorities(CommonTestUtils::DEVICE_GPU)}};
    return std::vector<ov::AnyMap>{{ov::device::priorities(CommonTestUtils::DEVICE_GPU)},
#ifdef ENABLE_INTEL_CPU
                                   {ov::device::priorities(CommonTestUtils::DEVICE_GPU, CommonTestUtils::DEVICE_CPU),
                                    ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
                                    ov::intel_auto::device_bind_buffer(true)}
#endif
    };
};

auto AutoBatchConfigs = []() {
@@ -8,20 +8,28 @@
#include "multi/multi_remote_blob_tests.hpp"
#include "multi/multi_remote_blob_multidevice_test.hpp"
#include "common_test_utils/test_constants.hpp"
#include <openvino/runtime/auto/properties.hpp>

using MultiDevice_Bind_oversubsciption_test = MultiDevice_Test;

auto device_names_and_support_for_remote_blobs = []() {
    return std::vector<DevicesNamesAndSupportPair>{
        {{GPU}, true}, // GPU via MULTI,
        {{"GPU.0"}, true}, // GPU.0 via MULTI,
    return std::vector<DevicesNamesAndSupportTuple>{
        {{GPU}, true, {}}, // GPU via MULTI,
        {{"GPU.0"}, true, {}}, // GPU.0 via MULTI,
        {{GPU}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU via MULTI,
        {{"GPU.0"}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU.0 via MULTI,
#ifdef ENABLE_INTEL_CPU
        {{GPU, CPU}, true}, // GPU+CPU
        {{CPU, GPU}, true}, // CPU+GPU
        {{GPU, CPU}, true, {}}, // GPU+CPU
        {{CPU, GPU}, true, {}}, // CPU+GPU
        {{GPU, CPU}, true, {ov::intel_auto::device_bind_buffer(true)}}, // GPU+CPU
#endif
    };
};

INSTANTIATE_TEST_SUITE_P(smoke_RemoteBlobMultiGPU, MultiDevice_SupportTest,
    ::testing::ValuesIn(device_names_and_support_for_remote_blobs()), MultiDevice_SupportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobGPU,
                         MultiDevice_SupportTest,
                         ::testing::ValuesIn(device_names_and_support_for_remote_blobs()),
                         MultiDevice_SupportTest::getTestCaseName);

TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) {
    InferenceEngine::CNNNetwork net(fn_ptr);

@@ -36,9 +44,14 @@ TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) {
    auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx);
    rblob->allocate();

    std::map<std::string, std::string> configs;
    for (auto&& value : _properties) {
        configs.emplace(value.first, value.second.as<std::string>());
    }

    InferenceEngine::ExecutableNetwork exec_net_multi;
    try {
        exec_net_multi = ie->LoadNetwork(net, device_names);
        exec_net_multi = ie->LoadNetwork(net, device_names, configs);
    } catch(...) {
        // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test
        return;

@@ -50,29 +63,82 @@ TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) {
    ASSERT_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY), InferenceEngine::Exception);
}

TEST_P(MultiDevice_Bind_oversubsciption_test, oversubsciptionOfInferRequest) {
    InferenceEngine::CNNNetwork net(fn_ptr);
    auto ie = PluginCache::get().ie();
    // load a network to the GPU to make sure we have a remote context
    auto exec_net = ie->LoadNetwork(net, GPU);
    auto ctx = exec_net.GetContext();

    const InferenceEngine::ConstInputsDataMap inputInfo = exec_net.GetInputsInfo();
    auto& first_input = inputInfo.begin()->second;
    auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx);
    rblob->allocate();

    std::map<std::string, std::string> configs;
    for (auto&& value : _properties) {
        configs.emplace(value.first, value.second.as<std::string>());
    }

    InferenceEngine::ExecutableNetwork exec_net_multi;
    try {
        exec_net_multi = ie->LoadNetwork(net, device_names, configs);
    } catch(...) {
        // device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test
        return;
    }

    unsigned int optimalNum = 0;
    try {
        optimalNum = exec_net_multi.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
    } catch (...) {
        std::cout << "ExecutableNetwork getMetric failed" << std::endl;
        return;
    }

    // test binder mode to throw exception when oversubsciption of infer requests
    InferenceEngine::InferRequest req;
    for (int i = 0; i < optimalNum; i++) {
        req = exec_net_multi.CreateInferRequest();
    }
    ASSERT_ANY_THROW(req = exec_net_multi.CreateInferRequest());
}

auto device_names_and_support_for_remote_blobs2 = []() {
    return std::vector<DevicesNames>{
    return std::vector<DevicesNamseAndProperties>{
#ifdef ENABLE_INTEL_CPU
        {CPU}, // stand-alone CPU via MULTI (no GPU), no OCL context
        //{{CPU}, {}}, // stand-alone CPU via MULTI (no GPU), no OCL context
        {{CPU}, {ov::intel_auto::device_bind_buffer(true)}}, // stand-alone CPU via MULTI (no GPU), no OCL context
#endif
        {"GPU.1"}, // another GPU (the test will test its presence), different OCL contexts
        {{"GPU.1"}, {}}, // another GPU (the test will test its presence), different OCL contexts
        {{"GPU.1"}, {ov::intel_auto::device_bind_buffer(true)}}, // another GPU (the test will test its presence), different OCL contexts
    };
};

INSTANTIATE_TEST_SUITE_P(smoke_RemoteBlobMultiInitializedWithoutGPU,
INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobInitializedWithoutGPU,
                         MultiDevice_Test,
                         ::testing::ValuesIn(device_names_and_support_for_remote_blobs2()),
                         MultiDevice_Test::getTestCaseName);

auto multi_bind_oversubsciption_test = []() {
    return std::vector<DevicesNamseAndProperties>{{{GPU}, {ov::intel_auto::device_bind_buffer(true)}}};
};

INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobOversubsciptionInferRequest,
                         MultiDevice_Bind_oversubsciption_test,
                         ::testing::ValuesIn(multi_bind_oversubsciption_test()),
                         MultiDevice_Test::getTestCaseName);

auto multi_device_names_and_support_for_remote_blobs = []() {
    return std::vector<DevicesNames>{
#ifdef ENABLE_INTEL_CPU
        {CPU, "GPU.0"},
        {CPU, "GPU.0", "GPU.1"}, // another GPU (the test will test its presence), different OCL contexts
        {"GPU.0", CPU},
        {"GPU.0", "GPU.1", CPU}, // another GPU (the test will test its presence), different OCL contexts
#endif
        {"GPU.0", "GPU.1"}};
};
INSTANTIATE_TEST_SUITE_P(smoke_RemoteBlobMultiInitializedWithoutGPU,

INSTANTIATE_TEST_SUITE_P(smoke_Multi_RemoteBlobInitializedWithoutGPU,
                         MultiDeviceMultipleGPU_Test,
                         ::testing::ValuesIn(multi_device_names_and_support_for_remote_blobs()),
                         MultiDeviceMultipleGPU_Test::getTestCaseName);
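For context, a condensed sketch, not from this patch, of the remote blob flow that the MULTI tests above follow: obtain a GPU remote context from a network loaded on GPU, create a remote blob from it, then load the same network through MULTI and infer with that blob. The "GPU" and "MULTI:GPU" device strings are placeholder assumptions and error handling is omitted.

// Illustrative sketch only (not from this patch): the remote-blob flow that
// the MULTI tests above exercise, using the Inference Engine API.
#include <ie_core.hpp>

void remote_blob_with_multi(InferenceEngine::CNNNetwork& net) {
    InferenceEngine::Core ie;

    // Load on GPU first so a remote (OpenCL) context exists.
    auto exec_net_gpu = ie.LoadNetwork(net, "GPU");
    auto ctx = exec_net_gpu.GetContext();

    // Create a device-side blob for the first input from that context.
    auto first_input = exec_net_gpu.GetInputsInfo().begin()->second;
    auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx);
    rblob->allocate();

    // Load the same network through MULTI; the tests additionally pass
    // device_bind_buffer and other properties as a string config map here.
    auto exec_net_multi = ie.LoadNetwork(net, "MULTI:GPU");
    auto req = exec_net_multi.CreateInferRequest();
    req.SetBlob(exec_net_multi.GetInputsInfo().begin()->first, rblob);
    req.StartAsync();
    req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
}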
@@ -5,10 +5,11 @@
#include <string>
#include <vector>

#include "base/ov_behavior_test_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/test_constants.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "base/ov_behavior_test_utils.hpp"
#include "openvino/util/common_util.hpp"

using namespace ::testing;

@@ -22,39 +23,68 @@ static std::string getDeviceStringWithMulti(std::vector<std::string> names) {
}
using DeviceName = std::string;
using DevicesNames = std::vector<DeviceName>;
using DevicesNamesAndSupportPair = std::pair<DevicesNames, bool>;
using DevicesNamesAndSupportTuple = std::tuple<DevicesNames, bool, ov::AnyMap>;
using DevicesNamseAndProperties = std::pair<DevicesNames, ov::AnyMap>;

class MultiDevice_Test : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<DevicesNames> {
class MultiDevice_Test : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<DevicesNamseAndProperties> {
    void SetUp() override {
        device_names = getDeviceStringWithMulti(this->GetParam());
        std::vector<DeviceName> deviceNameList;
        std::tie(deviceNameList, _properties) = this->GetParam();
        device_names = getDeviceStringWithMulti(deviceNameList);
        fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
    }

public:
    static std::string getTestCaseName(const testing::TestParamInfo<DevicesNames> &obj) {
        auto s = getDeviceStringWithMulti(obj.param);
    static std::string getTestCaseName(const testing::TestParamInfo<DevicesNamseAndProperties>& obj) {
        auto s = getDeviceStringWithMulti(obj.param.first);
        ov::AnyMap properties = obj.param.second;
        std::replace(s.begin(), s.end(), ',', '_');
        return "device_names_" + s;
        std::replace(s.begin(), s.end(), ':', '_');
        std::ostringstream result;
        result << "device_names_" << s << "_";
        if (!properties.empty()) {
            result << "properties=" << ov::util::join(ov::util::split(ov::util::to_string(properties), ' '), "_");
        } else {
            result << "no_property";
        }
        return result.str();
    }

protected:
    std::string device_names;
    ov::AnyMap _properties;
    std::shared_ptr<ngraph::Function> fn_ptr;
};

class MultiDevice_SupportTest : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<DevicesNamesAndSupportPair> {
class MultiDevice_SupportTest : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<DevicesNamesAndSupportTuple> {
    void SetUp() override {
        device_names = getDeviceStringWithMulti(this->GetParam().first);
        expected_status = this->GetParam().second;
        std::vector<DeviceName> deviceNameList;
        std::tie(deviceNameList, expected_status, _properties) = this->GetParam();
        device_names = getDeviceStringWithMulti(deviceNameList);
        fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
    }

public:
    static std::string getTestCaseName(const testing::TestParamInfo<DevicesNamesAndSupportPair> &obj) {
        auto s = getDeviceStringWithMulti(obj.param.first);
    static std::string getTestCaseName(const testing::TestParamInfo<DevicesNamesAndSupportTuple>& obj) {
        auto s = getDeviceStringWithMulti(std::get<0>(obj.param));
        std::string expected_status = std::get<1>(obj.param) == true ? "expect_TRUE" : "expect_FALSE";
        ov::AnyMap properties = std::get<2>(obj.param);
        std::replace(s.begin(), s.end(), ',', '_');
        return "device_names_" + s;
        std::replace(s.begin(), s.end(), ':', '_');
        std::ostringstream result;
        result << "device_names_" << s << "_" << expected_status << "_";
        if (!properties.empty()) {
            result << "properties=" << ov::util::join(ov::util::split(ov::util::to_string(properties), ' '), "_");
        } else {
            result << "no_property";
        }
        return result.str();
    }

protected:
    std::string device_names;
    bool expected_status;
    ov::AnyMap _properties;
    std::shared_ptr<ngraph::Function> fn_ptr;
};

@@ -68,6 +98,7 @@ public:
    static std::string getTestCaseName(const testing::TestParamInfo<DevicesNames> &obj) {
        auto s = getDeviceStringWithMulti(obj.param);
        std::replace(s.begin(), s.end(), ',', '_');
        std::replace(s.begin(), s.end(), ':', '_');
        return "device_names_" + s;
    }
protected:
@@ -8,6 +8,7 @@
#include <openvino/openvino.hpp>
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "openvino/runtime/auto/properties.hpp"

namespace ov {
namespace test {
@@ -5,6 +5,7 @@
#pragma once

#include "base/ov_behavior_test_utils.hpp"
#include "openvino/runtime/auto/properties.hpp"

namespace ov {
namespace test {
@@ -35,10 +35,12 @@ TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity)
    inf_req_regular.infer();
    auto output_tensor_regular = inf_req_regular.get_tensor(output);
    auto imSize = ov::shape_size(input->get_shape());
    std::vector<ov::intel_gpu::ocl::ClContext> contexts = {};
    std::vector<ov::intel_gpu::ocl::ClBufferTensor> cldnn_tensor = {};
    for (auto& iter : device_lists) {
        try {
            auto cldnn_context = ie.get_default_context(iter).as<ov::intel_gpu::ocl::ClContext>();
            contexts.push_back(cldnn_context);
            cl_context ctx = cldnn_context;
            auto ocl_instance = std::make_shared<OpenCL>(ctx);
            cl_int err;

@@ -76,4 +78,4 @@ TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity)
            FuncTestUtils::compare_tensor(output_tensor_regular, output_tensor_shared, thr);
        }
    }
}
}
@@ -15,7 +15,12 @@ TEST_P(MultiDevice_SupportTest, canCreateContextThenRequestThenBlobsAndInfer) {

    auto ie = PluginCache::get().ie();

    auto exec_net = ie->LoadNetwork(net, device_names);
    std::map<std::string, std::string> configs;
    for (auto&& value : _properties) {
        configs.emplace(value.first, value.second.as<std::string>());
    }

    auto exec_net = ie->LoadNetwork(net, device_names, configs);
    if (expected_status) {
        std::shared_ptr<InferenceEngine::RemoteContext> ctx;
        ASSERT_NE(ctx = exec_net.GetContext(), nullptr);
@@ -45,6 +45,13 @@ void OVInferConsistencyTest::SetUp() {
    for (auto&& item : _deviceConfigs) {
        ModelContext modelContext;
        modelContext._model = core->compile_model(function, item.first, item.second);
        if (_inferReqNumPerModel == 0) {
            try {
                _inferReqNumPerModel = modelContext._model.get_property(ov::optimal_number_of_infer_requests);
            } catch (...) {
                throw("cannot deduce infer request number");
            }
        }
        for (auto i = 0; i < _inferReqNumPerModel; i++) {
            InferContext inferContext;
            inferContext._inferRequest = modelContext._model.create_infer_request();