Bell/remote tensor fix in multi (#13483)

* unify device representations

Signed-off-by: fishbell <bell.song@intel.com>

* fix case failure

Signed-off-by: fishbell <bell.song@intel.com>

* add test cases

Signed-off-by: fishbell <bell.song@intel.com>

* indenting

Signed-off-by: fishbell <bell.song@intel.com>

* fix build error on macOS

Signed-off-by: fishbell <bell.song@intel.com>

* keep the original logic: if no device is available, MULTI will throw an exception

Signed-off-by: fishbell <bell.song@intel.com>

Signed-off-by: fishbell <bell.song@intel.com>
Authored by yanlan song on 2022-10-19 10:15:36 +08:00, committed by GitHub
parent 5e2869cd14
commit 752ff23582
5 changed files with 176 additions and 19 deletions
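
Regarding the last bullet in the commit message: the preserved behavior is that compiling for MULTI throws when none of the listed devices is present, rather than silently degrading. A minimal sketch of that expectation in the 2.0 API, assuming a machine with no GPU installed (the model path and exception handling are illustrative only, not part of this commit):

#include <exception>
#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical model path
    try {
        // Assumption: neither GPU.0 nor GPU.1 exists on this machine,
        // so MULTI is expected to throw instead of compiling for nothing.
        auto compiled = core.compile_model(model, "MULTI:GPU.0,GPU.1");
    } catch (const std::exception& e) {
        std::cerr << "MULTI threw as expected: " << e.what() << std::endl;
    }
    return 0;
}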


@@ -6,6 +6,7 @@
#include <vector>
#include "gpu/gpu_config.hpp"
#include "multi/multi_remote_blob_tests.hpp"
#include "multi/multi_remote_blob_multidevice_test.hpp"
#include "common_test_utils/test_constants.hpp"
const std::vector<DevicesNamesAndSupportPair> device_names_and_support_for_remote_blobs {
@@ -56,3 +57,14 @@ const std::vector<DevicesNames> device_names_and_support_for_remote_blobs2 {
INSTANTIATE_TEST_SUITE_P(smoke_RemoteBlobMultiInitializedWithoutGPU, MultiDevice_Test,
::testing::ValuesIn(device_names_and_support_for_remote_blobs2), MultiDevice_Test::getTestCaseName);
const std::vector<DevicesNames> multi_device_names_and_support_for_remote_blobs {
#ifdef ENABLE_INTEL_CPU
{CPU, "GPU.0"},
{CPU, "GPU.0", "GPU.1"}, // another GPU (the test will test its presence), different OCL contexts
#endif
{"GPU.0", "GPU.1"}
};
INSTANTIATE_TEST_SUITE_P(smoke_RemoteBlobMultiInitializedWithoutGPU, MultiDeviceMultipleGPU_Test,
::testing::ValuesIn(multi_device_names_and_support_for_remote_blobs), MultiDeviceMultipleGPU_Test::getTestCaseName);


@@ -8,6 +8,7 @@
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/test_constants.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include "base/ov_behavior_test_utils.hpp"
using namespace ::testing;
@@ -56,6 +57,24 @@ protected:
bool expected_status;
std::shared_ptr<ngraph::Function> fn_ptr;
};
class MultiDeviceMultipleGPU_Test : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<DevicesNames> {
void SetUp() override {
device_names = getDeviceStringWithMulti(this->GetParam());
device_lists = this->GetParam();
fn_ptr = ov::test::behavior::getDefaultNGraphFunctionForTheDevice("");
}
public:
static std::string getTestCaseName(const testing::TestParamInfo<DevicesNames> &obj) {
auto s = getDeviceStringWithMulti(obj.param);
std::replace(s.begin(), s.end(), ',', '_');
return "device_names_" + s;
}
protected:
std::string device_names;
std::vector<std::string> device_lists;
std::shared_ptr<ngraph::Function> fn_ptr;
};
#define MULTI CommonTestUtils::DEVICE_MULTI
#define CPU CommonTestUtils::DEVICE_CPU
#define GPU CommonTestUtils::DEVICE_GPU
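
The new MultiDeviceMultipleGPU_Test fixture above builds its target string via getDeviceStringWithMulti from the shared behavior-test utilities. A rough sketch of what that helper is assumed to do, namely join the parameterized device list into one MULTI device string (the real implementation may differ):

#include <string>
#include <vector>

// Sketch only: {"CPU", "GPU.0", "GPU.1"} -> "MULTI:CPU,GPU.0,GPU.1"
inline std::string getDeviceStringWithMultiSketch(const std::vector<std::string>& devices) {
    std::string result = "MULTI:";
    for (size_t i = 0; i < devices.size(); ++i) {
        result += devices[i];
        if (i + 1 < devices.size())
            result += ",";
    }
    return result;
}

With that, an entry such as {CPU, "GPU.0", "GPU.1"} in the instantiation list maps to a device_names value like "MULTI:CPU,GPU.0,GPU.1", while device_lists keeps the individual names for the per-device remote-context checks in the test body.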


@@ -0,0 +1,79 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include <remote_blob_tests/remote_blob_helpers.hpp>
TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity) {
auto ie = ov::Core();
using namespace ov::preprocess;
auto p = PrePostProcessor(fn_ptr);
p.input().tensor().set_element_type(ov::element::i8);
p.input().preprocess().convert_element_type(ov::element::f32);
auto function = p.build();
ov::CompiledModel exec_net;
try {
exec_net = ie.compile_model(function, device_names, ov::hint::allow_auto_batching(false));
} catch (...) {
// device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test
return;
}
std::vector<ov::InferRequest> inf_req_shared = {};
auto input = function->get_parameters().at(0);
auto output = function->get_results().at(0);
auto fakeImageData = FuncTestUtils::create_and_fill_tensor(input->get_element_type(), input->get_shape());
auto inf_req_regular = exec_net.create_infer_request();
inf_req_regular.set_tensor(input, fakeImageData);
// infer using system memory
inf_req_regular.infer();
auto output_tensor_regular = inf_req_regular.get_tensor(output);
auto imSize = ov::shape_size(input->get_shape());
std::vector<ov::intel_gpu::ocl::ClBufferTensor> cldnn_tensor = {};
for (auto& iter : device_lists) {
try {
auto cldnn_context = ie.get_default_context(iter).as<ov::intel_gpu::ocl::ClContext>();
cl_context ctx = cldnn_context;
auto ocl_instance = std::make_shared<OpenCL>(ctx);
cl_int err;
cl::Buffer shared_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, imSize, NULL, &err);
{
void* buffer = fakeImageData.data();
ocl_instance->_queue.enqueueWriteBuffer(shared_buffer, true, 0, imSize, buffer);
}
cldnn_tensor.emplace_back(cldnn_context.create_tensor(input->get_element_type(), input->get_shape(), shared_buffer));
} catch(...) {
// device does not support remote context
continue;
}
}
for (int i = 0; i < cldnn_tensor.size(); i++) {
auto temprequest = exec_net.create_infer_request();
temprequest.set_input_tensor(cldnn_tensor.at(i));
inf_req_shared.emplace_back(temprequest);
}
for (int i = 0; i < inf_req_shared.size(); i++)
inf_req_shared.at(i).start_async();
for (int i = 0; i < inf_req_shared.size(); i++)
inf_req_shared.at(i).wait();
// compare results
for (int i = 0; i < inf_req_shared.size(); i++) {
auto output_tensor_shared = inf_req_shared.at(i).get_tensor(output);
{
ASSERT_EQ(output->get_element_type(), ov::element::f32);
ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
ASSERT_NO_THROW(output_tensor_regular.data());
ASSERT_NO_THROW(output_tensor_shared.data());
FuncTestUtils::compare_tensor(output_tensor_regular, output_tensor_shared, thr);
}
}
}
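
Distilled out of the test harness, the API sequence the new test exercises looks roughly like the sketch below; the device names and the use of the context's default tensor allocation (instead of wrapping a user-provided cl::Buffer as the test does) are assumptions for illustration, not the committed code:

#include <openvino/runtime/core.hpp>
#include <openvino/runtime/intel_gpu/ocl/ocl.hpp>
#include <openvino/runtime/properties.hpp>

// Sketch: compile for MULTI across two GPUs, create a remote tensor on the
// default context of the first GPU, and run inference with it as the input.
void infer_with_remote_tensor(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    auto compiled = core.compile_model(model, "MULTI:GPU.0,GPU.1",
                                       ov::hint::allow_auto_batching(false));

    // Ask the GPU plugin for the default context of a fully qualified device.
    auto gpu_context = core.get_default_context("GPU.0").as<ov::intel_gpu::ocl::ClContext>();

    auto input = model->get_parameters().at(0);
    auto remote_tensor = gpu_context.create_tensor(input->get_element_type(), input->get_shape());

    auto request = compiled.create_infer_request();
    request.set_tensor(input, remote_tensor);
    request.infer();
}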


@@ -44,6 +44,8 @@ const char dgpuFullDeviceName[] = "Intel(R) Iris(R) Xe MAX Graphics (dGPU)";
// const char vpuxFullDeviceName[] = "";
const std::vector<std::string> availableDevs = {"CPU", "GPU.0", "GPU.1",
"MYRIAD.9.2-ma2480", "MYRIAD.9.1-ma2480", "VPUX"};
const std::vector<std::string> availableDevsNoID = {"CPU", "GPU",
"MYRIAD", "VPUX"};
using ConfigParams = std::tuple<
std::string, // Priority devices
std::vector<DeviceInformation>, // expect metaDevices
@@ -86,16 +88,14 @@ public:
IE_SET_METRIC(SUPPORTED_METRICS, metrics, {METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME)});
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _))
.WillByDefault(RETURN_MOCK_VALUE(metrics));
ON_CALL(*core, GetMetric(StrEq("GPU"),
StrEq(METRIC_KEY(FULL_DEVICE_NAME)), _)).WillByDefault(Return(igpuFullDeviceName));
ON_CALL(*core, GetConfig(StrEq("GPU"),
StrEq(CONFIG_KEY(DEVICE_ID)))).WillByDefault(Return(0));
ON_CALL(*core, GetMetric(StrEq("GPU.0"),
StrEq(METRIC_KEY(FULL_DEVICE_NAME)), _)).WillByDefault(Return(igpuFullDeviceName));
ON_CALL(*core, GetMetric(StrEq("GPU.1"),
StrEq(METRIC_KEY(FULL_DEVICE_NAME)), _)).WillByDefault(Return(dgpuFullDeviceName));
IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys, {});
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _))
.WillByDefault(RETURN_MOCK_VALUE(configKeys));
ON_CALL(*core, GetAvailableDevices()).WillByDefault(Return(availableDevs));
ON_CALL(*plugin, ParseMetaDevices).WillByDefault([this](const std::string& priorityDevices,
const std::map<std::string, std::string>& config) {
@@ -124,8 +124,13 @@ public:
}
}
};
using ParseMetaDeviceNoIDTest = ParseMetaDeviceTest;
TEST_P(ParseMetaDeviceTest, ParseMetaDevicesWithPriority) {
ON_CALL(*core, GetAvailableDevices()).WillByDefault(Return(availableDevs));
IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys, {});
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _))
.WillByDefault(RETURN_MOCK_VALUE(configKeys));
// get Parameter
std::string priorityDevices;
std::vector<DeviceInformation> metaDevices;
@@ -147,6 +152,10 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesWithPriority) {
}
TEST_P(ParseMetaDeviceTest, ParseMetaDevicesNotWithPriority) {
ON_CALL(*core, GetAvailableDevices()).WillByDefault(Return(availableDevs));
IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys, {});
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _))
.WillByDefault(RETURN_MOCK_VALUE(configKeys));
// get Parameter
std::string priorityDevices;
std::vector<DeviceInformation> metaDevices;
@@ -169,6 +178,30 @@ TEST_P(ParseMetaDeviceTest, ParseMetaDevicesNotWithPriority) {
}
}
TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) {
ON_CALL(*core, GetAvailableDevices()).WillByDefault(Return(availableDevsNoID));
IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys, {CONFIG_KEY(DEVICE_ID)});
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _))
.WillByDefault(RETURN_MOCK_VALUE(configKeys));
// get Parameter
std::string priorityDevices;
std::vector<DeviceInformation> metaDevices;
bool throwException;
std::tie(priorityDevices, metaDevices, throwException) = this->GetParam();
EXPECT_CALL(*plugin, ParseMetaDevices(_, _)).Times(1);
EXPECT_CALL(*core, GetMetric(_, _, _)).Times(AnyNumber());
EXPECT_CALL(*core, GetConfig(_, _)).Times(AnyNumber());
EXPECT_CALL(*core, GetAvailableDevices()).Times(1);
EXPECT_CALL(*core, GetSupportedConfig(_, _)).Times(metaDevices.size());
if (throwException) {
ASSERT_ANY_THROW(plugin->ParseMetaDevices(priorityDevices, {}));
} else {
auto result = plugin->ParseMetaDevices(priorityDevices, {{ov::device::priorities.name(), priorityDevices}});
compare(result, metaDevices);
compareDevicePriority(result, metaDevices);
}
}
// ConfigParams details
// example
// ConfigParams {devicePriority, expect metaDevices, ifThrowException}
@@ -198,19 +231,19 @@ const std::vector<ConfigParams> testConfigs = {
//
ConfigParams {"CPU,GPU,MYRIAD,VPUX",
{{"CPU", {}, -1, "", "CPU_", 0},
{"GPU.0", {}, -1, "0", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "1", std::string(dgpuFullDeviceName) + "_1", 1},
{"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
{"MYRIAD", {}, -1, "", "MYRIAD_", 2},
{"VPUX", {}, -1, "", "VPUX_", 3}}, false},
ConfigParams {"VPUX,GPU,CPU",
{{"VPUX", {}, -1, "", "VPUX_", 0},
{"GPU.0", {}, -1, "0", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "1", std::string(dgpuFullDeviceName) + "_1", 1},
{"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
{"CPU", {}, -1, "", "CPU_", 2}}, false},
ConfigParams {"CPU(1),GPU(2),VPUX(4)",
{{"CPU", {}, 1, "", "CPU_", 0},
{"GPU.0", {}, 2, "0", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, 2, "1", std::string(dgpuFullDeviceName) + "_1", 1},
{"GPU.0", {}, 2, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, 2, "", std::string(dgpuFullDeviceName) + "_1", 1},
{"VPUX", {}, 4, "", "VPUX_", 2}}, false},
ConfigParams {"CPU(-1),GPU,MYRIAD,VPUX", {}, true},
@@ -218,20 +251,32 @@ const std::vector<ConfigParams> testConfigs = {
ConfigParams {"CPU(3),GPU.1,MYRIAD.9.2-ma2480,VPUX",
{{"CPU", {}, 3, "", "CPU_", 0},
{"GPU.1", {}, -1, "1", std::string(dgpuFullDeviceName) + "_1", 1},
{"MYRIAD.9.2-ma2480", {}, -1, "9.2-ma2480", "MYRIAD_9.2-ma2480", 2},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
{"MYRIAD.9.2-ma2480", {}, -1, "", "MYRIAD_9.2-ma2480", 2},
{"VPUX", {}, -1, "", "VPUX_", 3}}, false},
ConfigParams {"VPUX,MYRIAD.9.2-ma2480,GPU.1,CPU(3)",
{{"VPUX", {}, -1, "", "VPUX_", 0},
{"MYRIAD.9.2-ma2480", {}, -1, "9.2-ma2480", "MYRIAD_9.2-ma2480", 1},
{"GPU.1", {}, -1, "1", std::string(dgpuFullDeviceName) + "_1", 2},
{"MYRIAD.9.2-ma2480", {}, -1, "", "MYRIAD_9.2-ma2480", 1},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 2},
{"CPU", {}, 3, "", "CPU_", 3}}, false}
};
const std::vector<ConfigParams> testConfigsNoID = {
ConfigParams {"CPU,GPU,MYRIAD,VPUX",
{{"CPU", {}, -1, "", "CPU_", 0},
{"GPU", {}, -1, "0", std::string(igpuFullDeviceName) + "_0", 1},
{"MYRIAD", {}, -1, "", "MYRIAD_", 2},
{"VPUX", {}, -1, "", "VPUX_", 3}}, false},
};
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceTest,
::testing::ValuesIn(testConfigs),
ParseMetaDeviceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ParseMetaDeviceNoIDTest,
::testing::ValuesIn(testConfigsNoID),
ParseMetaDeviceTest::getTestCaseName);
//TODO: need to add a test for ParseMetaDevices(_, config) to check the device config of
//the returned metaDevices
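
For readers decoding the expectation tuples in testConfigs: by position they appear to map onto the plugin's DeviceInformation roughly as sketched below (field names are inferred from the surrounding tests, not copied from the header). The behavioral change is that qualified names such as "GPU.1" keep the ID in deviceName and no longer also populate a separate default device ID.

#include <map>
#include <string>

// Sketch of the layout implied by the expectation tuples above; the real
// struct lives in the AUTO/MULTI plugin sources and may differ in detail.
struct DeviceInformationSketch {
    std::string deviceName;                     // e.g. "GPU.1" (already qualified)
    std::map<std::string, std::string> config;  // per-device config, empty ({}) here
    int numRequestsPerDevices;                  // -1 when unset; "CPU(3)" -> 3
    std::string defaultDeviceID;                // now "" because deviceName carries the ID
    std::string uniqueName;                     // e.g. full device name + "_1"
    unsigned int devicePriority;                // position in the priority list
};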