GPU remote tensor tests update. Get rid of API 1.0 (#21553)

* GPU remote tensor tests update. Get rid of API 1.0

* Apply comments

* Apply comments
Author: Oleg Pipikin
Date: 2023-12-12 09:43:49 +01:00 (committed by GitHub)
Parent: 42c33ac7b1
Commit: 2fb683d667
6 changed files with 35 additions and 61 deletions
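
The change that repeats across these files is the migration from the Inference Engine (API 1.0) remote-context helpers to their OpenVINO 2.0 equivalents. A minimal sketch of the new pattern, assuming a host with the intel_gpu plugin available (illustrative, not code from this diff):

#include <openvino/runtime/core.hpp>
#include <openvino/runtime/intel_gpu/ocl/ocl.hpp>

int main() {
    ov::Core core;
    // API 2.0: the plugin hands out its default remote context; API 1.0 built one
    // explicitly through InferenceEngine::gpu::make_shared_context().
    auto gpu_context = core.get_default_context("GPU").as<ov::intel_gpu::ocl::ClContext>();
    cl_context handle = gpu_context.get();  // raw OpenCL handle, usable for interop
    return handle != nullptr ? 0 : 1;
}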

File 1 of 6

@@ -11,7 +11,7 @@
 #include <ie_compound_blob.h>
 #include <gpu/gpu_config.hpp>
-#include <remote_blob_tests/remote_blob_helpers.hpp>
+#include <remote_tensor_tests/helpers.hpp>
 #include <common_test_utils/test_common.hpp>
 #include <functional_test_utils/plugin_cache.hpp>

File 2 of 6

@@ -8,14 +8,12 @@
 #include <tuple>
 #include <memory>
-#include <ie_compound_blob.h>
-#include <gpu/gpu_config.hpp>
-#include <common_test_utils/test_common.hpp>
-#include <common_test_utils/test_constants.hpp>
+#include "gpu/gpu_config.hpp"
+#include "common_test_utils/test_common.hpp"
+#include "common_test_utils/test_constants.hpp"
 #include "common_test_utils/file_utils.hpp"
 #include "ov_models/subgraph_builders.hpp"
-#include <openvino/core/preprocess/pre_post_process.hpp>
+#include "openvino/core/preprocess/pre_post_process.hpp"
 #ifdef _WIN32
 #ifdef ENABLE_DX11
@@ -30,12 +28,13 @@
 #define NOMINMAX_DEFINED_CTX_UT
 #endif
-#include <gpu/gpu_context_api_dx.hpp>
-#include <openvino/runtime/intel_gpu/ocl/dx.hpp>
 #include <atlbase.h>
 #include <d3d11.h>
 #include <d3d11_4.h>
+#include "gpu/gpu_context_api_dx.hpp"
+#include "openvino/runtime/intel_gpu/ocl/dx.hpp"
 #ifdef NOMINMAX_DEFINED_CTX_UT
 #undef NOMINMAX
 #undef NOMINMAX_DEFINED_CTX_UT
@@ -202,29 +201,26 @@ TEST_F(DX11RemoteCtx_Test, smoke_make_shared_context) {
 #if defined(ANDROID)
     GTEST_SKIP();
 #endif
-    using namespace InferenceEngine;
-    using namespace InferenceEngine::gpu;
-    auto ie = InferenceEngine::Core();
+    auto core = ov::Core();
 
     CComPtr<ID3D11Device> device_ptr;
     CComPtr<ID3D11DeviceContext> ctx_ptr;
 
     ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) =
                     create_device_with_ctx(intel_adapters[0]));
-    auto remote_context = make_shared_context(ie,
-                                              ov::test::utils::DEVICE_GPU,
-                                              device_ptr);
-    ASSERT_TRUE(remote_context);
+    auto gpu_context = core.get_default_context("GPU").as<ov::intel_gpu::ocl::ClContext>();
+    auto context_handle = gpu_context.get();
+    ASSERT_TRUE(context_handle);
 
     for (auto adapter : other_adapters) {
         CComPtr<ID3D11Device> device_ptr;
         CComPtr<ID3D11DeviceContext> ctx_ptr;
 
         ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) =
                         create_device_with_ctx(adapter));
-        ASSERT_THROW(make_shared_context(ie, ov::test::utils::DEVICE_GPU,
-                                         device_ptr),
-                     std::runtime_error);
+        ASSERT_THROW(ov::intel_gpu::ocl::D3DContext gpu_context(core, device_ptr),
+                     std::runtime_error);
     }
 }
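
For context, the rewritten test above relies on the API 2.0 D3D11 interop wrapper instead of make_shared_context(). A hedged sketch of that wrapper in isolation (Windows-only; assumes a valid ID3D11Device* obtained elsewhere, for example from D3D11CreateDevice):

#include <openvino/runtime/core.hpp>
#include <openvino/runtime/intel_gpu/ocl/dx.hpp>

// Wrap an existing D3D11 device in a GPU-plugin remote context. As the test
// asserts, construction throws std::runtime_error for unusable adapters.
ov::intel_gpu::ocl::D3DContext wrap_device(ov::Core& core, ID3D11Device* device) {
    return ov::intel_gpu::ocl::D3DContext(core, device);
}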

File 3 of 6

@@ -2,36 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <string>
-#include <utility>
-#include <vector>
-#include <memory>
-
-#include "openvino/core/dimension.hpp"
-#include "openvino/core/except.hpp"
-#include "openvino/core/model.hpp"
 #include "openvino/core/preprocess/pre_post_process.hpp"
 #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
-#include "openvino/runtime/core.hpp"
 #include "openvino/runtime/intel_gpu/properties.hpp"
-#include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
-#include "remote_blob_tests/remote_blob_helpers.hpp"
+#include "remote_tensor_tests/helpers.hpp"
+#include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
-#include "common_test_utils/test_common.hpp"
 #include "base/ov_behavior_test_utils.hpp"
 #include "ov_models/subgraph_builders.hpp"
-#include "functional_test_utils/blob_utils.hpp"
-#include "transformations/utils/utils.hpp"
+#include "subgraphs_builders.hpp"
 
-using namespace ::testing;
 
 class OVRemoteTensor_Test : public ov::test::TestsCommon {
 protected:
-    std::shared_ptr<ngraph::Function> fn_ptr;
+    std::shared_ptr<ov::Model> fn_ptr;
 
     void SetUp() override {
         fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
@@ -39,6 +22,8 @@ protected:
 };
 
 namespace {
+using ::testing::HasSubstr;
+
 std::vector<bool> ov_dynamic {true, false};
 std::vector<bool> ov_with_auto_batching {true, false};
 
 enum class RemoteTensorSharingType {
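
The newly scoped HasSubstr works together with OV_EXPECT_THROW from the test_assertions.hpp header added above, which matches on the exception message. A generic illustration (the checked call and message fragment are hypothetical):

// Assert that a statement throws ov::Exception with a message containing a fragment.
OV_EXPECT_THROW(do_call(), ov::Exception, HasSubstr("expected fragment"));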
@@ -71,7 +56,7 @@ using RemoteTensorSharingTestOptionsParams = std::tuple<RemoteTensorSharingType,
 class OVRemoteTensorInputBlob_Test : public OVRemoteTensor_Test,
         public testing::WithParamInterface<RemoteTensorSharingTestOptionsParams> {
 protected:
-    std::shared_ptr<ngraph::Function> fn_ptr;
+    std::shared_ptr<ov::Model> fn_ptr;
 
     std::string deviceName;
     ov::AnyMap config;
@@ -340,10 +325,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 }
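
The deleted threshold line was the last API 1.0 remnant in these checks: FuncTestUtils::GetComparisonThreshold took an InferenceEngine::Precision. The two-argument ov::test::utils::compare overload picks its tolerances internally (derived from the element type, as far as I can tell), and an explicit threshold can still be passed when a test needs one; the same simplification repeats in the hunks below.

ov::test::utils::compare(output_tensor_regular, output_tensor_shared);        // internal defaults
ov::test::utils::compare(output_tensor_regular, output_tensor_shared, 1e-4);  // explicit abs threshold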
@@ -611,10 +595,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputOutputRemoteTensor) {
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 }
@@ -750,7 +733,7 @@ TEST(OVRemoteTensorTests, smoke_MixedTensorTypes) {
 class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testing::WithParamInterface<bool> {
 protected:
-    std::shared_ptr<ngraph::Function> fn_ptr;
+    std::shared_ptr<ov::Model> fn_ptr;
 
     std::string deviceName;
     ov::AnyMap config;
@@ -819,10 +802,9 @@ public:
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 
     if (is_caching_test) {
@@ -883,10 +865,9 @@ public:
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 
     if (is_caching_test) {
@@ -979,9 +960,8 @@ public:
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
-        ov::test::utils::compare(output_tensor_regular, out_tensor, thr);
+        ov::test::utils::compare(output_tensor_regular, out_tensor);
     }
 
     if (is_caching_test) {
@@ -1070,9 +1050,8 @@ public:
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
-        ov::test::utils::compare(output_tensor_regular, out_tensor, thr);
+        ov::test::utils::compare(output_tensor_regular, out_tensor);
     }
 
     if (is_caching_test) {
@@ -1162,9 +1141,8 @@ public:
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
-        ov::test::utils::compare(output_tensor_regular, out_tensor, thr);
+        ov::test::utils::compare(output_tensor_regular, out_tensor);
     }
 
     if (is_caching_test) {
@@ -1781,7 +1759,7 @@ public:
 protected:
     size_t num_batch;
-    std::vector<std::shared_ptr<ngraph::Function>> fn_ptrs;
+    std::vector<std::shared_ptr<ov::Model>> fn_ptrs;
 };
 
 TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) {
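
Alongside the include cleanup, every std::shared_ptr<ngraph::Function> in this file becomes std::shared_ptr<ov::Model>, the API 2.0 model class. A self-contained sketch of constructing one directly (illustrative; the tests themselves keep using the subgraph builders):

#include <memory>
#include <openvino/core/model.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/relu.hpp>

std::shared_ptr<ov::Model> make_tiny_model() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
    auto relu = std::make_shared<ov::op::v0::Relu>(param);
    return std::make_shared<ov::Model>(ov::OutputVector{relu->output(0)}, ov::ParameterVector{param});
}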

File 4 of 6

@@ -12,11 +12,11 @@
 #endif
 
 #ifdef _WIN32
-# include <gpu/gpu_context_api_dx.hpp>
+# include "gpu/gpu_context_api_dx.hpp"
 #elif defined ENABLE_LIBVA
-# include <gpu/gpu_context_api_va.hpp>
+# include "gpu/gpu_context_api_va.hpp"
 #endif
-#include <gpu/gpu_context_api_ocl.hpp>
+#include "gpu/gpu_context_api_ocl.hpp"
 
 namespace {
 template <typename T>

File 5 of 6

@@ -3,7 +3,7 @@
 //
 
 #include <behavior/plugin/core_threading.hpp>
-#include <remote_blob_tests/remote_blob_helpers.hpp>
+#include <remote_tensor_tests/helpers.hpp>
 
 using namespace InferenceEngine;
 using namespace InferenceEngine::gpu;

File 6 of 6

@@ -8,7 +8,7 @@
 #include "openvino/runtime/core.hpp"
 #include "openvino/runtime/properties.hpp"
 #include "openvino/core/preprocess/pre_post_process.hpp"
-#include <remote_blob_tests/remote_blob_helpers.hpp>
+#include <remote_tensor_tests/remote_blob_helpers.hpp>
 #include "common_test_utils/ov_tensor_utils.hpp"
 
 TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity) {
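
Taken together, the renamed helper headers back the flow these tests exercise: allocate a tensor inside the plugin's remote context and run inference on it. A hedged sketch of that flow in API 2.0 terms (model loading elided; the function name is illustrative):

#include <openvino/runtime/core.hpp>
#include <openvino/runtime/intel_gpu/ocl/ocl.hpp>

void infer_with_remote_input(ov::CompiledModel& compiled) {
    // The compiled model exposes the GPU remote context it executes in.
    auto context = compiled.get_context().as<ov::intel_gpu::ocl::ClContext>();
    auto port = compiled.input();
    // Allocate a device-side tensor in that context and bind it to the input port.
    auto remote = context.create_tensor(port.get_element_type(), port.get_shape());
    auto request = compiled.create_infer_request();
    request.set_tensor(port, remote);
    request.infer();
}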