From d56dc20956fa7ff6816e7f2116d3e36aaa677420 Mon Sep 17 00:00:00 2001
From: Vladimir Paramuzov
Date: Mon, 20 Dec 2021 12:14:32 +0300
Subject: [PATCH] [GPU] Moved GPU remote headers to intel_gpu folder and namespace (#9290)

---
 .../runtime/{gpu => intel_gpu}/ocl/dx.hpp      |  8 ++---
 .../runtime/{gpu => intel_gpu}/ocl/ocl.hpp     |  8 ++---
 .../{gpu => intel_gpu}/ocl/ocl_wrapper.hpp     |  0
 .../runtime/{gpu => intel_gpu}/ocl/va.hpp      |  8 ++---
 .../inference_engine/CMakeLists.txt            | 12 +++---
 .../gpu_remote_tensor_tests.cpp                | 34 +++++++++----------
 6 files changed, 35 insertions(+), 35 deletions(-)
 rename src/inference/include/openvino/runtime/{gpu => intel_gpu}/ocl/dx.hpp (98%)
 rename src/inference/include/openvino/runtime/{gpu => intel_gpu}/ocl/ocl.hpp (98%)
 rename src/inference/include/openvino/runtime/{gpu => intel_gpu}/ocl/ocl_wrapper.hpp (100%)
 rename src/inference/include/openvino/runtime/{gpu => intel_gpu}/ocl/va.hpp (97%)

diff --git a/src/inference/include/openvino/runtime/gpu/ocl/dx.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
similarity index 98%
rename from src/inference/include/openvino/runtime/gpu/ocl/dx.hpp
rename to src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
index e94ab133c6c..b920edd2988 100644
--- a/src/inference/include/openvino/runtime/gpu/ocl/dx.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
@@ -7,7 +7,7 @@
  * shared Video Acceleration device contexts
  * and shared memory tensors which contain Video Acceleration surfaces
  *
- * @file openvino/runtime/gpu/ocl/dx.hpp
+ * @file openvino/runtime/intel_gpu/ocl/dx.hpp
  */
 #pragma once
@@ -24,11 +24,11 @@
 #include <memory>
 #include <string>
 
-#include "openvino/runtime/gpu/ocl/ocl.hpp"
+#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
 
 namespace ov {
 namespace runtime {
-namespace gpu {
+namespace intel_gpu {
 namespace ocl {
@@ -193,6 +193,6 @@ public:
     }
 };
 }  // namespace ocl
-}  // namespace gpu
+}  // namespace intel_gpu
 }  // namespace runtime
 }  // namespace ov
diff --git a/src/inference/include/openvino/runtime/gpu/ocl/ocl.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
similarity index 98%
rename from src/inference/include/openvino/runtime/gpu/ocl/ocl.hpp
rename to src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
index 4477c87873d..42565232897 100644
--- a/src/inference/include/openvino/runtime/gpu/ocl/ocl.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
@@ -6,7 +6,7 @@
  * @brief a header that defines wrappers for internal GPU plugin-specific
  * OpenCL context and OpenCL shared memory tensors
  *
- * @file openvino/runtime/gpu/ocl/ocl.hpp
+ * @file openvino/runtime/intel_gpu/ocl/ocl.hpp
  */
 #pragma once
@@ -15,13 +15,13 @@
 #include "gpu/gpu_params.hpp"
 #include "openvino/runtime/core.hpp"
-#include "openvino/runtime/gpu/ocl/ocl_wrapper.hpp"
+#include "openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp"
 #include "openvino/runtime/remote_context.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
 
 namespace ov {
 namespace runtime {
-namespace gpu {
+namespace intel_gpu {
 namespace ocl {
@@ -320,6 +320,6 @@ public:
 };
 }  // namespace ocl
-}  // namespace gpu
+}  // namespace intel_gpu
 }  // namespace runtime
 }  // namespace ov
diff --git a/src/inference/include/openvino/runtime/gpu/ocl/ocl_wrapper.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp
similarity index 100%
rename from src/inference/include/openvino/runtime/gpu/ocl/ocl_wrapper.hpp
rename to src/inference/include/openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp
diff --git a/src/inference/include/openvino/runtime/gpu/ocl/va.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
similarity index 97%
rename from src/inference/include/openvino/runtime/gpu/ocl/va.hpp
rename to src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
index 45e8611077c..ed846ce0bde 100644
--- a/src/inference/include/openvino/runtime/gpu/ocl/va.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
@@ -7,7 +7,7 @@
  * shared Video Acceleration device contexts
  * and shared memory tensors which contain Video Acceleration surfaces
  *
- * @file openvino/runtime/gpu/ocl/va.hpp
+ * @file openvino/runtime/intel_gpu/ocl/va.hpp
  */
 #pragma once
@@ -18,7 +18,7 @@
 #include <memory>
 #include <string>
 
-#include "openvino/runtime/gpu/ocl/ocl.hpp"
+#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
 
 // clang-format off
 #include <va/va.h>
@@ -26,7 +26,7 @@
 
 namespace ov {
 namespace runtime {
-namespace gpu {
+namespace intel_gpu {
 namespace ocl {
@@ -147,6 +147,6 @@ public:
     }
 };
 }  // namespace ocl
-}  // namespace gpu
+}  // namespace intel_gpu
 }  // namespace runtime
 }  // namespace ov
diff --git a/src/tests/functional/inference_engine/CMakeLists.txt b/src/tests/functional/inference_engine/CMakeLists.txt
index 05ef097efaa..0cb94729dbc 100644
--- a/src/tests/functional/inference_engine/CMakeLists.txt
+++ b/src/tests/functional/inference_engine/CMakeLists.txt
@@ -148,18 +148,18 @@ function(ie_headers_compilation_with_custom_flags)
             "gpu/gpu_context_api_va.hpp"
             "gpu/gpu_context_api_dx.hpp"
             "gpu/gpu_ocl_wrapper.hpp"
-            "openvino/runtime/gpu/ocl/ocl_wrapper.hpp"
-            "openvino/runtime/gpu/ocl/ocl.hpp"
-            "openvino/runtime/gpu/ocl/va.hpp"
-            "openvino/runtime/gpu/ocl/dx.hpp")
+            "openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp"
+            "openvino/runtime/intel_gpu/ocl/ocl.hpp"
+            "openvino/runtime/intel_gpu/ocl/va.hpp"
+            "openvino/runtime/intel_gpu/ocl/dx.hpp")
     endif()
 
     if(NOT WIN32)
         list(APPEND IE_TEST_HEADERS_TO_SKIP
             "gpu/gpu_context_api_dx.hpp"
-            "openvino/runtime/gpu/ocl/dx.hpp")
+            "openvino/runtime/intel_gpu/ocl/dx.hpp")
     endif()
     if(NOT LIBVA_FOUND)
         list(APPEND IE_TEST_HEADERS_TO_SKIP
             "gpu/gpu_context_api_va.hpp"
-            "openvino/runtime/gpu/ocl/va.hpp")
+            "openvino/runtime/intel_gpu/ocl/va.hpp")
     endif()
 endif()
diff --git a/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp b/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp
index b8c736b965f..b4ae7c0ea9d 100644
--- a/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp
+++ b/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp
@@ -7,7 +7,7 @@
 #include <vector>
 #include <memory>
 
-#include "openvino/runtime/gpu/ocl/ocl.hpp"
+#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
 #include "openvino/runtime/core.hpp"
 
 #include <gpu/gpu_config.hpp>
@@ -98,7 +98,7 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
 
     // inference using remote tensor
     auto inf_req_shared = exec_net.create_infer_request();
-    auto cldnn_context = exec_net.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+    auto cldnn_context = exec_net.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
     cl_context ctx = cldnn_context;
     auto ocl_instance = std::make_shared<OpenCL>(ctx);
     cl_int err;
@@ -159,8 +159,8 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
         }
         case RemoteTensorSharingType::PLUGIN_CL_TENSOR: {
             auto cldnn_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape());
-            ASSERT_TRUE(cldnn_tensor.is<ov::runtime::gpu::ocl::ClBufferTensor>());
-            auto cl_tensor = cldnn_tensor.as<ov::runtime::gpu::ocl::ClBufferTensor>();
+            ASSERT_TRUE(cldnn_tensor.is<ov::runtime::intel_gpu::ocl::ClBufferTensor>());
+            auto cl_tensor = cldnn_tensor.as<ov::runtime::intel_gpu::ocl::ClBufferTensor>();
             {
                 cl::Buffer shared_buffer = cl_tensor;
                 void* buffer = fakeImageData.data();
@@ -175,9 +175,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
                 GTEST_SKIP();
 
             auto cldnn_tensor = cldnn_context.create_usm_host_tensor(input->get_element_type(), input->get_shape());
-            ASSERT_TRUE(cldnn_tensor.is<ov::runtime::gpu::ocl::USMTensor>());
+            ASSERT_TRUE(cldnn_tensor.is<ov::runtime::intel_gpu::ocl::USMTensor>());
             {
-                auto cl_tensor = cldnn_tensor.as<ov::runtime::gpu::ocl::USMTensor>();
+                auto cl_tensor = cldnn_tensor.as<ov::runtime::intel_gpu::ocl::USMTensor>();
                 void* shared_buffer = cl_tensor.get();
                 ASSERT_EQ(ocl_instance->get_allocation_type(shared_buffer), CL_MEM_TYPE_HOST_INTEL);
                 void* buffer = fakeImageData.data();
@@ -194,9 +194,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
                 GTEST_SKIP();
 
             auto cldnn_tensor = cldnn_context.create_usm_device_tensor(input->get_element_type(), input->get_shape());
-            ASSERT_TRUE(cldnn_tensor.is<ov::runtime::gpu::ocl::USMTensor>());
+            ASSERT_TRUE(cldnn_tensor.is<ov::runtime::intel_gpu::ocl::USMTensor>());
             {
-                auto cl_tensor = cldnn_tensor.as<ov::runtime::gpu::ocl::USMTensor>();
+                auto cl_tensor = cldnn_tensor.as<ov::runtime::intel_gpu::ocl::USMTensor>();
                 void* shared_buffer = cl_tensor.get();
                 ASSERT_EQ(ocl_instance->get_allocation_type(shared_buffer), CL_MEM_TYPE_DEVICE_INTEL);
                 void* buffer = fakeImageData.data();
@@ -277,7 +277,7 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserContext) {
 
     // inference using remote tensor
     auto ocl_instance = std::make_shared<OpenCL>();
-    auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_context.get());
+    auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_context.get());
     auto exec_net_shared = ie.compile_model(function, remote_context);
     auto inf_req_shared = exec_net_shared.create_infer_request();
     inf_req_shared.set_tensor(input, fakeImageData);
@@ -323,7 +323,7 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserContextWithMultipleDevices) {
     cl::Context multi_device_ctx({ocl_instance_tmp->_device, ocl_instance_tmp->_device});
     auto ocl_instance = std::make_shared<OpenCL>(multi_device_ctx.get());
-    auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_context.get(), 1);
+    auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_context.get(), 1);
     ASSERT_EQ(remote_context.get_device_name(), "GPU.0");
 
     auto exec_net_shared = ie.compile_model(function, remote_context);
@@ -376,9 +376,9 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserQueue_out_of_order) {
     cl::Buffer shared_input_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, in_size, NULL, &err);
     cl::Buffer shared_output_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, out_size, NULL, &err);
 
-    auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
+    auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
     auto exec_net_shared = ie.compile_model(function, remote_context);
-    auto gpu_context = exec_net_shared.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+    auto gpu_context = exec_net_shared.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
 
     auto gpu_in_tensor = gpu_context.create_tensor(input->get_output_element_type(0), input->get_output_shape(0), shared_input_buffer);
     auto gpu_out_tensor = gpu_context.create_tensor(output->get_output_element_type(0), output->get_output_shape(0), shared_output_buffer);
@@ -456,9 +456,9 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserQueue_in_order) {
     cl::Buffer shared_input_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, in_size, NULL, &err);
     cl::Buffer shared_output_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, out_size, NULL, &err);
 
-    auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
+    auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
     auto exec_net_shared = ie.compile_model(function, remote_context);
-    auto gpu_context = exec_net_shared.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+    auto gpu_context = exec_net_shared.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
 
     auto gpu_in_tensor = gpu_context.create_tensor(input->get_output_element_type(0), input->get_output_shape(0), shared_input_buffer);
     auto gpu_out_tensor = gpu_context.create_tensor(output->get_output_element_type(0), output->get_output_shape(0), shared_output_buffer);
@@ -531,7 +531,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image) {
     auto exec_net_b = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
     auto inf_req_remote = exec_net_b.create_infer_request();
-    auto cldnn_context = exec_net_b.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+    auto cldnn_context = exec_net_b.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
     cl_context ctx = cldnn_context.get();
     auto ocl_instance = std::make_shared<OpenCL>(ctx);
     cl_int err;
@@ -650,9 +650,9 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_buffer) {
     cl::Buffer shared_input_uv_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, in_size_uv, NULL, &err);
     cl::Buffer shared_output_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, out_size, NULL, &err);
 
-    auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
+    auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
     auto exec_net_shared = ie.compile_model(function, remote_context);
-    auto gpu_context = exec_net_shared.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+    auto gpu_context = exec_net_shared.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
 
     auto gpu_in_y_tensor = gpu_context.create_tensor(param_input_y->get_output_element_type(0), fake_image_data_y.get_shape(), shared_input_y_buffer);
     auto gpu_in_uv_tensor = gpu_context.create_tensor(param_input_uv->get_output_element_type(0), fake_image_data_uv.get_shape(), shared_input_uv_buffer);
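
This rename is source-breaking for applications that include the public GPU interop headers: openvino/runtime/gpu/ocl/*.hpp becomes openvino/runtime/intel_gpu/ocl/*.hpp, and the wrapper types move from ov::runtime::gpu::ocl to ov::runtime::intel_gpu::ocl. A minimal migration sketch, not part of this commit (the model path and device name are placeholders):

    // Before this patch:
    //   #include "openvino/runtime/gpu/ocl/ocl.hpp"
    //   auto ctx = compiled.get_context().as<ov::runtime::gpu::ocl::ClContext>();
    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"

    int main() {
        ov::runtime::Core core;
        auto model = core.read_model("model.xml");  // placeholder path
        auto compiled = core.compile_model(model, "GPU");

        // Only the header path and namespace change; the wrapper API is untouched.
        auto gpu_context = compiled.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
        cl_context raw_handle = gpu_context;  // implicit conversion, as in the tests above
        (void)raw_handle;
        return 0;
    }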
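
The smoke_canInferOnUserQueue_* hunks exercise the zero-copy pattern the rename touches: wrap a user cl_command_queue in a ClContext, compile against it, and bind caller-owned cl::Buffer objects via create_tensor. Below is a condensed sketch of that flow under the new namespace; the helper itself, the ov::Model parameter type, and the set_input_tensor/set_output_tensor index overloads are my assumptions, not code from this patch:

    #include <memory>

    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"

    // Hypothetical helper: one inference whose input and output live in
    // caller-provided OpenCL buffers. `queue` must belong to a GPU context;
    // host_src/host_dst must match in_size/out_size.
    void infer_on_user_queue(ov::runtime::Core& core,
                             const std::shared_ptr<ov::Model>& model,
                             cl::CommandQueue& queue,
                             const void* host_src, void* host_dst,
                             size_t in_size, size_t out_size) {
        cl_int err = CL_SUCCESS;
        cl::Context cl_ctx = queue.getInfo<CL_QUEUE_CONTEXT>();
        cl::Buffer shared_input(cl_ctx, CL_MEM_READ_WRITE, in_size, nullptr, &err);
        cl::Buffer shared_output(cl_ctx, CL_MEM_READ_WRITE, out_size, nullptr, &err);

        // Wrapping the queue (not just the context) lets the plugin submit
        // work to the caller's queue, as the updated tests do.
        auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(core, queue.get());
        auto compiled = core.compile_model(model, remote_context);
        auto gpu_context = compiled.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();

        auto param = model->get_parameters().at(0);
        auto result = model->get_results().at(0);
        auto gpu_in = gpu_context.create_tensor(param->get_output_element_type(0),
                                                param->get_output_shape(0), shared_input);
        auto gpu_out = gpu_context.create_tensor(result->get_output_element_type(0),
                                                 result->get_output_shape(0), shared_output);

        auto request = compiled.create_infer_request();
        request.set_input_tensor(0, gpu_in);
        request.set_output_tensor(0, gpu_out);

        // Blocking copy in, run, then blocking copy out of the shared buffer.
        queue.enqueueWriteBuffer(shared_input, CL_TRUE, 0, in_size, host_src);
        request.start_async();
        request.wait();
        queue.enqueueReadBuffer(shared_output, CL_TRUE, 0, out_size, host_dst);
    }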
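
The PLUGIN_USM_HOST_TENSOR and PLUGIN_USM_DEVICE_TENSOR cases depend on the cl_intel_unified_shared_memory extension: create_usm_host_tensor / create_usm_device_tensor hand back wrappers whose raw pointer is recovered through USMTensor::get(), and the tests skip when USM is unavailable. A sketch of the host-visible case only, assuming a ClContext named gpu_context and USM support; the helper is illustrative, not from the patch (device USM would need clEnqueueMemcpyINTEL rather than a plain memcpy):

    #include <cstring>

    #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"

    // Hypothetical helper: allocate a host-USM tensor and fill it from host
    // memory. Valid only for host/shared USM allocations, which are CPU-visible.
    ov::runtime::RemoteTensor make_usm_host_input(
            ov::runtime::intel_gpu::ocl::ClContext& gpu_context,
            const ov::element::Type& type,
            const ov::Shape& shape,
            const void* src, size_t byte_size) {
        auto tensor = gpu_context.create_usm_host_tensor(type, shape);
        auto usm = tensor.as<ov::runtime::intel_gpu::ocl::USMTensor>();
        // Host USM memory is directly addressable from the CPU, so memcpy works.
        std::memcpy(usm.get(), src, byte_size);
        return tensor;
    }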