[GPU] Moved GPU remote headers to intel_gpu folder and namespace (#9290)

Vladimir Paramuzov 2021-12-20 12:14:32 +03:00 committed by GitHub
parent 114d37b28c
commit d56dc20956
6 changed files with 35 additions and 35 deletions

View File

@@ -7,7 +7,7 @@
* shared Video Acceleration device contexts
* and shared memory tensors which contain Video Acceleration surfaces
*
- * @file openvino/runtime/gpu/ocl/dx.hpp
+ * @file openvino/runtime/intel_gpu/ocl/dx.hpp
*/
#pragma once
@@ -24,11 +24,11 @@
#include <memory>
#include <string>
#include "openvino/runtime/gpu/ocl/ocl.hpp"
#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
namespace ov {
namespace runtime {
- namespace gpu {
+ namespace intel_gpu {
namespace ocl {
/**
@@ -193,6 +193,6 @@ public:
}
};
} // namespace ocl
- } // namespace gpu
+ } // namespace intel_gpu
} // namespace runtime
} // namespace ov

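Migration note for consumers of this header: only the include path and the enclosing namespace change; class names and signatures are untouched. A minimal sketch for the D3D header, assuming Windows and the D3DContext class this file documents (the constructor signature is an assumption, not shown in this diff):

#include <d3d11.h>

#include "openvino/runtime/core.hpp"
#include "openvino/runtime/intel_gpu/ocl/dx.hpp"  // was openvino/runtime/gpu/ocl/dx.hpp

void share_d3d_device(ov::runtime::Core& core, ID3D11Device* device) {
    // Namespace moved from ov::runtime::gpu::ocl to ov::runtime::intel_gpu::ocl.
    // D3DContext (assumed) wraps the user-provided D3D11 device in a remote context.
    auto ctx = ov::runtime::intel_gpu::ocl::D3DContext(core, device);
}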
View File

@@ -6,7 +6,7 @@
* @brief a header that defines wrappers for internal GPU plugin-specific
* OpenCL context and OpenCL shared memory tensors
*
- * @file openvino/runtime/gpu/ocl/ocl.hpp
+ * @file openvino/runtime/intel_gpu/ocl/ocl.hpp
*/
#pragma once
@@ -15,13 +15,13 @@
#include "gpu/gpu_params.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/gpu/ocl/ocl_wrapper.hpp"
#include "openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp"
#include "openvino/runtime/remote_context.hpp"
#include "openvino/runtime/remote_tensor.hpp"
namespace ov {
namespace runtime {
- namespace gpu {
+ namespace intel_gpu {
namespace ocl {
/**
@@ -320,6 +320,6 @@ public:
};
} // namespace ocl
- } // namespace gpu
+ } // namespace intel_gpu
} // namespace runtime
} // namespace ov

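A matching sketch for the OpenCL header; the ClContext constructor taking a Core and a raw cl_context is the same one exercised by the updated tests further down in this commit:

#include "openvino/runtime/core.hpp"
#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"  // was openvino/runtime/gpu/ocl/ocl.hpp

// Wrap an existing native cl_context in the relocated ClContext class.
ov::runtime::intel_gpu::ocl::ClContext wrap_context(ov::runtime::Core& core, cl_context native_ctx) {
    return ov::runtime::intel_gpu::ocl::ClContext(core, native_ctx);
}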
View File

@@ -7,7 +7,7 @@
* shared Video Acceleration device contexts
* and shared memory tensors which contain Video Acceleration surfaces
*
- * @file openvino/runtime/gpu/ocl/va.hpp
+ * @file openvino/runtime/intel_gpu/ocl/va.hpp
*/
#pragma once
@@ -18,7 +18,7 @@
#include <memory>
#include <string>
#include "openvino/runtime/gpu/ocl/ocl.hpp"
#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
// clang-format off
#include <va/va.h>
@@ -26,7 +26,7 @@
namespace ov {
namespace runtime {
- namespace gpu {
+ namespace intel_gpu {
namespace ocl {
/**
@@ -147,6 +147,6 @@ public:
}
};
} // namespace ocl
- } // namespace gpu
+ } // namespace intel_gpu
} // namespace runtime
} // namespace ov

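And for VA-API sharing on Linux; the VAContext class name and constructor here are assumptions based on this header's stated purpose, not shown in the diff:

#include <va/va.h>

#include "openvino/runtime/core.hpp"
#include "openvino/runtime/intel_gpu/ocl/va.hpp"  // was openvino/runtime/gpu/ocl/va.hpp

void share_va_display(ov::runtime::Core& core, VADisplay display) {
    // VAContext (assumed) wraps the user-provided VA display in a remote context.
    auto ctx = ov::runtime::intel_gpu::ocl::VAContext(core, display);
}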
View File

@@ -148,18 +148,18 @@ function(ie_headers_compilation_with_custom_flags)
"gpu/gpu_context_api_va.hpp"
"gpu/gpu_context_api_dx.hpp"
"gpu/gpu_ocl_wrapper.hpp"
"openvino/runtime/gpu/ocl/ocl_wrapper.hpp"
"openvino/runtime/gpu/ocl/ocl.hpp"
"openvino/runtime/gpu/ocl/va.hpp"
"openvino/runtime/gpu/ocl/dx.hpp")
"openvino/runtime/intel_gpu/ocl/ocl_wrapper.hpp"
"openvino/runtime/intel_gpu/ocl/ocl.hpp"
"openvino/runtime/intel_gpu/ocl/va.hpp"
"openvino/runtime/intel_gpu/ocl/dx.hpp")
endif()
if(NOT WIN32)
list(APPEND IE_TEST_HEADERS_TO_SKIP "gpu/gpu_context_api_dx.hpp"
"openvino/runtime/gpu/ocl/dx.hpp")
"openvino/runtime/intel_gpu/ocl/dx.hpp")
endif()
if(NOT LIBVA_FOUND)
list(APPEND IE_TEST_HEADERS_TO_SKIP "gpu/gpu_context_api_va.hpp"
"openvino/runtime/gpu/ocl/va.hpp")
"openvino/runtime/intel_gpu/ocl/va.hpp")
endif()
endif()

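The skip lists above encode the same constraints consumer code has to respect: dx.hpp compiles only on Windows, and va.hpp only where libva is available. A sketch of matching include guards (HAVE_LIBVA is an illustrative macro, not defined by OpenVINO):

#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"  // portable OpenCL sharing

#ifdef _WIN32
#    include "openvino/runtime/intel_gpu/ocl/dx.hpp"  // D3D sharing, Windows-only
#endif
#ifdef HAVE_LIBVA  // illustrative guard for libva availability
#    include "openvino/runtime/intel_gpu/ocl/va.hpp"  // VA-API sharing
#endif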
View File

@@ -7,7 +7,7 @@
#include <vector>
#include <memory>
#include "openvino/runtime/gpu/ocl/ocl.hpp"
#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
#include "openvino/runtime/core.hpp"
#include <gpu/gpu_config.hpp>
@@ -98,7 +98,7 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
// inference using remote tensor
auto inf_req_shared = exec_net.create_infer_request();
- auto cldnn_context = exec_net.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+ auto cldnn_context = exec_net.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
cl_context ctx = cldnn_context;
auto ocl_instance = std::make_shared<OpenCL>(ctx);
cl_int err;
@@ -159,8 +159,8 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
}
case RemoteTensorSharingType::PLUGIN_CL_TENSOR: {
auto cldnn_tensor = cldnn_context.create_tensor(input->get_element_type(), input->get_shape());
- ASSERT_TRUE(cldnn_tensor.is<ov::runtime::gpu::ocl::ClBufferTensor>());
- auto cl_tensor = cldnn_tensor.as<ov::runtime::gpu::ocl::ClBufferTensor>();
+ ASSERT_TRUE(cldnn_tensor.is<ov::runtime::intel_gpu::ocl::ClBufferTensor>());
+ auto cl_tensor = cldnn_tensor.as<ov::runtime::intel_gpu::ocl::ClBufferTensor>();
{
cl::Buffer shared_buffer = cl_tensor;
void* buffer = fakeImageData.data();
@@ -175,9 +175,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
GTEST_SKIP();
auto cldnn_tensor = cldnn_context.create_usm_host_tensor(input->get_element_type(), input->get_shape());
- ASSERT_TRUE(cldnn_tensor.is<ov::runtime::gpu::ocl::USMTensor>());
+ ASSERT_TRUE(cldnn_tensor.is<ov::runtime::intel_gpu::ocl::USMTensor>());
{
- auto cl_tensor = cldnn_tensor.as<ov::runtime::gpu::ocl::USMTensor>();
+ auto cl_tensor = cldnn_tensor.as<ov::runtime::intel_gpu::ocl::USMTensor>();
void* shared_buffer = cl_tensor.get();
ASSERT_EQ(ocl_instance->get_allocation_type(shared_buffer), CL_MEM_TYPE_HOST_INTEL);
void* buffer = fakeImageData.data();
@@ -194,9 +194,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
GTEST_SKIP();
auto cldnn_tensor = cldnn_context.create_usm_device_tensor(input->get_element_type(), input->get_shape());
- ASSERT_TRUE(cldnn_tensor.is<ov::runtime::gpu::ocl::USMTensor>());
+ ASSERT_TRUE(cldnn_tensor.is<ov::runtime::intel_gpu::ocl::USMTensor>());
{
- auto cl_tensor = cldnn_tensor.as<ov::runtime::gpu::ocl::USMTensor>();
+ auto cl_tensor = cldnn_tensor.as<ov::runtime::intel_gpu::ocl::USMTensor>();
void* shared_buffer = cl_tensor.get();
ASSERT_EQ(ocl_instance->get_allocation_type(shared_buffer), CL_MEM_TYPE_DEVICE_INTEL);
void* buffer = fakeImageData.data();
@@ -277,7 +277,7 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserContext) {
// inference using remote tensor
auto ocl_instance = std::make_shared<OpenCL>();
- auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_context.get());
+ auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_context.get());
auto exec_net_shared = ie.compile_model(function, remote_context);
auto inf_req_shared = exec_net_shared.create_infer_request();
inf_req_shared.set_tensor(input, fakeImageData);
@@ -323,7 +323,7 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserContextWithMultipleDevices) {
cl::Context multi_device_ctx({ocl_instance_tmp->_device, ocl_instance_tmp->_device});
auto ocl_instance = std::make_shared<OpenCL>(multi_device_ctx.get());
- auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_context.get(), 1);
+ auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_context.get(), 1);
ASSERT_EQ(remote_context.get_device_name(), "GPU.0");
auto exec_net_shared = ie.compile_model(function, remote_context);
@@ -376,9 +376,9 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserQueue_out_of_order) {
cl::Buffer shared_input_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, in_size, NULL, &err);
cl::Buffer shared_output_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, out_size, NULL, &err);
- auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
+ auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
auto exec_net_shared = ie.compile_model(function, remote_context);
- auto gpu_context = exec_net_shared.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+ auto gpu_context = exec_net_shared.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
auto gpu_in_tensor = gpu_context.create_tensor(input->get_output_element_type(0), input->get_output_shape(0), shared_input_buffer);
auto gpu_out_tensor = gpu_context.create_tensor(output->get_output_element_type(0), output->get_output_shape(0), shared_output_buffer);
@@ -456,9 +456,9 @@ TEST_F(OVRemoteTensor_Test, smoke_canInferOnUserQueue_in_order) {
cl::Buffer shared_input_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, in_size, NULL, &err);
cl::Buffer shared_output_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, out_size, NULL, &err);
- auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
+ auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
auto exec_net_shared = ie.compile_model(function, remote_context);
- auto gpu_context = exec_net_shared.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+ auto gpu_context = exec_net_shared.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
auto gpu_in_tensor = gpu_context.create_tensor(input->get_output_element_type(0), input->get_output_shape(0), shared_input_buffer);
auto gpu_out_tensor = gpu_context.create_tensor(output->get_output_element_type(0), output->get_output_shape(0), shared_output_buffer);
@@ -531,7 +531,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image) {
auto exec_net_b = ie.compile_model(function, CommonTestUtils::DEVICE_GPU);
auto inf_req_remote = exec_net_b.create_infer_request();
- auto cldnn_context = exec_net_b.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+ auto cldnn_context = exec_net_b.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
cl_context ctx = cldnn_context.get();
auto ocl_instance = std::make_shared<OpenCL>(ctx);
cl_int err;
@@ -650,9 +650,9 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_buffer) {
cl::Buffer shared_input_uv_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, in_size_uv, NULL, &err);
cl::Buffer shared_output_buffer(ocl_instance->_context, CL_MEM_READ_WRITE, out_size, NULL, &err);
- auto remote_context = ov::runtime::gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
+ auto remote_context = ov::runtime::intel_gpu::ocl::ClContext(ie, ocl_instance->_queue.get());
auto exec_net_shared = ie.compile_model(function, remote_context);
- auto gpu_context = exec_net_shared.get_context().as<ov::runtime::gpu::ocl::ClContext>();
+ auto gpu_context = exec_net_shared.get_context().as<ov::runtime::intel_gpu::ocl::ClContext>();
auto gpu_in_y_tensor = gpu_context.create_tensor(param_input_y->get_output_element_type(0), fake_image_data_y.get_shape(), shared_input_y_buffer);
auto gpu_in_uv_tensor = gpu_context.create_tensor(param_input_uv->get_output_element_type(0), fake_image_data_uv.get_shape(), shared_input_uv_buffer);
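
Since the commit renames the namespace without leaving a compatibility alias, downstream projects that cannot update every call site at once could bridge the gap themselves; a sketch of such an alias (an assumption on the consumer side, not part of this commit):

#include "openvino/runtime/intel_gpu/ocl/ocl.hpp"

namespace ov {
namespace runtime {
// Temporary alias: lets ov::runtime::gpu::ocl::* keep compiling while call
// sites migrate to ov::runtime::intel_gpu::ocl::*.
namespace gpu = intel_gpu;
}  // namespace runtime
}  // namespace ov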