diff --git a/inference-engine/src/cldnn_engine/cldnn_infer_request.cpp b/inference-engine/src/cldnn_engine/cldnn_infer_request.cpp
index 1510562a384..c71acf4e6d9 100644
--- a/inference-engine/src/cldnn_engine/cldnn_infer_request.cpp
+++ b/inference-engine/src/cldnn_engine/cldnn_infer_request.cpp
@@ -285,7 +285,7 @@ void CLDNNInferRequest::SetBlob(const std::string& name, const Blob::Ptr& data)
         // Stores the given blob as ROI blob. It will be used to fill in network input
         // during pre-processing
         if (_inputs[name]->is<gpu::ClBlob>()) {
-            Blob::Ptr inputHostBlob = create_input_host_blob(desc);
+            Blob::Ptr inputHostBlob = create_host_blob(desc);
             inputHostBlob->allocate();
             _inputs[name] = inputHostBlob;
         }
@@ -626,8 +626,8 @@ void CLDNNInferRequest::wait_dynamic() {
 
 // ---------------------------- internal utils --------- ----------------------------------- //
 // ----------------------------------------------------------------------------------------- //
-Blob::Ptr CLDNNInferRequest::create_input_host_blob(const TensorDesc& desc, uint8_t* mem_ptr) {
-    OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "CLDNNInferRequest::create_input_host_blob");
+Blob::Ptr CLDNNInferRequest::create_host_blob(const TensorDesc& desc, uint8_t* mem_ptr) {
+    OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "CLDNNInferRequest::create_host_blob");
     const Precision& p = desc.getPrecision();
 
     switch (p) {
@@ -677,37 +677,7 @@ Blob::Ptr CLDNNInferRequest::create_input_host_blob(const TensorDesc& desc, uint
         else
             return make_shared_blob<uint8_t>(desc);
     default:
-        IE_THROW(NotImplemented) << "The plugin does not support input " << p.name() << " precision";
-    }
-}
-
-Blob::Ptr CLDNNInferRequest::create_output_host_blob(const TensorDesc& desc, uint8_t* mem_ptr) {
-    OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "CLDNNInferRequest::create_output_host_blob");
-    const Precision& p = desc.getPrecision();
-
-    switch (p) {
-    case Precision::FP32:
-        if (mem_ptr != nullptr)
-            return make_shared_blob<float>(desc, reinterpret_cast<float*>(mem_ptr));
-        else
-            return make_shared_blob<float>(desc);
-    case Precision::FP16:
-        if (mem_ptr != nullptr)
-            return make_shared_blob<uint16_t>(desc, reinterpret_cast<uint16_t*>(mem_ptr));
-        else
-            return make_shared_blob<uint16_t>(desc);
-    case Precision::I32:
-        if (mem_ptr != nullptr)
-            return make_shared_blob<int32_t>(desc, reinterpret_cast<int32_t*>(mem_ptr));
-        else
-            return make_shared_blob<int32_t>(desc);
-    case Precision::I64:
-        if (mem_ptr != nullptr)
-            return make_shared_blob<int64_t>(desc, reinterpret_cast<int64_t*>(mem_ptr));
-        else
-            return make_shared_blob<int64_t>(desc);
-    default:
-        IE_THROW() << "The plugin does not support output " << p.name() << " precision";
+        IE_THROW(NotImplemented) << "The plugin does not support " << p.name() << " blob precision";
     }
 }
 
@@ -719,6 +689,8 @@ void CLDNNInferRequest::copy_output_data(cldnn::memory::ptr src, Blob::Ptr dst,
     case Precision::FP16: copyResultToOutputBlob<uint16_t>(src, dst, bi, stream); break;
     case Precision::I32: copyResultToOutputBlob<int32_t>(src, dst, bi, stream); break;
     case Precision::I64: copyResultToOutputBlob<int64_t>(src, dst, bi, stream); break;
+    case Precision::U8: copyResultToOutputBlob<uint8_t>(src, dst, bi, stream); break;
+    case Precision::I8: copyResultToOutputBlob<int8_t>(src, dst, bi, stream); break;
     default: IE_THROW(NotImplemented) << "The plugin does not support output " << dst->getTensorDesc().getPrecision() << " precision";
     }
 }
@@ -799,7 +771,7 @@ void CLDNNInferRequest::allocate_inputs() {
             desc_fp32.setPrecision(Precision::FP32);
             auto blobPtr = create_device_blob(desc_fp32, litr->second);
             _deviceInputs[name] = blobPtr;
-            Blob::Ptr inputBlob = create_input_host_blob(desc);
+            Blob::Ptr inputBlob = create_host_blob(desc);
             inputBlob->allocate();
             _inputs[name] = inputBlob;
         } else {
@@ -825,7 +797,7 @@ void CLDNNInferRequest::allocate_inputs_dynamic() {
             IE_THROW() << "Empty dimensions for input blob " << input.first;
         }
 
-        Blob::Ptr inputBlob = create_input_host_blob(desc);
+        Blob::Ptr inputBlob = create_host_blob(desc);
         if (desc.getPrecision() == Precision::I16 || desc.getPrecision() == Precision::U16) {
             desc.setPrecision(Precision::FP32);
             auto fp32inputBlob = InferenceEngine::make_shared_blob<float>(desc);
@@ -871,7 +843,7 @@ void CLDNNInferRequest::allocate_outputs_dynamic() {
             IE_THROW() << "Empty dimensions for output blob " << no.first;
         }
 
-        Blob::Ptr outputBlob = create_output_host_blob(desc);
+        Blob::Ptr outputBlob = create_host_blob(desc);
         outputBlob->allocate();
         _outputs[no.first] = outputBlob;
         outputsMap[no.first] = outputID;
diff --git a/inference-engine/src/cldnn_engine/cldnn_infer_request.h b/inference-engine/src/cldnn_engine/cldnn_infer_request.h
index 210adc46be8..72c924b015e 100644
--- a/inference-engine/src/cldnn_engine/cldnn_infer_request.h
+++ b/inference-engine/src/cldnn_engine/cldnn_infer_request.h
@@ -79,8 +79,7 @@ private:
                        std::vector<cldnn::event::ptr>& dependencies);
     void prepare_output(const cldnn::primitive_id& outputName, InferenceEngine::Blob::Ptr& outputBlob);
 
-    InferenceEngine::Blob::Ptr create_input_host_blob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
-    InferenceEngine::Blob::Ptr create_output_host_blob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
+    InferenceEngine::Blob::Ptr create_host_blob(const InferenceEngine::TensorDesc& desc, uint8_t* mem_ptr = nullptr);
     InferenceEngine::Blob::Ptr create_device_blob(const InferenceEngine::TensorDesc& desc, const cldnn::layout& layout);
 
     void copy_output_data(cldnn::memory::ptr outputMemory, InferenceEngine::Blob::Ptr bptr, buf_info* bi = nullptr);
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
index b4cc9d7bfc4..7378a3f0e73 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
@@ -23,8 +23,6 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*(PreprocessTest).*(SetMeanImagePreProcessSetBlob).*)",
         R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcessGetBlob).*)",
         R"(.*(InferRequestPreprocessDynamicallyInSetBlobTest).*)",
-        // TODO: Issue: 51764
-        ".*InferRequestPreprocessConversionTest.*",
         // TODO: Issue: 41462
         R"(.*(SoftMaxLayerTest).*axis=0.*)",
         // TODO: Issue: 43511
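
For reference, the surviving create_host_blob keeps the same shape as the two helpers it replaces: a single switch over the blob precision that either wraps caller-provided host memory or returns an unallocated blob. Below is a minimal sketch of that pattern against the public InferenceEngine blob API; the sketch's function name and its reduced case list are illustrative only, not the plugin's actual code (the real helper covers all the precisions visible in the diff and more).

#include <ie_blob.h>   // InferenceEngine::Blob, TensorDesc, make_shared_blob, IE_THROW

using namespace InferenceEngine;

// Precision-dispatched host blob factory (sketch). A non-null mem_ptr makes the
// blob wrap caller-owned memory (zero-copy); otherwise the caller is expected
// to call allocate() on the result, as the plugin does at each call site above.
static Blob::Ptr create_host_blob_sketch(const TensorDesc& desc, uint8_t* mem_ptr = nullptr) {
    const Precision& p = desc.getPrecision();
    switch (p) {
    case Precision::FP32:
        return mem_ptr ? make_shared_blob<float>(desc, reinterpret_cast<float*>(mem_ptr))
                       : make_shared_blob<float>(desc);
    case Precision::U8:
        return mem_ptr ? make_shared_blob<uint8_t>(desc, mem_ptr)
                       : make_shared_blob<uint8_t>(desc);
    case Precision::I8:
        return mem_ptr ? make_shared_blob<int8_t>(desc, reinterpret_cast<int8_t*>(mem_ptr))
                       : make_shared_blob<int8_t>(desc);
    default:
        IE_THROW(NotImplemented) << "The plugin does not support " << p.name() << " blob precision";
    }
}

// Usage mirroring allocate_inputs()/allocate_outputs_dynamic(): create, then allocate.
int main() {
    TensorDesc desc(Precision::U8, {1, 3, 224, 224}, Layout::NCHW);
    Blob::Ptr blob = create_host_blob_sketch(desc);
    blob->allocate();
    return 0;
}

Folding the input and output variants into one factory also removes the behavioral asymmetry between their default branches: previously the output path threw a generic IE_THROW() while the input path threw NotImplemented; the merged helper reports NotImplemented for every unsupported precision.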