diff --git a/inference-engine/thirdparty/clDNN/src/network.cpp b/inference-engine/thirdparty/clDNN/src/network.cpp
index c7bc4354876..6d126d9b481 100644
--- a/inference-engine/thirdparty/clDNN/src/network.cpp
+++ b/inference-engine/thirdparty/clDNN/src/network.cpp
@@ -823,6 +823,10 @@ void network::transfer_memory_to_device(std::shared_ptr<primitive_inst> instance
     // Allocate and transfer memory
     auto device_mem = inst_mem.get_engine()->allocate_memory(inst_mem.get_layout(), allocation_type::usm_device, false);
     device_mem->copy_from(get_stream(), inst_mem);
+    GPU_DEBUG_GET_INSTANCE(debug_config);
+    GPU_DEBUG_IF(debug_config->verbose >= 2) {
+        GPU_DEBUG_COUT << "[" << node.id() << ": constant]" << std::endl;
+    }
     _memory_pool->release_memory(&inst_mem, node.id(), get_id());
     instance->set_output_memory(device_mem);
 }
diff --git a/inference-engine/thirdparty/clDNN/src/program.cpp b/inference-engine/thirdparty/clDNN/src/program.cpp
index 01d7b392f31..2454d19fdde 100644
--- a/inference-engine/thirdparty/clDNN/src/program.cpp
+++ b/inference-engine/thirdparty/clDNN/src/program.cpp
@@ -623,6 +623,9 @@ void program::transfer_memory_to_device() {
             auto device_mem = mem.get_engine()->allocate_memory(data_node_layout, allocation_type::usm_device, false);
             device_mem->copy_from(get_stream(), mem);
             data_node.attach_memory(device_mem);
+            GPU_DEBUG_IF(debug_config->verbose >= 2) {
+                GPU_DEBUG_COUT << "[" << data_node.id() << ": constant]" << std::endl;
+            }
             const_cast<memory::ptr&>(data_node.get_primitive()->mem).reset();
             // TODO: Do we need finish call here? Maybe call it in network::execute() ?
             get_stream().finish();