[GPU] Fixed USM shared blob allocation type (#9662)
commit 4734b17e52
parent c1e2e0344b
@@ -84,7 +84,7 @@ void pre_replace_deconv::run(program& p) {
             p.remove_connection(*weights_node_ptr, deconv_node);
         }

-        auto filter_z = deconv_prim->grouped_weights_shape ? 1 : (filter_size.spatial[2] - 1);
+        auto filter_z = deconv_prim->grouped_weights_shape ? 0 : (filter_size.spatial[2] - 1);
         pad.spatial[0] = (filter_size.spatial[0] - 1) - std::abs(pad.spatial[0]);
         pad.spatial[1] = (filter_size.spatial[1] - 1) - std::abs(pad.spatial[1]);
         pad.spatial[2] = filter_z - std::abs(pad.spatial[2]);
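Note on the hunk above: for non-grouped weights, filter_z is derived as filter_size.spatial[2] - 1, which already evaluates to 0 for a 2D filter whose z extent is 1; the grouped-weights branch presumably has to produce the same degenerate value, so the hardcoded 1 is corrected to 0. A minimal standalone sketch of the mirrored-padding rule (hypothetical helper, not the clDNN sources):

    #include <cassert>
    #include <cstdlib>

    // When a deconvolution is rewritten as a convolution, each spatial pad
    // is mirrored as (filter_extent - 1) - |pad|. A degenerate z extent of 1
    // must therefore contribute 0, which is what the fix restores for the
    // grouped-weights branch.
    int mirrored_pad(int filter_extent, int pad) {
        return (filter_extent - 1) - std::abs(pad);
    }

    int main() {
        assert(mirrored_pad(/*filter_extent=*/1, /*pad=*/0) == 0); // z of a 2D filter
        assert(mirrored_pad(/*filter_extent=*/3, /*pad=*/1) == 1); // a regular x/y axis
        return 0;
    }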
@@ -20,7 +20,7 @@ roi_align_inst::typed_primitive_inst(network& network, roi_align_node const& nod
 layout roi_align_inst::calc_output_layout(roi_align_node const& node) {
     auto primitive = node.get_primitive();
     auto input_layout = node.input(0).get_output_layout();
-    auto rois_layout = node.input(0).get_output_layout();
+    auto rois_layout = node.input(1).get_output_layout();
     auto num_rois = rois_layout.size.batch[0];
     auto num_channels = input_layout.size.feature[0];
     return layout(input_layout.data_type, format::bfyx, {num_rois, num_channels, primitive->pooled_h, primitive->pooled_w});
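Note on the hunk above: rois_layout was copy-pasted from the line before it and read input 0 (the feature map) instead of input 1 (the ROI boxes), so num_rois was silently taken from the feature map's batch dimension. A hedged sketch of the shape rule the fix restores (hypothetical Shape4D type, not the actual clDNN layout API):

    #include <cassert>
    #include <cstdint>

    struct Shape4D { std::int64_t batch, feature, y, x; };

    // ROIAlign output: one pooled_h x pooled_w slice per ROI (input 1),
    // with the channel count taken from the feature map (input 0).
    Shape4D roi_align_output_shape(const Shape4D& feature_map, const Shape4D& rois,
                                   std::int64_t pooled_h, std::int64_t pooled_w) {
        return {rois.batch, feature_map.feature, pooled_h, pooled_w};
    }

    int main() {
        Shape4D feature_map{1, 256, 38, 38}; // batch 1, 256 channels
        Shape4D rois{300, 4, 1, 1};          // 300 boxes
        Shape4D out = roi_align_output_shape(feature_map, rois, 7, 7);
        assert(out.batch == 300 && out.feature == 256); // not out.batch == 1
        return 0;
    }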
@@ -833,7 +833,7 @@ Blob::Ptr InferRequest::create_shared_device_blob(const InferenceEngine::TensorD
                                                   usm_host_mem,
                                                   0,
                                                   0,
-                                                  RemoteBlobImpl::BlobType::BT_USM_HOST_INTERNAL);
+                                                  RemoteBlobImpl::BlobType::BT_USM_SHARED);
     if (!blob)
         IE_THROW(NotAllocated) << "Failed to allocate shared host <-> device blob";
     blob->allocate();
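Note on the hunk above: the blob wraps usm_host_mem, a USM allocation shared with the application, so tagging it BT_USM_HOST_INTERNAL presumably misclassified it as a plugin-owned internal allocation; BT_USM_SHARED marks memory imported from the user. An illustrative sketch of why the tag matters (hypothetical enum mirror and ownership helper, not the plugin's actual logic):

    enum class BlobType { BT_USM_HOST_INTERNAL, BT_USM_DEVICE_INTERNAL, BT_USM_SHARED };

    // Internal allocations are created and released by the plugin itself;
    // shared ones belong to the application and must not be freed or
    // reallocated behind the user's back.
    bool plugin_owns_allocation(BlobType t) {
        return t == BlobType::BT_USM_HOST_INTERNAL ||
               t == BlobType::BT_USM_DEVICE_INTERNAL;
    }

    int main() {
        // A blob wrapping user-shared USM memory is not plugin-owned.
        return plugin_owns_allocation(BlobType::BT_USM_SHARED) ? 1 : 0;
    }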