[GPU] Remove duplicated precision conversion function (#12827)

This commit is contained in:
Vladimir Paramuzov
2022-08-31 11:09:48 +04:00
committed by GitHub
parent a6b455876b
commit 5c11f09adc
43 changed files with 69 additions and 98 deletions

View File

@@ -57,35 +57,6 @@ inline cldnn::data_types DataTypeFromPrecision(InferenceEngine::Precision p) {
}
}
// Maps an ngraph element type to the closest cldnn::data_types value supported
// by the GPU plugin. clDNN exposes a reduced type set, so several precisions are
// coerced: i16/u16/f64 are executed as f32, and u32/u64 as i32 (NOTE(review):
// u64 -> i32 can silently truncate large values — presumably acceptable for the
// index/shape tensors this is used on; confirm with callers).
// Throws InferenceEngine ParameterMismatch for any unsupported precision.
inline cldnn::data_types DataTypeFromPrecision(ngraph::element::Type t) {
switch (t) {
case ngraph::element::Type_t::i16:
case ngraph::element::Type_t::u16:
case ngraph::element::Type_t::f32:
case ngraph::element::Type_t::f64:
return cldnn::data_types::f32;  // widened/narrowed to single precision
case ngraph::element::Type_t::f16:
return cldnn::data_types::f16;
case ngraph::element::Type_t::u8:
return cldnn::data_types::u8;
case ngraph::element::Type_t::i8:
return cldnn::data_types::i8;
case ngraph::element::Type_t::i32:
case ngraph::element::Type_t::u32:
case ngraph::element::Type_t::u64:
return cldnn::data_types::i32;  // unsigned 32/64-bit mapped onto signed i32
case ngraph::element::Type_t::i64:
return cldnn::data_types::i64;
case ngraph::element::Type_t::boolean:
return cldnn::data_types::i8;   // booleans stored as bytes
case ngraph::element::Type_t::u1:
return cldnn::data_types::bin;  // 1-bit packed (binary convolution) type
default:
IE_THROW(ParameterMismatch)
<< "The plugin does not support " << t.get_type_name()<< " precision";
}
}
inline InferenceEngine::Precision PrecisionFromDataType(cldnn::data_types dt) {
switch (dt) {
case cldnn::data_types::bin:

View File

@@ -212,7 +212,7 @@ bool data_type_match(data_types data_type) {
return data_type == type_to_data_type<T>::value;
}
inline data_types data_type_to_element_type(ov::element::Type t) {
inline data_types element_type_to_data_type(ov::element::Type t) {
switch (t) {
case ov::element::Type_t::i16:
case ov::element::Type_t::u16:
@@ -240,7 +240,7 @@ inline data_types data_type_to_element_type(ov::element::Type t) {
}
}
inline ov::element::Type element_type_to_data_type(data_types t) {
inline ov::element::Type data_type_to_element_type(data_types t) {
switch (t) {
case cldnn::data_types::f32:
return ov::element::Type_t::f32;

View File

@@ -207,7 +207,7 @@ inline std::vector<T> read_vector(cldnn::memory::ptr mem, const cldnn::stream& s
}
inline std::shared_ptr<ngraph::runtime::HostTensor> make_host_tensor(layout l, void* memory_pointer) {
ov::element::Type et = element_type_to_data_type(l.data_type);
ov::element::Type et = data_type_to_element_type(l.data_type);
return std::make_shared<ngraph::runtime::HostTensor>(et, l.get_shape(), memory_pointer);
}

View File

@@ -35,7 +35,7 @@ static void CreateAdaptiveMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op
const auto indices_precision = op->get_output_element_type(1);
const auto indices_shape = op->get_output_shape(1);
const cldnn::layout indices_layout{DataTypeFromPrecision(indices_precision),
const cldnn::layout indices_layout{cldnn::element_type_to_data_type(indices_precision),
cldnn::format::get_default_format(indices_shape.size()),
tensor_from_dims(indices_shape)};
const auto indices_memory = p.GetEngine().allocate_memory(indices_layout);
@@ -50,7 +50,7 @@ static void CreateAdaptiveMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op
input_primitives[0],
tensor_from_dims(op->get_output_shape(0)),
input_primitives.back(),
DataTypeFromPrecision(op->get_index_element_type())};
cldnn::element_type_to_data_type(op->get_index_element_type())};
p.add_primitive(*op, poolPrim);
const cldnn::primitive_id indices_id_r = layer_type_name + ".out1";

View File

@@ -32,7 +32,7 @@ static void CreateCommonBroadcastOp(Program& p, const std::shared_ptr<ngraph::No
auto targetFormat = cldnn::format::get_default_format(outputRank);
if (targetFormat.value != cldnn::format::get_default_format(inputRank).value) {
auto reorderName = layerName + "_cldnn_in_reorder";
auto targetDatatype = DataTypeFromPrecision(op->get_input_element_type(0));
auto targetDatatype = cldnn::element_type_to_data_type(op->get_input_element_type(0));
auto reorderPrim = cldnn::reorder(reorderName,
inputPrimitive,
targetFormat,

View File

@@ -19,7 +19,7 @@ void CreateBucketizeOp(Program& p, const std::shared_ptr<ngraph::op::v3::Bucketi
const cldnn::bucketize bucketize_prim(layer_type_name_ID(op),
p.GetInputPrimitiveIDs(op),
DataTypeFromPrecision(op->get_output_type()),
cldnn::element_type_to_data_type(op->get_output_type()),
op->get_with_right_bound());
p.add_primitive(*op, bucketize_prim);
}

View File

@@ -23,7 +23,7 @@ static void CreateConcatOp(Program& p, const std::shared_ptr<ngraph::op::v0::Con
layerName,
inputPrimitives,
axis,
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, concatPrim);
}

View File

@@ -179,7 +179,7 @@ void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std::
constTensor = getConstTensor(newDims);
}
cldnn::layout constLayout = cldnn::layout(DataTypeFromPrecision(op->get_output_element_type(0)),
cldnn::layout constLayout = cldnn::layout(cldnn::element_type_to_data_type(op->get_output_element_type(0)),
constFormat,
constTensor);

View File

@@ -18,7 +18,7 @@ static void CreateConvertLikeOp(Program& p, const std::shared_ptr<ngraph::op::v1
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
auto outDataType = DataTypeFromPrecision(op->get_input_element_type(1));
auto outDataType = cldnn::element_type_to_data_type(op->get_input_element_type(1));
auto reorderPrim = cldnn::reorder(layerName,
inputPrimitives[0],
@@ -34,7 +34,7 @@ static void CreateConvertOp(Program& p, const std::shared_ptr<ngraph::op::v0::Co
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
auto outDataType = DataTypeFromPrecision(op->get_destination_type());
auto outDataType = cldnn::element_type_to_data_type(op->get_destination_type());
auto reorderPrim = cldnn::reorder(layerName,
inputPrimitives[0],

View File

@@ -18,7 +18,7 @@ static void CreateCommonConvertColorOp(Program& p, const std::shared_ptr<ngraph:
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
auto outDatatype = DataTypeFromPrecision(op->get_input_element_type(0));
auto outDatatype = cldnn::element_type_to_data_type(op->get_input_element_type(0));
auto outShape = tensor_from_dims(op->get_output_shape(0));
outShape = { outShape.sizes()[0], outShape.sizes()[2], outShape.sizes()[3], outShape.sizes()[1] };

View File

@@ -52,7 +52,7 @@ static void CreateGroupConvolutionOp(Program& p, const std::shared_ptr<ngraph::o
pads_begin,
dilations,
tensor_from_dims(outDims),
DataTypeFromPrecision(outPrecision),
cldnn::element_type_to_data_type(outPrecision),
weights_have_group_dim);
p.add_primitive(*op, convPrim);
@@ -87,7 +87,7 @@ static void CreateConvolutionOp(Program& p, const std::shared_ptr<ngraph::op::v1
pads_begin,
dilations,
tensor_from_dims(outDims),
DataTypeFromPrecision(outPrecision),
cldnn::element_type_to_data_type(outPrecision),
weights_have_group_dim);
p.add_primitive(*op, convPrim);
@@ -325,7 +325,7 @@ static void CreateBinaryConvolutionOp(Program& p, const std::shared_ptr<ngraph::
auto outDims = op->get_output_shape(0);
std::vector<cldnn::primitive_id> weights = {inputs[1]};
cldnn::data_types calc_precision = DataTypeFromPrecision(op->get_output_element_type(0));
cldnn::data_types calc_precision = cldnn::element_type_to_data_type(op->get_output_element_type(0));
auto strides = op->get_strides();
auto pads_begin = op->get_pads_begin();

View File

@@ -26,7 +26,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if (inputDataType == cldnn::data_types::i64) {
// GPU primitive supports only i32 data type for 'sequence_length' and 'blank_index' inputs
// so we need additional reorder if it's provided as i64
@@ -69,7 +69,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
}
cldnn::layout mutableLayout = cldnn::layout(
DataTypeFromPrecision(mutable_precision),
cldnn::element_type_to_data_type(mutable_precision),
cldnn::format::get_default_format(op->get_output_shape(1).size()),
tensor_from_dims(op->get_output_shape(1)));
@@ -95,7 +95,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
tensor_from_dims(op->get_output_shape(0)));
// GPU primitive supports only i32 as output data type
primitive.output_data_type = DataTypeFromPrecision(ngraph::element::i32);
primitive.output_data_type = cldnn::element_type_to_data_type(ngraph::element::i32);
if (num_output == 2) {
primitive.second_output = reorderedInputs.back();

View File

@@ -146,7 +146,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
reorderPrimName,
inputPrimitives[param.portIndex],
param.format,
DataTypeFromPrecision(op->get_input_element_type(param.portIndex)),
cldnn::element_type_to_data_type(op->get_input_element_type(param.portIndex)),
std::vector<float>(),
cldnn::reorder_mean_mode::subtract);
@@ -180,7 +180,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
size_t W = (dims.size() > 3) ? dims[3] : 1;
cldnn::tensor outputTensor = cldnn::tensor(cldnn::batch(N), cldnn::feature(C), cldnn::spatial(W, H));
cldnn::layout outputLayout = cldnn::layout(DataTypeFromPrecision(op->get_output_element_type(0)), outputFormat, outputTensor);
cldnn::layout outputLayout = cldnn::layout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), outputFormat, outputTensor);
// evaluate work sizes rules
std::vector<size_t> gws, lws;

View File

@@ -46,7 +46,7 @@ void CreateElementwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cl
auto targetFormat = cldnn::format::get_default_format(outRank);
if (targetFormat.value != cldnn::format::get_default_format(inputRank).value) {
auto reorderName = layerName + "_cldnn_in" + std::to_string(i) + "_reorder";
auto targetDatatype = DataTypeFromPrecision(op->get_input_element_type(i));
auto targetDatatype = cldnn::element_type_to_data_type(op->get_input_element_type(i));
auto reorderPrim = cldnn::reorder(reorderName,
inputPrimitives[i],
targetFormat,
@@ -72,7 +72,7 @@ void CreateElementwiseOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cl
}
}
auto out_dt = DataTypeFromPrecision(op->get_output_element_type(0));
auto out_dt = cldnn::element_type_to_data_type(op->get_output_element_type(0));
auto eltwisePrim = cldnn::eltwise(layerName,
inputPrimitives,
mode,

View File

@@ -41,7 +41,7 @@ static void CreateEmbeddingBagOffsetsSumOp(Program& p, const std::shared_ptr<ngr
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if (((portIndex == 1) || (portIndex == 2)) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for indices inputs,
// so we need additional reorders if they are provided as i64
@@ -78,7 +78,7 @@ static void CreateEmbeddingBagPackedSumOp(Program& p, const std::shared_ptr<ngra
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if ((portIndex == 1) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for indices input,
// so we need additional reorder if it's provided as i64
@@ -133,7 +133,7 @@ static void CreateEmbeddingSegmentsSumOp(Program& p, const std::shared_ptr<ngrap
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if (((portIndex == 1) || (portIndex == 2)) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for indices inputs,
// so we need additional reorders if they are provided as i64

View File

@@ -30,7 +30,7 @@ static void CreateExperimentalDetectronDetectionOutputOp(
const auto mutable_precision1 = op->get_output_element_type(1);
const auto output_shape1 = op->get_output_shape(1);
const cldnn::layout mutable_layout1{DataTypeFromPrecision(mutable_precision1),
const cldnn::layout mutable_layout1{cldnn::element_type_to_data_type(mutable_precision1),
cldnn::format::get_default_format(output_shape1.size()),
tensor_from_dims(output_shape1)};
cldnn::memory::ptr shared_memory1{p.GetEngine().allocate_memory(mutable_layout1)};
@@ -42,7 +42,7 @@ static void CreateExperimentalDetectronDetectionOutputOp(
const auto mutable_precision2 = op->get_output_element_type(2);
const auto output_shape2 = op->get_output_shape(2);
const cldnn::layout mutable_layout2{DataTypeFromPrecision(mutable_precision2),
const cldnn::layout mutable_layout2{cldnn::element_type_to_data_type(mutable_precision2),
cldnn::format::get_default_format(output_shape2.size()),
tensor_from_dims(output_shape2)};
cldnn::memory::ptr shared_memory2{p.GetEngine().allocate_memory(mutable_layout2)};

View File

@@ -30,7 +30,7 @@ static void CreateExperimentalDetectronGenerateProposalsSingleImageOp(
const auto mutable_precision = op->get_output_element_type(1);
const auto output_shape = op->get_output_shape(1);
const cldnn::layout mutable_layout{DataTypeFromPrecision(mutable_precision),
const cldnn::layout mutable_layout{cldnn::element_type_to_data_type(mutable_precision),
cldnn::format::get_default_format(output_shape.size()),
tensor_from_dims(output_shape)};
cldnn::memory::ptr shared_memory{p.GetEngine().allocate_memory(mutable_layout)};

View File

@@ -22,7 +22,7 @@ static void CreateExperimentalDetectronPriorGridGeneratorOp(
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronPriorGridGenerator>& op) {
validate_inputs_count(op, {3});
cldnn::tensor outTensor = mkTensor(op->get_output_shape(0));
auto outDataType = DataTypeFromPrecision(op->get_output_element_type(0));
auto outDataType = cldnn::element_type_to_data_type(op->get_output_element_type(0));
cldnn::layout outLayout{outDataType, cldnn::format::bfyx, outTensor};
auto& attrs = op->get_attrs();
auto& featmap_shape = op->get_input_shape(1);

View File

@@ -18,7 +18,7 @@ static void CreateExperimentalDetectronROIFeatureExtractorOp(Program& p, const s
std::string layerName = layer_type_name_ID(op) + ".out0";
cldnn::layout mutableLayout = cldnn::layout(
DataTypeFromPrecision(op->get_output_element_type(1)),
cldnn::element_type_to_data_type(op->get_output_element_type(1)),
cldnn::format::get_default_format(op->get_output_shape(1).size()),
tensor_from_dims(op->get_output_shape(1)));

View File

@@ -24,7 +24,7 @@ static void CreateFakeQuantizeOp(Program& p, const std::shared_ptr<ngraph::op::v
auto output_high_id = inputPrimitives[4];
int levels = static_cast<int>(op->get_levels());
auto dt = DataTypeFromPrecision(op->get_output_element_type(0));
auto dt = cldnn::element_type_to_data_type(op->get_output_element_type(0));
auto quantizationPrim = cldnn::quantize(layerName,
input_id,
input_low_id,

View File

@@ -22,7 +22,7 @@ static void CreateGatherTreeOp(Program& p, const std::shared_ptr<ngraph::op::v1:
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if (inputDataType == cldnn::data_types::i64) {
// GPU primitive does not support i64 inputs,
// so we need additional reorders to convert them to i32

View File

@@ -24,7 +24,7 @@ void CreateGatherOpBase(Program& p, const std::shared_ptr<T>& op, const int64_t
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if (inputDataType == cldnn::data_types::i64) {
// GPU primitive does not support i64 inputs,
// so we need additional reorders to convert them to i32

View File

@@ -20,7 +20,7 @@ static void CreateGRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::GRN>&
auto primitive = cldnn::grn(layerName,
inputPrimitives[0],
op->get_bias(),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, primitive);
}

View File

@@ -40,7 +40,7 @@ static DATA_TYPE CreateScalarData(Program &p, const cldnn::primitive_id& id, int
static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::shared_ptr<ngraph::Node>& op,
const cldnn::primitive_id& id, const cldnn::primitive_id& input,
const int32_t output_idx) {
const auto precision = DataTypeFromPrecision(op->get_output_element_type(output_idx));
const auto precision = cldnn::element_type_to_data_type(op->get_output_element_type(output_idx));
const auto format = cldnn::format::get_default_format(op->get_output_shape(output_idx).size());
const auto tensor = tensor_from_dims(op->get_output_shape(output_idx));
cldnn::layout output_layout = cldnn::layout(precision, format, tensor);
@@ -124,7 +124,7 @@ static void CreateLoopOp(Program& p, const std::shared_ptr<Loop>& op) {
{
const auto from_prim = body_topology.at(from_id);
const auto& to_ngraph_type = to->get_element_type();
const auto to_cldnn_type = DataTypeFromPrecision(to_ngraph_type);
const auto to_cldnn_type = cldnn::element_type_to_data_type(to_ngraph_type);
from_prim->output_data_type = to_cldnn_type;
}
back_edges.emplace_back(from_id, to_id);

View File

@@ -141,7 +141,7 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
inputName,
weightsName,
"",
DataTypeFromPrecision(op->get_output_element_type(0)),
cldnn::element_type_to_data_type(op->get_output_element_type(0)),
cldnn::padding(),
input_rank);
@@ -164,7 +164,7 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
}
cldnn::primitive_id reorderId = "reorder:" + outReshapeName + "_reorder";
cldnn::layout outputLayout(DataTypeFromPrecision(op->get_output_element_type(0)), outputFormat, outTensor);
cldnn::layout outputLayout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), outputFormat, outTensor);
auto reorder_prim = cldnn::reorder(reorderId,
layerName,
outputLayout,
@@ -206,7 +206,7 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
if (targetFormat.value != cldnn::format::get_default_format(inputDimsN).value) {
auto reorderName = layerName + "_cldnn_in" + std::to_string(i) + "_reorder";
auto targetDatatype = DataTypeFromPrecision(op->get_output_element_type(0));
auto targetDatatype = cldnn::element_type_to_data_type(op->get_output_element_type(0));
auto reorderPrim = cldnn::reorder(reorderName,
inputPrimitives[i],
targetFormat,
@@ -266,7 +266,7 @@ static void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::Mat
auto gemmPrim = cldnn::gemm(layerName,
inputPrimitives,
DataTypeFromPrecision(op->get_output_element_type(0)),
cldnn::element_type_to_data_type(op->get_output_element_type(0)),
transA,
transB,
alpha,

View File

@@ -24,7 +24,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
reorderedInputs.resize(inputPrimitives.size());
for (size_t portIndex = 0; portIndex < inputPrimitives.size(); portIndex++) {
auto inputDataType = DataTypeFromPrecision(op->get_input_element_type(portIndex));
auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex));
if ((portIndex == 2) && (inputDataType == cldnn::data_types::i64)) {
// GPU primitive supports only i32 data type for 'max_output_boxes_per_class' input
// so we need additional reorder if it's provided as i64
@@ -64,7 +64,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
mutable_precision_second = ngraph::element::i32;
}
cldnn::layout mutableLayoutSecond = cldnn::layout(
DataTypeFromPrecision(mutable_precision_second),
cldnn::element_type_to_data_type(mutable_precision_second),
cldnn::format::get_default_format(op->get_output_shape(2).size()),
tensor_from_dims(op->get_output_shape(2)));
@@ -82,7 +82,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
case 2: {
auto mutable_precision_first = op->get_output_element_type(1);
cldnn::layout mutableLayoutFirst = cldnn::layout(
DataTypeFromPrecision(mutable_precision_first),
cldnn::element_type_to_data_type(mutable_precision_first),
cldnn::format::bfyx,
cldnn::tensor(static_cast<int32_t>(outputIndices), 3, 1, 1));
@@ -112,7 +112,7 @@ static void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_pt
op->m_sort_result_descending,
"", "", "", "", "", "");
prim.output_data_type = DataTypeFromPrecision(out_type);
prim.output_data_type = cldnn::element_type_to_data_type(out_type);
switch (reorderedInputs.size()) {
case 6: prim.soft_nms_sigma = reorderedInputs[5];

View File

@@ -35,7 +35,7 @@ static void CreateNormalizeL2Op(Program& p, const std::shared_ptr<ngraph::op::v0
// We create fake scale constant and fill it with ones to keep the same behavior as current primitive
auto scale = std::make_shared<ngraph::op::v0::Constant>(op->get_output_element_type(0), ngraph::Shape{1}, std::vector<float>{1.0});
cldnn::layout constLayout = cldnn::layout(DataTypeFromPrecision(op->get_output_element_type(0)), cldnn::format::bfyx, cldnn::tensor{1});
cldnn::layout constLayout = cldnn::layout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), cldnn::format::bfyx, cldnn::tensor{1});
auto mem = p.GetEngine().allocate_memory(constLayout, false);
cldnn::mem_lock<int8_t> tmpPointer{mem, p.GetEngine().get_program_stream()};
auto buf = tmpPointer.data();

View File

@@ -60,7 +60,7 @@ static void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::One
auto oneHotPrim = cldnn::one_hot(layerName,
inputPrimitives[0],
out_tensor,
DataTypeFromPrecision(op->get_output_element_type(0)),
cldnn::element_type_to_data_type(op->get_output_element_type(0)),
axis,
depth,
on_value,

View File

@@ -95,7 +95,7 @@ static void CreateParameterOp(Program& p, const std::shared_ptr<ngraph::op::v0::
auto inputName = layer_type_name_ID(op);
auto preProcess = inputInfo->getPreProcess();
size_t meanChannels = preProcess.getNumberOfChannels();
cldnn::layout networkInputLayout(DataTypeFromPrecision(op->get_output_element_type(0)),
cldnn::layout networkInputLayout(cldnn::element_type_to_data_type(op->get_output_element_type(0)),
inputFormat,
dataTensor.transform(inputFormat, 1));
cldnn::primitive_id meanBlobID = inputName + Program::m_meanValuesTag;

View File

@@ -37,7 +37,7 @@ static void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Av
strides,
pads_begin,
tensor_from_dims(op->get_output_shape(0)),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
poolPrim.pad_end = pads_end;
p.add_primitive(*op, poolPrim);
}
@@ -65,7 +65,7 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Ma
strides,
pads_begin,
tensor_from_dims(op->get_output_shape(0)),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
poolPrim.pad_end = pads_end;
p.add_primitive(*op, poolPrim);
}
@@ -81,7 +81,7 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::Ma
const auto mutable_precision = op->get_output_element_type(1);
const auto output_shape = op->get_output_shape(1);
cldnn::layout mutableLayout = cldnn::layout(DataTypeFromPrecision(mutable_precision),
cldnn::layout mutableLayout = cldnn::layout(cldnn::element_type_to_data_type(mutable_precision),
cldnn::format::get_default_format(output_shape.size()),
tensor_from_dims(output_shape));
const auto shared_memory = p.GetEngine().allocate_memory(mutableLayout);
@@ -113,9 +113,9 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::Ma
pads_begin,
pads_end,
op->get_axis(),
DataTypeFromPrecision(op->get_index_element_type()),
cldnn::element_type_to_data_type(op->get_index_element_type()),
tensor_from_dims(op->get_output_shape(0)),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, poolPrim);
const cldnn::primitive_id maxpool_mutable_id_r = layer_type_name + ".out1";

View File

@@ -60,7 +60,7 @@ static void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::
offset,
width,
height,
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, priorBoxPrim);
}

View File

@@ -60,7 +60,7 @@ static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
mutable_precision = ngraph::element::i32;
}
cldnn::layout mutableLayout = cldnn::layout(DataTypeFromPrecision(mutable_precision),
cldnn::layout mutableLayout = cldnn::layout(cldnn::element_type_to_data_type(mutable_precision),
cldnn::format::get_default_format(op->get_output_shape(1).size()),
tensor_from_dims(op->get_output_shape(1)));

View File

@@ -20,7 +20,7 @@ void CreateRandomUniformOp(Program &p, const std::shared_ptr<ngraph::op::v8::Ran
auto random_uniform_prim = cldnn::random_uniform(layer_type_name_ID(op),
input_primitives,
DataTypeFromPrecision(op->get_out_type()),
cldnn::element_type_to_data_type(op->get_out_type()),
op->get_global_seed(),
op->get_op_seed(),
tensor_from_dims(output_shape),

View File

@@ -20,7 +20,7 @@ static void CreateRangeOp(Program &p, const std::shared_ptr<ngraph::op::v4::Rang
throw std::runtime_error { "range v4 output rank is " + std::to_string(r) };
}
cldnn::tensor outTensor { cldnn::spatial(outShape[0]) };
auto outDataType = DataTypeFromPrecision(op->get_output_element_type(0));
auto outDataType = cldnn::element_type_to_data_type(op->get_output_element_type(0));
cldnn::layout outLayout { outDataType, cldnn::format::bfyx, outTensor };
cldnn::range prim { layer_type_name_ID(op), p.GetInputPrimitiveIDs(op), outLayout };
p.add_primitive(*op, prim);

View File

@@ -75,7 +75,7 @@ static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
auto reorderLayerName = layerName + "_reorder";
cldnn::format out_format = cldnn::format::any;
auto out_dt = DataTypeFromPrecision(op->get_output_element_type(0));
auto out_dt = cldnn::element_type_to_data_type(op->get_output_element_type(0));
if (!keep_dims && rank > 4) {
if (rank - axes.size() == 6)
out_format = cldnn::format::bfwzyx;

View File

@@ -39,7 +39,7 @@ static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node
default: break;
}
cldnn::layout outputLayout(DataTypeFromPrecision(op->get_output_element_type(0)), outputFormat, outTensor);
cldnn::layout outputLayout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), outputFormat, outTensor);
p.add_primitive(*op, cldnn::reorder(reorderId,
reshapeInputId,
outputLayout,

View File

@@ -94,7 +94,7 @@ static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::L
float clip = op->get_clip();
// LSTM primitive works with single precision for all in/out/weights tensors
auto lstm_dtype = DataTypeFromPrecision(op->get_output_element_type(0));
auto lstm_dtype = cldnn::element_type_to_data_type(op->get_output_element_type(0));
cldnn::primitive_id inReshapeID = layerName + "_inReshape";
cldnn::primitive_id permuteID = layerName + "_inputReorder";
@@ -205,7 +205,7 @@ static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v
bool isForward = op->get_direction() == ngraph::op::RecurrentSequenceDirection::FORWARD;
// LSTM primitive works with single precision for all in/out/weights tensors
auto lstm_dtype = DataTypeFromPrecision(op->get_output_element_type(0));
auto lstm_dtype = cldnn::element_type_to_data_type(op->get_output_element_type(0));
cldnn::primitive_id inReshapeID = layerName + "_inReshape";
cldnn::primitive_id permuteID = layerName + "_inputReorder";

View File

@@ -42,7 +42,7 @@ static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Sel
if (targetFormat.value != cldnn::format::get_default_format(input_rank).value) {
auto reorderName = layerName + "_cldnn_in" + std::to_string(i) + "_reorder";
auto targetDatatype = DataTypeFromPrecision(op->get_input_element_type(i));
auto targetDatatype = cldnn::element_type_to_data_type(op->get_input_element_type(i));
auto reorderPrim = cldnn::reorder(reorderName,
inputPrimitives[i],
targetFormat,

View File

@@ -20,7 +20,7 @@ static void CreateShapeOfOpCommon(Program& p, const std::shared_ptr<ngraph::Node
auto primitive = cldnn::shape_of(layerName,
inputPrimitives[0],
op->get_output_partial_shape(0).rank().get_length(),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, primitive);
}

View File

@@ -37,7 +37,7 @@ static DATA_TYPE CreateScalarData(Program &p, const cldnn::primitive_id& id, int
static cldnn::mutable_data CreateAdditionalOutputData(Program &p, const std::shared_ptr<ngraph::Node>& op,
const cldnn::primitive_id& id, const cldnn::primitive_id& input,
const int32_t output_idx) {
const auto precision = DataTypeFromPrecision(op->get_output_element_type(output_idx));
const auto precision = cldnn::element_type_to_data_type(op->get_output_element_type(output_idx));
const auto format = cldnn::format::get_default_format(op->get_output_shape(output_idx).size());
const auto tensor = tensor_from_dims(op->get_output_shape(output_idx));
cldnn::layout output_layout = cldnn::layout(precision, format, tensor);
@@ -97,7 +97,7 @@ static void CreateTensorIteratorOp(Program &p, const std::shared_ptr<TensorItera
{
const auto from_prim = body_topology.at(from_id);
const auto& to_ngraph_type = to->get_element_type();
const auto to_cldnn_type = DataTypeFromPrecision(to_ngraph_type);
const auto to_cldnn_type = cldnn::element_type_to_data_type(to_ngraph_type);
from_prim->output_data_type = to_cldnn_type;
}
back_edges.emplace_back(from_id, to_id);

View File

@@ -31,7 +31,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>
mutable_precision = ngraph::element::i32;
}
cldnn::layout mutableLayout = cldnn::layout(DataTypeFromPrecision(mutable_precision),
cldnn::layout mutableLayout = cldnn::layout(cldnn::element_type_to_data_type(mutable_precision),
cldnn::format::get_default_format(op->get_output_shape(1).size()),
tensor_from_dims(op->get_output_shape(1)));
@@ -56,7 +56,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>
stype,
true,
cldnn::padding({0, 0, 0, 0}, 0),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, argmaxPrim);
@@ -74,7 +74,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>
stype,
true,
cldnn::padding({0, 0, 0, 0}, 0),
DataTypeFromPrecision(op->get_output_element_type(0)));
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
p.add_primitive(*op, argmaxPrim);
} else {

View File

@@ -54,7 +54,7 @@ static void CreateTransposeOp(Program& p, const std::shared_ptr<ngraph::op::v1::
auto reorder_prim = cldnn::reorder(layerName,
inputPrimitives[0],
cldnn::format::bfyx,
DataTypeFromPrecision(precision),
cldnn::element_type_to_data_type(precision),
std::vector<float>(),
cldnn::reorder_mean_mode::none);
p.add_primitive(*op, reorder_prim);

View File

@@ -19,7 +19,7 @@ void CreateVariableAccessPrimitive(Program &p, const std::shared_ptr<ngraph::op:
const std::string &variable_id) {
validate_inputs_count(op, {1});
const auto output_data_type = DataTypeFromPrecision(op->get_output_element_type(0));
const auto output_data_type = cldnn::element_type_to_data_type(op->get_output_element_type(0));
const auto op_output_shape = op->get_output_shape(0);
const auto output_format = cldnn::format::get_default_format(op_output_shape.size());
const auto output_shape = tensor_from_dims(op_output_shape);