[CPU] Add tests for SetBlob + I64 (#2402)

Maxim Andronov
2020-10-29 11:34:29 +03:00
committed by GitHub
parent 1144eefe4d
commit fdbfab8546
24 changed files with 492 additions and 205 deletions

@@ -114,6 +114,7 @@ file(GLOB SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/*.cpp
${LAYERS}
${OS_SPECIFIC_SRC}
)

@@ -29,6 +29,7 @@
#include <legacy/net_pass.h>
#include <legacy/details/ie_cnn_network_tools.h>
#include "nodes/common/cpu_memcpy.h"
#include "nodes/common/cpu_convert.h"
#include "precision_utils.h"
#include <ie_plugin_config.hpp>
@@ -757,9 +758,14 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) {
ext_blob->allocate();
}
if (ext_blob->byteSize() != intr_blob.GetSize())
THROW_IE_EXCEPTION << "Output blob size is not equal network output size ("
<< ext_blob->size() << "!=" << intr_blob.GetSize()/sizeof(float) << ").";
auto srcPrec = MKLDNNMemory::convertToIePrec(intr_blob.GetDataType());
auto dstPrec = ext_blob->getTensorDesc().getPrecision();
if (srcPrec == dstPrec && ext_blob->byteSize() != intr_blob.GetSize())
THROW_IE_EXCEPTION << "Output blob byte size is not equal network output byte size ("
<< ext_blob->byteSize() << "!=" << intr_blob.GetSize() << ").";
if (ext_blob->size() != intr_blob.GetElementsCount())
THROW_IE_EXCEPTION << "Output blob number of elements is not equal network output number of elements ("
<< ext_blob->size() << "!=" << intr_blob.GetElementsCount() << ").";
void *ext_blob_ptr = ext_blob->buffer();
void *intr_blob_ptr = intr_blob.GetData();
@@ -772,9 +778,9 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) {
// TODO: Should we support InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT???
if (config.batchLimit)
MB_to_process = std::min<int>(config.batchLimit, MB_to_process);
size_t size_to_copy = intr_blob.GetSize() * MB_to_process / MB;
size_t size_to_copy = intr_blob.GetElementsCount() * MB_to_process / MB;
cpu_memcpy_s(ext_blob_ptr, ext_blob->byteSize(), intr_blob_ptr, size_to_copy);
cpu_convert(intr_blob_ptr, ext_blob_ptr, srcPrec, dstPrec, size_to_copy);
}
}
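
For context on the two checks above, a minimal sketch with illustrative values (not taken from the commit): pulling an FP32 internal blob into a user-supplied I64 output blob is now a valid case, which is why the byte-size check fires only when the precisions match, while the element-count check guards both paths.

#include <cstddef>
#include <cstdint>

// Illustrative only: byte sizes legitimately differ while element counts match.
constexpr std::size_t elements = 10;
constexpr std::size_t srcBytes = elements * sizeof(float);    // 40 == intr_blob.GetSize()
constexpr std::size_t dstBytes = elements * sizeof(int64_t);  // 80 == ext_blob->byteSize()
static_assert(srcBytes != dstBytes, "byte sizes differ across precisions");
// cpu_convert(intr_blob_ptr, ext_blob_ptr, srcPrec /*FP32*/, dstPrec /*I64*/, elements);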

@@ -13,6 +13,7 @@
#include <ie_compound_blob.h>
#include "mkldnn_exec_network.h"
#include "mkldnn_itt.h"
#include "nodes/common/cpu_convert.h"
MKLDNNPlugin::MKLDNNInferRequest::MKLDNNInferRequest(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs,
@@ -55,33 +56,24 @@ void MKLDNNPlugin::MKLDNNInferRequest::pushInput(const std::string& inputName, I
graph->PushInputData(inputName, inputBlob);
}
namespace {
template <typename T, typename DstT>
void copyFrom(const InferenceEngine::Blob* src, DstT* dst) {
if (!dst) {
return;
}
const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
if (t_blob == nullptr) {
THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
<< typeid(T).name();
}
const T* srcPtr = t_blob->readOnly();
if (srcPtr == nullptr) {
THROW_IE_EXCEPTION << "Input data was not allocated.";
}
for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
template <typename dst>
void MKLDNNPlugin::MKLDNNInferRequest::copyConvert(InferenceEngine::Precision convertTo, const std::pair<std::string, InferenceEngine::Blob::Ptr> &input,
std::vector<InferenceEngine::Blob::Ptr> &convertedInputs) {
InferenceEngine::Blob::Ptr iconv = make_blob_with_precision(convertTo, InferenceEngine::TensorDesc(convertTo, input.second->getTensorDesc().getDims(),
input.second->getTensorDesc().getLayout()));
convertedInputs.push_back(iconv);
iconv->allocate();
auto in = dynamic_cast<InferenceEngine::TBlob<dst> *>(iconv.get());
if (in == nullptr)
THROW_IE_EXCEPTION << "Cannot get TBlob";
if (input.second->size() != iconv->size())
THROW_IE_EXCEPTION << "Can't copy tensor: input and converted tensors have different size: " << input.second->size() << " and " << iconv->size();
void *srcData = input.second->cbuffer().as<void *>();
void *dstData = iconv->buffer().as<void *>();
cpu_convert(srcData, dstData, input.second->getTensorDesc().getPrecision(), iconv->getTensorDesc().getPrecision(), iconv->size());
pushInput<dst>(input.first, iconv);
}
template <typename T>
void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
copyFrom<T>(src, dst);
}
} // namespace
void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
using namespace openvino::itt;
OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, profilingTask);
@@ -96,12 +88,9 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
std::vector<InferenceEngine::Blob::Ptr> convertedInputs;
for (auto input : _inputs) {
if (!_networkInputs[input.first]) {
THROW_IE_EXCEPTION <<
"input blobs map contains not registered during IInferencePlugin::LoadNetwork blob with name "
<< input.first;
THROW_IE_EXCEPTION << "Input blobs map contains not registered during IInferencePlugin::LoadNetwork blob with name " << input.first;
}
InferenceEngine::Blob::Ptr iconv;
InferenceEngine::TBlob<float> *in_f = nullptr;
switch (input.second->getTensorDesc().getPrecision()) {
case InferenceEngine::Precision::FP32:
@@ -113,33 +102,14 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
case InferenceEngine::Precision::I8:
pushInput<int8_t>(input.first, input.second);
break;
case InferenceEngine::Precision::U16: {
case InferenceEngine::Precision::U16:
// U16 is unsupported by mkldnn, so here we convert the blob and send I32
iconv = InferenceEngine::make_shared_blob<std::int32_t>({InferenceEngine::Precision::I32,
input.second->getTensorDesc().getDims(),
input.second->getTensorDesc().getLayout()});
convertedInputs.push_back(iconv);
iconv->allocate();
auto in = dynamic_cast<InferenceEngine::TBlob<std::int32_t> *>(iconv.get());
if (in == nullptr)
THROW_IE_EXCEPTION << "Cannot get TBlob";
copyFrom<uint16_t, std::int32_t>(input.second.get(), in->data());
pushInput<std::int32_t>(input.first, iconv);
}
copyConvert<int32_t>(InferenceEngine::Precision::I32, input, convertedInputs);
break;
case InferenceEngine::Precision::I16:
if (graph->hasMeanImageFor(input.first)) {
// If a mean image exists, we convert the blob and send FP32
iconv = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32,
input.second->getTensorDesc().getDims(),
input.second->getTensorDesc().getLayout()});
convertedInputs.push_back(iconv);
iconv->allocate();
in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
if (in_f == nullptr)
THROW_IE_EXCEPTION << "Cannot get TBlob";
copyToFloat<int16_t>(in_f->data(), input.second.get());
pushInput<float>(input.first, iconv);
copyConvert<float>(InferenceEngine::Precision::FP32, input, convertedInputs);
} else {
// Instead we can send I16 directly
pushInput<int16_t>(input.first, input.second);
@@ -149,21 +119,20 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
case InferenceEngine::Precision::BOOL:
if (graph->hasMeanImageFor(input.first)) {
// If a mean image exists, we convert the blob and send FP32
iconv = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32,
input.second->getTensorDesc().getDims(),
input.second->getTensorDesc().getLayout()});
convertedInputs.push_back(iconv);
iconv->allocate();
in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
if (in_f == nullptr)
THROW_IE_EXCEPTION << "Cannot get TBlob";
copyToFloat<uint8_t>(in_f->data(), input.second.get());
pushInput<float>(input.first, iconv);
copyConvert<float>(InferenceEngine::Precision::FP32, input, convertedInputs);
} else {
// Instead we can send I8 directly
pushInput<uint8_t>(input.first, input.second);
}
break;
case InferenceEngine::Precision::I64:
// I64 is unsupported by mkldnn, so here we convert the blob and send I32
copyConvert<int32_t>(InferenceEngine::Precision::I32, input, convertedInputs);
break;
case InferenceEngine::Precision::U64:
// U64 is unsupported by mkldnn, so here we convert the blob and send I32
copyConvert<int32_t>(InferenceEngine::Precision::I32, input, convertedInputs);
break;
default:
THROW_IE_EXCEPTION << "Unsupported input precision " << input.second->getTensorDesc().getPrecision();
}
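
In summary, the dispatch above now handles the mkldnn-unsupported input precisions as follows (a paraphrase of the switch, not new behavior):

// U16  -> I32   via copyConvert<int32_t>
// I64  -> I32   via copyConvert<int32_t>
// U64  -> I32   via copyConvert<int32_t>
// I16  -> FP32  via copyConvert<float>, but only when a mean image is applied;
//               otherwise the blob is pushed unchanged with pushInput<int16_t>
// BOOL -> FP32  via copyConvert<float>, but only when a mean image is applied;
//               otherwise the blob is pushed unchanged with pushInput<uint8_t>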
@@ -234,10 +203,18 @@ void MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const char *name, InferenceEngine
return;
}
_outputs[name] = make_blob_with_precision(blobs[name]->getTensorDesc());
InferenceEngine::TensorDesc desc = blobs[name]->getTensorDesc();
// WA: avoid the exception thrown when blocking descs are compared in SetBlob
// in the case where output blobs are pushed as inputs for the next network (Hetero plugin):
// the output tensor desc may differ from the real input tensor desc of the next network
// because an optimal descriptor was chosen (e.g. the inPlace case for the Split node)
auto currBlockDesc = InferenceEngine::BlockingDesc(desc.getDims(), desc.getBlockingDesc().getOrder());
desc = InferenceEngine::TensorDesc(desc.getPrecision(), desc.getDims(), currBlockDesc);
_outputs[name] = make_blob_with_precision(desc);
_outputs[name]->allocate();
if (blobs[name]->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32 &&
!graph->getProperty().batchLimit) {
if (desc.getPrecision() == InferenceEngine::Precision::FP32 && !graph->getProperty().batchLimit) {
externalPtr[name] = _outputs[name]->buffer();
}
data = _outputs[name];
@@ -267,8 +244,8 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const char *name, const Inference
size_t dataSize = data->size();
if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) {
if (foundInput->getPrecision() != data->getTensorDesc().getPrecision()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set Blob with precision "
<< data->getTensorDesc().getPrecision();
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set input blob with precision: "
<< data->getTensorDesc().getPrecision() << ", if CNNNetwork input blob precision is: " << foundInput->getPrecision();
}
const bool preProcRequired = preProcessingRequired(foundInput, data);
@@ -295,7 +272,12 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const char *name, const Inference
}
if (foundInput->getTensorDesc().getDims() != data->getTensorDesc().getDims()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set input Blob. Dimensions mismatch.";
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set input blob. Dimensions mismatch.";
}
if (data->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY &&
foundInput->getTensorDesc().getBlockingDesc() != data->getTensorDesc().getBlockingDesc()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set input blob. Blocking descriptor mismatch.";
}
if (data->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32 &&
@@ -311,6 +293,10 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const char *name, const Inference
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str
<< "cannot set compound blob: supported only for input pre-processing";
}
if (foundOutput->getPrecision() != data->getTensorDesc().getPrecision()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set output blob with precision: "
<< data->getTensorDesc().getPrecision() << ", if CNNNetwork output blob precision is: " << foundOutput->getPrecision();
}
size_t outputSize = foundOutput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR
? InferenceEngine::details::product(foundOutput->getDims())
: 1;
@@ -321,9 +307,9 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const char *name, const Inference
if (foundOutput->getTensorDesc().getDims() != data->getTensorDesc().getDims()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set output Blob. Dimensions mismatch.";
}
if (foundOutput->getPrecision() != data->getTensorDesc().getPrecision()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str
<< "Failed to set Blob with precision not corresponding to user output precision";
if (data->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && foundOutput->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY &&
foundOutput->getTensorDesc().getBlockingDesc() != data->getTensorDesc().getBlockingDesc()) {
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Failed to set output blob. Blocking descriptor mismatch.";
}
if (data->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32 &&
!graph->getProperty().batchLimit) {

@@ -46,6 +46,10 @@ public:
private:
template <typename T> void pushInput(const std::string& inputName, InferenceEngine::Blob::Ptr& inputBlob);
template <typename dst>
void copyConvert(InferenceEngine::Precision convertTo, const std::pair<std::string, InferenceEngine::Blob::Ptr> &input,
std::vector<InferenceEngine::Blob::Ptr> &convertedInputs);
void changeDefaultPtr();
std::shared_ptr<MKLDNNExecNetwork> execNetwork;
MKLDNNGraph* graph = nullptr;

@@ -98,10 +98,8 @@ void MKLDNNMemory::SetData(memory::data_type dataType, memory::format format, co
std::vector<ptrdiff_t> dims(memData.dims, memData.dims + memData.ndims);
auto data_type = GetDataType();
MKLDNNMemory src(eng);
src.Create(dims, data_type, format, data);
src.Create(dims, dataType, format, data);
std::shared_ptr<mkldnn::reorder> pReorder =
std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(src.GetPrimitive(), GetPrimitive()));
@@ -351,6 +349,28 @@ void MKLDNNMemory::CreateBlockingDesc(memory::desc &desc) {
blk.strides[0][curr_idx] = dims[curr_idx] == 0 ? 1 : blk.strides[0][prev_idx] * (std::max)((ptrdiff_t)1, dims[prev_idx]);
}
}
Precision MKLDNNMemory::convertToIePrec(memory::data_type dataType) {
switch (dataType) {
case memory::f32:
return Precision::FP32;
case memory::u8:
return Precision::U8;
case memory::s8:
return Precision::I8;
case memory::s16:
return Precision::I16;
case memory::s32:
return Precision::I32;
case memory::bin:
return Precision::BIN;
case memory::bf16:
return Precision::BF16;
default:
THROW_IE_EXCEPTION << "Unknown mkldnn data type";
}
}
memory::format MKLDNNMemory::Convert(const InferenceEngine::Layout layout) {
switch (layout) {
case NCHW:
@@ -1133,6 +1153,11 @@ MKLDNNMemoryDesc::MKLDNNMemoryDesc(const TensorDesc& tDesc):
mkldnnFormat = memory::format::x;
} else if (realDims.ndims() == 2) {
mkldnnFormat = memory::format::nc;
} else if (realDims.ndims() == 3) {
if (order == SizeVector{0, 1, 2})
mkldnnFormat = memory::format::tnc;
else if (order == SizeVector{1, 0, 2})
mkldnnFormat = memory::format::ntc;
} else if (realDims.ndims() == 4) {
if (order.size() == 7 &&
order[0] == 0 && order[1] == 1 && order[2] == 2 && order[3] == 3 && order[4] == 1 && order[5] == 0 && order[6] == 1) {

@@ -115,6 +115,7 @@ public:
static InferenceEngine::Layout GetPlainLayout(mkldnn::memory::dims dims);
static bool isConsistant(mkldnn::memory::dims dims, mkldnn::memory::format format);
static mkldnn::memory::format Convert(const InferenceEngine::Layout layout);
static InferenceEngine::Precision convertToIePrec(mkldnn::memory::data_type dataType);
static std::string formatToString(mkldnn::memory::format fmt);

@@ -217,7 +217,9 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, const st
input_precision != InferenceEngine::Precision::I16 &&
input_precision != InferenceEngine::Precision::I8 &&
input_precision != InferenceEngine::Precision::U8 &&
input_precision != InferenceEngine::Precision::BOOL) {
input_precision != InferenceEngine::Precision::BOOL &&
input_precision != InferenceEngine::Precision::I64 &&
input_precision != InferenceEngine::Precision::U64) {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str
<< "Input image format " << input_precision << " is not supported yet...";
}

@@ -0,0 +1,98 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_convert.h"
#include "cpu_memcpy.h"
#include <type_traits>
#include <ie_parallel.hpp>
using namespace InferenceEngine;
template<typename srcType, typename dstType>
void convert(void *srcPtr, void *dstPtr, const size_t size) {
if (std::is_same<srcType, dstType>::value) {
cpu_memcpy(dstPtr, srcPtr, size*sizeof(dstType));
} else {
const srcType *srcData = reinterpret_cast<const srcType *>(srcPtr);
dstType *dstData = reinterpret_cast<dstType *>(dstPtr);
parallel_for(size, [&](size_t i) {
dstData[i] = static_cast<dstType>(srcData[i]);
});
}
}
template <typename srcType>
void convertFrom(void *srcPtr, void *dstPtr, Precision dstPrc, const size_t size) {
switch (dstPrc) {
case Precision::U8:
convert<srcType, PrecisionTrait<Precision::U8>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::I8:
convert<srcType, PrecisionTrait<Precision::I8>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::U16:
convert<srcType, PrecisionTrait<Precision::U16>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::I16:
convert<srcType, PrecisionTrait<Precision::I16>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::I32:
convert<srcType, PrecisionTrait<Precision::I32>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::U64:
convert<srcType, PrecisionTrait<Precision::U64>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::I64:
convert<srcType, PrecisionTrait<Precision::I64>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::FP32:
convert<srcType, PrecisionTrait<Precision::FP32>::value_type>(srcPtr, dstPtr, size);
break;
case Precision::BOOL:
convert<srcType, PrecisionTrait<Precision::BOOL>::value_type>(srcPtr, dstPtr, size);
break;
default:
THROW_IE_EXCEPTION << "cpu_convert can't convert to: " << dstPrc << " precision";
}
}
void cpu_convert(void *srcPtr, void *dstPtr, Precision srcPrc, Precision dstPrc, const size_t size) {
if (srcPrc == dstPrc) {
cpu_memcpy(dstPtr, srcPtr, size*dstPrc.size());
return;
}
switch (srcPrc) {
case Precision::U8:
convertFrom<PrecisionTrait<Precision::U8>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::I8:
convertFrom<PrecisionTrait<Precision::I8>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::U16:
convertFrom<PrecisionTrait<Precision::U16>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::I16:
convertFrom<PrecisionTrait<Precision::I16>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::I32:
convertFrom<PrecisionTrait<Precision::I32>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::U64:
convertFrom<PrecisionTrait<Precision::U64>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::I64:
convertFrom<PrecisionTrait<Precision::I64>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::FP32:
convertFrom<PrecisionTrait<Precision::FP32>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
case Precision::BOOL:
convertFrom<PrecisionTrait<Precision::BOOL>::value_type>(srcPtr, dstPtr, dstPrc, size);
break;
default:
THROW_IE_EXCEPTION << "cpu_convert can't convert from: " << srcPrc << " precision";
}
}
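
cpu_convert.cpp resolves the (srcPrc, dstPrc) pair with a two-level switch: the outer switch fixes the source C++ type and calls convertFrom<srcType>, whose inner switch fixes the destination type and instantiates convert<srcType, dstType>. The same pattern stripped down to two precisions (names here are hypothetical, not from the commit):

#include <cstddef>
#include <cstdint>

enum class Prec { F32, I32 };

// Innermost level: both C++ types are fixed, so the loop is a plain element-wise cast.
template <typename SrcT, typename DstT>
void convertImpl(const void *src, void *dst, std::size_t n) {
    const SrcT *s = static_cast<const SrcT *>(src);
    DstT *d = static_cast<DstT *>(dst);
    for (std::size_t i = 0; i < n; ++i)
        d[i] = static_cast<DstT>(s[i]);
}

// Inner switch: fixes the destination type for an already-fixed source type.
template <typename SrcT>
void convertFromImpl(const void *src, void *dst, Prec dstPrc, std::size_t n) {
    switch (dstPrc) {
        case Prec::F32: convertImpl<SrcT, float>(src, dst, n); break;
        case Prec::I32: convertImpl<SrcT, int32_t>(src, dst, n); break;
    }
}

// Outer switch: fixes the source type from the runtime precision tag.
void convertSketch(const void *src, void *dst, Prec srcPrc, Prec dstPrc, std::size_t n) {
    switch (srcPrc) {
        case Prec::F32: convertFromImpl<float>(src, dst, dstPrc, n); break;
        case Prec::I32: convertFromImpl<int32_t>(src, dst, dstPrc, n); break;
    }
}

This instantiates one loop per (src, dst) pair at compile time while keeping a uniform void*-based entry point; the real file adds the same-precision cpu_memcpy shortcut and ie_parallel-based parallelism on top.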

@@ -0,0 +1,23 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ie_precision.hpp>
/**
* @brief Copies size elements from the buffer pointed to by srcPtr into the buffer pointed to by dstPtr.
* If the precisions srcPrc and dstPrc differ, the data is converted from srcPrc to dstPrc element-wise.
* @param srcPtr
* pointer to the source buffer
* @param dstPtr
* pointer to the destination buffer
* @param srcPrc
* precision of the source buffer
* @param dstPrc
* precision of the destination buffer
* @param size
* number of elements to be converted
* @return none.
*/
void cpu_convert(void *srcPtr, void *dstPtr, InferenceEngine::Precision srcPrc, InferenceEngine::Precision dstPrc, const size_t size);
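
A hypothetical call site, to make the contract above concrete (values and the include path are illustrative):

#include <cstdint>
#include <vector>
#include "cpu_convert.h"  // assumed include path

void cpuConvertExample() {
    std::vector<float> src = {1.5f, 2.0f, -3.7f, 4.0f};
    std::vector<int32_t> dst(src.size());
    // FP32 -> I32, 4 elements; when srcPrc == dstPrc the call degrades to cpu_memcpy.
    cpu_convert(src.data(), dst.data(),
                InferenceEngine::Precision::FP32, InferenceEngine::Precision::I32,
                src.size());
    // dst == {1, 2, -3, 4}: static_cast<int32_t>(float) truncates toward zero.
}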

@@ -4,7 +4,7 @@
#pragma once
#include <stdlib.h>
#include <cstring>
#include "ie_api.h"
/**

@@ -9,30 +9,19 @@
#include <vector>
#include "ie_parallel.hpp"
#include "ie_precision.hpp"
#include "common/cpu_convert.h"
namespace InferenceEngine {
namespace Extensions {
namespace Cpu {
class ConvertImpl: public ExtLayerBase {
template<typename src_d, typename dst_d>
void exec_cast(const Blob::CPtr& inputs, Blob::Ptr& outputs) {
const src_d *src_data = inputs->cbuffer().as<src_d *>() +
inputs->getTensorDesc().getBlockingDesc().getOffsetPadding();
dst_d* dst_data = outputs->buffer().as<dst_d *>() +
outputs->getTensorDesc().getBlockingDesc().getOffsetPadding();
if (inputs->size() != outputs->size())
THROW_IE_EXCEPTION << "Input and output buffers have different sizes!";
parallel_for(inputs->size(), [&](size_t i) {
dst_data[i] = static_cast<dst_d>(src_data[i]);
});
}
public:
explicit ConvertImpl(const CNNLayer* layer) {
try {
if (layer->insData.size() != 1 || layer->outData.empty())
THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
logPrefix = "Convert layer with name '" + layer->name + "' ";
if (layer->insData.size() != 1 || layer->outData.size() != 1)
THROW_IE_EXCEPTION << logPrefix << "has incorrect number of input/output edges";
precision = layer->GetParamAsString("precision");
@@ -40,13 +29,13 @@ public:
DataConfig dataIn;
const SizeVector& ins_dims = layer->insData[0].lock()->getTensorDesc().getDims();
dataIn.desc = TensorDesc(layer->insData[0].lock()->getTensorDesc().getPrecision(), ins_dims,
layer->insData[0].lock()->getTensorDesc().getLayout());
layer->insData[0].lock()->getTensorDesc().getLayout());
config.inConfs.push_back(dataIn);
DataConfig dataConfigOut;
const SizeVector& out_dims = layer->outData[0]->getTensorDesc().getDims();
dataConfigOut.desc = TensorDesc(layer->outData[0]->getTensorDesc().getPrecision(), out_dims,
layer->outData[0]->getTensorDesc().getLayout());
layer->outData[0]->getTensorDesc().getLayout());
config.outConfs.push_back(dataConfigOut);
config.dynBatchSupport = false;
confs.push_back(config);
@@ -55,72 +44,18 @@ public:
}
}
StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs,
ResponseDesc *resp) noexcept override {
StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override {
try {
auto compare = getPrecisionMask(inputs[0]->getTensorDesc().getPrecision(), outputs[0]->getTensorDesc().getPrecision());
switch (compare) {
case getPrecisionMask(Precision::U8, Precision::FP32):
exec_cast<PrecisionTrait<Precision::U8>::value_type, PrecisionTrait<Precision::FP32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::U8):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U8>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::I16, Precision::FP32):
exec_cast<PrecisionTrait<Precision::I16>::value_type, PrecisionTrait<Precision::FP32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::I16):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I16>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::U16, Precision::FP32):
exec_cast<PrecisionTrait<Precision::U16>::value_type, PrecisionTrait<Precision::FP32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::U16):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U16>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::I32, Precision::I32):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::I64, Precision::I64):
exec_cast<PrecisionTrait<Precision::I64>::value_type, PrecisionTrait<Precision::I64>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::U64, Precision::U64):
exec_cast<PrecisionTrait<Precision::U64>::value_type, PrecisionTrait<Precision::U64>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::FP32):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::I32, Precision::I64):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I64>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::I32, Precision::U64):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::U64>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::I32, Precision::FP32):
exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::I32):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I32>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::I64):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I64>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::FP32, Precision::U64):
exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U64>::value_type>(inputs[0], outputs[0]);
break;
case getPrecisionMask(Precision::U8, Precision::I32):
exec_cast<PrecisionTrait<Precision::U8>::value_type, PrecisionTrait<Precision::I32>::value_type>(inputs[0], outputs[0]);
break;
default:
std::stringstream ss;
ss << "Unsupported precisions: " << inputs[0]->getTensorDesc().getPrecision() << " -> " << outputs[0]->getTensorDesc().getPrecision();
std::string errorMsg = ss.str();
if (resp) {
errorMsg.copy(resp->msg, sizeof(resp->msg)-1);
}
THROW_IE_EXCEPTION << errorMsg;
}
void *srcPtr = inputs[0]->cbuffer().as<void *>();
void *dstPtr = outputs[0]->buffer().as<void *>();
if (inputs[0]->size() != outputs[0]->size())
THROW_IE_EXCEPTION << logPrefix << "has input and output buffers with different sizes";
cpu_convert(srcPtr, dstPtr, inputs[0]->getTensorDesc().getPrecision(), outputs[0]->getTensorDesc().getPrecision(), outputs[0]->size());
} catch (InferenceEngine::details::InferenceEngineException &ex) {
errorMsg = ex.what();
if (resp)
errorMsg.copy(resp->msg, sizeof(resp->msg)-1);
return GENERAL_ERROR;
} catch(...) {
return GENERAL_ERROR;
}
@@ -129,6 +64,7 @@ public:
private:
std::string precision;
std::string logPrefix;
};
REG_FACTORY_FOR(ConvertImpl, Convert);

@@ -0,0 +1,20 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/set_blob.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace BehaviorTestsDefinitions;
using namespace InferenceEngine;
const std::vector<Precision> precisionSet = {Precision::FP32, Precision::I16, Precision::U8, Precision::I8, Precision::U16, Precision::I32, Precision::BOOL,
Precision::I64, Precision::U64};
const std::vector<setType> typeSet = {setType::INPUT, setType::OUTPUT, setType::BOTH};
const auto params = ::testing::Combine(::testing::ValuesIn(precisionSet),
::testing::ValuesIn(typeSet),
::testing::Values(CommonTestUtils::DEVICE_CPU));
INSTANTIATE_TEST_CASE_P(smoke_SetBlobCPU, SetBlobTest, params, SetBlobTest::getTestCaseName);

@@ -8,24 +8,29 @@
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace InferenceEngine;
namespace {
const std::vector<std::vector<size_t>> inShape = {{1, 2, 3, 4}};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::U8,
InferenceEngine::Precision::I8,
const std::vector<Precision> precisions = {
Precision::U8,
Precision::I8,
Precision::U16,
Precision::I16,
Precision::I32,
Precision::U64,
Precision::I64,
Precision::FP32
};
INSTANTIATE_TEST_CASE_P(smoke_NoReshape, ConvertLayerTest,
INSTANTIATE_TEST_CASE_P(smoke_ConvertLayerTest, ConvertLayerTest,
::testing::Combine(
::testing::Values(inShape),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(precisions),
::testing::ValuesIn(precisions),
::testing::Values(Layout::ANY),
::testing::Values(Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConvertLayerTest::getTestCaseName);

@@ -41,8 +41,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*IEClassGetAvailableDevices.*)",
// TODO: Issue: 25533
R"(.*ConvertLikeLayerTest.*)",
// TODO: Issue: 34516
R"(.*ConvertLayerTest.*)",
// TODO: Issue: 34055
R"(.*ShapeOfLayerTest.*)",
R"(.*ReluShapeOfSubgraphTest.*)",

@@ -0,0 +1,38 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "functional_test_utils/layer_test_utils.hpp"
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/common_utils.hpp"
namespace BehaviorTestsDefinitions {
enum class setType {
INPUT,
OUTPUT,
BOTH
};
std::ostream& operator<<(std::ostream & os, setType type);
using SetBlobParams = std::tuple<InferenceEngine::Precision, // precision in CNNNetwork
setType, // type for which blob is set
std::string>; // Device name
class SetBlobTest : public testing::WithParamInterface<SetBlobParams>, virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<SetBlobParams> obj);
void Infer() override;
protected:
void SetUp() override;
private:
InferenceEngine::Precision precision;
setType type;
};
} // namespace BehaviorTestsDefinitions

@@ -0,0 +1,111 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/set_blob.hpp"
#include <single_layer_tests/cum_sum.hpp>
using namespace InferenceEngine;
namespace BehaviorTestsDefinitions {
std::ostream& operator<<(std::ostream & os, setType type) {
switch (type) {
case setType::INPUT:
os << "INPUT";
break;
case setType::OUTPUT:
os << "OUTPUT";
break;
case setType::BOTH:
os << "BOTH";
break;
default:
THROW_IE_EXCEPTION << "Not supported type for SetBlob";
}
return os;
}
std::string SetBlobTest::getTestCaseName(testing::TestParamInfo<SetBlobParams> obj) {
Precision prec;
setType type;
std::string targetDevice;
std::tie(prec, type, targetDevice) = obj.param;
std::ostringstream result;
result << "Type="<< type;
result << " Device="<< targetDevice;
result << " Precision=" << prec;
return result.str();
}
inline void fillBlob(Blob::Ptr &blob) {
switch (blob->getTensorDesc().getPrecision()) {
#define CASE(X) case X: CommonTestUtils::fill_data_random<X>(blob); break;
CASE(InferenceEngine::Precision::FP32)
CASE(InferenceEngine::Precision::U8)
CASE(InferenceEngine::Precision::U16)
CASE(InferenceEngine::Precision::I8)
CASE(InferenceEngine::Precision::I16)
CASE(InferenceEngine::Precision::I64)
CASE(InferenceEngine::Precision::U64)
CASE(InferenceEngine::Precision::I32)
CASE(InferenceEngine::Precision::BOOL)
#undef CASE
default:
THROW_IE_EXCEPTION << "Can't fill blob with precision: " << blob->getTensorDesc().getPrecision();
}
}
void SetBlobTest::Infer() {
inferRequest = executableNetwork.CreateInferRequest();
inputs.clear();
for (const auto &input : executableNetwork.GetInputsInfo()) {
const auto &info = input.second;
Blob::Ptr inBlob;
if (type == setType::INPUT || type == setType::BOTH) {
inBlob = make_blob_with_precision(precision, info->getTensorDesc());
inBlob->allocate();
fillBlob(inBlob);
} else {
inBlob = GenerateInput(*info);
}
inferRequest.SetBlob(info->name(), inBlob);
inputs.push_back(inBlob);
}
if (type == setType::OUTPUT || type == setType::BOTH) {
for (const auto &output : executableNetwork.GetOutputsInfo()) {
const auto &info = output.second;
Blob::Ptr outBlob = make_blob_with_precision(precision, info->getTensorDesc());
outBlob->allocate();
fillBlob(outBlob);
inferRequest.SetBlob(info->getName(), outBlob);
}
}
inferRequest.Infer();
}
void SetBlobTest::SetUp() {
SizeVector IS{4, 5, 6, 7};
std::tie(precision, type, targetDevice) = this->GetParam();
if (type == setType::OUTPUT || type == setType::BOTH)
outPrc = precision;
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision);
auto params = ngraph::builder::makeParams(ngPrc, {IS});
auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
auto axisNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector<int64_t>{-1})->output(0);
auto cumSum = std::dynamic_pointer_cast<ngraph::opset4::CumSum>(ngraph::builder::makeCumSum(paramOuts[0], axisNode, false, false));
ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(cumSum)};
function = std::make_shared<ngraph::Function>(results, params, "InferSetBlob");
}
TEST_P(SetBlobTest, CompareWithRefs) {
Run();
}
} // namespace BehaviorTestsDefinitions

@@ -468,6 +468,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::Tenso
CASE(InferenceEngine::Precision::I8)
CASE(InferenceEngine::Precision::I16)
CASE(InferenceEngine::Precision::I64)
CASE(InferenceEngine::Precision::U64)
CASE(InferenceEngine::Precision::BIN)
CASE(InferenceEngine::Precision::I32)
CASE(InferenceEngine::Precision::BOOL)

@@ -39,12 +39,29 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
const auto &size = actual->size();
switch (precision) {
case InferenceEngine::Precision::FP32:
Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
size, threshold);
Compare<float>(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer), size, threshold);
break;
case InferenceEngine::Precision::I32:
Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
Compare<int32_t>(reinterpret_cast<const int32_t *>(expectedBuffer), reinterpret_cast<const int32_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I64:
Compare<int64_t>(reinterpret_cast<const int64_t *>(expectedBuffer), reinterpret_cast<const int64_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I8:
Compare<int8_t>(reinterpret_cast<const int8_t *>(expectedBuffer), reinterpret_cast<const int8_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::U16:
Compare<uint16_t>(reinterpret_cast<const uint16_t *>(expectedBuffer), reinterpret_cast<const uint16_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I16:
Compare<int16_t>(reinterpret_cast<const int16_t *>(expectedBuffer), reinterpret_cast<const int16_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::BOOL:
case InferenceEngine::Precision::U8:
Compare<uint8_t>(reinterpret_cast<const uint8_t *>(expectedBuffer), reinterpret_cast<const uint8_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::U64:
Compare<uint64_t>(reinterpret_cast<const uint64_t *>(expectedBuffer), reinterpret_cast<const uint64_t *>(actualBuffer), size, 0);
break;
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
@@ -143,12 +160,14 @@ std::vector<std::vector<std::uint8_t>> LayerTestsCommon::CalculateRefs() {
}
auto ieOutPrc = outPrc;
if (outPrc == InferenceEngine::Precision::UNSPECIFIED) {
const auto &actualOutputs = GetOutputs();
ieOutPrc = actualOutputs[0]->getTensorDesc().getPrecision();
const auto &actualOutputs = GetOutputs();
std::vector<ngraph::element::Type_t> convertType(actualOutputs.size(), FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(ieOutPrc));
if (ieOutPrc == InferenceEngine::Precision::UNSPECIFIED) {
for (size_t i = 0; i < convertType.size(); i++) {
convertType[i] = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(actualOutputs[i]->getTensorDesc().getPrecision());
}
}
const auto &convertType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(ieOutPrc);
std::vector<std::vector<std::uint8_t>> expectedOutputs;
switch (refMode) {
case INTERPRETER: {

@@ -67,18 +67,29 @@ public:
protected:
LayerTestsCommon();
template<typename T>
typename std::enable_if<std::is_signed<T>::value, T>::type
static ie_abs(const T &val) {
return std::abs(val);
}
template<typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
static ie_abs(const T &val) {
return val;
}
template<class T>
void Compare(const T *expected, const T *actual, std::size_t size, T threshold) {
static void Compare(const T *expected, const T *actual, std::size_t size, T threshold) {
for (std::size_t i = 0; i < size; ++i) {
const auto &ref = expected[i];
const auto &res = actual[i];
const auto absoluteDifference = std::abs(res - ref);
const auto absoluteDifference = ie_abs(res - ref);
if (absoluteDifference <= threshold) {
continue;
}
const auto max = std::max(std::abs(res), std::abs(ref));
const auto max = std::max(ie_abs(res), ie_abs(ref));
ASSERT_TRUE(max != 0 && ((absoluteDifference / max) <= threshold))
<< "Relative comparison of values expected: " << ref << " and actual: " << res
<< " at index " << i << " with threshold " << threshold

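The ie_abs overloads above exist chiefly so that the Compare template also compiles for unsigned element types (U8/U16/U64): std::abs has no unsigned overload, so SFINAE routes unsigned T to a pass-through while signed T keeps the genuine absolute value. The pattern in isolation (a sketch, not the framework's API):

#include <cstdlib>
#include <type_traits>

template <typename T>
typename std::enable_if<std::is_signed<T>::value, T>::type
abs_of(T v) { return std::abs(v); }  // signed: genuine absolute value

template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
abs_of(T v) { return v; }            // unsigned: already non-negative

static_assert(std::is_same<decltype(abs_of(3u)), unsigned>::value,
              "3u picks the unsigned pass-through");
// abs_of(-3) == 3 via the signed overload; a bare std::abs(3u) would be ambiguous.
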
@@ -35,6 +35,8 @@ inline ::ngraph::element::Type convertIE2nGraphPrc(const InferenceEngine::Precis
return ::ngraph::element::Type(::ngraph::element::Type_t::i32);
case InferenceEngine::Precision::I64:
return ::ngraph::element::Type(::ngraph::element::Type_t::i64);
case InferenceEngine::Precision::U64:
return ::ngraph::element::Type(::ngraph::element::Type_t::u64);
case InferenceEngine::Precision::BOOL:
return ::ngraph::element::Type(::ngraph::element::Type_t::boolean);
case InferenceEngine::Precision::BIN:

@@ -215,7 +215,7 @@ inline ngraph::NodeVector castOps2Nodes(const std::vector<std::shared_ptr<opType
std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function,
const std::vector<std::vector<std::uint8_t>> &inputs,
element::Type_t convertType = element::Type_t::undefined);
std::vector<ngraph::element::Type_t> convertType = {});
//
// This function compares two nGraph functions and requires them to have exactly one output
@@ -230,7 +230,7 @@ std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function> &function
const std::vector<std::vector<std::uint8_t>> &inputs);
std::vector<std::vector<std::uint8_t>> getConstData(const std::shared_ptr<Function> &function,
element::Type_t convertType = element::Type_t::undefined);
std::vector<ngraph::element::Type_t> convertType = {});
std::shared_ptr<ngraph::Node> getNodeSharedPtr(const ngraph::NodeTypeInfo &type_info,
const ngraph::OutputVector &outputVector);

@@ -76,7 +76,7 @@ OutputVector convert2OutputVector(const std::vector<std::shared_ptr<Node>> &node
}
std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function, const std::vector<std::vector<std::uint8_t>> &inputs,
element::Type_t convertType) {
std::vector<ngraph::element::Type_t> convertType) {
runtime::Backend::set_backend_shared_library_search_directory("");
auto backend = runtime::Backend::create("INTERPRETER");
@@ -115,13 +115,15 @@ std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr
auto handle = backend->compile(function);
handle->call_with_validate(outputTensors, inputTensors);
auto outputs = std::vector<std::vector<std::uint8_t>>(results.size());
size_t in = 0;
for (const auto &result : results) {
const auto &resultIndex = function->get_result_index(result);
auto &output = outputs[resultIndex];
output.resize(shape_size(result->get_shape()) * result->get_element_type().size());
outputTensors[resultIndex]->read(output.data(), output.size());
if (convertType != element::Type_t::undefined)
output = convertOutputPrecision(output, result->get_element_type(), convertType, shape_size(result->get_shape()));
if (!convertType.empty() && convertType[in] != element::Type_t::undefined && result->get_element_type() != element::Type(convertType[in]))
output = convertOutputPrecision(output, result->get_element_type(), convertType[in], shape_size(result->get_shape()));
in++;
}
return outputs;
@@ -155,7 +157,7 @@ std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function> &function
return foldedFunc;
}
std::vector<std::vector<std::uint8_t>> getConstData(const std::shared_ptr<Function> &function, element::Type_t convertType) {
std::vector<std::vector<std::uint8_t>> getConstData(const std::shared_ptr<Function> &function, std::vector<ngraph::element::Type_t> convertType) {
size_t numOutputs = function->get_output_size();
auto outputs = std::vector<std::vector<std::uint8_t>>(numOutputs);
for (size_t i = 0; i < numOutputs; i++) {
@@ -169,8 +171,8 @@ std::vector<std::vector<std::uint8_t>> getConstData(const std::shared_ptr<Functi
const auto dataSize = shape_size(parrentNode->get_shape()) * parrentNode->get_element_type().size();
outputs[i].resize(dataSize);
std::copy(data, data + dataSize, outputs[i].data());
if (convertType != element::Type_t::undefined)
outputs[i] = convertOutputPrecision(outputs[i], parrentNode->get_element_type(), convertType, shape_size(parrentNode->get_shape()));
if (!convertType.empty() && convertType[i] != element::Type_t::undefined && parrentNode->get_element_type() != element::Type(convertType[i]))
outputs[i] = convertOutputPrecision(outputs[i], parrentNode->get_element_type(), convertType[i], shape_size(parrentNode->get_shape()));
}
return outputs;
}
@@ -508,28 +510,28 @@ std::vector<std::uint8_t> convertOutputPrecision(std::vector<std::uint8_t> &outp
case element::Type_t::boolean: {
switch (toPrecision) {
case element::Type_t::u8: {
return convertPrecision<bool, uint8_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, uint8_t>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::u16: {
return convertPrecision<bool, uint16_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, uint16_t>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::i8: {
return convertPrecision<bool, int8_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, int8_t>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::i16: {
return convertPrecision<bool, int16_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, int16_t>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::i32: {
return convertPrecision<bool, int32_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, int32_t>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::i64: {
return convertPrecision<bool, int64_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, int64_t>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::f32: {
return convertPrecision<bool, float>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, float>(output, elementsCount, element::Type(toPrecision).size());
}
case element::Type_t::u64: {
return convertPrecision<bool, uint64_t>(output, elementsCount, element::Type(toPrecision).size());
return convertPrecision<char, uint64_t>(output, elementsCount, element::Type(toPrecision).size());
}
default:
throw std::runtime_error("convertOutputPrecision can't convert from: " + element::Type(fromPrecision).get_type_name() + " to: " +
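
The switch from convertPrecision<bool, ...> to convertPrecision<char, ...> reads the INTERPRETER's boolean output buffer through char instead of bool. Presumably this avoids forming bool lvalues over raw bytes: char may alias any object representation, whereas on typical ABIs only the byte values 0 and 1 are valid for bool, so reading any other byte through bool* is undefined. A minimal illustration (assuming convertPrecision reinterprets the byte buffer as const fromType* before widening, as its name suggests):

#include <cstdint>

const unsigned char raw[] = {0, 1, 2, 0};  // raw bytes of a "boolean" tensor
const char *asChar = reinterpret_cast<const char *>(raw);
const int64_t widened = static_cast<int64_t>(asChar[2]);  // 2: byte read verbatim, then widened
// reinterpret_cast<const bool *>(raw)[2] would read a byte that is not a valid bool
// representation; the result is undefined rather than "nonzero means true".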

@@ -29,7 +29,6 @@ from tests.test_ngraph.util import run_op_node
from tests import (xfail_issue_34323,
xfail_issue_35929,
xfail_issue_36476,
xfail_issue_36478,
xfail_issue_36479,
xfail_issue_36480)
@@ -72,7 +71,7 @@ def test_ngraph_function_api():
pytest.param(np.uint8, marks=xfail_issue_36479),
np.uint16,
pytest.param(np.uint32, marks=xfail_issue_36476),
pytest.param(np.uint64, marks=xfail_issue_36478),
np.uint64,
],
)
def test_simple_computation_on_ndarrays(dtype):

@@ -252,8 +252,7 @@ tests_expected_to_fail = [
"OnnxBackendNodeModelTest.test_min_uint32_cpu"),
(xfail_issue_36478,
"OnnxBackendNodeModelTest.test_max_uint64_cpu",
"OnnxBackendNodeModelTest.test_min_uint64_cpu",
"OnnxBackendNodeModelTest.test_pow_types_float32_uint64_cpu"),
"OnnxBackendNodeModelTest.test_min_uint64_cpu"),
(xfail_issue_36437,
"OnnxBackendNodeModelTest.test_argmax_default_axis_example_cpu",
"OnnxBackendNodeModelTest.test_argmax_default_axis_random_cpu",