[VPU] Convert precisions on input/output blobs (#9115)

This commit is contained in:
Mateusz Tabaka
2022-01-18 15:43:27 +01:00
committed by GitHub
parent 0824bc65d1
commit 79e442eea6
14 changed files with 319 additions and 34 deletions

View File

@@ -118,7 +118,8 @@ ngraph::element::Type details::toLegacyType(const ngraph::element::Type& ngraph_
if (input) {
return ngraph_type == ngraph::element::f16 ? ngraph::element::f32 : ngraph_type;
} else {
if (ngraph_type == ngraph::element::i64 || ngraph_type == ngraph::element::i32) {
if (ngraph_type == ngraph::element::i64 || ngraph_type == ngraph::element::u64 ||
ngraph_type == ngraph::element::i32 || ngraph_type == ngraph::element::u32) {
return ngraph::element::i32;
} else if (ngraph_type != ngraph::element::f32) {
return ngraph::element::f32;

View File

@@ -26,8 +26,6 @@ using namespace vpu;
using namespace vpu::MyriadPlugin;
using namespace InferenceEngine;
#define MEMCPY(dst, src, bytes) std::copy_n((src), (bytes), (dst))
MyriadInferRequest::MyriadInferRequest(GraphDesc &graphDesc,
const std::vector<std::shared_ptr<const ov::Node>>& inputs,
const std::vector<std::shared_ptr<const ov::Node>>& outputs,
@@ -119,6 +117,104 @@ void MyriadInferRequest::InferImpl() {
GetResult();
}
// Returns true when the given blob precision has no native device support and
// therefore requires an element-wise conversion to/from the device-side I32
// representation (see convertInput/convertOutput). FP16/FP32/I32 blobs — and
// any other precision — go through the plain byte-copy path.
// Fix: the original had an unreachable `return false;` after an exhaustive
// switch whose `default` already returned, and the FP16/FP32/I32 cases merely
// duplicated the default; both removed.
static bool needsTypeConvert(const Precision& precision) {
    switch (precision) {
    case Precision::I64:
    case Precision::U64:
    case Precision::BOOL:
        return true;
    default:
        // FP16/FP32/I32 and everything else are copied without conversion.
        return false;
    }
}
// Element-wise static_cast copy of n elements from src to dst.
// T and U may differ in width/signedness; narrowing follows the usual
// static_cast rules.
template <typename T, typename U>
static void convert(const T* const src, U* dst, size_t n) {
    for (size_t idx = 0; idx < n; ++idx) {
        dst[idx] = static_cast<U>(src[idx]);
    }
}
static const char* const NOT_ENOUGH_INPUT_SPACE_ERR_MSG = "Not enough space available in inputBuffer. Input size is too big";
// Converts a user input blob of an unsupported precision (I64/U64/BOOL) into
// the device-side I32 representation, writing into the device input buffer.
// 'size' is the byte size of the source data; 'remainingSize' is the space
// left in the destination buffer, checked before each conversion.
static void convertInput(const uint8_t* const src, uint8_t* dst, const Precision& precision, size_t size, size_t remainingSize) {
    // U32 -> I32 is handled in copyInput by std::copy_n, unless blob layout != vpuLayout
    // then assert "Unimplemented blob transformation from precision .. to .." in ie::blob_copy fires
    if (precision == Precision::I64) {
        const size_t count = size / sizeof(int64_t);
        IE_ASSERT((count * sizeof(int32_t)) <= remainingSize) << NOT_ENOUGH_INPUT_SPACE_ERR_MSG;
        convert(reinterpret_cast<const int64_t* const>(src), reinterpret_cast<int32_t*>(dst), count);
    } else if (precision == Precision::U64) {
        const size_t count = size / sizeof(uint64_t);
        IE_ASSERT((count * sizeof(int32_t)) <= remainingSize) << NOT_ENOUGH_INPUT_SPACE_ERR_MSG;
        convert(reinterpret_cast<const uint64_t* const>(src), reinterpret_cast<int32_t*>(dst), count);
    } else if (precision == Precision::BOOL) {
        const size_t count = size / sizeof(bool);
        IE_ASSERT((count * sizeof(int32_t)) <= remainingSize) << NOT_ENOUGH_INPUT_SPACE_ERR_MSG;
        convert(reinterpret_cast<const bool* const>(src), reinterpret_cast<int32_t*>(dst), count);
    }
    // Any other precision is a no-op here; callers gate on needsTypeConvert().
}
// Converts device-produced I32 results back into the user-visible output
// precision (I64/U64/BOOL). 'size' is the byte size of the DESTINATION blob,
// so the element count is derived from the destination element width.
static void convertOutput(const uint8_t* src, uint8_t* dst, const Precision& precision, size_t size) {
    if (precision == Precision::I64) {
        convert(reinterpret_cast<const int32_t*>(src), reinterpret_cast<int64_t*>(dst), size / sizeof(int64_t));
    } else if (precision == Precision::U64) {
        convert(reinterpret_cast<const int32_t*>(src), reinterpret_cast<uint64_t*>(dst), size / sizeof(uint64_t));
    } else if (precision == Precision::BOOL) {
        convert(reinterpret_cast<const int32_t*>(src), reinterpret_cast<bool*>(dst), size / sizeof(bool));
    }
    // Other precisions never reach here (callers gate on needsTypeConvert()).
}
// Copies one input blob into the device input buffer at 'dst'.
// Precisions needing type conversion (I64/U64/BOOL) go through convertInput
// and require layout == vpuLayout; everything else is either layout-converted
// via copyBlob or memcpy'd verbatim.
static void copyInput(const ie::Blob::Ptr& inputBlob, uint8_t* dst, size_t size, size_t remainingSize, ie::Layout layout, ie::Layout vpuLayout) {
    const auto& precision = inputBlob->getTensorDesc().getPrecision();
    if (needsTypeConvert(precision)) {
        // Element-wise conversion cannot also transform layouts.
        IE_ASSERT(layout == vpuLayout) << "Can't convert blob with layout not matching vpu layout";
        convertInput(inputBlob->buffer().as<uint8_t*>(), dst, precision, size, remainingSize);
        return;
    }
    IE_ASSERT(size <= remainingSize) << NOT_ENOUGH_INPUT_SPACE_ERR_MSG;
    if (layout == vpuLayout) {
        std::copy_n(inputBlob->buffer().as<uint8_t*>(), size, dst);
    } else {
        copyBlob(inputBlob, vpuLayout, dst);
    }
}
// Copies one result from the device result buffer ('src') into the user's
// output blob. Precisions needing type conversion (I64/U64/BOOL) go through
// convertOutput and require matching layouts; otherwise the data is either
// layout-converted via a temporary wrapper blob or memcpy'd verbatim.
static void copyOutput(uint8_t* src, const ie::Blob::Ptr& outputBlob, const Precision& outPrec, const SizeVector& outDims, ie::Layout vpuLayout) {
    const auto layout = outputBlob->getTensorDesc().getLayout();
    if (needsTypeConvert(outPrec)) {
        IE_ASSERT(layout == vpuLayout) << "Can't convert blob with layout not matching vpu layout";
        convertOutput(src, outputBlob->buffer().as<uint8_t*>(), outPrec, outputBlob->byteSize());
        return;
    }
    if (layout == vpuLayout) {
        std::copy_n(src, outputBlob->byteSize(), outputBlob->buffer().as<uint8_t*>());
    } else {
        // TODO: TensorDesc doesn't update internal BlockingDesc and strides when setLayout is called
        const auto tempTensorDesc = ie::TensorDesc{outPrec, outDims, vpuLayout};
        const auto tmpBlob = make_blob_with_precision(tempTensorDesc, src);
        copyBlob(tmpBlob, outputBlob);
    }
}
void MyriadInferRequest::InferAsync() {
if (_isNetworkConstant) {
return;
@@ -151,21 +247,13 @@ void MyriadInferRequest::InferAsync() {
const auto offset = getOffset(name);
const auto byteSize = blob->byteSize();
const auto requiredSize = vpu::checked_cast<size_t>(offset) + byteSize;
IE_ASSERT(requiredSize <= inputBuffer.size()) << "MyriadInferRequest::InferAsync()\n"
<< "Input offset is too big. "
<< "Required size: " << requiredSize
<< ", Input buffer size: " << inputBuffer.size();
const auto remainingSize = inputBuffer.size() - vpu::checked_cast<size_t>(offset);
const auto foundBlob = getNetInputInfo(name);
const auto vpuLayout = foundBlob->second->getTensorDesc().getLayout();
const auto layout = blob->getTensorDesc().getLayout();
if (layout != vpuLayout) {
copyBlob(blob, vpuLayout, &inputBuffer[offset]);
} else {
MEMCPY(&inputBuffer[offset], blob->buffer().as<uint8_t*>(), byteSize);
}
copyInput(blob, &inputBuffer[offset], byteSize, remainingSize, layout, vpuLayout);
const auto offsetShape = inputInfo.offset.find(name+"_real_shape");
if (offsetShape == inputInfo.offset.end()) {
continue;
@@ -182,14 +270,20 @@ void MyriadInferRequest::InferAsync() {
_executor->queueInference(_graphDesc, inputBuffer.data(),
_inputInfo.totalSize, nullptr, 0);
}
static void copyBlobAccordingUpperBound(
const Blob::Ptr& in,
const Blob::Ptr& out) {
const auto inLayout = in->getTensorDesc().getLayout();
const auto outLayout = out->getTensorDesc().getLayout();
const auto& inDesc = in->getTensorDesc();
const auto& outDesc = out->getTensorDesc();
const auto inLayout = inDesc.getLayout();
const auto outLayout = outDesc.getLayout();
const auto& inBlockingDesc = in->getTensorDesc().getBlockingDesc();
const auto& outBlockingDesc = out->getTensorDesc().getBlockingDesc();
const auto& outPrec = outDesc.getPrecision();
bool needsConvert = needsTypeConvert(outPrec);
const auto& inBlockingDesc = inDesc.getBlockingDesc();
const auto& outBlockingDesc = outDesc.getBlockingDesc();
const auto& inDims = inBlockingDesc.getBlockDims();
const auto& outDims = outBlockingDesc.getBlockDims();
@@ -226,7 +320,11 @@ static void copyBlobAccordingUpperBound(
}
if (!isGarbageLine) {
// We transfer outLineByteSize bytes, so garbage data at the end of the line is not copied.
std::copy_n(inPtr + inByteOffset, outLineByteSize, outPtr + outByteOffset);
if (!needsConvert) {
std::copy_n(inPtr + inByteOffset, outLineByteSize, outPtr + outByteOffset);
} else {
convertOutput(inPtr + inByteOffset, outPtr + outByteOffset, outPrec, outLineByteSize);
}
outByteOffset += outLineByteSize;
}
}
@@ -261,7 +359,7 @@ void MyriadInferRequest::GetResult() {
const auto& name = (*it).first;
const auto& blob = (*it).second;
if (blob->getTensorDesc().getLayout() == getVpuLayout(name)) {
if (blob->getTensorDesc().getLayout() == getVpuLayout(name) && !needsTypeConvert(blob->getTensorDesc().getPrecision())) {
_executor->getResult(_graphDesc, blob->buffer(), static_cast<unsigned>(blob->byteSize()));
return;
}
@@ -324,11 +422,7 @@ void MyriadInferRequest::GetResult() {
copyBlobAccordingUpperBound(tmpBlob, ieBlob);
} else {
// TODO: TensorDesc doesn't update internal BlockingDesc and strides when setLayout is called
const auto tempTensorDesc = ie::TensorDesc{ieOutPrc, ieOutDims, getVpuLayout(ieBlobName)};
const auto tmpBlob = make_blob_with_precision(tempTensorDesc, resultBuffer.data() + resultOffset(ieBlobName));
copyBlob(tmpBlob, ieBlob);
copyOutput(resultBuffer.data() + resultOffset(ieBlobName), ieBlob, ieOutPrc, ieOutDims, getVpuLayout(ieBlobName));
}
}
}

View File

@@ -31,10 +31,6 @@ std::vector<InferenceEngine::Precision> fpTypes = {
InferenceEngine::Precision::FP16,
};
std::vector<InferenceEngine::Precision> intTypes = {
InferenceEngine::Precision::I32,
};
std::vector<ngraph::helpers::ComparisonTypes> comparisonOpTypesFpToFp = {
ngraph::helpers::ComparisonTypes::EQUAL,
ngraph::helpers::ComparisonTypes::NOT_EQUAL,
@@ -76,4 +72,61 @@ INSTANTIATE_TEST_SUITE_P(smoke_ComparisonInt,
::testing::Values(Config{{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
ComparisonLayerTest::getTestCaseName);
std::vector<ngraph::helpers::ComparisonTypes> comparisonOpsInt = {
ngraph::helpers::ComparisonTypes::EQUAL,
ngraph::helpers::ComparisonTypes::GREATER_EQUAL,
};
INSTANTIATE_TEST_SUITE_P(smoke_ComparisonBOOL,
ComparisonLayerTest,
::testing::Combine(
::testing::ValuesIn(CommonTestUtils::combineParams(inputShapes)),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::ValuesIn(comparisonOpsInt),
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(Config{{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
ComparisonLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ComparisonU32,
ComparisonLayerTest,
::testing::Combine(
::testing::ValuesIn(CommonTestUtils::combineParams(inputShapes)),
::testing::Values(InferenceEngine::Precision::U32),
::testing::ValuesIn(comparisonOpsInt),
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(Config{{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
ComparisonLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ComparisonI64,
ComparisonLayerTest,
::testing::Combine(
::testing::ValuesIn(CommonTestUtils::combineParams(inputShapes)),
::testing::Values(InferenceEngine::Precision::I64),
::testing::ValuesIn(comparisonOpsInt),
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(Config{{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
ComparisonLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ComparisonU64,
ComparisonLayerTest,
::testing::Combine(
::testing::ValuesIn(CommonTestUtils::combineParams(inputShapes)),
::testing::Values(InferenceEngine::Precision::U64),
::testing::ValuesIn(comparisonOpsInt),
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(Config{{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
ComparisonLayerTest::getTestCaseName);
} // namespace

View File

@@ -32,6 +32,9 @@ std::vector<ov::test::ElementType> fpTypes = {
std::vector<ov::test::ElementType> intTypes = {
ov::element::i32,
ov::element::u32,
ov::element::i64,
ov::element::u64,
};
std::vector<CommonTestUtils::OpType> opTypes = {

View File

@@ -27,10 +27,15 @@ TEST_P(GatherElementsLayerTestVPU, GatherElementsTests) {
const std::vector<InferenceEngine::Precision> dPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::I32,
InferenceEngine::Precision::U32,
InferenceEngine::Precision::I64,
InferenceEngine::Precision::U64,
};
const std::vector<InferenceEngine::Precision> iPrecisions = {
InferenceEngine::Precision::I32
InferenceEngine::Precision::I32,
InferenceEngine::Precision::I64,
};
INSTANTIATE_TEST_SUITE_P(smoke_GatherElements1, GatherElementsLayerTestVPU,
@@ -54,4 +59,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_GatherElements2, GatherElementsLayerTestVPU,
GatherElementsLayerTest::getTestCaseName);
} // namespace
} // namespace

View File

@@ -13,10 +13,14 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::U8,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::I32,
InferenceEngine::Precision::I64,
InferenceEngine::Precision::U64,
};
const std::vector<InferenceEngine::Precision> indicesPrecisions = {
InferenceEngine::Precision::I32,
InferenceEngine::Precision::I64,
};
const std::vector<GatherNDParamsSubset> layerParams = {

View File

@@ -96,4 +96,41 @@ INSTANTIATE_TEST_SUITE_P(smoke_EltwiseLogicalNotInt,
::testing::Values(additional_config)),
LogicalLayerTest::getTestCaseName);
std::vector<ngraph::helpers::LogicalTypes> logicalOpTypes = {
ngraph::helpers::LogicalTypes::LOGICAL_AND,
};
std::vector<ngraph::helpers::InputLayerType> secondInputTypes = {
ngraph::helpers::InputLayerType::CONSTANT,
ngraph::helpers::InputLayerType::PARAMETER,
};
const auto LogicalTestParams = ::testing::Combine(
::testing::ValuesIn(LogicalLayerTest::combineShapes(inputShapes)),
::testing::ValuesIn(logicalOpTypes),
::testing::ValuesIn(secondInputTypes),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(additional_config));
const auto LogicalTestParamsNot = ::testing::Combine(
::testing::ValuesIn(LogicalLayerTest::combineShapes(inputShapesNot)),
::testing::Values(ngraph::helpers::LogicalTypes::LOGICAL_NOT),
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Precision::BOOL),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(additional_config));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, LogicalLayerTest, LogicalTestParams, LogicalLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNot, LogicalLayerTest, LogicalTestParamsNot, LogicalLayerTest::getTestCaseName);
} // namespace

View File

@@ -46,4 +46,43 @@ INSTANTIATE_TEST_SUITE_P(smoke_maximum, MaxMinLayerTest,
::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
MaxMinLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_maximumU32, MaxMinLayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes),
::testing::ValuesIn(opType),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputType),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
MaxMinLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_maximumI64, MaxMinLayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes),
::testing::ValuesIn(opType),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputType),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
MaxMinLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_maximumU64, MaxMinLayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes),
::testing::ValuesIn(opType),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputType),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
MaxMinLayerTest::getTestCaseName);
} // namespace

View File

@@ -34,6 +34,9 @@ std::vector<std::vector<size_t>> inShapes = {
const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::I32,
InferenceEngine::Precision::U32,
InferenceEngine::Precision::I64,
InferenceEngine::Precision::U64,
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::U8,
};

View File

@@ -67,4 +67,40 @@ INSTANTIATE_TEST_SUITE_P(smoke_StridedSlice_tests, StridedSliceLayerTest,
::testing::Values(getConfig())),
StridedSliceLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_StridedSlice_testsU32, StridedSliceLayerTest,
::testing::Combine(
::testing::ValuesIn(testCases),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Precision::U32),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(getConfig())),
StridedSliceLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_StridedSlice_testsI64, StridedSliceLayerTest,
::testing::Combine(
::testing::ValuesIn(testCases),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Precision::I64),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(getConfig())),
StridedSliceLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_StridedSlice_testsU64, StridedSliceLayerTest,
::testing::Combine(
::testing::ValuesIn(testCases),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Precision::U64),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::Values(getConfig())),
StridedSliceLayerTest::getTestCaseName);
} // namespace

View File

@@ -47,6 +47,9 @@ const std::vector<std::pair<std::vector<std::size_t>, int>> inputShapes = {
const std::vector<InferenceEngine::Precision> networkPrecisions = {
InferenceEngine::Precision::I32,
InferenceEngine::Precision::U32,
InferenceEngine::Precision::I64,
InferenceEngine::Precision::U64,
InferenceEngine::Precision::FP32,
};
@@ -77,8 +80,8 @@ private:
const auto& inputShape = std::get<1>(params).first;
const auto& axis = std::get<1>(params).second;
const auto& networkPrecision = std::get<2>(params);
const auto& inputPrecision = InferenceEngine::Precision::UNSPECIFIED;
const auto& outputPrecision = InferenceEngine::Precision::UNSPECIFIED;
const auto& inputPrecision = networkPrecision;
const auto& outputPrecision = networkPrecision;
const auto& inputLayout = InferenceEngine::Layout::ANY;
const auto& outputLayout = InferenceEngine::Layout::ANY;

View File

@@ -63,6 +63,7 @@ protected:
StaticShapeBroadcastParam shapes;
std::tie(shapes, inPrc, targetDevice) = this->GetParam();
outPrc = inPrc;
const auto inputShape = std::get<0>(shapes);
const auto targetShape = std::get<1>(shapes);
@@ -122,6 +123,9 @@ std::vector<StaticShapeBroadcastParam> broadcastParam = {
std::vector<InferenceEngine::Precision> broadcastPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::I32,
InferenceEngine::Precision::U32,
InferenceEngine::Precision::I64,
InferenceEngine::Precision::U64,
};
INSTANTIATE_TEST_SUITE_P(smoke_accuracy, StaticShapeBroadcastLayerTest,

View File

@@ -282,7 +282,9 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) {
EXPECT_NO_THROW(importedExecNet.output("concat_op").get_node());
const auto outputType = elementType == ngraph::element::i32 ||
elementType == ngraph::element::i64 ? ngraph::element::i32 : ngraph::element::f32;
elementType == ngraph::element::u32 ||
elementType == ngraph::element::i64 ||
elementType == ngraph::element::u64 ? ngraph::element::i32 : ngraph::element::f32;
const auto inputType = elementType == ngraph::element::f16 ? ngraph::element::Type_t::f32 : elementType;
EXPECT_EQ(inputType, importedExecNet.input("param1").get_element_type());

View File

@@ -35,6 +35,7 @@ void GatherElementsLayerTest::SetUp() {
InferenceEngine::Precision dPrecision, iPrecision;
int axis;
std::tie(dataShape, indicesShape, axis, dPrecision, iPrecision, targetDevice) = this->GetParam();
outPrc = dPrecision;
auto ngDPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dPrecision);
auto ngIPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iPrecision);