Merge remote-tracking branch 'upstream/master' into revise_op_select

This commit is contained in:
jdanieck 2021-07-12 13:58:18 +02:00
commit c2dd8f3a0f
38 changed files with 1277 additions and 1996 deletions

View File

@ -103,7 +103,6 @@ jobs:
cmakeArgs: >
-GNinja
-DVERBOSE_BUILD=ON
-DENABLE_TEMPLATE_PLUGIN=ON
-DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
-DENABLE_PYTHON=ON
-DPYTHON_EXECUTABLE=/usr/bin/python3.6

View File

@ -24,8 +24,6 @@ Supported values:\
ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON)
ie_option(ENABLE_TEMPLATE_PLUGIN "Register template plugin into plugins.xml" OFF)
ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \
In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected IntelSEAPI statistics. \
Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF

View File

@ -36,10 +36,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE
set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
# ATTENTION: uncomment to register a plugin in the plugins.xml file
if(ENABLE_TEMPLATE_PLUGIN)
ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
POSSIBLE_PLUGINS ${TARGET_NAME})
endif()
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
# POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]
# ATTENTION: uncomment to install component

View File

@ -66,8 +66,17 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// TODO: add post-processing based on outputsInfoMap
// Example: register CommonOptimizations transformation from transformations library
passManager.register_pass<ngraph::pass::CommonOptimizations>();
// Template plugin handles only FP32 networks
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
// GAPI supports only FP32 networks for pre-processing
bool needF16toF32 = false;
for (const auto& param : function->get_parameters()) {
if (param->get_element_type() == ngraph::element::f16 &&
inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) {
needF16toF32 = true;
break;
}
}
if (needF16toF32)
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
// Example: register plugin specific transformation
passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();

View File

@ -0,0 +1,173 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "base_reference_test.hpp"
#include <gtest/gtest.h>
#include "transformations/utils/utils.hpp"
using namespace InferenceEngine;
// Construct the reference-test harness targeting the TEMPLATE plugin and
// obtain a shared InferenceEngine::Core instance from the plugin cache.
CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
core = PluginCache::get().ie(targetDevice);
}
// Run the full reference-test pipeline in order: compile the network,
// prepare the input blobs, execute inference, and compare the outputs
// against the stored reference blobs.
void CommonReferenceTest::Exec() {
LoadNetwork();
FillInputs();
Infer();
Validate();
}
void CommonReferenceTest::LoadNetwork() {
InferenceEngine::CNNNetwork cnnNetwork(function);
auto inputInfo = cnnNetwork.getInputsInfo();
auto outputInfo = cnnNetwork.getOutputsInfo();
for (const auto& param : function->get_parameters()) {
inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
}
for (const auto& result : function->get_results()) {
outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
InferenceEngine::details::convertPrecision(result->get_element_type()));
}
executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}
void CommonReferenceTest::FillInputs() {
const auto& inputInfo = executableNetwork.GetInputsInfo();
const auto& params = function->get_parameters();
ASSERT_EQ(params.size(), inputData.size());
ASSERT_EQ(inputInfo.size(), inputData.size());
for (size_t i = 0; i < params.size(); i++) {
const auto& param = params[i];
const auto infoIt = inputInfo.find(param->get_friendly_name());
GTEST_ASSERT_NE(infoIt, inputInfo.cend());
const auto& info = infoIt->second;
auto blob = make_blob_with_precision(info->getTensorDesc());
blob->allocate();
ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());
MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
ASSERT_NE(mInputData, nullptr);
auto minputDataHolder = mInputData->rmap();
MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
ASSERT_NE(mBlob, nullptr);
auto mBlobHolder = mBlob->wmap();
std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
inputData[i] = blob;
}
}
// Create an infer request, bind each prepared input blob to the network
// input matching the corresponding function parameter's friendly name, and
// run synchronous inference.
void CommonReferenceTest::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();
    const auto& inputsInfo = executableNetwork.GetInputsInfo();
    const auto& parameters = function->get_parameters();
    for (size_t idx = 0; idx < parameters.size(); ++idx) {
        const auto& parameter = parameters[idx];
        const auto found = inputsInfo.find(parameter->get_friendly_name());
        GTEST_ASSERT_NE(found, inputsInfo.cend());
        inferRequest.SetBlob(found->second->name(), inputData[idx]);
    }
    inferRequest.Infer();
}
void CommonReferenceTest::Validate() {
ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
std::vector<InferenceEngine::Blob::Ptr> outputs;
for (const auto& result : function->get_results()) {
auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
outputs.emplace_back(inferRequest.GetBlob(name));
}
ASSERT_EQ(refOutData.size(), outputs.size());
for (size_t i = 0; i < refOutData.size(); i++) {
ValidateBlobs(refOutData[i], outputs[i]);
}
}
// Compare a reference blob against the matching plugin output blob.
// Both blobs must agree on precision and byte size; the element-wise
// comparison is dispatched on the reference precision and delegates to
// LayerTestsUtils::LayerTestsCommon::Compare with the member `threshold`.
void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
ASSERT_TRUE(refBlob != nullptr);
ASSERT_TRUE(outBlob != nullptr);
ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());
// Lock both blobs for reading and view them as raw byte buffers.
auto mRef = as<InferenceEngine::MemoryBlob>(refBlob);
IE_ASSERT(mRef);
const auto refLockMemory = mRef->rmap();
const auto refBuffer = refLockMemory.as<const std::uint8_t*>();
auto mOut = as<InferenceEngine::MemoryBlob>(outBlob);
IE_ASSERT(mOut);
const auto outLockMemory = mOut->rmap();
const auto outBuffer = outLockMemory.as<const std::uint8_t*>();
// Dispatch on the reference precision; both sides were asserted equal above.
const auto& precision = refBlob->getTensorDesc().getPrecision();
switch (precision) {
case InferenceEngine::Precision::BF16:
LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
break;
case InferenceEngine::Precision::FP16:
LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
break;
case InferenceEngine::Precision::FP32:
LayerTestsUtils::LayerTestsCommon::Compare<float, float>(reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer),
refBlob->size(), threshold);
break;
case InferenceEngine::Precision::I8:
LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer),
refBlob->size(), threshold);
break;
case InferenceEngine::Precision::I16:
LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer),
refBlob->size(), threshold);
break;
case InferenceEngine::Precision::I32:
LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer),
refBlob->size(), threshold);
break;
case InferenceEngine::Precision::I64:
LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer),
refBlob->size(), threshold);
break;
// BOOL is stored one element per byte, so it shares the U8 comparator.
case InferenceEngine::Precision::BOOL:
case InferenceEngine::Precision::U8:
LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
refBlob->size(), threshold);
break;
case InferenceEngine::Precision::U16:
LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(reinterpret_cast<const uint16_t*>(refBuffer),
reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
break;
case InferenceEngine::Precision::U32:
LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(reinterpret_cast<const uint32_t*>(refBuffer),
reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
break;
case InferenceEngine::Precision::U64:
LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(reinterpret_cast<const uint64_t*>(refBuffer),
reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
break;
// Two 4-bit elements are packed per byte, so the packed buffers are
// compared byte-wise over size()/2 bytes.
case InferenceEngine::Precision::I4:
case InferenceEngine::Precision::U4:
LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
refBlob->size() / 2, threshold);
break;
// Eight 1-bit elements are packed per byte: compare size()/8 bytes.
case InferenceEngine::Precision::BIN:
LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
refBlob->size() / 8, threshold);
break;
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
}
}

View File

@ -0,0 +1,53 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
// Base harness for single-operation reference tests: compiles the ngraph
// `function` on `targetDevice`, feeds `inputData`, runs inference, and
// compares the produced blobs against `refOutData`.
class CommonReferenceTest {
public:
CommonReferenceTest();
// Runs LoadNetwork -> FillInputs -> Infer -> Validate in sequence.
void Exec();
// Compiles `function` for `targetDevice` with input/output precisions
// forced to the ngraph element types.
void LoadNetwork();
// Re-allocates `inputData` blobs to match the compiled network's descriptors.
void FillInputs();
// Binds input blobs by parameter friendly name and runs inference.
void Infer();
// Compares every network output against the corresponding reference blob.
void Validate();
private:
// Element-wise comparison of one reference blob against one output blob.
void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);
protected:
const std::string targetDevice;
std::shared_ptr<InferenceEngine::Core> core;
std::shared_ptr<ngraph::Function> function;
InferenceEngine::ExecutableNetwork executableNetwork;
InferenceEngine::InferRequest inferRequest;
// Input blobs, ordered like function->get_parameters().
std::vector<InferenceEngine::Blob::Ptr> inputData;
// Expected output blobs, ordered like function->get_results().
std::vector<InferenceEngine::Blob::Ptr> refOutData;
// Absolute tolerance handed to the element-wise comparators.
float threshold = 1e-2f;
};
// Allocate a 1-D (Layout::C) blob whose precision matches `element_type`
// and copy `values` into it. `size` overrides the deduced element count —
// required for sub-byte precisions, where several elements share one byte
// and the count cannot be derived from sizeof(T).
template <class T>
InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
    const size_t elementCount = (size != 0) ? size : values.size() * sizeof(T) / element_type.size();
    const InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(element_type), {elementCount}, InferenceEngine::Layout::C);
    auto blob = make_blob_with_precision(desc);
    blob->allocate();
    InferenceEngine::MemoryBlob::Ptr memoryBlob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    IE_ASSERT(memoryBlob);
    auto lockedMemory = memoryBlob->wmap();
    // Never copy more bytes than either the blob or the source vector holds.
    const size_t bytesToCopy = std::min(elementCount * element_type.size(), sizeof(T) * values.size());
    std::memcpy(lockedMemory.as<void*>(), values.data(), bytesToCopy);
    return blob;
}

View File

@ -0,0 +1,441 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>
#include "base_reference_test.hpp"
using namespace ngraph;
using namespace InferenceEngine;
// Parameters of one Convert reference case: the tensor shape, the input and
// output element types, and the corresponding input/reference blobs built
// via CreateBlob.
struct ConvertParams {
// iSize/oSize override the deduced element counts in CreateBlob (used for
// sub-byte precisions such as u1/u4/i4); 0 means "derive from the vector".
template <class IT, class OT>
ConvertParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const ngraph::element::Type& oType, const std::vector<IT>& iValues,
const std::vector<OT>& oValues, size_t iSize = 0, size_t oSize = 0)
: pshape(shape), inType(iType), outType(oType), inputData(CreateBlob(iType, iValues, iSize)), refData(CreateBlob(oType, oValues, oSize)) {}
ngraph::PartialShape pshape;
ngraph::element::Type inType;
ngraph::element::Type outType;
InferenceEngine::Blob::Ptr inputData;
InferenceEngine::Blob::Ptr refData;
};
// Parametrized reference test for ngraph::op::Convert: builds a one-node
// Parameter -> Convert function and compares the plugin output against the
// hardcoded reference values supplied via ConvertParams.
class ReferenceConvertLayerTest : public testing::TestWithParam<ConvertParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& testParams = GetParam();
        function = CreateFunction(testParams.pshape, testParams.inType, testParams.outType);
        inputData = {testParams.inputData};
        refOutData = {testParams.refData};
    }

    // Encodes shape and input/output types into the generated test name.
    static std::string getTestCaseName(const testing::TestParamInfo<ConvertParams>& obj) {
        const auto& testParams = obj.param;
        std::ostringstream name;
        name << "shape=" << testParams.pshape << "_";
        name << "iType=" << testParams.inType << "_";
        name << "oType=" << testParams.outType;
        return name.str();
    }

private:
    // Builds Parameter(input_type, input_shape) -> Convert(expected_output_type).
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto param = std::make_shared<op::Parameter>(input_type, input_shape);
        const auto convert = std::make_shared<op::Convert>(param, expected_output_type);
        return std::make_shared<Function>(NodeVector {convert}, ParameterVector {param});
    }
};
// Runs the full load/fill/infer/validate pipeline for each ConvertParams
// instance supplied by INSTANTIATE_TEST_SUITE_P.
TEST_P(ReferenceConvertLayerTest, CompareWithHardcodedRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(
smoke_Convert_With_Hardcoded_Refs, ReferenceConvertLayerTest,
::testing::Values(
// destination boolean
ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::u8, ngraph::element::boolean,
std::vector<uint8_t> {0, 12, 23, 0, std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max()},
std::vector<char> {0, 1, 1, 0, 0, 1}),
ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::i32, ngraph::element::boolean,
std::vector<int32_t> {0, -12, 23, 0, std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max()},
std::vector<char> {0, 1, 1, 0, 1, 1}),
ConvertParams(ngraph::PartialShape {3, 3}, ngraph::element::f32, ngraph::element::boolean,
std::vector<float> {0.f, 1.5745f, 0.12352f, 0.f, std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max(),
std::numeric_limits<float>::min(), std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity()},
std::vector<char> {0, 1, 1, 0, 1, 1, 1, 1, 1}),
// destination bf16
ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::bf16,
std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::bf16,
std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
std::vector<bfloat16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),
// destination f16
ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f16,
std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::f16, std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
std::vector<float16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),
// destination f32
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u1, ngraph::element::f32, std::vector<uint8_t> {0xA0},
std::vector<float> {1.0f, 0.0f, 1.0f, 0.0f}, 4),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u4, ngraph::element::f32, std::vector<uint8_t> {0xFB, 0x0A},
std::vector<float> {15.0f, 11.0f, 0.0f, 10.0f}, 4),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32, std::vector<uint8_t> {255, 128, 32, 0},
std::vector<float> {255.0f, 128.0f, 32.0f, 0.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u16, ngraph::element::f32, std::vector<uint16_t> {64000, 32000, 128, 0},
std::vector<float> {64000.0f, 32000.0f, 128.0f, 0.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u32, ngraph::element::f32, std::vector<uint32_t> {4000000, 2000000, 128, 0},
std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u64, ngraph::element::f32, std::vector<uint64_t> {4000000, 2000000, 128, 0},
std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i4, ngraph::element::f32, std::vector<uint8_t> {0xFE, 0xF2},
std::vector<float> {-1.0f, -2.0f, -1.0f, 2.0f}, 4),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i8, ngraph::element::f32, std::vector<int8_t> {-127, -0, 0, 127},
std::vector<float> {-127.0f, -0.0f, 0.0f, 127.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i16, ngraph::element::f32, std::vector<int16_t> {-32000, -0, 0, 32000},
std::vector<float> {-32000.0f, -0.0f, 0.0f, 32000.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i32, ngraph::element::f32, std::vector<int32_t> {-64000, -0, 0, 64000},
std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i64, ngraph::element::f32, std::vector<int64_t> {-64000, -0, 0, 64000},
std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::bf16, ngraph::element::f32,
std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f16, ngraph::element::f32,
std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f32,
std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
// destination i4
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::i4, std::vector<uint8_t> {0xA0}, std::vector<uint8_t> {0x10, 0x10}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i4, std::vector<uint8_t> {0x12, 0x03}, std::vector<uint8_t> {0x12, 0x03},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i4, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<uint8_t> {0x12, 0x03},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i4, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<uint8_t> {0x12, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i4, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<uint8_t> {0x12, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i4, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<uint8_t> {0x12, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i4, std::vector<uint8_t> {0xFE, 0x03}, std::vector<uint8_t> {0xFE, 0x03},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i4, std::vector<int8_t> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i4, std::vector<int16_t> {-1, -2, 2, 3},
std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i4, std::vector<int32_t> {-1, -2, 2, 3},
std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i4, std::vector<int64_t> {-1, -2, 2, 3},
std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i4, std::vector<ngraph::float16> {-1, -2, 0, 3},
std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i4, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i4, std::vector<float> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23},
4, 4),
// destination i8
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i8, std::vector<uint8_t> {0x81},
std::vector<int8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43}, std::vector<int8_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i8, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<int8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i8, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<int8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i8, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<int8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i8, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<int8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43}, std::vector<int8_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i8, std::vector<int8_t> {-1, -2, 2, 3},
std::vector<int8_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i8, std::vector<int16_t> {-1, -2, 2, 3},
std::vector<int8_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i8, std::vector<int32_t> {-1, -2, 2, 3},
std::vector<int8_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i8, std::vector<int64_t> {-1, -2, 2, 3},
std::vector<int8_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i8, std::vector<ngraph::float16> {-1, -2, 0, 3},
std::vector<int8_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i8, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
std::vector<int8_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i8, std::vector<float> {-1, -2, 2, 3},
std::vector<int8_t> {-1, -2, 2, 3}),
// destination i16
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i16, std::vector<uint8_t> {0x81},
std::vector<int16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43}, std::vector<int16_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i16, std::vector<uint8_t> {1, 2, 0, 3},
std::vector<int16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i16, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<int16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i16, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<int16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i16, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<int16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43}, std::vector<int16_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i16, std::vector<int8_t> {-1, -2, 2, 3},
std::vector<int16_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i16, std::vector<int16_t> {-1, -2, 2, 3},
std::vector<int16_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i16, std::vector<int32_t> {-1, -2, 2, 3},
std::vector<int16_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i16, std::vector<int64_t> {-1, -2, 2, 3},
std::vector<int16_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i16, std::vector<ngraph::float16> {-1, -2, 0, 3},
std::vector<int16_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i16, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
std::vector<int16_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i16, std::vector<float> {-1, -2, 2, 3},
std::vector<int16_t> {-1, -2, 2, 3}),
// destination i32
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i32, std::vector<uint8_t> {0x81},
std::vector<int32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43}, std::vector<int32_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i32, std::vector<uint8_t> {1, 2, 0, 3},
std::vector<int32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i32, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<int32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i32, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<int32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i32, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<int32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43}, std::vector<int32_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i32, std::vector<int8_t> {-1, -2, 2, 3},
std::vector<int32_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i32, std::vector<int16_t> {-1, -2, 2, 3},
std::vector<int32_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i32, std::vector<int32_t> {-1, -2, 2, 3},
std::vector<int32_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i32, std::vector<int64_t> {-1, -2, 2, 3},
std::vector<int32_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i32, std::vector<ngraph::float16> {-1, -2, 0, 3},
std::vector<int32_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i32, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
std::vector<int32_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i32, std::vector<float> {-1, -2, 2, 3},
std::vector<int32_t> {-1, -2, 2, 3}),
// destination i64
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i64, std::vector<uint8_t> {0x81},
std::vector<int64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43}, std::vector<int64_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i64, std::vector<uint8_t> {1, 2, 0, 3},
std::vector<int64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i64, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<int64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i64, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<int64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i64, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<int64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43}, std::vector<int64_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i64, std::vector<int8_t> {-1, -2, 2, 3},
std::vector<int64_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i64, std::vector<int16_t> {-1, -2, 2, 3},
std::vector<int64_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i64, std::vector<int32_t> {-1, -2, 2, 3},
std::vector<int64_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i64, std::vector<int64_t> {-1, -2, 2, 3},
std::vector<int64_t> {-1, -2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i64, std::vector<ngraph::float16> {-1, -2, 0, 3},
std::vector<int64_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i64, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
std::vector<int64_t> {-1, -2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i64, std::vector<float> {-1, -2, 2, 3},
std::vector<int64_t> {-1, -2, 2, 3}),
// destination u1
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u1, std::vector<uint8_t> {0xA0}, std::vector<uint8_t> {0xA0}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u4, ngraph::element::u1, std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00},
std::vector<uint8_t> {0x90}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u8, ngraph::element::u1, std::vector<uint8_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u16, ngraph::element::u1, std::vector<uint16_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u32, ngraph::element::u1, std::vector<uint32_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u64, ngraph::element::u1, std::vector<uint64_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::i4, ngraph::element::u1, std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00},
std::vector<uint8_t> {0x90}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::i8, ngraph::element::u1, std::vector<int8_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::i16, ngraph::element::u1, std::vector<int16_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::i32, ngraph::element::u1, std::vector<int32_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::i64, ngraph::element::u1, std::vector<int64_t> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::f16, ngraph::element::u1, std::vector<ngraph::float16> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::bf16, ngraph::element::u1, std::vector<ngraph::bfloat16> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
ConvertParams(ngraph::PartialShape {8}, ngraph::element::f32, ngraph::element::u1, std::vector<float> {1, 0, 1, 0, 0, 0, 0, 1},
std::vector<uint8_t> {0xA1}, 8, 8),
// destination u4
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::u4, std::vector<uint8_t> {0xA0}, std::vector<uint8_t> {0x10, 0x10}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u4, std::vector<uint8_t> {0x12, 0x03}, std::vector<uint8_t> {0x12, 0x03},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u4, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<uint8_t> {0x12, 0x03},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u4, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<uint8_t> {0x12, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u4, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<uint8_t> {0x12, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u4, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<uint8_t> {0x12, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u4, std::vector<uint8_t> {0xFE, 0x03}, std::vector<uint8_t> {0xFE, 0x03},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u4, std::vector<int8_t> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23},
4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u4, std::vector<int16_t> {-1, -2, 2, 3},
std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u4, std::vector<int32_t> {-1, -2, 2, 3},
std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u4, std::vector<int64_t> {-1, -2, 2, 3},
std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u4, std::vector<ngraph::float16> {-1, -2, 0, 3},
std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u4, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u4, std::vector<float> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23},
4, 4),
// destination u8
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u8, std::vector<uint8_t> {0x81},
std::vector<uint8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43}, std::vector<uint8_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u8, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<uint8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u8, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<uint8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u8, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<uint8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u8, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<uint8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43}, std::vector<uint8_t> {2, 1, 4, 3},
4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u8, std::vector<int8_t> {1, 2, 2, 3}, std::vector<uint8_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u8, std::vector<int16_t> {1, 2, 2, 3},
std::vector<uint8_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u8, std::vector<int32_t> {1, 2, 2, 3},
std::vector<uint8_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u8, std::vector<int64_t> {1, 2, 2, 3},
std::vector<uint8_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u8, std::vector<ngraph::float16> {1, 2, 0, 3},
std::vector<uint8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u8, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
std::vector<uint8_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u8, std::vector<float> {1, 2, 2, 3}, std::vector<uint8_t> {1, 2, 2, 3}),
// destination u16
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u16, std::vector<uint8_t> {0x81},
std::vector<uint16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
std::vector<uint16_t> {2, 1, 4, 3}, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u16, std::vector<uint8_t> {1, 2, 0, 3},
std::vector<uint16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u16, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<uint16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u16, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<uint16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u16, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<uint16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
std::vector<uint16_t> {2, 1, 4, 3}, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u16, std::vector<int8_t> {1, 2, 2, 3},
std::vector<uint16_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u16, std::vector<int16_t> {1, 2, 2, 3},
std::vector<uint16_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u16, std::vector<int32_t> {1, 2, 2, 3},
std::vector<uint16_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u16, std::vector<int64_t> {1, 2, 2, 3},
std::vector<uint16_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u16, std::vector<ngraph::float16> {1, 2, 0, 3},
std::vector<uint16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u16, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
std::vector<uint16_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u16, std::vector<float> {1, 2, 2, 3},
std::vector<uint16_t> {1, 2, 2, 3}),
// destination u32
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u32, std::vector<uint8_t> {0x81},
std::vector<uint32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
std::vector<uint32_t> {2, 1, 4, 3}, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u32, std::vector<uint8_t> {1, 2, 0, 3},
std::vector<uint32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u32, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<uint32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u32, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<uint32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u32, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<uint32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
std::vector<uint32_t> {2, 1, 4, 3}, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u32, std::vector<int8_t> {1, 2, 2, 3},
std::vector<uint32_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u32, std::vector<int16_t> {1, 2, 2, 3},
std::vector<uint32_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u32, std::vector<int32_t> {1, 2, 2, 3},
std::vector<uint32_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u32, std::vector<int64_t> {1, 2, 2, 3},
std::vector<uint32_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u32, std::vector<ngraph::float16> {1, 2, 0, 3},
std::vector<uint32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u32, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
std::vector<uint32_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u32, std::vector<float> {1, 2, 2, 3},
std::vector<uint32_t> {1, 2, 2, 3}),
// destination u64
ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u64, std::vector<uint8_t> {0x81},
std::vector<uint64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
std::vector<uint64_t> {2, 1, 4, 3}, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u64, std::vector<uint8_t> {1, 2, 0, 3},
std::vector<uint64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u64, std::vector<uint16_t> {1, 2, 0, 3},
std::vector<uint64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u64, std::vector<uint32_t> {1, 2, 0, 3},
std::vector<uint64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u64, std::vector<uint64_t> {1, 2, 0, 3},
std::vector<uint64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
std::vector<uint64_t> {2, 1, 4, 3}, 4),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u64, std::vector<int8_t> {1, 2, 2, 3},
std::vector<uint64_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u64, std::vector<int16_t> {1, 2, 2, 3},
std::vector<uint64_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u64, std::vector<int32_t> {1, 2, 2, 3},
std::vector<uint64_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u64, std::vector<int64_t> {1, 2, 2, 3},
std::vector<uint64_t> {1, 2, 2, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u64, std::vector<ngraph::float16> {1, 2, 0, 3},
std::vector<uint64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u64, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
std::vector<uint64_t> {1, 2, 0, 3}),
ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u64, std::vector<float> {1, 2, 2, 3},
std::vector<uint64_t> {1, 2, 2, 3})),
ReferenceConvertLayerTest::getTestCaseName);

View File

@ -73,7 +73,7 @@ using IEClassSetConfigTestHETERO = IEClassNetworkTest;
TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
{
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO"));
@ -84,7 +84,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
}
{
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO)}}, "HETERO"));
@ -95,7 +95,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
}
{
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
@ -118,7 +118,7 @@ INSTANTIATE_TEST_SUITE_P(
using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;
TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
std::string deviceName = CommonTestUtils::DEVICE_TEMPLATE;
@ -209,4 +209,4 @@ INSTANTIATE_TEST_SUITE_P(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
#endif // ENABLE_MKL_DNN
} // namespace
} // namespace

View File

@ -134,6 +134,8 @@ inline Precision convertPrecision(const ::ngraph::element::Type& precision) {
return Precision(Precision::BIN);
case ::ngraph::element::Type_t::boolean:
return Precision(Precision::BOOL);
case ::ngraph::element::Type_t::dynamic:
return Precision(Precision::UNSPECIFIED);
default:
IE_THROW() << "Incorrect precision " << precision.get_type_name() << "!"; return{};
}

View File

@ -55,7 +55,7 @@ class TRANSFORMATIONS_API FrameworkNode : public Op {
public:
NGRAPH_RTTI_DECLARATION;
explicit FrameworkNode(const OutputVector& inputs);
explicit FrameworkNode(const OutputVector& inputs, size_t output_size = 1);
void validate_and_infer_types() override;

View File

@ -10,8 +10,9 @@ using namespace ngraph;
NGRAPH_RTTI_DEFINITION(op::FrameworkNode, "FrameworkNode", 0);
op::FrameworkNode::FrameworkNode(const OutputVector& inputs)
op::FrameworkNode::FrameworkNode(const OutputVector& inputs, size_t output_size)
: Op(inputs) {
set_output_size(output_size);
constructor_validate_and_infer_types();
}

View File

@ -495,6 +495,7 @@ std::string get_opset_name(
std::string get_precision_name(const ngraph::element::Type & elem_type) {
switch (elem_type) {
case ::ngraph::element::Type_t::undefined:
case ::ngraph::element::Type_t::dynamic:
return "UNSPECIFIED";
case ::ngraph::element::Type_t::f16:
return "FP16";

View File

@ -14,13 +14,23 @@ namespace {
const std::vector<std::vector<size_t>> inShape = {{1, 2, 3, 4}};
const std::vector<Precision> precisions = {
Precision::U8,
// Ticket: 59594
// Precision::I4,
Precision::I8,
Precision::U16,
Precision::I16,
Precision::I32,
Precision::U64,
Precision::I64,
// Ticket: 59594
// Precision::BIN,
// Precision::BOOL,
// Precision::U4,
Precision::U8,
Precision::U16,
// Ticket: 59594
// Precision::U32,
Precision::U64,
Precision::BF16,
Precision::FP16,
Precision::FP32
};
@ -34,4 +44,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertLayerTest, ConvertLayerTest,
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConvertLayerTest::getTestCaseName);
} // namespace
} // namespace

View File

@ -62,6 +62,14 @@ namespace BehaviorTestsDefinitions {
}
inline Core createCoreWithTemplate() {
Core ie;
std::string pluginName = "templatePlugin";
pluginName += IE_BUILD_POSTFIX;
ie.RegisterPlugin(pluginName, "TEMPLATE");
return ie;
}
class IEClassBasicTestP : public ::testing::Test, public WithParamInterface<std::pair<std::string, std::string> > {
protected:
std::string deviceName;
@ -211,20 +219,20 @@ TEST(IEClassBasicTest, smoke_createDefault) {
TEST_P(IEClassBasicTestP, registerExistingPluginThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.RegisterPlugin(pluginName, deviceName), Exception);
}
TEST_P(IEClassBasicTestP, registerNewPluginNoThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.RegisterPlugin(pluginName, "NEW_DEVICE_NAME"));
ASSERT_NO_THROW(ie.GetMetric("NEW_DEVICE_NAME", METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
}
TEST(IEClassBasicTest, smoke_registerExistingPluginFileThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.RegisterPlugins("nonExistPlugins.xml"), Exception);
}
@ -277,7 +285,7 @@ TEST_P(IEClassBasicTestP, smoke_registerPluginsXMLUnicodePath) {
GTEST_COUT << "Test " << testIndex << std::endl;
Core ie;
Core ie = createCoreWithTemplate();
GTEST_COUT << "Core created " << testIndex << std::endl;
ASSERT_NO_THROW(ie.RegisterPlugins(::FileUtils::wStringtoMBCSstringChar(pluginsXmlW)));
CommonTestUtils::removeFile(pluginsXmlW);
@ -310,19 +318,19 @@ TEST_P(IEClassBasicTestP, smoke_registerPluginsXMLUnicodePath) {
TEST_P(IEClassBasicTestP, getVersionsByExactDeviceNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.GetVersions(deviceName + ".0"));
}
TEST_P(IEClassBasicTestP, getVersionsByDeviceClassNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.GetVersions(deviceName));
}
TEST_P(IEClassBasicTestP, getVersionsNonEmpty) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_EQ(2, ie.GetVersions(CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName).size());
}
@ -332,7 +340,7 @@ TEST_P(IEClassBasicTestP, getVersionsNonEmpty) {
TEST_P(IEClassBasicTestP, unregisterExistingPluginNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
// device instance is not created yet
ASSERT_THROW(ie.UnregisterPlugin(deviceName), Exception);
@ -344,7 +352,7 @@ TEST_P(IEClassBasicTestP, unregisterExistingPluginNoThrow) {
TEST_P(IEClassBasicTestP, accessToUnregisteredPluginThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.UnregisterPlugin(deviceName), Exception);
ASSERT_NO_THROW(ie.GetVersions(deviceName));
ASSERT_NO_THROW(ie.UnregisterPlugin(deviceName));
@ -355,7 +363,7 @@ TEST_P(IEClassBasicTestP, accessToUnregisteredPluginThrows) {
TEST(IEClassBasicTest, smoke_unregisterNonExistingPluginThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.UnregisterPlugin("unkown_device"), Exception);
}
@ -365,45 +373,45 @@ TEST(IEClassBasicTest, smoke_unregisterNonExistingPluginThrows) {
TEST_P(IEClassBasicTestP, SetConfigAllThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.SetConfig({{"unsupported_key", "4"}}));
ASSERT_ANY_THROW(ie.GetVersions(deviceName));
}
TEST_P(IEClassBasicTestP, SetConfigForUnRegisteredDeviceThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.SetConfig({{"unsupported_key", "4"}}, "unregistered_device"), Exception);
}
TEST_P(IEClassBasicTestP, SetConfigNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.SetConfig({{KEY_PERF_COUNT, YES}}, deviceName));
}
TEST_P(IEClassBasicTestP, SetConfigAllNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.SetConfig({{KEY_PERF_COUNT, YES}}));
ASSERT_NO_THROW(ie.GetVersions(deviceName));
}
TEST(IEClassBasicTest, smoke_SetConfigHeteroThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.SetConfig({{KEY_PERF_COUNT, YES}}, CommonTestUtils::DEVICE_HETERO));
}
TEST_P(IEClassBasicTestP, SetConfigHeteroTargetFallbackThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.SetConfig({{"TARGET_FALLBACK", deviceName}}, CommonTestUtils::DEVICE_HETERO));
}
TEST(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
bool value = false;
ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), YES}}, CommonTestUtils::DEVICE_HETERO));
@ -421,7 +429,7 @@ TEST(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) {
TEST_P(IEClassBasicTestP, ImportNetworkThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
if (deviceName == CommonTestUtils::DEVICE_CPU ||
deviceName == CommonTestUtils::DEVICE_GPU) {
@ -440,20 +448,20 @@ TEST_P(IEClassBasicTestP, ImportNetworkThrows) {
TEST(IEClassBasicTest, smoke_ImportNetworkHeteroThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.ImportNetwork("model", CommonTestUtils::DEVICE_HETERO), NetworkNotRead);
}
TEST(IEClassBasicTest, smoke_ImportNetworkMultiThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
InferenceEngine::Core ie;
InferenceEngine::Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.ImportNetwork("model", CommonTestUtils::DEVICE_MULTI), NetworkNotRead);
}
TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
RemoteContext::Ptr context = nullptr;
std::istringstream stream("None");
ASSERT_THROW(ie.ImportNetwork(stream, context, {}), Exception);
@ -465,19 +473,19 @@ TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) {
TEST_P(IEClassNetworkTestP, LoadNetworkActualNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, deviceName));
}
TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDeviceNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName));
}
TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}}));
}
@ -487,7 +495,7 @@ TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) {
TEST_P(IEClassImportExportTestP, smoke_ImportNetworkThrowsIfNoDeviceName) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
std::stringstream strm;
ExecutableNetwork executableNetwork;
ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
@ -500,7 +508,7 @@ TEST_P(IEClassImportExportTestP, smoke_ImportNetworkThrowsIfNoDeviceName) {
TEST_P(IEClassImportExportTestP, smoke_ImportNetworkNoThrowWithDeviceName) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
std::stringstream strm;
ExecutableNetwork executableNetwork;
ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
@ -511,7 +519,7 @@ TEST_P(IEClassImportExportTestP, smoke_ImportNetworkNoThrowWithDeviceName) {
TEST_P(IEClassImportExportTestP, smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ExecutableNetwork executableNetwork;
std::string fileName{"ExportedNetwork"};
{
@ -534,13 +542,13 @@ TEST_P(IEClassImportExportTestP, smoke_ExportUsingFileNameImportFromStreamNoThro
TEST_P(IEClassNetworkTestP, QueryNetworkActualThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName));
}
TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
try {
ie.QueryNetwork(actualNetwork, deviceName);
@ -552,7 +560,7 @@ TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) {
TEST_P(IEClassNetworkTestP, QueryNetworkWithKSO) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
try {
auto rres = ie.QueryNetwork(ksoNetwork, deviceName);
@ -571,7 +579,7 @@ TEST_P(IEClassNetworkTestP, QueryNetworkWithKSO) {
TEST_P(IEClassNetworkTestP, SetAffinityWithConstantBranches) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
try {
std::shared_ptr<ngraph::Function> func;
@ -623,7 +631,7 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithConstantBranches) {
TEST_P(IEClassNetworkTestP, SetAffinityWithKSO) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
try {
auto rres = ie.QueryNetwork(ksoNetwork, deviceName);
@ -647,7 +655,7 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithKSO) {
TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
QueryNetworkResult res;
ASSERT_NO_THROW(res = ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}}));
ASSERT_LT(0, res.supportedLayersMap.size());
@ -655,13 +663,13 @@ TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) {
TEST_P(IEClassNetworkTestP, QueryNetworkMultiThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
ASSERT_THROW(ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_MULTI), Exception);
}
TEST(IEClassBasicTest, smoke_GetMetricSupportedMetricsHeteroNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
std::string deviceName = CommonTestUtils::DEVICE_HETERO;
@ -678,7 +686,7 @@ TEST(IEClassBasicTest, smoke_GetMetricSupportedMetricsHeteroNoThrow) {
TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
std::string deviceName = CommonTestUtils::DEVICE_HETERO;
@ -695,7 +703,7 @@ TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroNoThrow) {
TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
// TODO: check
std::string targetDevice = CommonTestUtils::DEVICE_HETERO + std::string(":") + CommonTestUtils::DEVICE_CPU;
ASSERT_THROW(ie.GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)), Exception);
@ -703,7 +711,7 @@ TEST(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroThrows) {
TEST_P(IEClassGetMetricTest_SUPPORTED_METRICS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS)));
@ -719,7 +727,7 @@ TEST_P(IEClassGetMetricTest_SUPPORTED_METRICS, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
@ -735,7 +743,7 @@ TEST_P(IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_AVAILABLE_DEVICES, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)));
@ -751,7 +759,7 @@ TEST_P(IEClassGetMetricTest_AVAILABLE_DEVICES, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_FULL_DEVICE_NAME, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(FULL_DEVICE_NAME)));
@ -763,7 +771,7 @@ TEST_P(IEClassGetMetricTest_FULL_DEVICE_NAME, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES)));
@ -779,7 +787,7 @@ TEST_P(IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow)
TEST_P(IEClassGetMetricTest_DEVICE_GOPS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(DEVICE_GOPS)));
@ -795,7 +803,7 @@ TEST_P(IEClassGetMetricTest_DEVICE_GOPS, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_DEVICE_TYPE, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(DEVICE_TYPE)));
@ -808,7 +816,7 @@ TEST_P(IEClassGetMetricTest_DEVICE_TYPE, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS)));
@ -821,7 +829,7 @@ TEST_P(IEClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS, GetMetricAndPrintN
TEST_P(IEClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS)));
@ -834,7 +842,7 @@ TEST_P(IEClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS, GetMetricAndPrintNoTh
TEST_P(IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)));
@ -857,7 +865,7 @@ TEST_P(IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoT
TEST_P(IEClassGetMetricTest_RANGE_FOR_STREAMS, GetMetricAndPrintNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(RANGE_FOR_STREAMS)));
@ -877,7 +885,7 @@ TEST_P(IEClassGetMetricTest_RANGE_FOR_STREAMS, GetMetricAndPrintNoThrow) {
TEST_P(IEClassGetMetricTest_ThrowUnsupported, GetMetricThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_THROW(p = ie.GetMetric(deviceName, "unsupported_metric"), Exception);
@ -885,7 +893,7 @@ TEST_P(IEClassGetMetricTest_ThrowUnsupported, GetMetricThrow) {
TEST_P(IEClassGetConfigTest, GetConfigNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
@ -900,7 +908,7 @@ TEST_P(IEClassGetConfigTest, GetConfigNoThrow) {
TEST_P(IEClassGetConfigTest, GetConfigHeteroNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
@ -913,7 +921,7 @@ TEST_P(IEClassGetConfigTest, GetConfigHeteroNoThrow) {
TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_THROW(p = ie.GetConfig(CommonTestUtils::DEVICE_HETERO, "unsupported_config"), Exception);
@ -921,7 +929,7 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) {
TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_THROW(p = ie.GetConfig(CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName, HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)),
@ -930,7 +938,7 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) {
TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_THROW(p = ie.GetConfig(deviceName, "unsupported_config"), Exception);
@ -938,7 +946,7 @@ TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigThrow) {
TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
std::vector<std::string> devices;
ASSERT_NO_THROW(devices = ie.GetAvailableDevices());
@ -962,7 +970,7 @@ TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) {
//
TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -981,7 +989,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoT
TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1000,7 +1008,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow
TEST_P(IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1015,7 +1023,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) {
TEST_P(IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1030,7 +1038,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, G
TEST_P(IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, GetMetricThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1040,7 +1048,7 @@ TEST_P(IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, GetMetricThrow)
TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1057,7 +1065,7 @@ TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoThrow) {
TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1067,7 +1075,7 @@ TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigThrows) {
TEST_P(IEClassExecutableNetworkSetConfigTest, SetConfigThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1077,7 +1085,7 @@ TEST_P(IEClassExecutableNetworkSetConfigTest, SetConfigThrows) {
TEST_P(IEClassExecutableNetworkSupportedConfigTest, SupportedConfigWorks) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1090,7 +1098,7 @@ TEST_P(IEClassExecutableNetworkSupportedConfigTest, SupportedConfigWorks) {
TEST_P(IEClassExecutableNetworkUnsupportedConfigTest, UnsupportedConfigThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
@ -1099,7 +1107,7 @@ TEST_P(IEClassExecutableNetworkUnsupportedConfigTest, UnsupportedConfigThrows) {
TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
@ -1122,7 +1130,7 @@ TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) {
TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter pHetero, pDevice;
ExecutableNetwork heteroExeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
@ -1156,7 +1164,7 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet
TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter pHetero, pDevice;
ExecutableNetwork heteroExeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
@ -1196,7 +1204,7 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN
TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
ExecutableNetwork exeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
@ -1209,7 +1217,7 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThro
TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, GetMetricNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
Parameter p;
setHeteroNetworkAffinity(deviceName);
@ -1237,7 +1245,7 @@ bool supportsDeviceID(Core &ie, const std::string &deviceName) {
TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
@ -1252,7 +1260,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) {
TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
try {
@ -1268,7 +1276,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) {
TEST_P(IEClassQueryNetworkTest, QueryNetworkWithBigDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.QueryNetwork(actualNetwork, deviceName + ".110"), Exception);
@ -1279,7 +1287,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithBigDeviceIDThrows) {
TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.QueryNetwork(actualNetwork, deviceName + ".l0"), Exception);
@ -1290,7 +1298,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) {
TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.QueryNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO,
@ -1305,7 +1313,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) {
//
TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithDeviceIDNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
@ -1320,7 +1328,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithDeviceIDNoThrow) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkWithDeviceIDNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
@ -1334,7 +1342,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithDeviceIDNoThrow) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkWithBigDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.LoadNetwork(actualNetwork, deviceName + ".10"), Exception);
@ -1345,7 +1353,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithBigDeviceIDThrows) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.LoadNetwork(actualNetwork, deviceName + ".l0"), Exception);
@ -1356,7 +1364,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.LoadNetwork(actualNetwork, "HETERO",
@ -1368,7 +1376,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName)) {
ASSERT_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO,
@ -1385,7 +1393,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) {
std::string devices;
auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
@ -1406,7 +1414,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) {
TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) {
std::string devices;
@ -1431,7 +1439,7 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) {
TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) {
std::string devices;
@ -1466,7 +1474,7 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) {
TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) {
std::string devices;
@ -1500,7 +1508,7 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) {
TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
Core ie = createCoreWithTemplate();
{
auto versions = ie.GetVersions(std::string(CommonTestUtils::DEVICE_MULTI) + ":" + deviceName + "," + CommonTestUtils::DEVICE_CPU);
ASSERT_EQ(3, versions.size());
@ -1510,7 +1518,7 @@ TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins
config.insert({"CPU_THREADS_NUM", "3"});
}
ASSERT_NO_THROW({
Core ie;
Core ie = createCoreWithTemplate();
std::string name = actualNetwork.getInputsInfo().begin()->first;
actualNetwork.getInputsInfo().at(name)->setPrecision(Precision::U8);
auto executableNetwork = ie.LoadNetwork(actualNetwork, deviceName, config);

View File

@ -8,6 +8,13 @@
#include "behavior/cpp_holders.hpp"
namespace BehaviorTestsDefinitions {
inline InferenceEngine::Core createCoreWithTemplate() {
InferenceEngine::Core ie;
std::string pluginName = "templatePlugin";
pluginName += IE_BUILD_POSTFIX;
ie.RegisterPlugin(pluginName, "TEMPLATE");
return ie;
}
std::string HoldersTest::getTestCaseName(testing::TestParamInfo<HoldersParams> obj) {
std::string targetDevice;
std::vector<int> order;
@ -47,7 +54,7 @@ namespace BehaviorTestsDefinitions {
void release_order_test(std::vector<int> order, const std::string &deviceName,
std::shared_ptr<ngraph::Function> function) {
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::Core core;
InferenceEngine::Core core = createCoreWithTemplate();
auto exe_net = core.LoadNetwork(cnnNet, deviceName);
auto request = exe_net.CreateInferRequest();
std::vector<InferenceEngine::VariableState> states = {};
@ -60,7 +67,7 @@ namespace BehaviorTestsDefinitions {
auto release = [&](int i) {
switch (i) {
case 0:
core = InferenceEngine::Core{};
core = createCoreWithTemplate();
break;
case 1:
exe_net = {};
@ -84,7 +91,7 @@ namespace BehaviorTestsDefinitions {
std::vector<int> order, const std::string &deviceName,
std::shared_ptr<ngraph::Function> function) {
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::Core core;
InferenceEngine::Core core = createCoreWithTemplate();
std::stringstream stream;
{
auto exe_net = core.LoadNetwork(cnnNet, deviceName);
@ -142,7 +149,7 @@ namespace BehaviorTestsDefinitions {
TEST_P(HoldersTestOnImportedNetwork, CreateRequestWithCoreRemoved) {
InferenceEngine::CNNNetwork cnnNet(function);
InferenceEngine::Core core;
InferenceEngine::Core core = createCoreWithTemplate();
std::stringstream stream;
{
auto exe_net = core.LoadNetwork(cnnNet, targetDevice);

View File

@ -101,7 +101,7 @@ public:
double diff = static_cast<float>(absoluteDifference) / max;
if (max == 0 || (diff > static_cast<float>(threshold)) ||
std::isnan(static_cast<float>(res)) || std::isnan(static_cast<float>(ref))) {
IE_THROW() << "Relative comparison of values expected: " << ref << " and actual: " << res
IE_THROW() << "Relative comparison of values expected: " << std::to_string(ref) << " and actual: " << std::to_string(res)
<< " at index " << i << " with threshold " << threshold
<< " failed";
}

View File

@ -49,6 +49,13 @@ std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string &device
}
assert(0 != ie_core.use_count());
// register template plugin if it is needed
try {
std::string pluginName = "templatePlugin";
pluginName += IE_BUILD_POSTFIX;
ie_core->RegisterPlugin(pluginName, "TEMPLATE");
} catch (...) {}
if (!deviceToCheck.empty()) {
std::vector<std::string> metrics = ie_core->GetMetric(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS));
@ -61,11 +68,13 @@ std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string &device
std::exit(EXIT_FAILURE);
}
#ifndef NDEBUG
std::cout << "Available devices for " << deviceToCheck << ":" << std::endl;
for (const auto &device : availableDevices) {
std::cout << " " << device << std::endl;
}
#endif
}
}
return ie_core;

View File

@ -45,7 +45,7 @@ if(COMMAND ie_faster_build)
)
endif()
target_link_libraries(onnx_importer PRIVATE onnx_common ngraph::builder
target_link_libraries(onnx_importer PRIVATE onnx_common ngraph::builder inference_engine_transformations
PUBLIC ngraph)
target_include_directories(onnx_importer PUBLIC $<BUILD_INTERFACE:${ONNX_IMPORT_INCLUDE_DIR}>

View File

@ -75,9 +75,8 @@ namespace ngraph
bool has_attribute(const std::string& name) const;
Subgraph get_subgraph_from_attribute(
const std::string& name,
const std::map<std::size_t, std::string>& carried_dependencies_map) const;
bool has_subgraph() const;
std::shared_ptr<Subgraph> get_subgraph() const;
template <typename T>
T get_attribute_value(const std::string& name, T default_value) const;

View File

@ -0,0 +1,100 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <core/graph.hpp>
#include <ngraph/visibility.hpp>
#include <ngraph_ops/framework_node.hpp>
#include <onnx_import/core/node.hpp>
namespace ONNX_NAMESPACE
{
// forward declaration
class ModelProto;
} // namespace ONNX_NAMESPACE
namespace ngraph
{
namespace onnx_import
{
class Model;
}
namespace frontend
{
class ONNXFrameworkNode : public op::FrameworkNode
{
public:
NGRAPH_RTTI_DECLARATION;
ONNXFrameworkNode(const onnx_import::Node& node)
: FrameworkNode(node.get_ng_inputs(), node.get_outputs_size())
, m_node(node)
{
}
ONNXFrameworkNode(const onnx_import::Node& node, const OutputVector& inputs)
: FrameworkNode(inputs, node.get_outputs_size())
, m_node(node)
{
}
const onnx_import::Node& get_onnx_node() const { return m_node; }
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& inputs) const override;
virtual bool visit_attributes(AttributeVisitor& visitor) override
{
// TODO: implement reading as well, now it work for serialization only
std::string domain = m_node.domain();
std::string op_type = m_node.op_type();
visitor.on_attribute("ONNX_META_domain", domain);
visitor.on_attribute("ONNX_META_type", op_type);
return true;
}
private:
onnx_import::Node m_node;
};
class ONNXSubgraphFrameworkNode : public ONNXFrameworkNode
{
public:
NGRAPH_RTTI_DECLARATION;
ONNXSubgraphFrameworkNode(const onnx_import::Node& node, const OutputVector& inputs)
: ONNXFrameworkNode(node, inputs)
{
}
void infer_inputs_from_parent()
{
get_onnx_node().get_subgraph()->infer_inputs_from_parent();
}
std::shared_ptr<Function> get_subgraph_body() const
{
auto subgraph = get_onnx_node().get_subgraph();
return std::make_shared<Function>(subgraph->get_ng_outputs(),
subgraph->get_ng_parameters(),
subgraph->get_name());
}
};
} // namespace frontend
} // namespace ngraph

View File

@ -11,9 +11,7 @@ namespace ngraph
{
namespace onnx_import
{
Subgraph Attribute::get_subgraph(
const Graph& parent_graph,
const std::map<std::size_t, std::string>& carried_dependencies_map) const
Subgraph Attribute::get_subgraph(const Graph& parent_graph) const
{
if (m_attribute_proto->type() != ONNX_NAMESPACE::AttributeProto_AttributeType_GRAPH)
{
@ -25,33 +23,6 @@ namespace ngraph
const auto& graph = m_attribute_proto->g();
model_proto->mutable_graph()->CopyFrom(graph);
const std::size_t subgraph_inputs_count =
static_cast<size_t>(model_proto->mutable_graph()->mutable_input()->size());
// Use the `carried_dependencies_map` to infer the types for the subgraph inputs
for (const auto& carried_dependency : carried_dependencies_map)
{
if (carried_dependency.first >= subgraph_inputs_count)
{
NGRAPH_WARN << "Input with index: '" << carried_dependency.first
<< "' was not found in the subgraph";
}
else
{
const auto& parent_in =
parent_graph.get_ng_node_from_cache(carried_dependency.second);
const auto& carried_type = parent_in.get_element_type();
auto subgraph_in =
model_proto->mutable_graph()->mutable_input(carried_dependency.first);
auto subgraph_in_tensor_type =
subgraph_in->mutable_type()->mutable_tensor_type();
if (!subgraph_in_tensor_type->has_elem_type())
{
subgraph_in_tensor_type->set_elem_type(
onnx_common::ng_to_onnx_data_type(carried_type));
}
}
}
// set opset version and domain from the parent graph
model_proto->mutable_opset_import()->CopyFrom(parent_graph.get_opset_imports());
auto model = common::make_unique<Model>(std::move(model_proto));

View File

@ -316,9 +316,7 @@ namespace ngraph
float get_float() const { return m_attribute_proto->f(); }
int64_t get_integer() const { return m_attribute_proto->i(); }
const std::string& get_string() const { return m_attribute_proto->s(); }
Subgraph get_subgraph(
const Graph& parent_graph,
const std::map<std::size_t, std::string>& carried_dependencies_map) const;
Subgraph get_subgraph(const Graph& parent_graph) const;
std::vector<Tensor> get_tensor_array() const
{

View File

@ -14,6 +14,7 @@
#include "ngraph/node.hpp"
#include "ngraph/provenance.hpp"
#include "onnx_import/core/node.hpp"
#include "onnx_import/onnx_framework_node.hpp"
#include "utils/common.hpp"
#include "utils/provenance_tag.hpp"
@ -55,25 +56,6 @@ namespace ngraph
Graph::Graph(std::unique_ptr<Model>&& model)
: Graph(std::move(model), common::make_unique<GraphCache>())
{
// Remove dangling Parameters
for (auto param_it = m_parameters.begin(); param_it != m_parameters.end();)
{
if ((*param_it)->get_output_target_inputs(0).size() == 0)
{
const auto& name = (*param_it)->get_friendly_name();
auto out_it = std::find_if(
m_outputs.begin(), m_outputs.end(), [&name](const ValueInfo& info) {
return info.get_name() == name;
});
if (out_it == m_outputs.end())
{
m_cache->remove_node(name);
param_it = m_parameters.erase(param_it);
continue;
}
}
param_it++;
}
}
Graph::Graph(std::unique_ptr<Model>&& model, std::unique_ptr<GraphCache>&& cache)
@ -174,14 +156,82 @@ namespace ngraph
NGRAPH_CHECK(unknown_operators.empty(),
"nGraph does not support the following ONNX operations: ",
detail::to_string(unknown_operators));
}
void Graph::convert_to_ngraph_nodes()
{
// Process ONNX graph nodes, convert to nGraph nodes
for (const auto& node_proto : m_model->get_graph().node())
{
m_nodes.emplace_back(node_proto, *this);
const Node& node{m_nodes.back()};
if (node.has_subgraph())
{
auto subgraph = node.get_subgraph();
auto body_func = subgraph->convert();
}
OutputVector ng_nodes{node.get_ng_nodes()};
set_friendly_names(node, ng_nodes);
for (std::size_t i{0}; i < node.get_outputs_size(); ++i)
{
m_cache->emplace_node(node.output(i), std::move(ng_nodes.at(i)));
}
}
}
void Graph::remove_dangling_parameters()
{
for (auto param_it = m_parameters.begin(); param_it != m_parameters.end();)
{
if ((*param_it)->get_output_target_inputs(0).size() == 0)
{
const auto& name = (*param_it)->get_friendly_name();
auto out_it = std::find_if(
m_outputs.begin(), m_outputs.end(), [&name](const ValueInfo& info) {
return info.get_name() == name;
});
if (out_it == m_outputs.end())
{
m_cache->remove_node(name);
param_it = m_parameters.erase(param_it);
continue;
}
}
param_it++;
}
}
std::shared_ptr<Function> Graph::convert()
{
convert_to_ngraph_nodes();
remove_dangling_parameters();
return create_function();
}
void Graph::decode_to_framework_nodes()
{
// Process ONNX graph nodes, convert to nGraph nodes
for (const auto& node_proto : m_model->get_graph().node())
{
m_nodes.emplace_back(node_proto, *this);
const Node& node{m_nodes.back()};
std::shared_ptr<frontend::ONNXFrameworkNode> framework_node;
if (node.has_subgraph())
{
auto subgraph = node.get_subgraph();
auto body_func = subgraph->decode();
auto inputs = node.get_ng_inputs();
for (const auto& input : subgraph->get_inputs_from_parent())
inputs.push_back(input);
framework_node =
std::make_shared<ngraph::frontend::ONNXSubgraphFrameworkNode>(node, inputs);
}
else
{
framework_node = std::make_shared<ngraph::frontend::ONNXFrameworkNode>(node);
}
OutputVector ng_nodes{framework_node->outputs()};
set_friendly_names(node, ng_nodes);
// Iterate over the number of outputs for given node in graph.
// Some of them may be optional and trimmed. See:
// https://github.com/onnx/onnx/blob/master/docs/IR.md#optional-inputs-and-outputs
@ -192,12 +242,24 @@ namespace ngraph
}
}
const GraphCache& Graph::get_graph_cache() const { return *m_cache.get(); }
bool Graph::is_node_in_cache(const std::string& name) const
std::shared_ptr<Function> Graph::create_function()
{
return m_cache->contains(name);
auto function = std::make_shared<Function>(get_ng_outputs(), m_parameters, get_name());
for (std::size_t i{0}; i < function->get_output_size(); ++i)
{
function->get_output_op(i)->set_friendly_name(m_outputs.at(i).get_name());
}
return function;
}
std::shared_ptr<Function> Graph::decode()
{
decode_to_framework_nodes();
return create_function();
}
const GraphCache& Graph::get_graph_cache() const { return *m_cache.get(); }
Output<ngraph::Node> Graph::get_ng_node_from_cache(const std::string& name) const
{
return m_cache->get_node(name);
@ -247,6 +309,12 @@ namespace ngraph
set_friendly_names(onnx_node, ng_node_vector);
add_provenance_tags(onnx_node, ng_node_vector);
for (std::size_t i{0}; i < onnx_node.get_outputs_size(); ++i)
{
auto ng_node = ng_node_vector.at(i);
m_cache->emplace_node(onnx_node.output(i), std::move(ng_node));
}
return ng_node_vector;
}
@ -323,9 +391,21 @@ namespace ngraph
}
Subgraph::Subgraph(std::unique_ptr<Model>&& model, const Graph& parent_graph)
: Graph(
std::move(model),
std::unique_ptr<SubgraphCache>(new SubgraphCache(parent_graph.get_graph_cache())))
: Graph(std::move(model), common::make_unique<GraphCache>())
, m_parent_graph_cache(&parent_graph.get_graph_cache())
{
}
Output<ngraph::Node> Subgraph::get_ng_node_from_cache(const std::string& name) const
{
if (m_cache->contains(name))
{
return m_cache->get_node(name);
}
return m_parent_graph_cache->get_node(name);
}
void Subgraph::find_inputs_from_parent()
{
// find all nodes on edge parent graph-subgraph
// (it means input of node from parent graph, output from subgraph)
@ -334,16 +414,16 @@ namespace ngraph
int input_index = 0;
for (const auto& in_name : node_proto.input())
{
if (m_cache->node_scope(in_name) == NodeScope::ParentGraph)
if (m_parent_graph_cache->contains(in_name))
{
const auto& from_parent_node = m_cache->get_node(in_name);
const auto& from_parent_node = m_parent_graph_cache->get_node(in_name);
// constants are skipped
if (!ngraph::is_type<ngraph::op::Constant>(
from_parent_node.get_node_shared_ptr()))
{
for (const auto& out_name : node_proto.output())
{
if (m_cache->node_scope(out_name) == NodeScope::SubGraph)
if (m_cache->contains(out_name))
{
auto out_node_to_replace_input = m_cache->get_node(out_name);
auto new_param = std::make_shared<ngraph::op::Parameter>(
@ -353,8 +433,10 @@ namespace ngraph
out_node_to_replace_input.get_node()
->input(input_index)
.replace_source_output(new_param);
m_parameter_to_parent_node_map.insert({new_param, in_name});
m_cache->emplace_node(in_name, new_param);
m_parameters.push_back(new_param);
m_outputs_from_parent.push_back(from_parent_node);
m_inputs_from_parent.push_back(in_name);
}
}
}
@ -364,11 +446,39 @@ namespace ngraph
}
}
const std::vector<Output<ngraph::Node>> Subgraph::get_outputs_from_parent() const
std::shared_ptr<Function> Subgraph::convert()
{
return m_outputs_from_parent;
convert_to_ngraph_nodes();
find_inputs_from_parent();
return create_function();
}
void Subgraph::decode_to_framework_nodes()
{
Graph::decode_to_framework_nodes();
find_inputs_from_parent();
}
const std::vector<Output<ngraph::Node>> Subgraph::get_inputs_from_parent() const
{
OutputVector result;
for (const auto& name : m_inputs_from_parent)
{
result.push_back(m_parent_graph_cache->get_node(name));
}
return result;
}
void Subgraph::infer_inputs_from_parent()
{
for (auto& it : m_parameter_to_parent_node_map)
{
const auto& node = m_parent_graph_cache->get_node(it.second);
auto& parameter = it.first;
parameter->set_element_type(node.get_element_type());
parameter->set_partial_shape(node.get_partial_shape());
}
}
} // namespace onnx_import
} // namespace ngraph

View File

@ -31,13 +31,14 @@ namespace ngraph
Graph& operator=(const Graph&) = delete;
Graph& operator=(Graph&&) = default;
virtual std::shared_ptr<Function> convert();
std::shared_ptr<Function> decode();
const std::vector<Node>& get_nodes() const { return m_nodes; }
const std::vector<ValueInfo>& get_inputs() const { return m_inputs; }
const std::vector<ValueInfo>& get_outputs() const { return m_outputs; }
OutputVector get_ng_outputs() const;
const ParameterVector& get_ng_parameters() const { return m_parameters; }
bool is_node_in_cache(const std::string& name) const;
Output<ngraph::Node> get_ng_node_from_cache(const std::string& name) const;
virtual Output<ngraph::Node> get_ng_node_from_cache(const std::string& name) const;
const std::string& get_name() const { return m_model->get_graph().name(); }
OutputVector make_ng_nodes(const Node& onnx_node) const;
const GraphCache& get_graph_cache() const;
@ -60,6 +61,11 @@ namespace ngraph
const OutputVector& ng_node_vector) const;
protected:
virtual void decode_to_framework_nodes();
void convert_to_ngraph_nodes();
void remove_dangling_parameters();
std::shared_ptr<Function> create_function();
ParameterVector m_parameters;
std::unique_ptr<Model> m_model;
std::unique_ptr<GraphCache> m_cache;
@ -82,9 +88,11 @@ namespace ngraph
/// \param[in] parent_graph The reference to the parent graph.
Subgraph(std::unique_ptr<Model>&& model, const Graph& parent_graph);
/// \brief Return outputs which are on the edge the subgraph and the parent graph.
/// \brief Return nodes which are on the edge the subgraph and the parent graph.
/// \return Vector of edge nodes from parent scope.
const std::vector<Output<ngraph::Node>> get_outputs_from_parent() const;
const std::vector<Output<ngraph::Node>> get_inputs_from_parent() const;
std::shared_ptr<Function> convert() override;
Subgraph() = delete;
@ -94,8 +102,17 @@ namespace ngraph
Subgraph& operator=(const Subgraph&) = delete;
Subgraph& operator=(Subgraph&&) = default;
Output<ngraph::Node> get_ng_node_from_cache(const std::string& name) const override;
void infer_inputs_from_parent();
private:
std::vector<Output<ngraph::Node>> m_outputs_from_parent;
void decode_to_framework_nodes() override;
void find_inputs_from_parent();
const GraphCache* m_parent_graph_cache;
std::vector<std::string> m_inputs_from_parent;
std::unordered_map<std::shared_ptr<ngraph::op::Parameter>, std::string>
m_parameter_to_parent_node_map;
};
inline std::ostream& operator<<(std::ostream& outs, const Graph& graph)

View File

@ -39,55 +39,5 @@ namespace ngraph
{
return (m_graph_cache_map.count(name) > 0);
}
NodeScope GraphCache::node_scope(const std::string& name) const
{
return contains(name) ? NodeScope::ParentGraph : NodeScope::Lack;
}
SubgraphCache::SubgraphCache(const GraphCache& parent_graph_cache)
: m_parent_graph_cache{&parent_graph_cache}
{
if (m_parent_graph_cache == nullptr)
{
throw ngraph_error("Parent graph cache is not initialized");
}
}
Output<ngraph::Node> SubgraphCache::get_node(const std::string& name) const
{
// present in subgraph scope
if (GraphCache::contains(name))
{
return GraphCache::get_node(name);
}
else // present in parent graph scope
{
return m_parent_graph_cache->get_node(name);
}
}
bool SubgraphCache::contains(const std::string& name) const
{
// the node is in subgraph or in parent graph scope
return GraphCache::contains(name) || m_parent_graph_cache->contains(name);
}
NodeScope SubgraphCache::node_scope(const std::string& name) const
{
if (GraphCache::contains(name))
{
return NodeScope::SubGraph;
}
else if (m_parent_graph_cache->contains(name))
{
return NodeScope::ParentGraph;
}
else
{
return NodeScope::Lack;
}
}
} // namespace onnx_import
} // namespace ngraph

View File

@ -14,17 +14,6 @@ namespace ngraph
{
namespace onnx_import
{
/// \brief Enum which determines scope (visibility) of nodes in GraphCache.
enum class NodeScope
{
// in parent graph scope
ParentGraph = 1,
// in subgraph scope
SubGraph,
// not available at all
Lack
};
/// \brief GraphCache stores and provides access to ONNX graph initializers.
class GraphCache
{
@ -58,58 +47,10 @@ namespace ngraph
/// \return true if the node named `name` exist in the cache, false otherwise.
virtual bool contains(const std::string& name) const;
/// \brief Return NodeScope enum which determines scope of the node.
/// \note If the method is called on GraphCache the ParentGraph enum
/// value is retunred always.
///
/// \param[in] name The name of the node.
///
/// \return SubGraph if node belongs to SubgraphCache, ParentGraph if
/// is avalible in parent_graph_cache, otherwise Lack
virtual NodeScope node_scope(const std::string& name) const;
virtual ~GraphCache() = default;
private:
std::map<std::string, Output<ngraph::Node>> m_graph_cache_map;
};
class SubgraphCache : public GraphCache
{
public:
/// \brief Constructs a SubgraphCache class object.
///
/// \param[in] parent_graph_cache The reference to the parent graph.
SubgraphCache(const GraphCache& parent_graph_cache);
/// \brief Get the node from the cache (subgraph or parent graph)
///
/// \note If the node is not found the ngraph_error exception is thrown.
///
/// \param[in] name The name of the node.
///
/// \return The node named `name` from subgraph (as present) or from parent graph.
Output<ngraph::Node> get_node(const std::string& name) const override;
/// \brief Return true if the node named `name` exist in the cache.
///
/// \param[in] name The name of the node.
///
/// \return true if the node named `name` exist in the cache
/// (subgraph or parent graph), false otherwise.
bool contains(const std::string& name) const override;
/// \brief Return NodeScope enum which determines scope of the node.
///
/// \param[in] name The name of the node.
///
/// \return SubGraph if the node belongs to SubgraphCache, ParentGraph if
/// is available in parent_graph_cache, otherwise Lack
NodeScope node_scope(const std::string& name) const override;
private:
const GraphCache* m_parent_graph_cache;
};
} // namespace onnx_import
} // namespace ngraph

View File

@ -6,6 +6,7 @@
#include "core/model.hpp"
#include "ngraph/log.hpp"
#include "onnx_import/onnx_framework_node.hpp"
#include "ops_bridge.hpp"
namespace ngraph

View File

@ -26,6 +26,29 @@ namespace ngraph
, m_graph{&graph}
, m_attributes{std::begin(node_proto.attribute()), std::end(node_proto.attribute())}
, m_output_names{std::begin(node_proto.output()), std::end(node_proto.output())}
{
const auto it =
std::find_if(std::begin(m_attributes),
std::end(m_attributes),
[&](const Attribute& attribute) { return attribute.is_graph(); });
m_has_subgraph = it != std::end(m_attributes);
if (m_has_subgraph)
{
m_subgraph = std::make_shared<Subgraph>(it->get_subgraph(*m_graph));
}
}
Impl(const ONNX_NAMESPACE::NodeProto& node_proto,
const Graph& graph,
std::shared_ptr<Subgraph> subgraph)
: m_node_proto{&node_proto}
, m_name{node_proto.has_name() ? node_proto.name() : ""}
, m_domain{get_node_domain(node_proto)}
, m_graph{&graph}
, m_attributes{std::begin(node_proto.attribute()), std::end(node_proto.attribute())}
, m_output_names{std::begin(node_proto.output()), std::end(node_proto.output())}
, m_has_subgraph(subgraph != nullptr)
, m_subgraph(subgraph)
{
}
@ -44,9 +67,8 @@ namespace ngraph
bool has_attribute(const std::string& name) const;
Subgraph get_subgraph_from_attribute(
const std::string& name,
const std::map<std::size_t, std::string>& carried_dependencies_map) const;
bool has_subgraph() const;
std::shared_ptr<Subgraph> get_subgraph() const;
template <typename T>
T get_attribute_value(const std::string& name, T default_value) const;
@ -58,6 +80,8 @@ namespace ngraph
const Graph& graph() const;
private:
Subgraph get_subgraph_from_attribute(const std::string& name) const;
const ONNX_NAMESPACE::NodeProto* m_node_proto;
std::string m_name;
std::string m_domain;
@ -65,6 +89,9 @@ namespace ngraph
std::vector<Attribute> m_attributes;
std::vector<std::reference_wrapper<const std::string>> m_output_names;
mutable std::string m_description;
bool m_has_subgraph;
std::shared_ptr<Subgraph> m_subgraph;
};
const ONNX_NAMESPACE::NodeProto& Node::Impl::node_proto() const { return *m_node_proto; }
@ -94,9 +121,7 @@ namespace ngraph
return it != std::end(m_attributes);
}
Subgraph Node::Impl::get_subgraph_from_attribute(
const std::string& name,
const std::map<std::size_t, std::string>& carried_dependencies_map) const
Subgraph Node::Impl::get_subgraph_from_attribute(const std::string& name) const
{
auto it = std::find_if(
std::begin(m_attributes), std::end(m_attributes), [&](const Attribute& attribute) {
@ -106,9 +131,13 @@ namespace ngraph
{
throw error::node::UnknownAttribute{this->name(), name};
}
return it->get_subgraph(graph(), carried_dependencies_map);
return it->get_subgraph(*m_graph);
}
bool Node::Impl::has_subgraph() const { return m_has_subgraph; }
std::shared_ptr<Subgraph> Node::Impl::get_subgraph() const { return m_subgraph; }
template <typename T>
T Node::Impl::get_attribute_value(const std::string& name, T default_value) const
{
@ -140,8 +169,7 @@ namespace ngraph
template <>
Subgraph Node::Impl::get_attribute_value(const std::string& name) const
{
const std::map<std::size_t, std::string> empty_map;
return get_subgraph_from_attribute(name, empty_map);
return get_subgraph_from_attribute(name);
}
OutputVector Node::Impl::get_ng_nodes(const Node& node) const
@ -196,7 +224,9 @@ namespace ngraph
}
Node::Node(const Node& other)
: m_pimpl{new Impl{other.m_pimpl->node_proto(), other.m_pimpl->graph()},
: m_pimpl{new Impl{other.m_pimpl->node_proto(),
other.m_pimpl->graph(),
other.get_subgraph()},
[](Impl* impl) { delete impl; }}
{
}
@ -219,12 +249,9 @@ namespace ngraph
return m_pimpl->has_attribute(name);
}
Subgraph Node::get_subgraph_from_attribute(
const std::string& name,
const std::map<std::size_t, std::string>& carried_dependencies_map) const
{
return m_pimpl->get_subgraph_from_attribute(name, carried_dependencies_map);
}
bool Node::has_subgraph() const { return m_pimpl->has_subgraph(); }
std::shared_ptr<Subgraph> Node::get_subgraph() const { return m_pimpl->get_subgraph(); }
std::vector<std::string> Node::get_attribute_names() const
{
@ -462,7 +489,6 @@ namespace ngraph
{
return m_pimpl->template get_attribute_value<std::vector<Graph>>(name);
}
} // namespace onnx_import
} // namespace ngraph

View File

@ -36,7 +36,10 @@ namespace ngraph
public:
static constexpr NodeTypeInfo type_info{"NullNode", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
NullNode() = default;
NullNode()
: Node(1)
{
}
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;

View File

@ -19,20 +19,6 @@ namespace ngraph
{
namespace onnx_import
{
namespace error
{
namespace value_info
{
struct unspecified_element_type : ngraph_error
{
unspecified_element_type()
: ngraph_error{"value info has no element type specified"}
{
}
};
} // namespace value_info
} // namespace error
class ValueInfo
{
public:
@ -65,12 +51,12 @@ namespace ngraph
const PartialShape& get_shape() const { return m_partial_shape; }
const element::Type& get_element_type() const
{
if (!m_value_info_proto->type().tensor_type().has_elem_type())
if (m_value_info_proto->type().tensor_type().has_elem_type())
{
throw error::value_info::unspecified_element_type{};
return common::get_ngraph_element_type(
m_value_info_proto->type().tensor_type().elem_type());
}
return common::get_ngraph_element_type(
m_value_info_proto->type().tensor_type().elem_type());
return ngraph::element::dynamic;
}
std::shared_ptr<ngraph::Node>

View File

@ -0,0 +1,34 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <onnx_import/onnx_framework_node.hpp>
namespace ngraph
{
namespace frontend
{
NGRAPH_RTTI_DEFINITION(ONNXFrameworkNode, "ONNXFrameworkNode", 1);
std::shared_ptr<Node>
ONNXFrameworkNode::clone_with_new_inputs(const OutputVector& inputs) const
{
return std::make_shared<ONNXFrameworkNode>(m_node, inputs);
}
NGRAPH_RTTI_DEFINITION(ONNXSubgraphFrameworkNode, "ONNXSubgraphFrameworkNode", 1);
} // namespace frontend
} // namespace ngraph

View File

@ -77,10 +77,18 @@ namespace ngraph
loop_carried_dependencies[i].get_node()->get_friendly_name();
}
const Subgraph& body_graph{
node.get_subgraph_from_attribute("body", loop_carried_dependencies_map)};
auto body_outputs = body_graph.get_ng_outputs();
const auto& body_inputs = body_graph.get_ng_parameters();
auto body_graph = node.get_subgraph();
auto body_outputs = body_graph->get_ng_outputs();
const auto& body_inputs = body_graph->get_ng_parameters();
// Infer loop body inputs' element type based on carried dependencies
for (size_t i = 0; i < loop_carried_dependencies.size(); i++)
{
body_inputs[i + 2]->set_element_type(
loop_carried_dependencies[i].get_element_type());
body_inputs[i + 2]->set_partial_shape(
loop_carried_dependencies[i].get_partial_shape());
}
// optional inputs
Output<ngraph::Node> trip_count;
@ -190,22 +198,22 @@ namespace ngraph
final_values.push_back(loop->get_iter_value(*body_outputs_it++, -1));
}
const auto& outputs_from_parent = body_graph.get_outputs_from_parent();
const auto& inputs_from_parent = body_graph->get_inputs_from_parent();
CHECK_VALID_NODE(
node,
static_cast<size_t>(std::distance(body_inputs_it, body_inputs.end())) ==
outputs_from_parent.size(),
inputs_from_parent.size(),
"Expected number of invariant parameters is"
" not equal number of provided outputs from parent scope");
" not equal number of provided inputs from parent scope");
// Set-up parameters from parent graph which are not changed during Loop's
// iterations
for (auto out_from_parent_it = outputs_from_parent.begin();
for (auto in_from_parent_it = inputs_from_parent.begin();
body_inputs_it != body_inputs.end() &&
out_from_parent_it != outputs_from_parent.end();
++body_inputs_it, ++out_from_parent_it)
in_from_parent_it != inputs_from_parent.end();
++body_inputs_it, ++in_from_parent_it)
{
loop->set_invariant_input(*body_inputs_it, *out_from_parent_it);
loop->set_invariant_input(*body_inputs_it, *in_from_parent_it);
}
// Set-up scan outputs

View File

@ -6,7 +6,9 @@
#include "core/graph.hpp"
#include "core/model.hpp"
#include "core/null_node.hpp"
#include "core/transform.hpp"
#include "onnx_import/onnx_framework_node.hpp"
#include "onnx_import/utils/onnx_internal.hpp"
namespace ngraph
@ -15,21 +17,81 @@ namespace ngraph
{
namespace detail
{
std::shared_ptr<Function>
convert_to_ng_function(const ONNX_NAMESPACE::ModelProto& model_proto)
void remove_dangling_parameters(std::shared_ptr<Function>& function)
{
auto p_model_proto = common::make_unique<ONNX_NAMESPACE::ModelProto>(model_proto);
auto model = common::make_unique<Model>(std::move(p_model_proto));
Graph graph{std::move(model)};
auto function = std::make_shared<Function>(
graph.get_ng_outputs(), graph.get_ng_parameters(), graph.get_name());
for (std::size_t i{0}; i < function->get_output_size(); ++i)
const auto parameters = function->get_parameters();
for (auto parameter : parameters)
{
function->get_output_op(i)->set_friendly_name(
graph.get_outputs().at(i).get_name());
const auto parameter_users = parameter->get_users();
// if a Parameter is connected to a ONNXFrameworkNode that was not converted
// during convert_function it means, this Parameter is dangling and we can
// remove it from function
const bool is_dangling_parameter = std::all_of(
parameter_users.begin(),
parameter_users.end(),
[](const std::shared_ptr<ngraph::Node>& node) -> bool {
return std::dynamic_pointer_cast<frontend::ONNXFrameworkNode>(node) !=
nullptr;
});
if (is_dangling_parameter)
{
function->remove_parameter(parameter);
}
}
return function;
}
// Removes dangling Result nodes from `function` after conversion.
// A Result is considered dangling when every one of its inputs is a NullNode
// (i.e. the producer was an optional/unused ONNX output), so keeping it in the
// nGraph Function would expose an output with no real data source.
void remove_dangling_results(std::shared_ptr<Function>& function)
{
    // iterate over a copy of the result list — we may remove entries below
    const auto results = function->get_results();
    for (auto result : results)
    {
        // we can remove Result from function if after function conversion,
        // Result is connected to NullNode only
        const auto result_inputs = result->input_values();
        const bool is_dangling_result =
            std::all_of(result_inputs.begin(),
                        result_inputs.end(),
                        [](const Output<ngraph::Node>& node) -> bool {
                            return ngraph::op::is_null(node);
                        });
        if (is_dangling_result)
        {
            function->remove_result(result);
        }
    }
}
// Converts a "decoded" nGraph Function — one whose ops are still placeholder
// ONNXFrameworkNode wrappers around ONNX NodeProtos — into a fully converted
// Function by translating each wrapper into real nGraph ops in place.
// Subgraph-carrying nodes (e.g. Loop/If bodies) are converted recursively.
// Finally, parameters/results left dangling by the conversion are removed.
void convert_decoded_function(std::shared_ptr<Function> function)
{
    for (const auto& node : function->get_ordered_ops())
    {
        if (auto raw_node =
                std::dynamic_pointer_cast<frontend::ONNXFrameworkNode>(node))
        {
            if (auto subgraph_node =
                    std::dynamic_pointer_cast<frontend::ONNXSubgraphFrameworkNode>(
                        node))
            {
                // propagate element types/shapes from the parent scope into the
                // subgraph body before recursing into it
                subgraph_node->infer_inputs_from_parent();
                convert_decoded_function(subgraph_node->get_subgraph_body());
            }
            const auto& onnx_node = raw_node->get_onnx_node();
            OutputVector ng_nodes{onnx_node.get_ng_nodes()};
            // the translation may produce more outputs than the wrapper exposed;
            // trim so replace_node sees a matching output count
            if (ng_nodes.size() > raw_node->get_output_size())
            {
                ng_nodes.resize(raw_node->get_output_size());
            }
            replace_node(raw_node, ng_nodes);
        }
        else
        {
            // Have to revalidate node because new inputs can affect shape/type
            // propagation for already translated nodes
            node->revalidate_and_infer_types();
        }
    }
    remove_dangling_parameters(function);
    remove_dangling_results(function);
}
std::shared_ptr<Function> import_onnx_model(ONNX_NAMESPACE::ModelProto& model_proto,
@ -39,7 +101,10 @@ namespace ngraph
transform::fixup_legacy_operators(model_proto);
transform::update_external_data_paths(model_proto, model_path);
return detail::convert_to_ng_function(model_proto);
auto p_model_proto = common::make_unique<ONNX_NAMESPACE::ModelProto>(model_proto);
auto model = common::make_unique<Model>(std::move(p_model_proto));
Graph graph{std::move(model)};
return graph.convert();
}
} // namespace detail
} // namespace onnx_import

View File

@ -390,8 +390,7 @@ def test_cast_errors():
for name, value in zip(node.input, [input_data])
]
output_tensors = [
make_tensor_value_info(name, onnx.TensorProto.FLOAT16, value.shape)
for name, value in zip(node.output, ())
make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16, input_data.shape)
] # type: ignore
graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
@ -406,8 +405,7 @@ def test_cast_errors():
for name, value in zip(node.input, [input_data])
]
output_tensors = [
make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
for name, value in zip(node.output, ())
make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape)
] # type: ignore
graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
@ -422,8 +420,7 @@ def test_cast_errors():
for name, value in zip(node.input, [input_data])
]
output_tensors = [
make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
for name, value in zip(node.output, ())
make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape)
] # type: ignore
graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
@ -438,8 +435,7 @@ def test_cast_errors():
for name, value in zip(node.input, [input_data])
]
output_tensors = [
make_tensor_value_info(name, onnx.TensorProto.COMPLEX128, value.shape)
for name, value in zip(node.output, ())
make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128, input_data.shape)
] # type: ignore
graph = make_graph([node], "compute_graph", input_tensors, output_tensors)

View File

@ -388,7 +388,6 @@ set(MULTI_TEST_SRC
backend/comparison.in.cpp
backend/concat.in.cpp
backend/constant.in.cpp
backend/convert.in.cpp
backend/convert_like.in.cpp
backend/convolution_backprop.in.cpp
backend/convolution.in.cpp

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,6 @@ ir_version: 7
producer_name: "backend-test"
graph {
node {
input: "target_shape"
output: "output"
op_type: "ConstantFill"
attribute {

View File

@ -929,90 +929,6 @@ roll_3d_input
roll_3d_input_negative_shift
roll_negative_axes
# convert operation
IE_CPU.convert_u8_to_boolean
IE_CPU.convert_i32_to_boolean
IE_CPU.convert_f32_to_boolean
IE_CPU.convert_u8_to_f16
IE_CPU.convert_u8_to_i16
IE_CPU.convert_u8_to_i64
IE_CPU.convert_u8_to_u16
IE_CPU.convert_u8_to_u32
IE_CPU.convert_u8_to_u64
IE_CPU.convert_f16_to_f32
IE_CPU.convert_u32_to_f32
IE_CPU.convert_i4_to_f32
IE_CPU.convert_u1_to_f32
IE_CPU.convert_u4_to_f32
IE_CPU.convert_u1_to_u1
IE_CPU.convert_u4_to_u1
IE_CPU.convert_u8_to_u1
IE_CPU.convert_u16_to_u1
IE_CPU.convert_u32_to_u1
IE_CPU.convert_u64_to_u1
IE_CPU.convert_i4_to_u1
IE_CPU.convert_i8_to_u1
IE_CPU.convert_i16_to_u1
IE_CPU.convert_i32_to_u1
IE_CPU.convert_i64_to_u1
IE_CPU.convert_f16_to_u1
IE_CPU.convert_bf16_to_u1
IE_CPU.convert_f32_to_u1
IE_CPU.convert_u1_to_i4
IE_CPU.convert_u4_to_i4
IE_CPU.convert_u8_to_i4
IE_CPU.convert_u16_to_i4
IE_CPU.convert_u32_to_i4
IE_CPU.convert_u64_to_i4
IE_CPU.convert_i8_to_i4
IE_CPU.convert_i16_to_i4
IE_CPU.convert_i32_to_i4
IE_CPU.convert_i64_to_i4
IE_CPU.convert_f16_to_i4
IE_CPU.convert_bf16_to_i4
IE_CPU.convert_f32_to_i4
IE_CPU.convert_u1_to_u4
IE_CPU.convert_u4_to_u4
IE_CPU.convert_u8_to_u4
IE_CPU.convert_u16_to_u4
IE_CPU.convert_u32_to_u4
IE_CPU.convert_u64_to_u4
IE_CPU.convert_i4_to_u4
IE_CPU.convert_i8_to_u4
IE_CPU.convert_i16_to_u4
IE_CPU.convert_i32_to_u4
IE_CPU.convert_i64_to_u4
IE_CPU.convert_f16_to_u4
IE_CPU.convert_bf16_to_u4
IE_CPU.convert_f32_to_u4
IE_CPU.convert_u1_to_i8
IE_CPU.convert_u4_to_i8
IE_CPU.convert_i4_to_i8
IE_CPU.convert_u1_to_i16
IE_CPU.convert_u4_to_i16
IE_CPU.convert_i4_to_i16
IE_CPU.convert_u1_to_i32
IE_CPU.convert_u4_to_i32
IE_CPU.convert_i4_to_i32
IE_CPU.convert_u1_to_i64
IE_CPU.convert_u4_to_i64
IE_CPU.convert_i4_to_i64
IE_CPU.convert_u1_to_u8
IE_CPU.convert_u4_to_u8
IE_CPU.convert_i4_to_u8
IE_CPU.convert_u1_to_u16
IE_CPU.convert_u4_to_u16
IE_CPU.convert_i4_to_u16
IE_CPU.convert_u1_to_u32
IE_CPU.convert_u4_to_u32
IE_CPU.convert_i4_to_u32
IE_CPU.convert_u1_to_u64
IE_CPU.convert_u4_to_u64
IE_CPU.convert_i4_to_u64
#-------------------------------------------------------------------------------
#
# Inference Engine CPU plugin excludes