Param/Const => Result tests (#9294)

* Tests for param => result

* Added const => result, param => result tests

* Disabled tests on CPU

* Added more tests

* Enabled import / export for template

* clang-format

* Reverted scatter tests

* Rename back

* Fixed typo

* Fixed compilation for GNA

* Fixed comments

* Fixed collisions

* Revert renaming back

* Added skip filters for GNA / MYRIAD
This commit is contained in:
Ilya Lavrenov 2022-02-01 11:01:12 +03:00 committed by GitHub
parent 75abee2500
commit 4717e7639c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 273 additions and 32 deletions

View File

@ -25,19 +25,21 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*Behavior.*ExecutableNetworkBaseTest.*canExport.*)",
R"(.*Behavior.*ExecutableNetworkBaseTest.*(CanCreateTwoExeNetworksAndCheckFunction).*)",
R"(.*Behavior.*ExecutableNetworkBaseTest.*(checkGetExecGraphInfoIsNotNullptr).*)",
R"(.*smoke_BehaviorTests.*OVExecNetwork.ieImportExportedFunction.*)",
R"(.*LoadNetworkCreateDefaultExecGraphResult.*)",
// TODO: Round with f16 is not supported
R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.*readFromV10IR.*)",
// TODO: execution graph is not supported
R"(.*ExecGraph.*)",
R"(.*smoke_Hetero_BehaviorTests.*OVExecGraphImportExportTest.*readFromV10IR.*)",
// TODO: support import / export of precisions in template plugin
R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.ieImportExportedFunction.*)",
R"(.*smoke_BehaviorTests.*OVExecNetwork.ieImportExportedFunction.*)",
R"(.*smoke_Hetero_BehaviorTests.*OVExecGraphImportExportTest.ieImportExportedFunction.*)",
R"(.*smoke_BehaviorTests.*OVExecGraphImportExportTest.ieImportExportedFunction.*)",
// TODO: Round with f16 is not supported
R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.*readFromV10IR.*)",
R"(.*smoke_Hetero_BehaviorTests.*OVExecGraphImportExportTest.*readFromV10IR.*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=(i8|u8).*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=(i16|u16).*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=(i64|u64).*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=u32.*)",
R"(.*importExportedIENetworkConstantResultOnly.*elementType=(u32|u64).*)",
// CVS-64094
R"(.*ReferenceLogSoftmaxLayerTest.*4.*iType=f16.*axis=.*1.*)",

View File

@ -696,7 +696,7 @@ ov::Output<const ov::Node> ov::Model::output(const std::string& tensor_name) con
return result;
}
}
throw ov::Exception("Output for tensor name " + tensor_name + " was not found.");
throw ov::Exception("Output for tensor name '" + tensor_name + "' is not found.");
}
std::vector<ov::Output<ov::Node>> ov::Model::outputs() {
@ -720,7 +720,7 @@ ov::Output<ov::Node> ov::Model::output(const std::string& tensor_name) {
if (res->get_input_tensor(0).get_names().count(tensor_name))
return res;
}
throw ov::Exception("Output for tensor name " + tensor_name + " was not found.");
throw ov::Exception("Output for tensor name '" + tensor_name + "' is not found.");
}
/// Input Model
@ -751,7 +751,7 @@ ov::Output<const ov::Node> ov::Model::input(const std::string& tensor_name) cons
return parameter;
}
}
throw ov::Exception("Input for tensor name " + tensor_name + " was not found.");
throw ov::Exception("Input for tensor name '" + tensor_name + "' is not found.");
}
std::vector<ov::Output<ov::Node>> ov::Model::inputs() {
@ -776,7 +776,7 @@ ov::Output<ov::Node> ov::Model::input(const std::string& tensor_name) {
if (param->get_output_tensor(0).get_names().count(tensor_name))
return param;
}
throw ov::Exception("Input for tensor name " + tensor_name + " was not found.");
throw ov::Exception("Input for tensor name '" + tensor_name + "' is not found.");
}
void ov::Model::reshape(const ov::PartialShape& partial_shape) {

View File

@ -527,6 +527,60 @@ TEST(model, multiple_inputs_outputs_model_from_const_model) {
EXPECT_EQ(f->outputs().size(), 2);
}
// Verifies input/output introspection on a trivial Parameter -> Result model:
// lookups succeed by tensor name ("data") but must fail by friendly name.
TEST(model, parameter_result_function) {
    std::shared_ptr<ov::Model> model = nullptr;
    {
        auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f16, ngraph::Shape({1, 3, 24, 24}));
        input->set_friendly_name("param");
        input->output(0).get_tensor().set_names({"data"});
        auto res = std::make_shared<ov::opset8::Result>(input);
        res->set_friendly_name("result");
        model = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{input});
        model->set_friendly_name("ParamResult");
    }

    // Exactly one input, addressable only via its tensor name.
    EXPECT_EQ(model->inputs().size(), 1);
    EXPECT_NO_THROW(model->input());
    EXPECT_NO_THROW(model->input("data"));
    EXPECT_THROW(model->input("param"), ov::Exception);

    // Exactly one output, carrying the same single tensor name.
    EXPECT_EQ(model->outputs().size(), 1);
    EXPECT_NO_THROW(model->output());
    EXPECT_EQ(1, model->output(0).get_tensor().get_names().size());
    EXPECT_NO_THROW(model->output("data"));
    EXPECT_THROW(model->output("constant"), ov::Exception);

    // Element type propagates from the Parameter through to the Result port.
    EXPECT_EQ(ov::element::f16, model->input("data").get_element_type());
    EXPECT_EQ(ov::element::f16, model->output("data").get_element_type());
}
// Verifies introspection on a model with no Parameters: a Constant feeding a
// Result. All input lookups must throw; the output resolves by tensor name.
TEST(model, constant_result_function) {
    std::shared_ptr<ov::Model> model = nullptr;
    std::shared_ptr<ov::Node> const_node = nullptr;
    {
        const_node = std::make_shared<ov::opset8::Constant>(ov::element::f32, ngraph::Shape({1, 3, 24, 24}));
        const_node->set_friendly_name("constant");
        const_node->output(0).get_tensor().set_names({"data"});
        auto res = std::make_shared<ov::opset8::Result>(const_node);
        res->set_friendly_name("result");
        model = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{});
        model->set_friendly_name("ConstResult");
    }

    // No Parameters -> no inputs; every input query must throw.
    EXPECT_EQ(model->inputs().size(), 0);
    EXPECT_THROW(model->input(), ov::Exception);
    EXPECT_THROW(model->input("data"), ov::Exception);
    EXPECT_THROW(model->input("constant"), ov::Exception);

    // Exactly one output, addressable by tensor name but not friendly name.
    EXPECT_EQ(model->outputs().size(), 1);
    EXPECT_NO_THROW(model->output());
    EXPECT_EQ(1, model->output(0).get_tensor().get_names().size());
    EXPECT_NO_THROW(model->output("data"));
    EXPECT_THROW(model->output("constant"), ov::Exception);

    // Element type comes from the Constant.
    EXPECT_EQ(ov::element::f32, model->output("data").get_element_type());
}
TEST(model_reshape, ReshapedDynamicShapeLayout) {
std::shared_ptr<ov::Model> ngraph;
{

View File

@ -164,7 +164,7 @@ ov::Output<const ov::Node> CompiledModel::input(const std::string& tensor_name)
return param;
}
}
throw ov::Exception("Input for tensor name " + tensor_name + " was not found.");
throw ov::Exception("Input for tensor name '" + tensor_name + "' is not found.");
});
}
@ -196,7 +196,7 @@ ov::Output<const ov::Node> CompiledModel::output(const std::string& tensor_name)
return result;
}
}
throw ov::Exception("Output for tensor name " + tensor_name + " was not found.");
throw ov::Exception("Output for tensor name '" + tensor_name + "' is not found.");
});
}

View File

@ -684,9 +684,8 @@ public:
// create getInputs() based on GetInputsInfo()
using namespace InferenceEngine::details;
if (exec->getInputs().empty()) {
if (exec->getInputs().empty() && !exec->GetInputsInfo().empty()) {
const auto& inputsInfo = exec->GetInputsInfo();
OPENVINO_ASSERT(!inputsInfo.empty(), "inputsInfo is empty after network import");
std::vector<std::shared_ptr<const ov::Node>> params;
params.reserve(inputsInfo.size());

View File

@ -302,7 +302,7 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
const int64_t ir_version = it->second.as<int64_t>();
if (ir_version == 10 && newAPI) {
std::unordered_set<std::string> leaf_names;
std::unordered_map<std::string, std::shared_ptr<ov::descriptor::Tensor>> leaf_names;
const auto inputs = function->inputs();
for (size_t i = 0; i < inputs.size(); ++i) {
const auto ngraph_type = inputs[i].get_element_type();
@ -312,7 +312,7 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(),
"Model tensor names have collisions.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.insert(name);
leaf_names.emplace(name, inputs[i].get_tensor_ptr());
}
}
@ -323,10 +323,11 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
prepost.output(i).tensor().set_element_type(legacy_type);
for (const auto& name : outputs[i].get_names()) {
OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(),
auto tensor_it = leaf_names.find(name);
OPENVINO_ASSERT(tensor_it == leaf_names.end() || tensor_it->second == outputs[i].get_tensor_ptr(),
"Model tensor names have collisions.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.insert(name);
leaf_names.emplace(name, outputs[i].get_tensor_ptr());
}
}
@ -345,7 +346,7 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
"Model operation names have collisions with tensor names.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.insert(res_name);
leaf_names.emplace(res_name, nullptr);
result->output(0).get_tensor().add_names({res_name});
}
for (const auto& param : function->get_parameters()) {
@ -355,7 +356,7 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
"Model operation names have collisions with tensor names.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.insert(param_name);
leaf_names.emplace(param_name, nullptr);
param->output(0).get_tensor().add_names({param_name});
}
}

View File

@ -87,7 +87,7 @@ TEST_F(CustomOpsSerializationTest, CustomOpUser_ONNXImporter) {
#endif // OPENVINO_STATIC_LIBRARY
#endif // NGRAPH_ONNX_FRONTEND_ENABLE
#endif // ENABLE_OV_ONNX_FRONTEND
TEST_F(CustomOpsSerializationTest, CustomOpTransformation) {
const std::string model = CommonTestUtils::getModelFromTestModelZoo(IR_SERIALIZATION_MODELS_PATH "custom_op.xml");

View File

@ -104,6 +104,18 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*ExecGraphInfo.*)",
R"(.*Hetero.*Behavior.*ExecutableNetworkBaseTest.*CanCreateTwoExeNetworksAndCheckFunction.*)",
// CVS-74306
R"(.*importExportedIENetworkParameterResultOnly.*elementType=(i8|u8).*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=(i16|u16).*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=(i64|u64).*)",
R"(.*importExportedIENetworkParameterResultOnly.*elementType=u32.*)",
// CVS-74307
R"(.*ConstantResultOnly.*elementType=(i8|u8).*)",
R"(.*ConstantResultOnly.*elementType=(i16|u16).*)",
R"(.*ConstantResultOnly.*elementType=(i64|u64).*)",
R"(.*ConstantResultOnly.*elementType=(u32|f16).*)",
// CPU plugin does not support some precisions
R"(smoke_CachingSupportCase_CPU/LoadNetworkCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_CPU)",
@ -123,8 +135,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*BehaviorTests.*canRun3SyncRequestsConsistentlyFromThreads.*CPU_THROUGHPUT.*)",
// Issue 67214
R"(smoke_PrePostProcess.*resize_and_convert_layout_i8.*)",
// Issue 67910
R"(.*smoke_PrePostProcess.*two_inputs_trivial.*)",
// TODO: CVS-67255
R"(smoke_If.*SimpleIf2OutTest.*)",

View File

@ -42,6 +42,10 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)",
R"(.*ConstantResultSubgraphTest.*IS=\(2\.3\.4\.5\).*)",
R"(.*ConstantResultSubgraphTest.*inPrc=(U8|I8|I32|U64|I64|BOOL).*)",
R"(.*importExportedFunctionParameterResultOnly.*)",
R"(.*importExportedFunctionConstantResultOnly.*)",
R"(.*importExportedIENetworkConstantResultOnly.*)",
R"(.*importExportedIENetworkParameterResultOnly.*)",
// TODO: Issue 57363 (Param -> Result subgraphs)
R"(.*smoke_MemoryTest.*LOW_LATENCY.*iteration_count=1_.*)",

View File

@ -77,8 +77,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*EltwiseLayerTest.*OpType=FloorMod.*NetType=i64.*)",
// Issue connected with OV2.0
R"(.*EltwiseLayerTest.*OpType=Pow.*NetType=i64.*)",
// TODO: Issue 67910
R"(.*smoke_PrePostProcess_GPU.*two_inputs_trivial.*)",
// TODO: Issue: 67486
R"(.*(SoftMaxLayerTest).*)",
// TODO: Issue: 68712

View File

@ -59,6 +59,7 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*_Hetero_Behavior.*OVExecGraphImportExportTest.*)",
// TODO: Issue 65013
R"(.*LoadNetworkCreateDefaultExecGraphResult.*)",
R"(.*importExportedFunctionConstantResultOnly.*elementType=(f32|f16).*)",
// Not expected behavior
R"(.*Behavior.*ExecNetSetPrecision.*canSetOutputPrecisionForNetwork.*U8.*)",
R"(.*CoreThreadingTestsWithIterations.*)",

View File

@ -74,7 +74,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedFunction) {
ov::CompiledModel execNet;
// Create simple function
// Create simple function
{
auto param1 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param1->set_friendly_name("param1");
@ -150,6 +150,85 @@ TEST_P(OVExecGraphImportExportTest, importExportedFunction) {
EXPECT_THROW(importedExecNet.output("relu_op"), ov::Exception);
}
// Round-trips a trivial Parameter -> Result model through the OV 2.0
// export_model / import_model APIs and checks that port counts, tensor
// names and element types are preserved by the imported compiled model.
TEST_P(OVExecGraphImportExportTest, importExportedFunctionParameterResultOnly) {
    if (targetDevice == "MULTI" || targetDevice == "AUTO") {
        GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl;
    }

    // Build the model under test: a single Parameter feeding a Result.
    {
        auto input = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
        input->set_friendly_name("param");
        input->output(0).get_tensor().set_names({"data"});
        auto res = std::make_shared<ov::opset8::Result>(input);
        res->set_friendly_name("result");
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{input});
        function->set_friendly_name("ParamResult");
    }

    // Round-trip: compile, export to a stream, import back.
    auto compiled_model = core->compile_model(function, targetDevice, configuration);
    std::stringstream stream;
    compiled_model.export_model(stream);
    ov::runtime::CompiledModel imported_model = core->import_model(stream, targetDevice, configuration);

    // The single input resolves by tensor name only, not by friendly name.
    EXPECT_EQ(function->inputs().size(), 1);
    EXPECT_EQ(function->inputs().size(), imported_model.inputs().size());
    EXPECT_NO_THROW(imported_model.input());
    EXPECT_NO_THROW(imported_model.input("data").get_node());
    EXPECT_THROW(imported_model.input("param"), ov::Exception);

    // The single output keeps the original tensor names after import.
    EXPECT_EQ(function->outputs().size(), 1);
    EXPECT_EQ(function->outputs().size(), imported_model.outputs().size());
    EXPECT_NO_THROW(imported_model.output());
    EXPECT_EQ(function->output(0).get_tensor().get_names(),
              imported_model.output(0).get_tensor().get_names());
    EXPECT_NO_THROW(imported_model.output("data").get_node());
    EXPECT_THROW(imported_model.output("param"), ov::Exception);

    // Element type is preserved on both ports.
    EXPECT_EQ(ov::element::Type(elementType), imported_model.input("data").get_element_type());
    EXPECT_EQ(ov::element::Type(elementType), imported_model.output("data").get_element_type());
}
// Round-trips a Constant -> Result model (no Parameters) through the OV 2.0
// export_model / import_model APIs: input lookups must throw, while the
// output keeps its tensor names and element type.
TEST_P(OVExecGraphImportExportTest, importExportedFunctionConstantResultOnly) {
    if (targetDevice == "MULTI" || targetDevice == "AUTO") {
        GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl;
    }

    // Build the model under test: a single Constant feeding a Result.
    {
        auto const_node = std::make_shared<ov::opset8::Constant>(elementType, ngraph::Shape({1, 3, 24, 24}));
        const_node->set_friendly_name("constant");
        const_node->output(0).get_tensor().set_names({"data"});
        auto res = std::make_shared<ov::opset8::Result>(const_node);
        res->set_friendly_name("result");
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{});
        function->set_friendly_name("ConstResult");
    }

    // Round-trip: compile, export to a stream, import back.
    auto compiled_model = core->compile_model(function, targetDevice, configuration);
    std::stringstream stream;
    compiled_model.export_model(stream);
    ov::runtime::CompiledModel imported_model = core->import_model(stream, targetDevice, configuration);

    // No Parameters -> no inputs; every input query must throw.
    EXPECT_EQ(function->inputs().size(), 0);
    EXPECT_EQ(function->inputs().size(), imported_model.inputs().size());
    EXPECT_THROW(imported_model.input(), ov::Exception);
    EXPECT_THROW(imported_model.input("data"), ov::Exception);
    EXPECT_THROW(imported_model.input("constant"), ov::Exception);

    // The single output keeps the original tensor names after import.
    EXPECT_EQ(function->outputs().size(), 1);
    EXPECT_EQ(function->outputs().size(), imported_model.outputs().size());
    EXPECT_NO_THROW(imported_model.output());
    EXPECT_EQ(function->output(0).get_tensor().get_names(),
              imported_model.output(0).get_tensor().get_names());
    EXPECT_NO_THROW(imported_model.output("data").get_node());
    EXPECT_THROW(imported_model.output("constant"), ov::Exception);

    // Element type is preserved on the output port.
    EXPECT_EQ(ov::element::Type(elementType), imported_model.output("data").get_element_type());
}
TEST_P(OVExecGraphImportExportTest, readFromV10IR) {
std::string model = R"V0G0N(
<net name="Network" version="10">
@ -275,7 +354,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) {
result2->set_friendly_name("result2");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
ngraph::ParameterVector{param1, param2});
function->set_friendly_name("SingleRuLU");
function->set_friendly_name("SingleReLU");
}
execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), targetDevice, any_copy(configuration));
@ -312,6 +391,98 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) {
EXPECT_EQ(outputType, importedExecNet.output("relu_op").get_element_type());
}
// Exports a Parameter -> Result network via the legacy InferenceEngine API
// (ExecutableNetwork::Export) and re-imports it through the OV 2.0 core,
// then checks port counts, name lookup and the legacy precision mapping.
TEST_P(OVExecGraphImportExportTest, importExportedIENetworkParameterResultOnly) {
// Import / export is not supported by the MULTI / AUTO virtual devices.
if (targetDevice == "MULTI" || targetDevice == "AUTO") {
GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl;
}
std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
InferenceEngine::ExecutableNetwork execNet;
// Create a simple function
{
auto param = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param->set_friendly_name("param");
param->output(0).get_tensor().set_names({"data"});
auto result = std::make_shared<ov::opset8::Result>(param);
result->set_friendly_name("result");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
ngraph::ParameterVector{param});
function->set_friendly_name("ParamResult");
}
// Round-trip: legacy load + Export, then import through the 2.0 core.
execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), targetDevice, any_copy(configuration));
std::stringstream strm;
execNet.Export(strm);
ov::runtime::CompiledModel importedExecNet = core->import_model(strm, targetDevice, configuration);
EXPECT_EQ(function->inputs().size(), 1);
EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size());
EXPECT_NO_THROW(importedExecNet.input());
// After the legacy round-trip both the tensor name ("data") and the
// operation name ("param") are expected to resolve the input.
EXPECT_NO_THROW(importedExecNet.input("data").get_node());
EXPECT_NO_THROW(importedExecNet.input("param").get_node());
EXPECT_EQ(function->outputs().size(), 1);
EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size());
EXPECT_NO_THROW(importedExecNet.output());
// NOTE(review): the legacy path apparently does not preserve the original
// output tensor names (hence EXPECT_NE) — confirm this is intended.
EXPECT_NE(function->output(0).get_tensor().get_names(),
importedExecNet.output(0).get_tensor().get_names());
EXPECT_NO_THROW(importedExecNet.output("data").get_node());
EXPECT_NO_THROW(importedExecNet.output("param").get_node());
// Legacy precision mapping asserted below: i32/i64 outputs become i32 and
// all other outputs f32; f16 inputs are promoted to f32, other inputs kept.
const ov::element::Type outputType = elementType == ngraph::element::i32 ||
elementType == ngraph::element::i64 ? ngraph::element::i32 : ngraph::element::f32;
const ov::element::Type inputType = elementType ==
ngraph::element::f16 ? ngraph::element::Type_t::f32 : elementType;
EXPECT_EQ(inputType, importedExecNet.input("param").get_element_type());
EXPECT_EQ(outputType, importedExecNet.output("data").get_element_type());
}
// Exports a Constant -> Result network (no Parameters) via the legacy
// InferenceEngine API (ExecutableNetwork::Export), re-imports it through the
// OV 2.0 core, and checks that inputs are absent while the output resolves
// by both tensor and operation name with the legacy output precision.
TEST_P(OVExecGraphImportExportTest, importExportedIENetworkConstantResultOnly) {
// Import / export is not supported by the MULTI / AUTO virtual devices.
if (targetDevice == "MULTI" || targetDevice == "AUTO") {
GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl;
}
std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
InferenceEngine::ExecutableNetwork execNet;
// Create a simple function
{
auto constant = std::make_shared<ov::opset8::Constant>(elementType, ngraph::Shape({1, 3, 24, 24}));
constant->set_friendly_name("constant");
constant->output(0).get_tensor().set_names({"data"});
auto result = std::make_shared<ov::opset8::Result>(constant);
result->set_friendly_name("result");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
ngraph::ParameterVector{});
function->set_friendly_name("ConstResult");
}
// Round-trip: legacy load + Export, then import through the 2.0 core.
execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), targetDevice, any_copy(configuration));
std::stringstream strm;
execNet.Export(strm);
ov::runtime::CompiledModel importedExecNet = core->import_model(strm, targetDevice, configuration);
// No Parameters -> no inputs; every input query must throw.
EXPECT_EQ(function->inputs().size(), 0);
EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size());
EXPECT_THROW(importedExecNet.input(), ov::Exception);
EXPECT_THROW(importedExecNet.input("data"), ov::Exception);
EXPECT_THROW(importedExecNet.input("constant"), ov::Exception);
EXPECT_EQ(function->outputs().size(), 1);
EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size());
EXPECT_NO_THROW(importedExecNet.output());
// NOTE(review): the legacy path apparently does not preserve the original
// output tensor names (hence EXPECT_NE) — confirm this is intended.
EXPECT_NE(function->output(0).get_tensor().get_names(),
importedExecNet.output(0).get_tensor().get_names());
// Both the tensor name ("data") and the operation name ("constant") are
// expected to resolve the output after the legacy round-trip.
EXPECT_NO_THROW(importedExecNet.output("data").get_node());
EXPECT_NO_THROW(importedExecNet.output("constant").get_node());
// Legacy precision mapping asserted below: i32/i64 outputs become i32,
// everything else f32.
const auto outputType = elementType == ngraph::element::i32 ||
elementType == ngraph::element::i64 ? ngraph::element::i32 : ngraph::element::f32;
EXPECT_EQ(outputType, importedExecNet.output("data").get_element_type());
}
TEST_P(OVExecGraphImportExportTest, ieImportExportedFunction) {
if (targetDevice == "MULTI" || targetDevice == "AUTO") {
GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl;
@ -340,7 +511,7 @@ TEST_P(OVExecGraphImportExportTest, ieImportExportedFunction) {
result2->set_friendly_name("result2");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
ngraph::ParameterVector{param1, param2});
function->set_friendly_name("SingleRuLU");
function->set_friendly_name("SingleReLU");
}
execNet = core->compile_model(function, targetDevice, configuration);

View File

@ -624,6 +624,7 @@ TEST_P(OVExecutableNetworkBaseTest, precisionsAsInOriginalIR) {
ov::CompiledModel execNet;
EXPECT_NO_THROW(execNet = core->compile_model(m_out_xml_path_1, targetDevice, configuration));
CommonTestUtils::removeIRFiles(m_out_xml_path_1, m_out_bin_path_1);
EXPECT_EQ(function->get_parameters().size(), execNet.inputs().size());
auto ref_parameter = function->get_parameters().back();