[apiConformance] Rebalance ImportExport and LifeTime tests (#17129)

This commit is contained in:
Sofya Balandina
2023-04-28 13:19:20 +01:00
committed by GitHub
parent e2baf7f0a2
commit 9248ecadee
9 changed files with 535 additions and 25 deletions

View File

@@ -1,7 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_executable_network/exec_graph_info.hpp"
#include "behavior/compiled_model/import_export.hpp"
#include "ie_plugin_config.hpp"
#include <common_test_utils/test_constants.hpp>
@@ -30,26 +30,26 @@ const std::vector<ov::AnyMap> heteroConfigs = {
{ov::device::priorities(CommonTestUtils::DEVICE_CPU)}};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
} // namespace

View File

@@ -170,7 +170,9 @@ std::vector<std::string> disabledTestPatterns() {
// 98151. Not valid sorting for slices in reference.
R"(.*UniqueLayerTestCPU.*axis.*True.*)",
// 109482. Sporadic failure.
R"(.*smoke_StaticSpaceToBatch_4D_parallel_block_edge.*)"
R"(.*smoke_StaticSpaceToBatch_4D_parallel_block_edge.*)",
// AUTO does not support import / export
R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)"
};
#if defined(OPENVINO_ARCH_X86)

View File

@@ -1,10 +1,9 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_executable_network/exec_graph_info.hpp"
#include <common_test_utils/test_constants.hpp>
#include "behavior/compiled_model/import_export.hpp"
#include "ie_plugin_config.hpp"
using namespace ov::test::behavior;
@@ -13,10 +12,10 @@ const std::vector<ov::element::Type_t> netPrecisions = {ov::element::i16, ov::el
const std::vector<ov::AnyMap> configs = {{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}}};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(configs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
} // namespace

View File

@@ -75,7 +75,7 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*LoadNetworkTest.*QueryNetwork(MULTIWithHETERO|HETEROWithMULTI)NoThrow_V10.*)",
R"(.*Behavior.*OVCompiledModelBaseTest.*get(Inputs|Outputs)FromFunctionWithSeveral(Inputs|Outputs).*)",
// TODO: temporary disabled. Need to be enabled when PR 9282 is merged
R"(.*OVExecGraphImportExportTest.*readFromV10IR.*)",
R"(.*OVCompiledGraphImportExportTest.*readFromV10IR.*)",
// Issue connected with OV2.0
R"(.*EltwiseLayerTest.*NetType=f16.*)",
// TODO: Issue: 69639

View File

@@ -1,7 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_executable_network/exec_graph_info.hpp"
#include "behavior/compiled_model/import_export.hpp"
#include "ie_plugin_config.hpp"
#include <common_test_utils/test_constants.hpp>
@@ -30,26 +30,26 @@ const std::vector<ov::AnyMap> heteroConfigs = {
{ov::device::priorities(CommonTestUtils::DEVICE_TEMPLATE)}};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_AUTO),
::testing::ValuesIn(multiConfigs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
} // namespace

View File

@@ -112,6 +112,8 @@ std::vector<std::string> disabledTestPatterns() {
// Why query state should throw an exception
R"(.*InferRequestQueryStateExceptionTest.*inferreq_smoke_QueryState_ExceptionTest.*)",
R"(.*OVInferRequestCheckTensorPrecision.*get(Input|Output|Inputs|Outputs)From.*FunctionWith(Single|Several).*type=(u4|u1|i4|boolean).*)",
// AUTO does not support import / export
R"(.*smoke_Auto_BehaviorTests/OVCompiledGraphImportExportTest.*(mportExport|readFromV10IR).*/targetDevice=(AUTO).*)",
};
#ifdef _WIN32

View File

@@ -1,7 +1,7 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_executable_network/exec_graph_info.hpp"
#include "behavior/compiled_model/import_export.hpp"
#include "ov_api_conformance_helpers.hpp"
#include "ie_plugin_config.hpp"
@@ -29,10 +29,10 @@ const std::vector<ov::element::Type_t> ovExecGraphInfoElemTypes = {
};
INSTANTIATE_TEST_SUITE_P(ov_compiled_model,
OVExecGraphImportExportTest,
OVCompiledGraphImportExportTest,
::testing::Combine(
::testing::ValuesIn(ovExecGraphInfoElemTypes),
::testing::ValuesIn(return_all_possible_device_combination()),
::testing::Values(pluginConfig)),
OVExecGraphImportExportTest::getTestCaseName);
OVCompiledGraphImportExportTest::getTestCaseName);
} // namespace

View File

@@ -10,11 +10,11 @@ using namespace ov::test::conformance;
namespace {
INSTANTIATE_TEST_SUITE_P(ov_plugin, OVHoldersTest,
INSTANTIATE_TEST_SUITE_P(ov_plugin_mandatory, OVHoldersTest,
::testing::ValuesIn(return_all_possible_device_combination()),
OVHoldersTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(ov_plugin, OVHoldersTestOnImportedNetwork,
INSTANTIATE_TEST_SUITE_P(ov_plugin_mandatory, OVHoldersTestOnImportedNetwork,
::testing::ValuesIn(return_all_possible_device_combination()),
OVHoldersTestOnImportedNetwork::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,507 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include <exec_graph_info.hpp>
#include <openvino/pass/serialize.hpp>
#include <ie_ngraph_utils.hpp>
#include "base/ov_behavior_test_utils.hpp"
#include "common_test_utils/ngraph_test_utils.hpp"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
namespace ov {
namespace test {
namespace behavior {
typedef std::tuple<
ov::element::Type_t, // Element type
std::string, // Device name
ov::AnyMap // Config
> OVCompiledGraphImportExportTestParams;
// Parameterized fixture for compiled-model import/export behaviour tests.
// The parameter tuple (OVCompiledGraphImportExportTestParams) carries the
// element type used to build the test model, the target device string and
// the plugin configuration map.
class OVCompiledGraphImportExportTest : public testing::WithParamInterface<OVCompiledGraphImportExportTestParams>,
public OVCompiledNetworkTestBase {
public:
// Builds a readable gtest case name from the parameter tuple.
// NOTE(review): plugin skip lists (disabledTestPatterns) match against the
// "targetDevice=<...>" substring produced here — keep the format stable.
static std::string getTestCaseName(testing::TestParamInfo<OVCompiledGraphImportExportTestParams> obj) {
ov::element::Type_t elementType;
std::string targetDevice;
ov::AnyMap configuration;
std::tie(elementType, targetDevice, configuration) = obj.param;
// ':' is not allowed in gtest names (e.g. "HETERO:CPU") — replace with '.'
std::replace(targetDevice.begin(), targetDevice.end(), ':', '.');
std::ostringstream result;
result << "targetDevice=" << targetDevice << "_";
result << "elementType=" << elementType << "_";
if (!configuration.empty()) {
result << "config=(";
for (const auto& config : configuration) {
result << config.first << "=";
// ov::Any::print serializes the value into the stream
config.second.print(result);
result << "_";
}
result << ")";
}
return result.str();
}
void SetUp() override {
std::tie(elementType, target_device, configuration) = this->GetParam();
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED();
APIBaseTest::SetUp();
}
void TearDown() override {
// A non-empty configuration may leave the cached plugin in a modified
// state; drop the cache so subsequent tests get a fresh core.
if (!configuration.empty()) {
utils::PluginCache::get().reset();
}
APIBaseTest::TearDown();
}
protected:
// Shared core from the global plugin cache (reset in TearDown when a
// custom configuration was used).
std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
// Plugin configuration, element type and model under test, all set from
// the test parameters in SetUp().
ov::AnyMap configuration;
ov::element::Type_t elementType;
std::shared_ptr<ov::Model> function;
};
// Exports a compiled two-input/two-output model (Param1->Relu->Result1,
// Concat(Relu, Param2)->Result2) through a stream and imports it back, then
// verifies that tensor names, partial shapes and element types survive the
// round trip and that I/O is addressable by tensor names ("data1", "relu", ...)
// but NOT by node friendly names ("param1", "relu_op", ...).
TEST_P(OVCompiledGraphImportExportTest, importExportedFunction) {
    ov::CompiledModel execNet;

    // Create simple function
    {
        auto param1 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
        param1->set_friendly_name("param1");
        param1->output(0).get_tensor().set_names({"data1"});
        auto param2 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
        param2->set_friendly_name("param2");
        param2->output(0).get_tensor().set_names({"data2"});
        auto relu = std::make_shared<ov::opset8::Relu>(param1);
        relu->set_friendly_name("relu_op");
        relu->output(0).get_tensor().set_names({"relu"});
        auto result1 = std::make_shared<ov::opset8::Result>(relu);
        result1->set_friendly_name("result1");
        auto concat = std::make_shared<ov::opset8::Concat>(OutputVector{relu, param2}, 1);
        concat->set_friendly_name("concat_op");
        concat->output(0).get_tensor().set_names({"concat"});
        auto result2 = std::make_shared<ov::opset8::Result>(concat);
        result2->set_friendly_name("result2");
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                      ngraph::ParameterVector{param1, param2});
        // Fixed typo "SingleRuLU" -> "SingleReLU" for consistency with the
        // sibling IE-interop tests below.
        function->set_friendly_name("SingleReLU");
    }

    execNet = core->compile_model(function, target_device, configuration);
    std::stringstream strm;
    execNet.export_model(strm);
    ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration);

    // Inputs: count preserved; input() without a name is ambiguous (2 inputs).
    EXPECT_EQ(function->inputs().size(), 2);
    EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size());
    EXPECT_THROW(importedExecNet.input(), ov::Exception);
    EXPECT_EQ(function->input(0).get_tensor().get_names(), importedExecNet.input(0).get_tensor().get_names());
    EXPECT_EQ(function->input(0).get_tensor().get_partial_shape(),
              importedExecNet.input(0).get_tensor().get_partial_shape());
    EXPECT_EQ(function->input(0).get_tensor().get_element_type(),
              importedExecNet.input(0).get_tensor().get_element_type());
    EXPECT_EQ(function->input(0).get_element_type(),
              importedExecNet.input(0).get_tensor().get_element_type());
    EXPECT_EQ(function->input(1).get_tensor().get_names(), importedExecNet.input(1).get_tensor().get_names());
    EXPECT_EQ(function->input(1).get_tensor().get_partial_shape(),
              importedExecNet.input(1).get_tensor().get_partial_shape());
    EXPECT_EQ(function->input(1).get_tensor().get_element_type(),
              importedExecNet.input(1).get_tensor().get_element_type());
    EXPECT_EQ(function->input(1).get_element_type(),
              importedExecNet.input(1).get_tensor().get_element_type());
    // Tensor-name lookup resolves to the matching port and no other.
    EXPECT_EQ(importedExecNet.input(0).get_node(), importedExecNet.input("data1").get_node());
    EXPECT_NE(importedExecNet.input(1).get_node(), importedExecNet.input("data1").get_node());
    EXPECT_EQ(importedExecNet.input(1).get_node(), importedExecNet.input("data2").get_node());
    EXPECT_NE(importedExecNet.input(0).get_node(), importedExecNet.input("data2").get_node());

    // Outputs: count preserved; output() without a name is ambiguous (2 outputs).
    EXPECT_EQ(function->outputs().size(), 2);
    EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size());
    EXPECT_THROW(importedExecNet.output(), ov::Exception);
    EXPECT_EQ(function->output(0).get_tensor().get_names(), importedExecNet.output(0).get_tensor().get_names());
    EXPECT_EQ(function->output(0).get_tensor().get_partial_shape(),
              importedExecNet.output(0).get_tensor().get_partial_shape());
    EXPECT_EQ(function->output(0).get_tensor().get_element_type(),
              importedExecNet.output(0).get_tensor().get_element_type());
    EXPECT_EQ(function->output(0).get_element_type(),
              importedExecNet.output(0).get_tensor().get_element_type());
    EXPECT_EQ(function->output(1).get_tensor().get_names(), importedExecNet.output(1).get_tensor().get_names());
    EXPECT_EQ(function->output(1).get_tensor().get_partial_shape(),
              importedExecNet.output(1).get_tensor().get_partial_shape());
    EXPECT_EQ(function->output(1).get_tensor().get_element_type(),
              importedExecNet.output(1).get_tensor().get_element_type());
    EXPECT_EQ(function->output(1).get_element_type(),
              importedExecNet.output(1).get_tensor().get_element_type());
    EXPECT_EQ(importedExecNet.output(0).get_node(), importedExecNet.output("relu").get_node());
    EXPECT_NE(importedExecNet.output(1).get_node(), importedExecNet.output("relu").get_node());
    EXPECT_EQ(importedExecNet.output(1).get_node(), importedExecNet.output("concat").get_node());
    EXPECT_NE(importedExecNet.output(0).get_node(), importedExecNet.output("concat").get_node());

    // Friendly names must NOT be addressable on the imported model.
    EXPECT_THROW(importedExecNet.input("param1"), ov::Exception);
    EXPECT_THROW(importedExecNet.input("param2"), ov::Exception);
    EXPECT_THROW(importedExecNet.output("concat_op"), ov::Exception);
    EXPECT_THROW(importedExecNet.output("relu_op"), ov::Exception);
}
// Round-trips a trivial Parameter->Result model through export/import and
// verifies the restored compiled model exposes exactly one input and one
// output, addressable by the tensor name "data" only (not by the friendly
// name "param"), with the element type preserved.
TEST_P(OVCompiledGraphImportExportTest, importExportedFunctionParameterResultOnly) {
    // Build the minimal pass-through model.
    {
        auto in_node = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
        in_node->set_friendly_name("param");
        in_node->output(0).get_tensor().set_names({"data"});
        auto out_node = std::make_shared<ov::opset8::Result>(in_node);
        out_node->set_friendly_name("result");
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{out_node},
                                                      ngraph::ParameterVector{in_node});
        function->set_friendly_name("ParamResult");
    }

    auto compiledModel = core->compile_model(function, target_device, configuration);
    std::stringstream modelStream;
    compiledModel.export_model(modelStream);
    ov::CompiledModel restoredModel = core->import_model(modelStream, target_device, configuration);

    // Single input: the no-argument accessor is unambiguous.
    EXPECT_EQ(function->inputs().size(), 1);
    EXPECT_EQ(function->inputs().size(), restoredModel.inputs().size());
    EXPECT_NO_THROW(restoredModel.input());
    EXPECT_NO_THROW(restoredModel.input("data").get_node());
    EXPECT_THROW(restoredModel.input("param"), ov::Exception);

    // Single output: reachable via tensor name, not via friendly name.
    EXPECT_EQ(function->outputs().size(), 1);
    EXPECT_EQ(function->outputs().size(), restoredModel.outputs().size());
    EXPECT_NO_THROW(restoredModel.output());
    EXPECT_EQ(function->output(0).get_tensor().get_names(),
              restoredModel.output(0).get_tensor().get_names());
    EXPECT_NO_THROW(restoredModel.output("data").get_node());
    EXPECT_THROW(restoredModel.output("param"), ov::Exception);

    // Element type survives the round trip on both ends.
    EXPECT_EQ(ov::element::Type(elementType), restoredModel.input("data").get_element_type());
    EXPECT_EQ(ov::element::Type(elementType), restoredModel.output("data").get_element_type());
}
// Round-trips a Constant->Result model (no parameters) through export/import
// and verifies the restored compiled model has zero inputs and one output
// reachable by the tensor name "data" only, with the element type preserved.
TEST_P(OVCompiledGraphImportExportTest, importExportedFunctionConstantResultOnly) {
    // Build the minimal constant-only model.
    {
        auto const_node = std::make_shared<ov::opset8::Constant>(elementType, ngraph::Shape({1, 3, 24, 24}));
        const_node->set_friendly_name("constant");
        const_node->output(0).get_tensor().set_names({"data"});
        auto result_node = std::make_shared<ov::opset8::Result>(const_node);
        result_node->set_friendly_name("result");
        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result_node},
                                                      ngraph::ParameterVector{});
        function->set_friendly_name("ConstResult");
    }

    auto compiledModel = core->compile_model(function, target_device, configuration);
    std::stringstream modelStream;
    compiledModel.export_model(modelStream);
    ov::CompiledModel restoredModel = core->import_model(modelStream, target_device, configuration);

    // No inputs at all: every input accessor must throw.
    EXPECT_EQ(function->inputs().size(), 0);
    EXPECT_EQ(function->inputs().size(), restoredModel.inputs().size());
    EXPECT_THROW(restoredModel.input(), ov::Exception);
    EXPECT_THROW(restoredModel.input("data"), ov::Exception);
    EXPECT_THROW(restoredModel.input("constant"), ov::Exception);

    // Single output: reachable via tensor name, not via friendly name.
    EXPECT_EQ(function->outputs().size(), 1);
    EXPECT_EQ(function->outputs().size(), restoredModel.outputs().size());
    EXPECT_NO_THROW(restoredModel.output());
    EXPECT_EQ(function->output(0).get_tensor().get_names(),
              restoredModel.output(0).get_tensor().get_names());
    EXPECT_NO_THROW(restoredModel.output("data").get_node());
    EXPECT_THROW(restoredModel.output("constant"), ov::Exception);

    EXPECT_EQ(ov::element::Type(elementType), restoredModel.output("data").get_element_type());
}
// Reads an opset8 model from an inline V10 IR string, compiles it, exports
// and re-imports the compiled model. V10 models read this way keep the
// original layer names addressable on the model and the compiled model
// (see the NO_THROW checks below), and the compiled I/O is expected to be
// reported as f32 even though the IR declares FP16 ports.
TEST_P(OVCompiledGraphImportExportTest, readFromV10IR) {
std::string model = R"V0G0N(
<net name="Network" version="10">
    <layers>
        <layer name="in1" type="Parameter" id="0" version="opset8">
            <data element_type="f16" shape="1,3,22,22"/>
            <output>
                <port id="0" precision="FP16" names="data">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </output>
        </layer>
        <layer name="round" id="1" type="Round" version="opset8">
            <data mode="half_to_even"/>
            <input>
                <port id="1" precision="FP16">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </input>
            <output>
                <port id="2" precision="FP16" names="r">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </output>
        </layer>
        <layer name="output" type="Result" id="2" version="opset8">
            <input>
                <port id="0" precision="FP16">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </input>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
        <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
    </edges>
</net>
)V0G0N";
// Read from the in-memory string; no weights tensor is needed for this IR.
function = core->read_model(model, ov::Tensor());
EXPECT_EQ(function->inputs().size(), 1);
EXPECT_EQ(function->outputs().size(), 1);
EXPECT_NO_THROW(function->input("in1")); // remove if read_model does not change function names
EXPECT_NO_THROW(function->output("round")); // remove if read_model does not change function names
// Layer names remain addressable after compilation ...
ov::CompiledModel execNet = core->compile_model(function, target_device, configuration);
EXPECT_EQ(execNet.inputs().size(), 1);
EXPECT_EQ(execNet.outputs().size(), 1);
EXPECT_NO_THROW(execNet.input("in1"));
EXPECT_NO_THROW(execNet.output("round"));
// ... and after an export/import round trip.
std::stringstream strm;
execNet.export_model(strm);
ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration);
EXPECT_EQ(importedExecNet.inputs().size(), 1);
EXPECT_EQ(importedExecNet.outputs().size(), 1);
EXPECT_NO_THROW(importedExecNet.input("in1"));
EXPECT_NO_THROW(importedExecNet.output("round"));
// FP16 V10 IR ports are expected to surface as f32 on the compiled model.
EXPECT_EQ(importedExecNet.input().get_element_type(), ov::element::f32);
EXPECT_EQ(importedExecNet.output().get_element_type(), ov::element::f32);
}
// Converts an ov::AnyMap into the legacy std::map<std::string, std::string>
// configuration format expected by the InferenceEngine API. Boolean values
// become "YES"/"NO"; every other value is serialized via ov::Any::print.
static std::map<std::string, std::string> any_copy(const ov::AnyMap& params) {
    std::map<std::string, std::string> converted;
    for (const auto& entry : params) {
        const Any& value = entry.second;
        std::string text;
        if (value.is<bool>()) {
            // Legacy configs spell booleans as YES/NO.
            text = value.as<bool>() ? "YES" : "NO";
        } else {
            std::stringstream buffer;
            value.print(buffer);
            text = buffer.str();
        }
        converted.emplace(entry.first, std::move(text));
    }
    return converted;
}
// Exports a network compiled through the legacy InferenceEngine API
// (LoadNetwork/Export) and imports the blob with the OV 2.0 core. On the
// imported model BOTH tensor names ("data1", "relu", ...) and legacy
// friendly names ("param1", "relu_op", ...) are expected to be addressable,
// and element types are expected to follow the legacy precision
// normalization computed near the end of the test.
TEST_P(OVCompiledGraphImportExportTest, importExportedIENetwork) {
std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
InferenceEngine::ExecutableNetwork execNet;
// Create simple function
{
auto param1 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param1->set_friendly_name("param1");
param1->output(0).get_tensor().set_names({"data1"});
auto param2 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param2->set_friendly_name("param2");
param2->output(0).get_tensor().set_names({"data2"});
auto relu = std::make_shared<ov::opset8::Relu>(param1);
relu->set_friendly_name("relu_op");
relu->output(0).get_tensor().set_names({"relu"});
auto result1 = std::make_shared<ov::opset8::Result>(relu);
result1->set_friendly_name("result1");
auto concat = std::make_shared<ov::opset8::Concat>(OutputVector{relu, param2}, 1);
concat->set_friendly_name("concat_op");
concat->output(0).get_tensor().set_names({"concat"});
auto result2 = std::make_shared<ov::opset8::Result>(concat);
result2->set_friendly_name("result2");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
ngraph::ParameterVector{param1, param2});
function->set_friendly_name("SingleReLU");
}
// Compile and export with the legacy API, import with the 2.0 core.
execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), target_device, any_copy(configuration));
std::stringstream strm;
execNet.Export(strm);
ov::CompiledModel importedExecNet = core->import_model(strm, target_device, configuration);
EXPECT_EQ(function->inputs().size(), 2);
EXPECT_EQ(function->inputs().size(), importedExecNet.inputs().size());
// Two inputs -> the no-argument accessor is ambiguous.
EXPECT_THROW(importedExecNet.input(), ov::Exception);
// Both tensor names and legacy friendly names resolve after the IE round trip.
EXPECT_NO_THROW(importedExecNet.input("data1").get_node());
EXPECT_NO_THROW(importedExecNet.input("data2").get_node());
EXPECT_NO_THROW(importedExecNet.input("param1").get_node());
EXPECT_NO_THROW(importedExecNet.input("param2").get_node());
EXPECT_EQ(function->outputs().size(), 2);
EXPECT_EQ(function->outputs().size(), importedExecNet.outputs().size());
EXPECT_THROW(importedExecNet.output(), ov::Exception);
// Output tensor names are NOT preserved verbatim by the legacy path.
EXPECT_NE(function->output(0).get_tensor().get_names(),
importedExecNet.output(0).get_tensor().get_names());
EXPECT_NO_THROW(importedExecNet.output("relu").get_node());
EXPECT_NO_THROW(importedExecNet.output("concat").get_node());
EXPECT_NO_THROW(importedExecNet.output("relu_op").get_node());
EXPECT_NO_THROW(importedExecNet.output("concat_op").get_node());
// Expected legacy normalization: 32/64-bit integral outputs surface as i32,
// everything else as f32; f16 inputs are promoted to f32.
const auto outputType = elementType == ngraph::element::i32 ||
elementType == ngraph::element::u32 ||
elementType == ngraph::element::i64 ||
elementType == ngraph::element::u64 ? ngraph::element::i32 : ngraph::element::f32;
const auto inputType = elementType == ngraph::element::f16 ? ngraph::element::Type_t::f32 : elementType;
EXPECT_EQ(inputType, importedExecNet.input("param1").get_element_type());
EXPECT_EQ(inputType, importedExecNet.input("param2").get_element_type());
EXPECT_EQ(outputType, importedExecNet.output("concat_op").get_element_type());
EXPECT_EQ(outputType, importedExecNet.output("relu_op").get_element_type());
}
// Parameter->Result round trip. Despite the "IENetwork" name, this test uses
// the OV 2.0 core only (see the comment below for why the legacy path cannot
// be used), and checks that I/O precisions observed before export match those
// after import.
TEST_P(OVCompiledGraphImportExportTest, importExportedIENetworkParameterResultOnly) {
// New plugin API wraps CNNNetwork conversions into model, it is why parameter->result graphs won't work in legacy API with new plugin
// NOTE(review): this local 'core' shadows the fixture member; both are
// obtained from the plugin cache.
std::shared_ptr<ov::Core> core = ov::test::utils::PluginCache::get().core();
ov::CompiledModel compiled_model;
// Create a simple function
{
auto param = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param->set_friendly_name("param");
param->output(0).get_tensor().set_names({"data"});
auto result = std::make_shared<ov::opset8::Result>(param);
result->set_friendly_name("result");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
function->set_friendly_name("ParamResult");
}
compiled_model = core->compile_model(function, target_device, configuration);
// Capture the precisions the plugin chose before exporting.
auto inputPrecision = compiled_model.input().get_element_type();
auto outputPrecision = compiled_model.output().get_element_type();
std::stringstream strm;
compiled_model.export_model(strm);
ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration);
EXPECT_EQ(function->inputs().size(), 1);
EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size());
EXPECT_NO_THROW(importedCompiledModel.input());
EXPECT_NO_THROW(importedCompiledModel.input("data").get_node());
EXPECT_EQ(function->outputs().size(), 1);
EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size());
EXPECT_NO_THROW(importedCompiledModel.output());
EXPECT_EQ(function->output(0).get_tensor().get_names(), importedCompiledModel.output(0).get_tensor().get_names());
EXPECT_NO_THROW(importedCompiledModel.output("data").get_node());
// Import must report the same precisions as the original compiled model.
EXPECT_EQ(inputPrecision, importedCompiledModel.input("data").get_element_type());
EXPECT_EQ(outputPrecision, importedCompiledModel.output("data").get_element_type());
}
// Exports a Constant->Result network through the legacy IE API
// (LoadNetwork/Export) and imports the blob with the OV 2.0 core. The
// imported model must have no inputs; the single output stays reachable via
// the tensor name "data" AND the friendly name "constant", though the
// original tensor-name set is not preserved verbatim (EXPECT_NE below).
TEST_P(OVCompiledGraphImportExportTest, importExportedIENetworkConstantResultOnly) {
std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
InferenceEngine::ExecutableNetwork execNet;
// Create a simple function
{
auto constant = std::make_shared<ov::opset8::Constant>(elementType, ngraph::Shape({1, 3, 24, 24}));
constant->set_friendly_name("constant");
constant->output(0).get_tensor().set_names({"data"});
auto result = std::make_shared<ov::opset8::Result>(constant);
result->set_friendly_name("result");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
ngraph::ParameterVector{});
function->set_friendly_name("ConstResult");
}
execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), target_device, any_copy(configuration));
// Expected precision comes from what the legacy network actually reports
// for the "constant" output, converted to the ov element type.
auto outputPrecision = InferenceEngine::details::convertPrecision(execNet.GetOutputsInfo().at("constant")->getPrecision());
std::stringstream strm;
execNet.Export(strm);
ov::CompiledModel importedCompiledModel = core->import_model(strm, target_device, configuration);
// No inputs at all: every input accessor must throw.
EXPECT_EQ(function->inputs().size(), 0);
EXPECT_EQ(function->inputs().size(), importedCompiledModel.inputs().size());
EXPECT_THROW(importedCompiledModel.input(), ov::Exception);
EXPECT_THROW(importedCompiledModel.input("data"), ov::Exception);
EXPECT_THROW(importedCompiledModel.input("constant"), ov::Exception);
EXPECT_EQ(function->outputs().size(), 1);
EXPECT_EQ(function->outputs().size(), importedCompiledModel.outputs().size());
EXPECT_NO_THROW(importedCompiledModel.output());
// Tensor-name set changes across the legacy round trip ...
EXPECT_NE(function->output(0).get_tensor().get_names(),
importedCompiledModel.output(0).get_tensor().get_names());
// ... but both the tensor name and the friendly name still resolve.
EXPECT_NO_THROW(importedCompiledModel.output("data").get_node());
EXPECT_NO_THROW(importedCompiledModel.output("constant").get_node());
EXPECT_EQ(outputPrecision, importedCompiledModel.output("data").get_element_type());
EXPECT_EQ(outputPrecision, importedCompiledModel.output("constant").get_element_type());
}
// Reverse direction of importExportedIENetwork: compiles and exports via the
// OV 2.0 API (compile_model/export_model) and imports the blob with the
// legacy InferenceEngine core, then checks the legacy Inputs/Outputs info is
// keyed by friendly names with the matching converted precision.
TEST_P(OVCompiledGraphImportExportTest, ovImportExportedFunction) {
std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
ov::CompiledModel execNet;
// Create simple function
{
auto param1 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param1->set_friendly_name("param1");
param1->output(0).get_tensor().set_names({"data1"});
auto param2 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
param2->set_friendly_name("param2");
param2->output(0).get_tensor().set_names({"data2"});
auto relu = std::make_shared<ov::opset8::Relu>(param1);
relu->set_friendly_name("relu_op");
relu->output(0).get_tensor().set_names({"relu"});
auto result1 = std::make_shared<ov::opset8::Result>(relu);
result1->set_friendly_name("result1");
auto concat = std::make_shared<ov::opset8::Concat>(OutputVector{relu, param2}, 1);
concat->set_friendly_name("concat_op");
concat->output(0).get_tensor().set_names({"concat"});
auto result2 = std::make_shared<ov::opset8::Result>(concat);
result2->set_friendly_name("result2");
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
ngraph::ParameterVector{param1, param2});
function->set_friendly_name("SingleReLU");
}
// Export with 2.0 API, import with the legacy API.
execNet = core->compile_model(function, target_device, configuration);
std::stringstream strm;
execNet.export_model(strm);
InferenceEngine::ExecutableNetwork importedExecNet = ie->ImportNetwork(strm, target_device, any_copy(configuration));
// Legacy info maps are keyed by friendly names (param1/param2, relu_op/concat_op).
EXPECT_EQ(function->inputs().size(), 2);
EXPECT_EQ(function->inputs().size(), importedExecNet.GetInputsInfo().size());
EXPECT_NO_THROW(importedExecNet.GetInputsInfo()["param1"]);
EXPECT_NO_THROW(importedExecNet.GetInputsInfo()["param2"]);
EXPECT_EQ(function->outputs().size(), 2);
EXPECT_EQ(function->outputs().size(), importedExecNet.GetOutputsInfo().size());
EXPECT_NO_THROW(importedExecNet.GetOutputsInfo()["relu_op"]);
EXPECT_NO_THROW(importedExecNet.GetOutputsInfo()["concat_op"]);
// All legacy I/O precisions must equal the IE precision converted from the
// element type the model was built with.
const auto prc = InferenceEngine::details::convertPrecision(elementType);
EXPECT_EQ(prc, importedExecNet.GetInputsInfo()["param1"]->getPrecision());
EXPECT_EQ(prc, importedExecNet.GetInputsInfo()["param2"]->getPrecision());
EXPECT_EQ(prc, importedExecNet.GetOutputsInfo()["concat_op"]->getPrecision());
EXPECT_EQ(prc, importedExecNet.GetOutputsInfo()["relu_op"]->getPrecision());
}
} // namespace behavior
} // namespace test
} // namespace ov