Add ov::loaded_from_cache checking for CompileModelLoadFromMemoryTest (#15030)

* Add ov::loaded_from_cache checking for CompileModelLoadFromMemoryTestBase

* Skip GNA in skip_tests_config
River Li 2023-01-11 14:45:11 +08:00 committed by GitHub
parent e0359d3085
commit 8c84faeecd
5 changed files with 61 additions and 13 deletions
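The check added by this PR relies on ov::loaded_from_cache, a read-only property of ov::CompiledModel that reports whether the model was imported from the compile-model cache rather than compiled from scratch. A minimal standalone sketch of the round-trip the test asserts (model path, device, and cache directory are placeholders, and the cache directory is assumed to start empty):

#include <cassert>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    core.set_property(ov::cache_dir("model_cache"));  // enable the compiled-blob cache

    // First compile: the cache is empty, so this is a full compilation
    // that also writes a blob into model_cache/.
    auto first = core.compile_model("model.xml", "CPU");
    assert(!first.get_property(ov::loaded_from_cache));

    // Second compile: the blob written above is imported instead,
    // and the compiled model reports that it came from the cache.
    auto second = core.compile_model("model.xml", "CPU");
    assert(second.get_property(ov::loaded_from_cache));
    return 0;
}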

@@ -179,4 +179,15 @@ namespace {
                          ::testing::Combine(::testing::ValuesIn(TestTargets),
                                             ::testing::ValuesIn(LoadFromFileConfigs)),
                          CompileModelLoadFromMemoryTestBase::getTestCaseName);
+
+const std::vector<ov::AnyMap> CpuConfigs = {
+    {ov::num_streams(2)},
+};
+const std::vector<std::string> TestCpuTargets = {
+    CommonTestUtils::DEVICE_CPU,
+};
+INSTANTIATE_TEST_SUITE_P(smoke_CachingSupportCase_CPU,
+                         CompileModelLoadFromMemoryTestBase,
+                         ::testing::Combine(::testing::ValuesIn(TestCpuTargets), ::testing::ValuesIn(CpuConfigs)),
+                         CompileModelLoadFromMemoryTestBase::getTestCaseName);
 } // namespace

@@ -69,6 +69,7 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*OVExecutableNetworkBaseTest.*CanSetConfigToExecNet.*)",
         R"(.*OVExecutableNetworkBaseTest.*CanGetInputsInfoAndCheck.*)",
         R"(.*OVExecutableNetworkBaseTest.*getOutputsFromSplitFunctionWithSeveralOutputs.*)",
+        R"(.*OVExecutableNetworkBaseTest.*canLoadNetworkFromMemory.*)",
         R"(.*OVClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK.*GetMetricNoThrow.*)",
         R"(.*Behavior.*OVExecutableNetworkBaseTest.*get(Inputs|Outputs)FromFunctionWithSeveral(Inputs|Outputs).*)",
         // TODO: temporary disabled. Need to be enabled when PR 9282 is merged
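Patterns returned by disabledTestPatterns() are regular expressions matched against full test names; the new canLoadNetworkFromMemory entry replaces the in-test GNA skip that the next hunk removes. A hypothetical matcher sketching how such a list is typically consumed (isTestDisabled is illustrative, not the actual test utility):

#include <regex>
#include <string>
#include <vector>

// Illustrative only: returns true when the full test name matches any
// disabled pattern, e.g. ".*OVExecutableNetworkBaseTest.*canLoadNetworkFromMemory.*".
bool isTestDisabled(const std::string& fullName, const std::vector<std::string>& patterns) {
    for (const auto& p : patterns) {
        if (std::regex_match(fullName, std::regex(p)))
            return true;
    }
    return false;
}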

@@ -135,10 +135,6 @@ TEST_P(OVExecutableNetworkBaseTest, canLoadNetworkFromMemory) {
     </edges>
 </net>
 )V0G0N";
 
-    if (target_device.find("GNA") != std::string::npos) {
-        GTEST_SKIP();
-    }
-
     EXPECT_NO_THROW(auto execNet = core->compile_model(model, ov::Tensor(), target_device, configuration));
 }

@@ -101,6 +101,7 @@ public:
     void SetUp() override;
     void TearDown() override;
     void run() override;
+    bool importExportSupported(ov::Core &core) const;
 };
 
 using compileKernelsCacheParams = std::tuple<

@@ -335,10 +335,30 @@ std::string CompileModelLoadFromMemoryTestBase::getTestCaseName(
     return result.str();
 }
 
+bool CompileModelLoadFromMemoryTestBase::importExportSupported(ov::Core& core) const {
+    auto supportedProperties = core.get_property(targetDevice, ov::supported_properties);
+    if (std::find(supportedProperties.begin(), supportedProperties.end(), ov::device::capabilities) ==
+        supportedProperties.end()) {
+        return false;
+    }
+    auto device_capabilities = core.get_property(targetDevice, ov::device::capabilities);
+    if (std::find(device_capabilities.begin(),
+                  device_capabilities.end(),
+                  std::string(ov::device::capability::EXPORT_IMPORT)) == device_capabilities.end()) {
+        return false;
+    }
+    return true;
+}
+
 void CompileModelLoadFromMemoryTestBase::SetUp() {
     ovModelWithName funcPair;
     std::tie(targetDevice, configuration) = GetParam();
     target_device = targetDevice;
+    if ((targetDevice.find("GPU") != std::string::npos)) {
+#if !defined(_WIN32) && !defined(_WIN64)
+        setenv("OV_GPU_CACHE_MODEL", "1", 1);
+#endif
+    }
     APIBaseTest::SetUp();
     std::stringstream ss;
     auto hash = std::hash<std::string>()(SubgraphBaseTest::GetTestName());
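importExportSupported() gates the new assertion: ov::loaded_from_cache can only be expected to flip to true on devices that can actually export and re-import a compiled blob. The first lookup guards against devices that do not list ov::device::capabilities among their supported properties, since querying an unsupported property would throw. Reduced to its core, the probe amounts to something like this (a sketch, not the test code itself):

#include <algorithm>
#include <string>
#include <openvino/openvino.hpp>

// Sketch: a device can satisfy the cache round-trip only if it
// advertises the EXPORT_IMPORT capability.
bool supportsExportImport(ov::Core& core, const std::string& device) {
    auto caps = core.get_property(device, ov::device::capabilities);
    return std::find(caps.begin(), caps.end(),
                     std::string(ov::device::capability::EXPORT_IMPORT)) != caps.end();
}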
@@ -392,20 +412,39 @@ void CompileModelLoadFromMemoryTestBase::TearDown() {
     core->set_property(ov::cache_dir());
     APIBaseTest::TearDown();
     weights_vector.clear();
+    if ((targetDevice.find("GPU") != std::string::npos)) {
+#if !defined(_WIN32) && !defined(_WIN64)
+        setenv("OV_GPU_CACHE_MODEL", "", 1);
+#endif
+    }
 }
 
 void CompileModelLoadFromMemoryTestBase::run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
     core->set_property(ov::cache_dir(m_cacheFolderName));
 
-    try {
-        compiledModel = core->compile_model(m_model, m_weights, targetDevice, configuration);
-        inferRequest = compiledModel.create_infer_request();
-        inferRequest.infer();
-    } catch (const Exception& ex) {
-        GTEST_FAIL() << "Can't loadNetwork with model path " << m_modelName << "\nException [" << ex.what() << "]"
-                     << std::endl;
-    } catch (...) {
-        GTEST_FAIL() << "Can't compile network with model path " << m_modelName << std::endl;
-    }
+    for (int i = 0; i < 2; i++) {
+        try {
+            compiledModel = core->compile_model(m_model, m_weights, targetDevice, configuration);
+            if (importExportSupported(*core)) {
+                ASSERT_EQ(i != 0, compiledModel.get_property(ov::loaded_from_cache));
+            }
+            inferRequest = compiledModel.create_infer_request();
+            inferRequest.infer();
+        } catch (const Exception& ex) {
+            GTEST_FAIL() << "Can't loadNetwork with model path " << m_modelName << "\nException [" << ex.what() << "]"
+                         << std::endl;
+        } catch (...) {
+            GTEST_FAIL() << "Can't compile network with model path " << m_modelName << std::endl;
+        }
+
+        // For GPU plugin, KEY_GPU_THROUGHPUT_STREAMS will lead to config.throughput_streams==2, and Export stops.
+        if (targetDevice.find("GPU") != std::string::npos) {
+            auto item = configuration.find(ov::hint::performance_mode.name());
+            if (item != configuration.end() &&
+                item->second.as<ov::hint::PerformanceMode>() == ov::hint::PerformanceMode::THROUGHPUT) {
+                break;
+            }
+        }
+    }
 }
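The two-pass loop is the heart of the change: pass 0 compiles cold and populates the cache, pass 1 must import the blob, which is exactly what ASSERT_EQ(i != 0, ...) encodes. The early break mirrors the comment above: on GPU in THROUGHPUT mode the export step is skipped, so a second pass could not load from the cache and would fail spuriously. The expected sequence, assuming an initially empty cache directory and a device with export/import support:

// pass i | cache before the pass | compile path      | loaded_from_cache
// -------+-----------------------+-------------------+---------------------------
//   0    | empty                 | full compilation  | false  (i != 0 is false)
//   1    | blob from pass 0      | import from cache | true   (i != 0 is true)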