fix coredump when quit benchmark_app (#13026)

* fix coredump when quit benchmark_app

Signed-off-by: fishbell <bell.song@intel.com>

* enable tests

Signed-off-by: fishbell <bell.song@intel.com>

* add macro to handle CPU not built

Signed-off-by: fishbell <bell.song@intel.com>

Signed-off-by: fishbell <bell.song@intel.com>
Author: yanlan song
Date: 2022-09-15 16:47:11 +08:00
Committed by: GitHub
Parent: 80f1677c2c
Commit: 72c3bf222b

9 changed files with 63 additions and 28 deletions
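The hunks below all serve one goal: make sure an infer request created through AUTO/MULTI keeps a reference to the shared object (.so) of the plugin that actually backs it. When that reference is missing, quitting benchmark_app can destroy the request (or a tensor it handed out) after the plugin library has already been unloaded, and the destructor jumps into unmapped code — the coredump in the title. A minimal sketch of the failure mode, in plain C++ with hypothetical stand-in types (PluginLibrary and InferRequest below are illustrative, not OpenVINO's SoPtr/IInferRequestInternal):

#include <iostream>
#include <memory>

struct PluginLibrary {                    // stands in for a dlopen()'ed plugin .so
    ~PluginLibrary() { std::cout << "plugin .so unloaded\n"; }
};

struct InferRequest {
    std::shared_ptr<void> so;             // keep-alive handle; the role of setPointerToSo()
    ~InferRequest() { std::cout << "request destroyed\n"; }
};

int main() {
    auto request = std::make_shared<InferRequest>();
    {
        auto library = std::make_shared<PluginLibrary>();
        request->so = library;            // without this assignment the library would be
    }                                     // released at this brace, while the request
                                          // still points into it -> crash on teardown
    // With the handle held, destruction is ordered:
    // "request destroyed", then "plugin .so unloaded".
    return 0;
}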


@@ -549,7 +549,8 @@ IInferPtr AutoSchedule::CreateInferRequest() {
     if (!syncRequestImpl)
         syncRequestImpl = CreateInferRequestImpl(execNetwork->_networkInputs, execNetwork->_networkOutputs);
     syncRequestImpl->setPointerToExecutableNetworkInternal(execNetwork);
-    if (_passthroughExeNet) {
+    bool isCumulative = (_autoSContext->_performanceHint == IE::PluginConfigParams::CUMULATIVE_THROUGHPUT) ? true : false;
+    if (_passthroughExeNet && !isCumulative) {
         std::string perfmode;
         try {
             perfmode = _passthroughExeNet->GetConfig(
@@ -557,10 +558,22 @@ IInferPtr AutoSchedule::CreateInferRequest() {
         } catch(...) {
             LOG_INFO("query perf hint from passthrough network failed");
         }
-        if (_autoSContext->_batchingDisabled || perfmode != CONFIG_VALUE(THROUGHPUT))
+        if (_autoSContext->_batchingDisabled || perfmode != CONFIG_VALUE(THROUGHPUT)) {
             syncRequestImpl->setPointerToSo(_passthroughExeNet._so);
+        } else {
+            auto so = _passthroughExeNet._ptr->GetPointerToSo();
+            // Get the _so from passthrough executable network when batch plugin is disable.
+            if (!so)
+                so = _passthroughExeNet._so;
+            syncRequestImpl->setPointerToSo(so);
+        }
+    } else if (std::static_pointer_cast<MultiDeviceInferRequest>(syncRequestImpl)->GetSharedRequest()) {
+        // cumulative case, load to MULTI:*
+        auto sharedMultiRequest = std::static_pointer_cast<MultiDeviceInferRequest>(syncRequestImpl)->GetSharedRequest();
+        if (sharedMultiRequest._ptr->getPointerToSo())
+            syncRequestImpl->setPointerToSo(sharedMultiRequest._ptr->getPointerToSo());
         else
-            syncRequestImpl->setPointerToSo(_passthroughExeNet._ptr->GetPointerToSo());
+            syncRequestImpl->setPointerToSo(sharedMultiRequest._so);
     }
     return std::make_shared<AsyncInferRequest>(shared_from_this(),
                                                syncRequestImpl,

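Both branches of the new logic end in the same defensive pattern: prefer the handle reported by the wrapped object, and fall back to the outer _so when that is empty (the in-diff comment notes this happens when the batch plugin is disabled). A compact sketch of that selection, with a hypothetical holder type in place of OpenVINO's SoPtr (field names here are illustrative):

#include <memory>

struct NetworkHandles {
    std::shared_ptr<void> inner;   // what _ptr->GetPointerToSo() reports; may be empty
    std::shared_ptr<void> outer;   // the _so stored on the passthrough network itself
};

// Pick a non-empty keep-alive handle for the infer request.
std::shared_ptr<void> select_so(const NetworkHandles& net) {
    auto so = net.inner;
    if (!so)               // e.g. batch plugin disabled: inner handle is empty,
        so = net.outer;    // so hold the outer handle instead
    return so;
}

Whichever branch wins, the request ends up holding some non-null handle, so the plugin library cannot be unloaded while the request is alive.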

@@ -194,24 +194,5 @@ IInferPtr BinderMultiSchedule::CreateInferRequestImpl(IE::InputsDataMap networkInputs,
     return syncImpl;
 }
-IInferPtr BinderMultiSchedule::CreateInferRequest() {
-    auto execNetwork = std::dynamic_pointer_cast<MultiExecutableNetwork>(
-        _multiSContext->_executableNetwork.lock());
-    if (_passthroughExeNet) {
-        auto res = _passthroughExeNet->CreateInferRequest();
-        res->setPointerToExecutableNetworkInternal(execNetwork);
-        return res;
-    }
-    IInferPtr syncRequestImpl;
-    if (_multiSContext->_core && _multiSContext->_core->isNewAPI())
-        syncRequestImpl = CreateInferRequestImpl(execNetwork->_parameters, execNetwork->_results);
-    if (!syncRequestImpl)
-        syncRequestImpl = CreateInferRequestImpl(execNetwork->_networkInputs, execNetwork->_networkOutputs);
-    syncRequestImpl->setPointerToExecutableNetworkInternal(execNetwork);
-    return std::make_shared<AsyncInferRequest>(shared_from_this(),
-                                               syncRequestImpl,
-                                               execNetwork->_callbackExecutor);
-}
 } // namespace MultiDevicePlugin
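Note what the deleted override never did: neither its passthrough early-return nor its fallback path called setPointerToSo(), so requests created through the bind-buffer schedule held no library handle at all. With the override removed, BinderMultiSchedule inherits MultiSchedule::CreateInferRequest() (patched in a later hunk), which now covers the bind-buffer case via the _bindBuffer flag and sets the pointer correctly, leaving a single copy of the lifetime logic.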


@@ -18,7 +18,6 @@ namespace MultiDevicePlugin {
 class BinderMultiSchedule : public MultiSchedule {
 public:
     using Ptr = std::shared_ptr<BinderMultiSchedule>;
-    IInferPtr CreateInferRequest() override;
     IInferPtr CreateInferRequestImpl(IE::InputsDataMap networkInputs, IE::OutputsDataMap networkOutputs) override;
     IE::IInferRequestInternal::Ptr CreateInferRequestImpl(const std::vector<std::shared_ptr<const ov::Node>>& inputs,
                                                           const std::vector<std::shared_ptr<const ov::Node>>& outputs) override;


@@ -18,7 +18,6 @@
 namespace MultiDevicePlugin {
 class MultiExecutableNetwork : public ExecutableNetwork {
     friend IInferPtr MultiSchedule::CreateInferRequest();
-    friend IInferPtr BinderMultiSchedule::CreateInferRequest();
 public:
     using Ptr = std::shared_ptr<MultiExecutableNetwork>;


@@ -311,10 +311,21 @@ IInferPtr MultiSchedule::CreateInferRequest() {
         } catch(...) {
             LOG_INFO("query perf hint from passthrough network failed");
         }
-        if (_multiSContext->_batchingDisabled || perfmode != CONFIG_VALUE(THROUGHPUT))
+        if (_multiSContext->_batchingDisabled || perfmode != CONFIG_VALUE(THROUGHPUT)) {
             syncRequestImpl->setPointerToSo(_passthroughExeNet._so);
+        } else {
+            auto so = _passthroughExeNet._ptr->GetPointerToSo();
+            // Get the _so from passthrough executable network when batch plugin is disable.
+            if (!so)
+                so = _passthroughExeNet._so;
+            syncRequestImpl->setPointerToSo(so);
+        }
+    } else if (_multiSContext->_bindBuffer) {
+        auto sharedRequest = std::static_pointer_cast<MultiDeviceInferRequest>(syncRequestImpl)->GetSharedRequest();
+        if (sharedRequest._ptr->getPointerToSo())
+            syncRequestImpl->setPointerToSo(sharedRequest._ptr->getPointerToSo());
         else
-            syncRequestImpl->setPointerToSo(_passthroughExeNet._ptr->GetPointerToSo());
+            syncRequestImpl->setPointerToSo(sharedRequest._so);
     }
     return std::make_shared<AsyncInferRequest>(shared_from_this(),
                                                syncRequestImpl,

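This is the MULTI-side twin of the AutoSchedule hunk above: the same fallback for the passthrough handle, with the cumulative-throughput check replaced by the _multiSContext->_bindBuffer flag. The next hunk shows where that flag is set — in the same plugin.cpp branch that selects BinderMultiSchedule, so the flag and the schedule choice can no longer disagree.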

@@ -519,10 +519,12 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
     multiSContext->_LogTag = _LogTag;
     IExecutableNetworkInternal::Ptr impl;
     auto tmpiter = fullConfig.find(ov::intel_auto::device_bind_buffer.name());
-    if (tmpiter != fullConfig.end() && tmpiter->second == PluginConfigParams::YES)
+    if (tmpiter != fullConfig.end() && tmpiter->second == PluginConfigParams::YES) {
+        multiSContext->_bindBuffer = true;
         impl = std::make_shared<MultiExecutableNetwork>(multiSContext, std::make_shared<BinderMultiSchedule>());
-    else
+    } else {
         impl = std::make_shared<MultiExecutableNetwork>(multiSContext, std::make_shared<MultiSchedule>());
+    }
     if (!modelPath.empty()) {
         SetExeNetworkInfo(impl,
                           executableNetworkPerDevice.begin()->second->GetInputsInfo(),


@@ -16,4 +16,18 @@ namespace {
         //CommonTestUtils::DEVICE_BATCH,
         "HETERO:GPU"),
     OVHoldersTest::getTestCaseName);
+const std::vector<std::string> device_names_and_priorities = {
+    "MULTI:GPU",  // GPU via MULTI,
+    "AUTO:GPU",   // GPU via AUTO,
+#ifdef ENABLE_INTEL_CPU
+    "AUTO:GPU,CPU",   // GPU+CPU
+    "AUTO:CPU,GPU",   // CPU+GPU
+    "MULTI:GPU,CPU",  // GPU+CPU
+    "MULTI:CPU,GPU",  // CPU+GPU
+#endif
+};
+INSTANTIATE_TEST_SUITE_P(smoke_VirtualPlugin_BehaviorTests, OVHoldersTestWithConfig,
+                         ::testing::ValuesIn(device_names_and_priorities),
+                         OVHoldersTestWithConfig::getTestCaseName);
 }  // namespace
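The #ifdef ENABLE_INTEL_CPU guard is the "add macro to handle CPU not built" item from the commit message: the device strings that include CPU are only compiled in when the CPU plugin is part of the build, so the new instantiation does not fail on GPU-only builds. Each listed string is fed to the LoadedTensor case added in the last hunk below.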


@@ -5,6 +5,7 @@
 #pragma once
 #include "base/ov_behavior_test_utils.hpp"
+#include "openvino/runtime/intel_auto/properties.hpp"
 namespace ov {
 namespace test {
@@ -36,6 +37,8 @@ public:
     std::string targetDevice;
     std::string deathTestStyle;
 };
+using OVHoldersTestWithConfig = OVHoldersTest;
 }  // namespace behavior
 }  // namespace test
 }  // namespace ov
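OVHoldersTestWithConfig is a plain type alias of OVHoldersTest, so it reuses the fixture unchanged; googletest keys TEST_P/INSTANTIATE_TEST_SUITE_P registration on the spelled suite name, so the alias gives the new test case its own suite and its own device list without disturbing the existing OVHoldersTest instantiations.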


@@ -104,6 +104,19 @@ TEST_P(OVHoldersTest, LoadedRemoteContext) {
     }
 }
+TEST_P(OVHoldersTestWithConfig, LoadedTensor) {
+    ov::Tensor tensor;
+    {
+        ov::Core core = createCoreWithTemplate();
+        ov::AnyMap property;
+        property[ov::intel_auto::device_bind_buffer.name()] = true;
+        if (targetDevice.find("AUTO") != std::string::npos)
+            property[ov::hint::performance_mode.name()] = ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT;
+        auto compiled_model = core.compile_model(function, targetDevice, property);
+        auto request = compiled_model.create_infer_request();
+        tensor = request.get_input_tensor();
+    }
+}
 std::string OVHoldersTestOnImportedNetwork::getTestCaseName(testing::TestParamInfo<std::string> obj) {
     return "targetDevice=" + obj.param;