Fixed new API handling for the case when the core was removed (#12207)

* Fixed new API handling for the case when the core was removed

* Fixed code style

* Fixed typo

* Use new API by default

* Create core with template plugin

* Added Doxygen comment
Ilya Churaev 2022-07-22 17:46:08 +04:00 committed by GitHub
parent 23e513d679
commit 811a483acb
14 changed files with 55 additions and 49 deletions
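
In short: plugins used to determine the API mode by locking the weak ICore pointer and calling core->isNewAPI(). Once the ov::Core that owned the ICore was destroyed, the lock returned null and the plugins either fell back to the legacy request path or threw (e.g. "Unable to get API version. Core is unavailable"). This commit caches the flag on the plugin when the core is attached (SetCore) and exposes it via IInferencePlugin::IsNewAPI(), so the check keeps working even after the core has been removed. Below is a minimal sketch of the pattern, with simplified stand-in types rather than the exact OpenVINO interfaces:

#include <memory>

struct ICore {
    virtual bool isNewAPI() const = 0;
    virtual ~ICore() = default;
};

class PluginBase {
public:
    void SetCore(std::weak_ptr<ICore> core) {
        _core = core;
        if (auto locked = _core.lock())
            _isNewAPI = locked->isNewAPI();  // cache the flag while the core is still alive
    }

    // Old pattern (breaks once the core expires):
    //   auto core = _core.lock();
    //   if (core && core->isNewAPI()) { ... }
    // New pattern (keeps working after the core was removed):
    bool IsNewAPI() const noexcept { return _isNewAPI; }

private:
    std::weak_ptr<ICore> _core;
    bool _isNewAPI = true;  // same default the commit gives IInferencePlugin
};

The new InferDynamicNetworkWithLocalCore test below exercises exactly this scenario: a local ov::Core compiles the model, goes out of scope, and create_infer_request() is still expected to succeed.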

.gitignore

@@ -1,7 +1,7 @@
# build/artifact dirs
_*
/*[Bb]uild*/
[Bb]uild*/
cmake-build*
# but ensure we don't skip __init__.py and __main__.py
!__init__.py


@@ -166,10 +166,8 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C
// ! [executable_network:create_infer_request]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
InferenceEngine::IInferRequestInternal::Ptr internalRequest;
if (this->_plugin) {
const auto& core = _plugin->GetCore();
if (core && core->isNewAPI())
internalRequest = CreateInferRequestImpl(_parameters, _results);
if (this->_plugin && _plugin->IsNewAPI()) {
internalRequest = CreateInferRequestImpl(_parameters, _results);
}
if (!internalRequest)
internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);


@@ -264,6 +264,12 @@ public:
*/
virtual std::shared_ptr<ov::ICore> GetCore() const noexcept;
/**
* @brief Provides information about the used API
* @return true if the new API is used
*/
bool IsNewAPI() const noexcept;
/**
* @brief Gets reference to tasks execution manager
* @return Reference to ExecutorManager interface
@@ -350,6 +356,7 @@ protected:
std::map<std::string, std::string> _config; //!< A map config keys -> values
std::weak_ptr<ov::ICore> _core; //!< A pointer to ICore interface
std::shared_ptr<ExecutorManager> _executorManager; //!< A tasks execution manager
bool _isNewAPI;  //!< A flag which shows whether the new API is used
};
/**


@@ -77,7 +77,7 @@ OutputsDataMap copyInfo(const OutputsDataMap& networkOutputs) {
return _networkOutputs;
}
IInferencePlugin::IInferencePlugin() : _executorManager(InferenceEngine::executorManager()) {}
IInferencePlugin::IInferencePlugin() : _executorManager(InferenceEngine::executorManager()), _isNewAPI(true) {}
void IInferencePlugin::VersionStore::copyFrom(const Version& v) {
_dsc = v.description;
@@ -147,8 +147,7 @@ std::shared_ptr<IExecutableNetworkInternal> IInferencePlugin::LoadNetwork(
orig_function->get_friendly_name());
function->get_rt_info() = orig_function->get_rt_info();
}
const auto& core = GetCore();
if (function && core && !core->isNewAPI()) {
if (function && !IsNewAPI()) {
auto& rt_info = function->get_rt_info();
if (rt_info.find("version") == rt_info.end()) {
rt_info["version"] = int64_t(10);
@@ -161,9 +160,8 @@ std::shared_ptr<IExecutableNetworkInternal> IInferencePlugin::LoadNetwork(
std::dynamic_pointer_cast<const details::CNNNetworkNGraphImpl>(orig_icnn.shared_from_this());
OPENVINO_ASSERT(orig_impl != nullptr,
"Internal: orig_impl must be castable to details::CNNNetworkNGraphImpl");
auto new_impl = std::make_shared<details::CNNNetworkNGraphImpl>(function,
orig_impl->getExtensions(),
GetCore()->isNewAPI());
auto new_impl =
std::make_shared<details::CNNNetworkNGraphImpl>(function, orig_impl->getExtensions(), IsNewAPI());
network = CNNNetwork(new_impl);
for (const auto& inputInfo : orig_network.getInputsInfo()) {
auto toInfo = network.getInputsInfo().at(inputInfo.first);
@@ -253,12 +251,19 @@ std::shared_ptr<IExecutableNetworkInternal> IInferencePlugin::ImportNetwork(
void IInferencePlugin::SetCore(std::weak_ptr<ICore> core) {
IE_ASSERT(!core.expired());
_core = core;
auto locked_core = _core.lock();
if (locked_core)
_isNewAPI = locked_core->isNewAPI();
}
std::shared_ptr<ICore> IInferencePlugin::GetCore() const noexcept {
return _core.lock();
}
bool IInferencePlugin::IsNewAPI() const noexcept {
return _isNewAPI;
}
const std::shared_ptr<ExecutorManager>& IInferencePlugin::executorManager() const {
return _executorManager;
}
@@ -295,8 +300,7 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetwor
void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
const std::shared_ptr<const ov::Model>& function) {
const auto& core = GetCore();
bool newAPI = core && core->isNewAPI();
bool newAPI = IsNewAPI();
InferenceEngine::SetExeNetworkInfo(exeNetwork, function, newAPI);
exeNetwork->SetPointerToPlugin(shared_from_this());
}


@@ -480,10 +480,7 @@ InferenceEngine::IInferRequestInternal::Ptr AutoBatchExecutableNetwork::CreateIn
InferenceEngine::IInferRequestInternal::Ptr AutoBatchExecutableNetwork::CreateInferRequestImpl(
const std::vector<std::shared_ptr<const ov::Node>>& inputs,
const std::vector<std::shared_ptr<const ov::Node>>& outputs) {
if (!this->_plugin)
return nullptr;
const auto& core = _plugin->GetCore();
if (!core || !core->isNewAPI())
if (!this->_plugin || !_plugin->IsNewAPI())
return nullptr;
auto workerRequestPtrAndId = GetWorkerInferRequest();
return std::make_shared<AutoBatchInferRequest>(inputs,


@@ -804,10 +804,7 @@ void HeteroExecutableNetwork::Export(std::ostream& heteroModel) {
IInferRequestInternal::Ptr HeteroExecutableNetwork::CreateInferRequestImpl(
const std::vector<std::shared_ptr<const ov::Node>>& inputs,
const std::vector<std::shared_ptr<const ov::Node>>& outputs) {
if (!this->_plugin)
return nullptr;
const auto& core = _plugin->GetCore();
if (!core || !core->isNewAPI())
if (!this->_plugin || !_plugin->IsNewAPI())
return nullptr;
HeteroInferRequest::SubRequestsList inferRequests;
int index = 0;


@@ -42,10 +42,7 @@ namespace intel_cpu {
InferenceEngine::IInferRequestInternal::Ptr
ExecNetwork::CreateInferRequestImpl(const std::vector<std::shared_ptr<const ov::Node>>& inputs,
const std::vector<std::shared_ptr<const ov::Node>>& outputs) {
if (!this->_plugin)
return nullptr;
const auto& core = _plugin->GetCore();
if (!core || !core->isNewAPI())
if (!this->_plugin || !_plugin->IsNewAPI())
return nullptr;
return std::make_shared<InferRequest>(inputs, outputs, std::static_pointer_cast<ExecNetwork>(shared_from_this()));
}


@@ -819,11 +819,7 @@ void Engine::SetConfig(const std::map<std::string, std::string> &config) {
}
bool Engine::isLegacyAPI() const {
const auto& core = GetCore();
if (!core)
IE_CPU_PLUGIN_THROW() << "Unable to get API version. Core is unavailable";
return !core->isNewAPI();
return !IsNewAPI();
}
Parameter Engine::GetConfigLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const {


@@ -69,10 +69,7 @@ class GNAExecutableNetwork : public InferenceEngine::IExecutableNetworkInternal
InferenceEngine::IInferRequestInternal::Ptr
CreateInferRequestImpl(const std::vector<std::shared_ptr<const ov::Node>>& inputs,
const std::vector<std::shared_ptr<const ov::Node>>& outputs) override {
if (!this->_plugin)
return nullptr;
const auto& core = _plugin->GetCore();
if (!core || !core->isNewAPI())
if (!this->_plugin || !_plugin->IsNewAPI())
return nullptr;
return std::make_shared<GNAInferRequest>(plg, inputs, outputs);
}


@@ -117,10 +117,8 @@ IInferRequestInternal::Ptr CompiledModel::CreateInferRequest() {
}
}
if (this->_plugin) {
const auto& core = _plugin->GetCore();
if (core && core->isNewAPI())
internalRequest = CreateInferRequestImpl(_parameters, _results);
if (this->_plugin && _plugin->IsNewAPI()) {
internalRequest = CreateInferRequestImpl(_parameters, _results);
}
if (!internalRequest)
internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
@@ -139,7 +137,7 @@ std::shared_ptr<ngraph::Function> CompiledModel::GetExecGraphInfo() {
}
InferenceEngine::Parameter CompiledModel::GetConfig(const std::string &name) const {
const bool is_new_api = _plugin->GetCore()->isNewAPI();
const bool is_new_api = _plugin->IsNewAPI();
auto it = m_config.key_config_map.find(name);
if (it != m_config.key_config_map.end()) {
std::string val = it->second;


@@ -574,7 +574,7 @@ Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string,
}
Config config = _impl->m_configs.GetConfig(device_id);
const bool is_new_api = GetCore()->isNewAPI();
const bool is_new_api = IsNewAPI();
if (config.key_config_map.find(name) != config.key_config_map.end()) {
std::string val = config.key_config_map.find(name)->second;
if (is_new_api) {
@@ -695,7 +695,7 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
auto iter = device_map.find(device_id);
auto device = iter != device_map.end() ? iter->second : device_map.begin()->second;
auto device_info = device->get_info();
bool is_new_api = GetCore()->isNewAPI();
bool is_new_api = IsNewAPI();
if (name == ov::supported_properties) {
return decltype(ov::supported_properties)::value_type {


@@ -70,13 +70,11 @@ public:
IE_THROW() << "Cannot create infer request: there are no available devices with platform ";
}
std::shared_ptr<MyriadInferRequest> syncRequestImpl;
if (this->_plugin) {
const auto& core = _plugin->GetCore();
if (core && core->isNewAPI())
syncRequestImpl = std::make_shared<MyriadInferRequest>(_graphDesc, _parameters, _results,
_inputInfo, _outputInfo,
_graphMetaData.stagesMeta, _config, _log,
_executor, _constDatas, _isNetworkConstant);
if (this->_plugin && _plugin->IsNewAPI()) {
syncRequestImpl = std::make_shared<MyriadInferRequest>(_graphDesc, _parameters, _results,
_inputInfo, _outputInfo,
_graphMetaData.stagesMeta, _config, _log,
_executor, _constDatas, _isNetworkConstant);
}
if (!syncRequestImpl)
syncRequestImpl = std::make_shared<MyriadInferRequest>(_graphDesc, _networkInputs, _networkOutputs,


@@ -27,6 +27,7 @@
#include "ngraph_functions/subgraph_builders.hpp"
#include "shared_test_classes/subgraph/basic_lstm.hpp"
#include "behavior/ov_infer_request/infer_request_dynamic.hpp"
#include "base/ov_behavior_test_utils.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>
namespace ov {
@@ -494,6 +495,21 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithSetTensor2times) {
ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputName)));
}
TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkWithLocalCore) {
ov::CompiledModel compiled_model;
{
ov::Core local_core = createCoreWithTemplate();
const std::string tensor_name = "input_tensor";
std::map<std::string, ov::PartialShape> shapes;
shapes[tensor_name] = {ov::Dimension::dynamic(), 4, 20, 20};
OV_ASSERT_NO_THROW(function->reshape(shapes));
// Load ov::Model to target plugins
compiled_model = local_core.compile_model(function, targetDevice, configuration);
}
// Create InferRequest
OV_ASSERT_NO_THROW(compiled_model.create_infer_request());
}
TEST_P(OVNotSupportRequestDynamicTests, InferDynamicNotSupported) {
const std::string tensor_name = "input_tensor";
const ov::Shape refShape = inOutShapes[0].first;


@@ -26,6 +26,7 @@ public:
MOCK_METHOD(std::string, GetName, (), (const, noexcept));
MOCK_METHOD(void, SetCore, (std::weak_ptr<InferenceEngine::ICore>), (noexcept));
MOCK_METHOD(std::shared_ptr<InferenceEngine::ICore>, GetCore, (), (const, noexcept));
MOCK_METHOD(bool, IsNewAPI, (), (const, noexcept));
MOCK_CONST_METHOD2(GetConfig, InferenceEngine::Parameter(
const std::string&, const std::map<std::string, InferenceEngine::Parameter>&));
MOCK_CONST_METHOD2(GetMetric, InferenceEngine::Parameter(