Changed template plugin namespace (#15962)

* Changed template plugin namespace

* Fixed documentation
Ilya Churaev 2023-02-28 02:27:12 +04:00 committed by GitHub
parent 68b7b8e69b
commit e5f2903c83
12 changed files with 96 additions and 85 deletions
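
Every file below applies the same mechanical rename: the flat `TemplatePlugin` namespace becomes the nested `ov::template_plugin` namespace. A minimal sketch of the pattern:

```cpp
// Before this commit: one flat, project-specific namespace.
namespace TemplatePlugin {
class Plugin;
}  // namespace TemplatePlugin

// After this commit: nested under ov, following OpenVINO 2.0 conventions.
namespace ov {
namespace template_plugin {
class Plugin;
}  // namespace template_plugin
}  // namespace ov
```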

View File

@@ -12,7 +12,7 @@ Inference Engine Plugin API provides the helper InferenceEngine::IInferRequestIn
to use as a base class for a synchronous inference request implementation. Based on that, a declaration
of a synchronous request class can look as follows:
-@snippet src/infer_request.hpp infer_request:header
+@snippet src/sync_infer_request.hpp infer_request:header
#### Class Fields
@@ -34,7 +34,7 @@ The example class has several fields:
The constructor initializes helper fields and calls methods which allocate blobs:
-@snippet src/infer_request.cpp infer_request:ctor
+@snippet src/sync_infer_request.cpp infer_request:ctor
> **NOTE**: Call InferenceEngine::CNNNetwork::getInputsInfo and InferenceEngine::CNNNetwork::getOutputsInfo to specify both layout and precision of blobs, which you can set with InferenceEngine::InferRequest::SetBlob and get with InferenceEngine::InferRequest::GetBlob. A plugin uses these hints to determine its internal layouts and precisions for input and output blobs if needed.
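
A minimal sketch of the calls this note refers to, using the legacy Inference Engine API; the model path and the tensor names (`"data"`, `"prob"`) are illustrative:

```cpp
#include <inference_engine.hpp>

void configure_io_sketch() {
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");
    // The layout and precision set here are the hints a plugin uses to choose
    // its internal input and output representations.
    InferenceEngine::InputsDataMap inputs = network.getInputsInfo();
    inputs["data"]->setPrecision(InferenceEngine::Precision::FP32);
    inputs["data"]->setLayout(InferenceEngine::Layout::NCHW);
    InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
    outputs["prob"]->setPrecision(InferenceEngine::Precision::FP32);
}
```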
@@ -42,7 +42,7 @@ The constructor initializes helper fields and calls methods which allocate blobs
Decrements the number of created inference requests:
-@snippet src/infer_request.cpp infer_request:dtor
+@snippet src/sync_infer_request.cpp infer_request:dtor
### `InferImpl()`
@@ -50,13 +50,13 @@ Decrements a number of created inference requests:
- Checks blobs set by users
- Calls the `InferImpl` method defined in a derived class to call actual pipeline stages synchronously
-@snippet src/infer_request.cpp infer_request:infer_impl
+@snippet src/sync_infer_request.cpp infer_request:infer_impl
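
For orientation, the renamed snippet resolves to the synchronous `infer()` implementation shown later in this diff (sync_infer_request.cpp); condensed, the stage order is:

```cpp
void ov::template_plugin::InferRequest::infer() {
    // Pipeline stages executed synchronously, in order; the full
    // definitions appear in sync_infer_request.cpp below.
    infer_preprocess();
    start_pipeline();
    wait_pipeline();
    infer_postprocess();
}
```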
#### 1. `inferPreprocess`
Below is the code of the `inferPreprocess` method, which demonstrates handling of the common Inference Engine preprocessing step:
-@snippet src/infer_request.cpp infer_request:infer_preprocess
+@snippet src/sync_infer_request.cpp infer_request:infer_preprocess
**Details:**
* `InferImpl` must call the InferenceEngine::IInferRequestInternal::execDataPreprocessing function, which executes the common Inference Engine preprocessing step (for example, applies resize or color conversion operations) if it is set by the user. The output dimensions, layout, and precision match the input information set via InferenceEngine::CNNNetwork::getInputsInfo.
@@ -66,18 +66,18 @@ Below is the code of the `inferPreprocess` method to demonstrate Inference Engin
Executes a pipeline synchronously using the `_executable` object:
-@snippet src/infer_request.cpp infer_request:start_pipeline
+@snippet src/sync_infer_request.cpp infer_request:start_pipeline
#### 3. `inferPostprocess`
Converts output blobs if the precisions of the backend output blobs and the blobs passed by the user differ:
-@snippet src/infer_request.cpp infer_request:infer_postprocess
+@snippet src/sync_infer_request.cpp infer_request:infer_postprocess
### `GetPerformanceCounts()`
The method sets performance counters which were measured during execution of the pipeline stages:
-@snippet src/infer_request.cpp infer_request:get_performance_counts
+@snippet src/sync_infer_request.cpp infer_request:get_performance_counts
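
In the 2.0 API this snippet maps to `get_profiling_info()` (see sync_infer_request.cpp below). A minimal sketch of filling one `ov::ProfilingInfo` entry, assuming a stage duration measured elsewhere:

```cpp
#include <chrono>
#include <vector>
#include "openvino/runtime/profiling_info.hpp"

std::vector<ov::ProfilingInfo> profiling_sketch() {
    // Hypothetical duration measured around a pipeline stage.
    std::chrono::duration<float, std::micro> preprocess_time{42.0f};
    ov::ProfilingInfo info;
    info.status = ov::ProfilingInfo::Status::EXECUTED;
    info.node_name = "preprocess";
    info.real_time = std::chrono::duration_cast<std::chrono::microseconds>(preprocess_time);
    info.cpu_time = info.real_time;
    return {info};
}
```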
The next step in the plugin library implementation is the [Asynchronous Inference Request](@ref openvino_docs_ie_plugin_dg_async_infer_request) class.

View File

@@ -4,13 +4,13 @@
#include "async_infer_request.hpp"
-#include "infer_request.hpp"
#include "openvino/runtime/iinfer_request.hpp"
+#include "sync_infer_request.hpp"
#include "template_itt.hpp"
// ! [async_infer_request:ctor]
-TemplatePlugin::AsyncInferRequest::AsyncInferRequest(
-    const std::shared_ptr<TemplatePlugin::InferRequest>& request,
+ov::template_plugin::AsyncInferRequest::AsyncInferRequest(
+    const std::shared_ptr<ov::template_plugin::InferRequest>& request,
    const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
    const std::shared_ptr<ov::threading::ITaskExecutor>& wait_executor,
    const std::shared_ptr<ov::threading::ITaskExecutor>& callback_executor)
@@ -47,7 +47,7 @@ TemplatePlugin::AsyncInferRequest::AsyncInferRequest(
// ! [async_infer_request:ctor]
// ! [async_infer_request:dtor]
-TemplatePlugin::AsyncInferRequest::~AsyncInferRequest() {
+ov::template_plugin::AsyncInferRequest::~AsyncInferRequest() {
    ov::IAsyncInferRequest::stop_and_wait();
}
// ! [async_infer_request:dtor]
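
The constructor body is elided by the hunk above; in this pattern, an asynchronous request typically queues its stages onto the executors received in the signature. A hedged sketch of the idea, assuming the `m_pipeline` member that `ov::IAsyncInferRequest` exposes to derived classes (the stage body is illustrative):

```cpp
// Sketch only: each {executor, task} pair runs in order on start_async().
m_pipeline = {
    {wait_executor, [this] {
         // Offload device-side waiting to the dedicated wait executor.
         OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "WaitPipeline");
     }},
};
```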

View File

@@ -6,11 +6,12 @@
#include <memory>
-#include "infer_request.hpp"
#include "openvino/runtime/iasync_infer_request.hpp"
#include "openvino/runtime/iinfer_request.hpp"
+#include "sync_infer_request.hpp"
-namespace TemplatePlugin {
+namespace ov {
+namespace template_plugin {
// ! [async_infer_request:header]
class AsyncInferRequest : public ov::IAsyncInferRequest {
@@ -27,4 +28,5 @@ private:
};
// ! [async_infer_request:header]
-} // namespace TemplatePlugin
+} // namespace template_plugin
+} // namespace ov

View File

@@ -15,13 +15,11 @@
#include "template_itt.hpp"
#include "transformations/utils/utils.hpp"
-using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
-TemplatePlugin::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
-                                             const std::shared_ptr<const ov::IPlugin>& plugin,
-                                             const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
-                                             const Configuration& cfg)
+ov::template_plugin::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
+                                                  const std::shared_ptr<const ov::IPlugin>& plugin,
+                                                  const std::shared_ptr<ov::threading::ITaskExecutor>& task_executor,
+                                                  const Configuration& cfg)
    : ov::ICompiledModel(model, plugin, task_executor),  // Disable default threads creation
      _cfg(cfg),
      m_model(model) {
@@ -45,7 +43,7 @@ TemplatePlugin::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& m
// forward declaration
void transform_model(const std::shared_ptr<ov::Model>& model);
-void TemplatePlugin::CompiledModel::compile_model(const std::shared_ptr<ov::Model>& model) {
+void ov::template_plugin::CompiledModel::compile_model(const std::shared_ptr<ov::Model>& model) {
    // apply plugins transformations
    transform_model(model);
    // Perform any other steps like allocation and filling backend specific memory handles and so on
@@ -53,44 +51,44 @@ void TemplatePlugin::CompiledModel::compile_model(const std::shared_ptr<ov::Mode
// ! [executable_network:map_graph]
// ! [executable_network:create_infer_request]
-std::shared_ptr<ov::IAsyncInferRequest> TemplatePlugin::CompiledModel::create_infer_request() const {
+std::shared_ptr<ov::IAsyncInferRequest> ov::template_plugin::CompiledModel::create_infer_request() const {
    auto internal_request = create_sync_infer_request();
-    auto async_infer_request =
-        std::make_shared<AsyncInferRequest>(std::static_pointer_cast<TemplatePlugin::InferRequest>(internal_request),
-                                            get_task_executor(),
-                                            get_template_plugin()->_waitExecutor,
-                                            get_callback_executor());
+    auto async_infer_request = std::make_shared<AsyncInferRequest>(
+        std::static_pointer_cast<ov::template_plugin::InferRequest>(internal_request),
+        get_task_executor(),
+        get_template_plugin()->_waitExecutor,
+        get_callback_executor());
    return async_infer_request;
}
-std::shared_ptr<ov::ISyncInferRequest> TemplatePlugin::CompiledModel::create_sync_infer_request() const {
+std::shared_ptr<ov::ISyncInferRequest> ov::template_plugin::CompiledModel::create_sync_infer_request() const {
    return std::make_shared<InferRequest>(
-        std::static_pointer_cast<const TemplatePlugin::CompiledModel>(shared_from_this()));
+        std::static_pointer_cast<const ov::template_plugin::CompiledModel>(shared_from_this()));
}
// ! [executable_network:create_infer_request]
-void TemplatePlugin::CompiledModel::set_property(const ov::AnyMap& properties) {
+void ov::template_plugin::CompiledModel::set_property(const ov::AnyMap& properties) {
    OPENVINO_NOT_IMPLEMENTED;
}
-ov::RemoteContext TemplatePlugin::CompiledModel::get_context() const {
+ov::RemoteContext ov::template_plugin::CompiledModel::get_context() const {
    OPENVINO_NOT_IMPLEMENTED;
}
-std::shared_ptr<const ov::Model> TemplatePlugin::CompiledModel::get_runtime_model() const {
+std::shared_ptr<const ov::Model> ov::template_plugin::CompiledModel::get_runtime_model() const {
    return m_model;
}
-std::shared_ptr<const Plugin> TemplatePlugin::CompiledModel::get_template_plugin() const {
+std::shared_ptr<const ov::template_plugin::Plugin> ov::template_plugin::CompiledModel::get_template_plugin() const {
    auto plugin = get_plugin();
    OPENVINO_ASSERT(plugin);
-    auto template_plugin = std::static_pointer_cast<const TemplatePlugin::Plugin>(plugin);
+    auto template_plugin = std::static_pointer_cast<const ov::template_plugin::Plugin>(plugin);
    OPENVINO_ASSERT(template_plugin);
    return template_plugin;
}
// ! [executable_network:get_config]
-ov::Any TemplatePlugin::CompiledModel::get_property(const std::string& name) const {
+ov::Any ov::template_plugin::CompiledModel::get_property(const std::string& name) const {
    const auto& add_ro_properties = [](const std::string& name, std::vector<ov::PropertyName>& properties) {
        properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO});
    };
@@ -152,7 +150,7 @@ ov::Any TemplatePlugin::CompiledModel::get_property(const std::string& name) con
// ! [executable_network:get_config]
// ! [executable_network:export]
-void TemplatePlugin::CompiledModel::export_model(std::ostream& modelStream) const {
+void ov::template_plugin::CompiledModel::export_model(std::ostream& modelStream) const {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "ExecutableNetwork::Export");
    std::stringstream xmlFile, binFile;
View File

@@ -10,7 +10,8 @@
#include "openvino/runtime/tensor.hpp"
#include "template_config.hpp"
-namespace TemplatePlugin {
+namespace ov {
+namespace template_plugin {
class Plugin;
class InferRequest;
@@ -55,4 +56,5 @@ private:
};
// ! [executable_network:header]
-} // namespace TemplatePlugin
+} // namespace template_plugin
+} // namespace ov
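
None of these renames are visible to user code, which only touches the public `ov` classes; a quick sanity check against the public API (the model path is illustrative):

```cpp
#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    // "TEMPLATE" is the device name this plugin registers (see plugin.cpp below).
    auto compiled_model = core.compile_model("model.xml", "TEMPLATE");
    ov::InferRequest request = compiled_model.create_infer_request();
    request.infer();
    return 0;
}
```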

View File

@@ -19,15 +19,13 @@
#include "transformations/op_conversions/convert_reduce_to_pooling.hpp"
#include "transformations/template_pattern_transformation.hpp"
-using namespace TemplatePlugin;
namespace {
static constexpr const char* wait_executor_name = "TemplateWaitExecutor";
static constexpr const char* stream_executor_name = "TemplateStreamsExecutor";
}  // namespace
// ! [plugin:ctor]
-Plugin::Plugin() {
+ov::template_plugin::Plugin::Plugin() {
    // TODO: fill with actual device name, backend engine
    set_device_name("TEMPLATE");
@@ -40,7 +38,7 @@ Plugin::Plugin() {
// ! [plugin:ctor]
// ! [plugin:dtor]
-Plugin::~Plugin() {
+ov::template_plugin::Plugin::~Plugin() {
    // Plugin should remove executors from executor cache to avoid threads number growth in the whole application
    get_executor_manager()->clear(stream_executor_name);
    get_executor_manager()->clear(wait_executor_name);
@@ -49,11 +47,11 @@ Plugin::~Plugin() {
}
// ! [plugin:dtor]
-ov::RemoteContext TemplatePlugin::Plugin::create_context(const ov::AnyMap& remote_properties) const {
+ov::RemoteContext ov::template_plugin::Plugin::create_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}
-ov::RemoteContext TemplatePlugin::Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
+ov::RemoteContext ov::template_plugin::Plugin::get_default_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}
@@ -85,8 +83,9 @@ void transform_model(const std::shared_ptr<ov::Model>& model) {
// ! [plugin:transform_network]
// ! [plugin:load_exe_network_impl]
-std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::compile_model(const std::shared_ptr<const ov::Model>& model,
-                                                                          const ov::AnyMap& properties) const {
+std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::compile_model(
+    const std::shared_ptr<const ov::Model>& model,
+    const ov::AnyMap& properties) const {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::compile_model");
    auto fullConfig = Configuration{properties, _cfg};
@@ -101,16 +100,17 @@ std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::compile_model(const
    return compiled_model;
}
-std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::compile_model(const std::shared_ptr<const ov::Model>& model,
-                                                                          const ov::AnyMap& properties,
-                                                                          const ov::RemoteContext& context) const {
+std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::compile_model(
+    const std::shared_ptr<const ov::Model>& model,
+    const ov::AnyMap& properties,
+    const ov::RemoteContext& context) const {
    OPENVINO_NOT_IMPLEMENTED;
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network]
-std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::import_model(std::istream& model,
-                                                                         const ov::AnyMap& properties) const {
+std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(std::istream& model,
+                                                                              const ov::AnyMap& properties) const {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::import_model");
    auto fullConfig = Configuration{properties, _cfg};
@@ -141,16 +141,16 @@ std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::import_model(std::is
    return compiled_model;
}
-std::shared_ptr<ov::ICompiledModel> TemplatePlugin::Plugin::import_model(std::istream& model,
-                                                                         const ov::RemoteContext& context,
-                                                                         const ov::AnyMap& properties) const {
+std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(std::istream& model,
+                                                                              const ov::RemoteContext& context,
+                                                                              const ov::AnyMap& properties) const {
    OPENVINO_NOT_IMPLEMENTED;
}
// ! [plugin:import_network]
// ! [plugin:query_network]
-ov::SupportedOpsMap TemplatePlugin::Plugin::query_model(const std::shared_ptr<const ov::Model>& model,
-                                                        const ov::AnyMap& properties) const {
+ov::SupportedOpsMap ov::template_plugin::Plugin::query_model(const std::shared_ptr<const ov::Model>& model,
+                                                             const ov::AnyMap& properties) const {
    OV_ITT_SCOPED_TASK(TemplatePlugin::itt::domains::TemplatePlugin, "Plugin::query_model");
    Configuration fullConfig{properties, _cfg, false};
@@ -194,13 +194,13 @@ ov::SupportedOpsMap TemplatePlugin::Plugin::query_model(const std::shared_ptr<co
// ! [plugin:query_network]
// ! [plugin:set_config]
-void TemplatePlugin::Plugin::set_property(const ov::AnyMap& properties) {
+void ov::template_plugin::Plugin::set_property(const ov::AnyMap& properties) {
    _cfg = Configuration{properties, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
-ov::Any TemplatePlugin::Plugin::get_property(const std::string& name, const ov::AnyMap& arguments) const {
+ov::Any ov::template_plugin::Plugin::get_property(const std::string& name, const ov::AnyMap& arguments) const {
    const auto& add_ro_properties = [](const std::string& name, std::vector<ov::PropertyName>& properties) {
        properties.emplace_back(ov::PropertyName{name, ov::PropertyMutability::RO});
    };
@@ -283,5 +283,5 @@ ov::Any TemplatePlugin::Plugin::get_property(const std::string& name, const ov::
// ! [plugin:create_plugin_engine]
static const ov::Version version = {CI_BUILD_NUMBER, "openvino_template_plugin"};
-OV_DEFINE_PLUGIN_CREATE_FUNCTION(Plugin, version)
+OV_DEFINE_PLUGIN_CREATE_FUNCTION(ov::template_plugin::Plugin, version)
// ! [plugin:create_plugin_engine]
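
For context: `OV_DEFINE_PLUGIN_CREATE_FUNCTION` generates the exported factory function that `ov::Core` resolves when loading the plugin's shared library, so the only thing this rename has to touch is the class name passed to the macro; plugin discovery and device registration are unaffected.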

View File

@@ -12,7 +12,8 @@
#include "template_config.hpp"
//! [plugin:header]
-namespace TemplatePlugin {
+namespace ov {
+namespace template_plugin {
class Plugin : public ov::IPlugin {
public:
@@ -46,7 +47,7 @@ public:
                                      const ov::AnyMap& properties) const override;
private:
-    friend class TemplatePlugin::CompiledModel;
+    friend class CompiledModel;
    friend class InferRequest;
    std::shared_ptr<ngraph::runtime::Backend> _backend;
@@ -54,5 +55,6 @@ private:
    std::shared_ptr<ov::threading::ITaskExecutor> _waitExecutor;
};
-} // namespace TemplatePlugin
+} // namespace template_plugin
+} // namespace ov
//! [plugin:header]

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
-#include "infer_request.hpp"
+#include "sync_infer_request.hpp"
#include <algorithm>
#include <map>
@@ -31,7 +31,7 @@ void allocate_tensor_impl(ov::Tensor& tensor, const ov::element::Type& element_t
}  // namespace
// ! [infer_request:ctor]
-TemplatePlugin::InferRequest::InferRequest(const std::shared_ptr<const TemplatePlugin::CompiledModel>& model)
+ov::template_plugin::InferRequest::InferRequest(const std::shared_ptr<const ov::template_plugin::CompiledModel>& model)
    : ov::ISyncInferRequest(model) {
    // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
@@ -75,23 +75,24 @@ TemplatePlugin::InferRequest::InferRequest(const std::shared_ptr<const TemplateP
}
// ! [infer_request:ctor]
-std::vector<std::shared_ptr<ov::IVariableState>> TemplatePlugin::InferRequest::query_state() const {
+std::vector<std::shared_ptr<ov::IVariableState>> ov::template_plugin::InferRequest::query_state() const {
    OPENVINO_NOT_IMPLEMENTED;
}
-std::shared_ptr<const TemplatePlugin::CompiledModel> TemplatePlugin::InferRequest::get_template_model() const {
+std::shared_ptr<const ov::template_plugin::CompiledModel> ov::template_plugin::InferRequest::get_template_model()
+    const {
    auto& compiled_model = get_compiled_model();
-    auto template_model = std::dynamic_pointer_cast<const TemplatePlugin::CompiledModel>(compiled_model);
+    auto template_model = std::dynamic_pointer_cast<const ov::template_plugin::CompiledModel>(compiled_model);
    OPENVINO_ASSERT(template_model);
    return template_model;
}
// ! [infer_request:dtor]
-TemplatePlugin::InferRequest::~InferRequest() = default;
+ov::template_plugin::InferRequest::~InferRequest() = default;
// ! [infer_request:dtor]
// ! [infer_request:infer_impl]
-void TemplatePlugin::InferRequest::infer() {
+void ov::template_plugin::InferRequest::infer() {
    // TODO: fill with actual list of pipeline stages, which are executed synchronously for sync infer requests
    infer_preprocess();
    start_pipeline();
@@ -101,7 +102,7 @@ void TemplatePlugin::InferRequest::infer() {
// ! [infer_request:infer_impl]
// ! [infer_request:infer_preprocess]
-void TemplatePlugin::InferRequest::infer_preprocess() {
+void ov::template_plugin::InferRequest::infer_preprocess() {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, m_profiling_task[Preprocess]);
    auto start = Time::now();
    convert_batched_tensors();
@@ -168,7 +169,7 @@ void TemplatePlugin::InferRequest::infer_preprocess() {
// ! [infer_request:infer_preprocess]
// ! [infer_request:start_pipeline]
-void TemplatePlugin::InferRequest::start_pipeline() {
+void ov::template_plugin::InferRequest::start_pipeline() {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, m_profiling_task[StartPipeline])
    auto start = Time::now();
    m_executable->call(m_backend_output_tensors, m_backend_input_tensors);
@@ -176,7 +177,7 @@ void TemplatePlugin::InferRequest::start_pipeline() {
}
// ! [infer_request:start_pipeline]
-void TemplatePlugin::InferRequest::wait_pipeline() {
+void ov::template_plugin::InferRequest::wait_pipeline() {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, m_profiling_task[WaitPipeline])
    auto start = Time::now();
    // TODO: Wait pipeline using driver API or other synchronizations methods
@@ -185,7 +186,7 @@ void TemplatePlugin::InferRequest::wait_pipeline() {
}
// ! [infer_request:infer_postprocess]
-void TemplatePlugin::InferRequest::infer_postprocess() {
+void ov::template_plugin::InferRequest::infer_postprocess() {
    OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, m_profiling_task[Postprocess]);
    auto start = Time::now();
    OPENVINO_ASSERT(get_outputs().size() == m_backend_output_tensors.size());
@@ -206,8 +207,8 @@ void TemplatePlugin::InferRequest::infer_postprocess() {
// ! [infer_request:infer_postprocess]
// ! [infer_request:set_blobs_impl]
-void TemplatePlugin::InferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
-                                                    const std::vector<ov::Tensor>& tensors) {
+void ov::template_plugin::InferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
+                                                         const std::vector<ov::Tensor>& tensors) {
    for (const auto& input : get_inputs()) {
        if (input == port) {
            m_batched_tensors[input.get_tensor_ptr()] = tensors;
@@ -219,7 +220,7 @@ void TemplatePlugin::InferRequest::set_tensors_impl(const ov::Output<const ov::N
// ! [infer_request:set_blobs_impl]
// ! [infer_request:get_performance_counts]
-std::vector<ov::ProfilingInfo> TemplatePlugin::InferRequest::get_profiling_info() const {
+std::vector<ov::ProfilingInfo> ov::template_plugin::InferRequest::get_profiling_info() const {
    std::vector<ov::ProfilingInfo> info;
    const auto fill_profiling_info = [](const std::string& name,
                                        const std::chrono::duration<float, std::micro>& time) -> ov::ProfilingInfo {
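
The `set_tensors_impl` override earlier in this file backs the public batched-input API; a minimal caller-side sketch (tensor names and shapes are illustrative):

```cpp
#include <vector>
#include "openvino/runtime/core.hpp"

void batched_input_sketch(ov::CompiledModel& compiled_model) {
    ov::InferRequest request = compiled_model.create_infer_request();
    // One tensor per batch element instead of a single pre-batched tensor;
    // the plugin stores them in m_batched_tensors, as shown above.
    std::vector<ov::Tensor> batch{ov::Tensor(ov::element::f32, {1, 3, 224, 224}),
                                  ov::Tensor(ov::element::f32, {1, 3, 224, 224})};
    request.set_tensors("input", batch);
    request.infer();
}
```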

View File

@@ -16,7 +16,8 @@
#include "openvino/itt.hpp"
#include "openvino/runtime/isync_infer_request.hpp"
-namespace TemplatePlugin {
+namespace ov {
+namespace template_plugin {
// forward declaration
class CompiledModel;
@@ -24,7 +25,7 @@ class CompiledModel;
// ! [infer_request:header]
class InferRequest : public ov::ISyncInferRequest {
public:
-    explicit InferRequest(const std::shared_ptr<const TemplatePlugin::CompiledModel>& compiled_model);
+    explicit InferRequest(const std::shared_ptr<const ov::template_plugin::CompiledModel>& compiled_model);
    ~InferRequest();
    void infer() override;
@@ -54,4 +55,5 @@ private:
};
// ! [infer_request:header]
-} // namespace TemplatePlugin
+} // namespace template_plugin
+} // namespace ov

View File

@@ -9,7 +9,7 @@
#include "template/config.hpp"
-using namespace TemplatePlugin;
+using namespace ov::template_plugin;
Configuration::Configuration() {}

View File

@@ -10,7 +10,8 @@
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/threading/istreams_executor.hpp"
-namespace TemplatePlugin {
+namespace ov {
+namespace template_plugin {
// ! [configuration:header]
using ConfigMap = std::map<std::string, ov::Any>;
@@ -37,4 +38,5 @@ struct Configuration {
};
// ! [configuration:header]
-} // namespace TemplatePlugin
+} // namespace template_plugin
+} // namespace ov

View File

@@ -11,10 +11,12 @@
#include <openvino/itt.hpp>
-namespace TemplatePlugin {
+namespace ov {
+namespace template_plugin {
namespace itt {
namespace domains {
OV_ITT_DOMAIN(TemplatePlugin);
}
}  // namespace itt
-}  // namespace TemplatePlugin
+}  // namespace template_plugin
+}  // namespace ov
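
The `TemplatePlugin` ITT domain declared here is what the `OV_ITT_SCOPED_TASK` calls throughout this diff reference; a minimal usage sketch (the function name is illustrative):

```cpp
#include "template_itt.hpp"

void traced_step() {
    // Opens a scoped profiling task attributed to the plugin's ITT domain;
    // it ends automatically when the scope exits.
    OV_ITT_SCOPED_TASK(ov::template_plugin::itt::domains::TemplatePlugin, "traced_step");
    // ... actual work ...
}
```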