Fixed documentation code style (#7008)

Ilya Churaev 2021-08-11 09:38:43 +03:00 committed by GitHub
parent e36f42b205
commit ff500b0bed
25 changed files with 277 additions and 144 deletions

@@ -1,6 +1,7 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
ColumnLimit: 120
Language: Cpp
Standard: Cpp11
@@ -8,18 +9,20 @@ Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
ColumnLimit: 160
# Specialize this comment pragma in order to avoid changes in SEA copyrights
BinPackArguments: false
BinPackParameters: false
CommentPragmas: '^#'
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
IndentPPDirectives: BeforeHash
SpaceBeforeCpp11BracedList: true
SpaceBeforeCtorInitializerColon: false
IndentPPDirectives: AfterHash
ForEachMacros:
- foreach
- FOREACH_CHILD
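
Most of the reformatting in the C++ hunks below follows from a few of these options: the 120-character column limit, BinPackArguments/BinPackParameters set to false (once a call or declaration has to wrap, every argument or parameter goes on its own line, aligned with the opening parenthesis), and IndentPPDirectives: AfterHash (nested #include and #ifdef lines are indented after the hash, as in the include hunks further down). A minimal sketch of the wrapping rule on a hypothetical declaration, assuming the usual InferenceEngine headers are included; the function name is illustrative and not taken from this patch:

    // Too long for 120 columns as a single line, so clang-format places each parameter on its
    // own line, aligned with the opening parenthesis (BinPackParameters: false):
    InferenceEngine::StatusCode executeOnBlobs(std::vector<InferenceEngine::Blob::Ptr>& inputs,
                                               std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                               InferenceEngine::ResponseDesc* resp) noexcept;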

@@ -22,7 +22,8 @@ OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node>& node) {
IE_THROW() << "Cannot create implementation for op with dynamic shapes!";
if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
IE_THROW() << "Operation supports only 4d tensors for input and output.";
if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
if (castedNode->get_input_element_type(0) != ngraph::element::f32 ||
castedNode->get_output_element_type(0) != ngraph::element::f32)
IE_THROW() << "Operation supports only FP32 tensors.";
add = castedNode->getAddAttr();
inShape = castedNode->get_input_shape(0);
@@ -34,9 +35,12 @@ OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node>& node) {
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept {
auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(
std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept {
auto createConfig = [](const InferenceEngine::SizeVector inShape,
const InferenceEngine::SizeVector& outShape,
bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
InferenceEngine::DataConfig inData;
@@ -45,9 +49,11 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
// Allow any offset before data
size_t offset((std::numeric_limits<size_t>::max)());
if (planar) {
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
inData.desc =
InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
outData.desc =
InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
config.outConfs.push_back(outData);
} else {
// Add blocked (nChw8c) format
@@ -64,9 +70,11 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
InferenceEngine::SizeVector outBlkDims = outShape;
outBlkDims[1] = div_up(outBlkDims[1], 8);
outBlkDims.push_back(8);
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
inData.desc =
InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
outData.desc =
InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
config.outConfs.push_back(outData);
}
return config;
@@ -87,7 +95,8 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config,
InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
@@ -115,10 +124,13 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig&
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float*>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float* dst_data = outputs[0]->buffer().as<float*>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
const float* src_data =
inputs[0]->cbuffer().as<const float*>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float* dst_data =
outputs[0]->buffer().as<float*>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
for (size_t i = 0; i < inputs[0]->size(); i++) {
dst_data[i] = src_data[i] + add;

@@ -16,8 +16,10 @@ public:
explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept override;
private:

@@ -7,12 +7,12 @@
#include "cpu_kernel.hpp"
#include "op.hpp"
#ifdef OPENCV_IMPORT_ENABLED
#include "fft_kernel.hpp"
#include "fft_op.hpp"
# include "fft_kernel.hpp"
# include "fft_op.hpp"
#endif
#include <ngraph/ngraph.hpp>
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
#include <onnx_import/onnx_utils.hpp>
# include <onnx_import/onnx_utils.hpp>
#endif
#include <map>
@@ -25,18 +25,24 @@ using namespace TemplateExtension;
//! [extension:ctor]
Extension::Extension() {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
int64_t add = node.get_attribute_value<int64_t>("add");
return {std::make_shared<Operation>(ng_inputs.at(0), add)};
});
#ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
bool inverse = node.get_attribute_value<int64_t>("inverse");
return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
});
#endif
ngraph::onnx_import::register_operator(Operation::type_info.name,
1,
"custom_domain",
[](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
int64_t add = node.get_attribute_value<int64_t>("add");
return {std::make_shared<Operation>(ng_inputs.at(0), add)};
});
# ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::register_operator(FFTOp::type_info.name,
1,
"custom_domain",
[](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
bool inverse = node.get_attribute_value<int64_t>("inverse");
return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
});
# endif
#endif
}
//! [extension:ctor]
@@ -45,9 +51,9 @@ Extension::Extension() {
Extension::~Extension() {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
#ifdef OPENCV_IMPORT_ENABLED
# ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
#endif // OPENCV_IMPORT_ENABLED
# endif // OPENCV_IMPORT_ENABLED
#endif // NGRAPH_ONNX_IMPORT_ENABLED
}
//! [extension:dtor]
@@ -92,7 +98,8 @@ std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::N
//! [extension:getImplTypes]
//! [extension:getImplementation]
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node,
const std::string& implType) {
if (implType == "CPU") {
if (std::dynamic_pointer_cast<Operation>(node)) {
return std::make_shared<OpImplementation>(node);

@@ -25,7 +25,8 @@ public:
std::map<std::string, ngraph::OpSet> getOpSets() override;
std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node,
const std::string& implType) override;
};
} // namespace TemplateExtension

@@ -21,14 +21,16 @@ FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node>& node) {
IE_THROW() << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
IE_THROW() << "Cannot create implementation for op with dynamic shapes!";
if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
if (castedNode->get_input_element_type(0) != ngraph::element::f32 ||
castedNode->get_output_element_type(0) != ngraph::element::f32)
IE_THROW() << "Operation supports only FP32 tensors.";
inpShape = castedNode->get_input_shape(0);
outShape = castedNode->get_output_shape(0);
inverse = castedNode->inverse;
}
InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc* resp) noexcept {
InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept {
std::vector<InferenceEngine::DataConfig> inDataConfig;
std::vector<InferenceEngine::DataConfig> outDataConfig;
InferenceEngine::SizeVector order(inpShape.size());
@@ -55,7 +57,8 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<Infe
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config,
InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
@@ -85,7 +88,8 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
return cv::Mat(size, CV_32F, (void*)blob->buffer());
}
InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept {
cv::Mat inp = infEngineBlobToMat(inputs[0]);
cv::Mat out = infEngineBlobToMat(outputs[0]);
@@ -95,7 +99,8 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
const int w = inp.size[3];
cv::Mat complex(h, w, CV_32FC2), interleavedOut(h, w, CV_32FC2);
for (int i = 0; i < n; ++i) {
std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)),
cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
cv::merge(components, complex);
if (!inverse)

@@ -16,8 +16,10 @@ public:
explicit FFTImpl(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept override;
private:

@@ -9,7 +9,7 @@ using namespace TemplateExtension;
constexpr ngraph::NodeTypeInfo FFTOp::type_info;
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse): Op({inp}) {
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse) : Op({inp}) {
constructor_validate_and_infer_types();
inverse = _inverse;
}

@@ -11,7 +11,7 @@ namespace TemplateExtension {
class FFTOp : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0};
static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0};
const ngraph::NodeTypeInfo& get_type_info() const override {
return type_info;
}

@@ -9,7 +9,7 @@ using namespace TemplateExtension;
//! [op:ctor]
NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0);
Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add): Op({arg}), add(add) {
Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add) : Op({arg}), add(add) {
constructor_validate_and_infer_types();
}
//! [op:ctor]

@@ -9,10 +9,13 @@
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor)
: AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
: AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest),
_waitExecutor(waitExecutor) {
// In the current implementation we have CPU-only tasks and no need for two executors
// So, by default, a single-stage pipeline is created.
// This stage executes InferRequest::Infer() using cpuTaskExecutor.
@@ -23,7 +26,8 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest:
if (remoteDevice) {
_pipeline = {{cpuTaskExecutor,
[this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},

@@ -13,8 +13,10 @@ namespace TemplatePlugin {
// ! [async_infer_request:header]
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest();

@@ -23,7 +23,8 @@ Configuration::Configuration(const ConfigMap& config, const Configuration& defau
if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) {
_streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value);
} else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
} else if (streamExecutorConfigKeys.end() !=
std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
_streamsExecutorConfig.SetConfig(key, value);
} else if (CONFIG_KEY(DEVICE_ID) == key) {
deviceId = std::stoi(value);

@@ -21,7 +21,9 @@ struct Configuration {
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true);
explicit Configuration(const ConfigMap& config,
const Configuration& defaultCfg = {},
const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const;

@@ -18,8 +18,10 @@ using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg, const Plugin::Ptr& plugin)
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const Plugin::Ptr& plugin)
: InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
_cfg(cfg),
_plugin(plugin) {
@@ -40,7 +42,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const Configuration& cfg, const Plugin::Ptr& plugin): _cfg(cfg), _plugin(plugin) {
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model,
const Configuration& cfg,
const Plugin::Ptr& plugin)
: _cfg(cfg),
_plugin(plugin) {
// read XML content
std::string xmlString;
std::uint64_t dataSize = 0;
@@ -53,7 +59,9 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const
model.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
if (0 != dataSize) {
dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast<std::size_t>(dataSize)}, InferenceEngine::Layout::C));
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8,
{static_cast<std::size_t>(dataSize)},
InferenceEngine::Layout::C));
dataBlob->allocate();
model.read(dataBlob->buffer(), dataSize);
}
@@ -84,7 +92,8 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const
// ! [executable_network:map_graph]
// forward declaration
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
@@ -117,29 +126,36 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
void TemplatePlugin::ExecutableNetwork::InitExecutor() {
// Default multi-threaded configuration is balanced for throughput and latency cases and takes into account
// real hardware cores and NUMA nodes.
auto streamsExecutorConfig = InferenceEngine::IStreamsExecutor::Config::MakeDefaultMultiThreaded(_cfg._streamsExecutorConfig);
auto streamsExecutorConfig =
InferenceEngine::IStreamsExecutor::Config::MakeDefaultMultiThreaded(_cfg._streamsExecutorConfig);
streamsExecutorConfig._name = "TemplateStreamsExecutor";
// As Inference Engine CPU Streams Executor creates some additional threads
// it is better to avoid thread recreation, as some OS memory allocators cannot manage such usage cases
// and memory consumption can be larger than expected.
// So Inference Engine provides an executor cache.
_taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
// NOTE: callback Executor is not configured. So callback will be called in the thread of the last stage of inference request pipeline
// _callbackExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"});
// NOTE: callback Executor is not configured. So callback will be called in the thread of the last stage of
// inference request pipeline _callbackExecutor =
// InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"});
}
// ! [executable_network:init_executor]
// ! [executable_network:create_infer_request_impl]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(
InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs,
networkOutputs,
std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest), _taskExecutor, _plugin->_waitExecutor,
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor,
_plugin->_waitExecutor,
_callbackExecutor);
}
// ! [executable_network:create_infer_request]
@@ -154,11 +170,16 @@ InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const st
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const {
// TODO: return more supported values for metrics
if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string> {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
IE_SET_METRIC_RETURN(SUPPORTED_METRICS,
std::vector<std::string>{METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
configKeys.emplace_back(configKey);
}

@@ -23,16 +23,20 @@ class Plugin;
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
// Methods from a base class ExecutableNetworkThreadSafeDefault
void Export(std::ostream& model) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(
InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override;
InferenceEngine::Parameter GetMetric(const std::string& name) const override;
InferenceEngine::Parameter GetConfig(const std::string& name) const override;
@@ -40,7 +44,8 @@ public:
private:
friend class TemplateInferRequest;
void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void InitExecutor();

@@ -23,19 +23,25 @@ using namespace InferenceEngine;
using Time = std::chrono::high_resolution_clock;
// ! [infer_request:ctor]
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork)
: IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) {
: IInferRequestInternal(networkInputs, networkOutputs),
_executableNetwork(executableNetwork) {
// TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
std::string name = _executableNetwork->_function->get_friendly_name() + "_Req" + requestID;
_profilingTask = {
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipline"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipline"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name +
"_Preprocess"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name +
"_Postprocess"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name +
"_StartPipline"),
openvino::itt::handle("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name +
"_WaitPipline"),
};
_executable = _executableNetwork->_plugin->_backend->compile(_executableNetwork->_function);
@@ -60,7 +66,10 @@ void TemplateInferRequest::allocateDeviceBuffers() {
}
template <typename BlobDataMap, typename GetNetworkPrecisionF>
static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision,
static void AllocateImpl(const BlobDataMap& userDataMap,
BlobMap& userBlobMap,
BlobMap& deviceBlobMap,
GetNetworkPrecisionF&& GetNetworkPrecision,
bool isInputBlob = true) {
for (auto&& userData : userDataMap) {
const auto& dims = userData.second->getTensorDesc().getDims();
@@ -95,7 +104,9 @@ void TemplateInferRequest::allocateBlobs() {
});
auto&& results = _executableNetwork->_function->get_results();
AllocateImpl(
_networkOutputs, _outputs, _networkOutputBlobs,
_networkOutputs,
_outputs,
_networkOutputBlobs,
[&](const std::string& blobName) {
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
},
@@ -114,8 +125,10 @@ void TemplateInferRequest::InferImpl() {
template <typename SrcT, typename DstT>
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
ngraph::runtime::reference::convert<SrcT, DstT>(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(), src->size());
ngraph::runtime::reference::convert<SrcT, DstT>(
InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(),
src->size());
}
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
@@ -128,8 +141,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<std::uint8_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -141,8 +154,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -154,8 +167,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<int64_t, int32_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -167,8 +180,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<int16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -180,8 +193,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<int8_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -193,8 +206,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<bool, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -206,8 +219,8 @@ static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
blobCopy<uint16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision()
<< " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
@@ -230,7 +243,9 @@ void TemplateInferRequest::inferPreprocess() {
const auto& parameterShape = parameter->get_shape();
const auto& parameterType = parameter->get_element_type();
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
parameterType, parameterShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
parameterType,
parameterShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
}
for (auto&& output : _outputs) {
auto outputBlob = output.second;
@@ -243,7 +258,9 @@ void TemplateInferRequest::inferPreprocess() {
const auto& resultShape = result->get_shape();
const auto& resultType = result->get_element_type();
_outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
resultType, resultShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
resultType,
resultShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
}
_durations[Preprocess] = Time::now() - start;
}

@@ -26,7 +26,8 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal {
public:
typedef std::shared_ptr<TemplateInferRequest> Ptr;
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest();

@@ -38,7 +38,8 @@ Plugin::Plugin() {
_backend = ngraph::runtime::Backend::create("INTERPRETER");
// create default stream executor with a given name
_waitExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
_waitExecutor =
InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
}
// ! [plugin:ctor]
@@ -54,7 +55,8 @@ Plugin::~Plugin() {
// ! [plugin:transform_network]
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
auto transformedNetwork = ngraph::clone_function(*function);
@@ -70,13 +72,15 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
bool needF16toF32 = false;
for (const auto& param : function->get_parameters()) {
if (param->get_element_type() == ngraph::element::f16 &&
inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) {
inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() !=
InferenceEngine::Precision::FP16) {
needF16toF32 = true;
break;
}
}
if (needF16toF32)
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
passManager.register_pass<ngraph::pass::ConvertPrecision>(
precisions_array{{ngraph::element::f16, ngraph::element::f32}});
// Example: register plugin specific transformation
passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
@@ -92,32 +96,41 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// ! [plugin:transform_network]
// ! [plugin:load_exe_network_impl]
InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) {
InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const ConfigMap& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::LoadExeNetworkImpl");
InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo();
InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo();
auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(network.getFunction(), networkInputs, networkOutputs, fullConfig,
auto fullConfig = Configuration{config, _cfg};
return std::make_shared<ExecutableNetwork>(network.getFunction(),
networkInputs,
networkOutputs,
fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network]
InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::ImportNetwork(std::istream& modelStream, const std::map<std::string, std::string>& config) {
InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::ImportNetwork(
std::istream& modelStream,
const std::map<std::string, std::string>& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetwork");
auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(modelStream, fullConfig, std::static_pointer_cast<Plugin>(shared_from_this()));
auto fullConfig = Configuration{config, _cfg};
return std::make_shared<ExecutableNetwork>(modelStream,
fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
}
// ! [plugin:import_network]
// ! [plugin:query_network]
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const {
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network,
const ConfigMap& config) const {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork");
Configuration fullConfig {config, _cfg, false};
Configuration fullConfig{config, _cfg, false};
auto function = network.getFunction();
// 1. First of all we should store initial input operation set
@@ -160,7 +173,8 @@ InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::
// 5. If some housekeeping nodes were not added - add them.
if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
for (auto&& inputNodeOutput : node->input_values()) {
if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
if (ngraph::op::is_constant(inputNodeOutput.get_node()) ||
ngraph::op::is_parameter(inputNodeOutput.get_node())) {
supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
}
}
@@ -175,11 +189,14 @@ InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::
// 6. Eliminate subgraphs that consist of housekeeping nodes only
if (ngraph::op::is_constant(node) || ngraph::op::is_parameter(node)) {
if (!InferenceEngine::details::contains(supported, node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) {
if (!InferenceEngine::details::contains(
supported,
node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name())) {
supported.erase(node->get_friendly_name());
}
} else if (ngraph::op::is_output(node)) {
if (!InferenceEngine::details::contains(supported, node->input_values().begin()->get_node()->get_friendly_name())) {
if (!InferenceEngine::details::contains(supported,
node->input_values().begin()->get_node()->get_friendly_name())) {
supported.erase(node->get_friendly_name());
}
}
@@ -204,27 +221,36 @@ void Plugin::AddExtension(const InferenceEngine::IExtensionPtr& /*extension*/) {
// ! [plugin:set_config]
void Plugin::SetConfig(const ConfigMap& config) {
_cfg = Configuration {config, _cfg};
_cfg = Configuration{config, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& /*options*/) const {
InferenceEngine::Parameter Plugin::GetConfig(
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& /*options*/) const {
return _cfg.Get(name);
}
// ! [plugin:get_config]
// ! [plugin:get_metric]
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const {
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
std::vector<std::string> supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE),
METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
std::vector<std::string> supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(IMPORT_EXPORT_SUPPORT),
METRIC_KEY(DEVICE_ARCHITECTURE),
METRIC_KEY(OPTIMIZATION_CAPABILITIES),
METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
configKeys.emplace_back(configKey);
@@ -251,7 +277,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
} else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
// TODO: fill with actual values
using uint = unsigned int;
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1}));
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
} else {
IE_THROW(NotFound) << "Unsupported device metric: " << name;
}

@@ -23,12 +23,19 @@ public:
void SetConfig(const std::map<std::string, std::string>& config) override;
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) const override;
InferenceEngine::IExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) override;
InferenceEngine::IExecutableNetworkInternal::Ptr LoadExeNetworkImpl(
const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) override;
void AddExtension(const std::shared_ptr<InferenceEngine::IExtension>& extension) override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::IExecutableNetworkInternal::Ptr ImportNetwork(std::istream& model, const std::map<std::string, std::string>& config) override;
InferenceEngine::Parameter GetConfig(
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetMetric(
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::IExecutableNetworkInternal::Ptr ImportNetwork(
std::istream& model,
const std::map<std::string, std::string>& config) override;
private:
friend class ExecutableNetwork;

@@ -28,7 +28,10 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) {
}
auto mean_const = it->second;
NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type");
NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32,
"Mean for ",
param->get_friendly_name(),
" must have f32 type");
auto copy_param = param->clone_with_new_inputs({});
auto sub = std::make_shared<ngraph::opset3::Subtract>(copy_param, mean_const);

@@ -12,7 +12,8 @@
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0);
ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {}
ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap)
: m_inputInfoMap(inputInfoMap) {}
bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Function> f) {
ngraph::pass::AddMeanSubtract::MeanMap meanMap;
@@ -39,10 +40,12 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
has_mean_image = true;
if (c == 0) {
meanImage = pInfo[c]->meanData;
NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32,
"Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
NGRAPH_CHECK(
meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32,
"Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
} else {
NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal");
NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(),
"TensorDesc for PreProcessChannel::meanData must be equal");
}
}
}
@@ -52,7 +55,8 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
continue;
}
NGRAPH_CHECK(!(has_mean_image && has_scales), "Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
NGRAPH_CHECK(!(has_mean_image && has_scales),
"Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
if (has_scales) {
ngraph::Shape shape(inputDims.size(), 1);

@@ -28,7 +28,10 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
}
auto scale_const = it->second;
NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type");
NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32,
"Scale for ",
param->get_friendly_name(),
" must have f32 type");
auto copy_param = param->clone_with_new_inputs({});
auto div = std::make_shared<ngraph::opset3::Divide>(copy_param, it->second);

@@ -24,7 +24,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
// Check that the input and output shapes are fully defined (not dynamic) and the number of consumers equals 1
Input<Node> input = node->input(0);
Output<Node> output = node->output(0);
if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static() && output.get_target_inputs().size() == 1) {
if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static() &&
output.get_target_inputs().size() == 1) {
nodes.push_back(node);
}
}
@@ -32,7 +33,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
// Print types and names for collected nodes
for (auto& node : nodes) {
std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl;
std::cout << "Type: " << node->get_type_info().name << std::endl
<< "Name: " << node->get_friendly_name() << std::endl;
}
// Return false because we didn't change nGraph Function

@@ -33,7 +33,9 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() {
}
// Decompose Divide into Multiply with Power operations
auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1}));
auto pow = std::make_shared<ngraph::opset3::Power>(
div->input_value(1),
opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1}));
auto mul = std::make_shared<ngraph::opset3::Multiply>(div->input_value(0), pow);
@@ -70,7 +72,8 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
auto& node_to_output = m.get_pattern_value_map();
// Create a new Relu operation and register it for additional execution
auto new_relu = register_new_node<ngraph::opset3::Relu>(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
auto new_relu =
register_new_node<ngraph::opset3::Relu>(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
// Copy runtime info attributes to newly created operation
ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu);