Align style doc with samples (#5709)

* Added clang-format config

* Fixed style

* Fixed code-style for snippets and fixed build

* Disable clang-format for snippets

* Fixed comments
Ilya Churaev 2021-05-25 10:32:48 +03:00 committed by GitHub
parent e41f378967
commit cc810297f4
63 changed files with 780 additions and 1074 deletions
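The "Disable clang-format for snippets" bullet shows up further down as `// clang-format off` / `// clang-format on` guards added to template_plugin.cpp, presumably so the formatter cannot reorder or reflow the include blocks that the documentation embeds verbatim. A minimal, self-contained sketch of that pattern, using standard headers instead of the actual plugin headers:

#include <map>
#include <string>

// clang-format off
// Everything inside the guard keeps its layout and include order, so a
// snippet embedded into the documentation stays byte-for-byte stable.
#include <vector>
#include <array>
// clang-format on

int main() {
    // Outside the guard, docs/.clang-format applies: 4-space indent,
    // references and pointers bound to the type, 160-column limit.
    std::map<std::string, std::vector<int>> table {};
    return static_cast<int>(table.size());
}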


@ -31,6 +31,7 @@ addIeTarget(
function(addIeTarget)
set(options
ADD_CPPLINT # Enables code style checks for the target
ADD_CLANG_FORMAT # Enables clang-format code style checks for the target
)
set(oneValueRequiredArgs
TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable
@ -119,6 +120,10 @@ function(addIeTarget)
# code style
add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
endif()
if (ARG_ADD_CLANG_FORMAT)
# code style
add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME})
endif()
if (ARG_DEVELOPER_PACKAGE)
# developer package
openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE}
@ -128,7 +133,6 @@ function(addIeTarget)
# Provide default compile pdb name equal to target name
set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME})
endif()
endfunction()
#[[


@ -27,7 +27,10 @@ endif()
# )
#
function(ie_add_plugin)
set(options SKIP_INSTALL)
set(options
SKIP_INSTALL
ADD_CLANG_FORMAT
)
set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@ -73,7 +76,11 @@ function(ie_add_plugin)
string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
endforeach()
add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
if (IE_PLUGIN_ADD_CLANG_FORMAT)
add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME})
else()
add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
endif()
# check that plugin with such name is not registered

docs/.clang-format Normal file

@ -0,0 +1,25 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
ColumnLimit: 160
# Specialize this comment pragma in order to avoid changes in SEA copyrights
CommentPragmas: '^#'
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
IndentPPDirectives: BeforeHash
SpaceBeforeCpp11BracedList: true
SpaceBeforeCtorInitializerColon: false
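The practical effect of this configuration is what the rest of the diff shows: references and pointers attach to the type rather than the name, indentation is 4 spaces, and declarations are repacked up to the 160-column limit. An illustrative before/after modeled on the init() signatures changed below (not an exact quote of any single hunk):

// Before: wrapped at 80 columns, '&' and '*' attached to the parameter name.
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
                                 InferenceEngine::ResponseDesc *resp) noexcept override;

// After: docs/.clang-format applied, one line well under the 160-column limit.
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;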


@ -9,7 +9,10 @@ set(TARGET_NAME "onnx_custom_op")
find_package(ngraph REQUIRED COMPONENTS onnx_importer)
add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp)
add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)
target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
# [cmake:onnx_custom_op]
# Enable code style check
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})


@ -33,3 +33,7 @@ if (ngraph_onnx_importer_FOUND)
target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED)
endif()
# [cmake:extension]
# Enable code style check
file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp")
add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src})


@ -3,13 +3,15 @@
//
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <ie_layouts.h>
#include "op.hpp"
using namespace TemplateExtension;
//! [cpu_implementation:ctor]
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node>& node) {
try {
auto castedNode = std::dynamic_pointer_cast<Operation>(node);
if (!castedNode)
@ -32,8 +34,8 @@ OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept {
auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
@ -72,7 +74,7 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
if (!error.empty()) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
resp->msg[sizeof(resp->msg) - 1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
@ -85,25 +87,24 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
}
if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
IE_THROW()
<< "Operation can be initialized only with 4d input/output tensors!";
IE_THROW() << "Operation can be initialized only with 4d input/output tensors!";
}
if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
IE_THROW() << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::Exception& ex) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
resp->msg[sizeof(resp->msg) - 1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
@ -113,11 +114,10 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float *>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float *dst_data = outputs[0]->buffer().as<float *>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float*>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float* dst_data = outputs[0]->buffer().as<float*>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
for (size_t i = 0; i < inputs[0]->size(); i++) {
dst_data[i] = src_data[i] + add;


@ -5,6 +5,7 @@
#pragma once
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>
namespace TemplateExtension {
@ -13,13 +14,12 @@ namespace TemplateExtension {
class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept override;
private:
int64_t add;
ngraph::Shape inShape;


@ -3,15 +3,16 @@
//
#include "extension.hpp"
#include "cpu_kernel.hpp"
#include "op.hpp"
#ifdef OPENCV_IMPORT_ENABLED
#include "fft_op.hpp"
#include "fft_kernel.hpp"
#include "fft_kernel.hpp"
#include "fft_op.hpp"
#endif
#include <ngraph/ngraph.hpp>
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
#include <onnx_import/onnx_utils.hpp>
#include <onnx_import/onnx_utils.hpp>
#endif
#include <map>
@ -21,22 +22,19 @@
using namespace TemplateExtension;
//! [extension:ctor]
Extension::Extension() {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::register_operator(
Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
int64_t add = node.get_attribute_value<int64_t>("add");
return {std::make_shared<Operation>(ng_inputs.at(0), add)};
ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
int64_t add = node.get_attribute_value<int64_t>("add");
return {std::make_shared<Operation>(ng_inputs.at(0), add)};
});
#ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::register_operator(
FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
bool inverse = node.get_attribute_value<int64_t>("inverse");
return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
bool inverse = node.get_attribute_value<int64_t>("inverse");
return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
});
#endif
#endif
@ -47,19 +45,19 @@ Extension::Extension() {
Extension::~Extension() {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
#ifdef OPENCV_IMPORT_ENABLED
#ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
#endif // OPENCV_IMPORT_ENABLED
#endif // NGRAPH_ONNX_IMPORT_ENABLED
#endif // OPENCV_IMPORT_ENABLED
#endif // NGRAPH_ONNX_IMPORT_ENABLED
}
//! [extension:dtor]
//! [extension:GetVersion]
void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept {
static InferenceEngine::Version ExtensionDescription = {
{1, 0}, // extension API version
{1, 0}, // extension API version
"1.0",
"template_ext" // extension description message
"template_ext" // extension description message
};
versionInfo = &ExtensionDescription;
@ -80,7 +78,7 @@ std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
//! [extension:getOpSets]
//! [extension:getImplTypes]
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node>& node) {
if (std::dynamic_pointer_cast<Operation>(node)) {
return {"CPU"};
}
@ -94,7 +92,7 @@ std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::N
//! [extension:getImplTypes]
//! [extension:getImplementation]
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
if (implType == "CPU") {
if (std::dynamic_pointer_cast<Operation>(node)) {
return std::make_shared<OpImplementation>(node);
@ -110,16 +108,16 @@ InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_
//! [extension:getImplementation]
//! [extension:CreateExtension]
//Generate exported function
// Generate exported function
IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension)
//! [extension:CreateExtension]
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
InferenceEngine::ResponseDesc *resp) noexcept {
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode)
InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept {
try {
ext = new Extension();
return OK;
} catch (std::exception &ex) {
} catch (std::exception& ex) {
if (resp) {
std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
err.copy(resp->msg, 255);


@ -4,13 +4,14 @@
#pragma once
#include <ie_iextension.h>
#include <ie_api.h>
#include <ngraph/ngraph.hpp>
#include <memory>
#include <vector>
#include <string>
#include <ie_iextension.h>
#include <map>
#include <memory>
#include <ngraph/ngraph.hpp>
#include <string>
#include <vector>
//! [extension:header]
namespace TemplateExtension {


@ -4,14 +4,16 @@
//! [fft_kernel:implementation]
#include "fft_kernel.hpp"
#include "fft_op.hpp"
#include <ie_layouts.h>
#include <opencv2/opencv.hpp>
#include "fft_op.hpp"
using namespace TemplateExtension;
FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node>& node) {
auto castedNode = std::dynamic_pointer_cast<FFTOp>(node);
if (!castedNode)
IE_THROW() << "Cannot create implementation for unknown operation!";
@ -26,8 +28,7 @@ FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
inverse = castedNode->inverse;
}
InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc* resp) noexcept {
std::vector<InferenceEngine::DataConfig> inDataConfig;
std::vector<InferenceEngine::DataConfig> outDataConfig;
InferenceEngine::SizeVector order(inpShape.size());
@ -54,28 +55,27 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<Infe
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
}
if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
IE_THROW() << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::Exception& ex) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
resp->msg[sizeof(resp->msg) - 1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
return InferenceEngine::OK;
}
static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{
static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
// NOTE: Inference Engine sizes are reversed.
std::vector<size_t> dims = blob->getTensorDesc().getDims();
std::vector<int> size(dims.begin(), dims.end());
@ -84,9 +84,8 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
return cv::Mat(size, CV_32F, (void*)blob->buffer());
}
InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept {
cv::Mat inp = infEngineBlobToMat(inputs[0]);
cv::Mat out = infEngineBlobToMat(outputs[0]);
@ -95,10 +94,7 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
const int w = inp.size[3];
cv::Mat complex(h, w, CV_32FC2), interleavedOut(h, w, CV_32FC2);
for (int i = 0; i < n; ++i) {
std::vector<cv::Mat> components = {
cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)),
cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))
};
std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
cv::merge(components, complex);
if (!inverse)
@ -106,13 +102,9 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
else
cv::idft(complex, interleavedOut, cv::DFT_SCALE);
components = {
cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)),
cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))
};
components = {cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))};
cv::split(interleavedOut, components);
}
return InferenceEngine::OK;
}
//! [fft_kernel:implementation]


@ -6,6 +6,7 @@
#pragma once
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>
namespace TemplateExtension {
@ -13,13 +14,12 @@ namespace TemplateExtension {
class FFTImpl : public InferenceEngine::ILayerExecImpl {
public:
explicit FFTImpl(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept override;
private:
ngraph::Shape inpShape;
ngraph::Shape outShape;
@ -27,5 +27,5 @@ private:
std::string error;
};
}
} // namespace TemplateExtension
//! [fft_kernel:header]


@ -9,7 +9,7 @@ using namespace TemplateExtension;
constexpr ngraph::NodeTypeInfo FFTOp::type_info;
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse) : Op({inp}) {
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse): Op({inp}) {
constructor_validate_and_infer_types();
inverse = _inverse;
}
@ -19,16 +19,15 @@ void FFTOp::validate_and_infer_types() {
set_output_type(0, get_input_element_type(0), outShape);
}
std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<FFTOp>(new_args.at(0), inverse);
}
bool FFTOp::visit_attributes(ngraph::AttributeVisitor &visitor) {
bool FFTOp::visit_attributes(ngraph::AttributeVisitor& visitor) {
visitor.on_attribute("inverse", inverse);
return true;
}
//! [fft_op:implementation]


@ -11,8 +11,10 @@ namespace TemplateExtension {
class FFTOp : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0};
const ngraph::NodeTypeInfo& get_type_info() const override {
return type_info;
}
FFTOp() = default;
FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse);
@ -23,6 +25,5 @@ public:
bool inverse;
};
}
} // namespace TemplateExtension
//! [fft_op:header]


@ -9,7 +9,7 @@ using namespace TemplateExtension;
//! [op:ctor]
NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0);
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) {
Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add): Op({arg}), add(add) {
constructor_validate_and_infer_types();
}
//! [op:ctor]
@ -22,7 +22,7 @@ void Operation::validate_and_infer_types() {
//! [op:validate]
//! [op:copy]
std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments");
}
@ -32,63 +32,63 @@ std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::Out
//! [op:copy]
//! [op:visit_attributes]
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
bool Operation::visit_attributes(ngraph::AttributeVisitor& visitor) {
visitor.on_attribute("add", add);
return true;
}
//! [op:visit_attributes]
//! [op:evaluate]
namespace
{
namespace {
template <class T>
void implementation(const T* input,
T* output,
int64_t add,
size_t size) {
void implementation(const T* input, T* output, int64_t add, size_t size) {
for (size_t i = 0; i < size; i++) {
output[i] = input[i] + add;
}
}
template <ngraph::element::Type_t ET>
bool evaluate_op(const ngraph::HostTensorPtr& arg0,
const ngraph::HostTensorPtr& out, int64_t add)
{
bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) {
size_t size = ngraph::shape_size(arg0->get_shape());
implementation(arg0->get_data_ptr<ET>(),
out->get_data_ptr<ET>(),
add,
size);
implementation(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), add, size);
return true;
}
} // namespace
bool Operation::evaluate(const ngraph::HostTensorVector& outputs,
const ngraph::HostTensorVector& inputs) const {
switch (inputs[0]->get_element_type())
{
case ngraph::element::Type_t::i8: return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i16: return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i32: return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i64: return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u8: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u16: return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u32: return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u64: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::bf16: return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f16: return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f32: return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
default: break;
bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const {
switch (inputs[0]->get_element_type()) {
case ngraph::element::Type_t::i8:
return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i16:
return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i32:
return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i64:
return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u8:
return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u16:
return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u32:
return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u64:
return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::bf16:
return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f16:
return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f32:
return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
default:
break;
}
return false;
}
bool Operation::has_evaluate() const {
switch (get_input_element_type(0))
{
switch (get_input_element_type(0)) {
case ngraph::element::Type_t::i8:
case ngraph::element::Type_t::i16:
case ngraph::element::Type_t::i32:
@ -99,8 +99,10 @@ bool Operation::has_evaluate() const {
case ngraph::element::Type_t::u64:
case ngraph::element::Type_t::bf16:
case ngraph::element::Type_t::f16:
case ngraph::element::Type_t::f32: return true;
default: break;
case ngraph::element::Type_t::f32:
return true;
default:
break;
}
return false;
}


@ -18,9 +18,10 @@ public:
void validate_and_infer_types() override;
std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
int64_t getAddAttr() const { return add; }
bool evaluate(const ngraph::HostTensorVector& outputs,
const ngraph::HostTensorVector& inputs) const override;
int64_t getAddAttr() const {
return add;
}
bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override;
bool has_evaluate() const override;
private:


@ -13,7 +13,8 @@ ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp)
VERSION_DEFINES_FOR template_plugin.cpp
ADD_CLANG_FORMAT)
target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"


@ -3,18 +3,16 @@
//
#include "template_async_infer_request.hpp"
#include "template_itt.hpp"
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor)
: AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
// In current implementation we have CPU only tasks and no needs in 2 executors
// So, by default single stage pipeline is created.
// This stage executes InferRequest::Infer() using cpuTaskExecutor.
@ -23,24 +21,21 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(
constexpr const auto remoteDevice = false;
if (remoteDevice) {
_pipeline = {
{cpuTaskExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::WaitPipeline");
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::Postprocessing");
_inferRequest->inferPostprocess();
}}
};
_pipeline = {{cpuTaskExecutor,
[this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor,
[this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::WaitPipeline");
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::Postprocessing");
_inferRequest->inferPostprocess();
}}};
}
}
// ! [async_infer_request:ctor]


@ -13,15 +13,13 @@ namespace TemplatePlugin {
// ! [async_infer_request:header]
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest();
private:
TemplateInferRequest::Ptr _inferRequest;
TemplateInferRequest::Ptr _inferRequest;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [async_infer_request:header]


@ -2,17 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <ie_plugin_config.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include "template_config.hpp"
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <ie_plugin_config.hpp>
#include "template/template_config.hpp"
using namespace TemplatePlugin;
Configuration::Configuration() { }
Configuration::Configuration() {}
Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) {
Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg;
// If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration
auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys();
@ -22,8 +23,7 @@ Configuration::Configuration(const ConfigMap& config, const Configuration & defa
if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) {
_streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value);
} else if (streamExecutorConfigKeys.end() !=
std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
} else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
_streamsExecutorConfig.SetConfig(key, value);
} else if (CONFIG_KEY(DEVICE_ID) == key) {
deviceId = std::stoi(value);


@ -4,11 +4,9 @@
#pragma once
#include <string>
#include <map>
#include <ie_parameter.hpp>
#include <map>
#include <string>
#include <threading/ie_istreams_executor.hpp>
namespace TemplatePlugin {
@ -18,19 +16,19 @@ using ConfigMap = std::map<std::string, std::string>;
struct Configuration {
Configuration();
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);
explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const;
// Plugin configuration parameters
int deviceId = 0;
bool perfCount = true;
int deviceId = 0;
bool perfCount = true;
InferenceEngine::IStreamsExecutor::Config _streamsExecutorConfig;
};
// ! [configuration:header]


@ -2,36 +2,35 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "template_executable_network.hpp"
#include <ie_metric_helpers.hpp>
#include <ie_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include "transformations/serialize.hpp"
#include "template/template_config.hpp"
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_itt.hpp"
#include "template_plugin.hpp"
#include "transformations/serialize.hpp"
using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const Plugin::Ptr& plugin) :
InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
_cfg(cfg),
_plugin(plugin) {
const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg, const Plugin::Ptr& plugin)
: InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
_cfg(cfg),
_plugin(plugin) {
// TODO: if your plugin supports device ID (more that single instance of device can be on host machine)
// you should select proper device based on KEY_DEVICE_ID or automatic behavior
// In this case, _waitExecutor should also be created per device.
try {
CompileNetwork(function, inputInfoMap, outputsInfoMap);
InitExecutor(); // creates thread-based executor using for async requests
InitExecutor(); // creates thread-based executor using for async requests
} catch (const InferenceEngine::Exception&) {
throw;
} catch (const std::exception & e) {
} catch (const std::exception& e) {
IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what();
} catch (...) {
IE_THROW(Unexpected) << "Generic exception is thrown";
@ -40,11 +39,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
const Configuration& cfg,
const Plugin::Ptr& plugin) :
_cfg(cfg),
_plugin(plugin) {
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const Configuration& cfg, const Plugin::Ptr& plugin): _cfg(cfg), _plugin(plugin) {
// read XML content
std::string xmlString;
std::uint64_t dataSize = 0;
@ -57,9 +52,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
model.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
if (0 != dataSize) {
dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8,
{static_cast<std::size_t>(dataSize)},
InferenceEngine::Layout::C));
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast<std::size_t>(dataSize)}, InferenceEngine::Layout::C));
dataBlob->allocate();
model.read(dataBlob->buffer(), dataSize);
}
@ -77,10 +70,10 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
try {
CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap);
InitExecutor(); // creates thread-based executor using for async requests
InitExecutor(); // creates thread-based executor using for async requests
} catch (const InferenceEngine::Exception&) {
throw;
} catch (const std::exception & e) {
} catch (const std::exception& e) {
IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what();
} catch (...) {
IE_THROW(Unexpected) << "Generic exception is thrown";
@ -90,12 +83,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
// ! [executable_network:map_graph]
// forward declaration
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap,
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// TODO: perform actual graph compilation / mapping to backend graph representation / kernels
@ -120,7 +112,6 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
}
// ! [executable_network:map_graph]
// ! [executable_network:init_executor]
void TemplatePlugin::ExecutableNetwork::InitExecutor() {
// Default multi-threaded configuration is balanced for throughtput and latency cases and takes into account
@ -137,10 +128,9 @@ void TemplatePlugin::ExecutableNetwork::InitExecutor() {
}
// ! [executable_network:init_executor]
// ! [executable_network:create_infer_request_impl]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]
@ -148,32 +138,26 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C
// ! [executable_network:create_infer_request]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _plugin->_waitExecutor, _callbackExecutor);
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest), _taskExecutor, _plugin->_waitExecutor,
_callbackExecutor);
}
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string& name) const {
return _cfg.Get(name);
}
// ! [executable_network:get_config]
// ! [executable_network:get_metric]
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const {
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const {
// TODO: return more supported values for metrics
if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string> {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) };
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
configKeys.emplace_back(configKey);
}
@ -197,8 +181,7 @@ void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) {
// Note: custom ngraph extensions are not supported
std::map<std::string, ngraph::OpSet> custom_opsets;
std::stringstream xmlFile, binFile;
ngraph::pass::Serialize serializer(xmlFile, binFile,
ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
serializer.run_on_function(_function);
auto m_constants = binFile.str();


@ -4,13 +4,12 @@
#pragma once
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <ngraph/function.hpp>
#include "template_async_infer_request.hpp"
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
namespace TemplatePlugin {
@ -24,15 +23,10 @@ class Plugin;
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(std::istream& model,
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
~ExecutableNetwork() override = default;
@ -42,23 +36,22 @@ public:
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override;
InferenceEngine::Parameter GetMetric(const std::string &name) const override;
InferenceEngine::Parameter GetConfig(const std::string &name) const override;
InferenceEngine::Parameter GetMetric(const std::string& name) const override;
InferenceEngine::Parameter GetConfig(const std::string& name) const override;
private:
friend class TemplateInferRequest;
void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void InitExecutor();
std::atomic<std::size_t> _requestId = {0};
Configuration _cfg;
std::shared_ptr<Plugin> _plugin;
std::shared_ptr<ngraph::Function> _function;
std::map<std::string, std::size_t> _inputIndex;
std::map<std::string, std::size_t> _outputIndex;
std::atomic<std::size_t> _requestId = {0};
Configuration _cfg;
std::shared_ptr<Plugin> _plugin;
std::shared_ptr<ngraph::Function> _function;
std::map<std::string, std::size_t> _inputIndex;
std::map<std::string, std::size_t> _outputIndex;
};
// ! [executable_network:header]


@ -2,19 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include <ngraph/runtime/reference/convert.hpp>
#include "template_infer_request.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
#include "template_itt.hpp"
#include "ie_ngraph_utils.hpp"
#include <algorithm>
#include <map>
#include <memory>
#include <ngraph/runtime/reference/convert.hpp>
#include <string>
#include <utility>
#include "blob_factory.hpp"
#include "ie_ngraph_utils.hpp"
#include "template_executable_network.hpp"
#include "template_itt.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin;
using namespace InferenceEngine;
@ -22,11 +23,9 @@ using namespace InferenceEngine;
using Time = std::chrono::high_resolution_clock;
// ! [infer_request:ctor]
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
IInferRequestInternal(networkInputs, networkOutputs),
_executableNetwork(executableNetwork) {
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork)
: IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) {
// TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
@ -60,11 +59,8 @@ void TemplateInferRequest::allocateDeviceBuffers() {
_outputTensors.resize(_networkOutputs.size());
}
template<typename BlobDataMap, typename GetNetworkPrecisionF>
static void AllocateImpl(const BlobDataMap& userDataMap,
BlobMap& userBlobMap,
BlobMap& deviceBlobMap,
GetNetworkPrecisionF&& GetNetworkPrecision,
template <typename BlobDataMap, typename GetNetworkPrecisionF>
static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision,
bool isInputBlob = true) {
for (auto&& userData : userDataMap) {
const auto& dims = userData.second->getTensorDesc().getDims();
@ -82,7 +78,7 @@ static void AllocateImpl(const BlobDataMap& userDataMap,
deviceBlob = userBlob;
} else {
if (userLayout != deviceLayout && !isInputBlob) {
IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs";
IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs";
}
deviceBlob = make_blob_with_precision({networkPrecision, dims, deviceLayout});
deviceBlob->allocate();
@ -94,13 +90,16 @@ static void AllocateImpl(const BlobDataMap& userDataMap,
void TemplateInferRequest::allocateBlobs() {
auto&& parameters = _executableNetwork->_function->get_parameters();
AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&] (const std::string& blobName) {
AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&](const std::string& blobName) {
return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type();
});
auto&& results = _executableNetwork->_function->get_results();
AllocateImpl(_networkOutputs, _outputs, _networkOutputBlobs, [&] (const std::string& blobName) {
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
}, false);
AllocateImpl(
_networkOutputs, _outputs, _networkOutputBlobs,
[&](const std::string& blobName) {
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
},
false);
}
// ! [infer_request:infer_impl]
@ -113,103 +112,108 @@ void TemplateInferRequest::InferImpl() {
}
// ! [infer_request:infer_impl]
template<typename SrcT, typename DstT>
template <typename SrcT, typename DstT>
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
ngraph::runtime::reference::convert<SrcT, DstT>(
InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(),
src->size());
ngraph::runtime::reference::convert<SrcT, DstT>(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(), src->size());
}
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
switch (src->getTensorDesc().getPrecision()) {
case Precision::U8 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U8 : break;
case Precision::FP32 : {
blobCopy<std::uint8_t, float>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
case Precision::U8: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U8:
break;
case Precision::FP32: {
blobCopy<std::uint8_t, float>(src, dst);
} break;
case Precision::FP32 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::FP32 : break;
case Precision::U8 : {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I64 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I64 : break;
case Precision::I32 : {
blobCopy<int64_t , int32_t>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I16 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I16 : break;
case Precision::FP32 : {
blobCopy<int16_t , float>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I8 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I8 : break;
case Precision::FP32 : {
blobCopy<int8_t , float>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::BOOL : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::BOOL : break;
case Precision::FP32 : {
blobCopy<bool , float>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::U16 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U16 : break;
case Precision::FP32 : {
blobCopy<uint16_t , float>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision();
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::FP32: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::FP32:
break;
case Precision::U8: {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I64: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I64:
break;
case Precision::I32: {
blobCopy<int64_t, int32_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I16: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I16:
break;
case Precision::FP32: {
blobCopy<int16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I8: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I8:
break;
case Precision::FP32: {
blobCopy<int8_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::BOOL: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::BOOL:
break;
case Precision::FP32: {
blobCopy<bool, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::U16: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U16:
break;
case Precision::FP32: {
blobCopy<uint16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision();
}
}
}
@ -225,8 +229,8 @@ void TemplateInferRequest::inferPreprocess() {
const auto& parameter = _parameters[index];
const auto& parameterShape = parameter->get_shape();
const auto& parameterType = parameter->get_element_type();
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
parameterType, parameterShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
}
for (auto&& output : _outputs) {
auto outputBlob = output.second;
@ -238,8 +242,8 @@ void TemplateInferRequest::inferPreprocess() {
const auto& result = _results[index];
const auto& resultShape = result->get_shape();
const auto& resultType = result->get_element_type();
_outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(resultType, resultShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
_outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
resultType, resultShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
}
_durations[Preprocess] = Time::now() - start;
}


@ -4,20 +4,17 @@
#pragma once
#include <array>
#include <chrono>
#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
#include <executable.hpp>
#include <ie_input_info.hpp>
#include <map>
#include <memory>
#include <ngraph/runtime/tensor.hpp>
#include <openvino/itt.hpp>
#include <string>
#include <vector>
#include <array>
#include <memory>
#include <chrono>
#include <openvino/itt.hpp>
#include <ie_input_info.hpp>
#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
#include <ngraph/runtime/tensor.hpp>
#include <executable.hpp>
namespace TemplatePlugin {
@ -29,8 +26,7 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal {
public:
typedef std::shared_ptr<TemplateInferRequest> Ptr;
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest();
@ -47,26 +43,20 @@ private:
void allocateDeviceBuffers();
void allocateBlobs();
enum {
Preprocess,
Postprocess,
StartPipeline,
WaitPipeline,
numOfStages
};
enum { Preprocess, Postprocess, StartPipeline, WaitPipeline, numOfStages };
std::shared_ptr<ExecutableNetwork> _executableNetwork;
std::array<openvino::itt::handle_t, numOfStages> _profilingTask;
std::shared_ptr<ExecutableNetwork> _executableNetwork;
std::array<openvino::itt::handle_t, numOfStages> _profilingTask;
// for performance counters
std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations;
std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations;
InferenceEngine::BlobMap _networkOutputBlobs;
ngraph::ParameterVector _parameters;
ngraph::ResultVector _results;
InferenceEngine::BlobMap _networkOutputBlobs;
ngraph::ParameterVector _parameters;
ngraph::ResultVector _results;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _inputTensors;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _outputTensors;
std::shared_ptr<ngraph::runtime::Executable> _executable;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _inputTensors;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _outputTensors;
std::shared_ptr<ngraph::runtime::Executable> _executable;
};
// ! [infer_request:header]


@ -14,7 +14,7 @@
namespace TemplatePlugin {
namespace itt {
namespace domains {
OV_ITT_DOMAIN(TemplatePlugin);
}
}
OV_ITT_DOMAIN(TemplatePlugin);
}
} // namespace itt
} // namespace TemplatePlugin


@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
// clang-format off
#include <ie_metric_helpers.hpp>
#include <ie_plugin_config.hpp>
#include <ie_algorithm.hpp>
@ -24,6 +25,7 @@
#include "template_infer_request.hpp"
#include "transformations/template_pattern_transformation.hpp"
#include "transformations/preprocessing/preprocessing.hpp"
// clang-format on
using namespace TemplatePlugin;
@ -53,8 +55,7 @@ Plugin::~Plugin() {
// ! [plugin:transform_network]
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap,
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
auto transformedNetwork = ngraph::clone_function(*function);
@ -67,7 +68,7 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// Example: register CommonOptimizations transformation from transformations library
passManager.register_pass<ngraph::pass::CommonOptimizations>();
// Template plugin handles only FP32 networks
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32 }});
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
// Example: register plugin specific transformation
passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
@ -83,36 +84,32 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// ! [plugin:transform_network]
// ! [plugin:load_exe_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork & network,
const ConfigMap &config) {
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::LoadExeNetworkImpl");
InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo();
InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo();
auto fullConfig = Configuration{ config, _cfg };
return std::make_shared<ExecutableNetwork>(network.getFunction(),
networkInputs, networkOutputs, fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(network.getFunction(), networkInputs, networkOutputs, fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr
Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl");
auto fullConfig = Configuration{ config, _cfg };
return std::make_shared<ExecutableNetwork>(modelStream, fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(modelStream, fullConfig, std::static_pointer_cast<Plugin>(shared_from_this()));
}
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const {
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork");
Configuration fullConfig{config, _cfg, false};
Configuration fullConfig {config, _cfg, false};
auto function = network.getFunction();
// 1. First of all we should store initial input operation set
@ -198,36 +195,28 @@ void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
// ! [plugin:add_extension]
// ! [plugin:set_config]
void Plugin::SetConfig(const ConfigMap &config) {
_cfg = Configuration{config, _cfg};
void Plugin::SetConfig(const ConfigMap& config) {
_cfg = Configuration {config, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& /*options*/) const {
return _cfg.Get(name);
}
// ! [plugin:get_config]
// ! [plugin:get_metric]
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
std::vector<std::string> supportedMetrics = {
METRIC_KEY(AVAILABLE_DEVICES),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(IMPORT_EXPORT_SUPPORT),
METRIC_KEY(DEVICE_ARCHITECTURE),
METRIC_KEY(OPTIMIZATION_CAPABILITIES),
METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
std::vector<std::string> supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE),
METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
configKeys.emplace_back(configKey);
@ -236,7 +225,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
// TODO: fill list of available devices
std::vector<std::string> availableDevices = { "" };
std::vector<std::string> availableDevices = {""};
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
} else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
std::string name = "Template Device Full Name";
@ -249,13 +238,13 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, arch);
} else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
// TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
std::vector<std::string> capabilities = { METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/ };
std::vector<std::string> capabilities = {METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/};
IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
} else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
// TODO: fill with actual values
using uint = unsigned int;
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
} else {
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1}));
} else {
IE_THROW(NotFound) << "Unsupported device metric: " << name;
}
}
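For reference, a minimal usage sketch (assumed, not part of this commit) showing how an application could read the metrics registered above through the standard InferenceEngine::Core API; the "TEMPLATE" device name is taken from the test configurations below:

// Illustrative usage sketch: querying the Template plugin's supported metrics.
#include <ie_core.hpp>
#include <ie_plugin_config.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core ie;
    // Returns the vector populated in Plugin::GetMetric for SUPPORTED_METRICS
    InferenceEngine::Parameter p = ie.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_METRICS));
    std::vector<std::string> supportedMetrics = p;
    for (const auto& metric : supportedMetrics) {
        std::cout << metric << std::endl;
    }
    return 0;
}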


@ -4,11 +4,11 @@
#pragma once
#include "template_config.hpp"
#include "template_executable_network.hpp"
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>
#include "backend.hpp"
#include "template_config.hpp"
#include "template_executable_network.hpp"
//! [plugin:header]
namespace TemplatePlugin {
@ -20,26 +20,24 @@ public:
Plugin();
~Plugin();
void SetConfig(const std::map<std::string, std::string> &config) override;
InferenceEngine::QueryNetworkResult
QueryNetwork(const InferenceEngine::CNNNetwork &network,
const std::map<std::string, std::string>& config) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr
LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network,
const std::map<std::string, std::string> &config) override;
void SetConfig(const std::map<std::string, std::string>& config) override;
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) override;
void AddExtension(InferenceEngine::IExtensionPtr extension) override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;
private:
friend class ExecutableNetwork;
friend class TemplateInferRequest;
std::shared_ptr<ngraph::runtime::Backend> _backend;
Configuration _cfg;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
std::shared_ptr<ngraph::runtime::Backend> _backend;
Configuration _cfg;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
} // namespace TemplatePlugin
//! [plugin:header]
//! [plugin:header]


@ -2,21 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/preprocessing/mean_image_or_value.hpp"
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include "transformations/preprocessing/mean_image_or_value.hpp"
using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0);
ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) {
// RUN_ON_FUNCTION_SCOPE(AddMeanSubtract);
auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>();
ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) {
ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root());
if (!param) {
return false;
@ -28,8 +28,7 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
}
auto mean_const = it->second;
NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32,
"Mean for ", param->get_friendly_name(), " must have f32 type");
NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type");
auto copy_param = param->clone_with_new_inputs({});
auto sub = std::make_shared<ngraph::opset3::Subtract>(copy_param, mean_const);


@ -5,10 +5,9 @@
#pragma once
#include <map>
#include <string>
#include <ngraph/op/constant.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include "transformations_visibility.hpp"
@ -29,5 +28,5 @@ public:
using MeanMap = std::map<std::string, std::shared_ptr<ngraph::op::v0::Constant>>;
NGRAPH_RTTI_DECLARATION;
explicit AddMeanSubtract(const MeanMap & inputInfoMap);
explicit AddMeanSubtract(const MeanMap& inputInfoMap);
};


@ -2,26 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/pass/manager.hpp>
#include "transformations/preprocessing/preprocessing.hpp"
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include "transformations/preprocessing/mean_image_or_value.hpp"
#include "transformations/preprocessing/std_scale.hpp"
#include "transformations/preprocessing/preprocessing.hpp"
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0);
ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap)
: m_inputInfoMap(inputInfoMap) { }
ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {}
bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Function> f) {
ngraph::pass::AddMeanSubtract::MeanMap meanMap;
ngraph::pass::AddStdScale::ScaleMap scaleMap;
for (const auto & it : m_inputInfoMap) {
for (const auto& it : m_inputInfoMap) {
bool has_scales = false, has_mean_values = false, has_mean_image = false;
const InferenceEngine::PreProcessInfo & pInfo = it.second->getPreProcess();
const auto & inputDims = it.second->getTensorDesc().getDims();
const InferenceEngine::PreProcessInfo& pInfo = it.second->getPreProcess();
const auto& inputDims = it.second->getTensorDesc().getDims();
const size_t cn = pInfo.getNumberOfChannels();
std::vector<float> meanValues(cn), stdScales(cn);
InferenceEngine::Blob::Ptr meanImage = nullptr;
@ -40,11 +40,10 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
if (c == 0) {
meanImage = pInfo[c]->meanData;
NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32,
"Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
"Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
} else {
NGRAPH_CHECK(pInfo[c]->meanData != nullptr, "pInfo[c]->meanData is nullptr");
NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(),
"TensorDesc for PreProcessChannel::meanData must be equal");
NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal");
}
}
}
@ -54,35 +53,33 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
continue;
}
NGRAPH_CHECK(!(has_mean_image && has_scales),
"Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
NGRAPH_CHECK(!(has_mean_image && has_scales), "Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
if (has_scales) {
ngraph::Shape shape(inputDims.size(), 1);
shape[1] = stdScales.size(); // C
shape[1] = stdScales.size(); // C
scaleMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, stdScales);
}
if (has_mean_values) {
ngraph::Shape shape(inputDims.size(), 1);
shape[1] = meanValues.size(); // C
shape[1] = meanValues.size(); // C
meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanValues);
} else if (has_mean_image) {
ngraph::Shape shape = { cn };
ngraph::Shape shape = {cn};
auto dims = meanImage->getTensorDesc().getDims();
std::copy(dims.begin(), dims.end(), std::back_inserter(shape));
std::vector<float> meanImageData(ngraph::shape_size(shape));
for (size_t c = 0, i = 0; c < cn; ++c) {
auto lm = pInfo[c]->meanData->buffer();
const float *data = lm.as<const float *>();
const float* data = lm.as<const float*>();
std::memcpy(&meanImageData[i], data, meanImage->byteSize());
i += meanImage->size();
}
meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32,
shape, meanImageData);
meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanImageData);
}
}
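A hedged sketch (assumed usage, not shown in this diff) of how the AddPreprocessing pass built above could be applied to an ngraph::Function through a pass manager:

// Illustrative only: wiring the preprocessing pass into ngraph::pass::Manager.
#include <memory>

#include <ie_input_info.hpp>
#include <ngraph/pass/manager.hpp>

#include "transformations/preprocessing/preprocessing.hpp"

void apply_preprocessing(std::shared_ptr<ngraph::Function> f, const InferenceEngine::InputsDataMap& inputs) {
    ngraph::pass::Manager manager;
    // The pass inserts Subtract/Multiply nodes according to the collected mean/scale maps
    manager.register_pass<ngraph::pass::AddPreprocessing>(inputs);
    manager.run_passes(f);
}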


@ -26,10 +26,11 @@ class AddPreprocessing;
* (x - mean) * stdScale
*/
class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass {
const InferenceEngine::InputsDataMap & m_inputInfoMap;
const InferenceEngine::InputsDataMap& m_inputInfoMap;
public:
NGRAPH_RTTI_DECLARATION;
explicit AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap);
explicit AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap);
bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};


@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/preprocessing/std_scale.hpp"
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include "transformations/preprocessing/std_scale.hpp"
using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0);
@ -16,7 +16,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
// RUN_ON_FUNCTION_SCOPE(AddStdScale);
auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>();
ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) {
ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root());
if (!param) {
return false;
@ -28,8 +28,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
}
auto scale_const = it->second;
NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32,
"Scale for ", param->get_friendly_name(), " must have f32 type");
NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type");
auto copy_param = param->clone_with_new_inputs({});
auto mul = std::make_shared<ngraph::opset3::Multiply>(copy_param, it->second);


@ -5,10 +5,9 @@
#pragma once
#include <map>
#include <string>
#include <ngraph/op/constant.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include "transformations_visibility.hpp"


@ -15,7 +15,7 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
NodeVector nodes;
// Traverse nGraph Function in topological order
for (auto & node : f->get_ordered_ops()) {
for (auto& node : f->get_ordered_ops()) {
// Check that number of input and output ports are equal to 1
if (node->inputs().size() == 1 && node->outputs().size() == 1) {
// Check that input and output shape a fully defined (not dynamic) and number of consumers equal to 1
@ -28,9 +28,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
}
// Print types and names for collected nodes
for (auto & node : nodes) {
std::cout << "Type: " << node->get_type_info().name << std::endl
<< "Name: " << node->get_friendly_name() << std::endl;
for (auto& node : nodes) {
std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl;
}
// Return false because we didn't change nGraph Function


@ -16,7 +16,7 @@ class MyFunctionTransformation;
// ! [function_pass:template_transformation_hpp]
// template_function_transformation.hpp
class ngraph::pass::MyFunctionTransformation: public ngraph::pass::FunctionPass {
class ngraph::pass::MyFunctionTransformation : public ngraph::pass::FunctionPass {
public:
NGRAPH_RTTI_DECLARATION;
bool run_on_function(std::shared_ptr<ngraph::Function> f) override;


@ -3,13 +3,14 @@
//
#include "transformations/template_pattern_transformation.hpp"
#include "transformations/template_function_transformation.hpp"
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
#include "transformations/template_function_transformation.hpp"
using namespace ngraph;
// ! [graph_rewrite:template_transformation_cpp]
@ -23,15 +24,14 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() {
auto div = std::make_shared<ngraph::opset3::Divide>(input0, input1);
ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
auto div = std::dynamic_pointer_cast<ngraph::opset3::Divide> (m.get_match_root());
auto div = std::dynamic_pointer_cast<ngraph::opset3::Divide>(m.get_match_root());
// We can not apply this transformation in case with integer input data type
if (!div || div->input(0).get_element_type().is_integral()) {
return false;
}
// Decompose Divide into Multiply with Power operations
auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1),
opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1}));
auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1}));
auto mul = std::make_shared<ngraph::opset3::Multiply>(div->input_value(0), pow);
@ -67,8 +67,7 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
auto& node_to_output = m.get_pattern_value_map();
// Create new Relu operation and add register it for additional execution
auto new_relu = register_new_node<ngraph::opset3::Relu>(
node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
auto new_relu = register_new_node<ngraph::opset3::Relu>(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
// Copy runtime info attributes to newly created operation
ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu);
@ -91,60 +90,60 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
// ! [matcher_pass:relu_fusion]
void run_matcher_on_node(std::shared_ptr<ngraph::Node> node) {
// ! [matcher_pass:run_on_node]
if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
// successful execution (root node was replaced)
}
// ! [matcher_pass:run_on_node]
// ! [matcher_pass:run_on_node]
if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
// successful execution (root node was replaced)
}
// ! [matcher_pass:run_on_node]
}
void run_matcher_with_manager(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager]
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
pass::Manager manager;
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager]
// ! [matcher_pass:manager]
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
pass::Manager manager;
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager]
}
void run_matcher_with_manager2(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager2]
// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
pass::Manager manager;
auto anchor = manager.register_pass<ngraph::pass::GraphRewrite>();
anchor->add_matcher<ngraph::pass::DecomposeDivideMatcher>();
anchor->add_matcher<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager2]
// ! [matcher_pass:manager2]
// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
pass::Manager manager;
auto anchor = manager.register_pass<ngraph::pass::GraphRewrite>();
anchor->add_matcher<ngraph::pass::DecomposeDivideMatcher>();
anchor->add_matcher<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager2]
}
void run_matcher_with_manager3(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager3]
pass::Manager manager;
manager.register_pass<ngraph::pass::MyFunctionTransformation>();
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager3]
// ! [matcher_pass:manager3]
pass::Manager manager;
manager.register_pass<ngraph::pass::MyFunctionTransformation>();
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager3]
}
void run_matcher_with_gr(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:graph_rewrite]
// Two matcher passes will run simultaneously in a single graph traversal
ngraph::pass::GraphRewrite pass;
pass.add_matcher<ngraph::pass::DecomposeDivideMatcher>();
pass.add_matcher<ngraph::pass::ReluReluFusionMatcher>();
pass.run_on_function(f);
// ! [matcher_pass:graph_rewrite]
// ! [matcher_pass:graph_rewrite]
// Two matcher passes will run simultaneously in a single graph traversal
ngraph::pass::GraphRewrite pass;
pass.add_matcher<ngraph::pass::DecomposeDivideMatcher>();
pass.add_matcher<ngraph::pass::ReluReluFusionMatcher>();
pass.run_on_function(f);
// ! [matcher_pass:graph_rewrite]
}
// ! [manual_constant_folding]
template <class T>
Output<Node> eltwise_fold(const Output<Node> & input0, const Output<Node> & input1) {
Output<Node> eltwise_fold(const Output<Node>& input0, const Output<Node>& input1) {
auto eltwise = std::make_shared<T>(input0, input1);
OutputVector output(eltwise->get_output_size());
// If constant folding wasn't successful return eltwise output


@ -21,14 +21,14 @@ class ReluReluFusionMatcher;
* @ingroup ie_transformation_common_api
* @brief Add transformation description.
*/
class ngraph::pass::DecomposeDivideMatcher: public ngraph::pass::MatcherPass {
class ngraph::pass::DecomposeDivideMatcher : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
DecomposeDivideMatcher();
};
// ! [graph_rewrite:template_transformation_hpp]
class ngraph::pass::ReluReluFusionMatcher: public ngraph::pass::MatcherPass {
class ngraph::pass::ReluReluFusionMatcher : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ReluReluFusionMatcher();


@ -14,7 +14,7 @@ addIeTargetTest(
IE::funcSharedTests
INCLUDES
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include"
ADD_CPPLINT
ADD_CLANG_FORMAT
LABELS
TEMPLATE
)


@ -4,5 +4,4 @@
#include "functional_test_utils/core_config.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
}
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}


@ -7,19 +7,14 @@
using namespace LayerTestsDefinitions;
namespace {
static const std::vector<ngraph::element::Type> precisionsTemplate = {
ngraph::element::f32,
};
static const std::vector<ngraph::element::Type> precisionsTemplate = {
ngraph::element::f32,
};
static const std::vector<std::size_t> batchSizesTemplate = {
1, 2
};
static const std::vector<std::size_t> batchSizesTemplate = {1, 2};
INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
::testing::Combine(
::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()),
::testing::ValuesIn(precisionsTemplate),
::testing::ValuesIn(batchSizesTemplate),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
LoadNetworkCacheTestBase::getTestCaseName);
} // namespace
INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
::testing::Combine(::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), ::testing::ValuesIn(precisionsTemplate),
::testing::ValuesIn(batchSizesTemplate), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
LoadNetworkCacheTestBase::getTestCaseName);
} // namespace


@ -2,19 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/config.hpp"
#include <template/template_config.hpp>
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
@ -27,32 +25,23 @@ const std::vector<std::map<std::string, std::string>> inconfigs = {
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
IncorrectConfigTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
IncorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CorrectConfigAPITests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CorrectConfigAPITests::getTestCaseName);
} // namespace
} // namespace


@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <string>
#include <vector>
#include "behavior/core_integration.hpp"
#include <string>
#include <utility>
#include <vector>
using namespace BehaviorTestsDefinitions;
namespace {
@ -16,54 +16,31 @@ namespace {
// IE Class Common tests with <pluginName, deviceName params>
//
INSTANTIATE_TEST_CASE_P(
smoke_IEClassBasicTestP, IEClassBasicTestP,
::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));
INSTANTIATE_TEST_CASE_P(smoke_IEClassBasicTestP, IEClassBasicTestP, ::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassNetworkTestP, IEClassNetworkTestP,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassNetworkTestP, IEClassNetworkTestP, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
//
// IE Class GetMetric
//
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
//
// IE Class SetConfig
@ -111,9 +88,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
// IE Class GetConfig
//
INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetConfigTest, IEClassGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;
@ -125,7 +100,7 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
std::vector<std::string> configValues = p;
for (auto &&confKey : configValues) {
for (auto&& confKey : configValues) {
if (CONFIG_KEY(DEVICE_ID) == confKey) {
std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID));
std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl;
@ -143,48 +118,37 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
// Executable Network GetMetric
//
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
//
// Executable Network GetConfig / SetConfig
//
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// IE Class Query network
INSTANTIATE_TEST_CASE_P(
smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// IE Class Load network
INSTANTIATE_TEST_CASE_P(
smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
//
// Hetero Executable Network GetMetric
@ -192,21 +156,17 @@ INSTANTIATE_TEST_CASE_P(
#ifdef ENABLE_MKL_DNN
INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
#endif // ENABLE_MKL_DNN
} // namespace
} // namespace


@ -8,32 +8,20 @@ using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<std::vector<int >> orders = {
const std::vector<std::vector<int>> orders = {
// 0 - plugin
// 1 - executable_network
// 2 - infer_request
{0, 1, 2},
{0, 2, 1},
{1, 0, 2},
{1, 2, 0},
{2, 0, 1},
{2, 1, 0}
};
{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestImportNetwork,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);
::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"), ::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
HoldersTestOnImportedNetwork::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
HoldersTestOnImportedNetwork::getTestCaseName);
} // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/exec_graph_info.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);
} // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);
} // namespace


@ -2,27 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request_callback.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
} // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request_config.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);
} // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);
} // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);
} // namespace


@ -8,31 +8,16 @@ using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
const std::vector<InferenceEngine::Layout> Layout = {
InferenceEngine::Layout::NCHW,
InferenceEngine::Layout::CHW,
InferenceEngine::Layout::NC,
InferenceEngine::Layout::C
};
const std::vector<InferenceEngine::Layout> Layout = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::CHW, InferenceEngine::Layout::NC,
InferenceEngine::Layout::C};
const std::vector<std::vector<size_t>> inputShapes = {
{ 1, 3, 16, 16 },
{ 3, 32, 16 },
{ 1, 3 },
{ 3 }
};
const std::vector<std::vector<size_t>> inputShapes = {{1, 3, 16, 16}, {3, 32, 16}, {1, 3}, {3}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs),
::testing::ValuesIn(Layout),
::testing::ValuesIn(inputShapes)),
::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs), ::testing::ValuesIn(Layout), ::testing::ValuesIn(inputShapes)),
LayoutTest::getTestCaseName);
} // namespace


@ -2,39 +2,30 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/preprocessing.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::U8,
InferenceEngine::Precision::FP32
};
const std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::U8, InferenceEngine::Precision::FP32};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels
::testing::Values(true), // Use SetInput
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels
::testing::Values(true), // Use SetInput
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(false), // use GetBlob
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(false), // use GetBlob
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);
} // namespace


@ -2,79 +2,50 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/set_preprocess.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
CommonTestUtils::DEVICE_TEMPLATE }}
};
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_TEMPLATE}}};
const std::vector<std::map<std::string, std::string>> heteroConfigs = {
{{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE }}
};
const std::vector<std::map<std::string, std::string>> heteroConfigs = {{{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Hetero_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
PreprocessTest::getTestCaseName);
const std::vector<InferenceEngine::Precision> ioPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::U8
};
const std::vector<InferenceEngine::Precision> ioPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8};
const std::vector<InferenceEngine::Layout> netLayouts = {
InferenceEngine::Layout::NCHW,
// InferenceEngine::Layout::NHWC
};
const std::vector<InferenceEngine::Layout> ioLayouts = {
InferenceEngine::Layout::NCHW,
InferenceEngine::Layout::NHWC
};
const std::vector<InferenceEngine::Layout> ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessConversionTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(netLayouts),
::testing::ValuesIn(ioLayouts),
::testing::ValuesIn(ioLayouts),
::testing::Bool(),
::testing::Bool(),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(ioPrecisions), ::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(netLayouts), ::testing::ValuesIn(ioLayouts), ::testing::ValuesIn(ioLayouts), ::testing::Bool(),
::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
PreprocessConversionTest::getTestCaseName);
} // namespace

View File
@ -8,34 +8,23 @@ using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTestInput::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTestOutput::getTestCaseName);
} // namespace

View File
@ -8,15 +8,11 @@ using namespace BehaviorTestsDefinitions;
namespace {
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
VersionTest::getTestCaseName);
} // namespace

View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero/query_network.hpp"
#include <vector>
#include "hetero/query_network.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
@ -14,8 +15,7 @@ using namespace HeteroTests;
auto ConvBias = ngraph::builder::subgraph::makeConvBias();
INSTANTIATE_TEST_CASE_P(smoke_FullySupportedTopologies, QueryNetworkTest,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
::testing::Values(ConvBias)),
::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
::testing::Values(ConvBias)),
QueryNetworkTest::getTestCaseName);
} // namespace

View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero/synthetic.hpp"
#include <vector>
#include "hetero/synthetic.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
@ -12,14 +13,12 @@ namespace {
using namespace HeteroTests;
INSTANTIATE_TEST_CASE_P(smoke_SingleMajorNode, HeteroSyntheticTest,
::testing::Combine(
::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
HeteroSyntheticTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(nightly_RandomMajorNodes, HeteroSyntheticTest,
::testing::Combine(
::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
HeteroSyntheticTest::getTestCaseName);
} // namespace

View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/convolution.hpp"
#include <vector>
#include "single_layer_tests/convolution.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
@ -19,122 +20,72 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
/* ============= 2D Convolution ============= */
const std::vector<std::vector<size_t >> kernels = {{3, 3},
{3, 5}};
const std::vector<std::vector<size_t >> strides = {{1, 1},
{1, 3}};
const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0},
{0, 3}};
const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0},
{0, 3}};
const std::vector<std::vector<size_t >> dilations = {{1, 1},
{3, 1}};
const std::vector<std::vector<size_t>> kernels = {{3, 3}, {3, 5}};
const std::vector<std::vector<size_t>> strides = {{1, 1}, {1, 3}};
const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}, {0, 3}};
const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0}, {0, 3}};
const std::vector<std::vector<size_t>> dilations = {{1, 1}, {3, 1}};
const std::vector<size_t> numOutChannels = {1, 5};
const std::vector<ngraph::op::PadType> padTypes = {
ngraph::op::PadType::EXPLICIT,
ngraph::op::PadType::VALID
};
const std::vector<ngraph::op::PadType> padTypes = {ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID};
const auto conv2DParams_ExplicitPadding = ::testing::Combine(
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
::testing::ValuesIn(padBegins),
::testing::ValuesIn(padEnds),
::testing::ValuesIn(dilations),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv2DParams_ExplicitPadding =
::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds),
::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), ::testing::Values(ngraph::op::PadType::EXPLICIT));
// ! [test_convolution:declare_parameters]
const auto conv2DParams_AutoPadValid = ::testing::Combine(
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::ValuesIn(dilations),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID)
);
const auto conv2DParams_AutoPadValid =
::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID));
// ! [test_convolution:instantiate]
INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv2DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);
// ! [test_convolution:instantiate]
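The paired "// ! [test_convolution:instantiate]" comments above are snippet markers: documentation pages can embed exactly the code enclosed by them. A minimal sketch of how such a region is declared and then referenced, assuming a Doxygen-style documentation build and a hypothetical file name and label:

// ! [my_sample:add]
int add(int a, int b) {
    return a + b;  // only the lines between the two markers are rendered in the docs
}
// ! [my_sample:add]
// Referenced from a documentation page as: \snippet my_sample.cpp my_sample:add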
INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv2DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);
/* ============= 3D Convolution ============= */
const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3},
{3, 5, 3}};
const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0},
{0, 2, 0}};
const std::vector<std::vector<size_t>> kernels3d = {{3, 3, 3}, {3, 5, 3}};
const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0}, {0, 2, 0}};
const std::vector<std::vector<size_t >> strides3d = {{1, 1, 1},
{1, 2, 1}};
const std::vector<std::vector<size_t >> dilations3d = {{1, 1, 1},
{1, 2, 1}};
const std::vector<std::vector<size_t>> strides3d = {{1, 1, 1}, {1, 2, 1}};
const std::vector<std::vector<size_t>> dilations3d = {{1, 1, 1}, {1, 2, 1}};
const auto conv3DParams_ExplicitPadding = ::testing::Combine(
::testing::ValuesIn(kernels3d),
::testing::ValuesIn(strides3d),
::testing::ValuesIn(paddings3d),
::testing::ValuesIn(paddings3d),
::testing::ValuesIn(dilations3d),
::testing::Values(5),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv3DParams_AutoPadValid = ::testing::Combine(
::testing::ValuesIn(kernels3d),
::testing::ValuesIn(strides3d),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::ValuesIn(dilations3d),
::testing::Values(5),
::testing::Values(ngraph::op::PadType::VALID)
);
const auto conv3DParams_ExplicitPadding =
::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(paddings3d), ::testing::ValuesIn(paddings3d),
::testing::ValuesIn(dilations3d), ::testing::Values(5), ::testing::Values(ngraph::op::PadType::EXPLICIT));
const auto conv3DParams_AutoPadValid =
::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})), ::testing::ValuesIn(dilations3d), ::testing::Values(5),
::testing::Values(ngraph::op::PadType::VALID));
INSTANTIATE_TEST_CASE_P(smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine(
conv3DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv3DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine(
conv3DParams_AutoPadValid,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv3DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);
} // namespace

View File
@ -2,43 +2,34 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/reshape.hpp"
#include <vector>
#include "single_layer_tests/reshape.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP32,
};
INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheckDynBatch, ReshapeLayerTest,
::testing::Combine(
::testing::Values(true),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(std::map<std::string, std::string>({}))),
ReshapeLayerTest::getTestCaseName);
::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(std::map<std::string, std::string>({}))),
ReshapeLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheck, ReshapeLayerTest,
::testing::Combine(
::testing::Values(true),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
::testing::Values(std::vector<size_t>({10, 0, 100})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(std::map<std::string, std::string>({}))),
ReshapeLayerTest::getTestCaseName);
::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
::testing::Values(std::vector<size_t>({10, 0, 100})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(std::map<std::string, std::string>({}))),
ReshapeLayerTest::getTestCaseName);
} // namespace

View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/softmax.hpp"
#include <vector>
#include "single_layer_tests/softmax.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
@ -25,28 +26,14 @@ const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
InferenceEngine::SizeVector {10, 10},
};
const std::vector<size_t> axis2D = {
0, 1
};
const std::vector<size_t> axis2D = {0, 1};
const auto params2D = testing::Combine(
testing::ValuesIn(netPrecisions),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::ValuesIn(inputLayouts2D),
testing::Values(InferenceEngine::Layout::ANY),
testing::ValuesIn(inputShapes2D),
testing::ValuesIn(axis2D),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
testing::Values(std::map<std::string, std::string>())
);
const auto params2D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::ValuesIn(inputLayouts2D),
testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes2D), testing::ValuesIn(axis2D),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));
INSTANTIATE_TEST_CASE_P(
smoke_SoftMax2D,
SoftMaxLayerTest,
params2D,
SoftMaxLayerTest::getTestCaseName
);
INSTANTIATE_TEST_CASE_P(smoke_SoftMax2D, SoftMaxLayerTest, params2D, SoftMaxLayerTest::getTestCaseName);
const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
InferenceEngine::SizeVector {1, 100, 1, 1},
@ -56,23 +43,11 @@ const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
const std::vector<size_t> axis4D = {0, 1, 2, 3};
const auto params4D = testing::Combine(
testing::ValuesIn(netPrecisions),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Layout::NCHW),
testing::Values(InferenceEngine::Layout::ANY),
testing::ValuesIn(inputShapes4D),
testing::ValuesIn(axis4D),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
testing::Values(std::map<std::string, std::string>())
);
const auto params4D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(InferenceEngine::Layout::NCHW),
testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes4D), testing::ValuesIn(axis4D),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));
INSTANTIATE_TEST_CASE_P(
smoke_SoftMax4D,
SoftMaxLayerTest,
params4D,
SoftMaxLayerTest::getTestCaseName
);
INSTANTIATE_TEST_CASE_P(smoke_SoftMax4D, SoftMaxLayerTest, params4D, SoftMaxLayerTest::getTestCaseName);
} // namespace

View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/split.hpp"
#include <vector>
#include "single_layer_tests/split.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
@ -12,17 +13,11 @@ using namespace LayerTestsDefinitions;
namespace {
INSTANTIATE_TEST_CASE_P(smoke_NumSplitsCheck, SplitLayerTest,
::testing::Combine(
::testing::Values(1, 2, 3, 5, 6, 10, 30),
::testing::Values(0, 1, 2, 3),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(std::vector<size_t>({})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(::testing::Values(1, 2, 3, 5, 6, 10, 30), ::testing::Values(0, 1, 2, 3),
::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(std::vector<size_t>({})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
SplitLayerTest::getTestCaseName);
} // namespace

View File
@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp"
#include <string>
#include <vector>
std::vector<std::string> disabledTestPatterns() {
return {
".*ExclusiveAsyncRequests.*",

View File

@ -18,11 +18,9 @@
// #include "common_test_utils/ngraph_test_utils.hpp"
// using namespace testing;
// using namespace ngraph;
// TEST(TransformationTests, Preprocessing_AddStdScale) {
// std::shared_ptr<Function> f(nullptr), f_ref(nullptr);

View File
@ -4,12 +4,11 @@
#include <gtest/gtest.h>
#include <string>
#include <memory>
#include <queue>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <queue>
#include <string>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>
@ -24,11 +23,11 @@ TEST(TransformationTests, DISABLED_TemplateTest) {
// f_ref - ngraph::Function that is expected after applying transformation
{
// Example function
auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5});
auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
auto divide = std::make_shared<ngraph::opset3::Divide>(data, divide_constant);
f = std::make_shared<ngraph::Function>(ngraph::NodeVector{divide}, ngraph::ParameterVector{data});
f = std::make_shared<ngraph::Function>(ngraph::NodeVector {divide}, ngraph::ParameterVector {data});
// This transformation init runtime info attributes
ngraph::pass::InitNodeInfo().run_on_function(f);
@ -42,13 +41,12 @@ TEST(TransformationTests, DISABLED_TemplateTest) {
{
// Example reference function
auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5});
auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant,
ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {-1}));
auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant, ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {-1}));
auto mul = std::make_shared<ngraph::opset3::Multiply>(data, pow);
f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{data});
f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector {mul}, ngraph::ParameterVector {data});
}
// Compare that processed function and expected function are the same