Align style doc with samples (#5709)

* Added clang-format config

* Fixed style

* Fixed code-style for snippets and fixed build

* Disable clang-format for snippets

* Fixed comments
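For reference, clang-format honors "// clang-format off" / "// clang-format on" marker comments, which is the mechanism this commit uses to keep order-sensitive regions of the documentation snippets out of the formatter's reach (see the include block in template_plugin.cpp below). A minimal sketch of the pattern, with hypothetical header names:

    // clang-format off
    // Assumed order-sensitive pair: second.hpp relies on macros from first.hpp,
    // so formatting and include re-sorting are suspended for these lines.
    #include <first.hpp>
    #include <second.hpp>
    // clang-format on

    #include <vector>  // formatted and sorted normally from here on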
Ilya Churaev 2021-05-25 10:32:48 +03:00 committed by GitHub
parent e41f378967
commit cc810297f4
63 changed files with 780 additions and 1074 deletions


@@ -31,6 +31,7 @@ addIeTarget(
 function(addIeTarget)
     set(options
         ADD_CPPLINT                   # Enables code style checks for the target
+        ADD_CLANG_FORMAT              # Enables code style checks for the target
         )
     set(oneValueRequiredArgs
         TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable
@@ -119,6 +120,10 @@ function(addIeTarget)
         # code style
         add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
     endif()
+    if (ARG_ADD_CLANG_FORMAT)
+        # code style
+        add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME})
+    endif()
     if (ARG_DEVELOPER_PACKAGE)
         # developer package
         openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE}
@@ -128,7 +133,6 @@ function(addIeTarget)
     # Provide default compile pdb name equal to target name
     set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME})
     endif()
 endfunction()
 #[[


@@ -27,7 +27,10 @@ endif()
 # )
 #
 function(ie_add_plugin)
-    set(options SKIP_INSTALL)
+    set(options
+        SKIP_INSTALL
+        ADD_CLANG_FORMAT
+        )
     set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
     set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
     cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@@ -73,7 +76,11 @@ function(ie_add_plugin)
         string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
     endforeach()
 
+    if (IE_PLUGIN_ADD_CLANG_FORMAT)
+        add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME})
+    else()
         add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
+    endif()
 
     # check that plugin with such name is not registered

docs/.clang-format (new file, 25 lines)

@@ -0,0 +1,25 @@
+BasedOnStyle: Google
+IndentWidth: 4
+UseTab: Never
+Language: Cpp
+Standard: Cpp11
+AccessModifierOffset: -4
+AlignConsecutiveMacros: true
+AllowAllArgumentsOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLambdasOnASingleLine: Empty
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakBeforeMultilineStrings: false
+ColumnLimit: 160
+# Specialize this comment pragma in order to avoid changes in SEA copyrights
+CommentPragmas: '^#'
+DerivePointerAlignment: false
+FixNamespaceComments: true
+IndentCaseLabels: false
+IndentPPDirectives: BeforeHash
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: false
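For illustration, a small hedged sketch (a hypothetical class, not taken from the repository) of how code renders under the settings above: 4-space indentation with access modifiers outdented back to the class column (AccessModifierOffset: -4), a space before C++11 braced init lists but none before the constructor-initializer colon, short empty functions kept on a single line, and closing-namespace comments added automatically:

    #include <vector>

    namespace TemplateExtension {

    class Example {
    public:                              // AccessModifierOffset: -4
        Example(): values_ {1, 2, 3} {}  // SpaceBeforeCtorInitializerColon: false, SpaceBeforeCpp11BracedList: true
        void clear() {}                  // AllowShortFunctionsOnASingleLine: Empty

    private:
        std::vector<int> values_;
    };

    }  // namespace TemplateExtension (FixNamespaceComments: true)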


@@ -9,7 +9,10 @@ set(TARGET_NAME "onnx_custom_op")
 find_package(ngraph REQUIRED COMPONENTS onnx_importer)
 
-add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp)
+add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)
 target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
 # [cmake:onnx_custom_op]
+
+# Enable code style check
+add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})


@@ -33,3 +33,7 @@ if (ngraph_onnx_importer_FOUND)
     target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED)
 endif()
 # [cmake:extension]
+
+# Enable code style check
+file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp")
+add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src})


@@ -3,9 +3,11 @@
 //
 #include "cpu_kernel.hpp"
-#include "op.hpp"
+
 #include <ie_layouts.h>
+
+#include "op.hpp"
 
 using namespace TemplateExtension;
 
 //! [cpu_implementation:ctor]
@@ -92,8 +94,7 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig
     }
     if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
-        IE_THROW()
-            << "Operation can be initialized only with 4d input/output tensors!";
+        IE_THROW() << "Operation can be initialized only with 4d input/output tensors!";
     }
     if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
@@ -113,8 +114,7 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig
 //! [cpu_implementation:init]
 //! [cpu_implementation:execute]
-InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                                      std::vector<InferenceEngine::Blob::Ptr> &outputs,
+InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                                       InferenceEngine::ResponseDesc* resp) noexcept {
     const float* src_data = inputs[0]->cbuffer().as<const float*>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
     float* dst_data = outputs[0]->buffer().as<float*>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();


@@ -5,6 +5,7 @@
 #pragma once
 
 #include <ie_iextension.h>
+
 #include <ngraph/ngraph.hpp>
 
 namespace TemplateExtension {
@@ -15,11 +16,10 @@ public:
     explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
     InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
                                                            InferenceEngine::ResponseDesc* resp) noexcept override;
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
-                                     InferenceEngine::ResponseDesc *resp) noexcept override;
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                        std::vector<InferenceEngine::Blob::Ptr> &outputs,
+    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
+    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                         InferenceEngine::ResponseDesc* resp) noexcept override;
 
 private:
     int64_t add;
     ngraph::Shape inShape;


@@ -3,11 +3,12 @@
 //
 #include "extension.hpp"
+
 #include "cpu_kernel.hpp"
 #include "op.hpp"
 #ifdef OPENCV_IMPORT_ENABLED
-#include "fft_op.hpp"
 #include "fft_kernel.hpp"
+#include "fft_op.hpp"
 #endif
 #include <ngraph/ngraph.hpp>
 #ifdef NGRAPH_ONNX_IMPORT_ENABLED
@@ -21,19 +22,16 @@
 using namespace TemplateExtension;
 
 //! [extension:ctor]
 Extension::Extension() {
 #ifdef NGRAPH_ONNX_IMPORT_ENABLED
-    ngraph::onnx_import::register_operator(
-        Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
+    ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
         ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
         int64_t add = node.get_attribute_value<int64_t>("add");
         return {std::make_shared<Operation>(ng_inputs.at(0), add)};
     });
 #ifdef OPENCV_IMPORT_ENABLED
-    ngraph::onnx_import::register_operator(
-        FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
+    ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
         ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
         bool inverse = node.get_attribute_value<int64_t>("inverse");
         return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
@@ -114,8 +112,8 @@ InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_
 IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension)
 //! [extension:CreateExtension]
-INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
-                                                                                      InferenceEngine::ResponseDesc *resp) noexcept {
+INFERENCE_EXTENSION_API(InferenceEngine::StatusCode)
+InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept {
     try {
         ext = new Extension();
         return OK;


@@ -4,13 +4,14 @@
 #pragma once
 
-#include <ie_iextension.h>
 #include <ie_api.h>
-#include <ngraph/ngraph.hpp>
-#include <memory>
-#include <vector>
-#include <string>
+#include <ie_iextension.h>
+
 #include <map>
+#include <memory>
+#include <ngraph/ngraph.hpp>
+#include <string>
+#include <vector>
 
 //! [extension:header]
 namespace TemplateExtension {


@@ -4,11 +4,13 @@
 //! [fft_kernel:implementation]
 #include "fft_kernel.hpp"
-#include "fft_op.hpp"
+
 #include <ie_layouts.h>
 #include <opencv2/opencv.hpp>
+
+#include "fft_op.hpp"
 
 using namespace TemplateExtension;
 
 FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node>& node) {
@@ -26,8 +28,7 @@ FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
     inverse = castedNode->inverse;
 }
 
-InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
-                                                                InferenceEngine::ResponseDesc *resp) noexcept {
+InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc* resp) noexcept {
     std::vector<InferenceEngine::DataConfig> inDataConfig;
     std::vector<InferenceEngine::DataConfig> outDataConfig;
     InferenceEngine::SizeVector order(inpShape.size());
@@ -74,8 +75,7 @@ InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig &config,
     return InferenceEngine::OK;
 }
 
-static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
-{
+static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
     // NOTE: Inference Engine sizes are reversed.
     std::vector<size_t> dims = blob->getTensorDesc().getDims();
     std::vector<int> size(dims.begin(), dims.end());
@@ -84,8 +84,7 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
     return cv::Mat(size, CV_32F, (void*)blob->buffer());
 }
 
-InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                             std::vector<InferenceEngine::Blob::Ptr> &outputs,
+InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                              InferenceEngine::ResponseDesc* resp) noexcept {
     cv::Mat inp = infEngineBlobToMat(inputs[0]);
     cv::Mat out = infEngineBlobToMat(outputs[0]);
@@ -95,10 +94,7 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
     const int w = inp.size[3];
     cv::Mat complex(h, w, CV_32FC2), interleavedOut(h, w, CV_32FC2);
     for (int i = 0; i < n; ++i) {
-        std::vector<cv::Mat> components = {
-            cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)),
-            cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))
-        };
+        std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
         cv::merge(components, complex);
 
         if (!inverse)
@@ -106,13 +102,9 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
         else
             cv::idft(complex, interleavedOut, cv::DFT_SCALE);
 
-        components = {
-            cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)),
-            cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))
-        };
+        components = {cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))};
         cv::split(interleavedOut, components);
     }
     return InferenceEngine::OK;
 }
 //! [fft_kernel:implementation]


@@ -6,6 +6,7 @@
 #pragma once
 
 #include <ie_iextension.h>
+
 #include <ngraph/ngraph.hpp>
 
 namespace TemplateExtension {
@@ -15,11 +16,10 @@ public:
     explicit FFTImpl(const std::shared_ptr<ngraph::Node>& node);
     InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
                                                            InferenceEngine::ResponseDesc* resp) noexcept override;
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
-                                     InferenceEngine::ResponseDesc *resp) noexcept override;
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                        std::vector<InferenceEngine::Blob::Ptr> &outputs,
+    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
+    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
                                         InferenceEngine::ResponseDesc* resp) noexcept override;
 
 private:
     ngraph::Shape inpShape;
     ngraph::Shape outShape;
@@ -27,5 +27,5 @@ private:
     std::string error;
 };
 
-}
+}  // namespace TemplateExtension
 //! [fft_kernel:header]


@@ -31,4 +31,3 @@ bool FFTOp::visit_attributes(ngraph::AttributeVisitor &visitor) {
     return true;
 }
 //! [fft_op:implementation]
-


@@ -12,7 +12,9 @@ namespace TemplateExtension {
 class FFTOp : public ngraph::op::Op {
 public:
     static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0};
-    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
+    const ngraph::NodeTypeInfo& get_type_info() const override {
+        return type_info;
+    }
 
     FFTOp() = default;
     FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse);
@@ -23,6 +25,5 @@ public:
     bool inverse;
 };
 
-}
+}  // namespace TemplateExtension
 //! [fft_op:header]


@@ -39,56 +39,56 @@ bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
 //! [op:visit_attributes]
 
 //! [op:evaluate]
-namespace
-{
+namespace {
 
 template <class T>
-void implementation(const T* input,
-                    T* output,
-                    int64_t add,
-                    size_t size) {
+void implementation(const T* input, T* output, int64_t add, size_t size) {
     for (size_t i = 0; i < size; i++) {
         output[i] = input[i] + add;
     }
 }
 
 template <ngraph::element::Type_t ET>
-bool evaluate_op(const ngraph::HostTensorPtr& arg0,
-                 const ngraph::HostTensorPtr& out, int64_t add)
-{
+bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) {
     size_t size = ngraph::shape_size(arg0->get_shape());
-    implementation(arg0->get_data_ptr<ET>(),
-                   out->get_data_ptr<ET>(),
-                   add,
-                   size);
+    implementation(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), add, size);
     return true;
 }
 
 }  // namespace
 
-bool Operation::evaluate(const ngraph::HostTensorVector& outputs,
-                         const ngraph::HostTensorVector& inputs) const {
-    switch (inputs[0]->get_element_type())
-    {
-    case ngraph::element::Type_t::i8: return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::i16: return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::i32: return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::i64: return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u8: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u16: return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u32: return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::u64: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::bf16: return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::f16: return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
-    case ngraph::element::Type_t::f32: return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
-    default: break;
+bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const {
+    switch (inputs[0]->get_element_type()) {
+    case ngraph::element::Type_t::i8:
+        return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::i16:
+        return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::i32:
+        return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::i64:
+        return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::u8:
+        return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::u16:
+        return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::u32:
+        return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::u64:
+        return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::bf16:
+        return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::f16:
+        return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
+    case ngraph::element::Type_t::f32:
+        return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
+    default:
+        break;
     }
     return false;
 }
 
 bool Operation::has_evaluate() const {
-    switch (get_input_element_type(0))
-    {
+    switch (get_input_element_type(0)) {
     case ngraph::element::Type_t::i8:
     case ngraph::element::Type_t::i16:
     case ngraph::element::Type_t::i32:
@@ -99,8 +99,10 @@ bool Operation::has_evaluate() const {
     case ngraph::element::Type_t::u64:
     case ngraph::element::Type_t::bf16:
    case ngraph::element::Type_t::f16:
-    case ngraph::element::Type_t::f32: return true;
-    default: break;
+    case ngraph::element::Type_t::f32:
+        return true;
+    default:
+        break;
     }
     return false;
 }


@@ -18,9 +18,10 @@ public:
     void validate_and_infer_types() override;
     std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
     bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
-    int64_t getAddAttr() const { return add; }
-    bool evaluate(const ngraph::HostTensorVector& outputs,
-                  const ngraph::HostTensorVector& inputs) const override;
+    int64_t getAddAttr() const {
+        return add;
+    }
+    bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override;
     bool has_evaluate() const override;
 
 private:


@@ -13,7 +13,8 @@ ie_add_plugin(NAME ${TARGET_NAME}
               DEVICE_NAME "TEMPLATE"
               SOURCES ${SOURCES} ${HEADERS}
               SKIP_INSTALL # ATTENTION: uncomment to install component
-              VERSION_DEFINES_FOR template_plugin.cpp)
+              VERSION_DEFINES_FOR template_plugin.cpp
+              ADD_CLANG_FORMAT)
 
 target_include_directories(${TARGET_NAME} PRIVATE
     "${CMAKE_CURRENT_SOURCE_DIR}"


@@ -3,18 +3,16 @@
 //
 
 #include "template_async_infer_request.hpp"
+
 #include "template_itt.hpp"
 
 using namespace TemplatePlugin;
 
 // ! [async_infer_request:ctor]
-TemplateAsyncInferRequest::TemplateAsyncInferRequest(
-    const TemplateInferRequest::Ptr& inferRequest,
-    const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
+TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
                                                      const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
-    const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
-    AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
-    _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
+                                                     const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor)
+    : AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
     // In current implementation we have CPU only tasks and no needs in 2 executors
     // So, by default single stage pipeline is created.
     // This stage executes InferRequest::Infer() using cpuTaskExecutor.
@@ -23,24 +21,21 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(
     constexpr const auto remoteDevice = false;
 
     if (remoteDevice) {
-        _pipeline = {
-            {cpuTaskExecutor, [this] {
-                OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
-                    "TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
-                _inferRequest->inferPreprocess();
-                _inferRequest->startPipeline();
-            }},
-            {_waitExecutor, [this] {
-                OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
-                    "TemplateAsyncInferRequest::WaitPipeline");
-                _inferRequest->waitPipeline();
-            }},
-            {cpuTaskExecutor, [this] {
-                OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
-                    "TemplateAsyncInferRequest::Postprocessing");
-                _inferRequest->inferPostprocess();
-            }}
-        };
+        _pipeline = {{cpuTaskExecutor,
+                      [this] {
+                          OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
+                          _inferRequest->inferPreprocess();
+                          _inferRequest->startPipeline();
+                      }},
+                     {_waitExecutor,
+                      [this] {
+                          OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::WaitPipeline");
+                          _inferRequest->waitPipeline();
+                      }},
+                     {cpuTaskExecutor, [this] {
+                          OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::Postprocessing");
+                          _inferRequest->inferPostprocess();
+                      }}};
     }
 }
 // ! [async_infer_request:ctor]


@@ -13,10 +13,8 @@ namespace TemplatePlugin {
 // ! [async_infer_request:header]
 class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
 public:
-    TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
-                              const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
-                              const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
-                              const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
+    TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
+                              const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
 
     ~TemplateAsyncInferRequest();


@@ -2,10 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <ie_plugin_config.hpp>
-#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
 #include "template_config.hpp"
+
+#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
+#include <ie_plugin_config.hpp>
+
 #include "template/template_config.hpp"
 
 using namespace TemplatePlugin;
@@ -22,8 +23,7 @@ Configuration::Configuration(const ConfigMap& config, const Configuration & defa
         if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) {
             _streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value);
-        } else if (streamExecutorConfigKeys.end() !=
-                   std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
+        } else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
             _streamsExecutorConfig.SetConfig(key, value);
         } else if (CONFIG_KEY(DEVICE_ID) == key) {
             deviceId = std::stoi(value);


@@ -4,11 +4,9 @@
 #pragma once
 
-#include <string>
-#include <map>
-
 #include <ie_parameter.hpp>
+#include <map>
+#include <string>
 #include <threading/ie_istreams_executor.hpp>
 
 namespace TemplatePlugin {


@@ -2,25 +2,24 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "template_executable_network.hpp"
+
 #include <ie_metric_helpers.hpp>
 #include <ie_plugin_config.hpp>
 #include <threading/ie_executor_manager.hpp>
 
-#include "transformations/serialize.hpp"
 #include "template/template_config.hpp"
-#include "template_plugin.hpp"
-#include "template_executable_network.hpp"
 #include "template_itt.hpp"
+#include "template_plugin.hpp"
+#include "transformations/serialize.hpp"
 
 using namespace TemplatePlugin;
 
 // ! [executable_network:ctor_cnnnetwork]
 TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
-                                                     const InferenceEngine::InputsDataMap& inputInfoMap,
-                                                     const InferenceEngine::OutputsDataMap& outputsInfoMap,
-                                                     const Configuration& cfg,
-                                                     const Plugin::Ptr& plugin) :
-    InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
+                                                     const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap,
+                                                     const Configuration& cfg, const Plugin::Ptr& plugin)
+    : InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr),  // Disable default threads creation
       _cfg(cfg),
       _plugin(plugin) {
     // TODO: if your plugin supports device ID (more that single instance of device can be on host machine)
@@ -40,11 +39,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const
 // ! [executable_network:ctor_cnnnetwork]
 
 // ! [executable_network:ctor_import_stream]
-TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
-                                                     const Configuration& cfg,
-                                                     const Plugin::Ptr& plugin) :
-    _cfg(cfg),
-    _plugin(plugin) {
+TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const Configuration& cfg, const Plugin::Ptr& plugin): _cfg(cfg), _plugin(plugin) {
     // read XML content
     std::string xmlString;
     std::uint64_t dataSize = 0;
@@ -57,9 +52,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
     model.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
     if (0 != dataSize) {
         dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
-            InferenceEngine::TensorDesc(InferenceEngine::Precision::U8,
-                                        {static_cast<std::size_t>(dataSize)},
-                                        InferenceEngine::Layout::C));
+            InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast<std::size_t>(dataSize)}, InferenceEngine::Layout::C));
         dataBlob->allocate();
         model.read(dataBlob->buffer(), dataSize);
     }
@@ -90,8 +83,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
 // ! [executable_network:map_graph]
 
 // forward declaration
-std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
-                                                   const InferenceEngine::InputsDataMap & inputInfoMap,
+std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
                                                    const InferenceEngine::OutputsDataMap& outputsInfoMap);
 
 void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
@@ -120,7 +112,6 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
 }
 // ! [executable_network:map_graph]
 
-
 // ! [executable_network:init_executor]
 void TemplatePlugin::ExecutableNetwork::InitExecutor() {
     // Default multi-threaded configuration is balanced for throughtput and latency cases and takes into account
@@ -137,7 +128,6 @@ void TemplatePlugin::ExecutableNetwork::InitExecutor() {
 }
 // ! [executable_network:init_executor]
 
-
 // ! [executable_network:create_infer_request_impl]
 InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                                                       InferenceEngine::OutputsDataMap networkOutputs) {
@@ -148,8 +138,8 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C
 // ! [executable_network:create_infer_request]
 InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
     auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
-    return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
-                                                       _taskExecutor, _plugin->_waitExecutor, _callbackExecutor);
+    return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest), _taskExecutor, _plugin->_waitExecutor,
                                                        _callbackExecutor);
 }
 // ! [executable_network:create_infer_request]
@@ -163,16 +153,10 @@ InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const st
 InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const {
     // TODO: return more supported values for metrics
     if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) {
-        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{
-            METRIC_KEY(NETWORK_NAME),
-            METRIC_KEY(SUPPORTED_METRICS),
-            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
-            METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
+        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string> {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS),
                                                                           METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
     } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
-        std::vector<std::string> configKeys = {
-            CONFIG_KEY(DEVICE_ID),
-            CONFIG_KEY(PERF_COUNT),
-            TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) };
+        std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
         auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
         for (auto&& configKey : streamExecutorConfigKeys) {
             configKeys.emplace_back(configKey);
@@ -197,8 +181,7 @@ void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) {
     // Note: custom ngraph extensions are not supported
     std::map<std::string, ngraph::OpSet> custom_opsets;
     std::stringstream xmlFile, binFile;
-    ngraph::pass::Serialize serializer(xmlFile, binFile,
-                                       ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
+    ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
     serializer.run_on_function(_function);
 
     auto m_constants = binFile.str();


@@ -4,13 +4,12 @@
 #pragma once
 
+#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
 #include <ngraph/function.hpp>
 
+#include "template_async_infer_request.hpp"
 #include "template_config.hpp"
 #include "template_infer_request.hpp"
-#include "template_async_infer_request.hpp"
-#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
 
 namespace TemplatePlugin {
@@ -24,15 +23,10 @@ class Plugin;
 // ! [executable_network:header]
 class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
 public:
-    ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
-                      const InferenceEngine::InputsDataMap& inputInfoMap,
-                      const InferenceEngine::OutputsDataMap& outputsInfoMap,
-                      const Configuration& cfg,
-                      const std::shared_ptr<Plugin>& plugin);
+    ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
+                      const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
 
-    ExecutableNetwork(std::istream& model,
-                      const Configuration& cfg,
-                      const std::shared_ptr<Plugin>& plugin);
+    ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
 
     ~ExecutableNetwork() override = default;
@@ -48,8 +42,7 @@ public:
 private:
     friend class TemplateInferRequest;
 
-    void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
-                        const InferenceEngine::InputsDataMap& inputInfoMap,
+    void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
                         const InferenceEngine::OutputsDataMap& outputsInfoMap);
     void InitExecutor();


@@ -2,19 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <utility>
-#include <algorithm>
-#include <memory>
-#include <string>
-#include <map>
-#include <ngraph/runtime/reference/convert.hpp>
 #include "template_infer_request.hpp"
-#include "template_executable_network.hpp"
-#include "template_plugin.hpp"
-#include "template_itt.hpp"
-#include "ie_ngraph_utils.hpp"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <ngraph/runtime/reference/convert.hpp>
+#include <string>
+#include <utility>
+
 #include "blob_factory.hpp"
+#include "ie_ngraph_utils.hpp"
+#include "template_executable_network.hpp"
+#include "template_itt.hpp"
+#include "template_plugin.hpp"
 
 using namespace TemplatePlugin;
 using namespace InferenceEngine;
@@ -22,11 +23,9 @@ using namespace InferenceEngine;
 using Time = std::chrono::high_resolution_clock;
 
 // ! [infer_request:ctor]
-TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
-                                           const InferenceEngine::OutputsDataMap& networkOutputs,
-                                           const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
-    IInferRequestInternal(networkInputs, networkOutputs),
-    _executableNetwork(executableNetwork) {
+TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
                                           const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork)
+    : IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) {
     // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
 
     auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
@@ -61,10 +60,7 @@ void TemplateInferRequest::allocateDeviceBuffers() {
 }
 
 template <typename BlobDataMap, typename GetNetworkPrecisionF>
-static void AllocateImpl(const BlobDataMap& userDataMap,
-                         BlobMap& userBlobMap,
-                         BlobMap& deviceBlobMap,
-                         GetNetworkPrecisionF&& GetNetworkPrecision,
+static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision,
                          bool isInputBlob = true) {
     for (auto&& userData : userDataMap) {
         const auto& dims = userData.second->getTensorDesc().getDims();
@@ -98,9 +94,12 @@ void TemplateInferRequest::allocateBlobs() {
         return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type();
     });
     auto&& results = _executableNetwork->_function->get_results();
-    AllocateImpl(_networkOutputs, _outputs, _networkOutputBlobs, [&] (const std::string& blobName) {
-        return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
-    }, false);
+    AllocateImpl(
+        _networkOutputs, _outputs, _networkOutputBlobs,
+        [&](const std::string& blobName) {
+            return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
+        },
+        false);
 }
 
 // ! [infer_request:infer_impl]
@@ -115,95 +114,100 @@ void TemplateInferRequest::InferImpl() {
 template <typename SrcT, typename DstT>
 static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
-    ngraph::runtime::reference::convert<SrcT, DstT>(
-        InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
-        InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(),
-        src->size());
+    ngraph::runtime::reference::convert<SrcT, DstT>(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
                                                     InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(), src->size());
 }
 
 static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
     switch (src->getTensorDesc().getPrecision()) {
     case Precision::U8: {
         switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::U8 : break;
+        case Precision::U8:
+            break;
         case Precision::FP32: {
             blobCopy<std::uint8_t, float>(src, dst);
         } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
         }
         }
     } break;
     case Precision::FP32: {
         switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::FP32 : break;
+        case Precision::FP32:
+            break;
        case Precision::U8: {
            blobCopy<float, std::uint8_t>(src, dst);
        } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
        }
        }
    } break;
    case Precision::I64: {
        switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::I64 : break;
+        case Precision::I64:
+            break;
        case Precision::I32: {
            blobCopy<int64_t, int32_t>(src, dst);
        } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
        }
        }
    } break;
    case Precision::I16: {
        switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::I16 : break;
+        case Precision::I16:
+            break;
        case Precision::FP32: {
            blobCopy<int16_t, float>(src, dst);
        } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
        }
        }
    } break;
    case Precision::I8: {
        switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::I8 : break;
+        case Precision::I8:
+            break;
        case Precision::FP32: {
            blobCopy<int8_t, float>(src, dst);
        } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
        }
        }
    } break;
    case Precision::BOOL: {
        switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::BOOL : break;
+        case Precision::BOOL:
+            break;
        case Precision::FP32: {
            blobCopy<bool, float>(src, dst);
        } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
        }
        }
    } break;
    case Precision::U16: {
        switch (dst->getTensorDesc().getPrecision()) {
-        case Precision::U16 : break;
+        case Precision::U16:
+            break;
        case Precision::FP32: {
            blobCopy<uint16_t, float>(src, dst);
        } break;
        default: {
-            IE_THROW(NotImplemented) << "Unsupported precision conversion from "
-                << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
+            IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
                                      << dst->getTensorDesc().getPrecision();
        }
        }
    } break;
@@ -225,8 +229,8 @@ void TemplateInferRequest::inferPreprocess() {
         const auto& parameter = _parameters[index];
         const auto& parameterShape = parameter->get_shape();
         const auto& parameterType = parameter->get_element_type();
-        _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape,
-            InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
+        _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
+            parameterType, parameterShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
     }
     for (auto&& output : _outputs) {
         auto outputBlob = output.second;
@@ -238,8 +242,8 @@ void TemplateInferRequest::inferPreprocess() {
         const auto& result = _results[index];
         const auto& resultShape = result->get_shape();
         const auto& resultType = result->get_element_type();
-        _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(resultType, resultShape,
-            InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
+        _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
+            resultType, resultShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
    }
     _durations[Preprocess] = Time::now() - start;
 }


@@ -4,20 +4,17 @@
 #pragma once
 
+#include <array>
+#include <chrono>
+#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
+#include <executable.hpp>
+#include <ie_input_info.hpp>
 #include <map>
+#include <memory>
+#include <ngraph/runtime/tensor.hpp>
+#include <openvino/itt.hpp>
 #include <string>
 #include <vector>
-#include <array>
-#include <memory>
-#include <chrono>
-#include <openvino/itt.hpp>
-#include <ie_input_info.hpp>
-#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
-#include <ngraph/runtime/tensor.hpp>
-#include <executable.hpp>
 
 namespace TemplatePlugin {
@@ -29,8 +26,7 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal {
 public:
     typedef std::shared_ptr<TemplateInferRequest> Ptr;
 
-    TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
-                         const InferenceEngine::OutputsDataMap& networkOutputs,
+    TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
                          const std::shared_ptr<ExecutableNetwork>& executableNetwork);
     ~TemplateInferRequest();
@@ -47,13 +43,7 @@ private:
     void allocateDeviceBuffers();
     void allocateBlobs();
 
-    enum {
-        Preprocess,
-        Postprocess,
-        StartPipeline,
-        WaitPipeline,
-        numOfStages
-    };
+    enum { Preprocess, Postprocess, StartPipeline, WaitPipeline, numOfStages };
 
     std::shared_ptr<ExecutableNetwork> _executableNetwork;
     std::array<openvino::itt::handle_t, numOfStages> _profilingTask;


@@ -16,5 +16,5 @@ namespace itt {
 namespace domains {
 OV_ITT_DOMAIN(TemplatePlugin);
 }
-}
-}
+}  // namespace itt
+}  // namespace TemplatePlugin


@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+// clang-format off
 #include <ie_metric_helpers.hpp>
 #include <ie_plugin_config.hpp>
 #include <ie_algorithm.hpp>
@@ -24,6 +25,7 @@
 #include "template_infer_request.hpp"
 #include "transformations/template_pattern_transformation.hpp"
 #include "transformations/preprocessing/preprocessing.hpp"
+// clang-format on
 using namespace TemplatePlugin;
@@ -53,8 +55,7 @@ Plugin::~Plugin() {
 // ! [plugin:transform_network]
-std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
-                                                   const InferenceEngine::InputsDataMap & inputInfoMap,
+std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
                                                    const InferenceEngine::OutputsDataMap& outputsInfoMap) {
     // 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
     auto transformedNetwork = ngraph::clone_function(*function);
@@ -83,28 +84,24 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
 // ! [plugin:transform_network]
 // ! [plugin:load_exe_network_impl]
-InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork & network,
-                                                                           const ConfigMap &config) {
+InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::LoadExeNetworkImpl");
     InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo();
     InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo();
     auto fullConfig = Configuration {config, _cfg};
-    return std::make_shared<ExecutableNetwork>(network.getFunction(),
-                                               networkInputs, networkOutputs, fullConfig,
+    return std::make_shared<ExecutableNetwork>(network.getFunction(), networkInputs, networkOutputs, fullConfig,
                                                std::static_pointer_cast<Plugin>(shared_from_this()));
 }
 // ! [plugin:load_exe_network_impl]
 // ! [plugin:import_network_impl]
-InferenceEngine::ExecutableNetworkInternal::Ptr
-Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
+InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl");
     auto fullConfig = Configuration {config, _cfg};
-    return std::make_shared<ExecutableNetwork>(modelStream, fullConfig,
-                                               std::static_pointer_cast<Plugin>(shared_from_this()));
+    return std::make_shared<ExecutableNetwork>(modelStream, fullConfig, std::static_pointer_cast<Plugin>(shared_from_this()));
 }
 // ! [plugin:import_network_impl]
@@ -212,21 +209,13 @@ InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std:
 // ! [plugin:get_metric]
 InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
     if (METRIC_KEY(SUPPORTED_METRICS) == name) {
-        std::vector<std::string> supportedMetrics = {
-            METRIC_KEY(AVAILABLE_DEVICES),
-            METRIC_KEY(SUPPORTED_METRICS),
-            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
-            METRIC_KEY(FULL_DEVICE_NAME),
-            METRIC_KEY(IMPORT_EXPORT_SUPPORT),
-            METRIC_KEY(DEVICE_ARCHITECTURE),
-            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
-            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
+        std::vector<std::string> supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS),
+                                                     METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME),
+                                                     METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE),
+                                                     METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
         IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
     } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
-        std::vector<std::string> configKeys = {
-            CONFIG_KEY(DEVICE_ID),
-            CONFIG_KEY(PERF_COUNT),
-            TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
+        std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
         auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
         for (auto&& configKey : streamExecutorConfigKeys) {
             if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
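The clang-format off/on pair added to this file is the standard escape hatch for regions the formatter must not touch; here it pins the include block, presumably because some of these headers are order-sensitive (that motivation is my reading of the change, not stated in the commit). A minimal sketch of the guard:

    // clang-format off
    // Assumed order-sensitive: keep the macro helpers ahead of their users,
    // so the include sorter must not reorder these two lines.
    #include <ie_metric_helpers.hpp>
    #include <ie_plugin_config.hpp>
    // clang-format on

    // Outside the guard, clang-format sorts and regroups includes as usual.
    #include <map>
    #include <string>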
@@ -4,11 +4,11 @@
 #pragma once
+#include "template_config.hpp"
+#include "template_executable_network.hpp"
 #include <cpp_interfaces/impl/ie_plugin_internal.hpp>
 #include "backend.hpp"
-#include "template_config.hpp"
-#include "template_executable_network.hpp"
 //! [plugin:header]
 namespace TemplatePlugin {
@@ -21,11 +21,9 @@ public:
     ~Plugin();
     void SetConfig(const std::map<std::string, std::string>& config) override;
-    InferenceEngine::QueryNetworkResult
-    QueryNetwork(const InferenceEngine::CNNNetwork &network,
+    InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
                  const std::map<std::string, std::string>& config) const override;
-    InferenceEngine::ExecutableNetworkInternal::Ptr
-    LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network,
+    InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
                        const std::map<std::string, std::string>& config) override;
     void AddExtension(InferenceEngine::IExtensionPtr extension) override;
     InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "transformations/preprocessing/mean_image_or_value.hpp"
+
 #include <ngraph/opsets/opset3.hpp>
 #include <ngraph/pass/manager.hpp>
 #include <ngraph/pattern/op/wrap_type.hpp>
-#include "transformations/preprocessing/mean_image_or_value.hpp"
 using namespace ngraph;
 NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0);
@@ -28,8 +28,7 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
         }
         auto mean_const = it->second;
-        NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32,
-                     "Mean for ", param->get_friendly_name(), " must have f32 type");
+        NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type");
         auto copy_param = param->clone_with_new_inputs({});
         auto sub = std::make_shared<ngraph::opset3::Subtract>(copy_param, mean_const);
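For context, the pass being reformatted here clones the Parameter and feeds its consumers the mean-subtracted value. A standalone sketch of that subgraph, assuming the same opset3 API the diff uses (the helper name, shape, and mean values are illustrative):

    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset3.hpp>

    // Build param -> Subtract(param, mean), the subgraph AddMeanSubtract inserts.
    std::shared_ptr<ngraph::Node> insert_mean_subtract(const std::shared_ptr<ngraph::opset3::Parameter>& param) {
        // Per-channel mean, shaped to broadcast against an NCHW input.
        auto mean = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1, 3, 1, 1}, {104.f, 117.f, 123.f});
        return std::make_shared<ngraph::opset3::Subtract>(param, mean);
    }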
@@ -5,10 +5,9 @@
 #pragma once
 #include <map>
-#include <string>
 #include <ngraph/op/constant.hpp>
 #include <ngraph/pass/graph_rewrite.hpp>
+#include <string>
 #include "transformations_visibility.hpp"
@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <ngraph/pass/manager.hpp>
+#include "transformations/preprocessing/preprocessing.hpp"
+
 #include <ngraph/opsets/opset3.hpp>
+#include <ngraph/pass/manager.hpp>
 #include "transformations/preprocessing/mean_image_or_value.hpp"
 #include "transformations/preprocessing/std_scale.hpp"
-#include "transformations/preprocessing/preprocessing.hpp"
 NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0);
-ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap)
-    : m_inputInfoMap(inputInfoMap) { }
+ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {}
 bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Function> f) {
     ngraph::pass::AddMeanSubtract::MeanMap meanMap;
@@ -43,8 +43,7 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
                          "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
             } else {
                 NGRAPH_CHECK(pInfo[c]->meanData != nullptr, "pInfo[c]->meanData is nullptr");
-                NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(),
-                             "TensorDesc for PreProcessChannel::meanData must be equal");
+                NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal");
             }
         }
     }
@@ -54,8 +53,7 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
             continue;
         }
-        NGRAPH_CHECK(!(has_mean_image && has_scales),
-                     "Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
+        NGRAPH_CHECK(!(has_mean_image && has_scales), "Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
         if (has_scales) {
             ngraph::Shape shape(inputDims.size(), 1);
@@ -81,8 +79,7 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
                 i += meanImage->size();
             }
-            meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32,
-                                                                 shape, meanImageData);
+            meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanImageData);
         }
     }
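The Constant::create call joined above builds its constant with a shape of all ones except the channel axis, so it broadcasts over batch and spatial dimensions in the inserted Subtract/Multiply. A small sketch of that shape logic, assuming a 4D NCHW input (the function and variable names are invented for illustration, and setting shape[1] is my reading of how the per-channel data is laid out):

    #include <ngraph/opsets/opset3.hpp>
    #include <vector>

    // For inputDims = {N, C, H, W}, build a {1, C, 1, 1} constant.
    std::shared_ptr<ngraph::opset3::Constant> makeChannelConstant(const ngraph::Shape& inputDims, const std::vector<float>& perChannel) {
        ngraph::Shape shape(inputDims.size(), 1);  // all ones...
        shape[1] = perChannel.size();              // ...except the channel axis
        return ngraph::opset3::Constant::create(ngraph::element::f32, shape, perChannel);
    }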
@@ -27,6 +27,7 @@ class AddPreprocessing;
  */
 class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass {
     const InferenceEngine::InputsDataMap& m_inputInfoMap;
+
 public:
     NGRAPH_RTTI_DECLARATION;
     explicit AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap);
@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "transformations/preprocessing/std_scale.hpp"
+
 #include <ngraph/opsets/opset3.hpp>
 #include <ngraph/pass/manager.hpp>
 #include <ngraph/pattern/op/wrap_type.hpp>
-#include "transformations/preprocessing/std_scale.hpp"
 using namespace ngraph;
 NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0);
@@ -28,8 +28,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
         }
         auto scale_const = it->second;
-        NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32,
-                     "Scale for ", param->get_friendly_name(), " must have f32 type");
+        NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type");
         auto copy_param = param->clone_with_new_inputs({});
         auto mul = std::make_shared<ngraph::opset3::Multiply>(copy_param, it->second);
@@ -5,10 +5,9 @@
 #pragma once
 #include <map>
-#include <string>
 #include <ngraph/op/constant.hpp>
 #include <ngraph/pass/graph_rewrite.hpp>
+#include <string>
 #include "transformations_visibility.hpp"
@@ -29,8 +29,7 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
     // Print types and names for collected nodes
     for (auto& node : nodes) {
-        std::cout << "Type: " << node->get_type_info().name << std::endl
-                  << "Name: " << node->get_friendly_name() << std::endl;
+        std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl;
     }
     // Return false because we didn't change nGraph Function
@@ -3,13 +3,14 @@
 //
 #include "transformations/template_pattern_transformation.hpp"
+
+#include "transformations/template_function_transformation.hpp"
 #include <ngraph/opsets/opset3.hpp>
 #include <ngraph/pass/manager.hpp>
 #include <ngraph/pattern/op/wrap_type.hpp>
 #include <ngraph/rt_info.hpp>
-#include "transformations/template_function_transformation.hpp"
 using namespace ngraph;
 // ! [graph_rewrite:template_transformation_cpp]
@@ -30,8 +31,7 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() {
         }
         // Decompose Divide into Multiply with Power operations
-        auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1),
-                                                           opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1}));
+        auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1}));
         auto mul = std::make_shared<ngraph::opset3::Multiply>(div->input_value(0), pow);
@@ -67,8 +67,7 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
         auto& node_to_output = m.get_pattern_value_map();
         // Create new Relu operation and add register it for additional execution
-        auto new_relu = register_new_node<ngraph::opset3::Relu>(
-            node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
+        auto new_relu = register_new_node<ngraph::opset3::Relu>(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
         // Copy runtime info attributes to newly created operation
         ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu);
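The matcher reformatted above rewrites a division as multiplication by a reciprocal, i.e. x / y == x * y^(-1). A self-contained sketch of the same replacement subgraph, using the opset3 operations from the diff (the helper name is invented):

    #include <ngraph/opsets/opset3.hpp>

    using namespace ngraph;

    // Rewrite a / b as a * pow(b, -1), mirroring DecomposeDivideMatcher's output.
    std::shared_ptr<Node> decomposeDivide(const Output<Node>& a, const Output<Node>& b) {
        auto minusOne = opset3::Constant::create(b.get_element_type(), Shape {1}, {-1});
        auto reciprocal = std::make_shared<opset3::Power>(b, minusOne);
        return std::make_shared<opset3::Multiply>(a, reciprocal);
    }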
@@ -14,7 +14,7 @@ addIeTargetTest(
         IE::funcSharedTests
     INCLUDES
         "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include"
-    ADD_CPPLINT
+    ADD_CLANG_FORMAT
     LABELS
         TEMPLATE
 )
@@ -4,5 +4,4 @@
 #include "functional_test_utils/core_config.hpp"
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
-}
+void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
@@ -11,15 +11,10 @@ namespace {
         ngraph::element::f32,
     };
-    static const std::vector<std::size_t> batchSizesTemplate = {
-        1, 2
-    };
+    static const std::vector<std::size_t> batchSizesTemplate = {1, 2};
     INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
-                            ::testing::Combine(
-                                ::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()),
-                                ::testing::ValuesIn(precisionsTemplate),
-                                ::testing::ValuesIn(batchSizesTemplate),
-                                ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
+                            ::testing::Combine(::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), ::testing::ValuesIn(precisionsTemplate),
+                                               ::testing::ValuesIn(batchSizesTemplate), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                             LoadNetworkCacheTestBase::getTestCaseName);
 } // namespace
@@ -2,19 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "multi-device/multi_device_config.hpp"
 #include "behavior/config.hpp"
 #include <template/template_config.hpp>
+#include "multi-device/multi_device_config.hpp"
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
 const std::vector<std::map<std::string, std::string>> configs = {
     {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
@@ -27,31 +25,22 @@ const std::vector<std::map<std::string, std::string>> inconfigs = {
 };
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(inconfigs)),
                         IncorrectConfigTests::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(inconfigs)),
                         IncorrectConfigAPITests::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         CorrectConfigAPITests::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         CorrectConfigAPITests::getTestCaseName);
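All of these reformatted instantiations share one mechanism: ::testing::Combine builds the cross product of its parameter generators, and INSTANTIATE_TEST_CASE_P stamps out one test per resulting tuple. A minimal self-contained sketch using the same (older) gtest macro this suite uses; the fixture and values are invented for illustration:

    #include <gtest/gtest.h>
    #include <string>
    #include <tuple>

    // Fixture parameterized over (precision name, device name).
    class ComboTest : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {};

    TEST_P(ComboTest, RunsOncePerCombination) {
        // 2 precisions x 1 device = 2 generated test cases.
        EXPECT_FALSE(std::get<0>(GetParam()).empty());
    }

    INSTANTIATE_TEST_CASE_P(smoke_Demo, ComboTest,
                            ::testing::Combine(::testing::Values(std::string("FP32"), std::string("FP16")),
                                               ::testing::Values(std::string("TEMPLATE"))));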
@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <utility>
-#include <string>
-#include <vector>
 #include "behavior/core_integration.hpp"
+
+#include <string>
+#include <utility>
+#include <vector>
 using namespace BehaviorTestsDefinitions;
 namespace {
@@ -16,54 +16,31 @@ namespace {
 // IE Class Common tests with <pluginName, deviceName params>
 //
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassBasicTestP, IEClassBasicTestP,
-    ::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassBasicTestP, IEClassBasicTestP, ::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassNetworkTestP, IEClassNetworkTestP,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassNetworkTestP, IEClassNetworkTestP, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 //
 // IE Class GetMetric
 //
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 //
 // IE Class SetConfig
@@ -111,9 +88,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
 // IE Class GetConfig
 //
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassGetConfigTest, IEClassGetConfigTest,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;
@@ -143,48 +118,37 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
 // Executable Network GetMetric
 //
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
 //
 // Executable Network GetConfig / SetConfig
 //
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 // IE Class Query network
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 // IE Class Load network
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 //
 // Hetero Executable Network GetMetric
@@ -192,20 +156,16 @@ INSTANTIATE_TEST_CASE_P(
 #ifdef ENABLE_MKL_DNN
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
-INSTANTIATE_TEST_CASE_P(
-    smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
-    ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
+INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
+                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
 #endif  // ENABLE_MKL_DNN
@@ -12,28 +12,16 @@ const std::vector<std::vector<int >> orders = {
     // 0 - plugin
     // 1 - executable_network
     // 2 - infer_request
-    {0, 1, 2},
-    {0, 2, 1},
-    {1, 0, 2},
-    {1, 2, 0},
-    {2, 0, 1},
-    {2, 1, 0}
-};
+    {0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
-INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest,
-                        ::testing::Combine(
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
-                            ::testing::ValuesIn(orders)),
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(orders)),
                         HoldersTest::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestImportNetwork,
-                        ::testing::Combine(
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
-                            ::testing::ValuesIn(orders)),
+                        ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"), ::testing::ValuesIn(orders)),
                         HoldersTest::getTestCaseName);
-INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork,
-                        ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
                         HoldersTestOnImportedNetwork::getTestCaseName);
 } // namespace
@@ -2,27 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <vector>
 #include "behavior/exec_graph_info.hpp"
+
+#include <vector>
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         ExecGraphTests::getTestCaseName);
@@ -2,27 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <vector>
 #include "behavior/infer_request.hpp"
+
+#include <vector>
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         InferRequestTests::getTestCaseName);
@@ -2,27 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <vector>
 #include "behavior/infer_request_callback.hpp"
+
+#include <vector>
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         CallbackTests::getTestCaseName);
 } // namespace
@@ -2,27 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <vector>
 #include "behavior/infer_request_config.hpp"
+
+#include <vector>
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         InferConfigTests::getTestCaseName);
@@ -2,27 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "multi-device/multi_device_config.hpp"
 #include "behavior/infer_request_input.hpp"
+
+#include "multi-device/multi_device_config.hpp"
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         InferRequestInputTests::getTestCaseName);
@@ -2,27 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "multi-device/multi_device_config.hpp"
 #include "behavior/infer_request_output.hpp"
+
+#include "multi-device/multi_device_config.hpp"
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         InferRequestOutputTests::getTestCaseName);
@@ -8,31 +8,16 @@ using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
-const std::vector<InferenceEngine::Layout> Layout = {
-    InferenceEngine::Layout::NCHW,
-    InferenceEngine::Layout::CHW,
-    InferenceEngine::Layout::NC,
-    InferenceEngine::Layout::C
-};
+const std::vector<InferenceEngine::Layout> Layout = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::CHW, InferenceEngine::Layout::NC,
+                                                     InferenceEngine::Layout::C};
-const std::vector<std::vector<size_t>> inputShapes = {
-    { 1, 3, 16, 16 },
-    { 3, 32, 16 },
-    { 1, 3 },
-    { 3 }
-};
+const std::vector<std::vector<size_t>> inputShapes = {{1, 3, 16, 16}, {3, 32, 16}, {1, 3}, {3}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest,
-                        ::testing::Combine(
-                            ::testing::Values(InferenceEngine::Precision::FP32),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
-                            ::testing::ValuesIn(configs),
-                            ::testing::ValuesIn(Layout),
-                            ::testing::ValuesIn(inputShapes)),
+                        ::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                                           ::testing::ValuesIn(configs), ::testing::ValuesIn(Layout), ::testing::ValuesIn(inputShapes)),
                         LayoutTest::getTestCaseName);
 } // namespace
@@ -2,39 +2,30 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "multi-device/multi_device_config.hpp"
 #include "behavior/preprocessing.hpp"
+
+#include "multi-device/multi_device_config.hpp"
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-    InferenceEngine::Precision::U8,
-    InferenceEngine::Precision::FP32
-};
+const std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::U8, InferenceEngine::Precision::FP32};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(inputPrecisions),
-                            ::testing::Values(4),  // Number of input tensor channels
-                            ::testing::Values(true),  // Use SetInput
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
-                            ::testing::ValuesIn(configs)),
+                        ::testing::Combine(::testing::ValuesIn(inputPrecisions),
+                                           ::testing::Values(4),     // Number of input tensor channels
+                                           ::testing::Values(true),  // Use SetInput
+                                           ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
                         PreprocessingPrecisionConvertTest::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(inputPrecisions),
-                            ::testing::Values(4),  // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
-                            ::testing::Values(false),  // use GetBlob
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
-                            ::testing::ValuesIn(configs)),
+                        ::testing::Combine(::testing::ValuesIn(inputPrecisions),
+                                           ::testing::Values(4),      // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
+                                           ::testing::Values(false),  // use GetBlob
+                                           ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
                         PreprocessingPrecisionConvertTest::getTestCaseName);
 } // namespace
@@ -2,79 +2,50 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "multi-device/multi_device_config.hpp"
 #include "behavior/set_preprocess.hpp"
+
+#include "multi-device/multi_device_config.hpp"
 using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
-const std::vector<std::map<std::string, std::string>> multiConfigs = {
-    {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
-       CommonTestUtils::DEVICE_TEMPLATE }}
-};
+const std::vector<std::map<std::string, std::string>> multiConfigs = {
+    {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_TEMPLATE}}};
-const std::vector<std::map<std::string, std::string>> heteroConfigs = {
-    {{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE }}
-};
+const std::vector<std::map<std::string, std::string>> heteroConfigs = {{{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         PreprocessTest::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_MULTI),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI),
                             ::testing::ValuesIn(multiConfigs)),
                         PreprocessTest::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_Hetero_BehaviorTests, PreprocessTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_HETERO),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_HETERO),
                             ::testing::ValuesIn(heteroConfigs)),
                         PreprocessTest::getTestCaseName);
-const std::vector<InferenceEngine::Precision> ioPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::U8
-};
+const std::vector<InferenceEngine::Precision> ioPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8};
 const std::vector<InferenceEngine::Layout> netLayouts = {
     InferenceEngine::Layout::NCHW,
     // InferenceEngine::Layout::NHWC
 };
-const std::vector<InferenceEngine::Layout> ioLayouts = {
-    InferenceEngine::Layout::NCHW,
-    InferenceEngine::Layout::NHWC
-};
+const std::vector<InferenceEngine::Layout> ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessConversionTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::ValuesIn(ioPrecisions),
-                            ::testing::ValuesIn(ioPrecisions),
-                            ::testing::ValuesIn(netLayouts),
-                            ::testing::ValuesIn(ioLayouts),
-                            ::testing::ValuesIn(ioLayouts),
-                            ::testing::Bool(),
-                            ::testing::Bool(),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
-                            ::testing::ValuesIn(configs)),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(ioPrecisions), ::testing::ValuesIn(ioPrecisions),
+                                           ::testing::ValuesIn(netLayouts), ::testing::ValuesIn(ioLayouts), ::testing::ValuesIn(ioLayouts), ::testing::Bool(),
+                                           ::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
                         PreprocessConversionTest::getTestCaseName);
 } // namespace
@@ -8,33 +8,22 @@ using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16
-};
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests,
-                        ::testing::Combine(
-                            ::testing::Values(InferenceEngine::Precision::FP32),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         BehaviorTests::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         BehaviorTestInput::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         BehaviorTestOutput::getTestCaseName);
@@ -8,14 +8,10 @@ using namespace BehaviorTestsDefinitions;
 namespace {
-const std::vector<std::map<std::string, std::string>> configs = {
-    {}
-};
+const std::vector<std::map<std::string, std::string>> configs = {{}};
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest,
-                        ::testing::Combine(
-                            ::testing::Values(InferenceEngine::Precision::FP32),
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
+                        ::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                             ::testing::ValuesIn(configs)),
                         VersionTest::getTestCaseName);
@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "hetero/query_network.hpp"
+
 #include <vector>
-#include "hetero/query_network.hpp"
 #include "ngraph_functions/builders.hpp"
 #include "ngraph_functions/subgraph_builders.hpp"
@@ -14,8 +15,7 @@ using namespace HeteroTests;
 auto ConvBias = ngraph::builder::subgraph::makeConvBias();
 INSTANTIATE_TEST_CASE_P(smoke_FullySupportedTopologies, QueryNetworkTest,
-                        ::testing::Combine(
-                            ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
+                        ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
                             ::testing::Values(ConvBias)),
                         QueryNetworkTest::getTestCaseName);
 } // namespace
@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "hetero/synthetic.hpp"
+
 #include <vector>
-#include "hetero/synthetic.hpp"
 #include "ngraph_functions/builders.hpp"
 #include "ngraph_functions/subgraph_builders.hpp"
@@ -12,14 +13,12 @@ namespace {
 using namespace HeteroTests;
 INSTANTIATE_TEST_CASE_P(smoke_SingleMajorNode, HeteroSyntheticTest,
-                        ::testing::Combine(
-                            ::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
+                        ::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
                             ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
                         HeteroSyntheticTest::getTestCaseName);
 INSTANTIATE_TEST_CASE_P(nightly_RandomMajorNodes, HeteroSyntheticTest,
-                        ::testing::Combine(
-                            ::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
+                        ::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
                            ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
                         HeteroSyntheticTest::getTestCaseName);
 } // namespace
@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "single_layer_tests/convolution.hpp"
+
 #include <vector>
-#include "single_layer_tests/convolution.hpp"
 #include "common_test_utils/test_constants.hpp"
 using namespace LayerTestsDefinitions;
@@ -19,121 +20,71 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 /* ============= 2D Convolution ============= */
-const std::vector<std::vector<size_t >> kernels = {{3, 3},
-                                                   {3, 5}};
-const std::vector<std::vector<size_t >> strides = {{1, 1},
-                                                   {1, 3}};
-const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0},
-                                                       {0, 3}};
-const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0},
-                                                     {0, 3}};
-const std::vector<std::vector<size_t >> dilations = {{1, 1},
-                                                     {3, 1}};
+const std::vector<std::vector<size_t>> kernels = {{3, 3}, {3, 5}};
+const std::vector<std::vector<size_t>> strides = {{1, 1}, {1, 3}};
+const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}, {0, 3}};
+const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0}, {0, 3}};
+const std::vector<std::vector<size_t>> dilations = {{1, 1}, {3, 1}};
 const std::vector<size_t> numOutChannels = {1, 5};
-const std::vector<ngraph::op::PadType> padTypes = {
-    ngraph::op::PadType::EXPLICIT,
-    ngraph::op::PadType::VALID
-};
+const std::vector<ngraph::op::PadType> padTypes = {ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID};
-const auto conv2DParams_ExplicitPadding = ::testing::Combine(
-    ::testing::ValuesIn(kernels),
-    ::testing::ValuesIn(strides),
-    ::testing::ValuesIn(padBegins),
-    ::testing::ValuesIn(padEnds),
-    ::testing::ValuesIn(dilations),
-    ::testing::ValuesIn(numOutChannels),
-    ::testing::Values(ngraph::op::PadType::EXPLICIT)
-);
+const auto conv2DParams_ExplicitPadding =
+    ::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds),
+                       ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), ::testing::Values(ngraph::op::PadType::EXPLICIT));
 // ! [test_convolution:declare_parameters]
-const auto conv2DParams_AutoPadValid = ::testing::Combine(
-    ::testing::ValuesIn(kernels),
-    ::testing::ValuesIn(strides),
-    ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
-    ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
-    ::testing::ValuesIn(dilations),
-    ::testing::ValuesIn(numOutChannels),
-    ::testing::Values(ngraph::op::PadType::VALID)
-);
+const auto conv2DParams_AutoPadValid =
+    ::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
+                       ::testing::Values(std::vector<ptrdiff_t>({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels),
+                       ::testing::Values(ngraph::op::PadType::VALID));
 // ! [test_convolution:instantiate]
 INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
-                        ::testing::Combine(
-                            conv2DParams_ExplicitPadding,
-                            ::testing::ValuesIn(netPrecisions),
+                        ::testing::Combine(conv2DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
                             ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
+                            ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                             ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                         ConvolutionLayerTest::getTestCaseName);
 // ! [test_convolution:instantiate]
 INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
-                        ::testing::Combine(
-                            conv2DParams_AutoPadValid,
-                            ::testing::ValuesIn(netPrecisions),
+                        ::testing::Combine(conv2DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
+                            ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                             ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                         ConvolutionLayerTest::getTestCaseName);
 /* ============= 3D Convolution ============= */
-const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3},
-                                                     {3, 5, 3}};
-const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0},
-                                                        {0, 2, 0}};
-const std::vector<std::vector<size_t >> strides3d = {{1, 1, 1},
-                                                     {1, 2, 1}};
-const std::vector<std::vector<size_t >> dilations3d = {{1, 1, 1},
-                                                       {1, 2, 1}};
+const std::vector<std::vector<size_t>> kernels3d = {{3, 3, 3}, {3, 5, 3}};
+const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0}, {0, 2, 0}};
+const std::vector<std::vector<size_t>> strides3d = {{1, 1, 1}, {1, 2, 1}};
+const std::vector<std::vector<size_t>> dilations3d = {{1, 1, 1}, {1, 2, 1}};
-const auto conv3DParams_ExplicitPadding = ::testing::Combine(
-    ::testing::ValuesIn(kernels3d),
-    ::testing::ValuesIn(strides3d),
-    ::testing::ValuesIn(paddings3d),
-    ::testing::ValuesIn(paddings3d),
-    ::testing::ValuesIn(dilations3d),
-    ::testing::Values(5),
-    ::testing::Values(ngraph::op::PadType::EXPLICIT)
-);
-const auto conv3DParams_AutoPadValid = ::testing::Combine(
-    ::testing::ValuesIn(kernels3d),
-    ::testing::ValuesIn(strides3d),
-    ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
-    ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
-    ::testing::ValuesIn(dilations3d),
-    ::testing::Values(5),
-    ::testing::Values(ngraph::op::PadType::VALID)
-);
+const auto conv3DParams_ExplicitPadding =
+    ::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(paddings3d), ::testing::ValuesIn(paddings3d),
+                       ::testing::ValuesIn(dilations3d), ::testing::Values(5), ::testing::Values(ngraph::op::PadType::EXPLICIT));
+const auto conv3DParams_AutoPadValid =
+    ::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
+                       ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})), ::testing::ValuesIn(dilations3d), ::testing::Values(5),
+                       ::testing::Values(ngraph::op::PadType::VALID));
 INSTANTIATE_TEST_CASE_P(smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine( ::testing::Combine(conv3DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
conv3DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName); ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest, INSTANTIATE_TEST_CASE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine( ::testing::Combine(conv3DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
conv3DParams_AutoPadValid,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName); ConvolutionLayerTest::getTestCaseName);
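These suites all lean on the same gtest mechanic: ::testing::Combine builds the cartesian product of its parameter generators, and INSTANTIATE_TEST_CASE_P stamps out one test instance per resulting tuple. A minimal self-contained sketch of that mechanic (the ToyCombineTest fixture below is hypothetical and not part of this commit):

#include <gtest/gtest.h>

#include <tuple>

class ToyCombineTest : public ::testing::TestWithParam<std::tuple<int, int>> {};

TEST_P(ToyCombineTest, EnumeratesProduct) {
    int kernel, stride;
    std::tie(kernel, stride) = GetParam();
    EXPECT_GT(kernel, 0);  // each of the 2 x 2 = 4 (kernel, stride) tuples runs once
    EXPECT_GT(stride, 0);
}

INSTANTIATE_TEST_CASE_P(Toy, ToyCombineTest,
                        ::testing::Combine(::testing::Values(3, 5),    // kernel sizes
                                           ::testing::Values(1, 2)));  // strides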
View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/reshape.hpp"

#include <vector>

#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

@ -15,30 +16,20 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
};

INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheckDynBatch, ReshapeLayerTest,
                        ::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
                                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
                                           ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                                           ::testing::Values(std::vector<size_t>({30, 30, 30, 30})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                           ::testing::Values(std::map<std::string, std::string>({}))),
                        ReshapeLayerTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheck, ReshapeLayerTest,
                        ::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
                                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
                                           ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
                                           ::testing::Values(std::vector<size_t>({10, 0, 100})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                           ::testing::Values(std::map<std::string, std::string>({}))),
                        ReshapeLayerTest::getTestCaseName);
}  // namespace
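One detail worth noting in smoke_ReshapeCheck: the target shape {10, 0, 100} relies on Reshape's special-zero behaviour, where a 0 in the target keeps the corresponding input dimension, so an input of {10, 10, 10, 10} resolves to {10, 10, 100} (10'000 elements either way). A standalone sketch of that resolution rule (assumed semantics; resolveShape is a hypothetical helper, not the plugin code):

#include <cstddef>
#include <vector>

std::vector<size_t> resolveShape(const std::vector<size_t>& input, const std::vector<size_t>& target) {
    std::vector<size_t> out(target);
    for (size_t i = 0; i < out.size(); ++i)
        if (out[i] == 0 && i < input.size())
            out[i] = input[i];  // a zero copies the dimension from the input shape
    return out;
}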
View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/softmax.hpp"

#include <vector>

#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

@ -25,28 +26,14 @@ const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
    InferenceEngine::SizeVector {10, 10},
};

const std::vector<size_t> axis2D = {0, 1};

const auto params2D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                       testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::ValuesIn(inputLayouts2D),
                                       testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes2D), testing::ValuesIn(axis2D),
                                       testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));

INSTANTIATE_TEST_CASE_P(smoke_SoftMax2D, SoftMaxLayerTest, params2D, SoftMaxLayerTest::getTestCaseName);

const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
    InferenceEngine::SizeVector {1, 100, 1, 1},

@ -56,23 +43,11 @@ const std::vector<InferenceEngine::SizeVector> inputShapes4D = {

const std::vector<size_t> axis4D = {0, 1, 2, 3};

const auto params4D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                       testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(InferenceEngine::Layout::NCHW),
                                       testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes4D), testing::ValuesIn(axis4D),
                                       testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));

INSTANTIATE_TEST_CASE_P(smoke_SoftMax4D, SoftMaxLayerTest, params4D, SoftMaxLayerTest::getTestCaseName);

}  // namespace
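For context on the axis2D/axis4D parameters: softmax normalizes along the chosen axis and leaves the remaining dimensions independent. A plain-C++ sketch for a row-major 2D input with axis = 1 (illustration only, not the plugin kernel):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> softmaxAxis1(const std::vector<float>& in, size_t rows, size_t cols) {
    std::vector<float> out(in.size());
    for (size_t r = 0; r < rows; ++r) {
        // Subtract the row maximum first for numerical stability.
        float maxVal = *std::max_element(in.begin() + r * cols, in.begin() + (r + 1) * cols);
        float sum = 0.f;
        for (size_t c = 0; c < cols; ++c) {
            out[r * cols + c] = std::exp(in[r * cols + c] - maxVal);
            sum += out[r * cols + c];
        }
        for (size_t c = 0; c < cols; ++c)
            out[r * cols + c] /= sum;  // each row now sums to 1
    }
    return out;
}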
View File
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/split.hpp"

#include <vector>

#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

@ -12,17 +13,11 @@ using namespace LayerTestsDefinitions;
namespace {

INSTANTIATE_TEST_CASE_P(smoke_NumSplitsCheck, SplitLayerTest,
                        ::testing::Combine(::testing::Values(1, 2, 3, 5, 6, 10, 30), ::testing::Values(0, 1, 2, 3),
                                           ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
                                           ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                                           ::testing::Values(std::vector<size_t>({})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                        SplitLayerTest::getTestCaseName);
}  // namespace
View File
@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "functional_test_utils/skip_tests_config.hpp"

#include <string>
#include <vector>

std::vector<std::string> disabledTestPatterns() {
    return {
        ".*ExclusiveAsyncRequests.*",
View File
@ -18,11 +18,9 @@
// #include "common_test_utils/ngraph_test_utils.hpp"

// using namespace testing;
// using namespace ngraph;

// TEST(TransformationTests, Preprocessing_AddStdScale) {
//     std::shared_ptr<Function> f(nullptr), f_ref(nullptr);
View File
@ -4,12 +4,11 @@
#include <gtest/gtest.h>

#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <queue>
#include <string>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>

@ -44,8 +43,7 @@ TEST(TransformationTests, DISABLED_TemplateTest) {
    // Example reference function
    auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
    auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
    auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant, ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {-1}));
    auto mul = std::make_shared<ngraph::opset3::Multiply>(data, pow);

    f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector {mul}, ngraph::ParameterVector {data});
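The reference function above encodes the identity the template transformation targets: x / c is rewritten as x * c^-1, with Power(c, -1) feeding a Multiply. The same identity in plain C++, assuming a nonzero constant:

#include <cassert>
#include <cmath>

// x / c == x * pow(c, -1) for nonzero c, the rewrite the disabled test expects.
float divideViaReciprocal(float x, float c) {
    assert(c != 0.f);
    return x * std::pow(c, -1.f);
}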