add plugin template (#515)

This commit is contained in:
Alexey Suhov
2020-05-22 22:34:00 +03:00
committed by GitHub
parent 2e3928071f
commit 0064c299c3
49 changed files with 2330 additions and 0 deletions

View File

@@ -0,0 +1,39 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Top-level build script for the template plugin. The plugin is built against
# an already-built Inference Engine via the InferenceEngineDeveloperPackage.
# [cmake:main]
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()
project(InferenceEngineTemplatePlugin)
# Remember the plugin root so subdirectories can reference the public headers.
set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DIR})
# Locate the developer package; pass
# -DInferenceEngineDeveloperPackage_DIR=<dldt build dir> on the command line.
find_package(InferenceEngineDeveloperPackage REQUIRED)
add_subdirectory(src)
# Test subdirectories are added only when the corresponding options are ON.
if(ENABLE_TESTS)
include(CTest)
enable_testing()
if(ENABLE_FUNCTIONAL_TESTS)
add_subdirectory(tests_deprecated/functional)
add_subdirectory(tests/functional)
endif()
if(ENABLE_BEH_TESTS)
add_subdirectory(tests_deprecated/behavior)
endif()
endif()
# [cmake:main]
# install
# ATTENTION: uncomment to install component
# ie_cpack(template)

View File

@@ -0,0 +1,18 @@
# template-plugin
Template Plugin for the Inference Engine, which demonstrates the basics of how an Inference Engine plugin can be built and implemented on top of the Inference Engine Developer Package and Plugin API.
## How to build
```bash
$ cd $DLDT_HOME
$ mkdir $DLDT_HOME/build
$ cd $DLDT_HOME/build
$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
$ make -j8
$ cd $TEMPLATE_PLUGIN_HOME
$ mkdir $TEMPLATE_PLUGIN_HOME/build
$ cd $TEMPLATE_PLUGIN_HOME/build
$ cmake -DInferenceEngineDeveloperPackage_DIR=$DLDT_HOME/build ..
$ make -j8
```

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
 * @brief A header that defines advanced configuration properties for the Template plugin.
 * These properties should be used in SetConfig() and LoadNetwork() methods of plugins
 *
 * @file template_config.hpp
 */
#pragma once
#include <string>
#include "ie_plugin_config.hpp"
namespace InferenceEngine {
namespace TemplateMetrics {
/**
 * @def TEMPLATE_METRIC_VALUE(name)
 * @brief Shortcut for defining Template metric values
 */
#define TEMPLATE_METRIC_VALUE(name) InferenceEngine::TemplateMetrics::name
// Declares a metric value constant whose string value equals its own name.
#define DECLARE_TEMPLATE_METRIC_VALUE(name) static constexpr auto name = #name
// ! [public_header:metrics]
/**
 * @brief Defines whether current Template device instance supports hardware blocks for fast convolution computations.
 */
DECLARE_TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION);
// ! [public_header:metrics]
} // namespace TemplateMetrics
namespace TemplateConfigParams {
/**
 * @def TEMPLATE_CONFIG_KEY(name)
 * @brief Shortcut for defining Template device configuration keys
 */
#define TEMPLATE_CONFIG_KEY(name) InferenceEngine::TemplateConfigParams::_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_KEY(name) DECLARE_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(TEMPLATE_##name)
/**
 * @brief Placeholder configuration key for the Template plugin.
 * NOTE(review): the original description here talked about "YES"/"NO" values
 * controlling TEMPLATE data-layout transformations, which appears to be
 * copy-paste residue from another plugin's header; no code in this template
 * reads this key, so replace this text with the real key's semantics.
 */
DECLARE_TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY);
} // namespace TemplateConfigParams
} // namespace InferenceEngine

View File

@@ -0,0 +1,43 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Builds the templatePlugin shared library from all sources in this directory.
# [cmake:plugin]
set(TARGET_NAME "templatePlugin")
if(ENABLE_LTO)
ie_enable_lto()
endif()
# NOTE(review): file(GLOB) does not pick up newly added sources until the next
# re-configure; real plugins should prefer an explicit source list.
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
# adds a shared library with plugin
ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp)
# Private include paths: this directory plus the plugin's public headers
# (IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR is set by the top-level CMakeLists).
target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include")
target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine IE::inference_engine_transformations ${NGRAPH_LIBRARIES} ${INTEL_ITT_LIBS})
# ATTENTION: uncomment to register a plugin in the plugins.xml file
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
# POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]
# ATTENTION: uncomment to install component
# install
# set(component_name template)
# ie_cpack_add_component(${component_name} REQUIRED)
# install(TARGETS ${TARGET_NAME}
# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH}
# ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH}
# LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
# COMPONENT ${component_name})

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <ie_profiling.hpp>
#include "template_async_infer_request.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
/**
 * @brief Wraps a synchronous TemplateInferRequest into a three-stage async pipeline.
 *
 * Stage order matters: (1) preprocessing + pipeline start on the CPU task
 * executor, (2) waiting for completion on the dedicated wait executor,
 * (3) postprocessing back on the CPU task executor.
 */
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
_pipeline = {
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(PreprocessingAndStartPipeline)
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(WaitPipeline)
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(Postprocessing)
_inferRequest->inferPostprocess();
}}
};
}
// ! [async_infer_request:ctor]
// ! [async_infer_request:dtor]
// Stops the pipeline and waits for in-flight stages before destruction.
TemplateAsyncInferRequest::~TemplateAsyncInferRequest() {
InferenceEngine::AsyncInferRequestThreadSafeDefault::StopAndWait();
}
// ! [async_infer_request:dtor]

View File

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include "template_infer_request.hpp"
namespace TemplatePlugin {
// ! [async_infer_request:header]
/**
 * @brief Asynchronous infer request for the Template plugin.
 *
 * Extends AsyncInferRequestThreadSafeDefault; the constructor implementation
 * sets up the three-stage pipeline, using @p waitExecutor for the wait stage.
 */
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest() override;
private:
// Underlying synchronous request driven by the pipeline stages.
TemplateInferRequest::Ptr _inferRequest;
// Executor dedicated to waiting for device results.
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [async_infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include <algorithm>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <file_utils.h>
#include <cpp_interfaces/exception2status.hpp>
#include "template_config.hpp"
using namespace TemplatePlugin;
// Default configuration: deviceId = 0, perfCount = true (see template_config.hpp).
Configuration::Configuration() { }
/**
 * @brief Builds a configuration by overlaying @p config on top of @p defaultCfg.
 * @param throwOnUnsupported when true an unknown key raises NOT_FOUND,
 *        otherwise unknown keys are silently ignored.
 */
Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg;
for (auto&& c : config) {
const auto& key = c.first;
const auto& value = c.second;
if (CONFIG_KEY(DEVICE_ID) == key) {
// DEVICE_ID is stored as an int; std::stoi throws on non-numeric values.
deviceId = std::stoi(value);
} else if (CONFIG_KEY(PERF_COUNT) == key) {
perfCount = (CONFIG_VALUE(YES) == value);
} else if (throwOnUnsupported) {
THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << key;
}
}
}
/**
 * @brief Returns the value of a single supported configuration key.
 * @throws NOT_FOUND exception for any key other than DEVICE_ID / PERF_COUNT.
 */
InferenceEngine::Parameter Configuration::Get(const std::string& name) const {
    if (CONFIG_KEY(DEVICE_ID) == name) {
        return {std::to_string(deviceId)};
    }
    if (CONFIG_KEY(PERF_COUNT) == name) {
        return {perfCount};
    }
    THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << name;
}

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <string>
#include <map>
#include <unordered_map>
#include <ie_parameter.hpp>
namespace TemplatePlugin {
// Generic string-keyed map used for per-tensor data (inputs/outputs).
template<typename T>
using IOMap = std::unordered_map<std::string, T>;
// ! [configuration:header]
using ConfigMap = std::map<std::string, std::string>;
/**
 * @brief Plugin-wide configuration holder.
 *
 * Copyable and movable; the main constructor overlays a user ConfigMap on top
 * of a default configuration (see template_config.cpp).
 */
struct Configuration {
Configuration();
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const;
// Plugin configuration parameters
int deviceId = 0;
bool perfCount = true;
};
// ! [configuration:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,167 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <atomic>
#include <set>
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <ie_metric_helpers.hpp>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <network_serializer.h>
#include <threading/ie_executor_manager.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ngraph/specialize_function.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <transformations/convert_divide.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
/**
 * @brief Compiles an ICNNNetwork (must carry an ngraph::Function, i.e. IR v10)
 *        into an executable network.
 * @throws IE exception if the network is not IR v10 or compilation fails.
 */
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
                                                     const Configuration& cfg):
    _name(network.getName()),
    _cfg(cfg),
    _waitExecutor(InferenceEngine::ExecutorManager::getInstance()->getExecutor("Template")) {
    // TODO: if your plugin supports device ID (more than a single instance of device can be on host machine)
    // you should select proper device based on KEY_DEVICE_ID or automatic behavior
    // In this case, _waitExecutor should also be created per device.
    try {
        if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
            CompileGraph(ngraphFunction);
        } else {
            THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks";
        }
    } catch (const InferenceEngineException&) {
        // Rethrow the original exception object: `throw e;` would copy and
        // could slice a more-derived exception type.
        throw;
    } catch (const std::exception& e) {
        THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what();
    } catch (...) {
        THROW_IE_EXCEPTION << "Generic exception is thrown";
    }
}
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
/**
 * @brief Restores an executable network from a previously exported stream.
 * The template leaves the body empty: import is optional functionality.
 */
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
const Configuration& cfg) :
_cfg(cfg) {
// TODO: since Import network is not a mandatory functionality, this ctor can just be removed
}
// ! [executable_network:ctor_import_stream]
// ! [executable_network:compile_graph]
/**
 * @brief Clones and transforms the ngraph function, then (in a real plugin)
 *        lowers it to a device-specific graph.
 */
void TemplatePlugin::ExecutableNetwork::CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction) {
// TODO: perform actual graph compilation taking `_cfg` into account
// 1.Copy ngraph::Function first to apply some transformations later in
// ExecutableNetwork::CompileGraph, which modify original ngraph::Function
const bool shareConsts = false, constFolding = false;
std::vector<::ngraph::element::Type> new_types;
std::vector<::ngraph::PartialShape> new_shapes;
// Keep the original parameter types/shapes; specialize_function then produces
// a deep copy (no replacement values are given: the nullptr vector below).
for (const auto &parameter : ngraphFunction->get_parameters()) {
new_shapes.emplace_back(parameter->get_partial_shape());
new_types.emplace_back(parameter->get_element_type());
}
auto copyFunction = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(ngraphFunction),
new_types, new_shapes, std::vector<void *>(new_types.size(), nullptr), constFolding, shareConsts);
// 2. Perform common and device-specific transformations
ngraph::pass::Manager passManager;
// Example: register standard ngraph transformation from ngraph::ngraph
passManager.register_pass<ngraph::pass::ConstantFolding>();
// Example: register inference engine optimization transformation for IE::inference_engine_transformations
passManager.register_pass<ngraph::pass::ConvertDivide>();
// Register any other transformations
// ..
// After `run_passes`, we have the transformed function, where operations match device operations,
// and we can create device hardware-dependent graph
passManager.run_passes(copyFunction);
// 3. Iterate over operations and create hardware-specific ngraph
for (const auto& op : copyFunction->get_ordered_ops()) {
// TODO: map ngraph `op` to device operation
}
// 4. Perform any other steps like allocation and filling device buffers, and so on
}
// ! [executable_network:compile_graph]
// ! [executable_network:create_infer_request_impl]
// Creates the synchronous infer request implementation bound to this network.
InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
// Wraps the synchronous request into an asynchronous one and exposes it through
// the public IInferRequest interface; the custom deleter calls Release().
void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _waitExecutor, _callbackExecutor);
asyncRequest.reset(new InferenceEngine::InferRequestBase<TemplateAsyncInferRequest>(asyncThreadSafeImpl),
[](InferenceEngine::IInferRequest *p) { p->Release(); });
asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
}
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
// Returns a config value; only DEVICE_ID and PERF_COUNT are supported here.
void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
// TODO: return more supported values for config keys
if (name == CONFIG_KEY(DEVICE_ID) ||
name == CONFIG_KEY(PERF_COUNT)) {
result = _cfg.Get(name);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
}
}
// ! [executable_network:get_config]
// ! [executable_network:get_metric]
// Reports network-level metrics; unsupported names throw.
void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
// TODO: return more supported values for metrics
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT)});
} else if (METRIC_KEY(NETWORK_NAME) == name) {
result = IE_SET_METRIC(NETWORK_NAME, _name);
} else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
// TODO: fill with actual number
unsigned int value = 1;
result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
}
// ! [executable_network:get_metric]
// ! [executable_network:export_impl]
/**
 * @brief Serializes the compiled graph into the output stream @p model.
 * Export is optional functionality, so the template leaves the body empty.
 * (Parameter renamed from `dlaModel` to `model` to match the header declaration.)
 */
void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& model) {
    // TODO: code which exports the graph to `model`
}
// ! [executable_network:export_impl]

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include <tuple>
#include <memory>
#include <string>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <ie_common.h>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <cnn_network_impl.hpp>
#include <threading/ie_itask_executor.hpp>
#include <ngraph/function.hpp>
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
namespace TemplatePlugin {
class Engine;
/**
 * @class ExecutableNetwork
 * @brief Interface of executable network
 */
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
// Compiles an IR v10 (ngraph) network for the Template device.
ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
const Configuration& cfg);
// Restores a network from a previously exported stream (optional feature).
ExecutableNetwork(std::istream & model,
const Configuration& cfg);
~ExecutableNetwork() override = default;
// Methods from a base class ExecutableNetworkThreadSafeDefault
void ExportImpl(std::ostream& model) override;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
// Public fields below are read/written by TemplateInferRequest:
// _requestId numbers requests, _name labels profiling, _cfg carries settings.
std::atomic<std::size_t> _requestId = {0};
std::string _name;
Configuration _cfg;
private:
void CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction);
std::shared_ptr<Engine> _plugin;
// Executor used by async requests to wait for device results.
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [executable_network:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,224 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include <ie_blob.h>
#include <ie_plugin.hpp>
#include <description_buffer.hpp>
#include <debug.h>
#include <ie_layouts.h>
#include <threading/ie_executor_manager.hpp>
#include <blob_transform.hpp>
#include <ie_parallel.hpp>
#include <ie_memcpy.h>
#include <precision_utils.h>
#include <template/template_config.hpp>
#include "template_infer_request.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin;
using Time = std::chrono::high_resolution_clock;
using ns = std::chrono::nanoseconds;
using fsec = std::chrono::duration<float>;
// ! [infer_request:ctor]
/**
 * @brief Creates a synchronous infer request: sets up per-request profiling
 *        tasks and allocates device/input/output buffers.
 */
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
                                           const InferenceEngine::OutputsDataMap& networkOutputs,
                                           const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
    InferRequestInternal(networkInputs, networkOutputs),
    _executableNetwork(executableNetwork) {
    // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
    auto requestID = std::to_string(_executableNetwork->_requestId);
    _executableNetwork->_requestId++;
    std::string name = _executableNetwork->_name + "_Req" + requestID;
    // Label order must match the stage enum in template_infer_request.hpp:
    // Preprocess, Postprocess, StartPipeline, WaitPipeline.
    // (Fixed "StartPipline"/"WaitPipline" typos in the last two labels.)
    _profilingTask = { {
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipeline") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipeline") },
    } };
    allocateDeviceBuffers();
    allocateInputBlobs();
    allocateOutputBlobs();
}
// ! [infer_request:ctor]
// ! [infer_request:dtor]
// Decrements the network's request counter (incremented in the ctor), so
// _requestId also acts as a live-request count — TODO confirm that is intended.
TemplateInferRequest::~TemplateInferRequest() {
_executableNetwork->_requestId--;
}
// ! [infer_request:dtor]
// Placeholder: real plugins allocate device-side buffers here.
void TemplateInferRequest::allocateDeviceBuffers() {
// TODO: allocate device buffers if Template device is a remote one
}
/**
 * @brief Allocates host-side input blobs in the network's requested layout,
 *        plus NCHW shadow blobs used when the network input layout is NHWC.
 * @throws IE exception for precisions other than FP32, FP16, I16, U8.
 */
void TemplateInferRequest::allocateInputBlobs() {
    for (auto &networkInput : _networkInputs) {
        SizeVector dims = networkInput.second->getTensorDesc().getDims();
        Precision precision = networkInput.second->getTensorDesc().getPrecision();
        Layout input_layout = networkInput.second->getInputData()->getLayout();
        Blob::Ptr inputBlob;
        Blob::Ptr inputBlobNCHW;
        switch (precision) {
        case Precision::FP32 :
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::FP16 :
        case Precision::I16 :
            // FP16 and I16 are both 16-bit element types, so int16_t storage fits either.
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::U8 :
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, Layout::NCHW });
            }
            break;
        default:
            // Fixed: the original message streamed `precision` twice.
            THROW_IE_EXCEPTION << "Unsupported network precision: "
                << precision << "! Supported precisions are: FP32, FP16, I16, U8";
        }
        // allocate the input blob
        inputBlob->allocate();
        _inputs[networkInput.first] = inputBlob;
        // Only allocate the NCHW shadow when a layout conversion is needed.
        if (inputBlobNCHW != inputBlob) {
            inputBlobNCHW->allocate();
        }
        _inputsNCHW[networkInput.first] = inputBlobNCHW;
    }
}
/**
 * @brief Allocates host-side output blobs plus NCHW shadow blobs when the
 *        network output layout is NHWC. Only FP32/FP16 outputs are supported.
 */
void TemplateInferRequest::allocateOutputBlobs() {
for (auto &networkOutput : _networkOutputs) {
SizeVector dims = networkOutput.second->getTensorDesc().getDims();
Precision precision = networkOutput.second->getPrecision();
Blob::Ptr outputBlob;
// allocate the output blob
Blob::Ptr outputBlobNCHW;
switch (precision) {
case Precision::FP32 :
outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, networkOutput.second->getLayout() });
if (networkOutput.second->getLayout() == Layout::NHWC) {
outputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
}
break;
case Precision::FP16 :
// FP16 uses 16-bit storage, hence int16_t elements.
outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, networkOutput.second->getLayout() });
if (networkOutput.second->getLayout() == Layout::NHWC) {
outputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
}
break;
default:
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Unsupported output precision: "
<< precision << "! Supported precisions are: FP32, FP16";
}
// allocate the output blob
outputBlob->allocate();
_outputs[networkOutput.first] = outputBlob;
if (outputBlobNCHW != outputBlob) {
outputBlobNCHW->allocate();
}
_outputsNCHW[networkOutput.first] = outputBlobNCHW;
}
// Sanity check: both maps must be non-empty. NOTE(review): the _networkInputs
// check also lives here rather than in allocateInputBlobs — confirm intended.
if (_networkOutputs.empty() || _networkInputs.empty()) {
THROW_IE_EXCEPTION << "Internal error: no information about network's output/input";
}
}
// ! [infer_request:infer_impl]
// Synchronous inference: runs the same four stages the async request schedules,
// in order, on the calling thread.
void TemplateInferRequest::InferImpl() {
// TODO: fill with actual list of pipeline stages, which are executed syncronously for sync infer requests
inferPreprocess();
startPipeline();
waitPipeline();
inferPostprocess();
}
// ! [infer_request:infer_impl]
// ! [infer_request:infer_preprocess]
// Stage 1: runs IE data preprocessing and converts NHWC inputs to the NCHW
// shadow blobs allocated in allocateInputBlobs.
void TemplateInferRequest::inferPreprocess() {
auto prev = Time::now();
// execute input pre-processing.
InferRequestInternal::execDataPreprocessing(_inputs);
for (auto &input : InferRequestInternal::_inputs) {
auto& src = input.second;
auto& dst = _inputsNCHW[input.first];
if (src != dst) {
// If the blobs already match in precision/dims/layout, just alias the
// user blob instead of copying.
if (src->getTensorDesc().getPrecision() == dst->getTensorDesc().getPrecision()
&& src->getTensorDesc().getDims() == dst->getTensorDesc().getDims()
&& src->getTensorDesc().getLayout() == dst->getTensorDesc().getLayout()) {
_inputsNCHW[input.first] = input.second;
} else { // Convert Layout to NCHW
InferenceEngine::blob_copy(src, dst);
}
}
}
// TODO: Preprocessing on inputs if needed: work _inputsNCHW
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:infer_preprocess]
// Stage 2: submits work to the device (template leaves this empty).
void TemplateInferRequest::startPipeline() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[StartPipeline])
// TODO: Start pipeline and fill _inputTransferTime, _executeTime, _outputTransferTime
}
// Stage 3: waits for device completion.
void TemplateInferRequest::waitPipeline() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[WaitPipeline])
auto prev = Time::now();
// TODO: Wait pipeline using driver API or other synronizations methods
// NOTE(review): this overwrites _inputPreprocessTime measured in
// inferPreprocess; there is no dedicated wait-time member, so this looks like
// copy-paste — confirm which counter the wait duration should go to.
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// Stage 4: post-processes outputs (e.g. converting back to NHWC).
void TemplateInferRequest::inferPostprocess() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[Postprocess])
auto prev = Time::now();
// TODO: perform post-processing and convert to NHWC layout
_outputPostProcessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:get_performance_counts]
// Reports the five timing members (stored in nanoseconds) as microsecond
// profiling entries; the numeric prefixes keep the map in display order.
void TemplateInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
InferenceEngineProfileInfo info;
info.execution_index = 0;
info.status = InferenceEngineProfileInfo::EXECUTED;
// Times are collected in ns; divide by 1000 to report microseconds.
info.cpu_uSec = info.realTime_uSec = _inputPreprocessTime / 1000;
perfMap["1. input preprocessing"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _inputTransferTime / 1000;
perfMap["2. input transfer to a device"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _executeTime / 1000;
perfMap["3. execution time"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _outputTransferTime / 1000;
perfMap["4. output transfer from a device"] = info;
info.cpu_uSec = info.realTime_uSec = _outputPostProcessTime / 1000;
perfMap["5. output postprocessing"] = info;
}
// ! [infer_request:get_performance_counts]

View File

@@ -0,0 +1,74 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include <ie_common.h>
#include <ie_profiling.hpp>
#include <cpp_interfaces/impl/ie_infer_request_internal.hpp>
#include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
#include <threading/ie_itask_executor.hpp>
#include "template_config.hpp"
namespace TemplatePlugin {
class ExecutableNetwork;
// ! [infer_request:header]
/**
 * @brief Synchronous infer request for the Template plugin.
 *
 * Holds per-request input/output blobs (plus NCHW shadow copies for NHWC
 * networks) and the timing counters reported by GetPerformanceCounts.
 */
class TemplateInferRequest : public InferenceEngine::InferRequestInternal {
public:
typedef std::shared_ptr<TemplateInferRequest> Ptr;
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest() override;
void InferImpl() override;
void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) const override;
// pipeline methods-stages which are used in async infer request implementation and assigned to particular executor
void inferPreprocess();
void startPipeline();
void waitPipeline();
void inferPostprocess();
// Owning pointer back to the network; keeps it alive while requests exist.
std::shared_ptr<ExecutableNetwork> _executableNetwork;
private:
void allocateDeviceBuffers();
void allocateInputBlobs();
void allocateOutputBlobs();
// Profiling stage indices; order must match the _profilingTask initializer
// in the constructor.
enum {
Preprocess,
Postprocess,
StartPipeline,
WaitPipeline,
numOfStages
};
std::array<InferenceEngine::ProfilingTask, numOfStages> _profilingTask;
// NCHW shadow blobs used when network blobs are NHWC.
InferenceEngine::BlobMap _inputsNCHW;
InferenceEngine::BlobMap _outputsNCHW;
// for performance counts (nanoseconds)
double _inputPreprocessTime = 0.0;
double _inputTransferTime = 0.0;
double _executeTime = 0.0;
double _outputTransferTime = 0.0;
double _outputPostProcessTime = 0.0;
};
// ! [infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,193 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <memory>
#include <vector>
#include <sstream>
#include <regex>
#include <string>
#include <map>
#include <ie_metric_helpers.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ie_plugin_config.hpp>
#include <ie_util_internal.hpp>
#include <inference_engine.hpp>
#include <file_utils.h>
#include <cpp_interfaces/base/ie_plugin_base.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include <graph_tools.hpp>
#include <ie_input_info.hpp>
#include <ie_layouts.h>
#include <hetero/hetero_plugin_config.hpp>
#include <template/template_config.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_infer_request.hpp"
using namespace TemplatePlugin;
// ! [plugin:ctor]
// Sets the device name under which this plugin registers ("TEMPLATE").
Plugin::Plugin() {
// TODO: fill with actual device name
_pluginName = "TEMPLATE";
}
// ! [plugin:ctor]
// ! [plugin:load_exe_network_impl]
/**
 * @brief Validates input/output precisions, clones and trims the network,
 *        and compiles it into an ExecutableNetwork.
 * @throws IE exception for unsupported input/output precisions.
 */
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork & network,
const ConfigMap &config) {
auto cfg = Configuration{ config, _cfg };
InferenceEngine::InputsDataMap networkInputs;
InferenceEngine::OutputsDataMap networkOutputs;
network.getInputsInfo(networkInputs);
network.getOutputsInfo(networkOutputs);
// TODO: check with precisions supported by Template device
for (auto networkOutput : networkOutputs) {
auto output_precision = networkOutput.second->getPrecision();
if (output_precision != Precision::FP32 &&
output_precision != Precision::FP16) {
THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
}
}
for (auto networkInput : networkInputs) {
auto input_precision = networkInput.second->getTensorDesc().getPrecision();
if (input_precision != InferenceEngine::Precision::FP32 &&
input_precision != InferenceEngine::Precision::FP16 &&
input_precision != InferenceEngine::Precision::I16 &&
input_precision != InferenceEngine::Precision::U8) {
THROW_IE_EXCEPTION << "Input image format " << input_precision << " is not supported yet.\n"
<< "Supported formats are: FP32, FP16, I16 and U8.";
}
}
// Clone so the caller's network is not mutated by const folding / trimming.
auto clonedNetwork = cloneNet(network);
ConstTransformer transformator(clonedNetwork.get());
transformator.fullTrim();
return std::make_shared<ExecutableNetwork>(*clonedNetwork, cfg);
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network_impl]
/**
 * @brief Restores an executable network from an exported stream.
 * Optional functionality: the template builds an ExecutableNetwork from the
 * stream ctor (whose body is empty) and wraps it in the public interface.
 */
InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) {
// TODO: Import network from stream is not mandatory functionality;
// Can just throw an exception and remove the code below
Configuration exportedCfg;
// some code below which reads exportedCfg from `model` stream
// ..
auto cfg = Configuration(config, exportedCfg);
IExecutableNetwork::Ptr executableNetwork;
auto exec_network_impl = std::make_shared<ExecutableNetwork>(model, cfg);
executableNetwork.reset(new ExecutableNetworkBase<ExecutableNetworkInternal>(exec_network_impl),
[](InferenceEngine::details::IRelease *p) {p->Release(); });
return InferenceEngine::ExecutableNetwork{ executableNetwork };
}
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
/**
 * @brief Reports which operations of an IR v10 network this device can run.
 * The template marks every op as supported; a real plugin filters here.
 */
void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, QueryNetworkResult &res) const {
Configuration cfg{config, _cfg, false};
res.rc = StatusCode::OK;
if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
auto ops = ngraphFunction->get_ordered_ops();
for (auto&& op : ops) {
// TODO: investigate if an op is actually supported by Template device
bool supported = true;
if (supported) {
res.supportedLayersMap.insert({ op->get_friendly_name(), GetName() });
}
}
} else {
THROW_IE_EXCEPTION << "TEMPLATE plugin can query only IR v10 networks";
}
}
// ! [plugin:query_network]
// ! [plugin:add_extension]
void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
// TODO: add extensions if plugin supports extensions
}
// ! [plugin:add_extension]
// ! [plugin:set_config]
// Merges `config` into the plugin-wide configuration; keys not present in
// `config` keep their current values (merge semantics provided by the
// Configuration(config, base) constructor).
void Plugin::SetConfig(const ConfigMap &config) {
    _cfg = Configuration{config, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
// Returns the current value of configuration key `name`; lookup (and any
// error on unknown keys) is delegated to Configuration::Get. `options` is unused.
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
    return _cfg.Get(name);
}
// ! [plugin:get_config]
// ! [plugin:get_metric]
// Returns the value of the device metric identified by `name`.
// Throws for metrics the Template device does not report. `options` is
// accepted for interface compatibility and is currently unused.
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
    if (METRIC_KEY(SUPPORTED_METRICS) == name) {
        std::vector<std::string> supportedMetrics = {
            METRIC_KEY(AVAILABLE_DEVICES),
            METRIC_KEY(SUPPORTED_METRICS),
            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
            METRIC_KEY(FULL_DEVICE_NAME),
            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
    } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
        // Fixed local-variable typo: was "confiKeys".
        std::vector<std::string> configKeys = {
            CONFIG_KEY(DEVICE_ID),
            CONFIG_KEY(PERF_COUNT) };
        IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
    } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
        // TODO: fill list of available devices
        std::vector<std::string> availableDevices = { "" };
        IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
    } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
        // Renamed from `name` to avoid shadowing the metric-name parameter.
        std::string deviceFullName = "Template Device Full Name";
        IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceFullName);
    } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
        // TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
        std::vector<std::string> capabilities = { METRIC_VALUE(FP32), TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION) };
        IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
    } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
        // TODO: fill with actual values
        using uint = unsigned int;
        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
    } else {
        THROW_IE_EXCEPTION << "Unsupported device metric: " << name;
    }
}
// ! [plugin:get_metric]
IE_SUPPRESS_DEPRECATED_START
// ! [plugin:create_plugin_engine]
// Factory entry point the Inference Engine core calls to instantiate this
// plugin; wraps the Plugin instance into the IE-compatible shell.
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept {
    try {
        auto pluginImpl = std::make_shared<Plugin>();
        plugin = make_ie_compatible_plugin({2, 1, CI_BUILD_NUMBER, "templatePlugin"}, pluginImpl);
        return OK;
    } catch (std::exception &ex) {
        // Propagate the failure reason through the response descriptor.
        return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
    }
}
// ! [plugin:create_plugin_engine]
IE_SUPPRESS_DEPRECATED_END

View File

@@ -0,0 +1,47 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
#include <description_buffer.hpp>
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>
#include <memory>
#include <string>
#include <map>
#include <unordered_map>
#include <vector>
#include "template_executable_network.hpp"
#include "template_config.hpp"
//! [plugin:header]
namespace TemplatePlugin {
// Template plugin front end: implements the Inference Engine plugin API
// (configuration, network load/import, capability queries) on top of
// InferencePluginInternal.
class Plugin : public InferenceEngine::InferencePluginInternal {
public:
    using Ptr = std::shared_ptr<Plugin>;
    Plugin();
    ~Plugin() override = default;
    // Merges `config` into the plugin-wide configuration.
    void SetConfig(const std::map<std::string, std::string> &config) override;
    // Reports which operations of `network` this plugin claims to support.
    void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
                      const std::map<std::string, std::string>& config,
                      InferenceEngine::QueryNetworkResult &res) const override;
    // Compiles `network` into an executable network for the Template device.
    InferenceEngine::ExecutableNetworkInternal::Ptr
    LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network,
                       const std::map<std::string, std::string> &config) override;
    // No-op in the template; extension support is optional.
    void AddExtension(InferenceEngine::IExtensionPtr extension) override;
    // Returns the current value of a configuration key.
    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
    // Returns the value of a device metric (see plugin.cpp for the supported set).
    InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
    // Restores an executable network previously exported to a stream.
    InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;
private:
    // Plugin-wide configuration; merged with per-load configs.
    Configuration _cfg;
};
} // namespace TemplatePlugin
//! [plugin:header]

View File

@@ -0,0 +1,18 @@
# Copyright (C) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME TemplateFuncTests)

# Register the functional-test binary via the IE developer-package helper.
# DEPENDENCIES makes sure templatePlugin is built first; LINK_LIBRARIES pulls
# in the shared functional-test infrastructure; LABELS tags tests for ctest -L.
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDENCIES
templatePlugin
LINK_LIBRARIES
IE::funcSharedTests
ADD_CPPLINT
LABELS
TEMPLATE
)

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior/infer_request_callback.hpp"
using namespace LayerTestsDefinitions;
namespace {
// Network precisions the callback behavior tests are parameterized over.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
// NOTE(review): `{{{}}}` yields one map containing a single empty key/value
// pair, unlike the plain empty map `{}` used by sibling test files — confirm
// this is intentional.
const std::vector<std::map<std::string, std::string>> configs = {
{{{}}}
};
// NOTE(review): this template-plugin suite targets DEVICE_CPU — verify it
// should not target the TEMPLATE device instead.
const std::vector<std::string> devices{CommonTestUtils::DEVICE_CPU};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(devices),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,28 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/set_preprocess.hpp"
using namespace LayerTestsDefinitions;
namespace {
// Network precisions the preprocessing behavior tests are parameterized over.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
// Single empty configuration: run with plugin defaults.
const std::vector<std::map<std::string, std::string>> configs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreProcessTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
PreProcessTests::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,13 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp"
// Regex patterns of test names to skip; the template functional suite
// currently disables none.
std::vector<std::string> disabledTestPatterns() {
    std::vector<std::string> patterns;
    return patterns;
}

View File

@@ -0,0 +1,33 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
disable_deprecated_warnings()
set(TARGET_NAME TemplateBehaviorTests)
# NOTE(review): file(GLOB_RECURSE) will not pick up newly added test files
# until CMake is re-run; an explicit source list (or CONFIGURE_DEPENDS on
# CMake >= 3.12) is the safer idiom.
file(GLOB_RECURSE TEST_INCLUDE
${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
file(GLOB_RECURSE TEST_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
)
# Build the plugin before the tests so they can load it at run time.
list(APPEND DEPENDENCIES
templatePlugin)
source_group("src" FILES ${TEST_SRC})
source_group("include" FILES ${TEST_INCLUDE})
add_executable(${TARGET_NAME}
${TEST_SRC}
${TEST_INCLUDE})
target_link_libraries(${TARGET_NAME} PRIVATE IE::IEBehaviorSharedTests)
add_test(NAME ${TARGET_NAME}
COMMAND ${TARGET_NAME})
# add_dependencies only orders the build; linking comes from the line above.
add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,19 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "holders_tests.hpp"
#include <vector>
// Exercises every destruction order of the three IE object kinds below to
// verify no order crashes or leaks.
INSTANTIATE_TEST_CASE_P(ReleaseOrderTests, CPP_HoldersTests, testing::Combine(testing::ValuesIn(std::vector<std::vector<int>> {
    // 0 - plugin
    // 1 - executable_network
    // 2 - infer_request
    {0, 1, 2},
    {0, 2, 1},
    {1, 0, 2},
    {1, 2, 0},
    {2, 0, 1},
    {2, 1, 0},
}), testing::Values("TEMPLATE")));

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_config.hpp"
#include "template_test_data.hpp"
// Incorrect-config handling (parameter sets come from template_test_data.hpp).
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTest, ValuesIn(withIncorrectConfValues),
                        getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTestInferRequestAPI,
                        ValuesIn(supportedValues),
                        getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginCorrectConfigTestInferRequestAPI,
                        ValuesIn(supportedValues),
                        getTestCaseName);

View File

@@ -0,0 +1,12 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_exec_graph_info.hpp"
#include "template_test_data.hpp"
// Executable-graph info behavior tests for the supported parameter set.
INSTANTIATE_TEST_CASE_P(
    BehaviorTest,
    BehaviorPluginTestExecGraphInfo,
    ValuesIn(supportedValues),
    getTestCaseName);

View File

@@ -0,0 +1,8 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request.hpp"
#include "template_test_data.hpp"
// Infer-request behavior tests for the request-supported parameter set.
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);

View File

@@ -0,0 +1,13 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_config.hpp"
#include "template_test_data.hpp"
// Infer-request configuration behavior tests.
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
                        getConfigTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfig,
                        ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesNetworkOnly)),
                        getConfigTestCaseName);

View File

@@ -0,0 +1,9 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_input.hpp"
#include "template_test_data.hpp"
// Input-blob behavior tests across all supported input precisions.
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
                        getTestCaseName);

View File

@@ -0,0 +1,9 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_infer_request_output.hpp"
#include "template_test_data.hpp"
// Output-blob behavior tests across all supported output precisions.
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
                        getOutputTestCaseName);

View File

@@ -0,0 +1,93 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_layers.hpp"
// Per-layer behavior tests for the TEMPLATE device.
// NOTE(review): deconv cases use `conv_case` while conv cases use
// `conv_dw_case` — confirm the pairing is intentional and not swapped.
conv_test_params deconv_test_cases[] = {
    conv_test_params("TEMPLATE", conv_case),
};
conv_test_params conv_test_cases[] = {
    conv_test_params("TEMPLATE", conv_dw_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, DeconvolutionLayerTest,
                        ::testing::ValuesIn(deconv_test_cases),
                        getTestName<conv_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ConvolutionLayerTest,
                        ::testing::ValuesIn(conv_test_cases),
                        getTestName<conv_test_params>);
pool_test_params roi_pool_test_cases[] = {
    pool_test_params("TEMPLATE", "FP32", pool_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ROIPoolingLayerTest,
                        ::testing::ValuesIn(roi_pool_test_cases),
                        getTestName<pool_test_params>);
activ_test_params activ_test_cases[] = {
    activ_test_params("TEMPLATE", "FP16", activation_case),
};
activ_test_params clamp_test_cases[] = {
    activ_test_params("TEMPLATE", "FP16", clamp_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ActivationLayerTest,
                        ::testing::ValuesIn(activ_test_cases),
                        getTestName<activ_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ReLULayerTest,
                        ::testing::Values(activ_test_params("TEMPLATE", "FP32", activation_case)),
                        getTestName<activ_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ClampLayerTest,
                        ::testing::ValuesIn(clamp_test_cases),
                        getTestName<activ_test_params>);
norm_test_params norm_test_cases[] = {
    norm_test_params("TEMPLATE", "FP32", norm_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, NormalizeLayerTest,
                        ::testing::ValuesIn(norm_test_cases),
                        getTestName<norm_test_params>);
scale_test_params scale_test_cases[] = {
    scale_test_params("TEMPLATE", "FP32", scale_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ScalingLayerTest,
                        ::testing::ValuesIn(scale_test_cases),
                        getTestName<scale_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ShapingLayerTest,
                        ::testing::Values(shaping_test_params("TEMPLATE", "FP32", shape_case)),
                        getTestName<shaping_test_params>);
INSTANTIATE_TEST_CASE_P(BehaviorTest, ElementWiseLayerTest,
                        ::testing::Values(element_test_params("TEMPLATE", "FP32", shape_case)),
                        getTestName<element_test_params>);
object_test_params object_test_cases[] = {
    object_test_params("TEMPLATE", "FP32", object_case),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, ObjectDetectionLayerTest,
                        ::testing::ValuesIn(object_test_cases),
                        getTestName<object_test_params>);
memory_test_params memory_test_cases[] = {
    memory_test_params("TEMPLATE", "FP32", memory_case),
};
// FIXME
// #if (defined INSTANTIATE_TESTS)
// INSTANTIATE_TEST_CASE_P(BehaviorTest, MemoryLayerTest,
//                         ::testing::ValuesIn(memory_test_cases),
//                         getTestName<memory_test_params>);
// #endif

View File

@@ -0,0 +1,36 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_layout.hpp"
// Layout behavior tests: positive cases load networks with valid layouts,
// negative cases must be rejected by the plugin.
layout_test_params power_test_cases[] = {
    layout_test_params("TEMPLATE", "FP16", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 1, 2, 2)),
};
layout_test_params conv_test_cases_1[] = {
    layout_test_params("TEMPLATE", "FP16", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 1, 2, 2)),
};
layout_test_params power_neg_test_cases[] = {
    // Graph Error Description: Error: Tensor size should not be 0.
    layout_test_params("TEMPLATE", "FP16", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
    layout_test_params("TEMPLATE", "FP16", Layout::CHW, power_params({ { 3, 32, 16 } }, 1, 2, 2)),
};
layout_test_params conv_neg_test_cases[] = {
    // LoadNetwork hangs if Network has 1 dims format: CVS-8508
    layout_test_params("TEMPLATE", "FP16", Layout::C, power_params({ { 3 } }, 1, 2, 2)),
    layout_test_params("TEMPLATE", "FP16", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
    layout_test_params("TEMPLATE", "FP16", Layout::CHW, power_params({ { 3, 32, 16 } }, 1, 2, 2)),
};
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadPower,
                        ::testing::ValuesIn(power_test_cases), getTestName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadConv,
                        ::testing::ValuesIn(conv_test_cases_1), getTestName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanNotLoadPower,
                        ::testing::ValuesIn(power_neg_test_cases), getTestName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanNotLoadConv,
                        ::testing::ValuesIn(conv_neg_test_cases), getTestName);

View File

@@ -0,0 +1,14 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_unsupported.hpp"
#include "template_test_data.hpp"
// Negative tests: unsupported precisions and batch sizes must be rejected.
// The all-unsupported variant is kept disabled (commented) by the authors.
// INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestAllUnsupported, ValuesIn(allUnSupportedValues),
//                         getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestTypeUnsupported, ValuesIn(typeUnSupportedValues),
                        getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestBatchUnsupported, ValuesIn(batchUnSupportedValues),
                        getTestCaseName);

View File

@@ -0,0 +1,8 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin_version.hpp"
#include "template_test_data.hpp"
// Version-reporting tests; the supported set is extended with a HETERO case.
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestVersion, ValuesIn(add_element_into_array(supportedValues, BEH_HETERO)), getTestCaseName);

View File

@@ -0,0 +1,12 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior_test_plugin.h"
#include "behavior_test_plugins.hpp"
#include "template_test_data.hpp"
// General plugin behavior tests over supported input/output precisions.
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInput, ValuesIn(allInputSupportedValues),
                        getTestCaseName);
INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestOutput, ValuesIn(allOutputSupportedValues),
                        getOutputTestCaseName);

View File

@@ -0,0 +1,71 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "behavior_test_plugin.h"
// Shared parameter sets for the template-plugin behavior tests.
// correct params
#define BEH_HETERO BehTestParams("HETERO", \
                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
                                 Precision::FP32)
// NOTE(review): BEH_TEMPLATE pairs the FP16 model with Precision::FP32 —
// confirm the precision/model combination is intentional.
#define BEH_TEMPLATE BehTestParams("TEMPLATE", \
                                   FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str, \
                                   FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob, \
                                   Precision::FP32)
// all parameters are unsupported - reversed
#define BEH_US_ALL_TEMPLATE BehTestParams("TEMPLATE", \
                                          FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.model_xml_str, \
                                          FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.weights_blob, \
                                          Precision::Q78)
const BehTestParams supportedValues[] = {
    BEH_TEMPLATE,
};
const BehTestParams requestsSupportedValues[] = {
    BEH_TEMPLATE,
};
// Input precisions the plugin accepts.
const BehTestParams allInputSupportedValues[] = {
    BEH_TEMPLATE,
    BEH_TEMPLATE.withIn(Precision::FP16),
    BEH_TEMPLATE.withIn(Precision::U8),
    BEH_TEMPLATE.withIn(Precision::I16),
};
// Output precisions the plugin accepts.
const BehTestParams allOutputSupportedValues[] = {
    BEH_TEMPLATE,
    BEH_TEMPLATE.withOut(Precision::FP16),
};
// Input precisions expected to be rejected.
const BehTestParams typeUnSupportedValues[] = {
    BEH_TEMPLATE.withIn(Precision::Q78),
    BEH_TEMPLATE.withIn(Precision::U16),
    BEH_TEMPLATE.withIn(Precision::I8),
    BEH_TEMPLATE.withIn(Precision::I32),
};
// Batch size 0 is invalid and expected to be rejected.
const BehTestParams batchUnSupportedValues[] = {
    BEH_TEMPLATE.withBatchSize(0),
};
const BehTestParams allUnSupportedValues[] = {
    BEH_US_ALL_TEMPLATE,
};
const std::vector<BehTestParams> withCorrectConfValuesNetworkOnly = {
    BEH_TEMPLATE.withConfig({ { KEY_DEVICE_ID, "0" } }),
};
// KEY_CPU_BIND_THREAD is a CPU-plugin key, so TEMPLATE should reject it.
const BehTestParams withIncorrectConfValues[] = {
    BEH_TEMPLATE.withConfig({ { KEY_CPU_BIND_THREAD, "ON" } }),
};
const std::vector<BehTestParams> withCorrectConfValues = {
    BEH_TEMPLATE.withConfig({}),
};

View File

@@ -0,0 +1,13 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp"
// Regex patterns of test names to skip; the deprecated behavior suite
// currently disables none.
std::vector<std::string> disabledTestPatterns() {
    std::vector<std::string> patterns;
    return patterns;
}

View File

@@ -0,0 +1,21 @@
# Copyright (C) 2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
disable_deprecated_warnings()
# [cmake:functional_tests]
set(TARGET_NAME TemplateFunctionalTests)
# NOTE(review): file(GLOB_RECURSE) will not notice newly added sources until
# CMake is re-run; an explicit source list is the safer idiom.
file(GLOB_RECURSE TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
add_executable(${TARGET_NAME} ${TEST_SOURCES})
# link a library with common Inference Engine tests
target_link_libraries(${TARGET_NAME} PRIVATE IE::IESharedTests)
# make sure plugin is built before tests are run
add_dependencies(${TARGET_NAME} templatePlugin)
# [cmake:functional_tests]
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,205 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <string>
#include <vector>
#include "ie_class.hpp"
//
// IE Class Common tests with <pluginName, deviceName params>
//
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassBasicTestP, IEClassBasicTestP,
        ::testing::Values(std::make_pair("templatePlugin", "TEMPLATE")));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassNetworkTestP, IEClassNetworkTestP,
        ::testing::Values("TEMPLATE"));
//
// IE Class GetMetric
//
// One instantiation per metric key the TEMPLATE device reports.
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
        ::testing::Values("TEMPLATE"));
//
// IE Class SetConfig
//
using IEClassSetConfigTestHETERO = IEClassNetworkTest;
// Verifies HETERO accepts DUMP_GRAPH_DOT and that GetConfig reads back the
// value just set (YES, then NO, then YES after querying supported keys).
TEST_F(IEClassSetConfigTestHETERO, nightly_SetConfigNoThrow) {
    {
        Core ie;
        Parameter p;
        ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES) } }, "HETERO"));
        ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)));
        bool dump = p.as<bool>();
        ASSERT_TRUE(dump);
    }
    {
        Core ie;
        Parameter p;
        ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO) } }, "HETERO"));
        ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)));
        bool dump = p.as<bool>();
        ASSERT_FALSE(dump);
    }
    {
        Core ie;
        Parameter p;
        // Querying supported keys first must not interfere with SetConfig.
        ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
        ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES) } }, "HETERO"));
        ASSERT_NO_THROW(p = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)));
        bool dump = p.as<bool>();
        ASSERT_TRUE(dump);
    }
}
//
// IE Class GetConfig
//
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassGetConfigTest, IEClassGetConfigTest,
        ::testing::Values("TEMPLATE"));
using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;
// Iterates over every config key the TEMPLATE device advertises and prints
// its default value; only keys known to this test are printed.
TEST_F(IEClassGetConfigTestTEMPLATE, nightly_GetConfigNoThrow) {
    Core ie;
    Parameter p;
    std::string deviceName = "TEMPLATE";
    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
    std::vector<std::string> configValues = p;
    for (auto && confKey : configValues) {
        if (CONFIG_KEY(DEVICE_ID) == confKey) {
            std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID));
            std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl;
        } else if (CONFIG_KEY(PERF_COUNT) == confKey) {
            bool defaultPerfCount = ie.GetConfig(deviceName, CONFIG_KEY(PERF_COUNT));
            std::cout << CONFIG_KEY(PERF_COUNT) << " : " << defaultPerfCount << std::endl;
        } else if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) == confKey) {
            bool defaultExclusive = ie.GetConfig(deviceName, CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS));
            std::cout << CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) << " : " << defaultExclusive << std::endl;
        }
    }
}
//
// Executable Network GetMetric
//
// Each instantiation also covers MULTI and HETERO wrappers over TEMPLATE.
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
        ::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
        ::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
        ::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
        ::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
        ::testing::Values("TEMPLATE", "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
//
// Executable Network GetConfig / SetConfig
//
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
        ::testing::Values("TEMPLATE"));
// IE Class Query network
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
        ::testing::Values("TEMPLATE"));
// IE Class Load network
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
        ::testing::Values("TEMPLATE"));
//
// Hetero Executable Network GetMetric
//
// Only built when the CPU (MKL-DNN) plugin is available as the hetero fallback.
#ifdef ENABLE_MKL_DNN
// Fixed typo in instantiation labels: "Networl" -> "Network".
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
        ::testing::Values("TEMPLATE"));
INSTANTIATE_TEST_CASE_P(
        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
        ::testing::Values("TEMPLATE"));
#endif  // ENABLE_MKL_DNN

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_model_repo.hpp"
// No model repositories are configured for the template-plugin tests;
// the bare separator string means "empty repository list".
std::string get_model_repo() {
    static const char kEmptyRepoList[] = ":";
    return kEmptyRepoList;
}
// Thin forwards to the framework defaults: model and data paths are taken
// from TestDataHelpers' default resolution (environment/convention based).
const char* TestDataHelpers::getModelPathNonFatal() noexcept {
    return TestDataHelpers::getModelPathNonFatalDefault();
}
std::string TestDataHelpers::get_data_path() {
    return TestDataHelpers::get_data_path_default();
}