Refactored ie_plugin_config.hpp (#5899)

This commit is contained in:
Ilya Lavrenov
2021-06-01 16:31:29 +03:00
committed by GitHub
parent 07085debd9
commit eff9f00320
117 changed files with 250 additions and 411 deletions

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,5 +1,4 @@
#include <inference_engine.hpp>
#include <vector>
#include <ie_core.hpp>
int main() {
int FLAGS_bl = 1;

View File

@@ -1,4 +1,5 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include "cldnn/cldnn_config.hpp"
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,5 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include "cldnn/cldnn_config.hpp"
int main() {
using namespace InferenceEngine;

View File

@@ -1,7 +1,7 @@
#define CL_HPP_MINIMUM_OPENCL_VERSION 120
#define CL_HPP_TARGET_OPENCL_VERSION 120
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <CL/cl2.hpp>
#include <gpu/gpu_context_api_ocl.hpp>

View File

@@ -1,7 +1,7 @@
#define CL_HPP_MINIMUM_OPENCL_VERSION 120
#define CL_HPP_TARGET_OPENCL_VERSION 120
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <CL/cl2.hpp>
#include <gpu/gpu_context_api_ocl.hpp>

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <gpu/gpu_context_api_va.hpp>
#include <cldnn/cldnn_config.hpp>

View File

@@ -1,7 +1,7 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <ngraph/function.hpp>
#include <ngraph/pass/visualize_tree.hpp>
int main() {
using namespace InferenceEngine;
//! [part0]

View File

@@ -1,6 +1,5 @@
#include <inference_engine.hpp>
#include <ngraph/pass/visualize_tree.hpp>
#include <ie_core.hpp>
#include <ngraph/function.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,6 +1,6 @@
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
#include <ngraph/function.hpp>
#include <ngraph/variant.hpp>
int main() {
InferenceEngine::Core core;

View File

@@ -1,7 +1,6 @@
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include <ie_core.hpp>
#include <ngraph/function.hpp>
#include "hetero/hetero_plugin_config.hpp"
#include <ngraph/variant.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,5 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <ie_plugin_config.hpp>
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -3,7 +3,7 @@
//
#include <iostream>
#include <inference_engine.hpp>
#include <ie_core.hpp>
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
const std::string output_name = "output_name";

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
int main() {
//! [part3]

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
int main() {
const std::map<std::string, std::string> hddl_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };

View File

@@ -1,6 +1,4 @@
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_core.hpp>
int main() {
std::string device_name = "MULTI:HDDL,GPU";

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
std::string deviceName = "Device name";

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <ngraph/ngraph.hpp>
#include "onnx_import/onnx.hpp"
#include <iostream>

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <ngraph/ngraph.hpp>
#include "onnx_import/onnx.hpp"

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <ngraph/ngraph.hpp>
#include "onnx_import/onnx.hpp"
#include <iostream>

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <ngraph/ngraph.hpp>
#include "onnx_import/onnx.hpp"
#include <iostream>

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,8 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include <ie_input_info.hpp>
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,8 +1,5 @@
#include <inference_engine.hpp>
#include <opencv2/core/core.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
InferenceEngine::InferRequest inferRequest;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
InferenceEngine::Core core;

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
//! [part8]

View File

@@ -1,7 +1,4 @@
#include <inference_engine.hpp>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include <ie_core.hpp>
int main() {
using namespace InferenceEngine;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
int main() {
InferenceEngine::Core core;

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include "ngraph/opsets/opset.hpp"
#include "ngraph/opsets/opset3.hpp"

View File

@@ -1,4 +1,4 @@
#include <inference_engine.hpp>
#include <ie_core.hpp>
#include <fstream>
#include <vector>

View File

@@ -7,7 +7,6 @@
#include <ie_plugin_config.hpp>
#include <ie_algorithm.hpp>
#include <hetero/hetero_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include <ngraph/op/util/op_types.hpp>

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/config.hpp"
#include <template/template_config.hpp>

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/preprocessing.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/set_preprocess.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -4,7 +4,6 @@
#include "ie_api_impl.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"
#include "ie_plugin_config.hpp"

View File

@@ -1,38 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header that defines advanced related properties for Auto plugin.
* These properties should be used in SetConfig() and LoadNetwork() methods
*
* @file auto_config.hpp
*/
#pragma once
#include "ie_plugin_config.hpp"
namespace InferenceEngine {
/**
* @brief Auto plugin configuration
*/
namespace AutoConfigParams {
/**
* @def AUTO_CONFIG_KEY(name)
 * @brief A macro which provides an AUTO-mangled name for configuration key with name `name`
*/
#define AUTO_CONFIG_KEY(name) InferenceEngine::AutoConfigParams::_CONFIG_KEY(AUTO_##name)
#define DECLARE_AUTO_CONFIG_KEY(name) DECLARE_CONFIG_KEY(AUTO_##name)
#define DECLARE_AUTO_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(AUTO_##name)
/**
* @brief Limit device list config option, with comma-separated devices listed
*/
DECLARE_AUTO_CONFIG_KEY(DEVICE_LIST);
} // namespace AutoConfigParams
} // namespace InferenceEngine

View File

@@ -58,72 +58,120 @@ DECLARE_GPU_METRIC_VALUE(HW_MATMUL);
namespace CLDNNConfigParams {
/**
* @brief shortcut for defining configuration keys
*/
* @brief shortcut for defining configuration keys
*/
#define CLDNN_CONFIG_KEY(name) InferenceEngine::CLDNNConfigParams::_CONFIG_KEY(CLDNN_##name)
#define DECLARE_CLDNN_CONFIG_KEY(name) DECLARE_CONFIG_KEY(CLDNN_##name)
#define DECLARE_CLDNN_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(CLDNN_##name)
/**
* @brief This key instructs the clDNN plugin to use the OpenCL queue priority hint
* as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf
* this option should be used with an unsigned integer value (1 is lowest priority)
* 0 means no priority hint is set and default queue is created.
*/
* @brief This key instructs the clDNN plugin to use the OpenCL queue priority hint
* as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf
* this option should be used with an unsigned integer value (1 is lowest priority)
* 0 means no priority hint is set and default queue is created.
*/
DECLARE_CLDNN_CONFIG_KEY(PLUGIN_PRIORITY);
/**
* @brief This key instructs the clDNN plugin to use throttle hints the OpenCL queue throttle hint
* as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf,
* chapter 9.19. This option should be used with an unsigned integer value (1 is lowest energy consumption)
* 0 means no throttle hint is set and default queue created.
*/
* @brief This key instructs the clDNN plugin to use throttle hints the OpenCL queue throttle hint
* as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf,
* chapter 9.19. This option should be used with an unsigned integer value (1 is lowest energy consumption)
* 0 means no throttle hint is set and default queue created.
*/
DECLARE_CLDNN_CONFIG_KEY(PLUGIN_THROTTLE);
/**
* @brief This key controls clDNN memory pool optimization.
* Turned off by default.
*/
* @brief This key controls clDNN memory pool optimization.
* Turned off by default.
*/
DECLARE_CLDNN_CONFIG_KEY(MEM_POOL);
/**
* @brief This key defines the directory name to which clDNN graph visualization will be dumped.
*/
* @brief This key defines the directory name to which clDNN graph visualization will be dumped.
*/
DECLARE_CLDNN_CONFIG_KEY(GRAPH_DUMPS_DIR);
/**
* @brief This key defines the directory name to which full program sources will be dumped.
*/
* @brief This key defines the directory name to which full program sources will be dumped.
*/
DECLARE_CLDNN_CONFIG_KEY(SOURCES_DUMPS_DIR);
/**
* @brief This key enables FP16 precision for quantized models.
* By default the model is converted to FP32 precision before running LPT. If this key is enabled (default), then non-quantized layers
 * will be converted back to FP16 after LPT, which might improve the performance if a model has a lot of compute operations in
* non-quantized path. This key has no effect if current device doesn't have INT8 optimization capabilities.
*/
* @brief This key enables FP16 precision for quantized models.
* By default the model is converted to FP32 precision before running LPT. If this key is enabled (default), then non-quantized layers
 * will be converted back to FP16 after LPT, which might improve the performance if a model has a lot of compute operations in
* non-quantized path. This key has no effect if current device doesn't have INT8 optimization capabilities.
*/
DECLARE_CLDNN_CONFIG_KEY(ENABLE_FP16_FOR_QUANTIZED_MODELS);
/**
* @brief This key should be set to correctly handle NV12 input without pre-processing.
* Turned off by default.
*/
* @brief This key should be set to correctly handle NV12 input without pre-processing.
* Turned off by default.
*/
DECLARE_CLDNN_CONFIG_KEY(NV12_TWO_INPUTS);
/**
* @brief This key sets the max number of host threads that can be used by GPU plugin on model loading.
* Default value is maximum number of threads available in the environment.
*/
* @brief This key sets the max number of host threads that can be used by GPU plugin on model loading.
* Default value is maximum number of threads available in the environment.
*/
DECLARE_CLDNN_CONFIG_KEY(MAX_NUM_THREADS);
/**
* @brief Turning on this key enables to unroll recurrent layers such as TensorIterator or Loop with fixed iteration count.
* This key is turned on by default. Turning this key on will achieve better inference performance for loops with not too many iteration counts (less than 16, as a rule of thumb).
* Turning this key off will achieve better performance for both graph loading time and inference time with many iteration counts (greater than 16).
* Note that turning this key on will increase the graph loading time in proportion to the iteration counts.
* Thus, this key should be turned off if graph loading time is considered to be most important target to optimize.*/
* @brief Turning on this key enables to unroll recurrent layers such as TensorIterator or Loop with fixed iteration count.
* This key is turned on by default. Turning this key on will achieve better inference performance for loops with not too many iteration counts (less than 16, as a rule of thumb).
* Turning this key off will achieve better performance for both graph loading time and inference time with many iteration counts (greater than 16).
* Note that turning this key on will increase the graph loading time in proportion to the iteration counts.
* Thus, this key should be turned off if graph loading time is considered to be most important target to optimize.*/
DECLARE_CLDNN_CONFIG_KEY(ENABLE_LOOP_UNROLLING);
} // namespace CLDNNConfigParams
namespace PluginConfigParams {
/**
* @brief Optimize GPU plugin execution to maximize throughput.
*
* It is passed to Core::SetConfig(), this option should be used with values:
* - KEY_GPU_THROUGHPUT_AUTO creates bare minimum of streams that might improve performance in some cases,
* this option allows to enable throttle hint for opencl queue thus reduce CPU load without significant performance
* drop
* - a positive integer value creates the requested number of streams
*/
DECLARE_CONFIG_VALUE(GPU_THROUGHPUT_AUTO);
DECLARE_CONFIG_KEY(GPU_THROUGHPUT_STREAMS);
/**
* @brief This key enables dumping of the kernels used by the plugin for custom layers.
*
* This option should be used with values: PluginConfigParams::YES or PluginConfigParams::NO (default)
*/
DECLARE_CONFIG_KEY(DUMP_KERNELS);
/**
* @brief This key controls performance tuning done or used by the plugin.
*
* This option should be used with values:
* PluginConfigParams::TUNING_DISABLED (default)
* PluginConfigParams::TUNING_USE_EXISTING - use existing data from tuning file
* PluginConfigParams::TUNING_CREATE - create tuning data for parameters not present in tuning file
* PluginConfigParams::TUNING_UPDATE - perform non-tuning updates like removal of invalid/deprecated data
* PluginConfigParams::TUNING_RETUNE - create tuning data for all parameters, even if already present
*
* For values TUNING_CREATE and TUNING_RETUNE the file will be created if it does not exist.
*/
DECLARE_CONFIG_KEY(TUNING_MODE);
DECLARE_CONFIG_VALUE(TUNING_CREATE);
DECLARE_CONFIG_VALUE(TUNING_USE_EXISTING);
DECLARE_CONFIG_VALUE(TUNING_DISABLED);
DECLARE_CONFIG_VALUE(TUNING_UPDATE);
DECLARE_CONFIG_VALUE(TUNING_RETUNE);
/**
* @brief This key defines the tuning data filename to be created/used
*/
DECLARE_CONFIG_KEY(TUNING_FILE);
} // namespace PluginConfigParams
} // namespace InferenceEngine

View File

@@ -136,4 +136,16 @@ namespace Metrics {
DECLARE_METRIC_KEY(GNA_LIBRARY_FULL_VERSION, std::string);
} // namespace Metrics
namespace PluginConfigParams {
/**
* @brief The key controls threading inside GNA Inference Engine plugin.
*
* It is passed to Core::SetConfig(), this option should be used with values:
* PluginConfigParams::YES or PluginConfigParams::NO
*/
DECLARE_CONFIG_KEY(SINGLE_THREAD);
} // namespace PluginConfigParams
} // namespace InferenceEngine

View File

@@ -26,7 +26,6 @@ namespace HeteroConfigParams {
*/
#define HETERO_CONFIG_KEY(name) InferenceEngine::HeteroConfigParams::_CONFIG_KEY(HETERO_##name)
#define DECLARE_HETERO_CONFIG_KEY(name) DECLARE_CONFIG_KEY(HETERO_##name)
#define DECLARE_HETERO_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(HETERO_##name)
/**
* @brief The key for enabling of dumping the topology with details of layers and details how

View File

@@ -17,6 +17,7 @@
#include "ie_version.hpp"
#include "ie_extension.h"
#include "ie_plugin_config.hpp"
#include "ie_remote_context.hpp"
#include "cpp/ie_executable_network.hpp"

View File

@@ -3,7 +3,7 @@
//
/**
* @brief A header for advanced hardware related properties for IE plugins
* @brief A header for advanced hardware related properties for Inference Engine plugins
* To use in SetConfig, LoadNetwork, ImportNetwork methods of plugins
*
* @file ie_plugin_config.hpp
@@ -268,18 +268,6 @@ DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_NUMA);
DECLARE_CONFIG_VALUE(CPU_THROUGHPUT_AUTO);
DECLARE_CONFIG_KEY(CPU_THROUGHPUT_STREAMS);
/**
* @brief Optimize GPU plugin execution to maximize throughput.
*
* It is passed to Core::SetConfig(), this option should be used with values:
* - KEY_GPU_THROUGHPUT_AUTO creates bare minimum of streams that might improve performance in some cases,
* this option allows to enable throttle hint for opencl queue thus reduce CPU load without significant performance
* drop
* - a positive integer value creates the requested number of streams
*/
DECLARE_CONFIG_VALUE(GPU_THROUGHPUT_AUTO);
DECLARE_CONFIG_KEY(GPU_THROUGHPUT_STREAMS);
/**
* @brief The name for setting performance counters option.
*
@@ -303,18 +291,10 @@ DECLARE_CONFIG_KEY(PERF_COUNT);
*/
DECLARE_CONFIG_KEY(DYN_BATCH_LIMIT);
DECLARE_CONFIG_KEY(DYN_BATCH_ENABLED);
DECLARE_CONFIG_KEY(DUMP_QUANTIZED_GRAPH_AS_DOT);
DECLARE_CONFIG_KEY(DUMP_QUANTIZED_GRAPH_AS_IR);
/**
* @brief The key controls threading inside Inference Engine.
*
* It is passed to Core::SetConfig(), this option should be used with values:
* PluginConfigParams::YES or PluginConfigParams::NO
* @brief The key checks whether dynamic batch is enabled.
*/
DECLARE_CONFIG_KEY(SINGLE_THREAD);
DECLARE_CONFIG_KEY(DYN_BATCH_ENABLED);
/**
* @brief This key directs the plugin to load a configuration file.
@@ -323,38 +303,6 @@ DECLARE_CONFIG_KEY(SINGLE_THREAD);
*/
DECLARE_CONFIG_KEY(CONFIG_FILE);
/**
* @brief This key enables dumping of the kernels used by the plugin for custom layers.
*
* This option should be used with values: PluginConfigParams::YES or PluginConfigParams::NO (default)
*/
DECLARE_CONFIG_KEY(DUMP_KERNELS);
/**
* @brief This key controls performance tuning done or used by the plugin.
*
* This option should be used with values:
* PluginConfigParams::TUNING_DISABLED (default)
* PluginConfigParams::TUNING_USE_EXISTING - use existing data from tuning file
* PluginConfigParams::TUNING_CREATE - create tuning data for parameters not present in tuning file
* PluginConfigParams::TUNING_UPDATE - perform non-tuning updates like removal of invalid/deprecated data
* PluginConfigParams::TUNING_RETUNE - create tuning data for all parameters, even if already present
*
* For values TUNING_CREATE and TUNING_RETUNE the file will be created if it does not exist.
*/
DECLARE_CONFIG_KEY(TUNING_MODE);
DECLARE_CONFIG_VALUE(TUNING_CREATE);
DECLARE_CONFIG_VALUE(TUNING_USE_EXISTING);
DECLARE_CONFIG_VALUE(TUNING_DISABLED);
DECLARE_CONFIG_VALUE(TUNING_UPDATE);
DECLARE_CONFIG_VALUE(TUNING_RETUNE);
/**
* @brief This key defines the tuning data filename to be created/used
*/
DECLARE_CONFIG_KEY(TUNING_FILE);
/**
* @brief the key for setting desirable log level.
*
@@ -391,12 +339,14 @@ DECLARE_CONFIG_KEY(DEVICE_ID);
DECLARE_CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS);
/**
* @deprecated Use InferenceEngine::ExecutableNetwork::GetExecGraphInfo::serialize method
* @brief This key enables dumping of the internal primitive graph.
*
* Should be passed into LoadNetwork method to enable dumping of internal graph of primitives and
* corresponding configuration information. Value is a name of output dot file without extension.
* Files `<dot_file_name>_init.dot` and `<dot_file_name>_perf.dot` will be produced.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::ExecutableNetwork::GetExecGraphInfo::serialize method")
DECLARE_CONFIG_KEY(DUMP_EXEC_GRAPH_AS_DOT);
@@ -432,4 +382,25 @@ DECLARE_CONFIG_KEY(ENFORCE_BF16);
DECLARE_CONFIG_KEY(CACHE_DIR);
} // namespace PluginConfigParams
/**
* @def AUTO_CONFIG_KEY(name)
* @brief A macro which provides an AUTO-mangled name for configuration key with name `name`
*/
#define AUTO_CONFIG_KEY(name) InferenceEngine::_CONFIG_KEY(AUTO_##name)
#define DECLARE_AUTO_CONFIG_KEY(name) DECLARE_CONFIG_KEY(AUTO_##name)
/**
* @brief Limit device list config option, with comma-separated devices listed
*/
DECLARE_AUTO_CONFIG_KEY(DEVICE_LIST);
} // namespace InferenceEngine
#include "hetero/hetero_plugin_config.hpp"
#include "multi-device/multi_device_config.hpp"
// remove in 2022.1 major release
#include "cldnn/cldnn_config.hpp"
#include "gna/gna_config.hpp"

View File

@@ -9,7 +9,6 @@
#pragma once
#include "ie_transformations.hpp"
#include "ie_plugin_config.hpp"
#include "ie_compound_blob.h"
#include "ie_core.hpp"

View File

@@ -15,7 +15,6 @@
#include <transformations/utils/utils.hpp>
#include <ie_icore.hpp>
#include <auto_plugin/auto_config.hpp>
#include "auto_plugin.hpp"
#include "ngraph_ops/convolution_ie.hpp"
#include "ngraph_ops/deconvolution_ie.hpp"
@@ -144,7 +143,7 @@ std::vector<AutoPlugin::DeviceInformation> AutoInferencePlugin::GetDeviceChoice(
std::vector<DeviceInformation> metaDevices;
std::vector<std::string> availableDevices;
auto deviceListConfig = config.find(IE::AutoConfigParams::KEY_AUTO_DEVICE_LIST);
auto deviceListConfig = config.find(IE::KEY_AUTO_DEVICE_LIST);
if (deviceListConfig == config.end()) {
availableDevices = GetCore()->GetAvailableDevices();
} else {

View File

@@ -26,7 +26,6 @@
#include "ie_plugin_config.hpp"
#include "ie_algorithm.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "hetero_plugin.hpp"
#include <ie_algorithm.hpp>

View File

@@ -12,7 +12,6 @@
#include <fstream>
#include <unordered_set>
#include "ie_plugin_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "hetero_executable_network.hpp"
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>

View File

@@ -9,10 +9,8 @@
#include <mutex>
#include <sys/stat.h>
#include <auto_plugin/auto_config.hpp>
#include <ie_core.hpp>
#include <ie_icore.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ngraph/opsets/opset.hpp>
#include <ngraph/ngraph.hpp>
#include <ngraph/graph_util.hpp>
@@ -60,7 +58,7 @@ Parsed<T> parseDeviceNameIntoConfig(const std::string& deviceName, const std::ma
if (deviceList.find("AUTO") != std::string::npos) {
IE_THROW() << "Device list for AUTO should not be AUTO";
}
config_[InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST] = deviceName.substr(std::string("AUTO:").size());
config_[InferenceEngine::KEY_AUTO_DEVICE_LIST] = deviceName.substr(std::string("AUTO:").size());
}
} else {
if (deviceName_.empty()) {

View File

@@ -82,7 +82,9 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
else
IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_DYN_BATCH_ENABLED
<< ". Expected only YES/NO";
IE_SUPPRESS_DEPRECATED_START
} else if (key.compare(PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT) == 0) {
IE_SUPPRESS_DEPRECATED_END
// empty string means that dumping is switched off
dumpToDot = val;
} else if (key.compare(PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE) == 0) {
@@ -92,10 +94,6 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
lpTransformsMode = LPTransformsMode::On;
else
IE_THROW() << "Wrong value for property key " << PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE;
} else if (key.compare(PluginConfigParams::KEY_DUMP_QUANTIZED_GRAPH_AS_DOT) == 0) {
dumpQuantizedGraphToDot = val;
} else if (key.compare(PluginConfigParams::KEY_DUMP_QUANTIZED_GRAPH_AS_IR) == 0) {
dumpQuantizedGraphToIr = val;
} else if (key == PluginConfigParams::KEY_ENFORCE_BF16) {
if (val == PluginConfigParams::YES) {
if (with_cpu_x86_avx512_core()) {
@@ -153,7 +151,9 @@ void Config::updateProperties() {
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_LIMIT, std::to_string(batchLimit) });
_config.insert({ PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, std::to_string(streamExecutorConfig._streams) });
_config.insert({ PluginConfigParams::KEY_CPU_THREADS_NUM, std::to_string(streamExecutorConfig._threads) });
IE_SUPPRESS_DEPRECATED_START
_config.insert({ PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, dumpToDot });
IE_SUPPRESS_DEPRECATED_END
if (enforceBF16)
_config.insert({ PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES });
else

View File

@@ -22,8 +22,6 @@ struct Config {
bool exclusiveAsyncRequests = false;
bool enableDynamicBatch = false;
std::string dumpToDot = "";
std::string dumpQuantizedGraphToDot = "";
std::string dumpQuantizedGraphToIr = "";
int batchLimit = 0;
InferenceEngine::IStreamsExecutor::Config streamExecutorConfig;

View File

@@ -13,7 +13,6 @@
#include "ie_metric_helpers.hpp"
#include <multi-device/multi_device_config.hpp>
#include <ie_plugin_config.hpp>
#include "multi_device_exec_network.hpp"
#include "multi_device_async_infer_request.hpp"

View File

@@ -12,7 +12,6 @@
#include <ie_metric_helpers.hpp>
#include <multi-device/multi_device_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include "multi_device_plugin.hpp"
#include <ie_algorithm.hpp>

View File

@@ -199,7 +199,6 @@ public:
options[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] = InferenceEngine::PluginConfigParams::NO;
}
options[InferenceEngine::PluginConfigParams::KEY_PERF_COUNT] = InferenceEngine::PluginConfigParams::YES;
options[InferenceEngine::PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT] = "egraph_test";
auto exec_net1 = ie.LoadNetwork(cnnNet, targetDevice, options);
auto req1 = exec_net1.CreateInferRequest();

View File

@@ -160,7 +160,6 @@ public:
options[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] = InferenceEngine::PluginConfigParams::NO;
}
options[InferenceEngine::PluginConfigParams::KEY_PERF_COUNT] = InferenceEngine::PluginConfigParams::YES;
options[InferenceEngine::PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT] = "egraph_test";
auto exec_net1 = ie.LoadNetwork(cnnNet, targetDevice, options);
auto req1 = exec_net1.CreateInferRequest();

View File

@@ -2,9 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "auto_plugin/auto_config.hpp"
#include "ie_plugin_config.hpp"
#include "behavior/config.hpp"
using namespace BehaviorTestsDefinitions;
@@ -44,17 +42,17 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> AutoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}}
};
@@ -95,11 +93,11 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoinconfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "OFF"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, "OFF"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "NAN"}}
};
@@ -108,7 +106,7 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoconf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,

View File

@@ -3,8 +3,7 @@
//
#include "behavior/exec_graph_info.hpp"
#include "multi-device/multi_device_config.hpp"
#include "auto_plugin/auto_config.hpp"
#include "ie_plugin_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
@@ -20,7 +19,7 @@ namespace {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{ InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}}
{{ InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_callback.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_config.hpp"
using namespace BehaviorTestsDefinitions;
@@ -54,19 +51,19 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> AutoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}}
};

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp"
using namespace BehaviorTestsDefinitions;
@@ -29,8 +26,8 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
using namespace BehaviorTestsDefinitions;
@@ -25,8 +22,8 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/perf_counters.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/preprocessing.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,10 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "behavior/set_blob_of_kind.hpp"
#include "common_test_utils/test_constants.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions;
using namespace InferenceEngine;

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include <auto_plugin/auto_config.hpp>
#include <base/behavior_test_utils.hpp>
#include "behavior/set_preprocess.hpp"
@@ -30,7 +28,7 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{ InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}}
{{ InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_CPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/test_plugin.hpp"
using namespace BehaviorTestsDefinitions;
@@ -40,8 +37,8 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> AutoConfigsInputOutput = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_CPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_CPU}},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_CPU},
{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}
};

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "behavior/version.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_callback.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero/hetero_plugin_config.hpp"
#include "behavior/version.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -2,10 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/config.hpp"
#include "cldnn/cldnn_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
@@ -37,15 +35,15 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoinconfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_DUMP_KERNELS, "ON"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_TUNING_MODE, "TUNING_UNKNOWN_MODE"}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}
};
@@ -80,11 +78,11 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoconf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU}}
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "behavior/exec_graph_info.hpp"
using namespace BehaviorTestsDefinitions;

View File

@@ -19,11 +19,11 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoconfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU}}
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_callback.hpp"
using namespace BehaviorTestsDefinitions;
@@ -27,7 +24,7 @@ const std::vector<std::map<std::string, std::string>> autoConfigs = {
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_config.hpp"
using namespace BehaviorTestsDefinitions;
@@ -23,11 +20,11 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU}}
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,

View File

@@ -2,10 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include <auto_plugin/auto_config.hpp>
#include "behavior/infer_request_input.hpp"
#include "cldnn/cldnn_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
@@ -28,13 +26,13 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS,
InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,

View File

@@ -2,10 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp"
#include "cldnn/cldnn_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
@@ -24,12 +22,12 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/perf_counters.hpp"
using namespace BehaviorTestsDefinitions;
@@ -21,7 +19,7 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PerfCountersTest,

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include <base/behavior_test_utils.hpp>
#include "behavior/set_preprocess.hpp"
@@ -25,11 +23,11 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> autoConfigs = {
{{ InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU}}
{{ InferenceEngine::KEY_AUTO_DEVICE_LIST , CommonTestUtils::DEVICE_GPU}}
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,

View File

@@ -2,10 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "behavior/test_plugin.hpp"
#include "cldnn/cldnn_config.hpp"
using namespace BehaviorTestsDefinitions;
namespace {
@@ -30,7 +28,7 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
const std::vector<std::map<std::string, std::string>> configsInput = {
@@ -45,14 +43,14 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> AutoConfigsInputOutput = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU}},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};
const std::vector<std::map<std::string, std::string>> AutoCGConfigsInputOutput = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}},
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST, std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}},
{{InferenceEngine::KEY_AUTO_DEVICE_LIST, std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU},
{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}
};

View File

@@ -2,9 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "auto_plugin/auto_config.hpp"
#include "multi-device/multi_device_config.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "behavior/version.hpp"
using namespace BehaviorTestsDefinitions;
@@ -22,7 +19,7 @@ namespace {
};
const std::vector<std::map<std::string, std::string>> auto_cpu_gpu_conf = {
{{InferenceEngine::AutoConfigParams::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
{{InferenceEngine::KEY_AUTO_DEVICE_LIST , std::string(CommonTestUtils::DEVICE_CPU) + "," + CommonTestUtils::DEVICE_GPU}}
};
const std::vector<std::map<std::string, std::string>> Heteroconfigs = {

View File

@@ -4,6 +4,7 @@
#include <string>
#include <vector>
#include "cldnn/cldnn_config.hpp"
#include "multi/multi_remote_blob_tests.hpp"
#include "common_test_utils/test_constants.hpp"
@@ -19,8 +20,7 @@ INSTANTIATE_TEST_CASE_P(smoke_RemoteBlobMultiGPU, MultiDevice_SupportTest,
::testing::ValuesIn(device_names_and_support_for_remote_blobs), MultiDevice_SupportTest::getTestCaseName);
TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) {
InferenceEngine::CNNNetwork net;
net = CNNNetwork(fn_ptr);
InferenceEngine::CNNNetwork net(fn_ptr);
auto ie = PluginCache::get().ie();
// load a network to the GPU to make sure we have a remote context
auto exec_net = ie->LoadNetwork(net, GPU);
@@ -32,14 +32,14 @@ TEST_P(MultiDevice_Test, cannotInferRemoteBlobIfNotInitializedForDevice) {
auto rblob = InferenceEngine::make_shared_blob(first_input->getTensorDesc(), ctx);
rblob->allocate();
ExecutableNetwork exec_net_multi;
InferenceEngine::ExecutableNetwork exec_net_multi;
try {
exec_net_multi = ie->LoadNetwork(net, device_names);
} catch(...) {
// device is unavailable (e.g. for the "second GPU" test) or other (e.g. env) issues not related to the test
return;
}
InferRequest req = exec_net_multi.CreateInferRequest();
InferenceEngine::InferRequest req = exec_net_multi.CreateInferRequest();
ASSERT_TRUE(req);
ASSERT_NO_THROW(req.SetBlob(first_input_name, rblob));
ASSERT_NO_THROW(req.StartAsync());

View File

@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "multi-device/multi_device_config.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include "vpu/private_plugin_config.hpp"
#include "behavior/config.hpp"

Some files were not shown because too many files have changed in this diff Show More