[GPU] Removed legacy public gpu config and related processing (#14707)
This commit is contained in:
parent 787ba3de4f
commit ee256e801c
@@ -1,130 +0,0 @@ (deleted: cldnn_config.hpp)
-// Copyright (C) 2018-2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief A header for advanced hardware related properties for clDNN plugin
- *        To use in SetConfig() method of plugins
- *
- * @file cldnn_config.hpp
- */
-#pragma once
-
-#include "gpu/gpu_config.hpp"
-#include "ie_api.h"
-#include "ie_plugin_config.hpp"
-
-namespace InferenceEngine {
-
-/**
- * @brief GPU plugin configuration
- */
-namespace CLDNNConfigParams {
-
-/**
- * @brief shortcut for defining configuration keys
- */
-#define CLDNN_CONFIG_KEY(name) InferenceEngine::CLDNNConfigParams::_CONFIG_KEY(CLDNN_##name)
-#define DECLARE_CLDNN_CONFIG_KEY(name) DECLARE_CONFIG_KEY(CLDNN_##name)
-#define DECLARE_CLDNN_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(CLDNN_##name)
-
-/**
- * @brief This key instructs the clDNN plugin to use the OpenCL queue priority hint
- * as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf.
- * This option should be used with an unsigned integer value (1 is the lowest priority);
- * 0 means no priority hint is set and the default queue is created.
- */
-INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GPUConfigParams::GPU_PLUGIN_PRIORITY instead")
-DECLARE_CLDNN_CONFIG_KEY(PLUGIN_PRIORITY);
-
-/**
- * @brief This key instructs the clDNN plugin to use the OpenCL queue throttle hint
- * as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf,
- * chapter 9.19. This option should be used with an unsigned integer value (1 is the lowest energy consumption);
- * 0 means no throttle hint is set and the default queue is created.
- */
-INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GPUConfigParams::GPU_PLUGIN_THROTTLE instead")
-DECLARE_CLDNN_CONFIG_KEY(PLUGIN_THROTTLE);
-
-/**
- * @brief This key controls clDNN memory pool optimization.
- * Turned off by default.
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_CLDNN_CONFIG_KEY(MEM_POOL);
-
-/**
- * @brief This key defines the directory name to which clDNN graph visualization will be dumped.
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_CLDNN_CONFIG_KEY(GRAPH_DUMPS_DIR);
-
-/**
- * @brief This key defines the directory name to which full program sources will be dumped.
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_CLDNN_CONFIG_KEY(SOURCES_DUMPS_DIR);
-
-/**
- * @brief This key enables FP16 precision for quantized models.
- * By default the model is converted to FP32 precision before running LPT. If this key is enabled (default), then
- * non-quantized layers will be converted back to FP16 after LPT, which might improve the performance if a model has a
- * lot of compute operations in the non-quantized path. This key has no effect if the current device doesn't have INT8
- * optimization capabilities.
- */
-DECLARE_CLDNN_CONFIG_KEY(ENABLE_FP16_FOR_QUANTIZED_MODELS);
-
-/**
- * @brief This key should be set to correctly handle NV12 input without pre-processing.
- * Turned off by default.
- */
-INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::GPUConfigParams::GPU_NV12_TWO_INPUTS instead")
-DECLARE_CLDNN_CONFIG_KEY(NV12_TWO_INPUTS);
-
-}  // namespace CLDNNConfigParams
-
-namespace PluginConfigParams {
-
-/**
- * @brief This key enables dumping of the kernels used by the plugin for custom layers.
- *
- * This option should be used with values: PluginConfigParams::YES or PluginConfigParams::NO (default)
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_CONFIG_KEY(DUMP_KERNELS);
-
-/**
- * @brief This key controls performance tuning done or used by the plugin.
- *
- * This option should be used with values:
- * PluginConfigParams::TUNING_DISABLED (default)
- * PluginConfigParams::TUNING_USE_EXISTING - use existing data from the tuning file
- * PluginConfigParams::TUNING_CREATE - create tuning data for parameters not present in the tuning file
- * PluginConfigParams::TUNING_UPDATE - perform non-tuning updates like removal of invalid/deprecated data
- * PluginConfigParams::TUNING_RETUNE - create tuning data for all parameters, even if already present
- *
- * For values TUNING_CREATE and TUNING_RETUNE the file will be created if it does not exist.
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_CONFIG_KEY(TUNING_MODE);
-
-INFERENCE_ENGINE_DEPRECATED("The config value will be removed")
-DECLARE_CONFIG_VALUE(TUNING_CREATE);
-INFERENCE_ENGINE_DEPRECATED("The config value will be removed")
-DECLARE_CONFIG_VALUE(TUNING_USE_EXISTING);
-INFERENCE_ENGINE_DEPRECATED("The config value will be removed")
-DECLARE_CONFIG_VALUE(TUNING_DISABLED);
-INFERENCE_ENGINE_DEPRECATED("The config value will be removed")
-DECLARE_CONFIG_VALUE(TUNING_UPDATE);
-INFERENCE_ENGINE_DEPRECATED("The config value will be removed")
-DECLARE_CONFIG_VALUE(TUNING_RETUNE);
-
-/**
- * @brief This key defines the tuning data filename to be created/used
- */
-INFERENCE_ENGINE_DEPRECATED("The config key will be removed")
-DECLARE_CONFIG_KEY(TUNING_FILE);
-
-}  // namespace PluginConfigParams
-
-}  // namespace InferenceEngine
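Migration note: with cldnn_config.hpp gone, code that still sets the deprecated CLDNN_* keys has to switch to the GPUConfigParams equivalents named in the deprecation messages above (or to the OpenVINO 2.0 properties). A minimal sketch, using the IE 1.0 API that the rest of this commit still targets; the key names come straight from this diff:

    #include <ie_core.hpp>
    #include <gpu/gpu_config.hpp>

    int main() {
        InferenceEngine::Core core;
        // Replaces the removed {KEY_CLDNN_PLUGIN_PRIORITY, "1"} /
        // {KEY_CLDNN_PLUGIN_THROTTLE, "1"} pairs.
        core.SetConfig({{InferenceEngine::GPUConfigParams::KEY_GPU_PLUGIN_PRIORITY, "1"},
                        {InferenceEngine::GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE, "1"}},
                       "GPU");
    }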
@@ -475,9 +475,9 @@ DECLARE_CONFIG_KEY(FORCE_TBB_TERMINATE);
 }  // namespace InferenceEngine
 
+#include "gpu/gpu_config.hpp"
 #include "hetero/hetero_plugin_config.hpp"
 #include "multi-device/multi_device_config.hpp"
 
 // remove in 2022.1 major release
-#include "cldnn/cldnn_config.hpp"
 #include "gna/gna_config.hpp"
 
@@ -24,18 +24,13 @@ struct Config {
           useProfiling(false),
           dumpCustomKernels(false),
           exclusiveAsyncRequests(false),
-          memory_pool_on(true),
           enableDynamicBatch(false),
           enableInt8(true),
           nv12_two_inputs(false),
-          enable_fp16_for_quantized_models(true),
           queuePriority(cldnn::priority_mode_types::med),
           queueThrottle(cldnn::throttle_mode_types::med),
           max_dynamic_batch(1),
           customLayers({}),
-          tuningConfig(),
-          graph_dumps_dir(""),
-          sources_dumps_dir(""),
           kernels_cache_dir(""),
           inference_precision(ov::element::undefined),
           task_exec_config({"GPU plugin internal task executor",  // name
@@ -70,18 +65,13 @@ struct Config {
     bool useProfiling;
     bool dumpCustomKernels;
     bool exclusiveAsyncRequests;
-    bool memory_pool_on;
     bool enableDynamicBatch;
     bool enableInt8;
     bool nv12_two_inputs;
-    bool enable_fp16_for_quantized_models;
     cldnn::priority_mode_types queuePriority;
     cldnn::throttle_mode_types queueThrottle;
     int max_dynamic_batch;
     CustomLayerMap customLayers;
-    cldnn::tuning_config_options tuningConfig;
-    std::string graph_dumps_dir;
-    std::string sources_dumps_dir;
     std::string kernels_cache_dir;
     ov::element::Type inference_precision;
     InferenceEngine::IStreamsExecutor::Config task_exec_config;
@@ -7,7 +7,6 @@
 #include <ie_system_conf.h>
 #include <sys/stat.h>
 
-#include <cldnn/cldnn_config.hpp>
 #include <gpu/gpu_config.hpp>
 #include <thread>
 
@@ -92,16 +91,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap,
             } else {
                 IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
             }
-        } else if (key.compare(PluginConfigParams::KEY_DUMP_KERNELS) == 0) {
-            if (val.compare(PluginConfigParams::YES) == 0) {
-                dumpCustomKernels = true;
-            } else if (val.compare(PluginConfigParams::NO) == 0) {
-                dumpCustomKernels = false;
-            } else {
-                IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
-            }
-        } else if (key.compare(GPUConfigParams::KEY_GPU_PLUGIN_PRIORITY) == 0 ||
-                   key.compare(CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY) == 0) {
+        } else if (key.compare(GPUConfigParams::KEY_GPU_PLUGIN_PRIORITY) == 0) {
             std::stringstream ss(val);
             uint32_t uVal(0);
             ss >> uVal;
@@ -160,8 +150,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap,
                 task_exec_config._streams =
                     std::min(task_exec_config._streams, static_cast<int>(std::thread::hardware_concurrency()));
             }
-        } else if (key.compare(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) == 0 ||
-                   key.compare(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) == 0) {
+        } else if (key.compare(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) == 0) {
             std::stringstream ss(val);
             uint32_t uVal(0);
             ss >> uVal;
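Usage note: both hints still take an unsigned integer parsed through std::stringstream as shown above, but only the GPU_* spelling remains. The typed OpenVINO 2.0 properties (ov::intel_gpu::hint::queue_throttle also appears later in this diff) are the forward-looking alternative; a hedged sketch, assuming the standard intel_gpu properties header:

    #include <openvino/runtime/core.hpp>
    #include <openvino/runtime/intel_gpu/properties.hpp>

    int main() {
        ov::Core core;
        // Typed equivalents of the "GPU_PLUGIN_THROTTLE"/"GPU_PLUGIN_PRIORITY" string keys.
        core.set_property("GPU", ov::intel_gpu::hint::queue_throttle(
                                     ov::intel_gpu::hint::ThrottleLevel::LOW));
        core.set_property("GPU", ov::intel_gpu::hint::queue_priority(ov::hint::Priority::HIGH));
    }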
@@ -200,45 +189,11 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap,
             for (auto& file : configFiles) {
                 CustomLayer::LoadFromFile(file, customLayers);
             }
-        } else if (key.compare(PluginConfigParams::KEY_TUNING_MODE) == 0) {
-            if (val.compare(PluginConfigParams::TUNING_DISABLED) == 0) {
-                tuningConfig.mode = cldnn::tuning_mode::tuning_disabled;
-            } else if (val.compare(PluginConfigParams::TUNING_CREATE) == 0) {
-                tuningConfig.mode = cldnn::tuning_mode::tuning_tune_and_cache;
-            } else if (val.compare(PluginConfigParams::TUNING_USE_EXISTING) == 0) {
-                tuningConfig.mode = cldnn::tuning_mode::tuning_use_cache;
-            } else if (val.compare(PluginConfigParams::TUNING_UPDATE) == 0) {
-                tuningConfig.mode = cldnn::tuning_mode::tuning_use_and_update;
-            } else if (val.compare(PluginConfigParams::TUNING_RETUNE) == 0) {
-                tuningConfig.mode = cldnn::tuning_mode::tuning_retune_and_cache;
-            } else {
-                IE_THROW(NotFound) << "Unsupported tuning mode value by plugin: " << val;
-            }
-        } else if (key.compare(PluginConfigParams::KEY_TUNING_FILE) == 0) {
-            tuningConfig.cache_file_path = val;
-        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_MEM_POOL) == 0) {
-            if (val.compare(PluginConfigParams::YES) == 0) {
-                memory_pool_on = true;
-            } else if (val.compare(PluginConfigParams::NO) == 0) {
-                memory_pool_on = false;
-            } else {
-                IE_THROW(NotFound) << "Unsupported memory pool flag value: " << val;
-            }
-        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_GRAPH_DUMPS_DIR) == 0) {
-            if (!val.empty()) {
-                graph_dumps_dir = val;
-                createDirectory(graph_dumps_dir);
-            }
         } else if (key.compare(PluginConfigParams::KEY_CACHE_DIR) == 0 || key == ov::cache_dir) {
             if (!val.empty()) {
                 kernels_cache_dir = val;
                 createDirectory(kernels_cache_dir);
             }
-        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_SOURCES_DUMPS_DIR) == 0) {
-            if (!val.empty()) {
-                sources_dumps_dir = val;
-                createDirectory(sources_dumps_dir);
-            }
         } else if (key.compare(PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS) == 0) {
             if (val.compare(PluginConfigParams::YES) == 0) {
                 exclusiveAsyncRequests = true;
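Of the directory-style keys handled here, only the kernels cache directory survives, reachable through both the legacy KEY_CACHE_DIR string and the ov::cache_dir property kept in the retained branch above. A minimal sketch of the surviving path:

    #include <openvino/runtime/core.hpp>
    #include <openvino/runtime/properties.hpp>

    int main() {
        ov::Core core;
        // Compiled-kernel caching; the tuning/dump directories have no replacement here.
        core.set_property(ov::cache_dir("gpu_kernel_cache"));
    }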
@@ -282,8 +237,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap,
             } else {
                 IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
             }
-        } else if (key.compare(GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS) == 0 ||
-                   key.compare(CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS) == 0) {
+        } else if (key.compare(GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS) == 0) {
             if (val.compare(PluginConfigParams::YES) == 0) {
                 nv12_two_inputs = true;
             } else if (val.compare(PluginConfigParams::NO) == 0) {
@@ -291,14 +245,6 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap,
             } else {
                 IE_THROW(NotFound) << "Unsupported NV12 flag value: " << val;
             }
-        } else if (key.compare(CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS) == 0) {
-            if (val.compare(PluginConfigParams::YES) == 0) {
-                enable_fp16_for_quantized_models = true;
-            } else if (val.compare(PluginConfigParams::NO) == 0) {
-                enable_fp16_for_quantized_models = false;
-            } else {
-                IE_THROW(NotFound) << "Unsupported KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS flag value: " << val;
-            }
         } else if (key.compare(GPUConfigParams::KEY_GPU_MAX_NUM_THREADS) == 0 || key == ov::compilation_num_threads) {
             int max_threads = std::max(1, static_cast<int>(std::thread::hardware_concurrency()));
             try {
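NV12 handling keeps only the GPU_NV12_TWO_INPUTS spelling; the CLDNN alias and the FP16-for-quantized-models switch are gone. Setting the surviving key, as exercised by the tests further down in this commit:

    #include <ie_core.hpp>
    #include <gpu/gpu_config.hpp>

    int main() {
        InferenceEngine::Core core;
        // Tells the plugin to treat NV12 input as two surfaces (Y + UV) without pre-processing.
        core.SetConfig({{InferenceEngine::GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS,
                         InferenceEngine::PluginConfigParams::YES}},
                       "GPU");
    }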
@@ -353,39 +299,22 @@ void Config::adjustKeyMapValues() {
         key_config_map[ov::enable_profiling.name()] = PluginConfigParams::NO;
     }
 
-    if (dumpCustomKernels)
-        key_config_map[PluginConfigParams::KEY_DUMP_KERNELS] = PluginConfigParams::YES;
-    else
-        key_config_map[PluginConfigParams::KEY_DUMP_KERNELS] = PluginConfigParams::NO;
-
     if (exclusiveAsyncRequests)
         key_config_map[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::YES;
     else
         key_config_map[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::NO;
 
-    if (memory_pool_on)
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_MEM_POOL] = PluginConfigParams::YES;
-    else
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_MEM_POOL] = PluginConfigParams::NO;
-
     if (enableDynamicBatch)
         key_config_map[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;
     else
         key_config_map[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::NO;
 
     if (nv12_two_inputs) {
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS] = PluginConfigParams::YES;
         key_config_map[GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS] = PluginConfigParams::YES;
     } else {
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS] = PluginConfigParams::NO;
         key_config_map[GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS] = PluginConfigParams::NO;
     }
 
-    if (enable_fp16_for_quantized_models)
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS] = PluginConfigParams::YES;
-    else
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS] = PluginConfigParams::NO;
-
     key_config_map[ov::hint::inference_precision.name()] = inference_precision.get_type_name();
 
     {
@@ -420,7 +349,6 @@ void Config::adjustKeyMapValues() {
         default:
             break;
         }
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY] = qp;
         key_config_map[GPUConfigParams::KEY_GPU_PLUGIN_PRIORITY] = qp;
     }
     {
@@ -448,7 +376,6 @@ void Config::adjustKeyMapValues() {
         default:
             break;
         }
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE] = qt;
         key_config_map[GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE] = qt;
     }
     {
@@ -471,30 +398,7 @@ void Config::adjustKeyMapValues() {
         hostTaskPriority = ov::util::to_string(ov::hint::Priority::MEDIUM);
         key_config_map[ov::intel_gpu::hint::host_task_priority.name()] = hostTaskPriority;
     }
-    {
-        std::string tm = PluginConfigParams::TUNING_DISABLED;
-        switch (tuningConfig.mode) {
-        case cldnn::tuning_mode::tuning_tune_and_cache:
-            tm = PluginConfigParams::TUNING_CREATE;
-            break;
-        case cldnn::tuning_mode::tuning_use_cache:
-            tm = PluginConfigParams::TUNING_USE_EXISTING;
-            break;
-        case cldnn::tuning_mode::tuning_use_and_update:
-            tm = PluginConfigParams::TUNING_UPDATE;
-            break;
-        case cldnn::tuning_mode::tuning_retune_and_cache:
-            tm = PluginConfigParams::TUNING_RETUNE;
-            break;
-        default:
-            break;
-        }
-        key_config_map[PluginConfigParams::KEY_TUNING_MODE] = tm;
-        key_config_map[PluginConfigParams::KEY_TUNING_FILE] = tuningConfig.cache_file_path;
-    }
 
-    key_config_map[CLDNNConfigParams::KEY_CLDNN_GRAPH_DUMPS_DIR] = graph_dumps_dir;
-    key_config_map[CLDNNConfigParams::KEY_CLDNN_SOURCES_DUMPS_DIR] = sources_dumps_dir;
     key_config_map[PluginConfigParams::KEY_CACHE_DIR] = kernels_cache_dir;
     key_config_map[ov::cache_dir.name()] = kernels_cache_dir;
 
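Since adjustKeyMapValues() builds the map the plugin advertises, every key deleted above also disappears from the plugin's reported configuration. One way to check, using the standard IE metric API (a sketch; the device name "GPU" is assumed available):

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>
    #include <string>
    #include <vector>

    int main() {
        InferenceEngine::Core core;
        auto keys = core.GetMetric("GPU", METRIC_KEY(SUPPORTED_CONFIG_KEYS))
                        .as<std::vector<std::string>>();
        // After this change the list no longer contains the CLDNN_*, TUNING_*,
        // or DUMP_KERNELS entries.
    }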
@@ -560,12 +464,8 @@ bool Config::CanShareContextWith(const Config& other) const {
     return this->throughput_streams == other.throughput_streams &&
            this->useProfiling == other.useProfiling &&
            this->dumpCustomKernels == other.dumpCustomKernels &&
-           this->memory_pool_on == other.memory_pool_on &&
            this->queueThrottle == other.queueThrottle &&
            this->queuePriority == other.queuePriority &&
-           this->sources_dumps_dir == other.sources_dumps_dir &&
-           this->tuningConfig.mode == other.tuningConfig.mode &&
-           this->tuningConfig.cache_file_path == other.tuningConfig.cache_file_path &&
           this->kernels_cache_dir == other.kernels_cache_dir &&
            this->device_id == other.device_id &&
            this->task_exec_config._streams == other.task_exec_config._streams &&
@@ -13,7 +13,6 @@
 #include "intel_gpu/plugin/graph.hpp"
 #include "intel_gpu/plugin/simple_math.hpp"
-#include <cldnn/cldnn_config.hpp>
 #include "intel_gpu/plugin/infer_request.hpp"
 #include "intel_gpu/plugin/itt.hpp"
 
@@ -136,14 +135,14 @@ std::shared_ptr<cldnn::network> Graph::BuildNetwork(std::shared_ptr<cldnn::progr
         network = std::make_shared<cldnn::network>(program, m_stream_id);
     }
 
-    if (!m_config.graph_dumps_dir.empty() && m_stream_id == 0) {
+    GPU_DEBUG_GET_INSTANCE(debug_config);
+    GPU_DEBUG_IF(!debug_config->dump_graphs.empty() && m_stream_id == 0) {
         static int net_id = 0;
         auto steps_info = network->get_optimizer_passes_info();
         size_t step_idx = 0;
         for (auto& step : steps_info) {
             CNNNetwork net(GetExecGraphInfoByPrimitivesInfo(step.second, true));
-            net.serialize(m_config.graph_dumps_dir + std::to_string(net_id) + "_" +
+            net.serialize(debug_config->dump_graphs + std::to_string(net_id) + "_" +
                           std::to_string(step_idx) + "_" + step.first + "_graph.xml");
             step_idx++;
         }
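Note: graph dumping now flows through the plugin's internal debug configuration (debug_config->dump_graphs) instead of the removed public KEY_CLDNN_GRAPH_DUMPS_DIR. Assuming dump_graphs is populated from an environment variable, as intel_gpu debug options typically are (the variable name below is an assumption, not confirmed by this diff):

    #include <cstdlib>

    int main() {
        // Assumed variable name; must be set before the GPU plugin is loaded.
        setenv("OV_GPU_DumpGraphs", "/tmp/gpu_dumps/", 1);
        // ... create the Core and compile the model afterwards ...
    }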
@@ -353,7 +353,6 @@ void Plugin::SetConfig(const std::map<std::string, std::string> &config) {
     streamsSet = config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) != config.end() ||
                  config.find(ov::num_streams.name()) != config.end();
     throttlingSet = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() ||
-                    config.find(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) != config.end() ||
                     config.find(ov::intel_gpu::hint::queue_throttle.name()) != config.end();
     std::string device_id;
     cldnn::device_info device_info = device_map.begin()->second->get_info();
@@ -882,7 +881,7 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
     auto engine_params = Plugin::GetParams(config, device, nullptr);
     auto engine = cldnn::engine::create(engine_params.engine_type, engine_params.runtime_type, device,
                                         cldnn::engine_configuration(false, engine_params.queue_type, std::string(),
-                                                                    config.queuePriority, config.queueThrottle, config.memory_pool_on,
+                                                                    config.queuePriority, config.queueThrottle, true,
                                                                     engine_params.use_unified_shared_memory, std::string(), config.throughput_streams),
                                         engine_params.task_executor);
 
@@ -314,9 +314,6 @@ std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::sha
     OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Program::BuildProgram");
     cldnn::build_options options;
 
-    if (!m_config.graph_dumps_dir.empty()) {
-        options.set_option(cldnn::build_option::graph_dumps_dir(m_config.graph_dumps_dir));
-    }
     for (const auto& op : ops) {
         if (op->is_dynamic()) {
             allow_new_shape_infer = true;
@@ -326,7 +323,6 @@ std::shared_ptr<cldnn::program> Program::BuildProgram(const std::vector<std::sha
 
     options.set_option(cldnn::build_option::allow_new_shape_infer(allow_new_shape_infer));
     options.set_option(cldnn::build_option::optimize_data(true));
-    options.set_option(cldnn::build_option::tuning_config(m_config.tuningConfig));
     if (partialBuild) {
         options.set_option(cldnn::build_option::partial_build_program(true));
     }
|
@ -336,19 +336,15 @@ ExecutionContextImpl::ExecutionContextImpl(const std::shared_ptr<IInferencePlugi
|
|||||||
iter = device_map.begin();
|
iter = device_map.begin();
|
||||||
auto& dev = iter->second;
|
auto& dev = iter->second;
|
||||||
|
|
||||||
bool enable_profiling = (m_config.useProfiling ||
|
|
||||||
(m_config.tuningConfig.mode == cldnn::tuning_mode::tuning_tune_and_cache) ||
|
|
||||||
(m_config.tuningConfig.mode == cldnn::tuning_mode::tuning_retune_and_cache));
|
|
||||||
|
|
||||||
auto engine_params = Plugin::GetParams(m_config, dev, m_external_queue);
|
auto engine_params = Plugin::GetParams(m_config, dev, m_external_queue);
|
||||||
m_engine = cldnn::engine::create(engine_params.engine_type,
|
m_engine = cldnn::engine::create(engine_params.engine_type,
|
||||||
engine_params.runtime_type, dev,
|
engine_params.runtime_type, dev,
|
||||||
cldnn::engine_configuration(enable_profiling,
|
cldnn::engine_configuration(m_config.useProfiling,
|
||||||
engine_params.queue_type,
|
engine_params.queue_type,
|
||||||
m_config.sources_dumps_dir,
|
std::string(),
|
||||||
m_config.queuePriority,
|
m_config.queuePriority,
|
||||||
m_config.queueThrottle,
|
m_config.queueThrottle,
|
||||||
m_config.memory_pool_on,
|
true,
|
||||||
engine_params.use_unified_shared_memory,
|
engine_params.use_unified_shared_memory,
|
||||||
m_config.kernels_cache_dir,
|
m_config.kernels_cache_dir,
|
||||||
m_config.throughput_streams),
|
m_config.throughput_streams),
|
||||||
|
@@ -424,14 +424,6 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
     OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "TransformationsPipeline::apply::lpt");
     using namespace ngraph::pass::low_precision;
 
-    // Conversion to FP32 might be needed for quantized models that face any fp16 related issues (e.g. overflow) for non-quantized layers
-    // With this key users can work-around such issues
-    if (!config.enable_fp16_for_quantized_models) {
-        ngraph::pass::Manager manager;
-        manager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ ngraph::element::f16, ngraph::element::f32 }});
-        manager.run_passes(func);
-    }
-
     auto supportedPrecisions = std::vector<PrecisionsRestriction>({
         PrecisionsRestriction::create<ngraph::opset1::Convolution>({
             {{0}, {ngraph::element::u8, ngraph::element::i8}},
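With this work-around key gone, precision control is left to the inference_precision setting that Config retains (see the key map change above). A sketch of the 2.0 property, assuming f32 is the fallback users previously reached with KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS=NO:

    #include <openvino/runtime/core.hpp>
    #include <openvino/runtime/properties.hpp>

    int main() {
        ov::Core core;
        // Runs non-quantized layers in f32, roughly what the removed NO value did.
        core.set_property("GPU", ov::hint::inference_precision(ov::element::f32));
    }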
@@ -3,7 +3,6 @@
 //
 
 #include "behavior/plugin/configuration_tests.hpp"
-#include "cldnn/cldnn_config.hpp"
 #include "gpu/gpu_config.hpp"
 
 using namespace BehaviorTestsDefinitions;
@@ -28,8 +27,6 @@ namespace {
         {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, "OFF"}},
         {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}},
         {{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}},
-        {{InferenceEngine::PluginConfigParams::KEY_DUMP_KERNELS, "ON"}},
-        {{InferenceEngine::PluginConfigParams::KEY_TUNING_MODE, "TUNING_UNKNOWN_MODE"}},
         {{InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}};
 };
 
@@ -55,10 +52,6 @@ namespace {
          {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}},
         {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU},
          {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}},
-        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU},
-         {InferenceEngine::PluginConfigParams::KEY_DUMP_KERNELS, "ON"}},
-        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU},
-         {InferenceEngine::PluginConfigParams::KEY_TUNING_MODE, "TUNING_UNKNOWN_MODE"}},
         {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU},
          {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}},
         {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_CPU},
@@ -80,12 +73,6 @@ namespace {
         {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
           CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU},
          {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}},
-        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
-          CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU},
-         {InferenceEngine::PluginConfigParams::KEY_DUMP_KERNELS, "ON"}},
-        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
-          CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU},
-         {InferenceEngine::PluginConfigParams::KEY_TUNING_MODE, "TUNING_UNKNOWN_MODE"}},
         {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
           CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU},
          {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}},
@@ -113,10 +100,6 @@ namespace {
          {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, "ON"}},
         {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), CommonTestUtils::DEVICE_GPU},
          {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "unknown_file"}},
-        {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), CommonTestUtils::DEVICE_GPU},
-         {InferenceEngine::PluginConfigParams::KEY_DUMP_KERNELS, "ON"}},
-        {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), CommonTestUtils::DEVICE_GPU},
-         {InferenceEngine::PluginConfigParams::KEY_TUNING_MODE, "TUNING_UNKNOWN_MODE"}},
         {{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), CommonTestUtils::DEVICE_GPU},
          {InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}};
 };
@@ -155,14 +138,6 @@ namespace {
     IE_SUPPRESS_DEPRECATED_START
     auto conf_gpu = []() {
         return std::vector<std::map<std::string, std::string>>{
-            // Deprecated
-            {{InferenceEngine::CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS, InferenceEngine::PluginConfigParams::YES}},
-            {{InferenceEngine::CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS, InferenceEngine::PluginConfigParams::NO}},
-            {{InferenceEngine::CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE, "0"}},
-            {{InferenceEngine::CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE, "1"}},
-            {{InferenceEngine::CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY, "0"}},
-            {{InferenceEngine::CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY, "1"}},
-
             {{InferenceEngine::GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS, InferenceEngine::PluginConfigParams::YES}},
             {{InferenceEngine::GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS, InferenceEngine::PluginConfigParams::NO}},
             {{InferenceEngine::GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE, "0"}},