[VPU] update config header (#9857)

* [VPU] update config header

* Review fixes

* Performance hint config update

* Remove deprecated VPU config options

* Review changes

* Rename myriad properties from camelCase to snake_case

* Review changes

* Review fixes

* Remove intel_myriad::common namespace

* OV throughput stream option

* Test fix

* Reverted disable_convert & disable_reorder

* Bugfixes

* Change default value for PerformanceHintNumRequestsOption
Aleksandr Korolev 2022-02-04 16:32:00 +03:00 committed by GitHub
parent 72216a9b95
commit 9743784f91
46 changed files with 528 additions and 809 deletions

View File

@ -16,7 +16,6 @@
#include "gna/gna_config.hpp"
#include "gpu/gpu_config.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include "samples/args_helper.hpp"
#include "samples/common.hpp"

View File

@ -1,254 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @deprecated Use vpu/hddl_config.hpp instead.
* @brief A header that defines advanced related properties for VPU plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
* @file hddl_plugin_config.hpp
*/
#pragma once
#include "ie_api.h"
#include "ie_plugin_config.hpp"
//
// Options
//
/**
* @def VPU_HDDL_CONFIG_KEY(name)
* @brief Shortcut for defining VPU HDDL configuration key
*/
#define VPU_HDDL_CONFIG_KEY(name) InferenceEngine::VPUConfigParams::_CONFIG_KEY(VPU_HDDL_##name)
/**
* @def VPU_HDDL_CONFIG_VALUE(name)
* @brief Shortcut for defining VPU HDDL configuration value
*/
#define VPU_HDDL_CONFIG_VALUE(name) InferenceEngine::VPUConfigParams::VPU_HDDL_##name
#define DECLARE_VPU_HDDL_CONFIG_KEY(name) DECLARE_CONFIG_KEY(VPU_HDDL_##name)
#define DECLARE_VPU_HDDL_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(VPU_HDDL_##name)
//
// Metrics
//
/**
* @def VPU_HDDL_METRIC(name)
* @brief Shortcut for defining VPU HDDL metric
*/
#define VPU_HDDL_METRIC(name) METRIC_KEY(VPU_HDDL_##name)
#define DECLARE_VPU_HDDL_METRIC(name, ...) DECLARE_METRIC_KEY(VPU_HDDL_##name, __VA_ARGS__)
namespace InferenceEngine {
namespace Metrics {
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_NUM instead
* @brief Metric to get a int of the device number, String value is METRIC_VPU_HDDL_DEVICE_NUM
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_NUM instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_NUM, int);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_NAME instead
* @brief Metric to get a std::vector<std::string> of device names, String value is METRIC_VPU_HDDL_DEVICE_NAME
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_NAME instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_NAME, std::vector<std::string>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_THERMAL instead
* @brief Metric to get a std::vector<float> of device thermal, String value is METRIC_VPU_HDDL_DEVICE_THERMAL
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_THERMAL instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_THERMAL, std::vector<float>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_ID instead
* @brief Metric to get a std::vector<uint32> of device ids, String value is METRIC_VPU_HDDL_DEVICE_ID
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_ID instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_ID, std::vector<unsigned int>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_SUBCLASS instead
* @brief Metric to get a std::vector<int> of device subclasses, String value is METRIC_VPU_HDDL_DEVICE_SUBCLASS
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_SUBCLASS instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_SUBCLASS, std::vector<int>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_TOTAL instead
* @brief Metric to get a std::vector<uint32> of device total memory, String value is METRIC_VPU_HDDL_MEMORY_TOTAL
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_TOTAL instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_MEMORY_TOTAL, std::vector<unsigned int>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_USED instead
* @brief Metric to get a std::vector<uint32> of device used memory, String value is METRIC_VPU_HDDL_DEVICE_MEMORY_USED
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_USED instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_MEMORY_USED, std::vector<unsigned int>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_UTILIZATION instead
* @brief Metric to get a std::vector<float> of device utilization, String value is METRIC_VPU_HDDL_DEVICE_UTILIZATION
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_UTILIZATION instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_UTILIZATION, std::vector<float>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_STREAM_ID instead
* @brief Metric to get a std::vector<std::string> of stream ids, String value is METRIC_VPU_HDDL_DEVICE_STREAM_ID
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_STREAM_ID instead")
DECLARE_VPU_HDDL_METRIC(STREAM_ID, std::vector<std::string>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_TAG instead
* @brief Metric to get a std::vector<std::string> of device tags, String value is METRIC_VPU_HDDL_DEVICE_TAG
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_TAG instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_TAG, std::vector<std::string>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_GROUP_ID instead
* @brief Metric to get a std::vector<int> of group ids, String value is METRIC_VPU_HDDL_GROUP_ID
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_GROUP_ID instead")
DECLARE_VPU_HDDL_METRIC(GROUP_ID, std::vector<int>);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_GROUP_USING_NUM instead
* @brief Metric to get a int number of device be using for group, String value is
* METRIC_VPU_HDDL_DEVICE_GROUP_USING_NUM
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_GROUP_USING_NUM instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_GROUP_USING_NUM, int);
/**
* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_TOTAL_NUM instead
* @brief Metric to get a int number of total device, String value is METRIC_VPU_HDDL_DEVICE_TOTAL_NUM
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_TOTAL_NUM instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_TOTAL_NUM, int);
} // namespace Metrics
namespace VPUConfigParams {
/**
* @deprecated Use InferenceEngine::HDDL_GRAPH_TAG instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: Arbitrary non-empty string. If empty (""), equals no set, default: "";
* This option allows to specify the number of MYX devices used for inference a specific Executable network.
* Note: Only one network would be allocated to one device.
* The number of devices for the tag is specified in the hddl_service.config file.
* Example:
* "service_settings":
* {
* "graph_tag_map":
* {
* "tagA":3
* }
* }
* It means that an executable network marked with tagA will be executed on 3 devices
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_GRAPH_TAG instead")
DECLARE_VPU_HDDL_CONFIG_KEY(GRAPH_TAG);
/**
* @deprecated Use InferenceEngine::HDDL_STREAM_ID instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: Arbitrary non-empty string. If empty (""), equals no set, default: "";
* This config makes the executable networks to be allocated on one certain device (instead of multiple devices).
* And all inference through this executable network, will be done on this device.
* Note: Only one network would be allocated to one device.
* The number of devices which will be used for stream-affinity must be specified in hddl_service.config file.
* Example:
* "service_settings":
* {
* "stream_device_number":5
* }
* It means that 5 device will be used for stream-affinity
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_STREAM_ID instead")
DECLARE_VPU_HDDL_CONFIG_KEY(STREAM_ID);
/**
* @deprecated Use InferenceEngine::HDDL_DEVICE_TAG instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: Arbitrary non-empty string. If empty (""), equals no set, default: "";
* This config allows user to control device flexibly. This config gives a "tag" for a certain device while
* allocating a network to it. Afterward, user can allocating/deallocating networks to this device with this "tag".
* Devices used for such use case is controlled by a so-called "Bypass Scheduler" in HDDL backend, and the number
* of such device need to be specified in hddl_service.config file.
* Example:
* "service_settings":
* {
* "bypass_device_number": 5
* }
* It means that 5 device will be used for Bypass scheduler.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_DEVICE_TAG instead")
DECLARE_VPU_HDDL_CONFIG_KEY(DEVICE_TAG);
/**
* @deprecated Use InferenceEngine::HDDL_BIND_DEVICE instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: "YES/NO", default is "NO".
* This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set. After a user load a
* network, the user got a handle for the network.
* If "YES", the network allocated is bind to the device (with the specified "DEVICE_TAG"), which means all afterwards
* inference through this network handle will be executed on this device only.
* If "NO", the network allocated is not bind to the device (with the specified "DEVICE_TAG"). If the same network
* is allocated on multiple other devices (also set BIND_DEVICE to "False"), then inference through any handle of these
* networks may be executed on any of these devices those have the network loaded.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_BIND_DEVICE instead")
DECLARE_VPU_HDDL_CONFIG_KEY(BIND_DEVICE);
/**
* @deprecated Use InferenceEngine::HDDL_RUNTIME_PRIORITY instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: A signed int wrapped in a string, default is "0".
* This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set and "BIND_DEVICE" is "False".
* When there are multiple devices running a certain network (a same network running on multiple devices in Bypass
* Scheduler), the device with a larger number has a higher priority, and more inference tasks will be fed to it with
* priority.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_RUNTIME_PRIORITY instead")
DECLARE_VPU_HDDL_CONFIG_KEY(RUNTIME_PRIORITY);
/**
* @deprecated Use InferenceEngine::HDDL_USE_SGAD instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: "YES/NO", default is "NO".
* SGAD is short for "Single Graph All Device". With this scheduler, once application allocates 1 network, all devices
* (managed by SGAD scheduler) will be loaded with this graph. The number of network that can be loaded to one device
* can exceed one. Once application deallocates 1 network from device, all devices will unload the network from them.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_USE_SGAD instead")
DECLARE_VPU_HDDL_CONFIG_KEY(USE_SGAD);
/**
* @deprecated Use InferenceEngine::HDDL_GROUP_DEVICE instead
* @brief [Only for OpenVINO Intel HDDL device]
* Type: A signed int wrapped in a string, default is "0".
* This config gives a "group id" for a certain device when this device has been reserved for certain client, client
* can use this device grouped by calling this group id while other client can't use this device
* Each device has their own group id. Device in one group shares same group id.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_GROUP_DEVICE instead")
DECLARE_VPU_HDDL_CONFIG_KEY(GROUP_DEVICE);
} // namespace VPUConfigParams
} // namespace InferenceEngine

View File

@ -1,68 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @deprecated Use vpu/myriad_config.hpp instead.
* @brief A header that defines advanced related properties for VPU plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
* @file myriad_plugin_config.hpp
*/
#pragma once
#include "ie_api.h"
#include "ie_plugin_config.hpp"
/**
* @def VPU_MYRIAD_CONFIG_KEY(name)
* @brief Shortcut for defining VPU MYRIAD configuration key
*/
#define VPU_MYRIAD_CONFIG_KEY(name) InferenceEngine::VPUConfigParams::_CONFIG_KEY(VPU_MYRIAD_##name)
/**
* @def VPU_MYRIAD_CONFIG_VALUE(name)
* @brief Shortcut for defining VPU MYRIAD configuration value
*/
#define VPU_MYRIAD_CONFIG_VALUE(name) InferenceEngine::VPUConfigParams::VPU_MYRIAD_##name
#define DECLARE_VPU_MYRIAD_CONFIG_KEY(name) DECLARE_CONFIG_KEY(VPU_MYRIAD_##name)
#define DECLARE_VPU_MYRIAD_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(VPU_MYRIAD_##name)
namespace InferenceEngine {
namespace VPUConfigParams {
/**
* @deprecated Use InferenceEngine::MYRIAD_ENABLE_FORCE_RESET instead.
* @brief The flag to reset stalled devices: CONFIG_VALUE(YES) or CONFIG_VALUE(NO) (default)
* This is a plugin scope option and must be used with the plugin's SetConfig method
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_ENABLE_FORCE_RESET instead")
DECLARE_VPU_MYRIAD_CONFIG_KEY(FORCE_RESET);
/**
* @deprecated Use InferenceEngine::MYRIAD_DDR_TYPE instead
* @brief This option allows to specify device memory type.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_TYPE instead")
DECLARE_VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE);
/**
* @deprecated Use DDR type values from InferenceEngine namespace with MYRIAD_DDR_ prefix
* @brief Supported keys definition for VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE) option.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_AUTO instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO);
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_MICRON_2GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB);
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_SAMSUNG_2GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB);
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_HYNIX_2GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB);
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_MICRON_1GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB);
} // namespace VPUConfigParams
} // namespace InferenceEngine

View File

@ -1,125 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @deprecated Use vpu/myriad_config.hpp or vpu/hddl_config.hpp instead.
* @brief A header that defines advanced related properties for VPU plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
* @file vpu_plugin_config.hpp
*/
#pragma once
#include "ie_api.h"
#include "vpu/hddl_plugin_config.hpp"
#include "vpu/myriad_plugin_config.hpp"
//
// Common options
//
#define VPU_CONFIG_KEY(name) InferenceEngine::VPUConfigParams::_CONFIG_KEY(VPU_##name)
#define VPU_CONFIG_VALUE(name) InferenceEngine::VPUConfigParams::VPU_##name
#define DECLARE_VPU_CONFIG_KEY(name) DECLARE_CONFIG_KEY(VPU_##name)
#define DECLARE_VPU_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(VPU_##name)
//
// Common metrics
//
#define VPU_METRIC(name) METRIC_KEY(VPU_##name)
#define DECLARE_VPU_METRIC(name, ...) DECLARE_METRIC_KEY(VPU_##name, __VA_ARGS__)
namespace InferenceEngine {
/**
* @brief VPU plugin configuration
*/
namespace VPUConfigParams {
//
// Common options
//
/**
* @deprecated Use InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION instead.
* @brief Turn on HW stages usage (applicable for MyriadX devices only).
* This option should be used with values: CONFIG_VALUE(YES) or CONFIG_VALUE(NO) (default)
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION instead")
DECLARE_VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION);
/**
* @deprecated Use CONFIG_KEY(LOG_LEVEL) instead.
* @brief The key to define log level
*/
INFERENCE_ENGINE_DEPRECATED("Use CONFIG_KEY(LOG_LEVEL) instead")
DECLARE_VPU_CONFIG_KEY(LOG_LEVEL);
/**
* @deprecated Use InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME instead.
* @brief The flag for adding to the profiling information the time of obtaining a tensor.
* This option should be used with values: CONFIG_VALUE(YES) or CONFIG_VALUE(NO) (default)
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME instead")
DECLARE_VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME);
/**
* @deprecated Use InputInfo::setLayout on input data from CNNNetwork::getInputsInfo() or
* Data::setLayout on output data from CNNNetwork::getOutputsInfo()
* @brief This option allows to to specify input output layouts for network layers.
* By default, this value set to VPU_CONFIG_VALUE(AUTO) value.
* Supported values:
* VPU_CONFIG_VALUE(AUTO) executable network configured to use optimal layer layout depending on available HW
* VPU_CONFIG_VALUE(NCHW) executable network forced to use NCHW input/output layouts
* VPU_CONFIG_VALUE(NHWC) executable network forced to use NHWC input/output layouts
*/
INFERENCE_ENGINE_DEPRECATED("Use InputInfo::setLayout on input data from CNNNetwork::getInputsInfo() or"
"Data::setLayout on output data from CNNNetwork::getOutputsInfo()")
DECLARE_VPU_CONFIG_KEY(COMPUTE_LAYOUT);
/**
* @deprecated See VPU_CONFIG_KEY(COMPUTE_LAYOUT) deprecation info.
* @brief Supported keys definition for VPU_CONFIG_KEY(COMPUTE_LAYOUT) option.
*/
INFERENCE_ENGINE_DEPRECATED("See VPU_CONFIG_KEY(COMPUTE_LAYOUT) deprecation info")
DECLARE_VPU_CONFIG_VALUE(AUTO);
INFERENCE_ENGINE_DEPRECATED("See VPU_CONFIG_KEY(COMPUTE_LAYOUT) deprecation info")
DECLARE_VPU_CONFIG_VALUE(NCHW);
INFERENCE_ENGINE_DEPRECATED("See VPU_CONFIG_KEY(COMPUTE_LAYOUT) deprecation info")
DECLARE_VPU_CONFIG_VALUE(NHWC);
INFERENCE_ENGINE_DEPRECATED("See VPU_CONFIG_KEY(COMPUTE_LAYOUT) deprecation info")
DECLARE_VPU_CONFIG_VALUE(NCDHW);
INFERENCE_ENGINE_DEPRECATED("See VPU_CONFIG_KEY(COMPUTE_LAYOUT) deprecation info")
DECLARE_VPU_CONFIG_VALUE(NDHWC);
/**
* @deprecated Use InferenceEngine::MYRIAD_CUSTOM_LAYERS instead.
* @brief This option allows to pass custom layers binding xml.
* If layer is present in such an xml, it would be used during inference even if the layer is natively supported
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_CUSTOM_LAYERS instead")
DECLARE_VPU_CONFIG_KEY(CUSTOM_LAYERS);
/**
* @deprecated Use InferenceEngine::MYRIAD_PROTOCOL instead.
* @brief This option allows to specify protocol.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_PROTOCOL instead")
DECLARE_VPU_MYRIAD_CONFIG_KEY(PROTOCOL);
/**
* @deprecated Use InferenceEngine::MYRIAD_PCIE or InferenceEngine::MYRIAD_USB instead.
* @brief Supported keys definition for VPU_MYRIAD_CONFIG_KEY(PROTOCOL) option.
*/
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_PCIE instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(PCIE);
INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_USB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(USB);
} // namespace VPUConfigParams
} // namespace InferenceEngine

View File

@ -0,0 +1,174 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <openvino/runtime/properties.hpp>
namespace ov {
namespace intel_myriad {
namespace hddl {
// RO properties
/**
* @brief Property to get the number of devices as an int
*/
static constexpr Property<int, PropertyMutability::RO> device_num{"HDDL_DEVICE_NUM"};
/**
* @brief Property to get a std::vector<std::string> of device names
*/
static constexpr Property<std::vector<std::string>, PropertyMutability::RO> device_name{"HDDL_DEVICE_NAME"};
/**
* @brief Property to get a std::vector<float> of device thermal
*/
static constexpr Property<std::vector<float>, PropertyMutability::RO> device_thermal{"HDDL_DEVICE_THERMAL"};
/**
* @brief Property to get a std::vector<uint32> of device ids
*/
static constexpr Property<std::vector<unsigned int>, PropertyMutability::RO> device_id{"HDDL_DEVICE_ID"};
/**
* @brief Property to get a std::vector<int> of device subclasses
*/
static constexpr Property<std::vector<int>, PropertyMutability::RO> device_subclass{"HDDL_DEVICE_SUBCLASS"};
/**
* @brief Property to get a std::vector<uint32> of device total memory
*/
static constexpr Property<std::vector<unsigned int>, PropertyMutability::RO> device_memory_total{
"HDDL_DEVICE_MEMORY_TOTAL"};
/**
* @brief Property to get a std::vector<uint32> of device used memory
*/
static constexpr Property<std::vector<unsigned int>, PropertyMutability::RO> device_memory_used{
"HDDL_DEVICE_MEMORY_USED"};
/**
* @brief Property to get a std::vector<float> of device utilization
*/
static constexpr Property<std::vector<float>, PropertyMutability::RO> device_utilization{"HDDL_DEVICE_UTILIZATION"};
/**
* @brief Property to get a std::vector<std::string> of stream ids
*/
static constexpr Property<std::vector<std::string>, PropertyMutability::RO> stream_id{"HDDL_STREAM_ID"};
/**
* @brief Property to get a std::vector<std::string> of device tags
*/
static constexpr Property<std::vector<std::string>, PropertyMutability::RO> device_tag{"HDDL_DEVICE_TAG"};
/**
* @brief Property to get a std::vector<int> of group ids
*/
static constexpr Property<std::vector<int>, PropertyMutability::RO> group_id{"HDDL_GROUP_ID"};
/**
* @brief Property to get the number of devices being used for a group, as an int
*/
static constexpr Property<int, PropertyMutability::RO> device_group_using_num{"HDDL_DEVICE_GROUP_USING_NUM"};
/**
* @brief Property to get the total number of devices, as an int
*/
static constexpr Property<int, PropertyMutability::RO> device_total_num{"HDDL_DEVICE_TOTAL_NUM"};
// RW properties
/**
* @brief [Only for HDDLPlugin]
* Type: Arbitrary non-empty string. If empty (""), the option is treated as unset; default: "".
* This option allows specifying the number of MYX devices used to run inference for a specific executable network.
* Note: Only one network is allocated to one device.
* The number of devices for the tag is specified in the hddl_service.config file.
* Example:
* "service_settings":
* {
* "graph_tag_map":
* {
* "tagA":3
* }
* }
* It means that an executable network marked with tagA will be executed on 3 devices
*/
static constexpr Property<std::string, PropertyMutability::RW> graph_tag{"HDDL_GRAPH_TAG"};
/**
* @brief [Only for HDDLPlugin]
* Type: Arbitrary non-empty string. If empty (""), the option is treated as unset; default: "".
* This config makes the executable network be allocated on one specific device (instead of multiple devices),
* and all inference through this executable network will be done on that device.
* Note: Only one network is allocated to one device.
* The number of devices which will be used for stream-affinity must be specified in the hddl_service.config file.
* Example:
* "service_settings":
* {
* "stream_device_number":5
* }
* It means that 5 devices will be used for stream-affinity
*/
static constexpr Property<std::string, PropertyMutability::RW> set_stream_id{"HDDL_SET_STREAM_ID"};
/**
* @brief [Only for HDDLPlugin]
* Type: Arbitrary non-empty string. If empty (""), the option is treated as unset; default: "".
* This config allows the user to control devices flexibly. It gives a "tag" to a certain device while
* allocating a network to it. Afterwards, the user can allocate/deallocate networks to this device using this "tag".
* Devices used for such a use case are controlled by a so-called "Bypass Scheduler" in the HDDL backend, and the number
* of such devices needs to be specified in the hddl_service.config file.
* Example:
* "service_settings":
* {
* "bypass_device_number": 5
* }
* It means that 5 devices will be used for the Bypass scheduler.
*/
static constexpr Property<std::string, PropertyMutability::RW> set_device_tag{"HDDL_SET_DEVICE_TAG"};
/**
* @brief [Only for HDDLPlugin]
* Type: "bool", default is "false".
* This config is a sub-config of DEVICE_TAG, and is only available when "DEVICE_TAG" is set. After a user loads a
* network, the user gets a handle for the network.
* If "true", the allocated network is bound to the device (with the specified "DEVICE_TAG"), which means all subsequent
* inference through this network handle will be executed on this device only.
* If "false", the allocated network is not bound to the device (with the specified "DEVICE_TAG"). If the same network
* is allocated on multiple other devices (which also set BIND_DEVICE to "false"), then inference through any handle of these
* networks may be executed on any of those devices that have the network loaded.
*/
static constexpr Property<bool, PropertyMutability::RW> bind_device{"HDDL_BIND_DEVICE"};
/**
* @brief [Only for HDDLPlugin]
* Type: A signed int wrapped in a string, default is "0".
* This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set and "BIND_DEVICE" is "False".
* When there are multiple devices running a certain network (the same network running on multiple devices in the Bypass
* Scheduler), the device with a larger number has a higher priority, and more inference tasks will be fed to it
* preferentially.
*/
static constexpr Property<std::string, PropertyMutability::RW> runtime_priority{"HDDL_RUNTIME_PRIORITY"};
/**
* @brief [Only for HDDLPlugin]
* Type: "bool", default is "false".
* SGAD is short for "Single Graph All Device". With this scheduler, once an application allocates 1 network, all devices
* (managed by the SGAD scheduler) will be loaded with this graph. The number of networks that can be loaded onto one device
* can exceed one. Once an application deallocates 1 network from a device, all devices will unload the network.
*/
static constexpr Property<bool, PropertyMutability::RW> use_sgad{"HDDL_USE_SGAD"};
/**
* @brief [Only for HDDLPlugin]
* Type: A signed int wrapped in a string, default is "0".
* This config gives a "group id" for a certain device when this device has been reserved for certain client, client
* can use this device grouped by calling this group id while other client can't use this device
* Each device has their own group id. Device in one group shares same group id.
*/
static constexpr Property<std::string, PropertyMutability::RW> group_device{"HDDL_GROUP_DEVICE"};
} // namespace hddl
} // namespace intel_myriad
}; // namespace ov
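
A minimal usage sketch, not part of this change, showing how an application could consume these properties through the OpenVINO 2.0 API; the "HDDL" device name, the header path, and the model file name are assumptions for illustration:

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_myriad/hddl_properties.hpp>  // assumed location of the header above

int main() {
    ov::Core core;
    // RO metric: how many HDDL devices are present in the system
    const int devices = core.get_property("HDDL", ov::intel_myriad::hddl::device_num);
    // RW properties: tag the graph and bind it to the tagged device at compile time
    const auto model = core.read_model("model.xml");
    const auto compiled = core.compile_model(model, "HDDL",
                                             ov::intel_myriad::hddl::set_device_tag("tagA"),
                                             ov::intel_myriad::hddl::bind_device(true));
    (void)compiled;
    return devices > 0 ? 0 : 1;
}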

View File

@ -0,0 +1,129 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <openvino/runtime/properties.hpp>
#include <string>
namespace ov {
namespace intel_myriad {
/**
* @brief Turn on HW stages usage (applicable for MyriadX devices only).
*/
static constexpr Property<bool, PropertyMutability::RW> enable_hw_acceleration{"MYRIAD_ENABLE_HW_ACCELERATION"};
/**
* @brief The flag to add the time of obtaining a tensor to the profiling information.
*/
static constexpr Property<bool, PropertyMutability::RW> enable_receiving_tensor_time{
"MYRIAD_ENABLE_RECEIVING_TENSOR_TIME"};
/**
* @brief This option allows passing a custom layers binding XML.
* If a layer is present in such an XML, it will be used during inference even if the layer is natively supported.
*/
static constexpr Property<std::string, PropertyMutability::RW> custom_layers{"MYRIAD_CUSTOM_LAYERS"};
/**
* @brief Enum to define possible device protocols
*/
enum class Protocol {
PCIE = 0, //!< Will use a device with PCIE protocol
USB = 1, //!< Will use a device with USB protocol
};
/** @cond INTERNAL */
inline std::ostream& operator<<(std::ostream& os, const Protocol& protocol) {
switch (protocol) {
case Protocol::PCIE:
return os << "MYRIAD_PCIE";
case Protocol::USB:
return os << "MYRIAD_USB";
default:
throw ov::Exception{"Unsupported myriad protocol"};
}
};
inline std::istream& operator>>(std::istream& is, Protocol& protocol) {
std::string str;
is >> str;
if (str == "MYRIAD_PCIE") {
protocol = Protocol::PCIE;
} else if (str == "MYRIAD_USB") {
protocol = Protocol::USB;
} else {
throw ov::Exception{"Unsupported myriad protocol: " + str};
}
return is;
};
/** @endcond */
// Myriad specific properties
/**
* @brief This option allows specifying the device protocol.
*/
static constexpr Property<Protocol, PropertyMutability::RW> protocol{"MYRIAD_PROTOCOL"};
/**
* @brief The flag to reset stalled devices.
*/
static constexpr Property<bool, PropertyMutability::RW> enable_force_reset{"MYRIAD_ENABLE_FORCE_RESET"};
/**
* @brief Enum to define possible device memory types
*/
enum class DDRType {
MYRIAD_DDR_AUTO = 0, //!< Automatic setting of DDR memory type
MYRIAD_DDR_MICRON_2GB = 1, //!< Using a device with MICRON_2GB DDR memory type
MYRIAD_DDR_SAMSUNG_2GB = 2, //!< Using a device with SAMSUNG_2GB DDR memory type
MYRIAD_DDR_HYNIX_2GB = 3, //!< Using a device with HYNIX_2GB DDR memory type
MYRIAD_DDR_MICRON_1GB = 4, //!< Using a device with MICRON_1GB DDR memory type
};
/** @cond INTERNAL */
inline std::ostream& operator<<(std::ostream& os, const DDRType& ddrType) {
switch (ddrType) {
case DDRType::MYRIAD_DDR_AUTO:
return os << "MYRIAD_DDR_AUTO";
case DDRType::MYRIAD_DDR_MICRON_2GB:
return os << "MYRIAD_DDR_MICRON_2GB";
case DDRType::MYRIAD_DDR_SAMSUNG_2GB:
return os << "MYRIAD_DDR_SAMSUNG_2GB";
case DDRType::MYRIAD_DDR_HYNIX_2GB:
return os << "MYRIAD_DDR_HYNIX_2GB";
case DDRType::MYRIAD_DDR_MICRON_1GB:
return os << "MYRIAD_DDR_MICRON_1GB";
default:
throw ov::Exception{"Unsupported myriad ddr type"};
}
};
inline std::istream& operator>>(std::istream& is, DDRType& ddrType) {
std::string str;
is >> str;
if (str == "MYRIAD_DDR_AUTO") {
ddrType = DDRType::MYRIAD_DDR_AUTO;
} else if (str == "MYRIAD_DDR_MICRON_2GB") {
ddrType = DDRType::MYRIAD_DDR_MICRON_2GB;
} else if (str == "MYRIAD_DDR_SAMSUNG_2GB") {
ddrType = DDRType::MYRIAD_DDR_SAMSUNG_2GB;
} else if (str == "MYRIAD_DDR_HYNIX_2GB") {
ddrType = DDRType::MYRIAD_DDR_HYNIX_2GB;
} else if (str == "MYRIAD_DDR_MICRON_1GB") {
ddrType = DDRType::MYRIAD_DDR_MICRON_1GB;
} else {
throw ov::Exception{"Unsupported myriad protocol: " + str};
}
return is;
};
/** @endcond */
/**
* @brief This option allows specifying the device memory type.
*/
static constexpr Property<DDRType, PropertyMutability::RW> ddr_type{"MYRIAD_DDR_TYPE"};
} // namespace intel_myriad
}; // namespace ov
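
A small sketch, not part of this change, of how the stream operators above round-trip the enums through their string forms and how the properties are passed to a core; the "MYRIAD" device name is assumed and a real device must be present for set_property to succeed:

#include <sstream>
#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_myriad/myriad_properties.hpp>

int main() {
    std::stringstream ss;
    ss << ov::intel_myriad::Protocol::PCIE;   // writes "MYRIAD_PCIE"
    ov::intel_myriad::Protocol parsed;
    ss >> parsed;                             // parses it back; throws on unknown strings

    ov::Core core;
    core.set_property("MYRIAD",
                      ov::intel_myriad::protocol(parsed),
                      ov::intel_myriad::ddr_type(ov::intel_myriad::DDRType::MYRIAD_DDR_AUTO));
    return 0;
}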

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include "vpu/configuration/as_parameter_enabler.hpp"
#include "vpu/utils/optional.hpp"
#include "openvino/runtime/properties.hpp"
namespace vpu {
namespace details {
enum class Access;
enum class Category;
} // namespace details
class PluginConfiguration;
struct OvThroughputStreamsOption : public AsParameterEnabler {
using value_type = Optional<unsigned int>;
static std::string key();
static void validate(const std::string&);
static void validate(const PluginConfiguration&);
static std::string defaultValue();
static value_type parse(const std::string&);
static details::Access access();
static details::Category category();
};
} // namespace vpu

View File

@ -7,6 +7,7 @@
#include <string>
#include "vpu/configuration/as_parameter_enabler.hpp"
#include "openvino/runtime/properties.hpp"
namespace vpu {

View File

@ -8,6 +8,7 @@
#include <string>
#include "vpu/configuration/as_parameter_enabler.hpp"
#include "openvino/runtime/properties.hpp"
namespace vpu {

View File

@ -7,7 +7,6 @@
#include <string>
#include <vpu/myriad_config.hpp>
#include <vpu/vpu_plugin_config.hpp>
namespace InferenceEngine {
@ -150,13 +149,4 @@ DECLARE_VPU_CONFIG(MYRIAD_DEVICE_CONNECT_TIMEOUT);
DECLARE_VPU_CONFIG(MYRIAD_ENABLE_ASYNC_DMA);
namespace VPUConfigParams {
IE_SUPPRESS_DEPRECATED_START
// Used to update API usage in the dependent repos.
DECLARE_VPU_CONFIG_KEY(DETECT_NETWORK_BATCH);
IE_SUPPRESS_DEPRECATED_END
} // namespace VPUConfigParams
} // namespace InferenceEngine

View File

@ -8,7 +8,6 @@
#include "vpu/configuration/plugin_configuration.hpp"
#include <vpu/myriad_config.hpp>
#include <vpu/myriad_plugin_config.hpp>
#include <unordered_map>
@ -17,22 +16,13 @@ namespace vpu {
namespace {
const std::unordered_map<std::string, MovidiusDdrType>& string2type() {
IE_SUPPRESS_DEPRECATED_START
static const std::unordered_map<std::string, MovidiusDdrType> converters = {
{InferenceEngine::MYRIAD_DDR_AUTO, MovidiusDdrType::AUTO },
{InferenceEngine::MYRIAD_DDR_MICRON_2GB, MovidiusDdrType::MICRON_2GB },
{InferenceEngine::MYRIAD_DDR_SAMSUNG_2GB, MovidiusDdrType::SAMSUNG_2GB },
{InferenceEngine::MYRIAD_DDR_HYNIX_2GB, MovidiusDdrType::HYNIX_2GB },
{InferenceEngine::MYRIAD_DDR_MICRON_1GB, MovidiusDdrType::MICRON_1GB },
// Deprecated
{VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO), MovidiusDdrType::AUTO },
{VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB), MovidiusDdrType::MICRON_2GB },
{VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB), MovidiusDdrType::SAMSUNG_2GB },
{VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB), MovidiusDdrType::HYNIX_2GB },
{VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB), MovidiusDdrType::MICRON_1GB }
};
IE_SUPPRESS_DEPRECATED_END
return converters;
}

View File

@ -0,0 +1,70 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "vpu/utils/containers.hpp"
#include "vpu/configuration/options/ov_throughput_streams.hpp"
#include "vpu/configuration/plugin_configuration.hpp"
#include "vpu/utils/error.hpp"
#include <openvino/runtime/properties.hpp>
#include <sstream>
#include <vpu/myriad_config.hpp>
namespace vpu {
void OvThroughputStreamsOption::validate(const std::string& value) {
if (value == defaultValue()) {
return;
}
int intValue;
try {
intValue = std::stoi(value);
} catch (const std::exception& e) {
VPU_THROW_FORMAT(R"(unexpected {} option value "{}", must be a number)", key(), value);
}
VPU_THROW_UNLESS(intValue >= 0,
R"(unexpected {} option value "{}", only not negative numbers are supported)", key(), value);
}
void OvThroughputStreamsOption::validate(const PluginConfiguration& configuration) {
validate(configuration[key()]);
}
std::string OvThroughputStreamsOption::key() {
return ov::streams::num.name();
}
details::Access OvThroughputStreamsOption::access() {
return details::Access::Public;
}
details::Category OvThroughputStreamsOption::category() {
return details::Category::CompileTime;
}
std::string OvThroughputStreamsOption::defaultValue() {
std::stringstream ss;
ss << ov::streams::AUTO;
return ss.str();
}
OvThroughputStreamsOption::value_type OvThroughputStreamsOption::parse(const std::string& value) {
if (value == defaultValue()) {
return OvThroughputStreamsOption::value_type();
}
int intValue;
try {
intValue = std::stoi(value);
} catch (const std::exception& e) {
VPU_THROW_FORMAT(R"(unexpected {} option value "{}", must be a number)", key(), value);
}
VPU_THROW_UNSUPPORTED_OPTION_UNLESS(intValue >= 0,
R"(unexpected {} option value "{}", only not negative numbers are supported)", key(), value);
return intValue;
}
} // namespace vpu
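
A standalone sketch of the parse contract implemented above, assuming std::optional in place of vpu::Optional and plain exceptions in place of the VPU_THROW_* macros:

#include <optional>
#include <stdexcept>
#include <string>

std::optional<unsigned int> parse_streams(const std::string& value, const std::string& default_value) {
    if (value == default_value) {
        return std::nullopt;                  // the default keeps the option "unset"
    }
    int parsed = 0;
    try {
        parsed = std::stoi(value);
    } catch (const std::exception&) {
        throw std::invalid_argument("ov::streams::num must be a number, got: " + value);
    }
    if (parsed < 0) {
        throw std::invalid_argument("ov::streams::num must be non-negative, got: " + value);
    }
    return static_cast<unsigned int>(parsed);
}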

View File

@ -7,6 +7,8 @@
#include "vpu/utils/containers.hpp"
#include "vpu/configuration/plugin_configuration.hpp"
#include <ie_plugin_config.hpp>
#include <openvino/runtime/properties.hpp>
#include <sstream>
namespace vpu {
@ -17,7 +19,7 @@ void PerformanceHintOption::validate(const PluginConfiguration& configuration) {
}
std::string PerformanceHintOption::key() {
return CONFIG_KEY(PERFORMANCE_HINT);
return ov::hint::performance_mode.name();
}
details::Access PerformanceHintOption::access() {
@ -33,7 +35,16 @@ std::string PerformanceHintOption::defaultValue() {
}
PerformanceHintOption::value_type PerformanceHintOption::parse(const std::string& value) {
if (value == CONFIG_VALUE(LATENCY) || value == CONFIG_VALUE(THROUGHPUT) || value == "") {
std::string latencyValue;
std::string throughputValue;
std::stringstream tmp;
tmp << ov::hint::PerformanceMode::LATENCY;
latencyValue = tmp.str();
tmp.str("");  // reset the buffer; clear() alone only resets the stream state flags
tmp.clear();
tmp << ov::hint::PerformanceMode::THROUGHPUT;
throughputValue = tmp.str();
if (value == latencyValue || value == throughputValue || value == "") {
return value;
} else {
VPU_THROW_EXCEPTION << "Wrong value for property key " << CONFIG_KEY(PERFORMANCE_HINT) << ". Expected only "

View File

@ -8,6 +8,7 @@
#include "vpu/configuration/plugin_configuration.hpp"
#include "vpu/utils/error.hpp"
#include <ie_plugin_config.hpp>
#include <openvino/runtime/properties.hpp>
#include <string>
namespace vpu {
@ -19,7 +20,7 @@ void PerformanceHintNumRequestsOption::validate(const PluginConfiguration& confi
}
std::string PerformanceHintNumRequestsOption::key() {
return CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS);
return ov::hint::num_requests.name();
}
details::Access PerformanceHintNumRequestsOption::access() {
@ -31,20 +32,19 @@ details::Category PerformanceHintNumRequestsOption::category() {
}
std::string PerformanceHintNumRequestsOption::defaultValue() {
return "-1";
return "0";
}
PerformanceHintNumRequestsOption::value_type PerformanceHintNumRequestsOption::parse(const std::string& value) {
try {
auto returnValue = std::stoi(value);
if (returnValue > 0 || returnValue == -1) {
if (returnValue >= 0) {
return returnValue;
} else {
throw std::logic_error("wrong val");
}
} catch (...) {
VPU_THROW_EXCEPTION << "Wrong value of " << value << " for property key "
<< CONFIG_VALUE(KEY_PERFORMANCE_HINT_NUM_REQUESTS)
VPU_THROW_EXCEPTION << "Wrong value of " << value << " for property key " << ov::hint::num_requests.name()
<< ". Expected only positive integer numbers";
}
}
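
A standalone sketch of the value contract after this change: the default becomes "0" and any non-negative integer is accepted, whereas the previous contract defaulted to "-1" and accepted only strictly positive values or -1:

#include <stdexcept>
#include <string>

int parse_num_requests(const std::string& value) {
    const int parsed = std::stoi(value);      // throws on non-numeric input
    if (parsed < 0) {
        throw std::logic_error("ov::hint::num_requests expects a non-negative integer, got: " + value);
    }
    return parsed;                            // 0 (the new default) leaves the request count uncapped
}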

View File

@ -7,7 +7,6 @@
#include "vpu/configuration/plugin_configuration.hpp"
#include <vpu/myriad_config.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include <unordered_map>
@ -16,17 +15,11 @@ namespace vpu {
namespace {
const std::unordered_map<std::string, ncDeviceProtocol_t>& string2protocol() {
IE_SUPPRESS_DEPRECATED_START
static const std::unordered_map<std::string, ncDeviceProtocol_t> converters = {
{InferenceEngine::MYRIAD_USB, ncDeviceProtocol_t::NC_USB},
{InferenceEngine::MYRIAD_PCIE, ncDeviceProtocol_t::NC_PCIE},
{std::string(), ncDeviceProtocol_t::NC_ANY_PROTOCOL},
// Deprecated
{VPU_MYRIAD_CONFIG_VALUE(USB), ncDeviceProtocol_t::NC_USB},
{VPU_MYRIAD_CONFIG_VALUE(PCIE), ncDeviceProtocol_t::NC_PCIE}
};
IE_SUPPRESS_DEPRECATED_END
return converters;
}

View File

@ -53,9 +53,9 @@
#include <legacy/transformations/convert_opset1_to_legacy/convert_matmul_to_fc_or_gemm.hpp>
#include <legacy/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.hpp>
#include <vpu/ngraph/transformations/extract_dynamic_batch/extract_dynamic_batch.hpp>
#include <vpu/configuration/options/disable_convert_stages.hpp>
#include <vpu/ngraph/transformations/merge_gather_gather_elements.hpp>
#include <transformations/op_conversions/mvn6_decomposition.hpp>
#include <vpu/configuration/options/disable_convert_stages.hpp>
#include <vpu/configuration/options/ignore_unknown_layers.hpp>
#include <vpu/configuration/options/custom_layers.hpp>
#include <vpu/configuration/options/config_file.hpp>

View File

@ -52,6 +52,7 @@
#include <vpu/configuration/options/number_of_cmx_slices.hpp>
#include <vpu/configuration/options/vpu_scales_option.hpp>
#include <vpu/configuration/options/performance_hint.hpp>
#include <vpu/configuration/options/ov_throughput_streams.hpp>
namespace vpu {
@ -92,6 +93,8 @@ void CompileEnv::init(const PluginConfiguration& config, const Logger::Ptr& log)
int numExecutors = 0;
if (config.get<ThroughputStreamsOption>().hasValue()) {
numExecutors = config.get<ThroughputStreamsOption>().get();
} else if (config.get<OvThroughputStreamsOption>().hasValue()) {
numExecutors = config.get<OvThroughputStreamsOption>().get();
} else if (!config.get<PerformanceHintOption>().empty()) {
numExecutors = config.get<PerformanceHintOption>() == CONFIG_VALUE(LATENCY) ? 1 : 2;
}
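
A condensed, standalone sketch (hypothetical names and types) of the executor-count precedence that CompileEnv::init() above and ExecutableNetwork::openDevice() below now share: the legacy MYRIAD_THROUGHPUT_STREAMS value wins, then ov::streams::num, then the performance hint:

#include <optional>
#include <string>

int select_num_executors(std::optional<int> legacy_streams,       // MYRIAD_THROUGHPUT_STREAMS
                         std::optional<int> ov_streams,           // ov::streams::num
                         const std::string& performance_hint) {   // ov::hint::performance_mode
    if (legacy_streams) return *legacy_streams;
    if (ov_streams) return *ov_streams;
    if (!performance_hint.empty())
        return performance_hint == "LATENCY" ? 1 : 2;
    return 0;                                                      // plugin default
}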

View File

@ -118,24 +118,21 @@ Stage StageBuilder::addPermuteStage(
return stage;
}
Stage StageBuilder::addReorderStage(
const Model& model,
const std::string& name,
const ie::CNNLayerPtr& layer,
const Data& input,
const Data& output) {
Stage StageBuilder::addReorderStage(const Model& model,
const std::string& name,
const ie::CNNLayerPtr& layer,
const Data& input,
const Data& output) {
const auto* env = CompileEnv::getOrNull();
VPU_THROW_UNLESS(
env == nullptr || !env->config.get<DisableReorderOption>(),
"Tried to add Reorder Stage %v, while DISABLE_REORDER option was set",
name);
VPU_THROW_UNLESS(env == nullptr || !env->config.get<DisableReorderOption>(),
"Tried to add Reorder Stage %v, while DISABLE_REORDER option was set",
name);
for (const auto& p : input->desc().dims()) {
IE_ASSERT(p.second == output->desc().dim(p.first));
}
PermutationDimsMap permutationMap;
for (const auto & dim : output->desc().dimsOrder().toPermutation()) {
for (const auto& dim : output->desc().dimsOrder().toPermutation()) {
permutationMap.set(dim, dim);
}

View File

@ -20,6 +20,7 @@
#include <vpu/configuration/options/exclusive_async_requests.hpp>
#include <vpu/configuration/options/performance_hint.hpp>
#include "vpu/configuration/options/performance_hint_num_requests.hpp"
#include <vpu/configuration/options/ov_throughput_streams.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
#include <ngraph/opsets/opset3.hpp>
@ -64,6 +65,8 @@ void ExecutableNetwork::openDevice(std::vector<DevicePtr>& devicePool) {
int executors = 0;
if (_config.get<ThroughputStreamsOption>().hasValue()) {
executors = _config.get<ThroughputStreamsOption>().get();
} else if (_config.get<OvThroughputStreamsOption>().hasValue()) {
executors = _config.get<OvThroughputStreamsOption>().get();
} else if (!_config.get<PerformanceHintOption>().empty()) {
executors = _config.get<PerformanceHintOption>() == CONFIG_VALUE(LATENCY) ? 1 : 2;
}
@ -242,7 +245,7 @@ InferenceEngine::Parameter ExecutableNetwork::GetMetric(const std::string &name)
optimalNumOfInferRequests =
_config.get<PerformanceHintOption>() == CONFIG_VALUE(THROUGHPUT) ? optimalNumOfInferRequests : 1;
}
if (_config.get<PerformanceHintNumRequestsOption>() != -1) {
if (_config.get<PerformanceHintNumRequestsOption>() != 0) {
optimalNumOfInferRequests =
std::min(optimalNumOfInferRequests,
static_cast<unsigned int>(_config.get<PerformanceHintNumRequestsOption>()));

View File

@ -15,7 +15,6 @@
#include <ie_common.h>
#include <thread>
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/utils/logger.hpp>
#include <vpu/utils/profiling.hpp>
@ -37,7 +36,6 @@
using namespace vpu::MyriadPlugin;
using namespace InferenceEngine;
using namespace InferenceEngine::VPUConfigParams;
using namespace std;
using namespace vpu;

View File

@ -9,9 +9,11 @@
#include <vpu/utils/error.hpp>
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/intel_myriad/myriad_properties.hpp"
using namespace vpu::MyriadPlugin;
using namespace InferenceEngine;
using namespace VPUConfigParams;
using namespace PluginConfigParams;
//------------------------------------------------------------------------------
@ -34,23 +36,17 @@ MyriadMetrics::MyriadMetrics() {
IE_SUPPRESS_DEPRECATED_START
// TODO: remove once all options are migrated
_supportedConfigKeys = {
MYRIAD_CUSTOM_LAYERS,
MYRIAD_ENABLE_FORCE_RESET,
ov::intel_myriad::custom_layers.name(),
ov::intel_myriad::enable_force_reset.name(),
MYRIAD_ENABLE_MX_BOOT,
// deprecated
KEY_VPU_CUSTOM_LAYERS,
KEY_VPU_MYRIAD_FORCE_RESET,
CONFIG_KEY(CONFIG_FILE),
};
IE_SUPPRESS_DEPRECATED_END
_optimizationCapabilities = { METRIC_VALUE(FP16) };
_optimizationCapabilities = {ov::device::capability::FP16, ov::device::capability::EXPORT_IMPORT};
_rangeForAsyncInferRequests = RangeType(3, 6, 1);
_idToDeviceFullNameMap = {
{"5", "Intel Movidius Myriad 2 VPU"},
{"8", "Intel Movidius Myriad X VPU"},
};
}
@ -119,11 +115,14 @@ RangeType MyriadMetrics::RangeForAsyncInferRequests(
const std::map<std::string, std::string>& config) const {
auto throughput_streams_str = config.find(InferenceEngine::MYRIAD_THROUGHPUT_STREAMS);
if (throughput_streams_str == config.end()) {
throughput_streams_str = config.find(ov::streams::num.name());
}
if (throughput_streams_str != config.end()) {
try {
int throughput_streams = std::stoi(throughput_streams_str->second);
if (throughput_streams > 0) {
return RangeType(throughput_streams+1, throughput_streams*3, 1);
return RangeType(throughput_streams + 1, throughput_streams * 3, 1);
}
}
catch(...) {
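
For illustration, a standalone sketch of RangeForAsyncInferRequests() above: with ov::streams::num set to 2 the returned range is (3, 6, 1), i.e. 3 to 6 async infer requests in steps of 1:

#include <tuple>

std::tuple<int, int, int> range_for_async_requests(int throughput_streams) {
    if (throughput_streams > 0)
        return {throughput_streams + 1, throughput_streams * 3, 1};
    return {3, 6, 1};  // default range set in the MyriadMetrics constructor
}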

View File

@ -9,12 +9,11 @@
#include <ie_metric_helpers.hpp>
#include <cpp/ie_cnn_network.h>
#include <ie_api.h>
#include <cpp_interfaces/interface/ie_iexecutable_network_internal.hpp>
#include <legacy/ie_util_internal.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/frontend/frontend.hpp>
#include <vpu/utils/profiling.hpp>
#include <vpu/utils/error.hpp>
@ -52,10 +51,10 @@
#include <vpu/configuration/options/dump_internal_graph_file_name.hpp>
#include <vpu/configuration/options/dump_all_passes_directory.hpp>
#include <vpu/configuration/options/dump_all_passes.hpp>
#include <vpu/configuration/options/disable_convert_stages.hpp>
#include <vpu/configuration/options/disable_reorder.hpp>
#include <vpu/configuration/options/device_id.hpp>
#include <vpu/configuration/options/device_connect_timeout.hpp>
#include <vpu/configuration/options/disable_convert_stages.hpp>
#include <vpu/configuration/options/disable_reorder.hpp>
#include <vpu/configuration/options/detect_network_batch.hpp>
#include <vpu/configuration/options/custom_layers.hpp>
#include <vpu/configuration/options/config_file.hpp>
@ -67,14 +66,17 @@
#include <vpu/configuration/options/none_layers.hpp>
#include <vpu/configuration/options/enable_async_dma.hpp>
#include <vpu/configuration/options/enable_mx_boot.hpp>
#include "myriad_plugin.h"
#include "vpu/configuration/options/performance_hint.hpp"
#include "vpu/configuration/options/performance_hint_num_requests.hpp"
#include "vpu/configuration/options/ov_throughput_streams.hpp"
#include "myriad_plugin.h"
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/intel_myriad/myriad_properties.hpp"
using namespace InferenceEngine;
using namespace InferenceEngine::PluginConfigParams;
using namespace InferenceEngine::VPUConfigParams;
using namespace vpu::MyriadPlugin;
@ -246,18 +248,10 @@ Engine::Engine(std::shared_ptr<IMvnc> mvnc) :
_parsedConfig.registerOption<EnableMXBootOption>();
_parsedConfig.registerOption<PerformanceHintOption>();
_parsedConfig.registerOption<PerformanceHintNumRequestsOption>();
_parsedConfig.registerOption<OvThroughputStreamsOption>();
IE_SUPPRESS_DEPRECATED_START
_parsedConfig.registerDeprecatedOption<DisableConvertStagesOption>(InferenceEngine::MYRIAD_DISABLE_CONVERT_STAGES);
_parsedConfig.registerDeprecatedOption<DisableReorderOption>(InferenceEngine::MYRIAD_DISABLE_REORDER);
_parsedConfig.registerDeprecatedOption<LogLevelOption>(VPU_CONFIG_KEY(LOG_LEVEL));
_parsedConfig.registerDeprecatedOption<ProtocolOption>(VPU_MYRIAD_CONFIG_KEY(PROTOCOL));
_parsedConfig.registerDeprecatedOption<HwAccelerationOption>(VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION));
_parsedConfig.registerDeprecatedOption<EnableReceivingTensorTimeOption>(VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME));
_parsedConfig.registerDeprecatedOption<DetectNetworkBatchOption>(VPU_CONFIG_KEY(DETECT_NETWORK_BATCH));
_parsedConfig.registerDeprecatedOption<CustomLayersOption>(VPU_CONFIG_KEY(CUSTOM_LAYERS));
_parsedConfig.registerDeprecatedOption<MemoryTypeOption>(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE));
_parsedConfig.registerDeprecatedOption<EnableForceResetOption>(VPU_MYRIAD_CONFIG_KEY(FORCE_RESET));
IE_SUPPRESS_DEPRECATED_END
}
@ -292,45 +286,55 @@ InferenceEngine::Parameter Engine::GetMetric(const std::string& name,
return availableDevices.front();
};
const auto getDeviceByName = [&devicePool](const std::string& deviceName) {
const auto deviceIt = std::find_if(
devicePool.begin(), devicePool.end(), [&deviceName](DevicePtr device) {
return device->_name == deviceName;
});
const auto deviceIt = std::find_if(devicePool.begin(), devicePool.end(), [&deviceName](DevicePtr device) {
return device->_name == deviceName;
});
if (deviceIt == devicePool.end()) {
return DevicePtr();
}
return *deviceIt;
};
if (name == METRIC_KEY(AVAILABLE_DEVICES)) {
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, _metrics->AvailableDevicesNames(_mvnc, _devicePool));
} else if (name == METRIC_KEY(FULL_DEVICE_NAME)) {
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, _metrics->FullName(getSpecifiedDeviceName()));
if (ov::available_devices == name) {
return _metrics->AvailableDevicesNames(_mvnc, _devicePool);
} else if (ov::device::full_name == name) {
return _metrics->FullName(getSpecifiedDeviceName());
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
const auto& supportedMetrics = _metrics->SupportedMetrics();
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{supportedMetrics.cbegin(), supportedMetrics.cend()});
IE_SET_METRIC_RETURN(SUPPORTED_METRICS,
std::vector<std::string>{supportedMetrics.cbegin(), supportedMetrics.cend()});
} else if (ov::supported_properties == name) {
return decltype(ov::supported_properties)::value_type {
ov::available_devices.name(),
ov::device::full_name.name(),
ov::supported_properties.name(),
ov::device::capabilities.name(),
ov::range_for_async_infer_requests.name(),
ov::device::thermal.name(),
ov::device::architecture.name(),
};
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
// TODO: remove once all options are migrated
auto supportedConfigKeys = _metrics->SupportedConfigKeys();
const auto& publicKeys = _parsedConfig.getPublicKeys();
supportedConfigKeys.insert(publicKeys.cbegin(), publicKeys.cend());
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{supportedConfigKeys.cbegin(), supportedConfigKeys.cend()});
} else if (name == METRIC_KEY(OPTIMIZATION_CAPABILITIES)) {
const auto& optimizationCapabilities = _metrics->OptimizationCapabilities();
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{optimizationCapabilities.cbegin(), optimizationCapabilities.cend()});
} else if (name == METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)) {
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, _metrics->RangeForAsyncInferRequests(_config));
} else if (name == METRIC_KEY(DEVICE_ARCHITECTURE)) {
IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, _metrics->DeviceArchitecture(options));
} else if (name == METRIC_KEY(IMPORT_EXPORT_SUPPORT)) {
IE_SET_METRIC_RETURN(IMPORT_EXPORT_SUPPORT, true);
} else if (name == METRIC_KEY(DEVICE_THERMAL)) {
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS,
std::vector<std::string>{supportedConfigKeys.cbegin(), supportedConfigKeys.cend()});
} else if (ov::device::capabilities == name) {
return std::vector<std::string> {_metrics->OptimizationCapabilities().begin(), _metrics->OptimizationCapabilities().end()};
} else if (ov::range_for_async_infer_requests == name) {
return _metrics->RangeForAsyncInferRequests(_config);
} else if (ov::device::architecture == name) {
return _metrics->DeviceArchitecture(options);
} else if (ov::device::thermal == name) {
const auto& device = getDeviceByName(getSpecifiedDeviceName());
if (device != nullptr) {
IE_SET_METRIC_RETURN(DEVICE_THERMAL, _metrics->DevicesThermal(device));
return _metrics->DevicesThermal(device);
} else {
return Parameter();
}
} else if (name == METRIC_KEY(IMPORT_EXPORT_SUPPORT)) {
IE_SET_METRIC_RETURN(IMPORT_EXPORT_SUPPORT, true);
}
IE_THROW(NotImplemented);
}
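
An application-side sketch, with the "MYRIAD" device name assumed, of querying the OV 2.0 properties that Engine::GetMetric() now serves directly:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    const auto supported = core.get_property("MYRIAD", ov::supported_properties);
    const auto full_name = core.get_property("MYRIAD", ov::device::full_name);
    const auto caps = core.get_property("MYRIAD", ov::device::capabilities);  // expect FP16 and EXPORT_IMPORT
    (void)supported; (void)full_name; (void)caps;
    return 0;
}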

View File

@ -4,7 +4,6 @@
#include "behavior/infer_request/config.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include "vpu/private_plugin_config.hpp"
#include "vpu/myriad_config.hpp"
@ -48,16 +47,6 @@ namespace {
{{InferenceEngine::MYRIAD_ENABLE_WEIGHTS_ANALYSIS, CONFIG_VALUE(YES)}},
{{InferenceEngine::MYRIAD_ENABLE_WEIGHTS_ANALYSIS, CONFIG_VALUE(NO)}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}},
};
const std::vector<std::map<std::string, std::string>> inferMultiConfigs = {
@ -65,10 +54,6 @@ namespace {
{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}},
// Deprecated
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestConfigTest,

View File

@ -3,7 +3,9 @@
//
#include "behavior/ov_plugin/core_integration.hpp"
#include <gtest/gtest.h>
#include <algorithm>
#include <functional_test_utils/skip_tests_config.hpp>
#include "common_test_utils/file_utils.hpp"
@ -36,13 +38,13 @@ using OVClassNetworkTestP_VPU_GetMetric = OVClassNetworkTestP;
TEST_P(OVClassNetworkTestP_VPU_GetMetric, smoke_OptimizationCapabilitiesReturnsFP16) {
ov::Core ie;
OV_ASSERT_PROPERTY_SUPPORTED(ov::device::capabilities)
std::vector<std::string> device_capabilities;
ASSERT_NO_THROW(device_capabilities =
ie.get_property(deviceName, ov::device::capabilities));
ASSERT_EQ(device_capabilities.size(), 1);
ASSERT_EQ(device_capabilities.front(), ov::device::capability::FP16);
ASSERT_NO_THROW(device_capabilities = ie.get_property(deviceName, ov::device::capabilities));
ASSERT_EQ(device_capabilities.size(), 2);
ASSERT_NE(std::find(device_capabilities.begin(), device_capabilities.end(), ov::device::capability::EXPORT_IMPORT),
device_capabilities.end());
ASSERT_NE(std::find(device_capabilities.begin(), device_capabilities.end(), ov::device::capability::FP16),
device_capabilities.end());
}
INSTANTIATE_TEST_SUITE_P(smoke_OVClassGetMetricP, OVClassNetworkTestP_VPU_GetMetric, ::testing::ValuesIn(devices));

View File

@ -3,7 +3,6 @@
//
#include <ie_plugin_config.hpp>
#include "vpu/vpu_plugin_config.hpp"
#include "vpu/private_plugin_config.hpp"
#include "vpu/utils/optional.hpp"
#include "behavior/plugin/configuration_tests.hpp"
@ -151,30 +150,6 @@ std::vector<std::map<std::string, std::string>> getCorrectConfigs() {
{{InferenceEngine::MYRIAD_ENABLE_ASYNC_DMA, CONFIG_VALUE(YES)}},
{{InferenceEngine::MYRIAD_ENABLE_ASYNC_DMA, CONFIG_VALUE(NO)}},
// Deprecated
{{VPU_CONFIG_KEY(LOG_LEVEL), LOG_NONE}},
{{VPU_CONFIG_KEY(LOG_LEVEL), LOG_ERROR}},
{{VPU_CONFIG_KEY(LOG_LEVEL), LOG_WARNING}},
{{VPU_CONFIG_KEY(LOG_LEVEL), LOG_INFO}},
{{VPU_CONFIG_KEY(LOG_LEVEL), LOG_DEBUG}},
{{VPU_CONFIG_KEY(LOG_LEVEL), LOG_TRACE}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(CUSTOM_LAYERS), ""}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}},
{
{KEY_LOG_LEVEL, LOG_INFO},
{InferenceEngine::MYRIAD_COPY_OPTIMIZATION, CONFIG_VALUE(NO)},
@ -225,12 +200,10 @@ std::vector<std::map<std::string, std::string>> getCorrectConfigs() {
MyriadDevicesInfo info;
if (info.getAmountOfDevices(ncDeviceProtocol_t::NC_PCIE) > 0) {
correctConfigs.emplace_back(std::map<std::string, std::string>{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}});
correctConfigs.emplace_back(std::map<std::string, std::string>{{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}});
}
if (info.getAmountOfDevices(ncDeviceProtocol_t::NC_USB) > 0) {
correctConfigs.emplace_back(std::map<std::string, std::string>{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}});
correctConfigs.emplace_back(std::map<std::string, std::string>{{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}});
}
@ -332,13 +305,6 @@ const std::vector<std::tuple<std::string, std::string, InferenceEngine::Paramete
std::make_tuple(KEY_LOG_LEVEL, LOG_DEBUG, InferenceEngine::Parameter{LOG_DEBUG}),
std::make_tuple(KEY_LOG_LEVEL, LOG_TRACE, InferenceEngine::Parameter{LOG_TRACE}),
std::make_tuple(VPU_CONFIG_KEY(LOG_LEVEL), LOG_NONE, InferenceEngine::Parameter{LOG_NONE}),
std::make_tuple(VPU_CONFIG_KEY(LOG_LEVEL), LOG_ERROR, InferenceEngine::Parameter{LOG_ERROR}),
std::make_tuple(VPU_CONFIG_KEY(LOG_LEVEL), LOG_WARNING, InferenceEngine::Parameter{LOG_WARNING}),
std::make_tuple(VPU_CONFIG_KEY(LOG_LEVEL), LOG_INFO, InferenceEngine::Parameter{LOG_INFO}),
std::make_tuple(VPU_CONFIG_KEY(LOG_LEVEL), LOG_DEBUG, InferenceEngine::Parameter{LOG_DEBUG}),
std::make_tuple(VPU_CONFIG_KEY(LOG_LEVEL), LOG_TRACE, InferenceEngine::Parameter{LOG_TRACE}),
std::make_tuple(InferenceEngine::MYRIAD_COPY_OPTIMIZATION, InferenceEngine::PluginConfigParams::YES,
InferenceEngine::Parameter{true}),
std::make_tuple(InferenceEngine::MYRIAD_COPY_OPTIMIZATION, InferenceEngine::PluginConfigParams::NO,
@ -349,11 +315,6 @@ const std::vector<std::tuple<std::string, std::string, InferenceEngine::Paramete
std::make_tuple(InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE,
InferenceEngine::Parameter{InferenceEngine::MYRIAD_PCIE}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(USB)}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(PCIE)}),
std::make_tuple(InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_FULL,
InferenceEngine::Parameter{InferenceEngine::MYRIAD_POWER_FULL}),
std::make_tuple(InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_INFER,
@ -370,9 +331,6 @@ const std::vector<std::tuple<std::string, std::string, InferenceEngine::Paramete
std::make_tuple(InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, InferenceEngine::PluginConfigParams::NO,
InferenceEngine::Parameter{false}),
std::make_tuple(VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES), InferenceEngine::Parameter{true}),
std::make_tuple(VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO), InferenceEngine::Parameter{false}),
std::make_tuple(InferenceEngine::MYRIAD_HW_EXTRA_SPLIT, InferenceEngine::PluginConfigParams::YES,
InferenceEngine::Parameter{true}),
std::make_tuple(InferenceEngine::MYRIAD_HW_EXTRA_SPLIT, InferenceEngine::PluginConfigParams::NO,
@ -422,9 +380,6 @@ const std::vector<std::tuple<std::string, std::string, InferenceEngine::Paramete
std::make_tuple(InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, InferenceEngine::PluginConfigParams::NO,
InferenceEngine::Parameter{false}),
std::make_tuple(VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES), InferenceEngine::Parameter{true}),
std::make_tuple(VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO), InferenceEngine::Parameter{false}),
std::make_tuple(InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_LAYER,
InferenceEngine::Parameter{InferenceEngine::MYRIAD_PER_LAYER}),
std::make_tuple(InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE,
@ -510,9 +465,6 @@ const std::vector<std::tuple<std::string, std::string, InferenceEngine::Paramete
std::make_tuple(InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, InferenceEngine::PluginConfigParams::NO,
InferenceEngine::Parameter{false}),
std::make_tuple(VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), CONFIG_VALUE(YES), InferenceEngine::Parameter{true}),
std::make_tuple(VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), CONFIG_VALUE(NO), InferenceEngine::Parameter{false}),
std::make_tuple(InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO,
InferenceEngine::Parameter{InferenceEngine::MYRIAD_DDR_AUTO}),
std::make_tuple(InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_MICRON_2GB,
@ -524,25 +476,11 @@ const std::vector<std::tuple<std::string, std::string, InferenceEngine::Paramete
std::make_tuple(InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_MICRON_1GB,
InferenceEngine::Parameter{InferenceEngine::MYRIAD_DDR_MICRON_1GB}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB)}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB)}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB)}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB),
InferenceEngine::Parameter{VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB)}),
std::make_tuple(InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, InferenceEngine::PluginConfigParams::YES,
InferenceEngine::Parameter{true}),
std::make_tuple(InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, InferenceEngine::PluginConfigParams::NO,
InferenceEngine::Parameter{false}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES), InferenceEngine::Parameter{true}),
std::make_tuple(VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO), InferenceEngine::Parameter{false}),
std::make_tuple(InferenceEngine::MYRIAD_CHECK_PREPROCESSING_INSIDE_MODEL, InferenceEngine::PluginConfigParams::YES,
InferenceEngine::Parameter{true}),
std::make_tuple(InferenceEngine::MYRIAD_CHECK_PREPROCESSING_INSIDE_MODEL, InferenceEngine::PluginConfigParams::NO,
@ -577,13 +515,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectSingleOptionCustomValueConf
const std::vector<std::string>& getPublicOptions() {
static const std::vector<std::string> publicOptions = {
KEY_LOG_LEVEL,
VPU_CONFIG_KEY(LOG_LEVEL),
InferenceEngine::MYRIAD_PROTOCOL,
VPU_MYRIAD_CONFIG_KEY(PROTOCOL),
InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION,
VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME,
VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME),
KEY_PERF_COUNT,
KEY_PERFORMANCE_HINT,
KEY_PERFORMANCE_HINT_NUM_REQUESTS,
@ -591,12 +525,9 @@ const std::vector<std::string>& getPublicOptions() {
KEY_EXCLUSIVE_ASYNC_REQUESTS,
KEY_DEVICE_ID,
InferenceEngine::MYRIAD_CUSTOM_LAYERS,
VPU_CONFIG_KEY(CUSTOM_LAYERS),
KEY_CONFIG_FILE,
InferenceEngine::MYRIAD_DDR_TYPE,
VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE),
InferenceEngine::MYRIAD_ENABLE_FORCE_RESET,
VPU_MYRIAD_CONFIG_KEY(FORCE_RESET),
};
return publicOptions;
}
@ -637,7 +568,6 @@ const std::vector<std::string>& getPrivateOptions() {
InferenceEngine::MYRIAD_DISABLE_REORDER,
InferenceEngine::MYRIAD_DEVICE_CONNECT_TIMEOUT,
InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH,
VPU_CONFIG_KEY(DETECT_NETWORK_BATCH),
InferenceEngine::MYRIAD_CHECK_PREPROCESSING_INSIDE_MODEL,
InferenceEngine::MYRIAD_ENABLE_EARLY_ELTWISE_RELU_FUSION,
InferenceEngine::MYRIAD_ENABLE_CUSTOM_RESHAPE_PARAM,
@ -767,28 +697,6 @@ const std::vector<std::map<std::string, std::string>>& getIncorrectConfigs() {
{{InferenceEngine::MYRIAD_ENABLE_ASYNC_DMA, "ON"}},
{{InferenceEngine::MYRIAD_ENABLE_ASYNC_DMA, "OFF"}},
// Deprecated
{{VPU_CONFIG_KEY(LOG_LEVEL), "INCORRECT_LOG_LEVEL"}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "BLUETOOTH"}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "LAN"}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "OFF"}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "ON"}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "OFF"}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "ON"}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}},
{{VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), "ON"}},
{{VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), "OFF"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "AUTO"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "2GB"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "1GB"}},
{
{KEY_LOG_LEVEL, LOG_INFO},
{InferenceEngine::MYRIAD_COPY_OPTIMIZATION, "ON"},
@ -873,28 +781,6 @@ const std::vector<std::map<std::string, std::string>>& getIncorrectMultiConfigs(
{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{InferenceEngine::MYRIAD_DDR_TYPE, "1GB"}
},
// Deprecated
{
{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(LOG_LEVEL), "INCORRECT_LOG_LEVEL"},
},
{
{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "BLUETOOTH"}
},
{
{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}
},
{
{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}
},
{
{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "1GB"}
},
};
return incorrectMultiConfigs;
}
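The deprecated VPU_CONFIG_KEY / VPU_MYRIAD_CONFIG_KEY entries dropped above all have direct replacements among the InferenceEngine::MYRIAD_* keys that remain in the correct-config lists. A minimal sketch of applying a few of them through the classic API, assuming a "MYRIAD" device is present (illustrative only, not taken from this commit):

#include <inference_engine.hpp>
#include <vpu/myriad_config.hpp>
#include <map>
#include <string>

int main() {
    InferenceEngine::Core ie;
    const std::map<std::string, std::string> config = {
        {InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB},
        {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, InferenceEngine::PluginConfigParams::YES},
        {InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, InferenceEngine::PluginConfigParams::NO},
    };
    ie.SetConfig(config, "MYRIAD");  // replaces the removed VPU_CONFIG_KEY(...) spellings
    return 0;
}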

View File

@ -3,6 +3,7 @@
//
#include <functional_test_utils/skip_tests_config.hpp>
#include <openvino/runtime/properties.hpp>
#include "behavior/plugin/core_integration.hpp"
#include "common_test_utils/file_utils.hpp"
@ -45,8 +46,13 @@ TEST_P(IEClassNetworkTestP_VPU_GetMetric, smoke_OptimizationCapabilitiesReturnsF
ASSERT_NO_THROW(optimizationCapabilitiesParameter = ie.GetMetric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES)));
const auto optimizationCapabilities = optimizationCapabilitiesParameter.as<std::vector<std::string>>();
ASSERT_EQ(optimizationCapabilities.size(), 1);
ASSERT_EQ(optimizationCapabilities.front(), METRIC_VALUE(FP16));
ASSERT_EQ(optimizationCapabilities.size(), 2);
ASSERT_NE(std::find(optimizationCapabilities.begin(),
optimizationCapabilities.end(),
ov::device::capability::EXPORT_IMPORT),
optimizationCapabilities.end());
ASSERT_NE(std::find(optimizationCapabilities.begin(), optimizationCapabilities.end(), ov::device::capability::FP16),
optimizationCapabilities.end());
}
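For the classic API path exercised above, the check boils down to reading METRIC_KEY(OPTIMIZATION_CAPABILITIES) and expecting both capability strings. A hedged sketch, with the device name assumed:

#include <inference_engine.hpp>
#include <ie_plugin_config.hpp>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core ie;
    const auto caps = ie.GetMetric("MYRIAD", METRIC_KEY(OPTIMIZATION_CAPABILITIES))
                          .as<std::vector<std::string>>();
    return caps.size() == 2 ? 0 : 1;  // EXPORT_IMPORT and FP16 after this change
}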
INSTANTIATE_TEST_SUITE_P(

View File

@ -5,7 +5,6 @@
#include "single_layer_tests/nonzero.hpp"
#include "common/myriad_common_test_utils.hpp"
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vector>

View File

@ -7,7 +7,6 @@
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include <vpu/myriad_plugin_config.hpp>
namespace {

View File

@ -5,7 +5,6 @@
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <ngraph_functions/builders.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
#include <vpu/myriad_plugin_config.hpp>
namespace {

View File

@ -55,6 +55,7 @@
#include <vpu/configuration/options/enable_async_dma.hpp>
#include <vpu/configuration/options/performance_hint.hpp>
#include "vpu/configuration/options/performance_hint_num_requests.hpp"
#include "vpu/configuration/options/ov_throughput_streams.hpp"
#include <atomic>
#include <iomanip>
@ -443,18 +444,10 @@ PluginConfiguration createConfiguration() {
configuration.registerOption<EnableAsyncDMAOption>();
configuration.registerOption<PerformanceHintOption>();
configuration.registerOption<PerformanceHintNumRequestsOption>();
configuration.registerOption<OvThroughputStreamsOption>();
IE_SUPPRESS_DEPRECATED_START
configuration.registerDeprecatedOption<DisableConvertStagesOption>(InferenceEngine::MYRIAD_DISABLE_CONVERT_STAGES);
configuration.registerDeprecatedOption<DisableReorderOption>(InferenceEngine::MYRIAD_DISABLE_REORDER);
configuration.registerDeprecatedOption<LogLevelOption>(VPU_CONFIG_KEY(LOG_LEVEL));
configuration.registerDeprecatedOption<ProtocolOption>(VPU_MYRIAD_CONFIG_KEY(PROTOCOL));
configuration.registerDeprecatedOption<HwAccelerationOption>(VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION));
configuration.registerDeprecatedOption<EnableReceivingTensorTimeOption>(VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME));
configuration.registerDeprecatedOption<DetectNetworkBatchOption>(VPU_CONFIG_KEY(DETECT_NETWORK_BATCH));
configuration.registerDeprecatedOption<CustomLayersOption>(VPU_CONFIG_KEY(CUSTOM_LAYERS));
configuration.registerDeprecatedOption<MemoryTypeOption>(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE));
configuration.registerDeprecatedOption<EnableForceResetOption>(VPU_MYRIAD_CONFIG_KEY(FORCE_RESET));
IE_SUPPRESS_DEPRECATED_END
return configuration;

View File

@ -9,7 +9,6 @@
#include <tests_common.hpp>
#include <inference_engine.hpp>
#include <ie_plugin_config.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include <gna/gna_config.hpp>
#include <common_test_utils/test_assertions.hpp>
#include <memory>
@ -22,7 +21,6 @@ using namespace ::testing;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
using namespace InferenceEngine::PluginConfigParams;
using namespace InferenceEngine::VPUConfigParams;
using namespace InferenceEngine::GNAConfigParams;
class BehTestParams {

View File

@ -4,7 +4,6 @@
#include <gtest/gtest.h>
#include <inference_engine.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include "behavior_test_plugin.h"
class VPUGetMetric : public testing::Test {

View File

@ -80,9 +80,6 @@ const BehTestParams allUnSupportedValues[] = {
const std::vector<BehTestParams> deviceSpecificConfigurations = {
BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}}),
BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}}),
};
const std::vector<BehTestParams> deviceAgnosticConfigurations = {
@ -118,24 +115,6 @@ const std::vector<BehTestParams> deviceAgnosticConfigurations = {
// Please do not use other types of DDR in tests with a real device, because it may hang.
BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO}}),
// Deprecated
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}}),
BEH_MULTI_CONFIG.withConfig({
{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}
}),
// Please do not use other types of DDR in tests with a real device, because it may hang.
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}}),
};
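The device-specific and device-agnostic parameter lists above now carry only the InferenceEngine::MYRIAD_* spellings. A rough, non-authoritative sketch of loading a network with the surviving keys; the model path and device name are placeholders:

#include <inference_engine.hpp>
#include <vpu/myriad_config.hpp>
#include <map>
#include <string>

int main() {
    InferenceEngine::Core ie;
    auto network = ie.ReadNetwork("model.xml");  // placeholder model path
    const std::map<std::string, std::string> config = {
        {InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB},
        // Keep DDR auto-detection, as the test comment above advises for real devices.
        {InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO},
    };
    auto executable = ie.LoadNetwork(network, "MYRIAD", config);
    return 0;
}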
const std::vector<BehTestParams> withCorrectConfValuesPluginOnly = {
@ -169,25 +148,6 @@ const BehTestParams withIncorrectConfValues[] = {
BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_DDR_TYPE, "-1"}}),
BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_DDR_TYPE, "MICRON"}}),
// Deprecated
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "BLUETOOTH"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "LAN"}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "OFF"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "ON"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "OFF"}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "ON"}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}}),
BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),"ON"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "MICRON"}}),
};
const BehTestParams withIncorrectConfKeys[] = {

View File

@ -690,7 +690,7 @@ public:
// Disable HW pooling
std::map<std::string, std::string> networkConfig;
networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "NO";
networkConfig[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = "NO";
ExecutableNetwork exeNetwork;
ASSERT_NO_THROW(exeNetwork = _vpuPluginPtr->LoadNetwork(network_part, networkConfig));
@ -744,7 +744,7 @@ public:
// Disable HW pooling
std::map<std::string, std::string> networkConfig;
networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "NO";
networkConfig[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = "NO";
ASSERT_NO_THROW(_exeNetwork = _vpuPluginPtr->LoadNetwork(network, networkConfig));
ASSERT_NO_THROW(_inferRequest = _exeNetwork.CreateInferRequest());
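Both hunks above swap the ad-hoc "VPU_HW_STAGES_OPTIMIZATION" string for the published MYRIAD_ENABLE_HW_ACCELERATION key. A small hedged sketch of the same idea outside the test fixture; the helper name and device name are invented for illustration:

#include <inference_engine.hpp>
#include <vpu/myriad_config.hpp>
#include <map>
#include <string>

// Hypothetical helper: compile a network with HW stages (and thus HW pooling) disabled.
InferenceEngine::ExecutableNetwork loadWithoutHwStages(InferenceEngine::Core& ie,
                                                       const InferenceEngine::CNNNetwork& network) {
    std::map<std::string, std::string> config;
    config[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = InferenceEngine::PluginConfigParams::NO;
    return ie.LoadNetwork(network, "MYRIAD", config);
}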

View File

@ -3,6 +3,7 @@
//
#include "vpu_case_common.hpp"
#include "ie_plugin_config.hpp"
bool CheckMyriadX() {
if (auto envVar = std::getenv("IE_VPU_MYRIADX")) {

View File

@ -11,7 +11,6 @@
#include <string>
#include <precision_utils.h>
#include <tests_common.hpp>
#include <vpu/vpu_plugin_config.hpp>
#include "vpu_case_params.hpp"
#include "vpu_param_containers.hpp"

View File

@ -3,6 +3,7 @@
//
#include "vpu_classification_case.hpp"
#include "vpu/private_plugin_config.hpp"
#include "functional_test_utils/plugin_cache.hpp"
//------------------------------------------------------------------------------
@ -37,8 +38,8 @@ void VpuNoClassificationRegression::InitConfig() {
VpuNoRegressionBase::InitConfig();
if (resources_ != -1) {
config_["VPU_NUMBER_OF_CMX_SLICES"] = std::to_string(resources_);
config_["VPU_NUMBER_OF_SHAVES"] = std::to_string(resources_);
config_[InferenceEngine::MYRIAD_NUMBER_OF_CMX_SLICES] = std::to_string(resources_);
config_[InferenceEngine::MYRIAD_NUMBER_OF_SHAVES] = std::to_string(resources_);
}
}

View File

@ -39,9 +39,9 @@
#include <vpu/configuration/options/dump_internal_graph_file_name.hpp>
#include <vpu/configuration/options/dump_all_passes_directory.hpp>
#include <vpu/configuration/options/dump_all_passes.hpp>
#include <vpu/configuration/options/device_id.hpp>
#include <vpu/configuration/options/disable_convert_stages.hpp>
#include <vpu/configuration/options/disable_reorder.hpp>
#include <vpu/configuration/options/device_id.hpp>
#include <vpu/configuration/options/device_connect_timeout.hpp>
#include <vpu/configuration/options/detect_network_batch.hpp>
#include <vpu/configuration/options/custom_layers.hpp>
@ -55,6 +55,7 @@
#include <vpu/configuration/options/enable_async_dma.hpp>
#include <vpu/configuration/options/performance_hint.hpp>
#include "vpu/configuration/options/performance_hint_num_requests.hpp"
#include "vpu/configuration/options/ov_throughput_streams.hpp"
using namespace InferenceEngine;
using namespace vpu;
@ -137,20 +138,14 @@ void graphTransformerFunctionalTests::PrepareGraphCompilation() {
_configuration.registerOption<EnableAsyncDMAOption>();
_configuration.registerOption<PerformanceHintOption>();
_configuration.registerOption<PerformanceHintNumRequestsOption>();
_configuration.registerOption<OvThroughputStreamsOption>();
IE_SUPPRESS_DEPRECATED_START
_configuration.registerDeprecatedOption<DisableConvertStagesOption>(InferenceEngine::MYRIAD_DISABLE_CONVERT_STAGES);
_configuration.registerDeprecatedOption<DisableReorderOption>(InferenceEngine::MYRIAD_DISABLE_REORDER);
_configuration.registerDeprecatedOption<LogLevelOption>(VPU_CONFIG_KEY(LOG_LEVEL));
_configuration.registerDeprecatedOption<ProtocolOption>(VPU_MYRIAD_CONFIG_KEY(PROTOCOL));
_configuration.registerDeprecatedOption<HwAccelerationOption>(VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION));
_configuration.registerDeprecatedOption<EnableReceivingTensorTimeOption>(VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME));
_configuration.registerDeprecatedOption<DetectNetworkBatchOption>(VPU_CONFIG_KEY(DETECT_NETWORK_BATCH));
_configuration.registerDeprecatedOption<CustomLayersOption>(VPU_CONFIG_KEY(CUSTOM_LAYERS));
_configuration.registerDeprecatedOption<MemoryTypeOption>(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE));
_configuration.registerDeprecatedOption<EnableForceResetOption>(VPU_MYRIAD_CONFIG_KEY(FORCE_RESET));
IE_SUPPRESS_DEPRECATED_END
_inputsInfo.clear();
_outputsInfo.clear();
_inputMap.clear();

View File

@ -61,18 +61,11 @@ TEST_P(myriadIncorrectModelsConfigsTests_nightly, LoadNetworkWithIncorrectConfig
static const std::vector<config_t> myriadCorrectPackageTypeConfigValues = {
// Please do not use other types of DDR in tests with a real device, because it may hang.
{{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}}
};
static const std::vector<config_t> myriadIncorrectPackageTypeConfigValues = {
{{InferenceEngine::MYRIAD_DDR_TYPE, "-1"}},
{{InferenceEngine::MYRIAD_DDR_TYPE, "-MICRON_1GB"}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-MICRON_1GB"}},
};
INSTANTIATE_TEST_SUITE_P(MyriadPackageConfigs, myriadCorrectModelsConfigsTests_nightly,

View File

@ -12,7 +12,6 @@
#include <tuple>
#include "tests_common.hpp"
#include "single_layer_common.hpp"
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include "myriad_layers_reference_functions.hpp"
#include "xml_net_builder.hpp"
@ -126,12 +125,6 @@ public:
_pad_val = std::get<2>(params);
_stride_val = std::get<3>(params);
#if 0 // 4DGP
// TODO: make it the test argument
_config[VPU_CONFIG_KEY(COMPUTE_LAYOUT)] = VPU_CONFIG_VALUE(NCHW);
// _config[VPU_CONFIG_KEY(COMPUTE_LAYOUT)] = VPU_CONFIG_VALUE(NHWC);
#endif
_params["kernel-x"] = std::to_string(_kernel_val.x);
_params["kernel-y"] = std::to_string(_kernel_val.y);
_params["stride-x"] = std::to_string(_stride_val.x);

View File

@ -12,7 +12,6 @@
#include <ie_version.hpp>
#include <precision_utils.h>
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vpu/utils/enums.hpp>
#include <vpu/utils/ie_helpers.hpp>

View File

@ -12,7 +12,6 @@
#include <ie_precision.hpp>
#include "single_layer_common.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include <vpu/private_plugin_config.hpp>

View File

@ -57,6 +57,7 @@
#include <vpu/configuration/options/enable_async_dma.hpp>
#include "vpu/configuration/options/performance_hint.hpp"
#include "vpu/configuration/options/performance_hint_num_requests.hpp"
#include "vpu/configuration/options/ov_throughput_streams.hpp"
namespace vpu {
@ -255,20 +256,11 @@ PluginConfiguration createConfiguration() {
configuration.registerOption<EnableAsyncDMAOption>();
configuration.registerOption<PerformanceHintOption>();
configuration.registerOption<PerformanceHintNumRequestsOption>();
configuration.registerOption<OvThroughputStreamsOption>();
IE_SUPPRESS_DEPRECATED_START
configuration.registerDeprecatedOption<DisableConvertStagesOption>(InferenceEngine::MYRIAD_DISABLE_CONVERT_STAGES);
configuration.registerDeprecatedOption<DisableReorderOption>(InferenceEngine::MYRIAD_DISABLE_REORDER);
configuration.registerDeprecatedOption<LogLevelOption>(VPU_CONFIG_KEY(LOG_LEVEL));
configuration.registerDeprecatedOption<ProtocolOption>(VPU_MYRIAD_CONFIG_KEY(PROTOCOL));
configuration.registerDeprecatedOption<HwAccelerationOption>(VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION));
configuration.registerDeprecatedOption<EnableReceivingTensorTimeOption>(VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME));
configuration.registerDeprecatedOption<DetectNetworkBatchOption>(VPU_CONFIG_KEY(DETECT_NETWORK_BATCH));
configuration.registerDeprecatedOption<CustomLayersOption>(VPU_CONFIG_KEY(CUSTOM_LAYERS));
configuration.registerDeprecatedOption<MemoryTypeOption>(VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE));
configuration.registerDeprecatedOption<EnableForceResetOption>(VPU_MYRIAD_CONFIG_KEY(FORCE_RESET));
IE_SUPPRESS_DEPRECATED_END
return configuration;
}

View File

@ -6,7 +6,6 @@
#include "myriad_test_case.h"
#include "vpu/myriad_config.hpp"
#include "vpu/vpu_plugin_config.hpp"
#include "vpu/private_plugin_config.hpp"
using MyriadEngineSetCorrectConfigTest = MyriadEngineSetConfigTest;
@ -20,33 +19,20 @@ TEST_P(MyriadEngineSetIncorrectConfigTest, SetIncorrectConfig) {
ASSERT_ANY_THROW(myriad_engine_->SetConfig(GetParam()));
}
IE_SUPPRESS_DEPRECATED_START
static const std::vector<config_t> myriadCorrectProtocolConfigValues = {
{{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}},
{{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}},
{{InferenceEngine::MYRIAD_PROTOCOL, ""}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}},
};
static const std::vector<config_t> myriadIncorrectProtocolConfigValues = {
// Protocols
{{InferenceEngine::MYRIAD_PROTOCOL, "0"}},
{{InferenceEngine::MYRIAD_PROTOCOL, "PCI"}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "0"}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "PCI"}},
};
static const std::vector<config_t> myriadCorrectConfigCombinationValues = {
{{InferenceEngine::MYRIAD_PROTOCOL, ""},
// Deprecated
{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}}
{{InferenceEngine::MYRIAD_PROTOCOL, ""}}
};
static const std::vector<config_t> myriadIncorrectPowerConfigValues = {
@ -70,26 +56,13 @@ static const std::vector<config_t> myriadCorrectPackageTypeConfigValues = {
{{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_SAMSUNG_2GB}},
{{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_HYNIX_2GB}},
{{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_MICRON_1GB}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB)}},
};
static const std::vector<config_t> myriadIncorrectPackageTypeConfigValues = {
{{InferenceEngine::MYRIAD_DDR_TYPE, "-1"}},
{{InferenceEngine::MYRIAD_DDR_TYPE, "-MICRON_1GB"}},
// Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-MICRON_1GB"}},
};
IE_SUPPRESS_DEPRECATED_END
/// Protocol
INSTANTIATE_TEST_SUITE_P(MyriadProtocolConfigs, MyriadEngineSetCorrectConfigTest,
::testing::ValuesIn(myriadCorrectProtocolConfigValues));

View File

@ -7,12 +7,10 @@
#include "myriad_test_case.h"
#include <memory>
#include "vpu/vpu_plugin_config.hpp"
#include "vpu/myriad_config.hpp"
using namespace InferenceEngine;
using namespace InferenceEngine::PluginConfigParams;
using namespace InferenceEngine::VPUConfigParams;
using str_vector = std::vector<std::string>;

View File

@ -16,7 +16,6 @@
#include "inference_engine.hpp"
#include "openvino/openvino.hpp"
#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vpu/utils/string.hpp>