2022-01-19 01:07:49 +03:00
|
|
|
// Copyright (C) 2018-2022 Intel Corporation
|
2019-08-09 19:02:42 +03:00
|
|
|
// SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
//
|
|
|
|
|
|
2021-12-30 19:09:12 +03:00
|
|
|
#include <format_reader_ptr.h>
|
|
|
|
|
|
2019-08-09 19:02:42 +03:00
|
|
|
#include <algorithm>
|
|
|
|
|
#include <map>
|
2022-01-18 11:22:47 +03:00
|
|
|
#include <nlohmann/json.hpp>
|
2020-05-13 21:12:22 +03:00
|
|
|
#include <regex>
|
2021-04-22 14:02:54 +03:00
|
|
|
#include <string>
|
|
|
|
|
#include <utility>
|
|
|
|
|
#include <vector>
|
2019-08-09 19:02:42 +03:00
|
|
|
|
Dynamic reshapes (#7788)
* Merged and compiling
* Fix for dynamic shape type
* review fixes
* renamed blob shape to tensor shape, small improvements
* fix code style
* added parsing of multiple shapes
* store latency per group, add isIdleRequestAvailable() to Infer Queue
* added cached random inputs
* redesign pipeline, added new metrics(avg, max, min), added metrics per groups
* fixed code style
* small improvements
* modified tensor parameters parsing
* modified -i parameter parsing: added possibility to specify input names
* implemented image cashing
* added cashed blobs creating
* added -pcseq flag, modified batch filling, changes fps formula
* improvements
* code formatting
* code formatting2
* apply suggestions from review
* replaced Buffer class with InferenceEngine Blobs
* use batch size in blobs filling
* added shared blob allocator to handle blob's data
* fixed warnings & code style
* allocate blobs
* fix for networks with image info input
* added comments & fixed codestyle
* clear data in free() in SharedBlobAllocator
* remove unnecessary check
* Delimeter is changed to ::
* stylefix
* added layout from string function, small improvements
* modified parsing to enable : in input parameters
* small fixes
* small fixes
* added missed blob allocation, fixes
* [TEST]added support for remote blobs
* fix remote blobs
* new inputs/files output format
* removed vectors resize which caused bugs
* made cl::Buffer type under ifdef, fix inputs filling
* changed batch() function to not throwing exceptions
* removed unused var
* fix code style
* replace empty name in input files with name from net input
* restored old behaviour for static models
* fix code style
* fix warning - made const iterator
* fix warning - remove reference in loop variable
* added random and image_info input types to -i, fix problem with layout
* replaced batch() with getBatchSize() in main
* fix layout, shape, tensor shape parameters parsing
* upd help messages for input, tensor shape and pcseq command
* added buffer for cl output blobs, small fixes
Signed-off-by: ivikhrev <ivan.vikhrev@intel.com>
* added legacy mode
* restore setBlob
* code style formatting
* move collecting latency for groups under flag
* removed not applicable layouts
* added hint to error message when wrong input name in -tensor_shape was specified
* added new metrics to statistics report
* Apply suggestions from code review
* fix binary blobs filling when layout is CN
* apply suggestions
* moved file in the right place after rebase
* improved -pcseq output
* updated args and readme
* removed TEMPLATE plugin registration
* fix -shape arg decsription
* enable providing several -i args as input
* renamed legacy_mode to inference_only and made it default for static models, renamed tensor_shape to data_shape
* upd readme
* use getBlob() in inference only mode
* fix old input type for static case
* fix typo
* upd readme
* move log about benchmark mode to the measuring perfomance step
* added class for latency metrics
* upd readme, fix typos, renamed funcs
* fix warning and upd parsing to avoid error with : in file paths
* fix error on centos : error: use of deleted function ‘std::basic_stringstream<char>::basic_stringstream(const std::basic_stringstream<char>&)
* added check for key in inputs
* renamed input to inputs
* adjust batch size for binary blobs
* replaced warning with exception in bench mode defining
* align measurement cycle with master
Co-authored-by: ivikhrev <ivan.vikhrev@intel.com>
2021-12-17 12:20:43 +03:00
|
|
|
// clang-format off
|
|
|
|
|
#include <samples/args_helper.hpp>
|
|
|
|
|
#include <samples/common.hpp>
|
|
|
|
|
#include <samples/slog.hpp>
|
2021-12-13 11:30:58 +03:00
|
|
|
|
2019-08-09 19:02:42 +03:00
|
|
|
#include "utils.hpp"
|
2021-04-22 14:02:54 +03:00
|
|
|
// clang-format on
|
2019-08-09 19:02:42 +03:00
|
|
|
|
2020-04-15 19:01:57 +03:00
|
|
|
#ifdef USE_OPENCV
|
2021-08-11 14:47:29 +03:00
|
|
|
# include <opencv2/core.hpp>
|
2020-04-15 19:01:57 +03:00
|
|
|
#endif
|
|
|
|
|
|
2021-02-11 12:57:05 +03:00
|
|
|
namespace benchmark_app {
|
2022-01-19 01:08:07 +03:00
|
|
|
bool InputInfo::is_image() const {
|
2021-04-22 14:02:54 +03:00
|
|
|
if ((layout != "NCHW") && (layout != "NHWC") && (layout != "CHW") && (layout != "HWC"))
|
|
|
|
|
return false;
|
2022-02-07 13:31:38 +03:00
|
|
|
// If data_shape is still empty, assume this is still an Image and tensor shape will be filled later
|
2021-12-30 19:09:12 +03:00
|
|
|
return (dataShape.empty() || channels() == 3);
|
2021-04-22 14:02:54 +03:00
|
|
|
}
|
2022-01-19 01:08:07 +03:00
|
|
|
bool InputInfo::is_image_info() const {
|
2021-04-22 14:02:54 +03:00
|
|
|
if (layout != "NC")
|
|
|
|
|
return false;
|
|
|
|
|
return (channels() >= 2);
|
|
|
|
|
}
|
|
|
|
|
// Width of the data shape, resolved through the layout.
// std::vector::at throws if the layout has no width dimension index.
size_t InputInfo::width() const {
    const auto w_idx = ov::layout::width_idx(layout);
    return dataShape.at(w_idx);
}
|
|
|
|
|
// Height of the data shape, resolved through the layout.
// std::vector::at throws if the layout has no height dimension index.
size_t InputInfo::height() const {
    const auto h_idx = ov::layout::height_idx(layout);
    return dataShape.at(h_idx);
}
|
|
|
|
|
// Channel count of the data shape, resolved through the layout.
// std::vector::at throws if the layout has no channels dimension index.
size_t InputInfo::channels() const {
    const auto c_idx = ov::layout::channels_idx(layout);
    return dataShape.at(c_idx);
}
|
|
|
|
|
// Batch dimension of the data shape, resolved through the layout.
// std::vector::at throws if the layout has no batch dimension index.
size_t InputInfo::batch() const {
    const auto n_idx = ov::layout::batch_idx(layout);
    return dataShape.at(n_idx);
}
|
|
|
|
|
// Depth dimension of the data shape, resolved through the layout.
// std::vector::at throws if the layout has no depth dimension index.
size_t InputInfo::depth() const {
    const auto d_idx = ov::layout::depth_idx(layout);
    return dataShape.at(d_idx);
}
|
|
|
|
|
} // namespace benchmark_app
|
2021-02-11 12:57:05 +03:00
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
uint32_t device_default_device_duration_in_seconds(const std::string& device) {
|
2021-08-11 14:47:29 +03:00
|
|
|
static const std::map<std::string, uint32_t> deviceDefaultDurationInSeconds{{"CPU", 60},
|
|
|
|
|
{"GPU", 60},
|
|
|
|
|
{"VPU", 60},
|
|
|
|
|
{"MYRIAD", 60},
|
|
|
|
|
{"HDDL", 60},
|
|
|
|
|
{"UNKNOWN", 120}};
|
2019-08-09 19:02:42 +03:00
|
|
|
uint32_t duration = 0;
|
|
|
|
|
for (const auto& deviceDurationInSeconds : deviceDefaultDurationInSeconds) {
|
|
|
|
|
if (device.find(deviceDurationInSeconds.first) != std::string::npos) {
|
|
|
|
|
duration = std::max(duration, deviceDurationInSeconds.second);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (duration == 0) {
|
2021-08-11 14:47:29 +03:00
|
|
|
const auto unknownDeviceIt = find_if(deviceDefaultDurationInSeconds.begin(),
|
|
|
|
|
deviceDefaultDurationInSeconds.end(),
|
|
|
|
|
[](std::pair<std::string, uint32_t> deviceDuration) {
|
|
|
|
|
return deviceDuration.first == "UNKNOWN";
|
|
|
|
|
});
|
2019-08-09 19:02:42 +03:00
|
|
|
|
|
|
|
|
if (unknownDeviceIt == deviceDefaultDurationInSeconds.end()) {
|
|
|
|
|
throw std::logic_error("UNKNOWN device was not found in the device duration list");
|
|
|
|
|
}
|
|
|
|
|
duration = unknownDeviceIt->second;
|
2021-08-11 14:47:29 +03:00
|
|
|
slog::warn << "Default duration " << duration << " seconds for unknown device '" << device << "' is used"
|
|
|
|
|
<< slog::endl;
|
2019-08-09 19:02:42 +03:00
|
|
|
}
|
|
|
|
|
return duration;
|
|
|
|
|
}
|
|
|
|
|
|
2021-04-22 14:02:54 +03:00
|
|
|
// Splits `s` on `delim` and returns the pieces in order.
// Follows std::getline semantics: an empty input yields an empty vector
// and a trailing delimiter does not add an extra empty element.
std::vector<std::string> split(const std::string& s, char delim) {
    std::vector<std::string> tokens;
    std::stringstream stream(s);
    for (std::string token; std::getline(stream, token, delim);) {
        tokens.push_back(token);
    }
    return tokens;
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
// Splits `s` on `delim` and converts every piece to float.
// std::stof throws std::invalid_argument / std::out_of_range for
// unparsable tokens, matching the original behavior.
std::vector<float> split_float(const std::string& s, char delim) {
    std::vector<float> values;
    std::stringstream stream(s);
    for (std::string token; std::getline(stream, token, delim);) {
        values.push_back(std::stof(token));
    }
    return values;
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::vector<std::string> parse_devices(const std::string& device_string) {
|
2019-08-09 19:02:42 +03:00
|
|
|
std::string comma_separated_devices = device_string;
|
Auto Batching impl (#7883)
* auto-batching POC squashed (all commits from auto-batch-2021.3 branch)
(cherry picked from commit d7742f2c747bc514a126cc9a4d5b99f0ff5cbbc7)
* applying/accomodating the API changes after rebase to the master
* replaying modified version of actual batch selection
* eearly experiments with model mem footprint
* changes from rebasing to the latest master
* experimenting with DG1 on the batch size selection, also collecting the mem footprint
* WIP:moving the auto-batching to the icore to let the MULT/AUTO support that, ALLOW_AUTO_BATCHING as a conventional config key. still fials hot device swap
* quick-n-dirty batch footpint vs device total mem
* code style
* testing which models perform badly due to kernels and NOT (batched) footprint
* stub pipeline task to comunicate the readiness rather than promise/future
* quick-n-dirty timeout impl
* explicit _completionTasks,reverting BA to use the timeout
* inputs outputs copies, works with AUTO and demo now
* accomodate the config per device-id, after rebase to the latest master
* allowing the auto-batching only with tput hint to let more conventional tests pass
* fix the pre-mature timeout restaring via waiting for batch1 requests completion
* moved the bacthed request statring ( along with input copies) to the dedicated thread
* [IE CLDNN] Disable bs_fs_yx_bsv16_fsv16 format for int8 convolution
* code style
* increasing the timeout to test the ssd_* models perf (timeout?) issues
* reducing number of output stuff in BA to avoid bloating the logs in experiments
* more aggressive batching for experiments, not limited to 32 and also 4 as a min
* more accurate timeout debugging info
* getting the reqs limitation from the plugin SetConfig as well
* refactor the reshape logic a bit to accomodate CPU for bathcing, also added remeote context
* let the benchamrk_app to consume specific batch values for the auto-batching such as BATCH:GPU(4)
* auto-batching functional test (with results check vs ref) and GPU instance for that
* fixed arithemtic on blobs ptrs
* clang
* handling possible batched network failure
* BATCH as the constants device name in test
* ENABLE_BATCH
* func tests for CPU, also DetectionOutput hetero tests (CPU and GPU)
* DetectionOutput hetero test for the CPU
* reenabling the Auto-Batching in the AUTO
* auto-batching device enabled in the test
* fixed the DO test
* improve the loading loop logic
* brushed the config keys
* allow hetero code-path for explicit device name like BATCH:GPU(4), used in the hetero code-path tests
* fix the test after refactoring
* clang
* moving ThreadSafeQueue to the ie_parallel, as it is re-used in the AUTO/MULTI and BATCH now
* auto-batching hetero test (subgraph with DetectionOutput)
* fixed minor changes that were result of experiments with impl
* code-style
* brushing, disabling CPU's HETERO tests until planned activity for 22.2
* removing home-baked MAX_BATCH_SZIE and swicthing to the official impl by GPU team
* remote blobs tests for the auto-batching (old API)
* brushed names a bit
* CreateContext and LoadNEtwork with context for the Auto-Batching plus remote-blobs tests
* fixed the ieUnitTests with adding CreateContext stub to the MockICore
* clang
* improved remote-blobs tests
* revert the back BA from exeprimenents with AB + device_use_mem
* conformance tests for BATCH, alos batch size 1 is default for BATCH:DEVICE
* remote blobs 2.0 tests, issue with context having the orig device name
* debugging DG1 perf drop (presumably due to non-fitting the device-mem)
* disbaling WA with batch/=2 for excesive mem footptint, leaving only streams 2
* remote blobs 2.0 tests for different tensor sharing types
* converting assert to throw to accomodate legacy API where the lock() was possible to be called
* revert the timeout back to avoid mixing the studies, fixed the footprint calc
* reverting to estimating the max batch by extrapolating from bacth1 size
* more conservative footptint etimation (with bacth1), graceful bacth 1 handling without duplication
* even graceful batch 1 handling without duplication
* WA for MAX_BATCH_SIZE failure, removing batch4 as a min for the auto-batching
* AutoBatchPlugin -> ov_auto_batch_plugin
* WA for gcc 4.8
* clang
* fix misprint
* fixed errors resulted from recent OV's Variant to Any transition
* skip auto-batching for already-batched networks
* AUTO_BATCH_TIMEOUT and tests
* GPU-specific L3
* switched to pure config, also improved ALLOW_AUTO_BATCHING config key handling logic
* debugging device info
* enabling the config tests for the GPU and fixing the Auto-batching tests to pass
* making the default (when not recognized the driver) cache size more aggressive, to accomodate recent HW with old drivers
* skip auto-batching for RNNs and alikes (e.g. single CHW input)
* fixed fallback to the bacth1 and moved HETERO path under condition to avoid bloating
* brushing
* Auto plugin GetMetric support gpu auto-batch
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* add test case
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* add comments on test
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* brushing the vars names, alos adding the excpetion handling
* disabling the auto-batching for the networks with non-batched outputs and faster-rcnn and alikes (CVS-74085) to minimize the of #failures
* add try catch
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* brushing the code changed in the GPU plugin
* Auto-Batch requests tests
* brushed varibles a bit (ref)
* cleaned debug output from the ie_core
* cleaned cmake for the Auto-Batch
* removed batchN estimation from batch1
* cleaned from debug printf
* comments, cleanup
* WA the mock test errors introduced with merging the https://github.com/myshevts/openvino/pull/13
* Adding back removed batchN estimation from batch1 to debug degradations on DG1 (resulted from too optimistic MAX_BATCH_SIZE?). This partially reverts commit e8f1738ac19d20dd56f36d4e824bf273fd6ea917.
* brushing ie_core.cpp
* fix 32bit compilation
* Code review: ENABLE_AUTO_BATCH
* consolidate the auot-batching logic in ie_core.cpp into single ApplyAutoBAtching
* renamed brushed the OPTIMAL_BATCH (now with_SIZE) and mimicks the MAX_BATCH_SZIE wrt MODEL_PTR
* default value for the OPTIMAL_BATCH_SIZE
* clang
* accomodate new func tests location
* fix shuffle of headers after clang + copyrights
* fixed misprint made during code refactoring
* moving the common therad-safe containers (like ThreadSafeQueue) to the dedicated dev_api header
* switch from the device name to the OPTIMAL_BATCH_SIZE metric presence as a conditin to consider Auto-Batching
* switching from the unsafe size() and minimizing time under lock
* code style
* brushed the ApplyAutoBatching
* brushed the netric/config names and descriptions
* completed the core intergration tests for the auto-batching
* ExecGraphInfo and check for incorrect cfg
* removed explicit dependencies from cmake file of the plugin
* disabling Auto-Batching thru the tput hint (to preserve current product default), only excplicit like BATCH:GPU used in the tests
Co-authored-by: Roman Lyamin <roman.lyamin@intel.com>
Co-authored-by: Hu, Yuan2 <yuan2.hu@intel.com>
2021-12-24 12:55:22 +03:00
|
|
|
auto colon = comma_separated_devices.find(":");
|
[AUTO plugin] Fix benchmark failed to set nstreams on MULTI plugin (#12855)
* fix benchmark filed to pass on MULTI device when enable num_streams.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove part of test cases because MULTI plguin will not check if the unspported property is valid now.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove some incorrect config for MULTI test case since MULTI will pass through those unrecognized config without any exception.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Support MULTI to set nstreams to multi target devices by using ov::device:properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Support AUTO to set nstreams with multi target devices bu using ov:device::properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update format.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
Co-authored-by: Chen Peter <peter.chen@intel.com>
2022-09-27 23:42:40 +08:00
|
|
|
std::vector<std::string> result;
|
Auto Batching impl (#7883)
* auto-batching POC squashed (all commits from auto-batch-2021.3 branch)
(cherry picked from commit d7742f2c747bc514a126cc9a4d5b99f0ff5cbbc7)
* applying/accomodating the API changes after rebase to the master
* replaying modified version of actual batch selection
* eearly experiments with model mem footprint
* changes from rebasing to the latest master
* experimenting with DG1 on the batch size selection, also collecting the mem footprint
* WIP:moving the auto-batching to the icore to let the MULT/AUTO support that, ALLOW_AUTO_BATCHING as a conventional config key. still fials hot device swap
* quick-n-dirty batch footpint vs device total mem
* code style
* testing which models perform badly due to kernels and NOT (batched) footprint
* stub pipeline task to comunicate the readiness rather than promise/future
* quick-n-dirty timeout impl
* explicit _completionTasks,reverting BA to use the timeout
* inputs outputs copies, works with AUTO and demo now
* accomodate the config per device-id, after rebase to the latest master
* allowing the auto-batching only with tput hint to let more conventional tests pass
* fix the pre-mature timeout restaring via waiting for batch1 requests completion
* moved the bacthed request statring ( along with input copies) to the dedicated thread
* [IE CLDNN] Disable bs_fs_yx_bsv16_fsv16 format for int8 convolution
* code style
* increasing the timeout to test the ssd_* models perf (timeout?) issues
* reducing number of output stuff in BA to avoid bloating the logs in experiments
* more aggressive batching for experiments, not limited to 32 and also 4 as a min
* more accurate timeout debugging info
* getting the reqs limitation from the plugin SetConfig as well
* refactor the reshape logic a bit to accomodate CPU for bathcing, also added remeote context
* let the benchamrk_app to consume specific batch values for the auto-batching such as BATCH:GPU(4)
* auto-batching functional test (with results check vs ref) and GPU instance for that
* fixed arithemtic on blobs ptrs
* clang
* handling possible batched network failure
* BATCH as the constants device name in test
* ENABLE_BATCH
* func tests for CPU, also DetectionOutput hetero tests (CPU and GPU)
* DetectionOutput hetero test for the CPU
* reenabling the Auto-Batching in the AUTO
* auto-batching device enabled in the test
* fixed the DO test
* improve the loading loop logic
* brushed the config keys
* allow hetero code-path for explicit device name like BATCH:GPU(4), used in the hetero code-path tests
* fix the test after refactoring
* clang
* moving ThreadSafeQueue to the ie_parallel, as it is re-used in the AUTO/MULTI and BATCH now
* auto-batching hetero test (subgraph with DetectionOutput)
* fixed minor changes that were result of experiments with impl
* code-style
* brushing, disabling CPU's HETERO tests until planned activity for 22.2
* removing home-baked MAX_BATCH_SZIE and swicthing to the official impl by GPU team
* remote blobs tests for the auto-batching (old API)
* brushed names a bit
* CreateContext and LoadNEtwork with context for the Auto-Batching plus remote-blobs tests
* fixed the ieUnitTests with adding CreateContext stub to the MockICore
* clang
* improved remote-blobs tests
* revert the back BA from exeprimenents with AB + device_use_mem
* conformance tests for BATCH, alos batch size 1 is default for BATCH:DEVICE
* remote blobs 2.0 tests, issue with context having the orig device name
* debugging DG1 perf drop (presumably due to non-fitting the device-mem)
* disbaling WA with batch/=2 for excesive mem footptint, leaving only streams 2
* remote blobs 2.0 tests for different tensor sharing types
* converting assert to throw to accomodate legacy API where the lock() was possible to be called
* revert the timeout back to avoid mixing the studies, fixed the footprint calc
* reverting to estimating the max batch by extrapolating from bacth1 size
* more conservative footptint etimation (with bacth1), graceful bacth 1 handling without duplication
* even graceful batch 1 handling without duplication
* WA for MAX_BATCH_SIZE failure, removing batch4 as a min for the auto-batching
* AutoBatchPlugin -> ov_auto_batch_plugin
* WA for gcc 4.8
* clang
* fix misprint
* fixed errors resulted from recent OV's Variant to Any transition
* skip auto-batching for already-batched networks
* AUTO_BATCH_TIMEOUT and tests
* GPU-specific L3
* switched to pure config, also improved ALLOW_AUTO_BATCHING config key handling logic
* debugging device info
* enabling the config tests for the GPU and fixing the Auto-batching tests to pass
* making the default (when not recognized the driver) cache size more aggressive, to accomodate recent HW with old drivers
* skip auto-batching for RNNs and alikes (e.g. single CHW input)
* fixed fallback to the bacth1 and moved HETERO path under condition to avoid bloating
* brushing
* Auto plugin GetMetric support gpu auto-batch
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* add test case
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* add comments on test
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* brushing the vars names, alos adding the excpetion handling
* disabling the auto-batching for the networks with non-batched outputs and faster-rcnn and alikes (CVS-74085) to minimize the of #failures
* add try catch
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* brushing the code changed in the GPU plugin
* Auto-Batch requests tests
* brushed varibles a bit (ref)
* cleaned debug output from the ie_core
* cleaned cmake for the Auto-Batch
* removed batchN estimation from batch1
* cleaned from debug printf
* comments, cleanup
* WA the mock test errors introduced with merging the https://github.com/myshevts/openvino/pull/13
* Adding back removed batchN estimation from batch1 to debug degradations on DG1 (resulted from too optimistic MAX_BATCH_SIZE?). This partially reverts commit e8f1738ac19d20dd56f36d4e824bf273fd6ea917.
* brushing ie_core.cpp
* fix 32bit compilation
* Code review: ENABLE_AUTO_BATCH
* consolidate the auot-batching logic in ie_core.cpp into single ApplyAutoBAtching
* renamed brushed the OPTIMAL_BATCH (now with_SIZE) and mimicks the MAX_BATCH_SZIE wrt MODEL_PTR
* default value for the OPTIMAL_BATCH_SIZE
* clang
* accomodate new func tests location
* fix shuffle of headers after clang + copyrights
* fixed misprint made during code refactoring
* moving the common therad-safe containers (like ThreadSafeQueue) to the dedicated dev_api header
* switch from the device name to the OPTIMAL_BATCH_SIZE metric presence as a conditin to consider Auto-Batching
* switching from the unsafe size() and minimizing time under lock
* code style
* brushed the ApplyAutoBatching
* brushed the netric/config names and descriptions
* completed the core intergration tests for the auto-batching
* ExecGraphInfo and check for incorrect cfg
* removed explicit dependencies from cmake file of the plugin
* disabling Auto-Batching thru the tput hint (to preserve current product default), only excplicit like BATCH:GPU used in the tests
Co-authored-by: Roman Lyamin <roman.lyamin@intel.com>
Co-authored-by: Hu, Yuan2 <yuan2.hu@intel.com>
2021-12-24 12:55:22 +03:00
|
|
|
if (colon != std::string::npos) {
|
[AUTO] Update logic of setting TPUT by default for MULTI and AUTO. (#12279)
* update logic of setting TPUT by default for MULTI and AUTO.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Set config to MULTI instaed of target device when using -d MULTI:target_device in benchmark app.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update logit to check if perfomance hint is set.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update test cases.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Set the correct _so from passthrough executable network when batch plugin is disabel to fix lifecycle coredump.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Same update for AUTO plugin
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
Co-authored-by: Chen Peter <peter.chen@intel.com>
2022-08-26 17:31:22 +08:00
|
|
|
auto target_device = comma_separated_devices.substr(0, colon);
|
|
|
|
|
if (target_device == "AUTO" || target_device == "MULTI") {
|
|
|
|
|
result.push_back(target_device);
|
2022-02-18 14:56:56 +08:00
|
|
|
}
|
Auto Batching impl (#7883)
* auto-batching POC squashed (all commits from auto-batch-2021.3 branch)
(cherry picked from commit d7742f2c747bc514a126cc9a4d5b99f0ff5cbbc7)
* applying/accomodating the API changes after rebase to the master
* replaying modified version of actual batch selection
* eearly experiments with model mem footprint
* changes from rebasing to the latest master
* experimenting with DG1 on the batch size selection, also collecting the mem footprint
* WIP:moving the auto-batching to the icore to let the MULT/AUTO support that, ALLOW_AUTO_BATCHING as a conventional config key. still fials hot device swap
* quick-n-dirty batch footpint vs device total mem
* code style
* testing which models perform badly due to kernels and NOT (batched) footprint
* stub pipeline task to comunicate the readiness rather than promise/future
* quick-n-dirty timeout impl
* explicit _completionTasks,reverting BA to use the timeout
* inputs outputs copies, works with AUTO and demo now
* accomodate the config per device-id, after rebase to the latest master
* allowing the auto-batching only with tput hint to let more conventional tests pass
* fix the pre-mature timeout restaring via waiting for batch1 requests completion
* moved the bacthed request statring ( along with input copies) to the dedicated thread
* [IE CLDNN] Disable bs_fs_yx_bsv16_fsv16 format for int8 convolution
* code style
* increasing the timeout to test the ssd_* models perf (timeout?) issues
* reducing number of output stuff in BA to avoid bloating the logs in experiments
* more aggressive batching for experiments, not limited to 32 and also 4 as a min
* more accurate timeout debugging info
* getting the reqs limitation from the plugin SetConfig as well
* refactor the reshape logic a bit to accomodate CPU for bathcing, also added remeote context
* let the benchamrk_app to consume specific batch values for the auto-batching such as BATCH:GPU(4)
* auto-batching functional test (with results check vs ref) and GPU instance for that
* fixed arithemtic on blobs ptrs
* clang
* handling possible batched network failure
* BATCH as the constants device name in test
* ENABLE_BATCH
* func tests for CPU, also DetectionOutput hetero tests (CPU and GPU)
* DetectionOutput hetero test for the CPU
* reenabling the Auto-Batching in the AUTO
* auto-batching device enabled in the test
* fixed the DO test
* improve the loading loop logic
* brushed the config keys
* allow hetero code-path for explicit device name like BATCH:GPU(4), used in the hetero code-path tests
* fix the test after refactoring
* clang
* moving ThreadSafeQueue to the ie_parallel, as it is re-used in the AUTO/MULTI and BATCH now
* auto-batching hetero test (subgraph with DetectionOutput)
* fixed minor changes that were result of experiments with impl
* code-style
* brushing, disabling CPU's HETERO tests until planned activity for 22.2
* removing home-baked MAX_BATCH_SZIE and swicthing to the official impl by GPU team
* remote blobs tests for the auto-batching (old API)
* brushed names a bit
* CreateContext and LoadNEtwork with context for the Auto-Batching plus remote-blobs tests
* fixed the ieUnitTests with adding CreateContext stub to the MockICore
* clang
* improved remote-blobs tests
* revert the back BA from exeprimenents with AB + device_use_mem
* conformance tests for BATCH, alos batch size 1 is default for BATCH:DEVICE
* remote blobs 2.0 tests, issue with context having the orig device name
* debugging DG1 perf drop (presumably due to non-fitting the device-mem)
* disbaling WA with batch/=2 for excesive mem footptint, leaving only streams 2
* remote blobs 2.0 tests for different tensor sharing types
* converting assert to throw to accomodate legacy API where the lock() was possible to be called
* revert the timeout back to avoid mixing the studies, fixed the footprint calc
* reverting to estimating the max batch by extrapolating from bacth1 size
* more conservative footptint etimation (with bacth1), graceful bacth 1 handling without duplication
* even graceful batch 1 handling without duplication
* WA for MAX_BATCH_SIZE failure, removing batch4 as a min for the auto-batching
* AutoBatchPlugin -> ov_auto_batch_plugin
* WA for gcc 4.8
* clang
* fix misprint
* fixed errors resulted from recent OV's Variant to Any transition
* skip auto-batching for already-batched networks
* AUTO_BATCH_TIMEOUT and tests
* GPU-specific L3
* switched to pure config, also improved ALLOW_AUTO_BATCHING config key handling logic
* debugging device info
* enabling the config tests for the GPU and fixing the Auto-batching tests to pass
* making the default (when not recognized the driver) cache size more aggressive, to accomodate recent HW with old drivers
* skip auto-batching for RNNs and alikes (e.g. single CHW input)
* fixed fallback to the bacth1 and moved HETERO path under condition to avoid bloating
* brushing
* Auto plugin GetMetric support gpu auto-batch
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* add test case
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* add comments on test
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* brushing the vars names, alos adding the excpetion handling
* disabling the auto-batching for the networks with non-batched outputs and faster-rcnn and alikes (CVS-74085) to minimize the of #failures
* add try catch
Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>
* brushing the code changed in the GPU plugin
* Auto-Batch requests tests
* brushed varibles a bit (ref)
* cleaned debug output from the ie_core
* cleaned cmake for the Auto-Batch
* removed batchN estimation from batch1
* cleaned from debug printf
* comments, cleanup
* WA the mock test errors introduced with merging the https://github.com/myshevts/openvino/pull/13
* Adding back removed batchN estimation from batch1 to debug degradations on DG1 (resulted from too optimistic MAX_BATCH_SIZE?). This partially reverts commit e8f1738ac19d20dd56f36d4e824bf273fd6ea917.
* brushing ie_core.cpp
* fix 32bit compilation
* Code review: ENABLE_AUTO_BATCH
* consolidate the auot-batching logic in ie_core.cpp into single ApplyAutoBAtching
* renamed brushed the OPTIMAL_BATCH (now with_SIZE) and mimicks the MAX_BATCH_SZIE wrt MODEL_PTR
* default value for the OPTIMAL_BATCH_SIZE
* clang
* accomodate new func tests location
* fix shuffle of headers after clang + copyrights
* fixed misprint made during code refactoring
* moving the common therad-safe containers (like ThreadSafeQueue) to the dedicated dev_api header
* switch from the device name to the OPTIMAL_BATCH_SIZE metric presence as a conditin to consider Auto-Batching
* switching from the unsafe size() and minimizing time under lock
* code style
* brushed the ApplyAutoBatching
* brushed the netric/config names and descriptions
* completed the core intergration tests for the auto-batching
* ExecGraphInfo and check for incorrect cfg
* removed explicit dependencies from cmake file of the plugin
* disabling Auto-Batching thru the tput hint (to preserve current product default), only excplicit like BATCH:GPU used in the tests
Co-authored-by: Roman Lyamin <roman.lyamin@intel.com>
Co-authored-by: Hu, Yuan2 <yuan2.hu@intel.com>
2021-12-24 12:55:22 +03:00
|
|
|
auto bracket = comma_separated_devices.find("("); // e.g. in BATCH:GPU(4)
|
|
|
|
|
comma_separated_devices = comma_separated_devices.substr(colon + 1, bracket - colon - 1);
|
2019-08-09 19:02:42 +03:00
|
|
|
}
|
2022-02-18 14:56:56 +08:00
|
|
|
|
2019-08-09 19:02:42 +03:00
|
|
|
auto devices = split(comma_separated_devices, ',');
|
[AUTO plugin] Fix benchmark failed to set nstreams on MULTI plugin (#12855)
* fix benchmark filed to pass on MULTI device when enable num_streams.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove part of test cases because MULTI plguin will not check if the unspported property is valid now.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove some incorrect config for MULTI test case since MULTI will pass through those unrecognized config without any exception.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Support MULTI to set nstreams to multi target devices by using ov::device:properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Support AUTO to set nstreams with multi target devices bu using ov:device::properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update format.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
Co-authored-by: Chen Peter <peter.chen@intel.com>
2022-09-27 23:42:40 +08:00
|
|
|
result.insert(result.end(), devices.begin(), devices.end());
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void parse_value_for_virtual_device(const std::string& device, std::map<std::string, std::string>& values_string) {
    // Rewrites a per-device value map for a virtual device (MULTI/AUTO):
    // hardware-device entries are folded into a single "<dev> <val> ..." string
    // stored under the virtual device's key.
    //   - MULTI: drops the virtual device's own entry so only hardware values remain,
    //     e.g. MULTI:xxx -nstreams 2 will set nstreams 2 to xxx.
    //   - AUTO: keeps ONLY the virtual device's own entry,
    //     e.g. AUTO:xxx,xxx -nstreams 2 will trigger exception that AUTO plugin
    //     didn't support nstream property.
    auto item_virtual = values_string.find(device);
    if (item_virtual != values_string.end() && values_string.size() > 1) {
        if (device == "MULTI") {
            // Remove the element whose key is the virtual device MULTI.
            values_string.erase(item_virtual);
        } else if (device == "AUTO") {
            // Just keep the element whose key is the virtual device AUTO.
            auto value = item_virtual->second;
            values_string.clear();
            values_string[device] = value;
            return;
        }
    }
    // Merge every non-virtual entry into values_string[device] as "key value "
    // pairs, erasing the per-device entries as we go. std::map::erase returns
    // the next valid iterator, and inserting the virtual key does not
    // invalidate the iteration.
    auto iter = values_string.begin();
    while (iter != values_string.end()) {
        if (iter->first == device) {
            ++iter;
            continue;
        }
        values_string[device] += iter->first + " " + iter->second + " ";
        iter = values_string.erase(iter);
    }
    auto merged = values_string.find(device);
    if (merged != values_string.end()) {
        auto& nstreams = merged->second;
        // Remove the space at the tail added by the merge loop. Guarded so a
        // plain pre-existing value (nothing merged, no trailing space) is not
        // truncated — the previous unconditional pop_back() turned e.g. "2"
        // into "" and was UB on an empty value.
        if (!nstreams.empty() && nstreams.back() == ' ')
            nstreams.pop_back();
    }
}
|
|
|
|
|
|
2022-02-11 09:22:45 +03:00
|
|
|
std::map<std::string, std::string> parse_value_per_device(const std::vector<std::string>& devices,
|
|
|
|
|
const std::string& values_string) {
|
2019-08-09 19:02:42 +03:00
|
|
|
// Format: <device1>:<value1>,<device2>:<value2> or just <value>
|
2020-04-15 19:01:57 +03:00
|
|
|
std::map<std::string, std::string> result;
|
|
|
|
|
auto device_value_strings = split(values_string, ',');
|
2019-08-09 19:02:42 +03:00
|
|
|
for (auto& device_value_string : device_value_strings) {
|
2020-04-15 19:01:57 +03:00
|
|
|
auto device_value_vec = split(device_value_string, ':');
|
2019-08-09 19:02:42 +03:00
|
|
|
if (device_value_vec.size() == 2) {
|
2020-04-13 21:17:23 +03:00
|
|
|
auto device_name = device_value_vec.at(0);
|
[AUTO] Update property setting rules (#13848)
* [AUTO] update property setting logic.
* Update core::set_property() logic to only support primary property for AUTO and MULTI.
* Separate AUTO and MULTI supported properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Add test case.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* 1. revert the changes for ie core.
2. Enable AUTO/MULTI only accepting its own properties.
3. Enable AUTO/MULTI accepting device properties passed from loadnetwork().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Revert "[AUTO]Update the incorrect config test for Myriad (#13271)"
This reverts commit 0552d9880270ea95bb1b457de8daeed12bbaffab.
* MULTI only accepts its own properties that is same as AUTO currently.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Add test cases for AUTO/MULTI property test.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update to enable MULTI supporting hw primary property setting throw the compile_model().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove the added test case for setting secondary property through set_property().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* 1. For AUTO/MULTI, property setting will be passed via core::compile_model() instead of core::set_property().
2. update the logic to infer precision setting that will transform into secondary property setting to each hw device.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* set default value for nstreams when -d AUTO/MULTI and no nstreams setting from command line.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update code format.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Enable BA to support if -d AUTO:MULTI,xxx/MULTI:AUTO,xxx. while AUTO Plugin need to update the logic of generating supported config list to virtual device.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* move device configuration handling outside form function main.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Revert "move device configuration handling outside form function main."
This reverts commit ef77bfc60235be9f7478d5ecf6cd06820f9c6f2b.
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Setting property performance hint to 'THROUGHPUT' and passing to executable network if no setting for of AUTO/MULTI.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
2022-11-17 16:38:47 +08:00
|
|
|
auto value = device_value_vec.at(1);
|
2020-04-13 21:17:23 +03:00
|
|
|
auto it = std::find(devices.begin(), devices.end(), device_name);
|
2019-08-09 19:02:42 +03:00
|
|
|
if (it != devices.end()) {
|
[AUTO] Update property setting rules (#13848)
* [AUTO] update property setting logic.
* Update core::set_property() logic to only support primary property for AUTO and MULTI.
* Separate AUTO and MULTI supported properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Add test case.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* 1. revert the changes for ie core.
2. Enable AUTO/MULTI only accepting its own properties.
3. Enable AUTO/MULTI accepting device properties passed from loadnetwork().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Revert "[AUTO]Update the incorrect config test for Myriad (#13271)"
This reverts commit 0552d9880270ea95bb1b457de8daeed12bbaffab.
* MULTI only accepts its own properties that is same as AUTO currently.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Add test cases for AUTO/MULTI property test.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update to enable MULTI supporting hw primary property setting throw the compile_model().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove the added test case for setting secondary property through set_property().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* 1. For AUTO/MULTI, property setting will be passed via core::compile_model() instead of core::set_property().
2. update the logic to infer precision setting that will transform into secondary property setting to each hw device.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* set default value for nstreams when -d AUTO/MULTI and no nstreams setting from command line.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update code format.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Enable BA to support if -d AUTO:MULTI,xxx/MULTI:AUTO,xxx. while AUTO Plugin need to update the logic of generating supported config list to virtual device.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* move device configuration handling outside form function main.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Revert "move device configuration handling outside form function main."
This reverts commit ef77bfc60235be9f7478d5ecf6cd06820f9c6f2b.
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Setting property performance hint to 'THROUGHPUT' and passing to executable network if no setting for of AUTO/MULTI.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
2022-11-17 16:38:47 +08:00
|
|
|
result[device_name] = value;
|
2020-04-13 21:17:23 +03:00
|
|
|
} else {
|
[AUTO] Update property setting rules (#13848)
* [AUTO] update property setting logic.
* Update core::set_property() logic to only support primary property for AUTO and MULTI.
* Separate AUTO and MULTI supported properties.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Add test case.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* 1. revert the changes for ie core.
2. Enable AUTO/MULTI only accepting its own properties.
3. Enable AUTO/MULTI accepting device properties passed from loadnetwork().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Revert "[AUTO]Update the incorrect config test for Myriad (#13271)"
This reverts commit 0552d9880270ea95bb1b457de8daeed12bbaffab.
* MULTI only accepts its own properties that is same as AUTO currently.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Add test cases for AUTO/MULTI property test.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update to enable MULTI supporting hw primary property setting throw the compile_model().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Remove the added test case for setting secondary property through set_property().
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* 1. For AUTO/MULTI, property setting will be passed via core::compile_model() instead of core::set_property().
2. update the logic to infer precision setting that will transform into secondary property setting to each hw device.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* set default value for nstreams when -d AUTO/MULTI and no nstreams setting from command line.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update code format.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Enable BA to support if -d AUTO:MULTI,xxx/MULTI:AUTO,xxx. while AUTO Plugin need to update the logic of generating supported config list to virtual device.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* move device configuration handling outside form function main.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Revert "move device configuration handling outside form function main."
This reverts commit ef77bfc60235be9f7478d5ecf6cd06820f9c6f2b.
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Setting property performance hint to 'THROUGHPUT' and passing to executable network if no setting for of AUTO/MULTI.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
* Update.
Signed-off-by: Wang, Yang <yang4.wang@intel.com>
2022-11-17 16:38:47 +08:00
|
|
|
std::string devices_list = "";
|
|
|
|
|
for (auto& device : devices)
|
|
|
|
|
devices_list += device + " ";
|
|
|
|
|
devices_list.pop_back();
|
|
|
|
|
throw std::logic_error("Failed to set property to '" + device_name +
|
|
|
|
|
"' which is not found whthin the target devices list '" + devices_list + "'!");
|
2019-08-09 19:02:42 +03:00
|
|
|
}
|
|
|
|
|
} else if (device_value_vec.size() == 1) {
|
2020-04-15 19:01:57 +03:00
|
|
|
auto value = device_value_vec.at(0);
|
2019-08-09 19:02:42 +03:00
|
|
|
for (auto& device : devices) {
|
|
|
|
|
result[device] = value;
|
|
|
|
|
}
|
|
|
|
|
} else if (device_value_vec.size() != 0) {
|
|
|
|
|
throw std::runtime_error("Unknown string format: " + values_string);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return result;
|
|
|
|
|
}
|
2020-04-15 19:01:57 +03:00
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
size_t get_batch_size(const benchmark_app::InputsInfo& inputs_info) {
    // Determines the batch size from inputs whose layout contains a batch
    // dimension. Throws std::logic_error if inputs disagree; falls back to 1
    // (with a warning) when no input exposes a batch dimension.
    size_t batch_size = 0;
    for (auto& info : inputs_info) {
        if (ov::layout::has_batch(info.second.layout)) {
            if (batch_size == 0)
                batch_size = info.second.batch();
            else if (batch_size != info.second.batch())
                // Fixed typo in the message: "deterimine" -> "determine".
                throw std::logic_error("Can't determine batch size: batch is "
                                       "different for different inputs!");
        }
    }
    if (batch_size == 0) {
        // Fixed typo in the message: "asssuming" -> "assuming".
        slog::warn << "No batch dimension was found at any input, assuming batch to be 1. Beware: this might affect "
                      "FPS calculation."
                   << slog::endl;
        batch_size = 1;
    }
    return batch_size;
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::string get_shape_string(const ov::Shape& shape) {
    // Renders a shape via its stream-insertion operator.
    std::ostringstream out;
    out << shape;
    return out.str();
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::string get_shapes_string(const benchmark_app::PartialShapes& shapes) {
    // Joins all named shapes as "'name': shape" separated by ", ".
    std::stringstream out;
    bool first = true;
    for (const auto& named_shape : shapes) {
        if (!first)
            out << ", ";
        out << "\'" << named_shape.first << "': " << named_shape.second;
        first = false;
    }
    return out.str();
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::map<std::string, std::vector<float>> parse_scale_or_mean(const std::string& scale_mean,
                                                              const benchmark_app::InputsInfo& inputs_info) {
    // Parses a scale/mean command-line string into {input name -> channel values}.
    // Format: data:[255,255,255],info[255,255,255]
    // An unnamed "[...]" chunk applies to every input whose is_image() is true
    // and terminates parsing. Throws std::logic_error on malformed input.
    std::map<std::string, std::vector<float>> return_value;

    // Working copy consumed chunk by chunk ("name[v1,v2,...]," at a time).
    std::string search_string = scale_mean;
    auto start_pos = search_string.find_first_of('[');
    while (start_pos != std::string::npos) {
        auto end_pos = search_string.find_first_of(']');
        if (end_pos == std::string::npos)
            break;  // unmatched '[' — leftover text triggers the throw below
        // Text before '[' (possibly empty) names the input the values apply to.
        auto input_name = search_string.substr(0, start_pos);
        auto input_value_string = search_string.substr(start_pos + 1, end_pos - start_pos - 1);
        auto input_value = split_float(input_value_string, ',');

        if (!input_name.empty()) {
            if (inputs_info.count(input_name)) {
                return_value[input_name] = input_value;
            }
            // ignore wrong input name
        } else {
            // Unnamed chunk: apply the values to all inputs reported as images.
            for (auto& item : inputs_info) {
                if (item.second.is_image())
                    return_value[item.first] = input_value;
            }
            // Clear so the malformed-input check below does not fire.
            search_string.clear();
            break;
        }
        // Drop the consumed chunk; the next chunk must follow a ','.
        search_string = search_string.substr(end_pos + 1);
        if (search_string.empty() || search_string.front() != ',')
            break;
        search_string = search_string.substr(1);
        start_pos = search_string.find_first_of('[');
    }
    // Any unconsumed text means the string did not match the expected format.
    if (!search_string.empty())
        throw std::logic_error("Can't parse input parameter string: " + scale_mean);
    return return_value;
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::vector<ngraph::Dimension> parse_partial_shape(const std::string& partial_shape) {
|
Dynamic reshapes (#7788)
* Merged and compiling
* Fix for dynamic shape type
* review fixes
* renamed blob shape to tensor shape, small improvements
* fix code style
* added parsing of multiple shapes
* store latency per group, add isIdleRequestAvailable() to Infer Queue
* added cached random inputs
* redesign pipeline, added new metrics(avg, max, min), added metrics per groups
* fixed code style
* small improvements
* modified tensor parameters parsing
* modified -i parameter parsing: added possibility to specify input names
* implemented image cashing
* added cashed blobs creating
* added -pcseq flag, modified batch filling, changes fps formula
* improvements
* code formatting
* code formatting2
* apply suggestions from review
* replaced Buffer class with InferenceEngine Blobs
* use batch size in blobs filling
* added shared blob allocator to handle blob's data
* fixed warnings & code style
* allocate blobs
* fix for networks with image info input
* added comments & fixed codestyle
* clear data in free() in SharedBlobAllocator
* remove unnecessary check
* Delimeter is changed to ::
* stylefix
* added layout from string function, small improvements
* modified parsing to enable : in input parameters
* small fixes
* small fixes
* added missed blob allocation, fixes
* [TEST]added support for remote blobs
* fix remote blobs
* new inputs/files output format
* removed vectors resize which caused bugs
* made cl::Buffer type under ifdef, fix inputs filling
* changed batch() function to not throwing exceptions
* removed unused var
* fix code style
* replace empty name in input files with name from net input
* restored old behaviour for static models
* fix code style
* fix warning - made const iterator
* fix warning - remove reference in loop variable
* added random and image_info input types to -i, fix problem with layout
* replaced batch() with getBatchSize() in main
* fix layout, shape, tensor shape parameters parsing
* upd help messages for input, tensor shape and pcseq command
* added buffer for cl output blobs, small fixes
Signed-off-by: ivikhrev <ivan.vikhrev@intel.com>
* added legacy mode
* restore setBlob
* code style formatting
* move collecting latency for groups under flag
* removed not applicable layouts
* added hint to error message when wrong input name in -tensor_shape was specified
* added new metrics to statistics report
* Apply suggestions from code review
* fix binary blobs filling when layout is CN
* apply suggestions
* moved file in the right place after rebase
* improved -pcseq output
* updated args and readme
* removed TEMPLATE plugin registration
* fix -shape arg decsription
* enable providing several -i args as input
* renamed legacy_mode to inference_only and made it default for static models, renamed tensor_shape to data_shape
* upd readme
* use getBlob() in inference only mode
* fix old input type for static case
* fix typo
* upd readme
* move log about benchmark mode to the measuring perfomance step
* added class for latency metrics
* upd readme, fix typos, renamed funcs
* fix warning and upd parsing to avoid error with : in file paths
* fix error on centos : error: use of deleted function ‘std::basic_stringstream<char>::basic_stringstream(const std::basic_stringstream<char>&)
* added check for key in inputs
* renamed input to inputs
* adjust batch size for binary blobs
* replaced warning with exception in bench mode defining
* align measurement cycle with master
Co-authored-by: ivikhrev <ivan.vikhrev@intel.com>
2021-12-17 12:20:43 +03:00
|
|
|
std::vector<ngraph::Dimension> shape;
|
|
|
|
|
for (auto& dim : split(partial_shape, ',')) {
|
|
|
|
|
if (dim == "?" || dim == "-1") {
|
|
|
|
|
shape.push_back(ngraph::Dimension::dynamic());
|
|
|
|
|
} else {
|
|
|
|
|
const std::string range_divider = "..";
|
|
|
|
|
size_t range_index = dim.find(range_divider);
|
|
|
|
|
if (range_index != std::string::npos) {
|
|
|
|
|
std::string min = dim.substr(0, range_index);
|
|
|
|
|
std::string max = dim.substr(range_index + range_divider.length());
|
|
|
|
|
shape.push_back(ngraph::Dimension(min.empty() ? 0 : std::stoi(min),
|
|
|
|
|
max.empty() ? ngraph::Interval::s_max : std::stoi(max)));
|
|
|
|
|
} else {
|
|
|
|
|
shape.push_back(std::stoi(dim));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return shape;
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
// Parses a comma-separated list of integers (e.g. "1,3,224,224") into an
// ov::Shape. Each token must be a valid integer; std::stoi throws
// std::invalid_argument on malformed input.
ov::Shape parse_data_shape(const std::string& dataShapeStr) {
    const auto tokens = split(dataShapeStr, ',');
    std::vector<size_t> dims;
    dims.reserve(tokens.size());
    for (const auto& token : tokens) {
        dims.push_back(std::stoi(token));
    }
    return dims;
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
// Splits an -i argument of the form "<input_name>:file1,file2,..." into the
// optional input name and the list of file paths. The name prefix is NOT
// extracted when the ':' actually belongs to the path itself: either it
// appears after an opening '"' (quoted path containing ':'), or it follows a
// single leading character before '\' (a Windows drive letter, e.g. "C:\...").
std::pair<std::string, std::vector<std::string>> parse_input_files(const std::string& file_paths_string) {
    std::string remainder = file_paths_string;
    std::string input_name;
    std::vector<std::string> file_paths;

    // Locate a possible "<name>:" prefix.
    size_t name_end = remainder.find_first_of(":");
    const size_t quote_pos = remainder.find_first_of("\"");
    if (name_end != std::string::npos && quote_pos != std::string::npos && name_end > quote_pos) {
        // ':' occurs after an opening '"' — it belongs to the quoted pathname.
        name_end = std::string::npos;
    }
    if (remainder.length() > 2 && name_end == 1 && remainder[2] == '\\') {
        // "X:\..." is a Windows drive specifier, not an input name.
        name_end = std::string::npos;
    }

    if (name_end != std::string::npos) {
        input_name = remainder.substr(0, name_end);
        remainder = remainder.substr(name_end + 1);
    }

    // Split the remaining "file1,file2,..." list on commas.
    while (true) {
        const size_t comma_pos = remainder.find_first_of(',');
        file_paths.push_back(remainder.substr(0, comma_pos));
        if (comma_pos == std::string::npos) {
            remainder.clear();
            break;
        }
        remainder = remainder.substr(comma_pos + 1);
    }

    if (!remainder.empty())
        throw std::logic_error("Can't parse file paths for input " + input_name +
                               " in input parameter string: " + file_paths_string);

    return {input_name, file_paths};
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::map<std::string, std::vector<std::string>> parse_input_arguments(const std::vector<std::string>& args) {
|
Dynamic reshapes (#7788)
* Merged and compiling
* Fix for dynamic shape type
* review fixes
* renamed blob shape to tensor shape, small improvements
* fix code style
* added parsing of multiple shapes
* store latency per group, add isIdleRequestAvailable() to Infer Queue
* added cached random inputs
* redesign pipeline, added new metrics(avg, max, min), added metrics per groups
* fixed code style
* small improvements
* modified tensor parameters parsing
* modified -i parameter parsing: added possibility to specify input names
* implemented image cashing
* added cashed blobs creating
* added -pcseq flag, modified batch filling, changes fps formula
* improvements
* code formatting
* code formatting2
* apply suggestions from review
* replaced Buffer class with InferenceEngine Blobs
* use batch size in blobs filling
* added shared blob allocator to handle blob's data
* fixed warnings & code style
* allocate blobs
* fix for networks with image info input
* added comments & fixed codestyle
* clear data in free() in SharedBlobAllocator
* remove unnecessary check
* Delimeter is changed to ::
* stylefix
* added layout from string function, small improvements
* modified parsing to enable : in input parameters
* small fixes
* small fixes
* added missed blob allocation, fixes
* [TEST]added support for remote blobs
* fix remote blobs
* new inputs/files output format
* removed vectors resize which caused bugs
* made cl::Buffer type under ifdef, fix inputs filling
* changed batch() function to not throwing exceptions
* removed unused var
* fix code style
* replace empty name in input files with name from net input
* restored old behaviour for static models
* fix code style
* fix warning - made const iterator
* fix warning - remove reference in loop variable
* added random and image_info input types to -i, fix problem with layout
* replaced batch() with getBatchSize() in main
* fix layout, shape, tensor shape parameters parsing
* upd help messages for input, tensor shape and pcseq command
* added buffer for cl output blobs, small fixes
Signed-off-by: ivikhrev <ivan.vikhrev@intel.com>
* added legacy mode
* restore setBlob
* code style formatting
* move collecting latency for groups under flag
* removed not applicable layouts
* added hint to error message when wrong input name in -tensor_shape was specified
* added new metrics to statistics report
* Apply suggestions from code review
* fix binary blobs filling when layout is CN
* apply suggestions
* moved file in the right place after rebase
* improved -pcseq output
* updated args and readme
* removed TEMPLATE plugin registration
* fix -shape arg decsription
* enable providing several -i args as input
* renamed legacy_mode to inference_only and made it default for static models, renamed tensor_shape to data_shape
* upd readme
* use getBlob() in inference only mode
* fix old input type for static case
* fix typo
* upd readme
* move log about benchmark mode to the measuring perfomance step
* added class for latency metrics
* upd readme, fix typos, renamed funcs
* fix warning and upd parsing to avoid error with : in file paths
* fix error on centos : error: use of deleted function ‘std::basic_stringstream<char>::basic_stringstream(const std::basic_stringstream<char>&)
* added check for key in inputs
* renamed input to inputs
* adjust batch size for binary blobs
* replaced warning with exception in bench mode defining
* align measurement cycle with master
Co-authored-by: ivikhrev <ivan.vikhrev@intel.com>
2021-12-17 12:20:43 +03:00
|
|
|
std::map<std::string, std::vector<std::string>> mapped_files = {};
|
|
|
|
|
auto args_it = begin(args);
|
|
|
|
|
const auto is_image_arg = [](const std::string& s) {
|
|
|
|
|
return s == "-i";
|
|
|
|
|
};
|
|
|
|
|
const auto is_arg = [](const std::string& s) {
|
|
|
|
|
return s.front() == '-';
|
|
|
|
|
};
|
|
|
|
|
while (args_it != args.end()) {
|
|
|
|
|
const auto files_start = std::find_if(args_it, end(args), is_image_arg);
|
|
|
|
|
if (files_start == end(args)) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
const auto files_begin = std::next(files_start);
|
|
|
|
|
const auto files_end = std::find_if(files_begin, end(args), is_arg);
|
|
|
|
|
for (auto f = files_begin; f != files_end; ++f) {
|
2022-01-19 01:08:07 +03:00
|
|
|
auto files = parse_input_files(*f);
|
Dynamic reshapes (#7788)
* Merged and compiling
* Fix for dynamic shape type
* review fixes
* renamed blob shape to tensor shape, small improvements
* fix code style
* added parsing of multiple shapes
* store latency per group, add isIdleRequestAvailable() to Infer Queue
* added cached random inputs
* redesign pipeline, added new metrics(avg, max, min), added metrics per groups
* fixed code style
* small improvements
* modified tensor parameters parsing
* modified -i parameter parsing: added possibility to specify input names
* implemented image cashing
* added cashed blobs creating
* added -pcseq flag, modified batch filling, changes fps formula
* improvements
* code formatting
* code formatting2
* apply suggestions from review
* replaced Buffer class with InferenceEngine Blobs
* use batch size in blobs filling
* added shared blob allocator to handle blob's data
* fixed warnings & code style
* allocate blobs
* fix for networks with image info input
* added comments & fixed codestyle
* clear data in free() in SharedBlobAllocator
* remove unnecessary check
* Delimeter is changed to ::
* stylefix
* added layout from string function, small improvements
* modified parsing to enable : in input parameters
* small fixes
* small fixes
* added missed blob allocation, fixes
* [TEST]added support for remote blobs
* fix remote blobs
* new inputs/files output format
* removed vectors resize which caused bugs
* made cl::Buffer type under ifdef, fix inputs filling
* changed batch() function to not throwing exceptions
* removed unused var
* fix code style
* replace empty name in input files with name from net input
* restored old behaviour for static models
* fix code style
* fix warning - made const iterator
* fix warning - remove reference in loop variable
* added random and image_info input types to -i, fix problem with layout
* replaced batch() with getBatchSize() in main
* fix layout, shape, tensor shape parameters parsing
* upd help messages for input, tensor shape and pcseq command
* added buffer for cl output blobs, small fixes
Signed-off-by: ivikhrev <ivan.vikhrev@intel.com>
* added legacy mode
* restore setBlob
* code style formatting
* move collecting latency for groups under flag
* removed not applicable layouts
* added hint to error message when wrong input name in -tensor_shape was specified
* added new metrics to statistics report
* Apply suggestions from code review
* fix binary blobs filling when layout is CN
* apply suggestions
* moved file in the right place after rebase
* improved -pcseq output
* updated args and readme
* removed TEMPLATE plugin registration
* fix -shape arg decsription
* enable providing several -i args as input
* renamed legacy_mode to inference_only and made it default for static models, renamed tensor_shape to data_shape
* upd readme
* use getBlob() in inference only mode
* fix old input type for static case
* fix typo
* upd readme
* move log about benchmark mode to the measuring perfomance step
* added class for latency metrics
* upd readme, fix typos, renamed funcs
* fix warning and upd parsing to avoid error with : in file paths
* fix error on centos : error: use of deleted function ‘std::basic_stringstream<char>::basic_stringstream(const std::basic_stringstream<char>&)
* added check for key in inputs
* renamed input to inputs
* adjust batch size for binary blobs
* replaced warning with exception in bench mode defining
* align measurement cycle with master
Co-authored-by: ivikhrev <ivan.vikhrev@intel.com>
2021-12-17 12:20:43 +03:00
|
|
|
if (mapped_files.find(files.first) == mapped_files.end()) {
|
|
|
|
|
mapped_files[files.first] = {};
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (auto& file : files.second) {
|
2022-02-17 00:42:51 +03:00
|
|
|
if (file == "image_info" || file == "random") {
|
|
|
|
|
mapped_files[files.first].push_back(file);
|
|
|
|
|
} else {
|
|
|
|
|
readInputFilesArguments(mapped_files[files.first], file);
|
|
|
|
|
}
|
Dynamic reshapes (#7788)
* Merged and compiling
* Fix for dynamic shape type
* review fixes
* renamed blob shape to tensor shape, small improvements
* fix code style
* added parsing of multiple shapes
* store latency per group, add isIdleRequestAvailable() to Infer Queue
* added cached random inputs
* redesign pipeline, added new metrics(avg, max, min), added metrics per groups
* fixed code style
* small improvements
* modified tensor parameters parsing
* modified -i parameter parsing: added possibility to specify input names
* implemented image cashing
* added cashed blobs creating
* added -pcseq flag, modified batch filling, changes fps formula
* improvements
* code formatting
* code formatting2
* apply suggestions from review
* replaced Buffer class with InferenceEngine Blobs
* use batch size in blobs filling
* added shared blob allocator to handle blob's data
* fixed warnings & code style
* allocate blobs
* fix for networks with image info input
* added comments & fixed codestyle
* clear data in free() in SharedBlobAllocator
* remove unnecessary check
* Delimeter is changed to ::
* stylefix
* added layout from string function, small improvements
* modified parsing to enable : in input parameters
* small fixes
* small fixes
* added missed blob allocation, fixes
* [TEST]added support for remote blobs
* fix remote blobs
* new inputs/files output format
* removed vectors resize which caused bugs
* made cl::Buffer type under ifdef, fix inputs filling
* changed batch() function to not throwing exceptions
* removed unused var
* fix code style
* replace empty name in input files with name from net input
* restored old behaviour for static models
* fix code style
* fix warning - made const iterator
* fix warning - remove reference in loop variable
* added random and image_info input types to -i, fix problem with layout
* replaced batch() with getBatchSize() in main
* fix layout, shape, tensor shape parameters parsing
* upd help messages for input, tensor shape and pcseq command
* added buffer for cl output blobs, small fixes
Signed-off-by: ivikhrev <ivan.vikhrev@intel.com>
* added legacy mode
* restore setBlob
* code style formatting
* move collecting latency for groups under flag
* removed not applicable layouts
* added hint to error message when wrong input name in -tensor_shape was specified
* added new metrics to statistics report
* Apply suggestions from code review
* fix binary blobs filling when layout is CN
* apply suggestions
* moved file in the right place after rebase
* improved -pcseq output
* updated args and readme
* removed TEMPLATE plugin registration
* fix -shape arg decsription
* enable providing several -i args as input
* renamed legacy_mode to inference_only and made it default for static models, renamed tensor_shape to data_shape
* upd readme
* use getBlob() in inference only mode
* fix old input type for static case
* fix typo
* upd readme
* move log about benchmark mode to the measuring perfomance step
* added class for latency metrics
* upd readme, fix typos, renamed funcs
* fix warning and upd parsing to avoid error with : in file paths
* fix error on centos : error: use of deleted function ‘std::basic_stringstream<char>::basic_stringstream(const std::basic_stringstream<char>&)
* added check for key in inputs
* renamed input to inputs
* adjust batch size for binary blobs
* replaced warning with exception in bench mode defining
* align measurement cycle with master
Co-authored-by: ivikhrev <ivan.vikhrev@intel.com>
2021-12-17 12:20:43 +03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
args_it = files_end;
|
|
|
|
|
}
|
|
|
|
|
size_t max_files = 20;
|
|
|
|
|
for (auto& files : mapped_files) {
|
|
|
|
|
if (files.second.size() <= max_files) {
|
|
|
|
|
slog::info << "For input " << files.first << " " << files.second.size() << " files were added. "
|
|
|
|
|
<< slog::endl;
|
|
|
|
|
} else {
|
|
|
|
|
slog::info << "For input " << files.first << " " << files.second.size() << " files were added. "
|
|
|
|
|
<< " The number of files will be limited to " << max_files << "." << slog::endl;
|
|
|
|
|
files.second.resize(20);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return mapped_files;
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
// Parses a per-input parameter string like "input0[value0],input1[value1]",
// "[value]" (applied to all inputs), or "input0[v0][v1]" (several values for
// one input) into a map from tensor name to the list of bracketed values.
//
// Input names are resolved to tensor names via parameter_name_to_tensor_name;
// a nameless bracket group applies its value to every input in `input_info`.
// Throws std::logic_error on leftover unparsable text.
std::map<std::string, std::vector<std::string>> parse_input_parameters(
    const std::string& parameter_string,
    const std::vector<ov::Output<const ov::Node>>& input_info) {
    // Parse parameter string like "input0[value0],input1[value1]" or "[value]" (applied to all
    // inputs)
    std::map<std::string, std::vector<std::string>> return_value;
    // Cursor-style parsing: consume one "<name>[value]" group per iteration.
    std::string search_string = parameter_string;
    auto start_pos = search_string.find_first_of('[');
    auto input_name = search_string.substr(0, start_pos);
    while (start_pos != std::string::npos) {
        auto end_pos = search_string.find_first_of(']');
        if (end_pos == std::string::npos)
            break;  // unbalanced '[' — leftover text triggers the error below
        // When start_pos == 0 the group has no name prefix; keep the previous
        // input_name so "input0[v0][v1]" attributes both values to input0.
        if (start_pos)
            input_name = search_string.substr(0, start_pos);
        auto input_value = search_string.substr(start_pos + 1, end_pos - start_pos - 1);
        if (!input_name.empty()) {
            return_value[parameter_name_to_tensor_name(input_name, input_info)].push_back(input_value);
        } else {
            // Nameless group: apply the value to every input.
            for (auto& item : input_info) {
                return_value[item.get_any_name()].push_back(input_value);
            }
        }
        // Advance past ']'; the next group may start with ',' or directly with '['.
        search_string = search_string.substr(end_pos + 1);
        if (search_string.empty() || (search_string.front() != ',' && search_string.front() != '['))
            break;
        if (search_string.front() == ',')
            search_string = search_string.substr(1);
        start_pos = search_string.find_first_of('[');
    }
    // Anything left over means the string did not fully match the format.
    if (!search_string.empty())
        throw std::logic_error("Can't parse input parameter string: " + parameter_string);
    return return_value;
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::vector<benchmark_app::InputsInfo> get_inputs_info(const std::string& shape_string,
|
|
|
|
|
const std::string& layout_string,
|
|
|
|
|
const size_t batch_size,
|
|
|
|
|
const std::string& data_shapes_string,
|
|
|
|
|
const std::map<std::string, std::vector<std::string>>& fileNames,
|
|
|
|
|
const std::string& scale_string,
|
|
|
|
|
const std::string& mean_string,
|
|
|
|
|
const std::vector<ov::Output<const ov::Node>>& input_info,
|
|
|
|
|
bool& reshape_required) {
|
|
|
|
|
std::map<std::string, std::vector<std::string>> shape_map = parse_input_parameters(shape_string, input_info);
|
2021-12-30 19:09:12 +03:00
|
|
|
std::map<std::string, std::vector<std::string>> data_shapes_map =
|
2022-01-19 01:08:07 +03:00
|
|
|
parse_input_parameters(data_shapes_string, input_info);
|
|
|
|
|
std::map<std::string, std::vector<std::string>> layout_map = parse_input_parameters(layout_string, input_info);
|
2021-12-30 19:09:12 +03:00
|
|
|
|
|
|
|
|
size_t min_size = 1, max_size = 1;
|
|
|
|
|
if (!data_shapes_map.empty()) {
|
|
|
|
|
min_size = std::min_element(data_shapes_map.begin(),
|
|
|
|
|
data_shapes_map.end(),
|
|
|
|
|
[](std::pair<std::string, std::vector<std::string>> a,
|
|
|
|
|
std::pair<std::string, std::vector<std::string>> b) {
|
|
|
|
|
return a.second.size() < b.second.size() && a.second.size() != 1;
|
|
|
|
|
})
|
|
|
|
|
->second.size();
|
|
|
|
|
|
|
|
|
|
max_size = std::max_element(data_shapes_map.begin(),
|
|
|
|
|
data_shapes_map.end(),
|
|
|
|
|
[](std::pair<std::string, std::vector<std::string>> a,
|
|
|
|
|
std::pair<std::string, std::vector<std::string>> b) {
|
|
|
|
|
return a.second.size() < b.second.size();
|
|
|
|
|
})
|
|
|
|
|
->second.size();
|
|
|
|
|
if (min_size != max_size) {
|
|
|
|
|
throw std::logic_error(
|
|
|
|
|
"Shapes number for every input should be either 1 or should be equal to shapes number of other inputs");
|
|
|
|
|
}
|
2022-02-07 13:31:38 +03:00
|
|
|
slog::info << "Number of test configurations is calculated basing on -data_shape parameter" << slog::endl;
|
2021-12-30 19:09:12 +03:00
|
|
|
} else if (fileNames.size() > 0) {
|
|
|
|
|
slog::info << "Number of test configurations is calculated basing on number of input images" << slog::endl;
|
|
|
|
|
min_size = std::min_element(fileNames.begin(),
|
|
|
|
|
fileNames.end(),
|
|
|
|
|
[](std::pair<std::string, std::vector<std::string>> a,
|
|
|
|
|
std::pair<std::string, std::vector<std::string>> b) {
|
|
|
|
|
return a.second.size() < b.second.size() && a.second.size() != 1;
|
|
|
|
|
})
|
|
|
|
|
->second.size();
|
|
|
|
|
|
|
|
|
|
max_size = std::max_element(fileNames.begin(),
|
|
|
|
|
fileNames.end(),
|
|
|
|
|
[](std::pair<std::string, std::vector<std::string>> a,
|
|
|
|
|
std::pair<std::string, std::vector<std::string>> b) {
|
|
|
|
|
return a.second.size() < b.second.size();
|
|
|
|
|
})
|
|
|
|
|
->second.size();
|
|
|
|
|
if (min_size != max_size) {
|
|
|
|
|
slog::warn << "Number of input files is different for some inputs, minimal number of files will be used ("
|
|
|
|
|
<< min_size << ")" << slog::endl;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
[BENCHMARK_APP/PYTHON/CPP] Align benchmark_app output across languages (#12814)
* [PYTHON] Pipeline transfer
* [PYTHON] Align python benchmark
* [PYTHON] Align last step
* [PYTHON] Fix innacuracies of the last step - median
* [PYTHON/CPP] Add Core::get_version method to python API, refactor Ben benchmark to print version with this func
* [PYTHON] Remove get_version_string from API
* [PYTHON/CPP] Align output for model input/output info
* [PYTHON/CPP] Step 4,6 alignment of outputs, step 8 dumps all info stored in config parameters
* [CPP] Fix a bug causing nstreams parameter to never be set to AUTO in CPP benchmark_app
* [CPP] Fix clang format errors
* [CPP] Modify print order and data output for 8th step
* [PYTHON] Add verification checks from C++, modify set_thoughtput_streams to match documentation
* [CPP] Revert changes to C++ benchmark_app
* [CPP] Remove additional spacebar
* Update submodules versions on remote
* Update module from master branch
* Redownload submodules from master and override changes from commit
* [PYTHON] Remove unneccesary parse_status from validation function
* [PYTHON] Check for HINT in map, fix circular import
* [PYTHON] Remove artifacts from commit, fix args.perf_hint set to '' instead to 'none'
* [PYTHON] Reverse changes to perf hint, add key in map check, fix validation function throwing error on set hint
* [PYTHON] Fix linter
* [PYTHON] Remove linter spacebar
* [CPP] Fix wait_all exception throw
* [CPP/PYTHON] Clean artifacts and unwanted changes from work process
* [PYTHON] Fix artifacts from merge, clean submodule update
* [C++ CPU] Fix device name string by removing padding NULL characters from the back
* [CPP] Fix ba infer_request_wrap in other throw-catch clauses
* [PYTHON/CPP] Fix missing latencies in final step for shape group, fix minor misaligned messages, add missing report parameter create infer requests time
* [CPP] Clang fix formatting
* [CPP] Reverse clang fix format on plugin.cpp
* [PYTHON/CPP] Fix C++ progressbar printing endl when disabled, fix rounding in python creating infer request message
* [CPP] Fix foramtiing error
* [PYTHON/C++] Refactor network to model based on naming conventions, provide fresh README output example
* [PYTHON/C++] Add example output to C++ README, remove unnecessary device loop
* [BENCHMARK_APP/C++] Fix artifact from refactoring, remove try-catch clause
* Update samples/cpp/benchmark_app/benchmark_app.hpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* [CPP] Fix clang errors
* [CPP/PLUGIN Reverse modification to extract to separate task
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/parameters.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* [PYTHON/C++/BENCHMARK_APP] Fix language inconsistencies, remove unnecessary checks
* Update pyopenvino.cpp
* [CPP/BENCHMARK_APP] Remove unnecessary try-catch, fix linter errors
* [PYTHON/CPP/BENCHMARK_APP] Revert changes to Core, align version prints usin only provided methods
* [DOCS/BENCHMARK_APP] Update README with proper model examples
* Update README.md
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
2022-11-14 14:10:36 +01:00
|
|
|
slog::info << "Model batch size: " << batch_size << slog::endl;
|
|
|
|
|
|
2021-12-30 19:09:12 +03:00
|
|
|
reshape_required = false;
|
|
|
|
|
|
|
|
|
|
std::map<std::string, int> currentFileCounters;
|
|
|
|
|
for (auto& item : input_info) {
|
|
|
|
|
currentFileCounters[item.get_any_name()] = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::vector<benchmark_app::InputsInfo> info_maps;
|
|
|
|
|
for (size_t i = 0; i < min_size; ++i) {
|
|
|
|
|
benchmark_app::InputsInfo info_map;
|
|
|
|
|
|
2022-02-23 01:30:08 +03:00
|
|
|
bool is_there_at_least_one_batch_dim = false;
|
2021-12-30 19:09:12 +03:00
|
|
|
for (auto& item : input_info) {
|
|
|
|
|
benchmark_app::InputInfo info;
|
|
|
|
|
auto name = item.get_any_name();
|
|
|
|
|
|
|
|
|
|
// Layout
|
|
|
|
|
if (layout_map.count(name)) {
|
|
|
|
|
if (layout_map.at(name).size() > 1) {
|
|
|
|
|
throw std::logic_error(
|
|
|
|
|
"layout command line parameter doesn't support multiple layouts for one input.");
|
|
|
|
|
}
|
|
|
|
|
info.layout = ov::Layout(layout_map.at(name)[0]);
|
|
|
|
|
// reshape_required = true;
|
|
|
|
|
} else {
|
|
|
|
|
info.layout = dynamic_cast<const ov::op::v0::Parameter&>(*item.get_node()).get_layout();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Calculating default layout values if needed
|
|
|
|
|
std::string newLayout = "";
|
|
|
|
|
if (info.layout.empty()) {
|
|
|
|
|
switch (item.get_partial_shape().size()) {
|
|
|
|
|
case 3:
|
2022-03-23 16:54:56 +03:00
|
|
|
newLayout = (item.get_partial_shape()[2].get_max_length() <= 4 &&
|
|
|
|
|
item.get_partial_shape()[0].get_max_length() > 4)
|
|
|
|
|
? "HWC"
|
|
|
|
|
: "CHW";
|
2021-12-30 19:09:12 +03:00
|
|
|
break;
|
|
|
|
|
case 4:
|
|
|
|
|
// Rough check for layout type, basing on max number of image channels
|
|
|
|
|
newLayout = (item.get_partial_shape()[3].get_max_length() <= 4 &&
|
|
|
|
|
item.get_partial_shape()[1].get_max_length() > 4)
|
|
|
|
|
? "NHWC"
|
|
|
|
|
: "NCHW";
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
if (newLayout != "") {
|
|
|
|
|
info.layout = ov::Layout(newLayout);
|
|
|
|
|
}
|
|
|
|
|
if (info_maps.empty()) { // Show warnings only for 1st test case config, as for other test cases
|
|
|
|
|
// they will be the same
|
2022-01-18 13:40:54 +03:00
|
|
|
slog::warn << item.get_any_name() << ": layout is not set explicitly"
|
2021-12-30 19:09:12 +03:00
|
|
|
<< (newLayout != "" ? std::string(", so it is defaulted to ") + newLayout : "")
|
|
|
|
|
<< ". It is STRONGLY recommended to set layout manually to avoid further issues."
|
|
|
|
|
<< slog::endl;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Precision
|
|
|
|
|
info.type = item.get_element_type();
|
|
|
|
|
// Partial Shape
|
|
|
|
|
if (shape_map.count(name)) {
|
|
|
|
|
if (shape_map.at(name).size() > 1) {
|
|
|
|
|
throw std::logic_error(
|
|
|
|
|
"shape command line parameter doesn't support multiple shapes for one input.");
|
|
|
|
|
}
|
2022-01-19 01:08:07 +03:00
|
|
|
info.partialShape = parse_partial_shape(shape_map.at(name)[0]);
|
2021-12-30 19:09:12 +03:00
|
|
|
reshape_required = true;
|
|
|
|
|
} else {
|
|
|
|
|
info.partialShape = item.get_partial_shape();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Files might be mapped without input name. In case of only one input we may map them to the only input
|
|
|
|
|
// directly
|
|
|
|
|
std::string filesInputName =
|
|
|
|
|
fileNames.size() == 1 && input_info.size() == 1 && fileNames.begin()->first == "" ? "" : name;
|
|
|
|
|
|
|
|
|
|
// Tensor Shape
|
|
|
|
|
if (info.partialShape.is_dynamic() && data_shapes_map.count(name)) {
|
2022-01-19 01:08:07 +03:00
|
|
|
info.dataShape = parse_data_shape(data_shapes_map.at(name)[i % data_shapes_map.at(name).size()]);
|
|
|
|
|
} else if (info.partialShape.is_dynamic() && fileNames.count(filesInputName) && info.is_image()) {
|
2021-12-30 19:09:12 +03:00
|
|
|
auto& namesVector = fileNames.at(filesInputName);
|
2022-01-19 01:08:07 +03:00
|
|
|
if (contains_binaries(namesVector)) {
|
2021-12-30 19:09:12 +03:00
|
|
|
throw std::logic_error("Input files list for input " + item.get_any_name() +
|
|
|
|
|
" contains binary file(s) and input shape is dynamic. Tensor shape should "
|
2022-02-07 13:31:38 +03:00
|
|
|
"be defined explicitly (using -data_shape).");
|
2021-12-30 19:09:12 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
info.dataShape = ov::Shape(info.partialShape.size(), 0);
|
|
|
|
|
for (int i = 0; i < info.partialShape.size(); i++) {
|
|
|
|
|
auto& dim = info.partialShape[i];
|
|
|
|
|
if (dim.is_static()) {
|
|
|
|
|
info.dataShape[i] = dim.get_length();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size_t tensorBatchSize = std::max(batch_size, (size_t)1);
|
|
|
|
|
if (ov::layout::has_batch(info.layout)) {
|
|
|
|
|
if (info.batch()) {
|
|
|
|
|
tensorBatchSize = std::max(tensorBatchSize, info.batch());
|
|
|
|
|
} else {
|
|
|
|
|
info.dataShape[ov::layout::batch_idx(info.layout)] = tensorBatchSize;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size_t w = 0;
|
|
|
|
|
size_t h = 0;
|
|
|
|
|
size_t fileIdx = currentFileCounters[item.get_any_name()];
|
|
|
|
|
for (; fileIdx < currentFileCounters[item.get_any_name()] + tensorBatchSize; fileIdx++) {
|
|
|
|
|
if (fileIdx >= namesVector.size()) {
|
|
|
|
|
throw std::logic_error(
|
|
|
|
|
"Not enough files to fill in full batch (number of files should be a multiple of batch "
|
2022-02-07 13:31:38 +03:00
|
|
|
"size if -data_shape parameter is omitted and shape is dynamic)");
|
2021-12-30 19:09:12 +03:00
|
|
|
}
|
|
|
|
|
FormatReader::ReaderPtr reader(namesVector[fileIdx].c_str());
|
|
|
|
|
if ((w && w != reader->width()) || (h && h != reader->height())) {
|
|
|
|
|
throw std::logic_error("Image sizes putting into one batch should be of the same size if input "
|
2022-02-07 13:31:38 +03:00
|
|
|
"shape is dynamic and -data_shape is omitted. Problem file: " +
|
2021-12-30 19:09:12 +03:00
|
|
|
namesVector[fileIdx]);
|
|
|
|
|
}
|
|
|
|
|
w = reader->width();
|
|
|
|
|
h = reader->height();
|
|
|
|
|
}
|
|
|
|
|
currentFileCounters[item.get_any_name()] = fileIdx;
|
|
|
|
|
|
|
|
|
|
if (!info.dataShape[ov::layout::height_idx(info.layout)]) {
|
|
|
|
|
info.dataShape[ov::layout::height_idx(info.layout)] = h;
|
|
|
|
|
}
|
|
|
|
|
if (!info.dataShape[ov::layout::width_idx(info.layout)]) {
|
|
|
|
|
info.dataShape[ov::layout::width_idx(info.layout)] = w;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (std::any_of(info.dataShape.begin(), info.dataShape.end(), [](size_t d) {
|
|
|
|
|
return d == 0;
|
|
|
|
|
})) {
|
|
|
|
|
throw std::logic_error("Not enough information in shape and image to determine tensor shape "
|
|
|
|
|
"automatically autmatically. Input: " +
|
2022-01-18 13:40:54 +03:00
|
|
|
item.get_any_name() + ", File name: " + namesVector[fileIdx - 1]);
|
2021-12-30 19:09:12 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} else if (info.partialShape.is_static()) {
|
|
|
|
|
info.dataShape = info.partialShape.get_shape();
|
|
|
|
|
if (data_shapes_map.find(name) != data_shapes_map.end()) {
|
|
|
|
|
throw std::logic_error(
|
[BENCHMARK_APP/PYTHON/CPP] Align benchmark_app output across languages (#12814)
* [PYTHON] Pipeline transfer
* [PYTHON] Align python benchmark
* [PYTHON] Align last step
* [PYTHON] Fix innacuracies of the last step - median
* [PYTHON/CPP] Add Core::get_version method to python API, refactor Ben benchmark to print version with this func
* [PYTHON] Remove get_version_string from API
* [PYTHON/CPP] Align output for model input/output info
* [PYTHON/CPP] Step 4,6 alignment of outputs, step 8 dumps all info stored in config parameters
* [CPP] Fix a bug causing nstreams parameter to never be set to AUTO in CPP benchmark_app
* [CPP] Fix clang format errors
* [CPP] Modify print order and data output for 8th step
* [PYTHON] Add verification checks from C++, modify set_thoughtput_streams to match documentation
* [CPP] Revert changes to C++ benchmark_app
* [CPP] Remove additional spacebar
* Update submodules versions on remote
* Update module from master branch
* Redownload submodules from master and override changes from commit
* [PYTHON] Remove unneccesary parse_status from validation function
* [PYTHON] Check for HINT in map, fix circular import
* [PYTHON] Remove artifacts from commit, fix args.perf_hint set to '' instead to 'none'
* [PYTHON] Reverse changes to perf hint, add key in map check, fix validation function throwing error on set hint
* [PYTHON] Fix linter
* [PYTHON] Remove linter spacebar
* [CPP] Fix wait_all exception throw
* [CPP/PYTHON] Clean artifacts and unwanted changes from work process
* [PYTHON] Fix artifacts from merge, clean submodule update
* [C++ CPU] Fix device name string by removing padding NULL characters from the back
* [CPP] Fix ba infer_request_wrap in other throw-catch clauses
* [PYTHON/CPP] Fix missing latencies in final step for shape group, fix minor misaligned messages, add missing report parameter create infer requests time
* [CPP] Clang fix formatting
* [CPP] Reverse clang fix format on plugin.cpp
* [PYTHON/CPP] Fix C++ progressbar printing endl when disabled, fix rounding in python creating infer request message
* [CPP] Fix foramtiing error
* [PYTHON/C++] Refactor network to model based on naming conventions, provide fresh README output example
* [PYTHON/C++] Add example output to C++ README, remove unnecessary device loop
* [BENCHMARK_APP/C++] Fix artifact from refactoring, remove try-catch clause
* Update samples/cpp/benchmark_app/benchmark_app.hpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* [CPP] Fix clang errors
* [CPP/PLUGIN Reverse modification to extract to separate task
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/parameters.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* [PYTHON/C++/BENCHMARK_APP] Fix language inconsistencies, remove unnecessary checks
* Update pyopenvino.cpp
* [CPP/BENCHMARK_APP] Remove unnecessary try-catch, fix linter errors
* [PYTHON/CPP/BENCHMARK_APP] Revert changes to Core, align version prints usin only provided methods
* [DOCS/BENCHMARK_APP] Update README with proper model examples
* Update README.md
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
2022-11-14 14:10:36 +01:00
|
|
|
"Model's input \"" + name +
|
2021-12-30 19:09:12 +03:00
|
|
|
"\" is static. Use -shape argument for static inputs instead of -data_shape.");
|
|
|
|
|
}
|
|
|
|
|
} else if (!data_shapes_map.empty()) {
|
[BENCHMARK_APP/PYTHON/CPP] Align benchmark_app output across languages (#12814)
* [PYTHON] Pipeline transfer
* [PYTHON] Align python benchmark
* [PYTHON] Align last step
* [PYTHON] Fix innacuracies of the last step - median
* [PYTHON/CPP] Add Core::get_version method to python API, refactor Ben benchmark to print version with this func
* [PYTHON] Remove get_version_string from API
* [PYTHON/CPP] Align output for model input/output info
* [PYTHON/CPP] Step 4,6 alignment of outputs, step 8 dumps all info stored in config parameters
* [CPP] Fix a bug causing nstreams parameter to never be set to AUTO in CPP benchmark_app
* [CPP] Fix clang format errors
* [CPP] Modify print order and data output for 8th step
* [PYTHON] Add verification checks from C++, modify set_thoughtput_streams to match documentation
* [CPP] Revert changes to C++ benchmark_app
* [CPP] Remove additional spacebar
* Update submodules versions on remote
* Update module from master branch
* Redownload submodules from master and override changes from commit
* [PYTHON] Remove unneccesary parse_status from validation function
* [PYTHON] Check for HINT in map, fix circular import
* [PYTHON] Remove artifacts from commit, fix args.perf_hint set to '' instead to 'none'
* [PYTHON] Reverse changes to perf hint, add key in map check, fix validation function throwing error on set hint
* [PYTHON] Fix linter
* [PYTHON] Remove linter spacebar
* [CPP] Fix wait_all exception throw
* [CPP/PYTHON] Clean artifacts and unwanted changes from work process
* [PYTHON] Fix artifacts from merge, clean submodule update
* [C++ CPU] Fix device name string by removing padding NULL characters from the back
* [CPP] Fix ba infer_request_wrap in other throw-catch clauses
* [PYTHON/CPP] Fix missing latencies in final step for shape group, fix minor misaligned messages, add missing report parameter create infer requests time
* [CPP] Clang fix formatting
* [CPP] Reverse clang fix format on plugin.cpp
* [PYTHON/CPP] Fix C++ progressbar printing endl when disabled, fix rounding in python creating infer request message
* [CPP] Fix foramtiing error
* [PYTHON/C++] Refactor network to model based on naming conventions, provide fresh README output example
* [PYTHON/C++] Add example output to C++ README, remove unnecessary device loop
* [BENCHMARK_APP/C++] Fix artifact from refactoring, remove try-catch clause
* Update samples/cpp/benchmark_app/benchmark_app.hpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* [CPP] Fix clang errors
* [CPP/PLUGIN Reverse modification to extract to separate task
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/parameters.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* [PYTHON/C++/BENCHMARK_APP] Fix language inconsistencies, remove unnecessary checks
* Update pyopenvino.cpp
* [CPP/BENCHMARK_APP] Remove unnecessary try-catch, fix linter errors
* [PYTHON/CPP/BENCHMARK_APP] Revert changes to Core, align version prints usin only provided methods
* [DOCS/BENCHMARK_APP] Update README with proper model examples
* Update README.md
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
2022-11-14 14:10:36 +01:00
|
|
|
throw std::logic_error("Can't find model input name \"" + name + "\" in \"-data_shape " +
|
2021-12-30 19:09:12 +03:00
|
|
|
data_shapes_string + "\" command line parameter");
|
|
|
|
|
} else {
|
|
|
|
|
throw std::logic_error("-i or -data_shape command line parameter should be set for all inputs in case "
|
[BENCHMARK_APP/PYTHON/CPP] Align benchmark_app output across languages (#12814)
* [PYTHON] Pipeline transfer
* [PYTHON] Align python benchmark
* [PYTHON] Align last step
* [PYTHON] Fix innacuracies of the last step - median
* [PYTHON/CPP] Add Core::get_version method to python API, refactor Ben benchmark to print version with this func
* [PYTHON] Remove get_version_string from API
* [PYTHON/CPP] Align output for model input/output info
* [PYTHON/CPP] Step 4,6 alignment of outputs, step 8 dumps all info stored in config parameters
* [CPP] Fix a bug causing nstreams parameter to never be set to AUTO in CPP benchmark_app
* [CPP] Fix clang format errors
* [CPP] Modify print order and data output for 8th step
* [PYTHON] Add verification checks from C++, modify set_thoughtput_streams to match documentation
* [CPP] Revert changes to C++ benchmark_app
* [CPP] Remove additional spacebar
* Update submodules versions on remote
* Update module from master branch
* Redownload submodules from master and override changes from commit
* [PYTHON] Remove unneccesary parse_status from validation function
* [PYTHON] Check for HINT in map, fix circular import
* [PYTHON] Remove artifacts from commit, fix args.perf_hint set to '' instead to 'none'
* [PYTHON] Reverse changes to perf hint, add key in map check, fix validation function throwing error on set hint
* [PYTHON] Fix linter
* [PYTHON] Remove linter spacebar
* [CPP] Fix wait_all exception throw
* [CPP/PYTHON] Clean artifacts and unwanted changes from work process
* [PYTHON] Fix artifacts from merge, clean submodule update
* [C++ CPU] Fix device name string by removing padding NULL characters from the back
* [CPP] Fix ba infer_request_wrap in other throw-catch clauses
* [PYTHON/CPP] Fix missing latencies in final step for shape group, fix minor misaligned messages, add missing report parameter create infer requests time
* [CPP] Clang fix formatting
* [CPP] Reverse clang fix format on plugin.cpp
* [PYTHON/CPP] Fix C++ progressbar printing endl when disabled, fix rounding in python creating infer request message
* [CPP] Fix foramtiing error
* [PYTHON/C++] Refactor network to model based on naming conventions, provide fresh README output example
* [PYTHON/C++] Add example output to C++ README, remove unnecessary device loop
* [BENCHMARK_APP/C++] Fix artifact from refactoring, remove try-catch clause
* Update samples/cpp/benchmark_app/benchmark_app.hpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* [CPP] Fix clang errors
* [CPP/PLUGIN Reverse modification to extract to separate task
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/parameters.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* [PYTHON/C++/BENCHMARK_APP] Fix language inconsistencies, remove unnecessary checks
* Update pyopenvino.cpp
* [CPP/BENCHMARK_APP] Remove unnecessary try-catch, fix linter errors
* [PYTHON/CPP/BENCHMARK_APP] Revert changes to Core, align version prints usin only provided methods
* [DOCS/BENCHMARK_APP] Update README with proper model examples
* Update README.md
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
2022-11-14 14:10:36 +01:00
|
|
|
"of model with dynamic shapes.");
|
2021-12-30 19:09:12 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Update shape with batch if needed (only in static shape case)
|
|
|
|
|
// Update blob shape only not affecting network shape to trigger dynamic batch size case
|
|
|
|
|
if (batch_size != 0) {
|
|
|
|
|
if (ov::layout::has_batch(info.layout)) {
|
|
|
|
|
std::size_t batch_index = ov::layout::batch_idx(info.layout);
|
|
|
|
|
if (info.dataShape.at(batch_index) != batch_size) {
|
|
|
|
|
if (info.partialShape.is_static()) {
|
|
|
|
|
info.partialShape[batch_index] = batch_size;
|
|
|
|
|
}
|
|
|
|
|
info.dataShape[batch_index] = batch_size;
|
|
|
|
|
reshape_required = true;
|
2022-02-23 01:30:08 +03:00
|
|
|
is_there_at_least_one_batch_dim = true;
|
2021-12-30 19:09:12 +03:00
|
|
|
}
|
|
|
|
|
} else {
|
2022-01-18 13:40:54 +03:00
|
|
|
slog::warn << "Input '" << item.get_any_name()
|
2021-12-30 19:09:12 +03:00
|
|
|
<< "' doesn't have batch dimension in layout. -b option will be ignored for this input."
|
|
|
|
|
<< slog::endl;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
info_map[name] = info;
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-23 01:30:08 +03:00
|
|
|
if (batch_size > 1 && !is_there_at_least_one_batch_dim) {
|
|
|
|
|
throw std::runtime_error("-b option is provided in command line, but there's no inputs with batch(B) "
|
|
|
|
|
"dimension in input layout, so batch cannot be set. "
|
|
|
|
|
"You may specify layout explicitly using -layout option.");
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-30 19:09:12 +03:00
|
|
|
// Update scale and mean
|
2022-01-19 01:08:07 +03:00
|
|
|
std::map<std::string, std::vector<float>> scale_map = parse_scale_or_mean(scale_string, info_map);
|
|
|
|
|
std::map<std::string, std::vector<float>> mean_map = parse_scale_or_mean(mean_string, info_map);
|
2021-12-30 19:09:12 +03:00
|
|
|
|
|
|
|
|
for (auto& item : info_map) {
|
2022-01-19 01:08:07 +03:00
|
|
|
if (item.second.is_image()) {
|
2021-12-30 19:09:12 +03:00
|
|
|
item.second.scale.assign({1, 1, 1});
|
|
|
|
|
item.second.mean.assign({0, 0, 0});
|
|
|
|
|
|
|
|
|
|
if (scale_map.count(item.first)) {
|
|
|
|
|
item.second.scale = scale_map.at(item.first);
|
|
|
|
|
}
|
|
|
|
|
if (mean_map.count(item.first)) {
|
|
|
|
|
item.second.mean = mean_map.at(item.first);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
info_maps.push_back(info_map);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return info_maps;
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
std::vector<benchmark_app::InputsInfo> get_inputs_info(const std::string& shape_string,
|
|
|
|
|
const std::string& layout_string,
|
|
|
|
|
const size_t batch_size,
|
|
|
|
|
const std::string& tensors_shape_string,
|
|
|
|
|
const std::map<std::string, std::vector<std::string>>& fileNames,
|
|
|
|
|
const std::string& scale_string,
|
|
|
|
|
const std::string& mean_string,
|
|
|
|
|
const std::vector<ov::Output<const ov::Node>>& input_info) {
|
2021-12-30 19:09:12 +03:00
|
|
|
bool reshape_required = false;
|
2022-01-19 01:08:07 +03:00
|
|
|
return get_inputs_info(shape_string,
|
|
|
|
|
layout_string,
|
|
|
|
|
batch_size,
|
|
|
|
|
tensors_shape_string,
|
|
|
|
|
fileNames,
|
|
|
|
|
scale_string,
|
|
|
|
|
mean_string,
|
|
|
|
|
input_info,
|
|
|
|
|
reshape_required);
|
2021-12-30 19:09:12 +03:00
|
|
|
}
|
|
|
|
|
|
2020-04-15 19:01:57 +03:00
|
|
|
#ifdef USE_OPENCV
|
2022-01-25 12:43:56 +03:00
|
|
|
void dump_config(const std::string& filename, const std::map<std::string, ov::AnyMap>& config) {
|
2022-01-18 11:22:47 +03:00
|
|
|
slog::warn << "YAML and XML formats for config file won't be supported soon." << slog::endl;
|
2021-10-19 16:51:38 +03:00
|
|
|
auto plugin_to_opencv_format = [](const std::string& str) -> std::string {
|
|
|
|
|
if (str.find("_") != std::string::npos) {
|
|
|
|
|
slog::warn
|
|
|
|
|
<< "Device name contains \"_\" and will be changed during loading of configuration due to limitations."
|
|
|
|
|
"This configuration file could not be loaded correctly."
|
|
|
|
|
<< slog::endl;
|
|
|
|
|
}
|
|
|
|
|
std::string new_str(str);
|
|
|
|
|
auto pos = new_str.find(".");
|
|
|
|
|
if (pos != std::string::npos) {
|
|
|
|
|
new_str.replace(pos, 1, "_");
|
|
|
|
|
}
|
|
|
|
|
return new_str;
|
|
|
|
|
};
|
2020-04-15 19:01:57 +03:00
|
|
|
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
|
|
|
|
|
if (!fs.isOpened())
|
|
|
|
|
throw std::runtime_error("Error: Can't open config file : " + filename);
|
|
|
|
|
for (auto device_it = config.begin(); device_it != config.end(); ++device_it) {
|
2021-10-19 16:51:38 +03:00
|
|
|
fs << plugin_to_opencv_format(device_it->first) << "{:";
|
2022-01-25 12:43:56 +03:00
|
|
|
std::stringstream strm;
|
|
|
|
|
for (auto param_it = device_it->second.begin(); param_it != device_it->second.end(); ++param_it) {
|
|
|
|
|
strm << param_it->first;
|
|
|
|
|
param_it->second.print(strm);
|
|
|
|
|
}
|
|
|
|
|
fs << strm.str();
|
2020-04-15 19:01:57 +03:00
|
|
|
fs << "}";
|
|
|
|
|
}
|
|
|
|
|
fs.release();
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-25 12:43:56 +03:00
|
|
|
void load_config(const std::string& filename, std::map<std::string, ov::AnyMap>& config) {
|
2022-01-18 11:22:47 +03:00
|
|
|
slog::warn << "YAML and XML formats for config file won't be supported soon." << slog::endl;
|
2021-10-19 16:51:38 +03:00
|
|
|
auto opencv_to_plugin_format = [](const std::string& str) -> std::string {
|
|
|
|
|
std::string new_str(str);
|
|
|
|
|
auto pos = new_str.find("_");
|
|
|
|
|
if (pos != std::string::npos) {
|
|
|
|
|
new_str.replace(pos, 1, ".");
|
|
|
|
|
}
|
|
|
|
|
return new_str;
|
|
|
|
|
};
|
2020-04-15 19:01:57 +03:00
|
|
|
cv::FileStorage fs(filename, cv::FileStorage::READ);
|
|
|
|
|
if (!fs.isOpened())
|
|
|
|
|
throw std::runtime_error("Error: Can't load config file : " + filename);
|
|
|
|
|
cv::FileNode root = fs.root();
|
|
|
|
|
for (auto it = root.begin(); it != root.end(); ++it) {
|
|
|
|
|
auto device = *it;
|
|
|
|
|
if (!device.isMap()) {
|
|
|
|
|
throw std::runtime_error("Error: Can't parse config file : " + filename);
|
|
|
|
|
}
|
|
|
|
|
for (auto iit = device.begin(); iit != device.end(); ++iit) {
|
|
|
|
|
auto item = *iit;
|
2021-10-19 16:51:38 +03:00
|
|
|
config[opencv_to_plugin_format(device.name())][item.name()] = item.string();
|
2020-04-15 19:01:57 +03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2022-01-18 11:22:47 +03:00
|
|
|
#else
|
2022-01-25 12:43:56 +03:00
|
|
|
void dump_config(const std::string& filename, const std::map<std::string, ov::AnyMap>& config) {
|
2022-01-18 11:22:47 +03:00
|
|
|
nlohmann::json jsonConfig;
|
|
|
|
|
for (const auto& item : config) {
|
|
|
|
|
std::string deviceName = item.first;
|
|
|
|
|
for (const auto& option : item.second) {
|
2022-01-25 12:43:56 +03:00
|
|
|
std::stringstream strm;
|
|
|
|
|
option.second.print(strm);
|
|
|
|
|
jsonConfig[deviceName][option.first] = strm.str();
|
2022-01-18 11:22:47 +03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::ofstream ofs(filename);
|
|
|
|
|
if (!ofs.is_open()) {
|
|
|
|
|
throw std::runtime_error("Can't load config file \"" + filename + "\".");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ofs << jsonConfig;
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-25 12:43:56 +03:00
|
|
|
void load_config(const std::string& filename, std::map<std::string, ov::AnyMap>& config) {
|
2022-01-18 11:22:47 +03:00
|
|
|
std::ifstream ifs(filename);
|
|
|
|
|
if (!ifs.is_open()) {
|
|
|
|
|
throw std::runtime_error("Can't load config file \"" + filename + "\".");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nlohmann::json jsonConfig;
|
|
|
|
|
try {
|
|
|
|
|
ifs >> jsonConfig;
|
|
|
|
|
} catch (const nlohmann::json::parse_error& e) {
|
|
|
|
|
throw std::runtime_error("Can't parse config file \"" + filename + "\".\n" + e.what());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (const auto& item : jsonConfig.items()) {
|
|
|
|
|
std::string deviceName = item.key();
|
|
|
|
|
for (const auto& option : item.value().items()) {
|
|
|
|
|
config[deviceName][option.key()] = option.value().get<std::string>();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
Dynamic reshapes (#7788)
* Merged and compiling
* Fix for dynamic shape type
* review fixes
* renamed blob shape to tensor shape, small improvements
* fix code style
* added parsing of multiple shapes
* store latency per group, add isIdleRequestAvailable() to Infer Queue
* added cached random inputs
* redesign pipeline, added new metrics(avg, max, min), added metrics per groups
* fixed code style
* small improvements
* modified tensor parameters parsing
* modified -i parameter parsing: added possibility to specify input names
* implemented image cashing
* added cashed blobs creating
* added -pcseq flag, modified batch filling, changes fps formula
* improvements
* code formatting
* code formatting2
* apply suggestions from review
* replaced Buffer class with InferenceEngine Blobs
* use batch size in blobs filling
* added shared blob allocator to handle blob's data
* fixed warnings & code style
* allocate blobs
* fix for networks with image info input
* added comments & fixed codestyle
* clear data in free() in SharedBlobAllocator
* remove unnecessary check
* Delimeter is changed to ::
* stylefix
* added layout from string function, small improvements
* modified parsing to enable : in input parameters
* small fixes
* small fixes
* added missed blob allocation, fixes
* [TEST]added support for remote blobs
* fix remote blobs
* new inputs/files output format
* removed vectors resize which caused bugs
* made cl::Buffer type under ifdef, fix inputs filling
* changed batch() function to not throwing exceptions
* removed unused var
* fix code style
* replace empty name in input files with name from net input
* restored old behaviour for static models
* fix code style
* fix warning - made const iterator
* fix warning - remove reference in loop variable
* added random and image_info input types to -i, fix problem with layout
* replaced batch() with getBatchSize() in main
* fix layout, shape, tensor shape parameters parsing
* upd help messages for input, tensor shape and pcseq command
* added buffer for cl output blobs, small fixes
Signed-off-by: ivikhrev <ivan.vikhrev@intel.com>
* added legacy mode
* restore setBlob
* code style formatting
* move collecting latency for groups under flag
* removed not applicable layouts
* added hint to error message when wrong input name in -tensor_shape was specified
* added new metrics to statistics report
* Apply suggestions from code review
* fix binary blobs filling when layout is CN
* apply suggestions
* moved file in the right place after rebase
* improved -pcseq output
* updated args and readme
* removed TEMPLATE plugin registration
* fix -shape arg decsription
* enable providing several -i args as input
* renamed legacy_mode to inference_only and made it default for static models, renamed tensor_shape to data_shape
* upd readme
* use getBlob() in inference only mode
* fix old input type for static case
* fix typo
* upd readme
* move log about benchmark mode to the measuring perfomance step
* added class for latency metrics
* upd readme, fix typos, renamed funcs
* fix warning and upd parsing to avoid error with : in file paths
* fix error on centos : error: use of deleted function ‘std::basic_stringstream<char>::basic_stringstream(const std::basic_stringstream<char>&)
* added check for key in inputs
* renamed input to inputs
* adjust batch size for binary blobs
* replaced warning with exception in bench mode defining
* align measurement cycle with master
Co-authored-by: ivikhrev <ivan.vikhrev@intel.com>
2021-12-17 12:20:43 +03:00
|
|
|
#endif
|
2021-12-30 19:09:12 +03:00
|
|
|
|
|
|
|
|
#ifdef USE_OPENCV
// Image formats readable through the OpenCV-backed format reader.
const std::vector<std::string> supported_image_extensions =
    {"bmp", "dib", "jpeg", "jpg", "jpe", "jp2", "png", "pbm", "pgm", "ppm", "sr", "ras", "tiff", "tif"};
#else
// Without OpenCV only the built-in BMP reader is available.
const std::vector<std::string> supported_image_extensions = {"bmp"};
#endif
// Raw tensor dumps fed directly into input blobs.
const std::vector<std::string> supported_binary_extensions = {"bin"};
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
// Returns the substring after the last '.' in `name` (the file extension,
// case preserved - callers lower-case it themselves), or an empty string
// when `name` contains no '.' at all.
std::string get_extension(const std::string& name) {
    const auto dot_pos = name.rfind('.');
    if (dot_pos == std::string::npos) {
        return "";
    }
    return name.substr(dot_pos + 1);
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
bool is_binary_file(const std::string& filePath) {
|
|
|
|
|
auto extension = get_extension(filePath);
|
2021-12-30 19:09:12 +03:00
|
|
|
std::transform(extension.begin(), extension.end(), extension.begin(), ::tolower);
|
|
|
|
|
return std::find(supported_binary_extensions.begin(), supported_binary_extensions.end(), extension) !=
|
|
|
|
|
supported_binary_extensions.end();
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
bool is_image_file(const std::string& filePath) {
|
|
|
|
|
auto extension = get_extension(filePath);
|
2021-12-30 19:09:12 +03:00
|
|
|
std::transform(extension.begin(), extension.end(), extension.begin(), ::tolower);
|
|
|
|
|
return std::find(supported_binary_extensions.begin(), supported_binary_extensions.end(), extension) !=
|
|
|
|
|
supported_binary_extensions.end();
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-19 01:08:07 +03:00
|
|
|
bool contains_binaries(const std::vector<std::string>& filePaths) {
|
2021-12-30 19:09:12 +03:00
|
|
|
std::vector<std::string> filtered;
|
|
|
|
|
for (auto& filePath : filePaths) {
|
2022-01-19 01:08:07 +03:00
|
|
|
auto extension = get_extension(filePath);
|
2021-12-30 19:09:12 +03:00
|
|
|
std::transform(extension.begin(), extension.end(), extension.begin(), ::tolower);
|
|
|
|
|
if (std::find(supported_binary_extensions.begin(), supported_binary_extensions.end(), extension) !=
|
|
|
|
|
supported_binary_extensions.end()) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return false;
|
|
|
|
|
}
|
2022-01-19 01:08:07 +03:00
|
|
|
// Returns only those paths whose extension (compared case-insensitively)
// appears in `extensions`; input order is preserved.
std::vector<std::string> filter_files_by_extensions(const std::vector<std::string>& filePaths,
                                                    const std::vector<std::string>& extensions) {
    std::vector<std::string> matched;
    for (const auto& path : filePaths) {
        // Extract the text after the last '.' ("" when there is no dot),
        // lower-cased for comparison.
        const auto dot_pos = path.rfind('.');
        std::string ext = dot_pos == std::string::npos ? "" : path.substr(dot_pos + 1);
        std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
        if (std::find(extensions.begin(), extensions.end(), ext) != extensions.end()) {
            matched.push_back(path);
        }
    }
    return matched;
}
|
2022-02-01 16:05:00 +03:00
|
|
|
|
|
|
|
|
std::string parameter_name_to_tensor_name(const std::string& name,
|
|
|
|
|
const std::vector<ov::Output<const ov::Node>>& inputs_info,
|
|
|
|
|
const std::vector<ov::Output<const ov::Node>>& outputs_info) {
|
|
|
|
|
if (std::any_of(inputs_info.begin(), inputs_info.end(), [name](const ov::Output<const ov::Node>& port) {
|
|
|
|
|
try {
|
2022-03-04 09:49:03 +03:00
|
|
|
return port.get_names().count(name) > 0;
|
2022-02-01 16:05:00 +03:00
|
|
|
} catch (const ov::Exception&) {
|
|
|
|
|
return false; // Some ports might have no names - so this is workaround
|
|
|
|
|
}
|
|
|
|
|
})) {
|
|
|
|
|
return name;
|
|
|
|
|
} else if (std::any_of(outputs_info.begin(), outputs_info.end(), [name](const ov::Output<const ov::Node>& port) {
|
|
|
|
|
try {
|
2022-03-04 09:49:03 +03:00
|
|
|
return port.get_names().count(name) > 0;
|
2022-02-01 16:05:00 +03:00
|
|
|
} catch (const ov::Exception&) {
|
|
|
|
|
return false; // Some ports might have no names - so this is workaround
|
|
|
|
|
}
|
|
|
|
|
})) {
|
|
|
|
|
return name;
|
|
|
|
|
} else {
|
|
|
|
|
for (const auto& port : inputs_info) {
|
|
|
|
|
if (name == port.get_node()->get_friendly_name()) {
|
|
|
|
|
return port.get_any_name();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
for (const auto& port : outputs_info) {
|
|
|
|
|
if (name == port.get_node()->get_input_node_ptr(0)->get_friendly_name()) {
|
|
|
|
|
return port.get_any_name();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
throw std::runtime_error("Provided I/O name \"" + name +
|
|
|
|
|
"\" is not found neither in tensor names nor in nodes names.");
|
[BENCHMARK_APP/PYTHON/CPP] Align benchmark_app output across languages (#12814)
* [PYTHON] Pipeline transfer
* [PYTHON] Align python benchmark
* [PYTHON] Align last step
* [PYTHON] Fix innacuracies of the last step - median
* [PYTHON/CPP] Add Core::get_version method to python API, refactor Ben benchmark to print version with this func
* [PYTHON] Remove get_version_string from API
* [PYTHON/CPP] Align output for model input/output info
* [PYTHON/CPP] Step 4,6 alignment of outputs, step 8 dumps all info stored in config parameters
* [CPP] Fix a bug causing nstreams parameter to never be set to AUTO in CPP benchmark_app
* [CPP] Fix clang format errors
* [CPP] Modify print order and data output for 8th step
* [PYTHON] Add verification checks from C++, modify set_thoughtput_streams to match documentation
* [CPP] Revert changes to C++ benchmark_app
* [CPP] Remove additional spacebar
* Update submodules versions on remote
* Update module from master branch
* Redownload submodules from master and override changes from commit
* [PYTHON] Remove unneccesary parse_status from validation function
* [PYTHON] Check for HINT in map, fix circular import
* [PYTHON] Remove artifacts from commit, fix args.perf_hint set to '' instead to 'none'
* [PYTHON] Reverse changes to perf hint, add key in map check, fix validation function throwing error on set hint
* [PYTHON] Fix linter
* [PYTHON] Remove linter spacebar
* [CPP] Fix wait_all exception throw
* [CPP/PYTHON] Clean artifacts and unwanted changes from work process
* [PYTHON] Fix artifacts from merge, clean submodule update
* [C++ CPU] Fix device name string by removing padding NULL characters from the back
* [CPP] Fix ba infer_request_wrap in other throw-catch clauses
* [PYTHON/CPP] Fix missing latencies in final step for shape group, fix minor misaligned messages, add missing report parameter create infer requests time
* [CPP] Clang fix formatting
* [CPP] Reverse clang fix format on plugin.cpp
* [PYTHON/CPP] Fix C++ progressbar printing endl when disabled, fix rounding in python creating infer request message
* [CPP] Fix foramtiing error
* [PYTHON/C++] Refactor network to model based on naming conventions, provide fresh README output example
* [PYTHON/C++] Add example output to C++ README, remove unnecessary device loop
* [BENCHMARK_APP/C++] Fix artifact from refactoring, remove try-catch clause
* Update samples/cpp/benchmark_app/benchmark_app.hpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update samples/cpp/benchmark_app/main.cpp
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* [CPP] Fix clang errors
* [CPP/PLUGIN Reverse modification to extract to separate task
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/parameters.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Update tools/benchmark_tool/openvino/tools/benchmark/main.py
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* [PYTHON/C++/BENCHMARK_APP] Fix language inconsistencies, remove unnecessary checks
* Update pyopenvino.cpp
* [CPP/BENCHMARK_APP] Remove unnecessary try-catch, fix linter errors
* [PYTHON/CPP/BENCHMARK_APP] Revert changes to Core, align version prints usin only provided methods
* [DOCS/BENCHMARK_APP] Update README with proper model examples
* Update README.md
Co-authored-by: Nadezhda Ageeva <nkogteva@gmail.com>
Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
2022-11-14 14:10:36 +01:00
|
|
|
}
|