No-break space->space, asssuming->assuming (#14276)

* No-break space->space, asssuming->assuming

* benchmark_app: don't print warning each iteration

* Russian с->English c

* Warn no batch only once

* Allow anonymous namespaces

* Keep only one report of model batch size, warn_if_no_batch for static model as well
This commit is contained in:
Zlobin Vladimir
2022-11-30 15:25:16 +04:00
committed by GitHub
parent 9eb43bb8b4
commit c09af40b4c
11 changed files with 49 additions and 54 deletions

View File

@@ -155,26 +155,27 @@ Options:
-dump_config Optional. Path to JSON file to dump IE parameters, which were set by application.
-load_config Optional. Path to JSON file to load custom IE parameters. Please note, command line parameters have higher priority than parameters from configuration file.
Example 1: a simple JSON file for HW device with primary properties.
{
"CPU": {"NUM_STREAMS": "3", "PERF_COUNT": "NO"}
}
{
"CPU": {"NUM_STREAMS": "3", "PERF_COUNT": "NO"}
}
Example 2: a simple JSON file for meta device(AUTO/MULTI) with HW device properties.
{
"AUTO": {
"PERFORMANCE_HINT": "",
"PERF_COUNT": "NO",
"DEVICE_PROPERTIES": {
"CPU": {
"INFERENCE_PRECISION_HINT": "f32",
"NUM_STREAMS": "3"
},
"GPU": {
"INFERENCE_PRECISION_HINT": "f32",
"NUM_STREAMS": "5"
}
}
}
}
{
"AUTO": {
"PERFORMANCE_HINT": "",
"PERF_COUNT": "NO",
"DEVICE_PROPERTIES": {
"CPU": {
"INFERENCE_PRECISION_HINT": "f32",
"NUM_STREAMS": "3"
},
"GPU": {
"INFERENCE_PRECISION_HINT": "f32",
"NUM_STREAMS": "5"
}
}
}
}
-infer_precision "<element type>"Optional. Inference precision
-ip <value> Optional. Specifies precision for all input layers of the model.
-op <value> Optional. Specifies precision for all output layers of the model.

View File

@@ -29,6 +29,7 @@
#include "utils.hpp"
// clang-format on
namespace {
bool parse_and_check_command_line(int argc, char* argv[]) {
// ---------------------------Parsing and validating input
// arguments--------------------------------------
@@ -92,7 +93,7 @@ bool parse_and_check_command_line(int argc, char* argv[]) {
return true;
}
static void next_step(const std::string additional_info = "") {
void next_step(const std::string additional_info = "") {
static size_t step_id = 0;
static const std::map<size_t, std::string> step_names = {{1, "Parsing and validating input arguments"},
{2, "Loading OpenVINO Runtime"},
@@ -188,6 +189,19 @@ void setDeviceProperty(ov::Core& core,
}
}
/// @brief Emit a one-time warning when no model input carries a batch dimension.
///
/// Scans the first set of input infos; if no input's layout contains a batch
/// dimension, warns the user that a batch size of 1 is assumed, since that
/// assumption affects the reported FPS numbers.
///
/// @param first_inputs  Map of input name -> InputInfo for the first inputs group.
void warn_if_no_batch(const benchmark_app::InputsInfo& first_inputs) {
    if (!std::any_of(first_inputs.begin(),
                     first_inputs.end(),
                     [](const std::pair<const std::string, benchmark_app::InputInfo>& info) {
                         return ov::layout::has_batch(info.second.layout);
                     })) {
        // Fix: message previously read "asssuming" — user-facing typo.
        slog::warn
            << "No batch dimension was found, assuming batch to be 1. Beware: this might affect FPS calculation."
            << slog::endl;
    }
}
} // namespace
/**
* @brief The entry point of the benchmark application
*/
@@ -608,7 +622,7 @@ int main(int argc, char* argv[]) {
if (statistics)
statistics->add_parameters(
StatisticsReport::Category::EXECUTION_RESULTS,
{StatisticsVariant("сompile model time (ms)", "load_model_time", duration_ms)});
{StatisticsVariant("compile model time (ms)", "load_model_time", duration_ms)});
convert_io_names_in_map(inputFiles, compiledModel.inputs());
app_inputs_info = get_inputs_info(FLAGS_shape,
@@ -767,14 +781,9 @@ int main(int argc, char* argv[]) {
topology_name = model->get_friendly_name();
// Calculate batch size according to provided layout and shapes (static case)
if (!isDynamicNetwork && app_inputs_info.size()) {
batchSize = get_batch_size(app_inputs_info.front());
slog::info << "Model batch size: " << batchSize << slog::endl;
} else if (batchSize == 0) {
batchSize = 1;
}
batchSize = get_batch_size(app_inputs_info.at(0));
warn_if_no_batch(app_inputs_info.at(0));
slog::info << "Model batch size: " << batchSize << slog::endl;
printInputAndOutputsInfoShort(*model);
// ----------------- 7. Loading the model to the device
@@ -1136,16 +1145,6 @@ int main(int argc, char* argv[]) {
if (isDynamicNetwork) {
batchSize = get_batch_size(inputs);
if (!std::any_of(inputs.begin(),
inputs.end(),
[](const std::pair<const std::string, benchmark_app::InputInfo>& info) {
return ov::layout::has_batch(info.second.layout);
})) {
slog::warn
<< "No batch dimension was found, asssuming batch to be 1. Beware: this might affect "
"FPS calculation."
<< slog::endl;
}
}
for (auto& item : inputs) {

View File

@@ -201,9 +201,6 @@ size_t get_batch_size(const benchmark_app::InputsInfo& inputs_info) {
}
}
if (batch_size == 0) {
slog::warn << "No batch dimension was found at any input, asssuming batch to be 1. Beware: this might affect "
"FPS calculation."
<< slog::endl;
batch_size = 1;
}
return batch_size;
@@ -440,8 +437,6 @@ std::vector<benchmark_app::InputsInfo> get_inputs_info(const std::string& shape_
}
}
slog::info << "Model batch size: " << batch_size << slog::endl;
reshape_required = false;
std::map<std::string, int> currentFileCounters;