[IE Tools] Replace .format with f-strings (#4970)

* Replace .format with f-strings in cross_check_tool

* Replace .format with f-strings in utils.py

* Replace .format with f-strings in benchmark tool

* Replace remaining .format calls with f-strings in benchmark tool

* Add f-string after update

* Fix some lines

* Fix utils
Alexey Lebedev 2021-04-02 14:50:57 +03:00 committed by GitHub
parent 60bd0178a4
commit 3a6fba913c
8 changed files with 136 additions and 145 deletions
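For reference, the pattern applied across all eight files below is a mechanical rewrite of str.format() calls into f-strings (PEP 498, available since Python 3.6). A minimal sketch of the equivalence, using made-up values:

device = 'CPU'
count = 3
old = 'Device {} uses {} streams'.format(device, count)  # positional placeholders
new = f'Device {device} uses {count} streams'            # expressions inlined in the literal
assert old == new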

View File

@@ -15,14 +15,14 @@ try:
from openvino.inference_engine import IENetwork, IECore
except Exception as e:
exception_type = type(e).__name__
-print("The following error happened while importing Python API module:\n[ {} ] {}".format(exception_type, e))
+print(f"The following error happened while importing Python API module:\n[ {exception_type} ] {e}")
sys.exit(1)
try:
import ngraph as ng
except Exception as e:
exception_type = type(e).__name__
-print("The following error happened while importing nGraph module:\n[ {} ] {}".format(exception_type, e))
+print(f"The following error happened while importing nGraph module:\n[ {exception_type} ] {e}")
sys.exit(1)
from utils import get_config_dictionary, get_layers_list, print_output_layers, input_processing, \
@@ -120,7 +120,7 @@ def infer(net: IENetwork, core: IECore, device: str, inputs: dict, output: list)
result = {}
for out in output:
if out not in infer_dict:
-log.warning("There is no '{}' layer in Inference Engine outputs results".format(out))
+log.warning(f"There is no '{out}' layer in Inference Engine outputs results")
continue
pc = pc[out] if out in pc else no_info_pc
pc['device'] = device
@@ -151,8 +151,8 @@ def one_ir_mode(args):
core = get_plugin(args.device, args.l, args.config)
net = get_net(model=args.model, core=core)
net_layers, net_inputs, net_outputs = get_model_info(net)
-log.info('{} vs {}'.format(args.device, args.reference_device))
-log.info('The same IR on both devices: {}'.format(args.model))
+log.info(f'{args.device} vs {args.reference_device}')
+log.info(f'The same IR on both devices: {args.model}')
out_layers = get_layers_list(net_layers, net_inputs, net_outputs, args.layers)
print_input_layers(net_inputs)
print_output_layers(out_layers)
@@ -166,7 +166,7 @@ def one_ir_mode(args):
ref_device=args.reference_device, layers=args.layers,
num_of_iterations=args.num_of_iterations)
for out_layer in out_layers:
-log.info('Layer {} statistics'.format(out_layer))
+log.info(f'Layer {out_layer} statistics')
net_copy = get_net_copy_with_output(model=args.model, output=out_layer, core=core)
results = infer(net=net_copy, core=core, device=args.device, inputs=inputs, output=[out_layer])
if out_layer not in results:
@@ -192,9 +192,9 @@ def two_ir_mode(args):
net_layers, net_inputs, net_outputs = get_model_info(net)
ref_net = get_net(model=args.reference_model, core=ref_core)
ref_net_layers, ref_net_inputs, ref_net_outputs = get_model_info(ref_net)
-log.info('{} vs {}'.format(args.device, args.reference_device))
-log.info('IR for {} : {}'.format(args.device, args.model))
-log.info('IR for {} : {}'.format(args.reference_device, args.reference_model))
+log.info(f'{args.device} vs {args.reference_device}')
+log.info(f'IR for {args.device} : {args.model}')
+log.info(f'IR for {args.reference_device} : {args.reference_model}')
out_layers = get_layers_list(net_layers, net_inputs, net_outputs, args.layers)
ref_out_layers = get_layers_list(ref_net_layers, ref_net_inputs, ref_net_outputs, args.layers)
print_input_layers(net_inputs)
@@ -215,9 +215,9 @@ def two_ir_mode(args):
for out_layer in layers_map:
ref_out_layer = layers_map[out_layer]
if out_layer == ref_out_layer:
-log.info('Layer {} statistics'.format(out_layer))
+log.info(f'Layer {out_layer} statistics')
else:
-log.info('Statistics \'{}\' vs \'{}\''.format(out_layer, ref_out_layer))
+log.info(f'Statistics \'{out_layer}\' vs \'{ref_out_layer}\'')
net_copy = get_net_copy_with_output(model=args.model, output=out_layer, core=core)
ref_net_copy = get_net_copy_with_output(model=args.reference_model, output=ref_out_layer, core=ref_core)
results = infer(net=net_copy, core=core, device=args.device, inputs=inputs, output=[out_layer])
@@ -246,7 +246,7 @@ def dump_mode(args):
inputs = input_processing(args.model, net.input_info, args.input)
dump_dict = {}
for out_layer in out_layers:
-log.info('Layer {} processing'.format(out_layer))
+log.info(f'Layer {out_layer} processing')
net_copy = get_net_copy_with_output(model=args.model, output=out_layer, core=core)
results = infer(net=net_copy, core=core, device=args.device, inputs=inputs, output=[out_layer])
if out_layer not in results:
@@ -258,8 +258,8 @@
def load_mode(args):
core = get_plugin(args.device, args.l, args.config)
-log.info('IR for {} : {}'.format(args.device, args.model))
-log.info('Loading blob from {}'.format(args.load))
+log.info(f'IR for {args.device} : {args.model}')
+log.info(f'Loading blob from {args.load}')
net = get_net(model=args.model, core=core)
net_layers, net_inputs, net_outputs = get_model_info(net)
out_layers = get_layers_list(net_layers, net_inputs, net_outputs, args.layers)
@@ -273,9 +273,9 @@ def load_mode(args):
for out_layer in layers_map:
ref_out_layer = layers_map[out_layer]
if out_layer == ref_out_layer:
-log.info('Layer {} statistics'.format(out_layer))
+log.info(f'Layer {out_layer} statistics')
else:
-log.info('Statistics \'{}\' vs \'{}\''.format(out_layer, ref_out_layer))
+log.info(f'Statistics \'{out_layer}\' vs \'{ref_out_layer}\'')
net_copy = get_net_copy_with_output(model=args.model, output=out_layer, core=core)
results = infer(net=net_copy, core=core, device=args.device, inputs=inputs, output=[out_layer])
if out_layer not in results:
@@ -294,7 +294,7 @@ def load_mode(args):
def main(args):
-log.info('Inference Engine:\n API version ............ {}'.format(ie.__version__), extra={'no_lvl': True})
+log.info(f'Inference Engine:\n API version ............ {ie.__version__}', extra={'no_lvl': True})
set_verbosity(args.verbosity)
mode = find_out_cct_mode(args)
if mode == 1:
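A note on the escaped quotes in the hunks above: f'Statistics \'{out_layer}\' vs ...' keeps the outer single quotes by escaping them in the literal text; before Python 3.12 the delimiter quote could not otherwise reappear inside the braces. An illustrative sketch with stand-in layer names:

out_layer, ref_out_layer = 'conv1', 'conv1_ref'
# escaping the delimiter inside the literal text works on any Python 3.6+
print(f'Statistics \'{out_layer}\' vs \'{ref_out_layer}\'')
# switching the outer delimiter avoids the escapes entirely
print(f"Statistics '{out_layer}' vs '{ref_out_layer}'")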

View File

@@ -11,15 +11,15 @@ import xml
try:
import cv2
except Exception as e:
-log.error("Can not import OpenCV Python package.\nPlease install required python packages by running:\n"
-"pip3 install -r requirements.txt\n\n Original error message: {}".format(e))
+log.error(f"Can not import OpenCV Python package.\nPlease install required python packages by running:\n"
+f"pip3 install -r requirements.txt\n\n Original error message: {e}")
sys.exit(1)
try:
import numpy as np
except Exception as e:
-log.error("Can not import numpy python package.\nPlease install required python packages by running:\n"
-"pip3 install -r requirements.txt\n\n Original error message: {}".format(e))
+log.error(f"Can not import numpy python package.\nPlease install required python packages by running:\n"
+f"pip3 install -r requirements.txt\n\n Original error message: {e}")
sys.exit(1)
verbosity = False
@@ -78,8 +78,7 @@ def error_handling(desc: str):
return func(*args, **kwargs)
except Exception as e:
exception_type = type(e).__name__
-log.error("The following error happened while {}:\n[ {} ] {}".format(desc.format(**kwargs),
-exception_type, e))
+log.error(f"The following error happened while {desc.format(**kwargs)}:\n[ {exception_type} ] {e}")
global verbosity
if verbosity:
traceback.print_tb(tb=e.__traceback__, file=sys.stdout)
@@ -98,7 +97,7 @@ class ExistingFileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values is not None:
if not os.path.isfile(values):
-log.error("File was not found: {}".format(values))
+log.error(f"File was not found: {values}")
sys.exit(1)
setattr(namespace, self.dest, values)
@@ -111,7 +110,7 @@ class ExistingDirAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values is not None:
if not os.path.isdir(values):
-log.error("Directory was not found: {}".format(values))
+log.error(f"Directory was not found: {values}")
sys.exit(1)
setattr(namespace, self.dest, values)
@@ -276,12 +275,12 @@ def find_out_cct_mode(args):
def print_input_layers(inputs: list):
word = 'inputs' if len(inputs) > 1 else 'input'
-log.info('{} {} detected: {}'.format(len(inputs), word, ', '.join(inputs)))
+log.info(f"{len(inputs)} {word} detected: {', '.join(inputs)}")
def print_output_layers(outputs: list):
layers = 'layers' if len(outputs) > 1 else 'layer'
-log.info('Statistics will be dumped for {} {}: {}'.format(len(outputs), layers, ', '.join(outputs)))
+log.info(f"Statistics will be dumped for {len(outputs)} {layers}: {', '.join(outputs)}")
###
@@ -313,24 +312,21 @@ def read_multi_input_file(input_file: str, net_inputs: dict):
dump = {}
for net_input in net_inputs:
if net_input not in files:
-raise Exception('Can not find input data for input {} in multi-input file {}.\n'
-'Input data was provided for layers: {}\n'
-'Network inputs: {}'.format(net_input, input_file, ', '.join(files),
-', '.join(net_inputs.keys())))
+raise Exception(f"Can not find input data for input {net_input} in multi-input file {input_file}.\n"
+f"Input data was provided for layers: {', '.join(files)}\n"
+f"Network inputs: {', '.join(net_inputs.keys())}")
if 'blob' in npz[net_input].item(0):
just_blob = npz[net_input].item(0)['blob']
network_shape = net_inputs[net_input].input_data.shape
-log.info('Layer {} shape = {}, input blob from multi-input file shape = {}'
-''.format(net_input, network_shape, just_blob.shape))
+log.info(f'Layer {net_input} shape = {network_shape}, input blob from multi-input file shape = {just_blob.shape}')
try:
reshaped_blob = np.reshape(just_blob, network_shape)
except:
-raise Exception('Can not reshape input blob from multi-input file for layer {} to shape {}'
-''.format(net_input, network_shape))
+raise Exception(f'Can not reshape input blob from multi-input file for layer {net_input} to shape {network_shape}')
dump[net_input] = reshaped_blob
else:
raise Exception(
-'Can not find \'blob\' parameter for input {} in input file {}'.format(net_input, input_file))
+f'Can not find \'blob\' parameter for input {net_input} in input file {input_file}')
return dump
@@ -372,8 +368,7 @@ def input_processing(model_path: str, net_inputs: dict, input_file: str, layers_
def accuracy_metrics(out_blob, ref_out_blob):
if out_blob.size != ref_out_blob.size:
-raise Exception('Different number of elements in blobs {} and {}. Can not compare'
-''.format(out_blob.size, ref_out_blob.size))
+raise Exception(f'Different number of elements in blobs {out_blob.size} and {ref_out_blob.size}. Can not compare')
abs_diff = np.absolute(out_blob - ref_out_blob)
rel_diff = np.divide(abs_diff, np.min(abs_diff) if np.min(abs_diff) != 0 else 1e-20)
@@ -394,9 +389,9 @@ def accuracy_metrics(out_blob, ref_out_blob):
for key, value in metrics:
if len(str(value)) > 5:
-log.info('{:>35} : {:.5E}'.format(key, value), extra={'no_lvl': True})
+log.info(f'{key:>35} : {value:.5E}', extra={'no_lvl': True})
else:
-log.info('{:>35} : {}'.format(key, value), extra={'no_lvl': True})
+log.info(f'{key:>35} : {value}', extra={'no_lvl': True})
return {metric: value for metric, value in metrics}
@@ -409,7 +404,7 @@ def performance_metrics(pc, ref_pc):
]
for metric, actual, reference in compare:
-log.info('{:>35}: {:>16} {:>16}'.format(metric, actual, reference), extra={'no_lvl': True})
+log.info(f'{metric:>35}: {actual:>16} {reference:>16}', extra={'no_lvl': True})
def blob_counters(out_blob, ref_out_blob):
@@ -420,7 +415,7 @@ def blob_counters(out_blob, ref_out_blob):
ref_out_blob.size - np.count_nonzero(ref_out_blob))
]
for metric, actual, reference in counters:
-log.info('{:>35}: {:>16} {:>16}'.format(metric, actual, reference), extra={'no_lvl': True})
+log.info(f'{metric:>35}: {actual:>16} {reference:>16}', extra={'no_lvl': True})
def update_global_accuracy_matrics(global_accuracy: list, current_accuracy: dict):
@@ -444,12 +439,13 @@ def print_all_over_the_net_metrics(global_accuracy: (str, float), global_times:
ref_global_times: list = None):
if global_times is not None and ref_global_times is not None and len(global_times) and len(ref_global_times):
log.info('-' * 70, extra={'no_lvl': True})
-log.info('{:>35}: {:>16,.5E} {:>16,.5E}'.format(
-'Overall performance, microseconds', global_times[len(global_times) // 2].microseconds,
-ref_global_times[len(ref_global_times) // 2].microseconds), extra={'no_lvl': True})
+log.info(f'{"Overall performance, microseconds":>35}: '
+f'{global_times[len(global_times) // 2].microseconds:>16,.5E} '
+f'{ref_global_times[len(ref_global_times) // 2].microseconds:>16,.5E}',
+extra={'no_lvl': True})
log.info('-' * 70, extra={'no_lvl': True})
for metric, value in global_accuracy:
-log.info('{} {} = {}'.format('Overall', metric.lower(), value))
+log.info(f"Overall {metric.lower()} = {value}")
###
@@ -493,9 +489,9 @@ def manage_user_outputs_with_mapping(mapping, reference_mapping, user_layers):
if layer not in layers_map:
if mapping is not None and reference_mapping is not None:
log.warning(
-'Can not map layer {} from --model/-m to any layer from --reference_model/-ref_m'.format(layer))
+f'Can not map layer {layer} from --model/-m to any layer from --reference_model/-ref_m')
else:
-log.warning('Can not find layer {} in --reference_model/-ref_m model'.format(layer))
+log.warning(f'Can not find layer {layer} in --reference_model/-ref_m model')
for layer in layers_map:
if layer not in user_layers:
del layers_map[layer]
@@ -513,9 +509,9 @@ def get_layers_list(all_layers: list, inputs: dict, outputs: list, layers: str):
layers_to_check = []
for user_layer in user_layers:
if user_layer not in all_layers_names:
-raise Exception("Layer {} doesn't exist in the model".format(user_layer))
+raise Exception(f"Layer {user_layer} doesn't exist in the model")
if user_layer in inputs:
-raise Exception("Layer {} is input layer. Can not proceed".format(user_layer))
+raise Exception(f"Layer {user_layer} is input layer. Can not proceed")
if all_layers_names[user_layer].get_type_name() != 'Result':
layers_to_check.append(user_layer)
else:
@@ -533,7 +529,7 @@ def get_layers_list(all_layers: list, inputs: dict, outputs: list, layers: str):
def dump_output_file(output_file, dump_dict):
np.savez_compressed(output_file, **dump_dict)
-log.info('Dump file path: {}'.format(output_file))
+log.info(f'Dump file path: {output_file}')
def load_dump(file_to_load: str):
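The alignment and precision specifiers in this file carry over from .format() to f-strings unchanged; the value simply moves inside the braces ahead of the colon. A small sketch with assumed values:

key, value = 'Max absolute difference', 0.0012345678
print('{:>35} : {:.5E}'.format(key, value))  # .format() form
print(f'{key:>35} : {value:.5E}')            # f-string form, identical output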

View File

@@ -27,20 +27,19 @@ class Benchmark:
def add_extension(self, path_to_extension: str=None, path_to_cldnn_config: str=None):
if path_to_cldnn_config:
self.ie.set_config({'CONFIG_FILE': path_to_cldnn_config}, GPU_DEVICE_NAME)
-logger.info('GPU extensions is loaded {}'.format(path_to_cldnn_config))
+logger.info(f'GPU extensions is loaded {path_to_cldnn_config}')
if path_to_extension:
self.ie.add_extension(extension_path=path_to_extension, device_name=CPU_DEVICE_NAME)
-logger.info('CPU extensions is loaded {}'.format(path_to_extension))
+logger.info(f'CPU extensions is loaded {path_to_extension}')
def get_version_info(self) -> str:
-logger.info('InferenceEngine:\n{: <9}{:.<24} {}'.format('', 'API version', get_version()))
+logger.info(f"InferenceEngine:\n{'': <9}{'API version':.<24} {get_version()}")
version_string = 'Device info\n'
for device, version in self.ie.get_versions(self.device).items():
-version_string += '{: <9}{}\n'.format('', device)
-version_string += '{: <9}{:.<24}{} {}.{}\n'.format('', version.description, ' version', version.major,
-version.minor)
-version_string += '{: <9}{:.<24} {}\n'.format('', 'Build', version.build_number)
+version_string += f"{'': <9}{device}\n"
+version_string += f"{'': <9}{version.description:.<24}{' version'} {version.major}.{version.minor}\n"
+version_string += f"{'': <9}{'Build':.<24} {version.build_number}\n"
return version_string
def set_config(self, config = {}):
@@ -83,7 +82,7 @@ class Benchmark:
infer_request.async_infer()
status = exe_network.wait()
if status != StatusCode.OK:
-raise Exception("Wait for all requests is failed with status code {}!".format(status))
+raise Exception(f"Wait for all requests is failed with status code {status}!")
return infer_request.latency
def infer(self, exe_network, batch_size, progress_bar=None):
@@ -137,7 +136,7 @@ class Benchmark:
# wait the latest inference executions
status = exe_network.wait()
if status != StatusCode.OK:
-raise Exception("Wait for all requests is failed with status code {}!".format(status))
+raise Exception(f"Wait for all requests is failed with status code {status}!")
total_duration_sec = (datetime.utcnow() - start_time).total_seconds()
for infer_request_id in in_fly:
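In get_version_info() above, the fill/width specs are applied to string literals placed directly inside the braces: {'': <9} pads an empty string to a 9-space indent, and {'Build':.<24} dot-pads the label to 24 characters. A sketch with stand-in version fields:

device, major, minor, build = 'CPU', 2, 1, '2021.3.0'  # assumed example values
info = f"{'': <9}{device}\n"
info += f"{'': <9}{'version':.<24} {major}.{minor}\n"
info += f"{'': <9}{'Build':.<24} {build}\n"
print(info)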

View File

@@ -93,14 +93,14 @@ def run(args):
## set to user defined value
config[device]['PERF_COUNT'] = 'YES' if args.perf_counts else 'NO'
elif 'PERF_COUNT' in config[device].keys() and config[device]['PERF_COUNT'] == 'YES':
-logger.warning("Performance counters for {} device is turned on. ".format(device) +
+logger.warning(f"Performance counters for {device} device is turned on. " +
"To print results use -pc option.")
elif args.report_type in [ averageCntReport, detailedCntReport ]:
-logger.warning("Turn on performance counters for {} device ".format(device) +
-"since report type is {}.".format(args.report_type))
+logger.warning(f"Turn on performance counters for {device} device " +
+f"since report type is {args.report_type}.")
config[device]['PERF_COUNT'] = 'YES'
elif args.exec_graph_path is not None:
-logger.warning("Turn on performance counters for {} device ".format(device) +
+logger.warning(f"Turn on performance counters for {device} device " +
"due to execution graph dumping.")
config[device]['PERF_COUNT'] = 'YES'
else:
@@ -114,11 +114,11 @@ def run(args):
## set to user defined value
supported_config_keys = benchmark.ie.get_metric(device, 'SUPPORTED_CONFIG_KEYS')
if key not in supported_config_keys:
-raise Exception("Device {} doesn't support config key '{}'! ".format(device, key) +
+raise Exception(f"Device {device} doesn't support config key '{key}'! " +
"Please specify -nstreams for correct devices in format <dev1>:<nstreams1>,<dev2>:<nstreams2>")
config[device][key] = device_number_streams[device]
elif key not in config[device].keys() and args.api_type == "async":
-logger.warning("-nstreams default value is determined automatically for {} device. ".format(device) +
+logger.warning(f"-nstreams default value is determined automatically for {device} device. " +
"Although the automatic selection usually provides a reasonable performance,"
"but it still may be non-optimal for some cases, for more information look at README.")
if device != MYRIAD_DEVICE_NAME: ## MYRIAD sets the default number of streams implicitly
@@ -139,7 +139,7 @@ def run(args):
config[device]['CPU_BIND_THREAD'] = args.infer_threads_pinning
elif 'CPU_BIND_THREAD' not in config[device].keys():
if MULTI_DEVICE_NAME in device_name and GPU_DEVICE_NAME in device_name:
-logger.warning("Turn off threads pinning for {}".format(device) +
+logger.warning(f"Turn off threads pinning for {device} " +
"device since multi-scenario with GPU device is used.")
config[device]['CPU_BIND_THREAD'] = 'NO'
else:
@@ -185,8 +185,8 @@ def run(args):
start_time = datetime.utcnow()
ie_network = benchmark.read_network(args.path_to_model)
-duration_ms = "{:.2f}".format((datetime.utcnow() - start_time).total_seconds() * 1000)
-logger.info("Read network took {} ms".format(duration_ms))
+duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+logger.info(f"Read network took {duration_ms} ms")
if statistics:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
@@ -203,8 +203,8 @@ def run(args):
logger.info(
'Reshaping network: {}'.format(', '.join("'{}': {}".format(k, v) for k, v in shapes.items())))
ie_network.reshape(shapes)
-duration_ms = "{:.2f}".format((datetime.utcnow() - start_time).total_seconds() * 1000)
-logger.info("Reshape network took {} ms".format(duration_ms))
+duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+logger.info(f"Reshape network took {duration_ms} ms")
if statistics:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
@@ -214,7 +214,7 @@ def run(args):
# use batch size according to provided layout and shapes
batch_size = get_batch_size(app_inputs_info) if args.layout else ie_network.batch_size
-logger.info('Network batch size: {}'.format(batch_size))
+logger.info(f'Network batch size: {batch_size}')
# --------------------- 6. Configuring inputs and outputs of the model --------------------------------------------------
next_step()
@@ -227,8 +227,8 @@ def run(args):
start_time = datetime.utcnow()
exe_network = benchmark.load_network(ie_network)
-duration_ms = "{:.2f}".format((datetime.utcnow() - start_time).total_seconds() * 1000)
-logger.info("Load network took {} ms".format(duration_ms))
+duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+logger.info(f"Load network took {duration_ms} ms")
if statistics:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
@@ -247,8 +247,8 @@ def run(args):
start_time = datetime.utcnow()
exe_network = benchmark.import_network(args.path_to_model)
-duration_ms = "{:.2f}".format((datetime.utcnow() - start_time).total_seconds() * 1000)
-logger.info("Import network took {} ms".format(duration_ms))
+duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+logger.info(f"Import network took {duration_ms} ms")
if statistics:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
@@ -297,7 +297,7 @@ def run(args):
for nstreams in device_number_streams.items():
statistics.add_parameters(StatisticsReport.Category.RUNTIME_CONFIG,
[
-("number of {} streams".format(nstreams[0]), str(nstreams[1])),
+(f"number of {nstreams[0]} streams", str(nstreams[1])),
])
# ------------------------------------ 10. Measuring performance -----------------------------------------------
@@ -311,8 +311,8 @@ def run(args):
progress_bar = ProgressBar(progress_bar_total_count, args.stream_output, args.progress) if args.progress else None
-duration_ms = "{:.2f}".format(benchmark.first_infer(exe_network))
-logger.info("First inference took {} ms".format(duration_ms))
+duration_ms = f"{benchmark.first_infer(exe_network):.2f}"
+logger.info(f"First inference took {duration_ms} ms")
if statistics:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
@@ -325,7 +325,7 @@ def run(args):
if args.dump_config:
dump_config(args.dump_config, config)
-logger.info("Inference Engine configuration settings were dumped to {}".format(args.dump_config))
+logger.info(f"Inference Engine configuration settings were dumped to {args.dump_config}")
if args.exec_graph_path:
dump_exec_graph(exe_network, args.exec_graph_path)
@@ -342,28 +342,28 @@ def run(args):
if statistics:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
-('total execution time (ms)', '{:.2f}'.format(get_duration_in_milliseconds(total_duration_sec))),
+('total execution time (ms)', f'{get_duration_in_milliseconds(total_duration_sec):.2f}'),
('total number of iterations', str(iteration)),
])
if MULTI_DEVICE_NAME not in device_name:
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
-('latency (ms)', '{:.2f}'.format(latency_ms)),
+('latency (ms)', f'{latency_ms:.2f}'),
])
statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
[
-('throughput', '{:.2f}'.format(fps)),
+('throughput', f'{fps:.2f}'),
])
if statistics:
statistics.dump()
-print('Count: {} iterations'.format(iteration))
-print('Duration: {:.2f} ms'.format(get_duration_in_milliseconds(total_duration_sec)))
+print(f'Count: {iteration} iterations')
+print(f'Duration: {get_duration_in_milliseconds(total_duration_sec):.2f} ms')
if MULTI_DEVICE_NAME not in device_name:
-print('Latency: {:.2f} ms'.format(latency_ms))
-print('Throughput: {:.2f} FPS'.format(fps))
+print(f'Latency: {latency_ms:.2f} ms')
+print(f'Throughput: {fps:.2f} FPS')
del exe_network
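Many warnings in this file are built from adjacent string literals joined by +; when converting such code, every fragment that contains placeholders needs its own f prefix, which is presumably what the "Add f-string after update" fix-up commit addressed. A sketch of the pitfall, with a stand-in device name:

device = 'GPU'
broken = f"Turn on performance counters for " + "{device} device"  # braces stay literal text
fixed = f"Turn on performance counters for " + f"{device} device"  # prefix every interpolating fragment
assert '{device}' in broken and 'GPU device' in fixed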

View File

@@ -17,7 +17,7 @@ def str2bool(v):
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
-raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
+raise argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
return ivalue
class print_help(argparse.Action):

View File

@@ -15,7 +15,7 @@ def set_inputs(paths_to_input, batch_size, app_input_info, requests):
inputs = requests[i].input_blobs
for k, v in requests_input_data[i].items():
if k not in inputs.keys():
-raise Exception("No input with name {} found!".format(k))
+raise Exception(f"No input with name {k} found!")
inputs[k].buffer[:] = v
def get_inputs(paths_to_input, batch_size, app_input_info, requests):
@@ -42,33 +42,34 @@ def get_inputs(paths_to_input, batch_size, app_input_info, requests):
else:
binary_to_be_used = binaries_count * batch_size * len(requests)
if binary_to_be_used > 0 and len(binary_files) == 0:
-logger.warning("No supported binary inputs found! Please check your file extensions: {}".format(
-",".join(BINARY_EXTENSIONS)))
+logger.warning(f"No supported binary inputs found! "
+f"Please check your file extensions: {','.join(BINARY_EXTENSIONS)}")
elif binary_to_be_used > len(binary_files):
logger.warning(
-"Some binary input files will be duplicated: {} files are required, but only {} were provided".format(
-binary_to_be_used, len(binary_files)))
+f"Some binary input files will be duplicated: "
+f"{binary_to_be_used} files are required, "
+f"but only {len(binary_files)} were provided")
elif binary_to_be_used < len(binary_files):
logger.warning(
-"Some binary input files will be ignored: only {} files are required from {}".format(binary_to_be_used,
-len(binary_files)))
+f"Some binary input files will be ignored: only {binary_to_be_used} "
+f"files are required from {len(binary_files)}")
images_to_be_used = images_count * batch_size * len(requests)
if images_to_be_used > 0 and len(image_files) == 0:
-logger.warning("No supported image inputs found! Please check your file extensions: {}".format(
-",".join(IMAGE_EXTENSIONS)))
+logger.warning(f"No supported image inputs found! Please check your "
+f"file extensions: {','.join(IMAGE_EXTENSIONS)}")
elif images_to_be_used > len(image_files):
logger.warning(
-"Some image input files will be duplicated: {} files are required, but only {} were provided".format(
-images_to_be_used, len(image_files)))
+f"Some image input files will be duplicated: {images_to_be_used} "
+f"files are required, but only {len(image_files)} were provided")
elif images_to_be_used < len(image_files):
logger.warning(
-"Some image input files will be ignored: only {} files are required from {}".format(images_to_be_used,
-len(image_files)))
+f"Some image input files will be ignored: only {images_to_be_used} "
+f"files are required from {len(image_files)}")
requests_input_data = []
for request_id in range(0, len(requests)):
-logger.info("Infer Request {} filling".format(request_id))
+logger.info(f"Infer Request {request_id} filling")
input_data = {}
keys = list(sorted(app_input_info.keys()))
for key in keys:
@@ -95,8 +96,8 @@ def get_inputs(paths_to_input, batch_size, app_input_info, requests):
continue
# fill with random data
-logger.info("Fill input '{}' with random values ({} is expected)".format(key, "image"
-if info.is_image else "some binary data"))
+logger.info(f"Fill input '{key}' with random values "
+f"({'image' if info.is_image else 'some binary data'} is expected)")
input_data[key] = fill_blob_with_random(info)
requests_input_data.append(input_data)
@@ -128,11 +129,11 @@ def fill_blob_with_image(image_paths, request_id, batch_size, input_id, input_si
for b in range(batch_size):
image_index %= len(image_paths)
image_filename = image_paths[image_index]
-logger.info('Prepare image {}'.format(image_filename))
+logger.info(f'Prepare image {image_filename}')
image = cv2.imread(image_filename)
new_im_size = tuple((info.width, info.height))
if image.shape[:-1] != new_im_size:
-logger.warning("Image is resized from ({}) to ({})".format(image.shape[:-1], new_im_size))
+logger.warning(f"Image is resized from ({image.shape[:-1]}) to ({new_im_size})")
image = cv2.resize(image, new_im_size)
if info.layout in ['NCHW', 'CHW']:
image = image.transpose((2, 0, 1))
@@ -173,7 +174,7 @@ def fill_blob_with_binary(binary_paths, request_id, batch_size, input_id, input_
blob_size = dtype().nbytes * int(np.prod(shape))
if blob_size != binary_file_size:
raise Exception(
-"File {} contains {} bytes but network expects {}".format(binary_filename, binary_file_size, blob_size))
+f"File {binary_filename} contains {binary_file_size} bytes but network expects {blob_size}")
binaries[b] = np.reshape(np.fromfile(binary_filename, dtype), shape)
binary_index += input_size
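f-string braces accept arbitrary expressions, including conditionals and method calls, as in the random-fill log line and the {','.join(...)} calls above. A minimal sketch with assumed inputs:

key, is_image = 'data', True
extensions = ['bin', 'raw']
print(f"Fill input '{key}' with random values "
      f"({'image' if is_image else 'some binary data'} is expected)")
print(f"Please check your file extensions: {','.join(extensions)}")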

View File

@@ -38,7 +38,7 @@ class StatisticsReport:
def dump(self):
def dump_parameters(f, parameters):
for k, v in parameters:
-f.write('{}{}{}\n'.format(k, self.csv_separator, v))
+f.write(f'{k}{self.csv_separator}{v}\n')
with open(os.path.join(self.config.report_folder, 'benchmark_report.csv'), 'w') as f:
if self.Category.COMMAND_LINE_PARAMETERS in self.parameters.keys():
@@ -56,7 +56,7 @@ class StatisticsReport:
dump_parameters(f, self.parameters[self.Category.EXECUTION_RESULTS])
f.write('\n')
-logger.info("Statistics report is stored to {}".format(f.name))
+logger.info(f"Statistics report is stored to {f.name}")
def dump_performance_counters_request(self, f, perf_counts):
total = 0
@@ -79,7 +79,7 @@ class StatisticsReport:
logger.info('Performance counters are empty. No reports are dumped.')
return
-filename = os.path.join(self.config.report_folder, 'benchmark_{}_report.csv'.format(self.config.report_type))
+filename = os.path.join(self.config.report_folder, f'benchmark_{self.config.report_type}_report.csv')
with open(filename, 'w') as f:
if self.config.report_type == detailedCntReport:
for pc in perf_counts:
@@ -104,4 +104,4 @@ class StatisticsReport:
else:
raise Exception('PM data can only be collected for average or detailed report types')
-logger.info('Performance counters report is stored to {}'.format(filename))
+logger.info(f'Performance counters report is stored to {filename}')

View File

@@ -40,10 +40,10 @@ def next_step(additional_info='', step_id=0):
next_step.step_id += 1
if next_step.step_id not in step_names.keys():
-raise Exception('Step ID {} is out of total steps number '.format(next_step.step_id, str(len(step_names))))
+raise Exception(f'Step ID {next_step.step_id} is out of total steps number {str(len(step_names))}')
step_info_template = '[Step {}/{}] {}'
-step_name = step_names[next_step.step_id] + (' ({})'.format(additional_info) if additional_info else '')
+step_name = step_names[next_step.step_id] + (f' ({additional_info})' if additional_info else '')
step_info_template = step_info_template.format(next_step.step_id, len(step_names), step_name)
print(step_info_template)
@@ -81,7 +81,7 @@ def _configure_network_inputs_and_outputs(ie_network: IENetwork, input_output_pr
elif key in output_info:
output_info[key].precision = value
else:
-raise Exception("Element '{}' does not exist in network".format(key))
+raise Exception(f"Element '{key}' does not exist in network")
def _parse_arg_map(arg_map: str):
arg_map = arg_map.replace(" ", "")
@@ -98,19 +98,15 @@ def print_inputs_and_outputs_info(ie_network: IENetwork):
input_info = ie_network.input_info
for key in input_info.keys():
tensor_desc = input_info[key].tensor_desc
-logger.info("Network input '{}' precision {}, dimensions ({}): {}".format(key,
-tensor_desc.precision,
-tensor_desc.layout,
-" ".join(str(x) for x in
-tensor_desc.dims)))
+logger.info(f"Network input '{key}' precision {tensor_desc.precision}, "
+f"dimensions ({tensor_desc.layout}): "
+f"{' '.join(str(x) for x in tensor_desc.dims)}")
output_info = ie_network.outputs
for key in output_info.keys():
info = output_info[key]
-logger.info("Network output '{}' precision {}, dimensions ({}): {}".format(key,
-info.precision,
-info.layout,
-" ".join(str(x) for x in
-info.shape)))
+logger.info(f"Network output '{key}' precision {info.precision}, "
+f"dimensions ({info.layout}): "
+f"{' '.join(str(x) for x in info.shape)}")
def get_number_iterations(number_iterations: int, nireq: int, api_type: str):
niter = number_iterations
@@ -119,7 +115,7 @@ def get_number_iterations(number_iterations: int, nireq: int, api_type: str):
niter = int((niter + nireq - 1) / nireq) * nireq
if number_iterations != niter:
logger.warning('Number of iterations was aligned by request number '
-'from {} to {} using number of requests {}'.format(number_iterations, niter, nireq))
+f'from {number_iterations} to {niter} using number of requests {nireq}')
return niter
@@ -146,7 +142,7 @@ def get_duration_in_secs(target_device):
if duration == 0:
duration = DEVICE_DURATION_IN_SECS[UNKNOWN_DEVICE_TYPE]
-logger.warning('Default duration {} seconds is used for unknown device {}'.format(duration, target_device))
+logger.warning(f'Default duration {duration} seconds is used for unknown device {target_device}')
return duration
@@ -187,18 +183,18 @@ def parse_nstreams_value_per_device(devices, values_string):
def process_help_inference_string(benchmark_app):
-output_string = 'Start inference {}hronously'.format(benchmark_app.api_type)
+output_string = f'Start inference {benchmark_app.api_type}hronously'
if benchmark_app.api_type == 'async':
-output_string += ', {} inference requests'.format(benchmark_app.nireq)
+output_string += f', {benchmark_app.nireq} inference requests'
device_ss = ''
if CPU_DEVICE_NAME in benchmark_app.device:
device_ss += str(benchmark_app.ie.get_config(CPU_DEVICE_NAME, 'CPU_THROUGHPUT_STREAMS'))
-device_ss += ' streams for {}'.format(CPU_DEVICE_NAME)
+device_ss += f' streams for {CPU_DEVICE_NAME}'
if GPU_DEVICE_NAME in benchmark_app.device:
device_ss += ', ' if device_ss else ''
device_ss += str(benchmark_app.ie.get_config(GPU_DEVICE_NAME, 'GPU_THROUGHPUT_STREAMS'))
-device_ss += ' streams for {}'.format(GPU_DEVICE_NAME)
+device_ss += f' streams for {GPU_DEVICE_NAME}'
if device_ss:
output_string += ' using ' + device_ss
@@ -206,10 +202,10 @@ def process_help_inference_string(benchmark_app):
limits = ''
if benchmark_app.niter and not benchmark_app.duration_seconds:
-limits += '{} iterations'.format(benchmark_app.niter)
+limits += f'{benchmark_app.niter} iterations'
if benchmark_app.duration_seconds:
-limits += '{} ms duration'.format(get_duration_in_milliseconds(benchmark_app.duration_seconds))
+limits += f'{get_duration_in_milliseconds(benchmark_app.duration_seconds)} ms duration'
if limits:
output_string += ', limits: ' + limits
@@ -220,7 +216,7 @@ def dump_exec_graph(exe_network, exec_graph_path):
try:
exec_graph_info = exe_network.get_exec_graph_info()
exec_graph_info.serialize(exec_graph_path)
-logger.info('Executable graph is stored to {}'.format(exec_graph_path))
+logger.info(f'Executable graph is stored to {exec_graph_path}')
del exec_graph_info
except Exception as e:
logger.exception(e)
@@ -231,20 +227,19 @@ def print_perf_counters(perf_counts_list):
perf_counts = perf_counts_list[ni]
total_time = 0
total_time_cpu = 0
-logger.info("Performance counts for {}-th infer request".format(ni))
+logger.info(f"Performance counts for {ni}-th infer request")
for layer, stats in sorted(perf_counts.items(), key=lambda x: x[1]['execution_index']):
max_layer_name = 30
-print("{:<30}{:<15}{:<30}{:<20}{:<20}{:<20}".format(
-layer[:max_layer_name - 4] + '...' if (len(layer) >= max_layer_name) else layer,
-stats['status'],
-'layerType: ' + str(stats['layer_type']),
-'realTime: ' + str(stats['real_time']),
-'cpu: ' + str(stats['cpu_time']),
-'execType: ' + str(stats['exec_type'])))
+print(f"{layer[:max_layer_name - 4] + '...' if (len(layer) >= max_layer_name) else layer:<30}"
+f"{stats['status']:<15}"
+f"{'layerType: ' + str(stats['layer_type']):<30}"
+f"{'realTime: ' + str(stats['real_time']):<20}"
+f"{'cpu: ' + str(stats['cpu_time']):<20}"
+f"{'execType: ' + str(stats['exec_type']):<20}")
total_time += stats['real_time']
total_time_cpu += stats['cpu_time']
-print('Total time: {} microseconds'.format(total_time))
-print('Total CPU time: {} microseconds\n'.format(total_time_cpu))
+print(f'Total time: {total_time} microseconds')
+print(f'Total CPU time: {total_time_cpu} microseconds\n')
def get_command_line_arguments(argv):
parameters = []
@@ -282,7 +277,7 @@ def parse_input_parameters(parameter_string, input_info):
return_value = { k:value for k in input_info.keys() }
break
else:
-raise Exception("Can't parse input parameter: {}".format(parameter_string))
+raise Exception(f"Can't parse input parameter: {parameter_string}")
return return_value
class InputInfo:
@@ -305,7 +300,7 @@ class InputInfo:
def getDimentionByLayout(self, character):
if character not in self.layout:
-raise Exception("Error: Can't get {} from layout {}".format(character, self.layout))
+raise Exception(f"Error: Can't get {character} from layout {self.layout}")
return self.shape[self.layout.index(character)]
@property