[POT] Fix inference sample in FBC when getting a list prediction (#10159)

* fix: inference sample in FBC when getting a list prediction

* update reference metrics
This commit is contained in:
Indira Salyahova
2022-02-15 14:42:40 +03:00
committed by GitHub
parent e168c9b1c3
commit f1557c06de
8 changed files with 20 additions and 12 deletions

View File

@@ -214,7 +214,9 @@ class FastBiasCorrection(Algorithm):
calculate_input_shape[input_node.fullname] = {'shape_node': lambda x: x.shape}
calculate_metrics = self._engine.calculate_metrics
self._engine.calculate_metrics = False
self._engine.inference_for_shape = True
_, inputs_shape = self._engine.predict(calculate_input_shape, sampler)
self._engine.inference_for_shape = False
self._engine.calculate_metrics = calculate_metrics
for node_name, shape_node in inputs_shape.items():
inputs_shape[node_name] = shape_node['shape_node'][0]

View File

@@ -24,6 +24,7 @@ class Engine(ABC):
self._statistic_graph_builder = StatisticGraphBuilder()
self._stat_requests_number = self.config.get('stat_requests_number', None)
self._eval_requests_number = self.config.get('eval_requests_number', None)
self.inference_for_shape = False
self.calculate_metrics = True
def set_model(self, model):

View File

@@ -248,7 +248,7 @@ class ACEngine(Engine):
if not stats_layout:
return
dataset_index = kwargs['dataset_indices'][0]
append_stats(self._accumulated_layer_stats, stats_layout, value, dataset_index)
append_stats(self._accumulated_layer_stats, stats_layout, value, dataset_index, self.inference_for_shape)
@staticmethod
def _set_requests_number(params, requests_number):

View File

@@ -200,7 +200,7 @@ class IEEngine(Engine):
:param annotations: list of annotations [(img_id, annotation)]
"""
dataset_index = annotations[0][0] if annotations is not None and annotations[0][0] else 0
append_stats(self._accumulated_layer_stats, stats_layout, outputs, dataset_index)
append_stats(self._accumulated_layer_stats, stats_layout, outputs, dataset_index, self.inference_for_shape)
def _update_metrics(self, output, annotations, need_metrics_per_sample=False):
""" Updates metrics.

View File

@@ -20,4 +20,4 @@ class SimplifiedEngine(IEEngine):
batch_annotations, batch_meta, need_metrics_per_sample):
# Collect statistics
if stats_layout:
append_stats(self._accumulated_layer_stats, stats_layout, predictions, 0)
append_stats(self._accumulated_layer_stats, stats_layout, predictions, 0, self.inference_for_shape)

View File

@@ -13,10 +13,10 @@ from ..utils.utils import convert_output_key
logger = get_logger(__name__)
def append_stats(accumulated_layer_stats, stats_layout, value, dataset_index):
def append_stats(accumulated_layer_stats, stats_layout, value, dataset_index, inference_for_shape):
inplace_stats_mapping = get_inplace_stats_mapping(stats_layout)
if isinstance(value, list):
value = parse_sequential_stats(value, stats_layout)
value = parse_sequential_stats(value, stats_layout, inference_for_shape)
else:
value = process_raw_output(value)
for layer, stats in stats_layout.items():
@@ -29,7 +29,7 @@ def append_stats(accumulated_layer_stats, stats_layout, value, dataset_index):
(dataset_index, compute_statistic(stat_fn, value, layer_stat_name)))
def parse_sequential_stats(value_sequential, stats_layout):
def parse_sequential_stats(value_sequential, stats_layout, inference_for_shape):
stat_names_by_layer, old_names_mapping = get_per_layer_stat_mapping(stats_layout)
activation_seq = defaultdict(lambda: [])
for value in value_sequential:
@@ -40,6 +40,9 @@ def parse_sequential_stats(value_sequential, stats_layout):
for layer, act_seq in activation_seq.items():
seq_len = len(act_seq[0].shape)
if inference_for_shape:
activation_seq[layer] = act_seq[0]
continue
if not isinstance(stat_names_by_layer[layer], Statistic) or \
not stat_names_by_layer[layer].kwargs.get('inplace_statistics', False):
axis = 1 if seq_len == 2 else 2
@@ -103,7 +106,8 @@ def get_sequential_activations(activations, layer, activation_seq, stats_layout,
elif old_names_mapping.get(layer, None) in stats_layout and hasattr(stat_names_by_layer[layer], 'kwargs') \
and not stat_names_by_layer[layer].kwargs.get('inplace_statistics', False):
activation_seq[layer].append(activations)
elif old_names_mapping.get(layer, None) in stats_layout and callable(stat_names_by_layer[layer]):
elif old_names_mapping.get(layer, None) in stats_layout and (callable(stat_names_by_layer[layer]) \
or callable(stats_layout[layer][stat_names_by_layer[layer]])):
activation_seq[layer].append(activations)

View File

@@ -44,7 +44,8 @@ def run_append_stats_test(engine):
fc_layer_mock = create_ng_mock(['fc_layer'])
value = {conv_layer_mock: sample_tensor, fc_layer_mock: sample_tensor}
ref_value = {'conv_layer': sample_tensor, 'fc_layer': sample_tensor}
append_stats(engine._accumulated_layer_stats, stats_layout, value, dataset_index=0)
append_stats(engine._accumulated_layer_stats, stats_layout, value,
dataset_index=0, inference_for_shape=False)
for layer, accumulated_value in engine._accumulated_layer_stats.items():
assert np.array_equal(accumulated_value[stat_name][0][1], ref_value[layer])
@@ -57,7 +58,8 @@ def run_append_stats_test(engine):
{'conv_layer': sample_tensor, 'fc_layer': sample_tensor},
{'conv_layer': sample_tensor, 'fc_layer': sample_tensor},
]
append_stats(engine._accumulated_layer_stats, stats_layout, value, dataset_index=0)
append_stats(engine._accumulated_layer_stats, stats_layout, value,
dataset_index=0, inference_for_shape=False)
for layer, accumulated_value in engine._accumulated_layer_stats.items():
assert np.array_equal(
accumulated_value[stat_name][0][1][:, 0], ref_value[0][layer]

View File

@@ -51,10 +51,9 @@ TEST_MODELS = [
# {'drop_type': 'relative', 'max_iter_num': 1, 'accuracy_drop': 0.005, 'metrics': [
# {'name': 'accuracy@top1', 'baseline_value': 0.431}]}, 'GNA'),
#TODO: Enable after the problem with the shapes will be fixed
# ('mtcnn', 'caffe', 'DefaultQuantization', 'performance', 1, {'recall': 0.76, 'map': 0.6844}, {}, 'CPU'),
('mtcnn', 'caffe', 'DefaultQuantization', 'performance', 1, {'recall': 0.76, 'map': 0.6844}, {}, 'CPU'),
('mtcnn', 'caffe', 'DefaultQuantization', 'performance', 2, {'recall': 0.8, 'map': 0.7445},
('mtcnn', 'caffe', 'DefaultQuantization', 'performance', 2, {'recall': 0.76, 'map': 0.6638},
{'use_fast_bias': False}, 'CPU')
]
CASCADE_MAP = Dict({