[POT] Update samples and samplers with the new DataLoader format (#10595)

* Update samples and samplers with the new DataLoader format

* Update with utils

* Pylint updates

* Update metric with the exception

* Pylint

* Update with the exception

* Pylint

* Revert index sampler changes

* Update ImageLoader & SimplifiedEngine

* Update with the different solution

* Remove utils

* Pylint

* Remove list wrapping

* Remove list from meta_data
Nikita Malinin 2022-02-28 16:26:07 +03:00 committed by GitHub
parent 7d0d950b9a
commit 33ad1b96d4
7 changed files with 20 additions and 46 deletions
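
Taken together, the hunks below move every sample DataLoader from the old (annotation, data) item layout to the new (data, annotation) one, drop the (index, ...) wrapping around annotations, and delete the per-output value property from the sample metrics. A minimal sketch of a loader in the new format; RandomLoader and its dummy data are hypothetical, only the DataLoader base class (imported as in the COCO sample below) is real:

    import numpy as np

    from openvino.tools.pot import DataLoader


    class RandomLoader(DataLoader):
        """Hypothetical loader illustrating the new item layout."""

        def __init__(self, config):
            super().__init__(config)
            self._length = 4

        def __len__(self):
            return self._length

        def __getitem__(self, index):
            if index >= len(self):
                raise IndexError
            image = np.random.rand(3, 224, 224).astype(np.float32)
            annotation = 0  # a plain label: no (index, annotation) wrapping anymore
            # old format: return (index, annotation), image
            return image, annotation  # new format: data first, annotation second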

View File

@@ -12,9 +12,10 @@ class Metric(ABC):
         self.reset()
 
     @property
-    @abstractmethod
     def value(self):
         """ Returns accuracy metric value for the last model output. """
+        raise Exception('The value() property should be implemented to use this metric '
+                        'with AccuracyAwareQuantization algorithm!')
 
     @property
     @abstractmethod
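
Since value is now a concrete property that raises, a custom metric only has to provide the averaged result (plus update/reset) unless it is used with AccuracyAwareQuantization. A minimal sketch under that assumption; MAEMetric is hypothetical, and the Metric import location and the update/reset/get_attributes interface are taken from the POT samples:

    import numpy as np

    from openvino.tools.pot import Metric


    class MAEMetric(Metric):
        """Hypothetical metric: mean absolute error over all outputs."""

        def __init__(self):
            super().__init__()  # the base __init__ calls self.reset()
            self._name = 'mae'

        @property
        def avg_value(self):
            """ Returns the metric value averaged over all model outputs. """
            return {self._name: float(np.mean(self._errors))}

        # value is inherited: it raises unless overridden, and overriding it
        # is only needed for the AccuracyAwareQuantization algorithm.

        def update(self, output, target):
            self._errors.append(float(np.abs(output[0] - target[0]).mean()))

        def reset(self):
            self._errors = []

        def get_attributes(self):
            return {self._name: {'direction': 'higher-worse', 'type': 'mae'}}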

View File

@@ -38,9 +38,8 @@ class BRATSDataLoader(DataLoader):
         mask_path = os.path.join(self.config.mask_dir, self._img_ids[index])
         image_path = os.path.join(self.config.data_source, self._img_ids[index])
 
-        annotation = (index, self._read_image(mask_path))
         image, image_meta = self._preprocess_image(self._read_image(image_path))
-        return annotation, image, image_meta
+        return image, self._read_image(mask_path), image_meta
 
     def __len__(self):
         """ Returns size of the dataset """
@@ -120,13 +119,6 @@ class DiceIndex(Metric):
         self._name = 'dice_index'
         self._overall_metric = []
 
-    @property
-    def value(self):
-        """ Returns accuracy metric value for the last model output.
-        Possible format: {metric_name: [metric_values_per_image]}
-        """
-        return {self._name: [np.mean(self._overall_metric[-1])]}
-
     @property
     def avg_value(self):
         """ Returns accuracy metric value for all model outputs.
@@ -195,6 +187,7 @@ class SegmentationEngine(IEEngine):
         processed_outputs = []
         for output, meta in zip(outputs.values(), metadata):
             # Resize to bounding box size and extend to mask size
+            output = output[0]
             low = meta['bbox'][0]
             high = meta['bbox'][1]
             box_shape = tuple((high - low).astype(np.int32))
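
BRATSDataLoader now returns a third element with per-image metadata, and SegmentationEngine reads it back when resizing outputs (the meta['bbox'] lookup above). A sketch of the three-element layout; VolumeLoader and its dummy volumes are hypothetical:

    import numpy as np

    from openvino.tools.pot import DataLoader


    class VolumeLoader(DataLoader):
        """Hypothetical loader mirroring BRATSDataLoader's new return format."""

        def __init__(self, config):
            super().__init__(config)
            self._volumes = [np.zeros((1, 128, 128, 128), dtype=np.float32)] * 2

        def __len__(self):
            return len(self._volumes)

        def __getitem__(self, index):
            if index >= len(self):
                raise IndexError
            image = self._volumes[index]
            mask = np.zeros_like(image)  # stands in for the real annotation
            # per-image metadata travels with the batch to the engine's postprocessing
            meta = {'bbox': (np.array([0, 0, 0]), np.array([64, 64, 64]))}
            return image, mask, meta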

View File

@@ -29,9 +29,8 @@ class ImageNetDataLoader(DataLoader):
         if index >= len(self):
             raise IndexError
 
-        annotation = (index, self._annotations[self._img_ids[index]])\
-            if self._annotations else (index, None)
-        return annotation, self._read_image(self._img_ids[index])
+        annotation = self._annotations[self._img_ids[index]] if self._annotations else None
+        return self._read_image(self._img_ids[index]), annotation
 
     # Methods specific to the current implementation
     @staticmethod
@@ -84,11 +83,6 @@ class Accuracy(Metric):
         self._name = 'accuracy@top{}'.format(self._top_k)
         self._matches = []
 
-    @property
-    def value(self):
-        """ Returns accuracy metric value for the last model output. """
-        return {self._name: self._matches[-1]}
-
     @property
     def avg_value(self):
         """ Returns accuracy metric value for all model outputs. """

View File

@@ -46,8 +46,7 @@ class WiderFaceLoader(DataLoader):
         if index >= len(self):
             raise IndexError
 
-        annotation = (index, self._annotations[self._img_ids[index]])
-        return annotation, self._read_image(self._img_ids[index])
+        return self._read_image(self._img_ids[index]), self._annotations[self._img_ids[index]]
 
     def __len__(self):
         """ Returns size of the dataset """
@@ -312,15 +311,6 @@ class Recall(Metric):
         self._n_recorded_faces = []
         self._n_total_preds = []
 
-    @property
-    def value(self):
-        """ Returns metric value for the last model output.
-        Possible format: {metric_name: [metric_values_per_image]}
-        """
-        tp = np.cumsum(self._true_positives[-1])[np.arange(self._n_total_preds[-1])]
-        recalls = tp / np.maximum(self._n_recorded_faces[-1], np.finfo(np.float64).eps)
-        return {self._name: [recalls[-1]]}
-
     @property
     def avg_value(self):
         """ Returns average metric value for all model outputs.

View File

@@ -12,8 +12,8 @@ from openvino.tools.pot import DataLoader
 class COCOLoader(DataLoader):
     def __init__(self, config):
         super().__init__(config)
-        self.images_path = config.images_path
-        self.annotation_path = config.annotation_path
+        self.images_path = self.config.images_path
+        self.annotation_path = self.config.annotation_path
         self.images = os.listdir(self.images_path)
         self.labels = None
         self.data, self.bbox = self.prepare_annotation()
@@ -61,8 +61,8 @@ class COCOLoader(DataLoader):
         annotation = {'boxes': bbox, 'labels': labels, 'iscrowd': iscrowd,
                       'x_maxs': x_maxs, 'x_mins': x_mins, 'y_maxs': y_maxs, 'y_mins': y_mins}
-        annotation = (index, [annotation, shape_image])
-        return annotation, self._read_and_preprocess_image(self.images_path + self.data[index]['file_name'])
+        annotation = [annotation, shape_image]
+        return self._read_and_preprocess_image(self.images_path + self.data[index]['file_name']), annotation
 
     def __len__(self):
         return len(self.images)
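
Both COCO hunks rely on super().__init__(config) keeping the config available as self.config, the same attribute the other loaders already read (for example self.config.mask_dir above). A runnable sketch of that assumption; TinyLoader and the SimpleNamespace config stand-in are hypothetical:

    from types import SimpleNamespace  # stand-in for POT's attribute-style config

    from openvino.tools.pot import DataLoader


    class TinyLoader(DataLoader):
        """Hypothetical loader reading its paths from self.config."""

        def __init__(self, config):
            super().__init__(config)  # assumed to store the config as self.config
            self.images_path = self.config.images_path

        def __len__(self):
            return 0

        def __getitem__(self, index):
            raise IndexError


    loader = TinyLoader(SimpleNamespace(images_path='val2017/'))
    print(loader.images_path)  # val2017/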

View File

@@ -33,8 +33,8 @@ class VOCSegmentationLoader(DataLoader):
     # Required methods:
     def __init__(self, config):
         super().__init__(config)
-        self._image_size = config.image_size
-        self._img_ids = self._read_img_ids(config)
+        self._image_size = self.config.image_size
+        self._img_ids = self._read_img_ids(self.config)
 
     def __getitem__(self, index):
         """
@@ -49,8 +49,7 @@ class VOCSegmentationLoader(DataLoader):
         mask_path = os.path.join(self.config.mask_dir, self._img_ids[index] + '.png')
         image_path = os.path.join(self.config.data_source, self._img_ids[index] + '.jpg')
 
-        annotation = (index, self._read_and_preprocess_mask(mask_path))
-        return annotation, self._read_and_preprocess_image(image_path)
+        return self._read_and_preprocess_image(image_path), self._read_and_preprocess_mask(mask_path)
 
     def __len__(self):
         """ Returns size of the dataset """
@@ -93,13 +92,6 @@ class MeanIOU(Metric):
         self._current_cm = []
         self._total_cm = np.zeros((self._classes_num, self._classes_num))
 
-    @property
-    def value(self):
-        """ Returns metric value for the last model output.
-        Possible format: {metric_name: [metric_values_per_image]}
-        """
-        return {self._name: [self._evaluate(cm) for cm in self._current_cm]}
-
     @property
     def avg_value(self):
         """ Returns average metric value for all model outputs.

View File

@@ -403,11 +403,15 @@ class IEEngine(Engine):
             raise RuntimeError('Inconsistent data in the batch. '
                                'Some items contain annotation, and some do not.')
 
-        if not all([isinstance(item[0], tuple) for item in batch]):
-            images, image_annotation = [data[0] for data in batch], [(idx, data[1]) for idx, data in enumerate(batch)]
-        else:
-            images, image_annotation = [data[1] for data in batch], [data[0] for data in batch]
-        meta_data = [data[2] for data in batch]
+        if all([len(item) == 2 for item in batch]):
+            image_annotation, images = map(list, zip(*batch))
+            meta_data = [{}]*len(images)
+        elif all([len(item) == 3 for item in batch]):
+            image_annotation, images, meta_data = map(list, zip(*batch))
+        else:
+            raise RuntimeError('Inconsistent data in the batch. '
+                               'Some items contain meta data, and some do not.')
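
The new branching accepts exactly two item shapes per batch, (annotation, image) and (annotation, image, meta), and synthesizes empty per-image meta for the first so downstream code always sees a uniform triple. A standalone sketch of the same logic, with process_batch as a free-function stand-in for the engine method:

    def process_batch(batch):
        """Sketch of the unpacking above, on items as the engine receives them."""
        if not all(isinstance(item, tuple) for item in batch):
            raise RuntimeError('Inconsistent data in the batch. '
                               'Some items contain annotation, and some do not.')
        if all(len(item) == 2 for item in batch):
            annotations, images = map(list, zip(*batch))
            meta_data = [{}] * len(images)  # uniform shape for downstream code
        elif all(len(item) == 3 for item in batch):
            annotations, images, meta_data = map(list, zip(*batch))
        else:
            raise RuntimeError('Inconsistent data in the batch. '
                               'Some items contain meta data, and some do not.')
        return annotations, images, meta_data


    print(process_batch([((0, 'cat'), 'img0'), ((1, 'dog'), 'img1')]))
    # ([(0, 'cat'), (1, 'dog')], ['img0', 'img1'], [{}, {}])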