# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import copy
import math
import warnings
from collections import defaultdict
from typing import List, Sequence, Tuple
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .base_dataset import BaseDataset, force_full_init
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
    Same as ``torch.utils.data.dataset.ConcatDataset`` and supports
    ``lazy_init``.
Args:
datasets (Sequence[BaseDataset]): A list of datasets which will be
concatenated.
lazy_init (bool, optional): Whether to load annotation during
instantiation. Defaults to False.
"""
def __init__(self,
datasets: Sequence[BaseDataset],
lazy_init: bool = False):
# Only use meta of first dataset.
self._meta = datasets[0].meta
self.datasets = datasets # type: ignore
        for i, dataset in enumerate(datasets, 1):
            if self._meta != dataset.meta:
                warnings.warn(
                    f'The meta information of the {i}-th dataset does not '
                    'match the meta information of the first dataset')
self._fully_initialized = False
if not lazy_init:
self.full_init()
@property
def meta(self) -> dict:
"""Get the meta information of the first dataset in ``self.datasets``.
Returns:
            dict: Meta information of the first dataset.
"""
# Prevent `self._meta` from being modified by outside.
return copy.deepcopy(self._meta)
def full_init(self):
"""Loop to ``full_init`` each dataset."""
if self._fully_initialized:
return
for d in self.datasets:
d.full_init()
        # Get the cumulative sizes of `self.datasets`. For example, if the
        # lengths of the datasets are [2, 3, 4], the cumulative sizes are
        # [2, 5, 9].
super().__init__(self.datasets)
self._fully_initialized = True
@force_full_init
def _get_ori_dataset_idx(self, idx: int) -> Tuple[int, int]:
"""Convert global idx to local index.
Args:
idx (int): Global index of ``RepeatDataset``.
Returns:
Tuple[int, int]: The index of ``self.datasets`` and the local
index of data.
"""
if idx < 0:
if -idx > len(self):
raise ValueError(
                    f'absolute value of index ({idx}) should not exceed '
                    f'dataset length ({len(self)}).')
idx = len(self) + idx
        # Locate the target dataset and the sample index inside it.
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
@force_full_init
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index.
Args:
idx (int): Global index of ``ConcatDataset``.
Returns:
dict: The idx-th annotation of the datasets.
"""
dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)
return self.datasets[dataset_idx].get_data_info(sample_idx)
@force_full_init
def __len__(self):
return super().__len__()
def __getitem__(self, idx):
if not self._fully_initialized:
            warnings.warn('Please call `full_init` method manually to '
                          'accelerate data loading.')
self.full_init()
dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)
return self.datasets[dataset_idx][sample_idx]
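

# Illustrative sketch: how ``_get_ori_dataset_idx`` above maps a global index
# onto ``(dataset_idx, sample_idx)`` with ``bisect_right`` over the cumulative
# sizes. The ``_demo_*`` helper is hypothetical and exists only for
# illustration; it is not part of this module's API.
def _demo_concat_index_mapping():
    cumulative_sizes = [2, 5, 9]  # three datasets of lengths 2, 3 and 4
    for idx in range(cumulative_sizes[-1]):
        dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - cumulative_sizes[dataset_idx - 1]
        # e.g. global index 3 -> (1, 1): the 2nd sample of the 2nd dataset
        print(f'{idx} -> ({dataset_idx}, {sample_idx})')
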
class RepeatDataset:
"""A wrapper of repeated dataset.
    The length of the repeated dataset is ``times`` times the length of the
    original dataset. This is useful when the data loading time is long but
    the dataset is small. Using RepeatDataset can reduce the data loading
    time between epochs.
Args:
dataset (BaseDataset): The dataset to be repeated.
times (int): Repeat times.
lazy_init (bool, optional): Whether to load annotation during
instantiation. Defaults to False.
"""
def __init__(self,
dataset: BaseDataset,
times: int,
lazy_init: bool = False):
self.dataset = dataset
self.times = times
self._meta = dataset.meta
self._fully_initialized = False
if not lazy_init:
self.full_init()
@property
def meta(self) -> dict:
"""Get the meta information of the repeated dataset.
Returns:
dict: The meta information of repeated dataset.
"""
return copy.deepcopy(self._meta)
def full_init(self):
"""Loop to ``full_init`` each dataset."""
if self._fully_initialized:
return
self.dataset.full_init()
self._ori_len = len(self.dataset)
self._fully_initialized = True
@force_full_init
def _get_ori_dataset_idx(self, idx: int) -> int:
"""Convert global index to local index.
Args:
idx: Global index of ``RepeatDataset``.
Returns:
idx (int): Local index of data.
"""
return idx % self._ori_len
@force_full_init
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index.
Args:
            idx (int): Global index of ``RepeatDataset``.
        Returns:
            dict: The idx-th annotation of the dataset.
"""
sample_idx = self._get_ori_dataset_idx(idx)
return self.dataset.get_data_info(sample_idx)
def __getitem__(self, idx):
if not self._fully_initialized:
            warnings.warn('Please call `full_init` method manually to '
                          'accelerate data loading.')
self.full_init()
sample_idx = self._get_ori_dataset_idx(idx)
return self.dataset[sample_idx]
@force_full_init
def __len__(self):
return self.times * self._ori_len
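

# Illustrative sketch: with ``times=3`` over a dataset of length 2, the global
# indices 0..5 wrap back onto the original samples as ``idx % ori_len``. The
# ``_demo_*`` helper is hypothetical and exists only for illustration.
def _demo_repeat_index_mapping(times: int = 3, ori_len: int = 2):
    for idx in range(times * ori_len):
        print(f'{idx} -> {idx % ori_len}')  # prints 0, 1, 0, 1, 0, 1
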
class ClassBalancedDataset:
"""A wrapper of class balanced dataset.
Suitable for training on class imbalanced datasets like LVIS. Following
the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
in each epoch, an image may appear multiple times based on its
"repeat factor".
    The repeat factor for an image is a function of the frequency of the
    rarest category labeled in that image. The "frequency of category c" in
    [0, 1]
is defined by the fraction of images in the training set (without repeats)
in which category c appears.
    The dataset needs to implement :meth:`get_cat_ids` to support
    ClassBalancedDataset.
    The repeat factor is computed as follows.
    1. For each category c, compute the fraction of images that contain it:
       :math:`f(c)`
    2. For each category c, compute the category-level repeat factor:
       :math:`r(c) = max(1, sqrt(t/f(c)))`
    3. For each image I, compute the image-level repeat factor:
       :math:`r(I) = max_{c in I} r(c)`
Args:
dataset (BaseDataset): The dataset to be repeated.
        oversample_thr (float): Frequency threshold below which data is
            repeated. For categories with ``f_c >= oversample_thr``, there is
            no oversampling. For categories with ``f_c < oversample_thr``, the
            degree of oversampling follows the square-root inverse frequency
            heuristic above.
        lazy_init (bool, optional): Whether to load annotation during
            instantiation. Defaults to False.
"""
def __init__(self,
dataset: BaseDataset,
oversample_thr: float,
lazy_init: bool = False):
self.dataset = dataset
self.oversample_thr = oversample_thr
self._meta = dataset.meta
self._fully_initialized = False
if not lazy_init:
self.full_init()
@property
def meta(self) -> dict:
"""Get the meta information of the repeated dataset.
Returns:
dict: The meta information of repeated dataset.
"""
return copy.deepcopy(self._meta)
def full_init(self):
"""Loop to ``full_init`` each dataset."""
if self._fully_initialized:
return
self.dataset.full_init()
repeat_factors = self._get_repeat_factors(self.dataset,
self.oversample_thr)
repeat_indices = []
for dataset_index, repeat_factor in enumerate(repeat_factors):
repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
self.repeat_indices = repeat_indices
self._fully_initialized = True
def _get_repeat_factors(self, dataset: BaseDataset,
repeat_thr: float) -> List[float]:
"""Get repeat factor for each images in the dataset.
Args:
dataset (BaseDataset): The dataset.
repeat_thr (float): The threshold of frequency. If an image
contains the categories whose frequency below the threshold,
it would be repeated.
Returns:
List[float]: The repeat factors for each images in the dataset.
"""
        # 1. For each category c, compute the fraction of images that
        #    contain it: f(c)
category_freq: defaultdict = defaultdict(float)
num_images = len(dataset)
for idx in range(num_images):
            cat_ids = set(dataset.get_cat_ids(idx))
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
            assert v > 0, f'category {k} does not contain any images'
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t/f(c)))
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I and its labels L(I), compute the image-level
# repeat factor:
# r(I) = max_{c in L(I)} r(c)
repeat_factors = []
for idx in range(num_images):
            cat_ids = set(dataset.get_cat_ids(idx))
            repeat_factor = max(
                category_repeat[cat_id] for cat_id in cat_ids)
repeat_factors.append(repeat_factor)
return repeat_factors
@force_full_init
def _get_ori_dataset_idx(self, idx: int) -> int:
"""Convert global index to local index.
Args:
idx (int): Global index of ``RepeatDataset``.
Returns:
int: Local index of data.
"""
return self.repeat_indices[idx]
@force_full_init
def get_cat_ids(self, idx: int) -> List[int]:
"""Get category ids of class balanced dataset by index.
Args:
idx (int): Index of data.
Returns:
List[int]: All categories in the image of specified index.
"""
sample_idx = self._get_ori_dataset_idx(idx)
return self.dataset.get_cat_ids(sample_idx)
@force_full_init
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index.
Args:
            idx (int): Global index of ``ClassBalancedDataset``.
Returns:
dict: The idx-th annotation of the dataset.
"""
sample_idx = self._get_ori_dataset_idx(idx)
return self.dataset.get_data_info(sample_idx)
    def __getitem__(self, idx):
        if not self._fully_initialized:
            warnings.warn('Please call `full_init` method manually to '
                          'accelerate data loading.')
            self.full_init()
ori_index = self._get_ori_dataset_idx(idx)
return self.dataset[ori_index]
@force_full_init
def __len__(self):
return len(self.repeat_indices)
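

# Illustrative sketch: the repeat-factor computation from
# ``_get_repeat_factors`` on a toy set of per-image category ids, with
# ``oversample_thr`` t = 0.5. Category 2 appears in 1/3 of the images, so
# r(2) = sqrt(0.5 / (1/3)) ~= 1.22 and its image is repeated ceil(1.22) = 2
# times. The ``_demo_*`` helper is hypothetical, for illustration only.
def _demo_repeat_factors(repeat_thr: float = 0.5):
    image_cat_ids = [[0], [0, 1], [1, 2]]  # toy per-image category ids
    num_images = len(image_cat_ids)
    # Step 1: category frequencies f(c).
    category_freq: defaultdict = defaultdict(float)
    for cat_ids in image_cat_ids:
        for cat_id in set(cat_ids):
            category_freq[cat_id] += 1 / num_images
    # Step 2: category-level repeat factors r(c) = max(1, sqrt(t / f(c))).
    category_repeat = {
        cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
        for cat_id, cat_freq in category_freq.items()
    }
    # Step 3: image-level repeat factors r(I) = max_{c in I} r(c).
    for idx, cat_ids in enumerate(image_cat_ids):
        repeat_factor = max(category_repeat[c] for c in set(cat_ids))
        print(f'image {idx}: repeat factor {repeat_factor:.2f}, '
              f'repeated {math.ceil(repeat_factor)} time(s)')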