
Commit 48942b2

Yshuo-Li, liyinshuo, and innerlee authored
[Improvements] Refactor unittest folder structure (open-mmlab#386)
* [Improvements] Refactor unittest folder structure

* Fix

* Enable yapf

Signed-off-by: lizz <[email protected]>
Co-authored-by: liyinshuo <[email protected]>
Co-authored-by: lizz <[email protected]>
1 parent 7f71f74 commit 48942b2

78 files changed: 440 additions, 438 deletions


mmedit/datasets/pipelines/matting_aug.py

Lines changed: 11 additions & 8 deletions

@@ -380,8 +380,9 @@ def __call__(self, results):
         trimap = results['trimap']
 
         # generete segmentation mask
-        kernel = cv2.getStructuringElement(
-            cv2.MORPH_ELLIPSE, (self.kernel_size, self.kernel_size))
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
+                                           (self.kernel_size,
+                                            self.kernel_size))
         seg = (alpha > 0.5).astype(np.float32)
         seg = cv2.erode(
             seg, kernel, iterations=np.random.randint(*self.erode_iter_range))

@@ -539,10 +540,12 @@ def __call__(self, results):
             cv2.BORDER_REPLICATE)
 
         # erode/dilate segmentation mask
-        erode_kernel = cv2.getStructuringElement(
-            cv2.MORPH_ELLIPSE, (self.erode_ksize, self.erode_ksize))
-        dilate_kernel = cv2.getStructuringElement(
-            cv2.MORPH_ELLIPSE, (self.dilate_ksize, self.dilate_ksize))
+        erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
+                                                 (self.erode_ksize,
+                                                  self.erode_ksize))
+        dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
+                                                  (self.dilate_ksize,
+                                                   self.dilate_ksize))
         seg = cv2.erode(
             seg,
             erode_kernel,

@@ -616,8 +619,8 @@ def __call__(self, results):
                 cv2.DIST_L2, 0)**2
             dt_mask = dt_mask[..., None]
             L = 320
-            trimap_trans[..., 3 * k:3 * k + 3] = np.exp(
-                dt_mask / (2 * ((factor * L)**2)))
+            trimap_trans[..., 3 * k:3 * k +
+                         3] = np.exp(dt_mask / (2 * ((factor * L)**2)))
 
         results['transformed_trimap'] = trimap_trans
         results['two_channel_trimap'] = trimap2
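Note: the hunks above are pure reformatting. For readers new to the matting pipeline, a minimal standalone sketch of what the first hunk computes — thresholding the alpha matte and eroding it with an elliptical kernel (the kernel size, iteration range, and dummy matte below are illustrative stand-ins, not the pipeline's configured values):

import cv2
import numpy as np

# Illustrative stand-ins; the real pipeline reads these from its config.
kernel_size = 5
erode_iter_range = (3, 7)
alpha = np.random.rand(64, 64).astype(np.float32)  # dummy alpha matte

# Same computation as the diff above: build an elliptical structuring
# element, binarize the matte into a segmentation mask, then erode it
# a random number of times.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                   (kernel_size, kernel_size))
seg = (alpha > 0.5).astype(np.float32)
seg = cv2.erode(seg, kernel, iterations=np.random.randint(*erode_iter_range))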

mmedit/models/backbones/__init__.py

Lines changed: 0 additions & 2 deletions

@@ -1,4 +1,3 @@
-# yapf: disable
 from .encoder_decoders import (VGG16, ContextualAttentionNeck, DeepFillDecoder,
                                DeepFillEncoder, DeepFillEncoderDecoder,
                                DepthwiseIndexBlock, FBADecoder,
@@ -10,7 +9,6 @@
                                ResGCADecoder, ResGCAEncoder, ResNetDec,
                                ResNetEnc, ResShortcutDec, ResShortcutEnc,
                                SimpleEncoderDecoder)
-# yapf: enable
 from .generation_backbones import ResnetGenerator, UnetGenerator
 from .sr_backbones import (EDSR, RDN, SRCNN, BasicVSRNet, DICNet, EDVRNet,
                            GLEANStyleGANv2, IconVSR, MSRResNet, RRDBNet,

setup.cfg

Lines changed: 2 additions & 0 deletions

@@ -11,6 +11,8 @@ addopts=tests/
 based_on_style = pep8
 blank_line_before_nested_class_or_def = true
 split_before_expression_after_opening_paren = true
+split_penalty_import_names=0
+SPLIT_PENALTY_AFTER_OPENING_BRACKET=888
 
 [isort]
 line_length = 79
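These two penalties explain why the # yapf: disable guards around the long import in mmedit/models/backbones/__init__.py could be dropped: a zero penalty for splitting between import names lets yapf wrap long import lists on its own, while a high penalty after an opening bracket discourages breaking immediately after an opening parenthesis in ordinary calls. A minimal sketch of the effect via yapf's Python API (the source string is illustrative; FormatCode returns the formatted text plus a changed flag in yapf versions of this era):

from yapf.yapflib.yapf_api import FormatCode

# Illustrative long import of the kind previously wrapped in yapf: disable.
source = ('from .encoder_decoders import (VGG16, ContextualAttentionNeck, '
          'DeepFillDecoder, DeepFillEncoder, DeepFillEncoderDecoder)\n')

# style_config accepts the same brace syntax as yapf's --style flag.
formatted, _ = FormatCode(
    source,
    style_config='{based_on_style: pep8, split_penalty_import_names: 0, '
    'split_penalty_after_opening_bracket: 888}')
print(formatted)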
Lines changed: 256 additions & 0 deletions

@@ -0,0 +1,256 @@
+from pathlib import Path
+
+import pytest
+from mmcv.utils.testing import assert_dict_has_keys
+
+from mmedit.datasets import (BaseGenerationDataset, GenerationPairedDataset,
+                             GenerationUnpairedDataset)
+
+
+class TestGenerationDatasets:
+
+    @classmethod
+    def setup_class(cls):
+        cls.data_prefix = Path(__file__).parent.parent.parent / 'data'
+
+    def test_base_generation_dataset(self):
+
+        class ToyDataset(BaseGenerationDataset):
+            """Toy dataset for testing Generation Dataset."""
+
+            def load_annotations(self):
+                pass
+
+        toy_dataset = ToyDataset(pipeline=[])
+        file_paths = [
+            'paired/test/3.jpg', 'paired/train/1.jpg', 'paired/train/2.jpg'
+        ]
+        file_paths = [str(self.data_prefix / v) for v in file_paths]
+
+        # test scan_folder
+        result = toy_dataset.scan_folder(self.data_prefix)
+        assert set(file_paths).issubset(set(result))
+        result = toy_dataset.scan_folder(str(self.data_prefix))
+        assert set(file_paths).issubset(set(result))
+
+        with pytest.raises(TypeError):
+            toy_dataset.scan_folder(123)
+
+        # test evaluate
+        toy_dataset.data_infos = file_paths
+        with pytest.raises(TypeError):
+            _ = toy_dataset.evaluate(1)
+        test_results = [dict(saved_flag=True), dict(saved_flag=True)]
+        with pytest.raises(AssertionError):
+            _ = toy_dataset.evaluate(test_results)
+        test_results = [
+            dict(saved_flag=True),
+            dict(saved_flag=True),
+            dict(saved_flag=False)
+        ]
+        eval_results = toy_dataset.evaluate(test_results)
+        assert eval_results['val_saved_number'] == 2
+
+    def test_generation_paired_dataset(self):
+        # setup
+        img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+        pipeline = [
+            dict(
+                type='LoadPairedImageFromFile',
+                io_backend='disk',
+                key='pair',
+                flag='color'),
+            dict(
+                type='Resize',
+                keys=['img_a', 'img_b'],
+                scale=(286, 286),
+                interpolation='bicubic'),
+            dict(
+                type='FixedCrop',
+                keys=['img_a', 'img_b'],
+                crop_size=(256, 256)),
+            dict(type='Flip', keys=['img_a', 'img_b'], direction='horizontal'),
+            dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
+            dict(
+                type='Normalize',
+                keys=['img_a', 'img_b'],
+                to_rgb=True,
+                **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img_a', 'img_b']),
+            dict(
+                type='Collect',
+                keys=['img_a', 'img_b'],
+                meta_keys=['img_a_path', 'img_b_path'])
+        ]
+        target_keys = ['img_a', 'img_b', 'meta']
+        target_meta_keys = ['img_a_path', 'img_b_path']
+        pair_folder = self.data_prefix / 'paired'
+
+        # input path is Path object
+        generation_paried_dataset = GenerationPairedDataset(
+            dataroot=pair_folder, pipeline=pipeline, test_mode=True)
+        data_infos = generation_paried_dataset.data_infos
+        assert data_infos == [
+            dict(pair_path=str(pair_folder / 'test' / '3.jpg'))
+        ]
+        result = generation_paried_dataset[0]
+        assert (len(generation_paried_dataset) == 1)
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(pair_folder / 'test' /
+                                                         '3.jpg'))
+        assert (result['meta'].data['img_b_path'] == str(pair_folder / 'test' /
+                                                         '3.jpg'))
+
+        # input path is str
+        generation_paried_dataset = GenerationPairedDataset(
+            dataroot=str(pair_folder), pipeline=pipeline, test_mode=True)
+        data_infos = generation_paried_dataset.data_infos
+        assert data_infos == [
+            dict(pair_path=str(pair_folder / 'test' / '3.jpg'))
+        ]
+        result = generation_paried_dataset[0]
+        assert (len(generation_paried_dataset) == 1)
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(pair_folder / 'test' /
+                                                         '3.jpg'))
+        assert (result['meta'].data['img_b_path'] == str(pair_folder / 'test' /
+                                                         '3.jpg'))
+
+        # test_mode = False
+        generation_paried_dataset = GenerationPairedDataset(
+            dataroot=str(pair_folder), pipeline=pipeline, test_mode=False)
+        data_infos = generation_paried_dataset.data_infos
+        assert data_infos == [
+            dict(pair_path=str(pair_folder / 'train' / '1.jpg')),
+            dict(pair_path=str(pair_folder / 'train' / '2.jpg'))
+        ]
+        assert (len(generation_paried_dataset) == 2)
+        result = generation_paried_dataset[0]
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(pair_folder /
+                                                         'train' / '1.jpg'))
+        assert (result['meta'].data['img_b_path'] == str(pair_folder /
+                                                         'train' / '1.jpg'))
+        result = generation_paried_dataset[1]
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(pair_folder /
+                                                         'train' / '2.jpg'))
+        assert (result['meta'].data['img_b_path'] == str(pair_folder /
+                                                         'train' / '2.jpg'))
+
+    def test_generation_unpaired_dataset(self):
+        # setup
+        img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+        pipeline = [
+            dict(
+                type='LoadImageFromFile',
+                io_backend='disk',
+                key='img_a',
+                flag='color'),
+            dict(
+                type='LoadImageFromFile',
+                io_backend='disk',
+                key='img_b',
+                flag='color'),
+            dict(
+                type='Resize',
+                keys=['img_a', 'img_b'],
+                scale=(286, 286),
+                interpolation='bicubic'),
+            dict(
+                type='Crop',
+                keys=['img_a', 'img_b'],
+                crop_size=(256, 256),
+                random_crop=True),
+            dict(type='Flip', keys=['img_a'], direction='horizontal'),
+            dict(type='Flip', keys=['img_b'], direction='horizontal'),
+            dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
+            dict(
+                type='Normalize',
+                keys=['img_a', 'img_b'],
+                to_rgb=True,
+                **img_norm_cfg),
+            dict(type='ImageToTensor', keys=['img_a', 'img_b']),
+            dict(
+                type='Collect',
+                keys=['img_a', 'img_b'],
+                meta_keys=['img_a_path', 'img_b_path'])
+        ]
+        target_keys = ['img_a', 'img_b', 'meta']
+        target_meta_keys = ['img_a_path', 'img_b_path']
+        unpair_folder = self.data_prefix / 'unpaired'
+
+        # input path is Path object
+        generation_unpaired_dataset = GenerationUnpairedDataset(
+            dataroot=unpair_folder, pipeline=pipeline, test_mode=True)
+        data_infos_a = generation_unpaired_dataset.data_infos_a
+        data_infos_b = generation_unpaired_dataset.data_infos_b
+        assert data_infos_a == [
+            dict(path=str(unpair_folder / 'testA' / '5.jpg'))
+        ]
+        assert data_infos_b == [
+            dict(path=str(unpair_folder / 'testB' / '6.jpg'))
+        ]
+        result = generation_unpaired_dataset[0]
+        assert (len(generation_unpaired_dataset) == 1)
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(unpair_folder /
+                                                         'testA' / '5.jpg'))
+        assert (result['meta'].data['img_b_path'] == str(unpair_folder /
+                                                         'testB' / '6.jpg'))
+
+        # input path is str
+        generation_unpaired_dataset = GenerationUnpairedDataset(
+            dataroot=str(unpair_folder), pipeline=pipeline, test_mode=True)
+        data_infos_a = generation_unpaired_dataset.data_infos_a
+        data_infos_b = generation_unpaired_dataset.data_infos_b
+        assert data_infos_a == [
+            dict(path=str(unpair_folder / 'testA' / '5.jpg'))
+        ]
+        assert data_infos_b == [
+            dict(path=str(unpair_folder / 'testB' / '6.jpg'))
+        ]
+        result = generation_unpaired_dataset[0]
+        assert (len(generation_unpaired_dataset) == 1)
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(unpair_folder /
+                                                         'testA' / '5.jpg'))
+        assert (result['meta'].data['img_b_path'] == str(unpair_folder /
+                                                         'testB' / '6.jpg'))
+
+        # test_mode = False
+        generation_unpaired_dataset = GenerationUnpairedDataset(
+            dataroot=str(unpair_folder), pipeline=pipeline, test_mode=False)
+        data_infos_a = generation_unpaired_dataset.data_infos_a
+        data_infos_b = generation_unpaired_dataset.data_infos_b
+        assert data_infos_a == [
+            dict(path=str(unpair_folder / 'trainA' / '1.jpg')),
+            dict(path=str(unpair_folder / 'trainA' / '2.jpg'))
+        ]
+        assert data_infos_b == [
+            dict(path=str(unpair_folder / 'trainB' / '3.jpg')),
+            dict(path=str(unpair_folder / 'trainB' / '4.jpg'))
+        ]
+        assert (len(generation_unpaired_dataset) == 2)
+        img_b_paths = [
+            str(unpair_folder / 'trainB' / '3.jpg'),
+            str(unpair_folder / 'trainB' / '4.jpg')
+        ]
+        result = generation_unpaired_dataset[0]
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(unpair_folder /
+                                                         'trainA' / '1.jpg'))
+        assert result['meta'].data['img_b_path'] in img_b_paths
+        result = generation_unpaired_dataset[1]
+        assert assert_dict_has_keys(result, target_keys)
+        assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
+        assert (result['meta'].data['img_a_path'] == str(unpair_folder /
+                                                         'trainA' / '2.jpg'))
+        assert result['meta'].data['img_b_path'] in img_b_paths
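The tests above lean on assert_dict_has_keys from mmcv, which, despite its name, returns a boolean rather than raising — hence the double assert assert_dict_has_keys(...) pattern. A hypothetical stand-in to clarify what is being checked (not mmcv's actual implementation):

def dict_has_keys(obj, expected_keys):
    # Hypothetical equivalent of mmcv.utils.testing.assert_dict_has_keys:
    # True when obj is a dict containing every expected key.
    return isinstance(obj, dict) and set(expected_keys).issubset(obj.keys())

assert dict_has_keys({'img_a': 1, 'img_b': 2, 'meta': 3}, ['img_a', 'meta'])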
Lines changed: 54 additions & 0 deletions

@@ -0,0 +1,54 @@
+import os.path as osp
+from pathlib import Path
+
+import numpy as np
+import pytest
+
+from mmedit.datasets import AdobeComp1kDataset
+
+
+class TestMattingDatasets:
+
+    @classmethod
+    def setup_class(cls):
+        # create para for creating a dataset.
+        cls.data_prefix = Path(__file__).parent.parent.parent / 'data'
+        cls.ann_file = osp.join(cls.data_prefix, 'test_list.json')
+        cls.pipeline = [
+            dict(type='LoadImageFromFile', key='alpha', flag='grayscale')
+        ]
+
+    def test_comp1k_dataset(self):
+        comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline,
+                                            self.data_prefix)
+        first_data = comp1k_dataset[0]
+
+        assert 'alpha' in first_data
+        assert isinstance(first_data['alpha'], np.ndarray)
+        assert first_data['alpha'].shape == (552, 800)
+
+    def test_comp1k_evaluate(self):
+        comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline,
+                                            self.data_prefix)
+
+        with pytest.raises(TypeError):
+            comp1k_dataset.evaluate('Not a list object')
+
+        results = [{
+            'pred_alpha': None,
+            'eval_result': {
+                'SAD': 26,
+                'MSE': 0.006
+            }
+        }, {
+            'pred_alpha': None,
+            'eval_result': {
+                'SAD': 24,
+                'MSE': 0.004
+            }
+        }]
+
+        eval_result = comp1k_dataset.evaluate(results)
+        assert set(eval_result.keys()) == set(['SAD', 'MSE'])
+        assert eval_result['SAD'] == 25
+        assert eval_result['MSE'] == 0.005
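The expected values follow from per-metric averaging over the two fake results: SAD = (26 + 24) / 2 = 25 and MSE = (0.006 + 0.004) / 2 = 0.005. A short sketch of the aggregation the test implies (an assumption about, not a copy of, AdobeComp1kDataset.evaluate):

# Average each metric across the per-image eval_result dicts.
results = [{'eval_result': {'SAD': 26, 'MSE': 0.006}},
           {'eval_result': {'SAD': 24, 'MSE': 0.004}}]
averaged = {
    metric: sum(r['eval_result'][metric] for r in results) / len(results)
    for metric in results[0]['eval_result']
}
assert averaged == {'SAD': 25.0, 'MSE': 0.005}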
Lines changed: 24 additions & 0 deletions

@@ -0,0 +1,24 @@
+from torch.utils.data import Dataset
+
+from mmedit.datasets import RepeatDataset
+
+
+def test_repeat_dataset():
+
+    class ToyDataset(Dataset):
+
+        def __init__(self):
+            super().__init__()
+            self.members = [1, 2, 3, 4, 5]
+
+        def __len__(self):
+            return len(self.members)
+
+        def __getitem__(self, idx):
+            return self.members[idx % 5]
+
+    toy_dataset = ToyDataset()
+    repeat_dataset = RepeatDataset(toy_dataset, 2)
+    assert len(repeat_dataset) == 10
+    assert repeat_dataset[2] == 3
+    assert repeat_dataset[8] == 4
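For context, the semantics this test pins down: the wrapper's length is times * len(dataset) and indexing wraps around, so index 8 maps to 8 % 5 = 3, i.e. member 4. A simplified stand-in (not mmedit's actual RepeatDataset):

class SimpleRepeatDataset:
    # Simplified stand-in for mmedit.datasets.RepeatDataset: cycles the
    # wrapped dataset `times` times.

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times

    def __len__(self):
        return self.times * len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % len(self.dataset)]

repeat = SimpleRepeatDataset([1, 2, 3, 4, 5], times=2)
assert len(repeat) == 10 and repeat[2] == 3 and repeat[8] == 4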
