+from collections import defaultdict
+
 import torch
-import transforms as T
+
+
+def get_modules(use_v2):
+    # We need a protected import to avoid the V2 warning in case just V1 is used
+    if use_v2:
+        import torchvision.datapoints
+        import torchvision.transforms.v2
+        import v2_extras
+
+        return torchvision.transforms.v2, torchvision.datapoints, v2_extras
+    else:
+        import transforms
+
+        return transforms, None, None


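Annotation (not part of the diff): `get_modules` returns a module triple so the rest of the file can stay agnostic about v1 vs. v2. A minimal sketch of how that triple is meant to be unpacked; with `use_v2=False` the last two entries are `None`, so the v1 code path must never touch `datapoints` or `v2_extras`:

    # Sketch only; assumes this runs inside references/segmentation so the
    # local `transforms` and `v2_extras` modules resolve.
    T, datapoints, v2_extras = get_modules(use_v2=False)
    assert datapoints is None and v2_extras is None  # v1: plain module, no extras
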
 class SegmentationPresetTrain:
-    def __init__(self, *, base_size, crop_size, hflip_prob=0.5, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
-        min_size = int(0.5 * base_size)
-        max_size = int(2.0 * base_size)
+    def __init__(
+        self,
+        *,
+        base_size,
+        crop_size,
+        hflip_prob=0.5,
+        mean=(0.485, 0.456, 0.406),
+        std=(0.229, 0.224, 0.225),
+        backend="pil",
+        use_v2=False,
+    ):
+        T, datapoints, v2_extras = get_modules(use_v2)
+
+        transforms = []
+        backend = backend.lower()
+        if backend == "datapoint":
+            transforms.append(T.ToImageTensor())
+        elif backend == "tensor":
+            transforms.append(T.PILToTensor())
+        elif backend != "pil":
+            raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
+
+        transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]

-        trans = [T.RandomResize(min_size, max_size)]
         if hflip_prob > 0:
-            trans.append(T.RandomHorizontalFlip(hflip_prob))
-        trans.extend(
-            [
-                T.RandomCrop(crop_size),
-                T.PILToTensor(),
-                T.ConvertImageDtype(torch.float),
-                T.Normalize(mean=mean, std=std),
+            transforms += [T.RandomHorizontalFlip(hflip_prob)]
+
+        if use_v2:
+            # We need a custom pad transform here, since the padding we want to perform here is fundamentally
+            # different from the padding in `RandomCrop` if `pad_if_needed=True`.
+            transforms += [v2_extras.PadIfSmaller(crop_size, fill=defaultdict(lambda: 0, {datapoints.Mask: 255}))]
+
+        transforms += [T.RandomCrop(crop_size)]
+
+        if backend == "pil":
+            transforms += [T.PILToTensor()]
+
+        if use_v2:
+            img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
+            transforms += [
+                T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
             ]
-        )
-        self.transforms = T.Compose(trans)
+        else:
+            # No need to explicitly convert masks as they're magically int64 already
+            transforms += [T.ConvertImageDtype(torch.float)]
+
+        transforms += [T.Normalize(mean=mean, std=std)]
+
+        self.transforms = T.Compose(transforms)

     def __call__(self, img, target):
         return self.transforms(img, target)


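Annotation (not part of the diff): a hedged usage sketch of the new train preset. The sample image and mask below are hypothetical; the snippet assumes a torchvision build that ships `torchvision.datapoints` and that the sibling `v2_extras` module is importable:

    import PIL.Image
    import torch
    import torchvision.datapoints

    train_tf = SegmentationPresetTrain(base_size=520, crop_size=480, use_v2=True)
    image = PIL.Image.new("RGB", (400, 300))  # hypothetical sample
    mask = torchvision.datapoints.Mask(torch.zeros(300, 400, dtype=torch.uint8))
    image, mask = train_tf(image, mask)  # float32 normalized image, int64 mask

The `fill=defaultdict(lambda: 0, {datapoints.Mask: 255})` passed to `PadIfSmaller` is what makes the padding type-aware: images are padded with 0, while masks are padded with 255, the value these reference scripts treat as the ignore index, so padded pixels stay out of the loss.
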
 class SegmentationPresetEval:
-    def __init__(self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
-        self.transforms = T.Compose(
-            [
-                T.RandomResize(base_size, base_size),
-                T.PILToTensor(),
-                T.ConvertImageDtype(torch.float),
-                T.Normalize(mean=mean, std=std),
-            ]
-        )
+    def __init__(
+        self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
+    ):
+        T, _, _ = get_modules(use_v2)
+
+        transforms = []
+        backend = backend.lower()
+        if backend == "tensor":
+            transforms += [T.PILToTensor()]
+        elif backend == "datapoint":
+            transforms += [T.ToImageTensor()]
+        elif backend != "pil":
+            raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
+
+        if use_v2:
+            transforms += [T.Resize(size=(base_size, base_size))]
+        else:
+            transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
+
+        if backend == "pil":
+            # Note: we could just convert to pure tensors even in v2?
+            transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
+
+        transforms += [
+            T.ConvertImageDtype(torch.float),
+            T.Normalize(mean=mean, std=std),
+        ]
+        self.transforms = T.Compose(transforms)

     def __call__(self, img, target):
         return self.transforms(img, target)
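Annotation (not part of the diff): the eval preset resizes deterministically. The v2 path uses `T.Resize(size=(base_size, base_size))`, a fixed square output, while the v1 path keeps the old trick of `RandomResize` with `min_size == max_size`, a fixed shorter-edge resize. A hedged sketch of the v1 path, with hypothetical PIL inputs:

    eval_tf = SegmentationPresetEval(base_size=520, use_v2=False)
    image, target = eval_tf(pil_image, pil_mask)  # pil_image / pil_mask: hypothetical
    # image: float32, ImageNet-normalized; target: int64 class-index mask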