-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathobject_distribution_dataset.py
134 lines (119 loc) · 4.19 KB
/
object_distribution_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
from __future__ import annotations
from builtins import FileNotFoundError
from PIL import Image
import torch
from pathlib import Path
import numpy as np
from torchvision import transforms
import torchvision.transforms as T
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from torch.utils.data import random_split
import utils
from build_object_dataset import BuildObjectDataset
class ObjectDistributionDataModule(pl.LightningDataModule):
    """LightningDataModule serving (aerial image, object-count) pairs.

    Reads a pre-built object-distribution CSV (building it on demand via
    ``BuildObjectDataset``), splits the rows into train/validation subsets,
    and exposes dataloaders over :class:`ObjectDistributionDataset`.
    """

    def __init__(
        self,
        cvusa_root: str,
        obj_dataset_root: str,
        batch_size: int = 2,
        num_workers: int = 0,
        start_index: int = 0,
        num_items: int = 5,
        valid_pct: float = 0.05,
    ):
        """
        Args:
            cvusa_root: Root directory of the CVUSA imagery.
            obj_dataset_root: Directory containing (or to contain) the
                object-dataset CSV files.
            batch_size: Batch size for both dataloaders.
            num_workers: DataLoader worker count.
            start_index: First CSV row to use.
            num_items: Number of rows to use; if None, all rows are used
                (resolved in ``setup``).
            valid_pct: Fraction of the selected rows held out for validation.
        """
        super().__init__()
        self.batch_size = batch_size
        self.obj_dataset_root = obj_dataset_root
        self.cvusa_root = cvusa_root
        self.num_workers = num_workers
        self.start_index = start_index
        self.num_items = num_items
        self.valid_pct = valid_pct
        # NOTE(review): if num_items is passed as None the filename embeds
        # "None" — the CSV name must match what BuildObjectDataset writes.
        self.obj_csv_path = (
            Path(self.obj_dataset_root)
            / f"object_dataset_{self.start_index}_{self.num_items}.csv"
        )

    def prepare_data(self):
        """Build the object-dataset CSV once if it does not exist yet."""
        if not self.obj_csv_path.is_file():
            bod = BuildObjectDataset(
                num_items=self.num_items, start_index=self.start_index
            )
            bod.build()

    def setup(self, stage=None):
        """Load the CSV, build transforms, and split train/validation."""
        obj_ds = utils.read_csv(self.obj_csv_path)
        # Crop progressively (750 -> random 512) then resize to 256x256 and
        # normalize with ImageNet statistics.
        self.tfm = transforms.Compose(
            [
                T.ToTensor(),
                T.CenterCrop([750]),
                T.RandomCrop([512]),
                T.Resize([256, 256]),
                T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            ]
        )
        if self.num_items is None:
            self.num_items = len(obj_ds)
        rows = obj_ds[self.start_index : self.start_index + self.num_items]
        valid_size = int(self.valid_pct * self.num_items)
        # Fixed seed so the train/validation split is reproducible.
        train_rows, valid_rows = random_split(
            rows,
            [self.num_items - valid_size, valid_size],
            generator=torch.Generator().manual_seed(42),
        )
        self.train_obj_dist_dataset = ObjectDistributionDataset(
            data=train_rows, cvusa_root=self.cvusa_root, tfms=self.tfm
        )
        self.valid_obj_dist_dataset = ObjectDistributionDataset(
            data=valid_rows, cvusa_root=self.cvusa_root, tfms=self.tfm
        )

    def train_dataloader(self):
        """DataLoader over the training split (shuffled)."""
        return DataLoader(
            self.train_obj_dist_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            shuffle=True,
            drop_last=True,
            collate_fn=self.collate_fn,
        )

    def val_dataloader(self):
        """DataLoader over the validation split (not shuffled).

        Bug fix: the original returned the *training* dataset here, so
        validation metrics were computed on training data.
        """
        return DataLoader(
            self.valid_obj_dist_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=True,
            shuffle=False,
            drop_last=True,
            collate_fn=self.collate_fn,
        )

    def collate_fn(self, batch):
        """Drop None samples (missing images) before default collation."""
        batch = list(filter(lambda x: x is not None, batch))
        return torch.utils.data.dataloader.default_collate(batch)
class ObjectDistributionDataset(Dataset):
    """Map-style dataset yielding ``{"aerial_img", "labels_counts"}`` dicts.

    Each row of ``data`` is expected to be
    ``(img_path, lat, lon, count_0, count_1, ...)`` — TODO confirm against
    the CSV layout produced by ``BuildObjectDataset``.
    """

    def __init__(self, cvusa_root: str, data=None, tfms=None) -> None:
        """
        Args:
            cvusa_root: Root directory the per-row image paths are
                relative to. Accepts ``str`` or ``Path``.
            data: Sequence of rows (see class docstring).
            tfms: Optional callable applied to each loaded PIL image.
        """
        super().__init__()
        self.data = data
        # Bug fix: the original stored the raw argument, but
        # ``_get_aerial_img`` joins it with ``/`` — which raises TypeError
        # for a plain str. Normalize to Path (accepts str or Path).
        self.cvusa_root = Path(cvusa_root)
        self.tfms = tfms

    def _get_aerial_img(self, img_path, lat, lon):
        """Load and transform one aerial image; None if the file is missing.

        ``lat``/``lon`` are accepted to match the row layout but are unused.
        """
        aerial_path = self.cvusa_root / img_path
        try:
            img = Image.open(str(aerial_path))
        except FileNotFoundError:
            print(f"Image Not found {str(aerial_path)}")
            return None
        if self.tfms:
            img = self.tfms(img)
        return img

    def _get_labels(self, row):
        """Convert count strings to ints, treating empty cells as 0."""
        labels = [0 if r == "" else int(r) for r in row]
        return np.array(labels)

    def __getitem__(self, index):
        """Return one sample dict, or None if its image file is missing.

        None samples are filtered out by the DataModule's collate_fn.
        """
        row = self.data[index]
        aerial_img = self._get_aerial_img(*row[:3])
        if aerial_img is None:
            return None
        labels = self._get_labels(row[3:])
        return {"aerial_img": aerial_img, "labels_counts": labels}

    def __len__(self):
        return len(self.data)