From 366b075daa19b2a87b24cd58cd8121dd708df7da Mon Sep 17 00:00:00 2001 From: gaoyi <617379377@qq.com> Date: Thu, 11 Apr 2019 21:37:03 +0800 Subject: [PATCH] init --- README.md | 113 ++- dataloaders/__init__.py | 0 dataloaders/atr.py | 109 +++ dataloaders/cihp.py | 107 +++ dataloaders/cihp_pascal_atr.py | 219 +++++ dataloaders/custom_transforms.py | 491 +++++++++++ dataloaders/mypath_atr.py | 8 + dataloaders/mypath_cihp.py | 8 + dataloaders/mypath_pascal.py | 8 + dataloaders/pascal.py | 106 +++ eval_cihp.sh | 6 + eval_pascal.sh | 6 + exp/inference/inference.py | 203 +++++ exp/test/__init__.py | 3 + exp/test/eval_show_cihp2pascal.py | 268 ++++++ exp/test/eval_show_pascal2cihp.py | 268 ++++++ exp/test/test_from_disk.py | 65 ++ exp/transfer/train_cihp_from_pascal.py | 331 ++++++++ exp/universal/pascal_atr_cihp_uni.py | 493 +++++++++++ img/messi.jpg | Bin 0 -> 46306 bytes img/messi_output.png | Bin 0 -> 4342 bytes networks/__init__.py | 0 networks/deeplab_xception.py | 684 +++++++++++++++ networks/deeplab_xception_synBN.py | 596 +++++++++++++ networks/deeplab_xception_transfer.py | 1003 ++++++++++++++++++++++ networks/deeplab_xception_universal.py | 1077 ++++++++++++++++++++++++ networks/gcn.py | 271 ++++++ networks/graph.py | 261 ++++++ requirements | 7 + sync_batchnorm/__init__.py | 12 + sync_batchnorm/batchnorm.py | 315 +++++++ sync_batchnorm/comm.py | 137 +++ sync_batchnorm/replicate.py | 94 +++ sync_batchnorm/unittest.py | 29 + train_transfer_cihp.sh | 2 + train_universal.sh | 3 + utils/__init__.py | 5 + utils/sampler.py | 164 ++++ utils/test_human.py | 167 ++++ utils/util.py | 244 ++++++ 40 files changed, 7881 insertions(+), 2 deletions(-) create mode 100644 dataloaders/__init__.py create mode 100644 dataloaders/atr.py create mode 100644 dataloaders/cihp.py create mode 100644 dataloaders/cihp_pascal_atr.py create mode 100644 dataloaders/custom_transforms.py create mode 100644 dataloaders/mypath_atr.py create mode 100644 dataloaders/mypath_cihp.py create mode 100644 dataloaders/mypath_pascal.py create mode 100644 dataloaders/pascal.py create mode 100644 eval_cihp.sh create mode 100644 eval_pascal.sh create mode 100644 exp/inference/inference.py create mode 100644 exp/test/__init__.py create mode 100644 exp/test/eval_show_cihp2pascal.py create mode 100644 exp/test/eval_show_pascal2cihp.py create mode 100644 exp/test/test_from_disk.py create mode 100644 exp/transfer/train_cihp_from_pascal.py create mode 100644 exp/universal/pascal_atr_cihp_uni.py create mode 100644 img/messi.jpg create mode 100644 img/messi_output.png create mode 100644 networks/__init__.py create mode 100644 networks/deeplab_xception.py create mode 100644 networks/deeplab_xception_synBN.py create mode 100644 networks/deeplab_xception_transfer.py create mode 100644 networks/deeplab_xception_universal.py create mode 100644 networks/gcn.py create mode 100644 networks/graph.py create mode 100644 requirements create mode 100644 sync_batchnorm/__init__.py create mode 100644 sync_batchnorm/batchnorm.py create mode 100644 sync_batchnorm/comm.py create mode 100644 sync_batchnorm/replicate.py create mode 100644 sync_batchnorm/unittest.py create mode 100644 train_transfer_cihp.sh create mode 100644 train_universal.sh create mode 100644 utils/__init__.py create mode 100644 utils/sampler.py create mode 100644 utils/test_human.py create mode 100644 utils/util.py diff --git a/README.md b/README.md index 1830d48..71861f8 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,111 @@ -# 
Graphonomy-Universal-Human-Parsing-via-Graph-Transfer-Learning
-coming soon.
+# Graphonomy: Universal Human Parsing via Graph Transfer Learning
+
+This repository contains the code for the paper:
+
+[**Graphonomy: Universal Human Parsing via Graph Transfer Learning**](https://arxiv.org/abs/1904.04536)
+by Ke Gong, Yiming Gao, Xiaodan Liang, Xiaohui Shen, Meng Wang, Liang Lin.
+
+# Environment and installation
++ PyTorch 0.4.0
++ torchvision
++ scipy
++ tensorboardX
++ numpy
++ opencv-python
++ matplotlib
++ networkx
+
+You can install the packages listed above with `pip install -r requirements.txt`.
+
+# Getting Started
+### Data Preparation
++ You need to download the human parsing datasets, prepare the images, and store them in `/data/datasets/dataset_name/`.
+We recommend symlinking the dataset paths to `/data/datasets/` as follows:
+
+```
+# symlink the Pascal-Person-Part dataset, for example
+ln -s /path_to_Pascal_Person_Part/* data/datasets/pascal/
+```
++ The file structure should look like:
+```
+/Graphonomy
+  /data
+    /datasets
+      /pascal
+        /JPEGImages
+        /list
+        /SegmentationPart
+      /CIHP_4w
+        /Images
+        /lists
+        ...
+```
+
+### Inference
+We provide a simple script to get visualization results on the CIHP dataset using the [trained](https://drive.google.com/file/d/1O9YD4kHgs3w2DUcWxtHiEFyWjCBeS_Vc/view?usp=sharing)
+model as follows:
+```shell
+# Example of inference
+python exp/inference/inference.py \
+--loadmodel /path_to_inference_model \
+--img_path ./img/messi.jpg \
+--output_path ./img/ \
+--output_name output_file_name
+```
+
+### Training
+#### Transfer learning
+1. Download the Pascal pretrained model (available soon).
+2. Run `sh train_transfer_cihp.sh`.
+3. The results and models are saved in `exp/transfer/run/`.
+4. The evaluation and visualization script is `eval_cihp.sh`; you only need to change the `--loadmodel` argument before running it.
+
+#### Universal training
+1. Download the [pretrained](https://drive.google.com/file/d/18WiffKnxaJo50sCC9zroNyHjcnTxGCbk/view?usp=sharing) model and store it in `/data/pretrained_model/`.
+2. Run `sh train_universal.sh`.
+3. The results and models are saved in `exp/universal/run/`.
+
+### Testing
+If you want to evaluate the performance of a pre-trained model on the PASCAL-Person-Part or CIHP val/test set,
+simply run `sh eval_pascal.sh` or `sh eval_cihp.sh`, specifying the model to load with `--loadmodel`.
+We also provide the final models, which you can download and store in `/data/pretrained_model/`.
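+
+For reference, `eval_cihp.sh` in this repo runs the following command; change `--loadmodel` to point at the checkpoint you want to evaluate:
+```shell
+python ./exp/test/eval_show_pascal2cihp.py \
+    --batch 1 --gpus 1 --classes 20 \
+    --gt_path './data/datasets/CIHP_4w/Category_ids/' \
+    --txt_file './data/datasets/CIHP_4w/lists/test_id.txt' \
+    --loadmodel './data/pretrained_model/inference.pth'
+```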
+
+### Models
+**Pascal-Person-Part trained model**
+
+|Model|Google Cloud|Baidu Yun|
+|--------|--------------|-----------|
+|Graphonomy(CIHP)| [Download](https://drive.google.com/file/d/1cwEhlYEzC7jIShENNLnbmcBR0SNlZDE6/view?usp=sharing)| Available soon|
+
+**CIHP trained model**
+
+|Model|Google Cloud|Baidu Yun|
+|--------|--------------|-----------|
+|Graphonomy(PASCAL)| [Download](https://drive.google.com/file/d/1O9YD4kHgs3w2DUcWxtHiEFyWjCBeS_Vc/view?usp=sharing)| Available soon|
+
+**Universal trained model**
+
+|Model|Google Cloud|Baidu Yun|
+|--------|--------------|-----------|
+|Universal|Available soon|Available soon|
+
+### Todo:
+- [ ] release pretrained and trained models
+- [ ] update universal eval code & script
+
+# Citation
+
+```
+@inproceedings{Gong2019Graphonomy,
+author = {Ke Gong and Yiming Gao and Xiaodan Liang and Xiaohui Shen and Meng Wang and Liang Lin},
+title = {Graphonomy: Universal Human Parsing via Graph Transfer Learning},
+booktitle = {CVPR},
+year = {2019},
+}
+
+```
+
+# Contact
+If you have any questions about this repo, please feel free to contact
+[gaoym9@mail2.sysu.edu.cn](mailto:gaoym9@mail2.sysu.edu.cn).
+
diff --git a/dataloaders/__init__.py b/dataloaders/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dataloaders/atr.py b/dataloaders/atr.py
new file mode 100644
index 0000000..525ac08
--- /dev/null
+++ b/dataloaders/atr.py
@@ -0,0 +1,109 @@
+from __future__ import print_function, division
+import os
+from PIL import Image
+from torch.utils.data import Dataset
+from .mypath_atr import Path
+import random
+from PIL import ImageFile
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+
+class VOCSegmentation(Dataset):
+    """
+    ATR dataset
+    """
+
+    def __init__(self,
+                 base_dir=Path.db_root_dir('atr'),
+                 split='train',
+                 transform=None,
+                 flip=False,
+                 ):
+        """
+        :param base_dir: path to ATR dataset directory
+        :param split: train/val
+        :param transform: transform to apply
+        """
+        super(VOCSegmentation).__init__()
+        self._flip_flag = flip
+
+        self._base_dir = base_dir
+        self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
+        self._cat_dir = os.path.join(self._base_dir, 'SegmentationClassAug')
+        self._flip_dir = os.path.join(self._base_dir,'SegmentationClassAug_rev')
+
+        if isinstance(split, str):
+            self.split = [split]
+        else:
+            split.sort()
+            self.split = split
+
+        self.transform = transform
+
+        _splits_dir = os.path.join(self._base_dir, 'list')
+
+        self.im_ids = []
+        self.images = []
+        self.categories = []
+        self.flip_categories = []
+
+        for splt in self.split:
+            with open(os.path.join(os.path.join(_splits_dir, splt + '_id.txt')), "r") as f:
+                lines = f.read().splitlines()
+
+            for ii, line in enumerate(lines):
+
+                _image = os.path.join(self._image_dir, line+'.jpg' )
+                _cat = os.path.join(self._cat_dir, line +'.png')
+                _flip = os.path.join(self._flip_dir,line + '.png')
+                # print(self._image_dir,_image)
+                assert os.path.isfile(_image)
+                # print(_cat)
+                assert os.path.isfile(_cat)
+                assert os.path.isfile(_flip)
+                self.im_ids.append(line)
+                self.images.append(_image)
+                self.categories.append(_cat)
+                self.flip_categories.append(_flip)
+
+
+        assert (len(self.images) == len(self.categories))
+        assert len(self.flip_categories) == len(self.categories)
+
+        # Display stats
+        print('Number of images in {}: {:d}'.format(split, len(self.images)))
+
+    def __len__(self):
+        return len(self.images)
+
+
+    def __getitem__(self, index):
+        _img, _target= self._make_img_gt_point_pair(index)
+        sample = {'image': _img, 'label': _target}
+
+        if self.transform
is not None: + sample = self.transform(sample) + + return sample + + def _make_img_gt_point_pair(self, index): + # Read Image and Target + # _img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32) + # _target = np.array(Image.open(self.categories[index])).astype(np.float32) + + _img = Image.open(self.images[index]).convert('RGB') # return is RGB pic + if self._flip_flag: + if random.random() < 0.5: + _target = Image.open(self.flip_categories[index]) + _img = _img.transpose(Image.FLIP_LEFT_RIGHT) + else: + _target = Image.open(self.categories[index]) + else: + _target = Image.open(self.categories[index]) + + return _img, _target + + def __str__(self): + return 'ATR(split=' + str(self.split) + ')' + + + diff --git a/dataloaders/cihp.py b/dataloaders/cihp.py new file mode 100644 index 0000000..cc1722f --- /dev/null +++ b/dataloaders/cihp.py @@ -0,0 +1,107 @@ +from __future__ import print_function, division +import os +from PIL import Image +from torch.utils.data import Dataset +from .mypath_cihp import Path +import random + +class VOCSegmentation(Dataset): + """ + CIHP dataset + """ + + def __init__(self, + base_dir=Path.db_root_dir('cihp'), + split='train', + transform=None, + flip=False, + ): + """ + :param base_dir: path to CIHP dataset directory + :param split: train/val/test + :param transform: transform to apply + """ + super(VOCSegmentation).__init__() + self._flip_flag = flip + + self._base_dir = base_dir + self._image_dir = os.path.join(self._base_dir, 'Images') + self._cat_dir = os.path.join(self._base_dir, 'Category_ids') + self._flip_dir = os.path.join(self._base_dir,'Category_rev_ids') + + if isinstance(split, str): + self.split = [split] + else: + split.sort() + self.split = split + + self.transform = transform + + _splits_dir = os.path.join(self._base_dir, 'lists') + + self.im_ids = [] + self.images = [] + self.categories = [] + self.flip_categories = [] + + for splt in self.split: + with open(os.path.join(os.path.join(_splits_dir, splt + '_id.txt')), "r") as f: + lines = f.read().splitlines() + + for ii, line in enumerate(lines): + + _image = os.path.join(self._image_dir, line+'.jpg' ) + _cat = os.path.join(self._cat_dir, line +'.png') + _flip = os.path.join(self._flip_dir,line + '.png') + # print(self._image_dir,_image) + assert os.path.isfile(_image) + # print(_cat) + assert os.path.isfile(_cat) + assert os.path.isfile(_flip) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_cat) + self.flip_categories.append(_flip) + + + assert (len(self.images) == len(self.categories)) + assert len(self.flip_categories) == len(self.categories) + + # Display stats + print('Number of images in {}: {:d}'.format(split, len(self.images))) + + def __len__(self): + return len(self.images) + + + def __getitem__(self, index): + _img, _target= self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target} + + if self.transform is not None: + sample = self.transform(sample) + + return sample + + def _make_img_gt_point_pair(self, index): + # Read Image and Target + # _img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32) + # _target = np.array(Image.open(self.categories[index])).astype(np.float32) + + _img = Image.open(self.images[index]).convert('RGB') # return is RGB pic + if self._flip_flag: + if random.random() < 0.5: + _target = Image.open(self.flip_categories[index]) + _img = _img.transpose(Image.FLIP_LEFT_RIGHT) + else: + _target = Image.open(self.categories[index]) + else: + _target = 
Image.open(self.categories[index]) + + return _img, _target + + def __str__(self): + return 'CIHP(split=' + str(self.split) + ')' + + + diff --git a/dataloaders/cihp_pascal_atr.py b/dataloaders/cihp_pascal_atr.py new file mode 100644 index 0000000..bf35b74 --- /dev/null +++ b/dataloaders/cihp_pascal_atr.py @@ -0,0 +1,219 @@ +from __future__ import print_function, division +import os +from PIL import Image +import numpy as np +from torch.utils.data import Dataset +from .mypath_cihp import Path +from .mypath_pascal import Path as PP +from .mypath_atr import Path as PA +import random +from PIL import ImageFile +ImageFile.LOAD_TRUNCATED_IMAGES = True + +class VOCSegmentation(Dataset): + """ + Pascal dataset + """ + + def __init__(self, + cihp_dir=Path.db_root_dir('cihp'), + split='train', + transform=None, + flip=False, + pascal_dir = PP.db_root_dir('pascal'), + atr_dir = PA.db_root_dir('atr'), + ): + """ + :param cihp_dir: path to CIHP dataset directory + :param pascal_dir: path to PASCAL dataset directory + :param atr_dir: path to ATR dataset directory + :param split: train/val + :param transform: transform to apply + """ + super(VOCSegmentation).__init__() + ## for cihp + self._flip_flag = flip + self._base_dir = cihp_dir + self._image_dir = os.path.join(self._base_dir, 'Images') + self._cat_dir = os.path.join(self._base_dir, 'Category_ids') + self._flip_dir = os.path.join(self._base_dir,'Category_rev_ids') + ## for Pascal + self._base_dir_pascal = pascal_dir + self._image_dir_pascal = os.path.join(self._base_dir_pascal, 'JPEGImages') + self._cat_dir_pascal = os.path.join(self._base_dir_pascal, 'SegmentationPart') + # self._flip_dir_pascal = os.path.join(self._base_dir_pascal, 'Category_rev_ids') + ## for atr + self._base_dir_atr = atr_dir + self._image_dir_atr = os.path.join(self._base_dir_atr, 'JPEGImages') + self._cat_dir_atr = os.path.join(self._base_dir_atr, 'SegmentationClassAug') + self._flip_dir_atr = os.path.join(self._base_dir_atr, 'SegmentationClassAug_rev') + + if isinstance(split, str): + self.split = [split] + else: + split.sort() + self.split = split + + self.transform = transform + + _splits_dir = os.path.join(self._base_dir, 'lists') + _splits_dir_pascal = os.path.join(self._base_dir_pascal, 'list') + _splits_dir_atr = os.path.join(self._base_dir_atr, 'list') + + self.im_ids = [] + self.images = [] + self.categories = [] + self.flip_categories = [] + self.datasets_lbl = [] + + # num + self.num_cihp = 0 + self.num_pascal = 0 + self.num_atr = 0 + # for cihp is 0 + for splt in self.split: + with open(os.path.join(os.path.join(_splits_dir, splt + '_id.txt')), "r") as f: + lines = f.read().splitlines() + self.num_cihp += len(lines) + for ii, line in enumerate(lines): + + _image = os.path.join(self._image_dir, line+'.jpg' ) + _cat = os.path.join(self._cat_dir, line +'.png') + _flip = os.path.join(self._flip_dir,line + '.png') + # print(self._image_dir,_image) + assert os.path.isfile(_image) + # print(_cat) + assert os.path.isfile(_cat) + assert os.path.isfile(_flip) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_cat) + self.flip_categories.append(_flip) + self.datasets_lbl.append(0) + + # for pascal is 1 + for splt in self.split: + if splt == 'test': + splt='val' + with open(os.path.join(os.path.join(_splits_dir_pascal, splt + '_id.txt')), "r") as f: + lines = f.read().splitlines() + self.num_pascal += len(lines) + for ii, line in enumerate(lines): + + _image = os.path.join(self._image_dir_pascal, line+'.jpg' ) + _cat = 
os.path.join(self._cat_dir_pascal, line +'.png') + # _flip = os.path.join(self._flip_dir,line + '.png') + # print(self._image_dir,_image) + assert os.path.isfile(_image) + # print(_cat) + assert os.path.isfile(_cat) + # assert os.path.isfile(_flip) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_cat) + self.flip_categories.append([]) + self.datasets_lbl.append(1) + + # for atr is 2 + for splt in self.split: + with open(os.path.join(os.path.join(_splits_dir_atr, splt + '_id.txt')), "r") as f: + lines = f.read().splitlines() + self.num_atr += len(lines) + for ii, line in enumerate(lines): + _image = os.path.join(self._image_dir_atr, line + '.jpg') + _cat = os.path.join(self._cat_dir_atr, line + '.png') + _flip = os.path.join(self._flip_dir_atr, line + '.png') + # print(self._image_dir,_image) + assert os.path.isfile(_image) + # print(_cat) + assert os.path.isfile(_cat) + assert os.path.isfile(_flip) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_cat) + self.flip_categories.append(_flip) + self.datasets_lbl.append(2) + + assert (len(self.images) == len(self.categories)) + # assert len(self.flip_categories) == len(self.categories) + + # Display stats + print('Number of images in {}: {:d}'.format(split, len(self.images))) + + def __len__(self): + return len(self.images) + + def get_class_num(self): + return self.num_cihp,self.num_pascal,self.num_atr + + + + def __getitem__(self, index): + _img, _target,_lbl= self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target,} + + if self.transform is not None: + sample = self.transform(sample) + sample['pascal'] = _lbl + return sample + + def _make_img_gt_point_pair(self, index): + # Read Image and Target + # _img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32) + # _target = np.array(Image.open(self.categories[index])).astype(np.float32) + + _img = Image.open(self.images[index]).convert('RGB') # return is RGB pic + type_lbl = self.datasets_lbl[index] + if self._flip_flag: + if random.random() < 0.5 : + # _target = Image.open(self.flip_categories[index]) + _img = _img.transpose(Image.FLIP_LEFT_RIGHT) + if type_lbl == 0 or type_lbl == 2: + _target = Image.open(self.flip_categories[index]) + else: + _target = Image.open(self.categories[index]) + _target = _target.transpose(Image.FLIP_LEFT_RIGHT) + else: + _target = Image.open(self.categories[index]) + else: + _target = Image.open(self.categories[index]) + + return _img, _target,type_lbl + + def __str__(self): + return 'datasets(split=' + str(self.split) + ')' + + + + + + + + + + + + +if __name__ == '__main__': + from dataloaders import custom_transforms as tr + from dataloaders.utils import decode_segmap + from torch.utils.data import DataLoader + from torchvision import transforms + import matplotlib.pyplot as plt + + composed_transforms_tr = transforms.Compose([ + # tr.RandomHorizontalFlip(), + tr.RandomSized_new(512), + tr.RandomRotate(15), + tr.ToTensor_()]) + + + + voc_train = VOCSegmentation(split='train', + transform=composed_transforms_tr) + + dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=1) + + for ii, sample in enumerate(dataloader): + if ii >10: + break \ No newline at end of file diff --git a/dataloaders/custom_transforms.py b/dataloaders/custom_transforms.py new file mode 100644 index 0000000..1556a6f --- /dev/null +++ b/dataloaders/custom_transforms.py @@ -0,0 +1,491 @@ +import torch +import math +import numbers +import random +import numpy as np 
+ +from PIL import Image, ImageOps +from torchvision import transforms + +class RandomCrop(object): + def __init__(self, size, padding=0): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size # h, w + self.padding = padding + + def __call__(self, sample): + img, mask = sample['image'], sample['label'] + + if self.padding > 0: + img = ImageOps.expand(img, border=self.padding, fill=0) + mask = ImageOps.expand(mask, border=self.padding, fill=0) + + assert img.size == mask.size + w, h = img.size + th, tw = self.size # target size + if w == tw and h == th: + return {'image': img, + 'label': mask} + if w < tw or h < th: + img = img.resize((tw, th), Image.BILINEAR) + mask = mask.resize((tw, th), Image.NEAREST) + return {'image': img, + 'label': mask} + + x1 = random.randint(0, w - tw) + y1 = random.randint(0, h - th) + img = img.crop((x1, y1, x1 + tw, y1 + th)) + mask = mask.crop((x1, y1, x1 + tw, y1 + th)) + + return {'image': img, + 'label': mask} + +class RandomCrop_new(object): + def __init__(self, size, padding=0): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size # h, w + self.padding = padding + + def __call__(self, sample): + img, mask = sample['image'], sample['label'] + + if self.padding > 0: + img = ImageOps.expand(img, border=self.padding, fill=0) + mask = ImageOps.expand(mask, border=self.padding, fill=0) + + assert img.size == mask.size + w, h = img.size + th, tw = self.size # target size + if w == tw and h == th: + return {'image': img, + 'label': mask} + + new_img = Image.new('RGB',(tw,th),'black') # size is w x h; and 'white' is 255 + new_mask = Image.new('L',(tw,th),'white') # same above + + # if w > tw or h > th + x1 = y1 = 0 + if w > tw: + x1 = random.randint(0,w - tw) + if h > th: + y1 = random.randint(0,h - th) + # crop + img = img.crop((x1,y1, x1 + tw, y1 + th)) + mask = mask.crop((x1,y1, x1 + tw, y1 + th)) + new_img.paste(img,(0,0)) + new_mask.paste(mask,(0,0)) + + # x1 = random.randint(0, w - tw) + # y1 = random.randint(0, h - th) + # img = img.crop((x1, y1, x1 + tw, y1 + th)) + # mask = mask.crop((x1, y1, x1 + tw, y1 + th)) + + return {'image': new_img, + 'label': new_mask} + +class Paste(object): + def __init__(self, size,): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size # h, w + + def __call__(self, sample): + img, mask = sample['image'], sample['label'] + + assert img.size == mask.size + w, h = img.size + th, tw = self.size # target size + assert (w <=tw) and (h <= th) + if w == tw and h == th: + return {'image': img, + 'label': mask} + + new_img = Image.new('RGB',(tw,th),'black') # size is w x h; and 'white' is 255 + new_mask = Image.new('L',(tw,th),'white') # same above + + new_img.paste(img,(0,0)) + new_mask.paste(mask,(0,0)) + + return {'image': new_img, + 'label': new_mask} + +class CenterCrop(object): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + w, h = img.size + th, tw = self.size + x1 = int(round((w - tw) / 2.)) + y1 = int(round((h - th) / 2.)) + img = img.crop((x1, y1, x1 + tw, y1 + th)) + mask = mask.crop((x1, y1, x1 + tw, y1 + th)) + + return {'image': img, + 'label': mask} + +class RandomHorizontalFlip(object): + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + if 
random.random() < 0.5: + img = img.transpose(Image.FLIP_LEFT_RIGHT) + mask = mask.transpose(Image.FLIP_LEFT_RIGHT) + + return {'image': img, + 'label': mask} + +class HorizontalFlip(object): + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + img = img.transpose(Image.FLIP_LEFT_RIGHT) + mask = mask.transpose(Image.FLIP_LEFT_RIGHT) + + return {'image': img, + 'label': mask} + +class HorizontalFlip_only_img(object): + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + img = img.transpose(Image.FLIP_LEFT_RIGHT) + # mask = mask.transpose(Image.FLIP_LEFT_RIGHT) + + return {'image': img, + 'label': mask} + +class RandomHorizontalFlip_cihp(object): + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + if random.random() < 0.5: + img = img.transpose(Image.FLIP_LEFT_RIGHT) + # mask = Image.open() + + return {'image': img, + 'label': mask} + +class Normalize(object): + """Normalize a tensor image with mean and standard deviation. + Args: + mean (tuple): means for each channel. + std (tuple): standard deviations for each channel. + """ + def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)): + self.mean = mean + self.std = std + + def __call__(self, sample): + img = np.array(sample['image']).astype(np.float32) + mask = np.array(sample['label']).astype(np.float32) + img /= 255.0 + img -= self.mean + img /= self.std + + return {'image': img, + 'label': mask} + +class Normalize_255(object): + """Normalize a tensor image with mean and standard deviation. tf use 255. + Args: + mean (tuple): means for each channel. + std (tuple): standard deviations for each channel. + """ + def __init__(self, mean=(123.15, 115.90, 103.06), std=(1., 1., 1.)): + self.mean = mean + self.std = std + + def __call__(self, sample): + img = np.array(sample['image']).astype(np.float32) + mask = np.array(sample['label']).astype(np.float32) + # img = 255.0 + img -= self.mean + img /= self.std + img = img + img = img[[0,3,2,1],...] + return {'image': img, + 'label': mask} + +class Normalize_xception_tf(object): + # def __init__(self): + # self.rgb2bgr = + + def __call__(self, sample): + img = np.array(sample['image']).astype(np.float32) + mask = np.array(sample['label']).astype(np.float32) + img = (img*2.0)/255.0 - 1 + # print(img.shape) + # img = img[[0,3,2,1],...] + return {'image': img, + 'label': mask} + +class Normalize_xception_tf_only_img(object): + # def __init__(self): + # self.rgb2bgr = + + def __call__(self, sample): + img = np.array(sample['image']).astype(np.float32) + # mask = np.array(sample['label']).astype(np.float32) + img = (img*2.0)/255.0 - 1 + # print(img.shape) + # img = img[[0,3,2,1],...] + return {'image': img, + 'label': sample['label']} + +class Normalize_cityscapes(object): + """Normalize a tensor image with mean and standard deviation. + Args: + mean (tuple): means for each channel. + std (tuple): standard deviations for each channel. 
+ """ + def __init__(self, mean=(0., 0., 0.)): + self.mean = mean + + def __call__(self, sample): + img = np.array(sample['image']).astype(np.float32) + mask = np.array(sample['label']).astype(np.float32) + img -= self.mean + img /= 255.0 + + return {'image': img, + 'label': mask} + +class ToTensor_(object): + """Convert ndarrays in sample to Tensors.""" + def __init__(self): + self.rgb2bgr = transforms.Lambda(lambda x:x[[2,1,0],...]) + + def __call__(self, sample): + # swap color axis because + # numpy image: H x W x C + # torch image: C X H X W + img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1)) + mask = np.expand_dims(np.array(sample['label']).astype(np.float32), -1).transpose((2, 0, 1)) + # mask[mask == 255] = 0 + + img = torch.from_numpy(img).float() + img = self.rgb2bgr(img) + mask = torch.from_numpy(mask).float() + + + return {'image': img, + 'label': mask} + +class ToTensor_only_img(object): + """Convert ndarrays in sample to Tensors.""" + def __init__(self): + self.rgb2bgr = transforms.Lambda(lambda x:x[[2,1,0],...]) + + def __call__(self, sample): + # swap color axis because + # numpy image: H x W x C + # torch image: C X H X W + img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1)) + # mask = np.expand_dims(np.array(sample['label']).astype(np.float32), -1).transpose((2, 0, 1)) + # mask[mask == 255] = 0 + + img = torch.from_numpy(img).float() + img = self.rgb2bgr(img) + # mask = torch.from_numpy(mask).float() + + + return {'image': img, + 'label': sample['label']} + +class FixedResize(object): + def __init__(self, size): + self.size = tuple(reversed(size)) # size: (h, w) + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + + assert img.size == mask.size + + img = img.resize(self.size, Image.BILINEAR) + mask = mask.resize(self.size, Image.NEAREST) + + return {'image': img, + 'label': mask} + +class Keep_origin_size_Resize(object): + def __init__(self, max_size, scale=1.0): + self.size = tuple(reversed(max_size)) # size: (h, w) + self.scale = scale + self.paste = Paste(int(max_size[0]*scale)) + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + + assert img.size == mask.size + h, w = self.size + h = int(h*self.scale) + w = int(w*self.scale) + img = img.resize((h, w), Image.BILINEAR) + mask = mask.resize((h, w), Image.NEAREST) + + return self.paste({'image': img, + 'label': mask}) + +class Scale(object): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + w, h = img.size + + if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]): + return {'image': img, + 'label': mask} + oh, ow = self.size + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + + return {'image': img, + 'label': mask} + +class Scale_(object): + def __init__(self, scale): + self.scale = scale + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + w, h = img.size + ow = int(w*self.scale) + oh = int(h*self.scale) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + + return {'image': img, + 'label': mask} + +class Scale_only_img(object): + def __init__(self, scale): + self.scale = scale + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + # assert 
img.size == mask.size + w, h = img.size + ow = int(w*self.scale) + oh = int(h*self.scale) + img = img.resize((ow, oh), Image.BILINEAR) + # mask = mask.resize((ow, oh), Image.NEAREST) + + return {'image': img, + 'label': mask} + +class RandomSizedCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + for attempt in range(10): + area = img.size[0] * img.size[1] + target_area = random.uniform(0.45, 1.0) * area + aspect_ratio = random.uniform(0.5, 2) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if random.random() < 0.5: + w, h = h, w + + if w <= img.size[0] and h <= img.size[1]: + x1 = random.randint(0, img.size[0] - w) + y1 = random.randint(0, img.size[1] - h) + + img = img.crop((x1, y1, x1 + w, y1 + h)) + mask = mask.crop((x1, y1, x1 + w, y1 + h)) + assert (img.size == (w, h)) + + img = img.resize((self.size, self.size), Image.BILINEAR) + mask = mask.resize((self.size, self.size), Image.NEAREST) + + return {'image': img, + 'label': mask} + + # Fallback + scale = Scale(self.size) + crop = CenterCrop(self.size) + sample = crop(scale(sample)) + return sample + +class RandomRotate(object): + def __init__(self, degree): + self.degree = degree + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + rotate_degree = random.random() * 2 * self.degree - self.degree + img = img.rotate(rotate_degree, Image.BILINEAR) + mask = mask.rotate(rotate_degree, Image.NEAREST) + + return {'image': img, + 'label': mask} + +class RandomSized_new(object): + '''what we use is this class to aug''' + def __init__(self, size,scale1=0.5,scale2=2): + self.size = size + # self.scale = Scale(self.size) + self.crop = RandomCrop_new(self.size) + self.small_scale = scale1 + self.big_scale = scale2 + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + + w = int(random.uniform(self.small_scale, self.big_scale) * img.size[0]) + h = int(random.uniform(self.small_scale, self.big_scale) * img.size[1]) + + img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST) + sample = {'image': img, 'label': mask} + # finish resize + return self.crop(sample) +# class Random + +class RandomScale(object): + def __init__(self, limit): + self.limit = limit + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + + scale = random.uniform(self.limit[0], self.limit[1]) + w = int(scale * img.size[0]) + h = int(scale * img.size[1]) + + img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST) + + return {'image': img, 'label': mask} \ No newline at end of file diff --git a/dataloaders/mypath_atr.py b/dataloaders/mypath_atr.py new file mode 100644 index 0000000..1701e5b --- /dev/null +++ b/dataloaders/mypath_atr.py @@ -0,0 +1,8 @@ +class Path(object): + @staticmethod + def db_root_dir(database): + if database == 'atr': + return './data/datasets/ATR/' # folder that contains atr/. 
+ else: + print('Database {} not available.'.format(database)) + raise NotImplementedError diff --git a/dataloaders/mypath_cihp.py b/dataloaders/mypath_cihp.py new file mode 100644 index 0000000..02760eb --- /dev/null +++ b/dataloaders/mypath_cihp.py @@ -0,0 +1,8 @@ +class Path(object): + @staticmethod + def db_root_dir(database): + if database == 'cihp': + return './data/datasets/CIHP_4w/' + else: + print('Database {} not available.'.format(database)) + raise NotImplementedError diff --git a/dataloaders/mypath_pascal.py b/dataloaders/mypath_pascal.py new file mode 100644 index 0000000..aec4735 --- /dev/null +++ b/dataloaders/mypath_pascal.py @@ -0,0 +1,8 @@ +class Path(object): + @staticmethod + def db_root_dir(database): + if database == 'pascal': + return './data/datasets/pascal/' # folder that contains pascal/. + else: + print('Database {} not available.'.format(database)) + raise NotImplementedError diff --git a/dataloaders/pascal.py b/dataloaders/pascal.py new file mode 100644 index 0000000..bffd1ca --- /dev/null +++ b/dataloaders/pascal.py @@ -0,0 +1,106 @@ +from __future__ import print_function, division +import os +from PIL import Image +from torch.utils.data import Dataset +from .mypath_pascal import Path + +class VOCSegmentation(Dataset): + """ + Pascal dataset + """ + + def __init__(self, + base_dir=Path.db_root_dir('pascal'), + split='train', + transform=None + ): + """ + :param base_dir: path to PASCAL dataset directory + :param split: train/val + :param transform: transform to apply + """ + super(VOCSegmentation).__init__() + self._base_dir = base_dir + self._image_dir = os.path.join(self._base_dir, 'JPEGImages') + self._cat_dir = os.path.join(self._base_dir, 'SegmentationPart') + + if isinstance(split, str): + self.split = [split] + else: + split.sort() + self.split = split + + self.transform = transform + + _splits_dir = os.path.join(self._base_dir, 'list') + + self.im_ids = [] + self.images = [] + self.categories = [] + + for splt in self.split: + with open(os.path.join(os.path.join(_splits_dir, splt + '_id.txt')), "r") as f: + lines = f.read().splitlines() + + for ii, line in enumerate(lines): + + _image = os.path.join(self._image_dir, line+'.jpg' ) + _cat = os.path.join(self._cat_dir, line +'.png') + # print(self._image_dir,_image) + assert os.path.isfile(_image) + # print(_cat) + assert os.path.isfile(_cat) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_cat) + + assert (len(self.images) == len(self.categories)) + + # Display stats + print('Number of images in {}: {:d}'.format(split, len(self.images))) + + def __len__(self): + return len(self.images) + + + def __getitem__(self, index): + _img, _target= self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target} + + if self.transform is not None: + sample = self.transform(sample) + + return sample + + def _make_img_gt_point_pair(self, index): + # Read Image and Target + # _img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32) + # _target = np.array(Image.open(self.categories[index])).astype(np.float32) + + _img = Image.open(self.images[index]).convert('RGB') # return is RGB pic + _target = Image.open(self.categories[index]) + + return _img, _target + + def __str__(self): + return 'PASCAL(split=' + str(self.split) + ')' + +class test_segmentation(VOCSegmentation): + def __init__(self,base_dir=Path.db_root_dir('pascal'), + split='train', + transform=None, + flip=True): + super(test_segmentation, 
self).__init__(base_dir=base_dir,split=split,transform=transform) + self._flip_flag = flip + + def __getitem__(self, index): + _img, _target= self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target} + + if self.transform is not None: + sample = self.transform(sample) + + return sample + + + diff --git a/eval_cihp.sh b/eval_cihp.sh new file mode 100644 index 0000000..4f72122 --- /dev/null +++ b/eval_cihp.sh @@ -0,0 +1,6 @@ +python ./exp/test/eval_show_pascal2cihp.py \ + --batch 1 --gpus 1 --classes 20 \ + --gt_path './data/datasets/CIHP_4w/Category_ids/' \ + --txt_file './data/datasets/CIHP_4w/lists/test_id.txt' \ + --loadmodel './data/pretrained_model/inference.pth' + diff --git a/eval_pascal.sh b/eval_pascal.sh new file mode 100644 index 0000000..dc694d4 --- /dev/null +++ b/eval_pascal.sh @@ -0,0 +1,6 @@ +python ./exp/test/eval_show_cihp2pascal.py \ + --batch 1 --gpus 1 --classes 20 \ + --gt_path './data/datasets/CIHP_4w/Category_ids/' \ + --txt_file './data/datasets/CIHP_4w/lists/test_id.txt' \ + --loadmodel './data/pretrained_model/cihp2pascal.pth' + diff --git a/exp/inference/inference.py b/exp/inference/inference.py new file mode 100644 index 0000000..adf4113 --- /dev/null +++ b/exp/inference/inference.py @@ -0,0 +1,203 @@ +import socket +import timeit +import numpy as np +from PIL import Image +from datetime import datetime +import os +import sys +from collections import OrderedDict +sys.path.append('../../') +# PyTorch includes +import torch +from torch.autograd import Variable +from torchvision import transforms +import cv2 + + +# Custom includes +from networks import deeplab_xception_transfer, graph +from dataloaders import custom_transforms as tr + +# +import argparse +import torch.nn.functional as F + +label_colours = [(0,0,0) + , (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0) + , (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)] + + +def flip(x, dim): + indices = [slice(None)] * x.dim() + indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, + dtype=torch.long, device=x.device) + return x[tuple(indices)] + +def flip_cihp(tail_list): + ''' + + :param tail_list: tail_list size is 1 x n_class x h x w + :return: + ''' + # tail_list = tail_list[0] + tail_list_rev = [None] * 20 + for xx in range(14): + tail_list_rev[xx] = tail_list[xx].unsqueeze(0) + tail_list_rev[14] = tail_list[15].unsqueeze(0) + tail_list_rev[15] = tail_list[14].unsqueeze(0) + tail_list_rev[16] = tail_list[17].unsqueeze(0) + tail_list_rev[17] = tail_list[16].unsqueeze(0) + tail_list_rev[18] = tail_list[19].unsqueeze(0) + tail_list_rev[19] = tail_list[18].unsqueeze(0) + return torch.cat(tail_list_rev,dim=0) + + +def decode_labels(mask, num_images=1, num_classes=20): + """Decode batch of segmentation masks. + + Args: + mask: result of inference after taking argmax. + num_images: number of images to decode from the batch. + num_classes: number of classes to predict (including background). + + Returns: + A batch with num_images RGB images of the same size as the input. + """ + n, h, w = mask.shape + assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% ( + n, num_images) + outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) + for i in range(num_images): + img = Image.new('RGB', (len(mask[i, 0]), len(mask[i]))) + pixels = img.load() + for j_, j in enumerate(mask[i, :, :]): + for k_, k in enumerate(j): + if k < num_classes: + pixels[k_, j_] = label_colours[k] + outputs[i] = np.array(img) + return outputs + +def read_img(img_path): + _img = Image.open(img_path).convert('RGB') # return is RGB pic + return _img + +def img_transform(img, transform=None): + sample = {'image': img, 'label': 0} + + sample = transform(sample) + return sample + +def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True): + ''' + + :param net: + :param img_path: + :param output_path: + :return: + ''' + # adj + adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float() + adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3) + + adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float()) + adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda() + + cihp_adj = graph.preprocess_adj(graph.cihp_graph) + adj3_ = Variable(torch.from_numpy(cihp_adj).float()) + adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda() + + # multi-scale + scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75] + img = read_img(img_path) + testloader_list = [] + testloader_flip_list = [] + for pv in scale_list: + composed_transforms_ts = transforms.Compose([ + tr.Scale_only_img(pv), + tr.Normalize_xception_tf_only_img(), + tr.ToTensor_only_img()]) + + composed_transforms_ts_flip = transforms.Compose([ + tr.Scale_only_img(pv), + tr.HorizontalFlip_only_img(), + tr.Normalize_xception_tf_only_img(), + tr.ToTensor_only_img()]) + + testloader_list.append(img_transform(img, composed_transforms_ts)) + # print(img_transform(img, composed_transforms_ts)) + testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip)) + # print(testloader_list) + start_time = timeit.default_timer() + # One testing epoch + net.eval() + # 1 0.5 0.75 1.25 1.5 1.75 ; flip: + + for iii, sample_batched in enumerate(zip(testloader_list, testloader_flip_list)): + inputs, labels = sample_batched[0]['image'], sample_batched[0]['label'] + inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label'] + inputs = inputs.unsqueeze(0) + inputs_f = inputs_f.unsqueeze(0) + inputs = torch.cat((inputs, inputs_f), dim=0) + if iii == 0: + _, _, h, w = inputs.size() + # assert inputs.size() == inputs_f.size() + + # Forward pass of the mini-batch + inputs = Variable(inputs, requires_grad=False) + + with torch.no_grad(): + if use_gpu >= 0: + inputs = inputs.cuda() + # outputs = net.forward(inputs) + outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda()) + outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2 + outputs = outputs.unsqueeze(0) + + if iii > 0: + outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True) + outputs_final = outputs_final + outputs + else: + outputs_final = outputs.clone() + ################ plot pic + predictions = torch.max(outputs_final, 1)[1] + results = predictions.cpu().numpy() + vis_res = decode_labels(results) + + parsing_im = Image.fromarray(vis_res[0]) + parsing_im.save(output_path+'/{}.png'.format(output_name)) + cv2.imwrite(output_path+'/{}_gray.png'.format(output_name), results[0, :, :]) + + end_time = timeit.default_timer() + print('time used for the multi-scale image inference' + ' is :' + str(end_time - start_time)) + 
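+# Example invocation (the checkpoint path and output name below are illustrative;
+# see the README for the download link to the trained CIHP model):
+#   python exp/inference/inference.py \
+#       --loadmodel ./data/pretrained_model/inference.pth \
+#       --img_path ./img/messi.jpg \
+#       --output_path ./img/ \
+#       --output_name messi_output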
+if __name__ == '__main__': + '''argparse begin''' + parser = argparse.ArgumentParser() + # parser.add_argument('--loadmodel',default=None,type=str) + parser.add_argument('--loadmodel', default='', type=str) + parser.add_argument('--img_path', default='', type=str) + parser.add_argument('--output_path', default='', type=str) + parser.add_argument('--output_name', default='', type=str) + parser.add_argument('--use_gpu', default=1, type=int) + opts = parser.parse_args() + + net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20, + hidden_layers=128, + source_classes=7, ) + if not opts.loadmodel == '': + x = torch.load(opts.loadmodel) + net.load_source_model(x) + print('load model:', opts.loadmodel) + else: + print('no model load !!!!!!!!') + raise RuntimeError('No model!!!!') + + if opts.use_gpu >0 : + net.cuda() + use_gpu = True + else: + use_gpu = False + raise RuntimeError('must use the gpu!!!!') + + inference(net=net, img_path=opts.img_path,output_path=opts.output_path , output_name=opts.output_name, use_gpu=use_gpu) + diff --git a/exp/test/__init__.py b/exp/test/__init__.py new file mode 100644 index 0000000..a09a463 --- /dev/null +++ b/exp/test/__init__.py @@ -0,0 +1,3 @@ +from .test_from_disk import eval_ + +__all__ = ['eval_'] \ No newline at end of file diff --git a/exp/test/eval_show_cihp2pascal.py b/exp/test/eval_show_cihp2pascal.py new file mode 100644 index 0000000..d38bbb2 --- /dev/null +++ b/exp/test/eval_show_cihp2pascal.py @@ -0,0 +1,268 @@ +import socket +import timeit +import numpy as np +from PIL import Image +from datetime import datetime +import os +import sys +import glob +from collections import OrderedDict +sys.path.append('../../') +# PyTorch includes +import torch +import pdb +from torch.autograd import Variable +import torch.optim as optim +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.utils import make_grid +import cv2 + +# Tensorboard include +# from tensorboardX import SummaryWriter + +# Custom includes +from dataloaders import pascal +from utils import util +from networks import deeplab_xception_transfer, graph +from dataloaders import custom_transforms as tr + +# +import argparse +import copy +import torch.nn.functional as F +from test_from_disk import eval_ + + +gpu_id = 1 + +label_colours = [(0,0,0) + # 0=background + ,(128,0,0), (0,128,0), (128,128,0), (0,0,128), (128,0,128), (0,128,128)] + + +def flip(x, dim): + indices = [slice(None)] * x.dim() + indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, + dtype=torch.long, device=x.device) + return x[tuple(indices)] + +# def flip_cihp(tail_list): +# ''' +# +# :param tail_list: tail_list size is 1 x n_class x h x w +# :return: +# ''' +# # tail_list = tail_list[0] +# tail_list_rev = [None] * 20 +# for xx in range(14): +# tail_list_rev[xx] = tail_list[xx].unsqueeze(0) +# tail_list_rev[14] = tail_list[15].unsqueeze(0) +# tail_list_rev[15] = tail_list[14].unsqueeze(0) +# tail_list_rev[16] = tail_list[17].unsqueeze(0) +# tail_list_rev[17] = tail_list[16].unsqueeze(0) +# tail_list_rev[18] = tail_list[19].unsqueeze(0) +# tail_list_rev[19] = tail_list[18].unsqueeze(0) +# return torch.cat(tail_list_rev,dim=0) + +def decode_labels(mask, num_images=1, num_classes=20): + """Decode batch of segmentation masks. + + Args: + mask: result of inference after taking argmax. + num_images: number of images to decode from the batch. + num_classes: number of classes to predict (including background). 
+ + Returns: + A batch with num_images RGB images of the same size as the input. + """ + n, h, w = mask.shape + assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images) + outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) + for i in range(num_images): + img = Image.new('RGB', (len(mask[i, 0]), len(mask[i]))) + pixels = img.load() + for j_, j in enumerate(mask[i, :, :]): + for k_, k in enumerate(j): + if k < num_classes: + pixels[k_,j_] = label_colours[k] + outputs[i] = np.array(img) + return outputs + +def get_parser(): + '''argparse begin''' + parser = argparse.ArgumentParser() + LookupChoices = type('', (argparse.Action,), dict(__call__=lambda a, p, n, v, o: setattr(n, a.dest, a.choices[v]))) + + parser.add_argument('--epochs', default=100, type=int) + parser.add_argument('--batch', default=16, type=int) + parser.add_argument('--lr', default=1e-7, type=float) + parser.add_argument('--numworker', default=12, type=int) + parser.add_argument('--step', default=30, type=int) + # parser.add_argument('--loadmodel',default=None,type=str) + parser.add_argument('--classes', default=7, type=int) + parser.add_argument('--testepoch', default=10, type=int) + parser.add_argument('--loadmodel', default='', type=str) + parser.add_argument('--txt_file', default='', type=str) + parser.add_argument('--hidden_layers', default=128, type=int) + parser.add_argument('--gpus', default=4, type=int) + parser.add_argument('--output_path', default='./results/', type=str) + parser.add_argument('--gt_path', default='./results/', type=str) + opts = parser.parse_args() + return opts + + +def main(opts): + adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float() + adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda() + + adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float()) + adj1_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda() + + cihp_adj = graph.preprocess_adj(graph.cihp_graph) + adj3_ = Variable(torch.from_numpy(cihp_adj).float()) + adj3_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda() + + p = OrderedDict() # Parameters to include in report + p['trainBatch'] = opts.batch # Training batch size + p['nAveGrad'] = 1 # Average the gradient of several iterations + p['lr'] = opts.lr # Learning rate + p['lrFtr'] = 1e-5 + p['lraspp'] = 1e-5 + p['lrpro'] = 1e-5 + p['lrdecoder'] = 1e-5 + p['lrother'] = 1e-5 + p['wd'] = 5e-4 # Weight decay + p['momentum'] = 0.9 # Momentum + p['epoch_size'] = 10 # How many epochs to change learning rate + p['num_workers'] = opts.numworker + backbone = 'xception' # Use xception or resnet as feature extractor, + + with open(opts.txt_file, 'r') as f: + img_list = f.readlines() + + max_id = 0 + save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__))) + exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1] + runs = glob.glob(os.path.join(save_dir_root, 'run', 'run_*')) + for r in runs: + run_id = int(r.split('_')[-1]) + if run_id >= max_id: + max_id = run_id + 1 + # run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0 + + # Network definition + if backbone == 'xception': + net = deeplab_xception_transfer.deeplab_xception_transfer_projection(n_classes=opts.classes, os=16, + hidden_layers=opts.hidden_layers, source_classes=20, + ) + elif backbone == 'resnet': + # net = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=7, os=16, pretrained=True) + raise NotImplementedError + else: + raise NotImplementedError + 
+ if gpu_id >= 0: + net.cuda() + + # net load weights + if not opts.loadmodel =='': + x = torch.load(opts.loadmodel) + net.load_source_model(x) + print('load model:' ,opts.loadmodel) + else: + print('no model load !!!!!!!!') + + ## multi scale + scale_list=[1,0.5,0.75,1.25,1.5,1.75] + testloader_list = [] + testloader_flip_list = [] + for pv in scale_list: + composed_transforms_ts = transforms.Compose([ + tr.Scale_(pv), + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + composed_transforms_ts_flip = transforms.Compose([ + tr.Scale_(pv), + tr.HorizontalFlip(), + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts) + voc_val_f = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts_flip) + + testloader = DataLoader(voc_val, batch_size=1, shuffle=False, num_workers=p['num_workers']) + testloader_flip = DataLoader(voc_val_f, batch_size=1, shuffle=False, num_workers=p['num_workers']) + + testloader_list.append(copy.deepcopy(testloader)) + testloader_flip_list.append(copy.deepcopy(testloader_flip)) + + print("Eval Network") + + if not os.path.exists(opts.output_path + 'pascal_output_vis/'): + os.makedirs(opts.output_path + 'pascal_output_vis/') + if not os.path.exists(opts.output_path + 'pascal_output/'): + os.makedirs(opts.output_path + 'pascal_output/') + + start_time = timeit.default_timer() + # One testing epoch + total_iou = 0.0 + net.eval() + for ii, large_sample_batched in enumerate(zip(*testloader_list, *testloader_flip_list)): + print(ii) + #1 0.5 0.75 1.25 1.5 1.75 ; flip: + sample1 = large_sample_batched[:6] + sample2 = large_sample_batched[6:] + for iii,sample_batched in enumerate(zip(sample1,sample2)): + inputs, labels = sample_batched[0]['image'], sample_batched[0]['label'] + inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label'] + inputs = torch.cat((inputs,inputs_f),dim=0) + if iii == 0: + _,_,h,w = inputs.size() + # assert inputs.size() == inputs_f.size() + + # Forward pass of the mini-batch + inputs, labels = Variable(inputs, requires_grad=False), Variable(labels) + + with torch.no_grad(): + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + # outputs = net.forward(inputs) + # pdb.set_trace() + outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda()) + outputs = (outputs[0] + flip(outputs[1], dim=-1)) / 2 + outputs = outputs.unsqueeze(0) + + if iii>0: + outputs = F.upsample(outputs,size=(h,w),mode='bilinear',align_corners=True) + outputs_final = outputs_final + outputs + else: + outputs_final = outputs.clone() + ################ plot pic + predictions = torch.max(outputs_final, 1)[1] + prob_predictions = torch.max(outputs_final,1)[0] + results = predictions.cpu().numpy() + prob_results = prob_predictions.cpu().numpy() + vis_res = decode_labels(results) + + parsing_im = Image.fromarray(vis_res[0]) + parsing_im.save(opts.output_path + 'pascal_output_vis/{}.png'.format(img_list[ii][:-1])) + cv2.imwrite(opts.output_path + 'pascal_output/{}.png'.format(img_list[ii][:-1]), results[0,:,:]) + # np.save('../../cihp_prob_output/{}.npy'.format(img_list[ii][:-1]), prob_results[0, :, :]) + # pred_list.append(predictions.cpu()) + # label_list.append(labels.squeeze(1).cpu()) + # loss = criterion(outputs, labels, batch_average=True) + # running_loss_ts += loss.item() + + # total_iou += utils.get_iou(predictions, labels) + end_time = timeit.default_timer() + print('time use for '+str(ii) + ' is :' + str(end_time - start_time)) + + # Eval + 
pred_path = opts.output_path + 'pascal_output/' + eval_(pred_path=pred_path, gt_path=opts.gt_path,classes=opts.classes, txt_file=opts.txt_file) + +if __name__ == '__main__': + opts = get_parser() + main(opts) \ No newline at end of file diff --git a/exp/test/eval_show_pascal2cihp.py b/exp/test/eval_show_pascal2cihp.py new file mode 100644 index 0000000..cc30173 --- /dev/null +++ b/exp/test/eval_show_pascal2cihp.py @@ -0,0 +1,268 @@ +import socket +import timeit +import numpy as np +from PIL import Image +from datetime import datetime +import os +import sys +import glob +from collections import OrderedDict +sys.path.append('../../') +# PyTorch includes +import torch +import pdb +from torch.autograd import Variable +import torch.optim as optim +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.utils import make_grid +import cv2 + +# Tensorboard include +# from tensorboardX import SummaryWriter + +# Custom includes +from dataloaders import cihp +from utils import util +from networks import deeplab_xception_transfer, graph +from dataloaders import custom_transforms as tr + +# +import argparse +import copy +import torch.nn.functional as F +from test_from_disk import eval_ + + +gpu_id = 1 + +label_colours = [(0,0,0) + , (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0) + , (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)] + + +def flip(x, dim): + indices = [slice(None)] * x.dim() + indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, + dtype=torch.long, device=x.device) + return x[tuple(indices)] + +def flip_cihp(tail_list): + ''' + + :param tail_list: tail_list size is 1 x n_class x h x w + :return: + ''' + # tail_list = tail_list[0] + tail_list_rev = [None] * 20 + for xx in range(14): + tail_list_rev[xx] = tail_list[xx].unsqueeze(0) + tail_list_rev[14] = tail_list[15].unsqueeze(0) + tail_list_rev[15] = tail_list[14].unsqueeze(0) + tail_list_rev[16] = tail_list[17].unsqueeze(0) + tail_list_rev[17] = tail_list[16].unsqueeze(0) + tail_list_rev[18] = tail_list[19].unsqueeze(0) + tail_list_rev[19] = tail_list[18].unsqueeze(0) + return torch.cat(tail_list_rev,dim=0) + +def decode_labels(mask, num_images=1, num_classes=20): + """Decode batch of segmentation masks. + + Args: + mask: result of inference after taking argmax. + num_images: number of images to decode from the batch. + num_classes: number of classes to predict (including background). + + Returns: + A batch with num_images RGB images of the same size as the input. + """ + n, h, w = mask.shape + assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% (n, num_images) + outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) + for i in range(num_images): + img = Image.new('RGB', (len(mask[i, 0]), len(mask[i]))) + pixels = img.load() + for j_, j in enumerate(mask[i, :, :]): + for k_, k in enumerate(j): + if k < num_classes: + pixels[k_,j_] = label_colours[k] + outputs[i] = np.array(img) + return outputs + +def get_parser(): + '''argparse begin''' + parser = argparse.ArgumentParser() + LookupChoices = type('', (argparse.Action,), dict(__call__=lambda a, p, n, v, o: setattr(n, a.dest, a.choices[v]))) + + parser.add_argument('--epochs', default=100, type=int) + parser.add_argument('--batch', default=16, type=int) + parser.add_argument('--lr', default=1e-7, type=float) + parser.add_argument('--numworker', default=12, type=int) + parser.add_argument('--step', default=30, type=int) + # parser.add_argument('--loadmodel',default=None,type=str) + parser.add_argument('--classes', default=7, type=int) + parser.add_argument('--testepoch', default=10, type=int) + parser.add_argument('--loadmodel', default='', type=str) + parser.add_argument('--txt_file', default='', type=str) + parser.add_argument('--hidden_layers', default=128, type=int) + parser.add_argument('--gpus', default=4, type=int) + parser.add_argument('--output_path', default='./results/', type=str) + parser.add_argument('--gt_path', default='./results/', type=str) + opts = parser.parse_args() + return opts + + +def main(opts): + adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float() + adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3) + + adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float()) + adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda() + + cihp_adj = graph.preprocess_adj(graph.cihp_graph) + adj3_ = Variable(torch.from_numpy(cihp_adj).float()) + adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda() + + p = OrderedDict() # Parameters to include in report + p['trainBatch'] = opts.batch # Training batch size + p['nAveGrad'] = 1 # Average the gradient of several iterations + p['lr'] = opts.lr # Learning rate + p['lrFtr'] = 1e-5 + p['lraspp'] = 1e-5 + p['lrpro'] = 1e-5 + p['lrdecoder'] = 1e-5 + p['lrother'] = 1e-5 + p['wd'] = 5e-4 # Weight decay + p['momentum'] = 0.9 # Momentum + p['epoch_size'] = 10 # How many epochs to change learning rate + p['num_workers'] = opts.numworker + backbone = 'xception' # Use xception or resnet as feature extractor, + + with open(opts.txt_file, 'r') as f: + img_list = f.readlines() + + max_id = 0 + save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__))) + exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1] + runs = glob.glob(os.path.join(save_dir_root, 'run', 'run_*')) + for r in runs: + run_id = int(r.split('_')[-1]) + if run_id >= max_id: + max_id = run_id + 1 + # run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0 + + # Network definition + if backbone == 'xception': + net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=opts.classes, os=16, + hidden_layers=opts.hidden_layers, source_classes=7, + ) + elif backbone == 'resnet': + # net = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=7, os=16, pretrained=True) + raise NotImplementedError + else: + raise NotImplementedError + + if gpu_id >= 0: + net.cuda() + + # net load weights + if not opts.loadmodel =='': + x = torch.load(opts.loadmodel) + net.load_source_model(x) + print('load model:' ,opts.loadmodel) + else: 
+ print('no model load !!!!!!!!') + + ## multi scale + scale_list=[1,0.5,0.75,1.25,1.5,1.75] + testloader_list = [] + testloader_flip_list = [] + for pv in scale_list: + composed_transforms_ts = transforms.Compose([ + tr.Scale_(pv), + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + composed_transforms_ts_flip = transforms.Compose([ + tr.Scale_(pv), + tr.HorizontalFlip(), + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + voc_val = cihp.VOCSegmentation(split='test', transform=composed_transforms_ts) + voc_val_f = cihp.VOCSegmentation(split='test', transform=composed_transforms_ts_flip) + + testloader = DataLoader(voc_val, batch_size=1, shuffle=False, num_workers=p['num_workers']) + testloader_flip = DataLoader(voc_val_f, batch_size=1, shuffle=False, num_workers=p['num_workers']) + + testloader_list.append(copy.deepcopy(testloader)) + testloader_flip_list.append(copy.deepcopy(testloader_flip)) + + print("Eval Network") + + if not os.path.exists(opts.output_path + 'cihp_output_vis/'): + os.makedirs(opts.output_path + 'cihp_output_vis/') + if not os.path.exists(opts.output_path + 'cihp_output/'): + os.makedirs(opts.output_path + 'cihp_output/') + + start_time = timeit.default_timer() + # One testing epoch + total_iou = 0.0 + net.eval() + for ii, large_sample_batched in enumerate(zip(*testloader_list, *testloader_flip_list)): + print(ii) + #1 0.5 0.75 1.25 1.5 1.75 ; flip: + sample1 = large_sample_batched[:6] + sample2 = large_sample_batched[6:] + for iii,sample_batched in enumerate(zip(sample1,sample2)): + inputs, labels = sample_batched[0]['image'], sample_batched[0]['label'] + inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label'] + inputs = torch.cat((inputs,inputs_f),dim=0) + if iii == 0: + _,_,h,w = inputs.size() + # assert inputs.size() == inputs_f.size() + + # Forward pass of the mini-batch + inputs, labels = Variable(inputs, requires_grad=False), Variable(labels) + + with torch.no_grad(): + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + # outputs = net.forward(inputs) + # pdb.set_trace() + outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda()) + outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2 + outputs = outputs.unsqueeze(0) + + if iii>0: + outputs = F.upsample(outputs,size=(h,w),mode='bilinear',align_corners=True) + outputs_final = outputs_final + outputs + else: + outputs_final = outputs.clone() + ################ plot pic + predictions = torch.max(outputs_final, 1)[1] + prob_predictions = torch.max(outputs_final,1)[0] + results = predictions.cpu().numpy() + prob_results = prob_predictions.cpu().numpy() + vis_res = decode_labels(results) + + parsing_im = Image.fromarray(vis_res[0]) + parsing_im.save(opts.output_path + 'cihp_output_vis/{}.png'.format(img_list[ii][:-1])) + cv2.imwrite(opts.output_path + 'cihp_output/{}.png'.format(img_list[ii][:-1]), results[0,:,:]) + # np.save('../../cihp_prob_output/{}.npy'.format(img_list[ii][:-1]), prob_results[0, :, :]) + # pred_list.append(predictions.cpu()) + # label_list.append(labels.squeeze(1).cpu()) + # loss = criterion(outputs, labels, batch_average=True) + # running_loss_ts += loss.item() + + # total_iou += utils.get_iou(predictions, labels) + end_time = timeit.default_timer() + print('time use for '+str(ii) + ' is :' + str(end_time - start_time)) + + # Eval + pred_path = opts.output_path + 'cihp_output/' + eval_(pred_path=pred_path, gt_path=opts.gt_path,classes=opts.classes, txt_file=opts.txt_file) + +if __name__ == '__main__': + opts = get_parser() + 
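+ # Example invocation (illustrative only; every path is a placeholder for your own
+ # checkpoint, CIHP test-id list, and ground-truth label directory):
+ #   python exp/test/eval_show_pascal2cihp.py \
+ #       --loadmodel /path_to_trained_model.pth \
+ #       --txt_file /path_to_cihp_test_id_list.txt \
+ #       --gt_path /path_to_cihp_gt_labels/ \
+ #       --output_path ./results/ --classes 20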
main(opts) \ No newline at end of file diff --git a/exp/test/test_from_disk.py b/exp/test/test_from_disk.py new file mode 100644 index 0000000..2b72604 --- /dev/null +++ b/exp/test/test_from_disk.py @@ -0,0 +1,65 @@ +import sys +sys.path.append('./') +# PyTorch includes +import torch +import numpy as np + +from utils import test_human +from PIL import Image + +# +import argparse + +def get_parser(): + '''argparse begin''' + parser = argparse.ArgumentParser() + LookupChoices = type('', (argparse.Action,), dict(__call__=lambda a, p, n, v, o: setattr(n, a.dest, a.choices[v]))) + + parser.add_argument('--epochs', default=100, type=int) + parser.add_argument('--batch', default=16, type=int) + parser.add_argument('--lr', default=1e-7, type=float) + parser.add_argument('--numworker',default=12,type=int) + parser.add_argument('--freezeBN', choices=dict(true=True, false=False), default=True, action=LookupChoices) + parser.add_argument('--step', default=30, type=int) + parser.add_argument('--txt_file',default=None,type=str) + parser.add_argument('--pred_path',default=None,type=str) + parser.add_argument('--gt_path',default=None,type=str) + parser.add_argument('--classes', default=7, type=int) + parser.add_argument('--testepoch', default=10, type=int) + opts = parser.parse_args() + return opts + +def eval_(pred_path, gt_path, classes, txt_file): + pred_path = pred_path + gt_path = gt_path + + with open(txt_file,) as f: + lines = f.readlines() + lines = [x.strip() for x in lines] + + output_list = [] + label_list = [] + for i,file in enumerate(lines): + print(i) + file_name = file + '.png' + try: + predict_pic = np.array(Image.open(pred_path+file_name)) + gt_pic = np.array(Image.open(gt_path+file_name)) + output_list.append(torch.from_numpy(predict_pic)) + label_list.append(torch.from_numpy(gt_pic)) + except: + print(file_name,flush=True) + raise RuntimeError('no predict/gt image.') + # gt_pic = np.array(Image.open(gt_path + file_name)) + # output_list.append(torch.from_numpy(gt_pic)) + # label_list.append(torch.from_numpy(gt_pic)) + + + miou = test_human.get_iou_from_list(output_list, label_list, n_cls=classes) + + print('Validation:') + print('MIoU: %f\n' % miou) + +if __name__ == '__main__': + opts = get_parser() + eval_(pred_path=opts.pred_path, gt_path=opts.gt_path, classes=opts.classes, txt_file=opts.txt_file) \ No newline at end of file diff --git a/exp/transfer/train_cihp_from_pascal.py b/exp/transfer/train_cihp_from_pascal.py new file mode 100644 index 0000000..30e8df4 --- /dev/null +++ b/exp/transfer/train_cihp_from_pascal.py @@ -0,0 +1,331 @@ +import socket +import timeit +from datetime import datetime +import os +import sys +import glob +import numpy as np +from collections import OrderedDict +sys.path.append('../../') +sys.path.append('../../networks/') +# PyTorch includes +import torch +from torch.autograd import Variable +import torch.optim as optim +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.utils import make_grid + + +# Tensorboard include +from tensorboardX import SummaryWriter + +# Custom includes +from dataloaders import cihp +from utils import util,get_iou_from_list +from networks import deeplab_xception_transfer, graph +from dataloaders import custom_transforms as tr + +# +import argparse + +gpu_id = 0 + +nEpochs = 100 # Number of epochs for training +resume_epoch = 0 # Default is 0, change if want to resume + +def flip(x, dim): + indices = [slice(None)] * x.dim() + indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, + 
dtype=torch.long, device=x.device) + return x[tuple(indices)] + +def flip_cihp(tail_list): + ''' + + :param tail_list: tail_list size is 1 x n_class x h x w + :return: + ''' + # tail_list = tail_list[0] + tail_list_rev = [None] * 20 + for xx in range(14): + tail_list_rev[xx] = tail_list[xx].unsqueeze(0) + tail_list_rev[14] = tail_list[15].unsqueeze(0) + tail_list_rev[15] = tail_list[14].unsqueeze(0) + tail_list_rev[16] = tail_list[17].unsqueeze(0) + tail_list_rev[17] = tail_list[16].unsqueeze(0) + tail_list_rev[18] = tail_list[19].unsqueeze(0) + tail_list_rev[19] = tail_list[18].unsqueeze(0) + return torch.cat(tail_list_rev,dim=0) + +def get_parser(): + '''argparse begin''' + parser = argparse.ArgumentParser() + LookupChoices = type('', (argparse.Action,), dict(__call__=lambda a, p, n, v, o: setattr(n, a.dest, a.choices[v]))) + + parser.add_argument('--epochs', default=100, type=int) + parser.add_argument('--batch', default=16, type=int) + parser.add_argument('--lr', default=1e-7, type=float) + parser.add_argument('--numworker',default=12,type=int) + parser.add_argument('--freezeBN', choices=dict(true=True, false=False), default=True, action=LookupChoices) + parser.add_argument('--step', default=10, type=int) + parser.add_argument('--classes', default=7, type=int) + parser.add_argument('--testInterval', default=10, type=int) + parser.add_argument('--loadmodel',default='',type=str) + parser.add_argument('--pretrainedModel', default='', type=str) + parser.add_argument('--hidden_layers',default=128,type=int) + parser.add_argument('--gpus',default=4, type=int) + + opts = parser.parse_args() + return opts + +def get_graphs(opts): + adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float() + adj2 = adj2_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 20).transpose(2, 3).cuda() + adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).transpose(2, 3) + + adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float()) + adj3 = adj1_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 7).cuda() + adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7) + + # adj2 = torch.from_numpy(graph.cihp2pascal_adj).float() + # adj2 = adj2.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 20) + cihp_adj = graph.preprocess_adj(graph.cihp_graph) + adj3_ = Variable(torch.from_numpy(cihp_adj).float()) + adj1 = adj3_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 20, 20).cuda() + adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20) + train_graph = [adj1, adj2, adj3] + test_graph = [adj1_test, adj2_test, adj3_test] + return train_graph, test_graph + + +def val_cihp(net_, testloader, testloader_flip, test_graph, epoch, writer, criterion, classes=20): + adj1_test, adj2_test, adj3_test = test_graph + num_img_ts = len(testloader) + net_.eval() + pred_list = [] + label_list = [] + running_loss_ts = 0.0 + miou = 0 + for ii, sample_batched in enumerate(zip(testloader, testloader_flip)): + + inputs, labels = sample_batched[0]['image'], sample_batched[0]['label'] + inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label'] + inputs = torch.cat((inputs, inputs_f), dim=0) + # Forward pass of the mini-batch + inputs, labels = Variable(inputs, requires_grad=False), Variable(labels) + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + + with torch.no_grad(): + outputs = net_.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda()) + # pdb.set_trace() + outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2 + outputs = 
outputs.unsqueeze(0) + predictions = torch.max(outputs, 1)[1] + pred_list.append(predictions.cpu()) + label_list.append(labels.squeeze(1).cpu()) + loss = criterion(outputs, labels, batch_average=True) + running_loss_ts += loss.item() + # total_iou += utils.get_iou(predictions, labels) + # Print stuff + if ii % num_img_ts == num_img_ts - 1: + # if ii == 10: + miou = get_iou_from_list(pred_list, label_list, n_cls=classes) + running_loss_ts = running_loss_ts / num_img_ts + + print('Validation:') + print('[Epoch: %d, numImages: %5d]' % (epoch, ii * 1 + inputs.data.shape[0])) + writer.add_scalar('data/test_loss_epoch', running_loss_ts, epoch) + writer.add_scalar('data/test_miour', miou, epoch) + print('Loss: %f' % running_loss_ts) + print('MIoU: %f\n' % miou) + + +def main(opts): + p = OrderedDict() # Parameters to include in report + p['trainBatch'] = opts.batch # Training batch size + testBatch = 1 # Testing batch size + useTest = True # See evolution of the test set when training + nTestInterval = opts.testInterval # Run on test set every nTestInterval epochs + snapshot = 1 # Store a model every snapshot epochs + p['nAveGrad'] = 1 # Average the gradient of several iterations + p['lr'] = opts.lr # Learning rate + p['lrFtr'] = 1e-5 + p['lraspp'] = 1e-5 + p['lrpro'] = 1e-5 + p['lrdecoder'] = 1e-5 + p['lrother'] = 1e-5 + p['wd'] = 5e-4 # Weight decay + p['momentum'] = 0.9 # Momentum + p['epoch_size'] = opts.step # How many epochs to change learning rate + p['num_workers'] = opts.numworker + model_path = opts.pretrainedModel + backbone = 'xception' # Use xception or resnet as feature extractor, + nEpochs = opts.epochs + + max_id = 0 + save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__))) + exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1] + runs = glob.glob(os.path.join(save_dir_root, 'run_cihp', 'run_*')) + for r in runs: + run_id = int(r.split('_')[-1]) + if run_id >= max_id: + max_id = run_id + 1 + save_dir = os.path.join(save_dir_root, 'run_cihp', 'run_' + str(max_id)) + + # Network definition + if backbone == 'xception': + net_ = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20, os=16, + hidden_layers=opts.hidden_layers, source_classes=7, ) + elif backbone == 'resnet': + # net_ = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=7, os=16, pretrained=True) + raise NotImplementedError + else: + raise NotImplementedError + + modelName = 'deeplabv3plus-' + backbone + '-voc'+datetime.now().strftime('%b%d_%H-%M-%S') + criterion = util.cross_entropy2d + + if gpu_id >= 0: + # torch.cuda.set_device(device=gpu_id) + net_.cuda() + + # net load weights + if not model_path == '': + x = torch.load(model_path) + net_.load_state_dict_new(x) + print('load pretrainedModel.') + else: + print('no pretrainedModel.') + if not opts.loadmodel =='': + x = torch.load(opts.loadmodel) + net_.load_source_model(x) + print('load model:' ,opts.loadmodel) + else: + print('no model load !!!!!!!!') + + log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname()) + writer = SummaryWriter(log_dir=log_dir) + writer.add_text('load model',opts.loadmodel,1) + writer.add_text('setting',sys.argv[0],1) + + if opts.freezeBN: + net_.freeze_bn() + + # Use the following optimizer + optimizer = optim.SGD(net_.parameters(), lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd']) + + composed_transforms_tr = transforms.Compose([ + tr.RandomSized_new(512), + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + 
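+ # The training pipeline above augments with tr.RandomSized_new(512) before the
+ # Xception-style normalization; the two validation pipelines below use the same
+ # normalization, one of them adding a horizontal flip so that val_cihp can average
+ # each prediction with the prediction of the mirrored image (flip_cihp swaps the
+ # paired left/right part channels so the two outputs stay aligned).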
composed_transforms_ts = transforms.Compose([ + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + composed_transforms_ts_flip = transforms.Compose([ + tr.HorizontalFlip(), + tr.Normalize_xception_tf(), + tr.ToTensor_()]) + + voc_train = cihp.VOCSegmentation(split='train', transform=composed_transforms_tr, flip=True) + voc_val = cihp.VOCSegmentation(split='val', transform=composed_transforms_ts) + voc_val_flip = cihp.VOCSegmentation(split='val', transform=composed_transforms_ts_flip) + + trainloader = DataLoader(voc_train, batch_size=p['trainBatch'], shuffle=True, num_workers=p['num_workers'],drop_last=True) + testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=p['num_workers']) + testloader_flip = DataLoader(voc_val_flip, batch_size=testBatch, shuffle=False, num_workers=p['num_workers']) + + num_img_tr = len(trainloader) + num_img_ts = len(testloader) + running_loss_tr = 0.0 + running_loss_ts = 0.0 + aveGrad = 0 + global_step = 0 + print("Training Network") + + net = torch.nn.DataParallel(net_) + train_graph, test_graph = get_graphs(opts) + adj1, adj2, adj3 = train_graph + + + # Main Training and Testing Loop + for epoch in range(resume_epoch, nEpochs): + start_time = timeit.default_timer() + + if epoch % p['epoch_size'] == p['epoch_size'] - 1: + lr_ = util.lr_poly(p['lr'], epoch, nEpochs, 0.9) + optimizer = optim.SGD(net_.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd']) + writer.add_scalar('data/lr_', lr_, epoch) + print('(poly lr policy) learning rate: ', lr_) + + net.train() + for ii, sample_batched in enumerate(trainloader): + + inputs, labels = sample_batched['image'], sample_batched['label'] + # Forward-Backward of the mini-batch + inputs, labels = Variable(inputs, requires_grad=True), Variable(labels) + global_step += inputs.data.shape[0] + + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + + outputs = net.forward(inputs, adj1, adj3, adj2) + + loss = criterion(outputs, labels, batch_average=True) + running_loss_tr += loss.item() + + # Print stuff + if ii % num_img_tr == (num_img_tr - 1): + running_loss_tr = running_loss_tr / num_img_tr + writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch) + print('[Epoch: %d, numImages: %5d]' % (epoch, ii * p['trainBatch'] + inputs.data.shape[0])) + print('Loss: %f' % running_loss_tr) + running_loss_tr = 0 + stop_time = timeit.default_timer() + print("Execution time: " + str(stop_time - start_time) + "\n") + + # Backward the averaged gradient + loss /= p['nAveGrad'] + loss.backward() + aveGrad += 1 + + # Update the weights once in p['nAveGrad'] forward passes + if aveGrad % p['nAveGrad'] == 0: + writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch) + optimizer.step() + optimizer.zero_grad() + aveGrad = 0 + + # Show 10 * 3 images results each epoch + if ii % (num_img_tr // 10) == 0: + grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True) + writer.add_image('Image', grid_image, global_step) + grid_image = make_grid(util.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, + range=(0, 255)) + writer.add_image('Predicted label', grid_image, global_step) + grid_image = make_grid(util.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255)) + writer.add_image('Groundtruth label', grid_image, global_step) + print('loss is ', loss.cpu().item(), flush=True) + + # Save the model + if (epoch % snapshot) == snapshot - 1: + 
torch.save(net_.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth')) + print("Save model at {}\n".format(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))) + + torch.cuda.empty_cache() + + # One testing epoch + if useTest and epoch % nTestInterval == (nTestInterval - 1): + val_cihp(net_,testloader=testloader, testloader_flip=testloader_flip, test_graph=test_graph, + epoch=epoch,writer=writer,criterion=criterion) + torch.cuda.empty_cache() + + + + +if __name__ == '__main__': + opts = get_parser() + main(opts) \ No newline at end of file diff --git a/exp/universal/pascal_atr_cihp_uni.py b/exp/universal/pascal_atr_cihp_uni.py new file mode 100644 index 0000000..95057b8 --- /dev/null +++ b/exp/universal/pascal_atr_cihp_uni.py @@ -0,0 +1,493 @@ +import socket +import timeit +from datetime import datetime +import os +import sys +import glob +import numpy as np +from collections import OrderedDict +sys.path.append('./') +sys.path.append('./networks/') +# PyTorch includes +import torch +from torch.autograd import Variable +import torch.optim as optim +from torchvision import transforms +from torch.utils.data import DataLoader +from torchvision.utils import make_grid +import random + +# Tensorboard include +from tensorboardX import SummaryWriter + +# Custom includes +from dataloaders import pascal, cihp_pascal_atr +from utils import get_iou_from_list +from utils import util as ut +from networks import deeplab_xception_universal, graph +from dataloaders import custom_transforms as tr +from utils import sampler as sam +# +import argparse + +''' +source is cihp +target is pascal +''' + +gpu_id = 1 +# print('Using GPU: {} '.format(gpu_id)) + +# nEpochs = 100 # Number of epochs for training +resume_epoch = 0 # Default is 0, change if want to resume + +def flip(x, dim): + indices = [slice(None)] * x.dim() + indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, + dtype=torch.long, device=x.device) + return x[tuple(indices)] + +def flip_cihp(tail_list): + ''' + + :param tail_list: tail_list size is 1 x n_class x h x w + :return: + ''' + # tail_list = tail_list[0] + tail_list_rev = [None] * 20 + for xx in range(14): + tail_list_rev[xx] = tail_list[xx].unsqueeze(0) + tail_list_rev[14] = tail_list[15].unsqueeze(0) + tail_list_rev[15] = tail_list[14].unsqueeze(0) + tail_list_rev[16] = tail_list[17].unsqueeze(0) + tail_list_rev[17] = tail_list[16].unsqueeze(0) + tail_list_rev[18] = tail_list[19].unsqueeze(0) + tail_list_rev[19] = tail_list[18].unsqueeze(0) + return torch.cat(tail_list_rev,dim=0) + +def get_parser(): + '''argparse begin''' + parser = argparse.ArgumentParser() + LookupChoices = type('', (argparse.Action,), dict(__call__=lambda a, p, n, v, o: setattr(n, a.dest, a.choices[v]))) + + parser.add_argument('--epochs', default=100, type=int) + parser.add_argument('--batch', default=16, type=int) + parser.add_argument('--lr', default=1e-7, type=float) + parser.add_argument('--numworker',default=12,type=int) + # parser.add_argument('--freezeBN', choices=dict(true=True, false=False), default=True, action=LookupChoices) + parser.add_argument('--step', default=10, type=int) + # parser.add_argument('--loadmodel',default=None,type=str) + parser.add_argument('--classes', default=7, type=int) + parser.add_argument('--testepoch', default=10, type=int) + parser.add_argument('--loadmodel',default='',type=str) + parser.add_argument('--pretrainedModel', default='', type=str) + parser.add_argument('--hidden_layers',default=128,type=int) + 
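+ # --hidden_layers is passed to the network as the hidden feature size of its graph
+ # reasoning layers; --gpus (next argument) must match the number of GPUs that
+ # DataParallel will use, since get_graphs() below expands every adjacency matrix
+ # to one copy per GPU along the leading dimension.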
parser.add_argument('--gpus',default=4, type=int) + parser.add_argument('--testInterval', default=5, type=int) + opts = parser.parse_args() + return opts + +def get_graphs(opts): + '''source is pascal; target is cihp; middle is atr''' + # target 1 + cihp_adj = graph.preprocess_adj(graph.cihp_graph) + adj1_ = Variable(torch.from_numpy(cihp_adj).float()) + adj1 = adj1_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 20, 20).cuda() + adj1_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20) + #source 2 + adj2_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float()) + adj2 = adj2_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 7).cuda() + adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7) + # s to target 3 + adj3_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float() + adj3 = adj3_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 20).transpose(2,3).cuda() + adj3_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).transpose(2,3) + # middle 4 + atr_adj = graph.preprocess_adj(graph.atr_graph) + adj4_ = Variable(torch.from_numpy(atr_adj).float()) + adj4 = adj4_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 18, 18).cuda() + adj4_test = adj4_.unsqueeze(0).unsqueeze(0).expand(1, 1, 18, 18) + # source to middle 5 + adj5_ = torch.from_numpy(graph.pascal2atr_nlp_adj).float() + adj5 = adj5_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 18).cuda() + adj5_test = adj5_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 18) + # target to middle 6 + adj6_ = torch.from_numpy(graph.cihp2atr_nlp_adj).float() + adj6 = adj6_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 20, 18).cuda() + adj6_test = adj6_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 18) + train_graph = [adj1, adj2, adj3, adj4, adj5, adj6] + test_graph = [adj1_test, adj2_test, adj3_test, adj4_test, adj5_test, adj6_test] + return train_graph, test_graph + + +def main(opts): + # Set parameters + p = OrderedDict() # Parameters to include in report + p['trainBatch'] = opts.batch # Training batch size + testBatch = 1 # Testing batch size + useTest = True # See evolution of the test set when training + nTestInterval = opts.testInterval # Run on test set every nTestInterval epochs + snapshot = 1 # Store a model every snapshot epochs + p['nAveGrad'] = 1 # Average the gradient of several iterations + p['lr'] = opts.lr # Learning rate + p['wd'] = 5e-4 # Weight decay + p['momentum'] = 0.9 # Momentum + p['epoch_size'] = opts.step # How many epochs to change learning rate + p['num_workers'] = opts.numworker + model_path = opts.pretrainedModel + backbone = 'xception' # Use xception or resnet as feature extractor + nEpochs = opts.epochs + + max_id = 0 + save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__))) + exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1] + runs = glob.glob(os.path.join(save_dir_root, 'run', 'run_*')) + for r in runs: + run_id = int(r.split('_')[-1]) + if run_id >= max_id: + max_id = run_id + 1 + # run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0 + save_dir = os.path.join(save_dir_root, 'run', 'run_' + str(max_id)) + + # Network definition + if backbone == 'xception': + net_ = deeplab_xception_universal.deeplab_xception_end2end_3d(n_classes=20, os=16, + hidden_layers=opts.hidden_layers, + source_classes=7, + middle_classes=18, ) + elif backbone == 'resnet': + # net_ = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=7, os=16, pretrained=True) + raise NotImplementedError + else: + raise NotImplementedError + + modelName = 'deeplabv3plus-' + 
backbone + '-voc'+datetime.now().strftime('%b%d_%H-%M-%S')
+ criterion = ut.cross_entropy2d
+
+ if gpu_id >= 0:
+ # torch.cuda.set_device(device=gpu_id)
+ net_.cuda()
+
+ # net load weights
+ if not model_path == '':
+ x = torch.load(model_path)
+ net_.load_state_dict_new(x)
+ print('load pretrainedModel.')
+ else:
+ print('no pretrainedModel.')
+
+ if not opts.loadmodel =='':
+ x = torch.load(opts.loadmodel)
+ net_.load_source_model(x)
+ print('load model:' ,opts.loadmodel)
+ else:
+ print('no trained model load !!!!!!!!')
+
+ log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
+ writer = SummaryWriter(log_dir=log_dir)
+ writer.add_text('load model',opts.loadmodel,1)
+ writer.add_text('setting',sys.argv[0],1)
+
+ # Use the following optimizer
+ optimizer = optim.SGD(net_.parameters(), lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
+
+ composed_transforms_tr = transforms.Compose([
+ tr.RandomSized_new(512),
+ tr.Normalize_xception_tf(),
+ tr.ToTensor_()])
+
+ composed_transforms_ts = transforms.Compose([
+ tr.Normalize_xception_tf(),
+ tr.ToTensor_()])
+
+ composed_transforms_ts_flip = transforms.Compose([
+ tr.HorizontalFlip(),
+ tr.Normalize_xception_tf(),
+ tr.ToTensor_()])
+
+ all_train = cihp_pascal_atr.VOCSegmentation(split='train', transform=composed_transforms_tr, flip=True)
+ voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)
+ voc_val_flip = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts_flip)
+
+ num_cihp,num_pascal,num_atr = all_train.get_class_num()
+ ss = sam.Sampler_uni(num_cihp,num_pascal,num_atr,opts.batch)
+ # balance datasets based on pascal
+ ss_balanced = sam.Sampler_uni(num_cihp,num_pascal,num_atr,opts.batch, balance_id=1)
+
+ trainloader = DataLoader(all_train, batch_size=p['trainBatch'], shuffle=False, num_workers=p['num_workers'],
+ sampler=ss, drop_last=True)
+ trainloader_balanced = DataLoader(all_train, batch_size=p['trainBatch'], shuffle=False, num_workers=p['num_workers'],
+ sampler=ss_balanced, drop_last=True)
+ testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=p['num_workers'])
+ testloader_flip = DataLoader(voc_val_flip, batch_size=testBatch, shuffle=False, num_workers=p['num_workers'])
+
+ num_img_tr = len(trainloader)
+ num_img_balanced = len(trainloader_balanced)
+ num_img_ts = len(testloader)
+ running_loss_tr = 0.0
+ running_loss_tr_atr = 0.0
+ running_loss_ts = 0.0
+ aveGrad = 0
+ global_step = 0
+ print("Training Network")
+ net = torch.nn.DataParallel(net_)
+
+ id_list = torch.LongTensor(range(opts.batch))
+ pascal_iter = int(num_img_tr//opts.batch)
+
+ # Get graphs
+ train_graph, test_graph = get_graphs(opts)
+ adj1, adj2, adj3, adj4, adj5, adj6 = train_graph
+ adj1_test, adj2_test, adj3_test, adj4_test, adj5_test, adj6_test = test_graph
+
+ # Main Training and Testing Loop
+ for epoch in range(resume_epoch, int(1.5*nEpochs)):
+ start_time = timeit.default_timer()
+
+ if epoch % p['epoch_size'] == p['epoch_size'] - 1 and epoch > nEpochs:
+ lr_ = ut.lr_poly(p['lr'], epoch-nEpochs, int(0.5*nEpochs), 0.9)
+ optimizer = optim.SGD(net_.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])
+ print('(poly lr policy) learning rate: ', lr_)
+ writer.add_scalar('data/lr_', lr_, epoch)
+
+ net_.train()
+ if epoch < nEpochs:
+ for ii, sample_batched in enumerate(trainloader):
+ inputs, labels = sample_batched['image'], sample_batched['label']
+ dataset_lbl = sample_batched['pascal'][0].item()
+ # 
Forward-Backward of the mini-batch + inputs, labels = Variable(inputs, requires_grad=True), Variable(labels) + global_step += 1 + + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + + if dataset_lbl == 0: + # 0 is cihp -- target + _, outputs,_ = net.forward(None, input_target=inputs, input_middle=None, adj1_target=adj1, adj2_source=adj2, + adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2,3), adj4_middle=adj4,adj5_transfer_s2m=adj5.transpose(2, 3), + adj6_transfer_t2m=adj6.transpose(2, 3),adj5_transfer_m2s=adj5,adj6_transfer_m2t=adj6,) + elif dataset_lbl == 1: + # pascal is source + outputs, _, _ = net.forward(inputs, input_target=None, input_middle=None, adj1_target=adj1, + adj2_source=adj2, + adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), + adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), + adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, + adj6_transfer_m2t=adj6, ) + else: + # atr + _, _, outputs = net.forward(None, input_target=None, input_middle=inputs, adj1_target=adj1, + adj2_source=adj2, + adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), + adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), + adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, + adj6_transfer_m2t=adj6, ) + # print(sample_batched['pascal']) + # print(outputs.size(),) + # print(labels) + loss = criterion(outputs, labels, batch_average=True) + running_loss_tr += loss.item() + + # Print stuff + if ii % num_img_tr == (num_img_tr - 1): + running_loss_tr = running_loss_tr / num_img_tr + writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch) + print('[Epoch: %d, numImages: %5d]' % (epoch, epoch)) + print('Loss: %f' % running_loss_tr) + running_loss_tr = 0 + stop_time = timeit.default_timer() + print("Execution time: " + str(stop_time - start_time) + "\n") + + # Backward the averaged gradient + loss /= p['nAveGrad'] + loss.backward() + aveGrad += 1 + + # Update the weights once in p['nAveGrad'] forward passes + if aveGrad % p['nAveGrad'] == 0: + writer.add_scalar('data/total_loss_iter', loss.item(), global_step) + if dataset_lbl == 0: + writer.add_scalar('data/total_loss_iter_cihp', loss.item(), global_step) + if dataset_lbl == 1: + writer.add_scalar('data/total_loss_iter_pascal', loss.item(), global_step) + if dataset_lbl == 2: + writer.add_scalar('data/total_loss_iter_atr', loss.item(), global_step) + optimizer.step() + optimizer.zero_grad() + # optimizer_gcn.step() + # optimizer_gcn.zero_grad() + aveGrad = 0 + + # Show 10 * 3 images results each epoch + if ii % (num_img_tr // 10) == 0: + grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True) + writer.add_image('Image', grid_image, global_step) + grid_image = make_grid(ut.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, + range=(0, 255)) + writer.add_image('Predicted label', grid_image, global_step) + grid_image = make_grid(ut.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255)) + writer.add_image('Groundtruth label', grid_image, global_step) + + print('loss is ',loss.cpu().item(),flush=True) + else: + # Balanced the number of datasets + for ii, sample_batched in enumerate(trainloader_balanced): + inputs, labels = sample_batched['image'], sample_batched['label'] + dataset_lbl = sample_batched['pascal'][0].item() + # Forward-Backward of the mini-batch + inputs, labels = Variable(inputs, requires_grad=True), Variable(labels) + global_step 
+= 1 + + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + + if dataset_lbl == 0: + # 0 is cihp -- target + _, outputs, _ = net.forward(None, input_target=inputs, input_middle=None, adj1_target=adj1, + adj2_source=adj2, + adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), + adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), + adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, + adj6_transfer_m2t=adj6, ) + elif dataset_lbl == 1: + # pascal is source + outputs, _, _ = net.forward(inputs, input_target=None, input_middle=None, adj1_target=adj1, + adj2_source=adj2, + adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), + adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), + adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, + adj6_transfer_m2t=adj6, ) + else: + # atr + _, _, outputs = net.forward(None, input_target=None, input_middle=inputs, adj1_target=adj1, + adj2_source=adj2, + adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), + adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), + adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, + adj6_transfer_m2t=adj6, ) + # print(sample_batched['pascal']) + # print(outputs.size(),) + # print(labels) + loss = criterion(outputs, labels, batch_average=True) + running_loss_tr += loss.item() + + # Print stuff + if ii % num_img_balanced == (num_img_balanced - 1): + running_loss_tr = running_loss_tr / num_img_balanced + writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch) + print('[Epoch: %d, numImages: %5d]' % (epoch, epoch)) + print('Loss: %f' % running_loss_tr) + running_loss_tr = 0 + stop_time = timeit.default_timer() + print("Execution time: " + str(stop_time - start_time) + "\n") + + # Backward the averaged gradient + loss /= p['nAveGrad'] + loss.backward() + aveGrad += 1 + + # Update the weights once in p['nAveGrad'] forward passes + if aveGrad % p['nAveGrad'] == 0: + writer.add_scalar('data/total_loss_iter', loss.item(), global_step) + if dataset_lbl == 0: + writer.add_scalar('data/total_loss_iter_cihp', loss.item(), global_step) + if dataset_lbl == 1: + writer.add_scalar('data/total_loss_iter_pascal', loss.item(), global_step) + if dataset_lbl == 2: + writer.add_scalar('data/total_loss_iter_atr', loss.item(), global_step) + optimizer.step() + optimizer.zero_grad() + + aveGrad = 0 + + # Show 10 * 3 images results each epoch + if ii % (num_img_balanced // 10) == 0: + grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True) + writer.add_image('Image', grid_image, global_step) + grid_image = make_grid( + ut.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, + normalize=False, + range=(0, 255)) + writer.add_image('Predicted label', grid_image, global_step) + grid_image = make_grid( + ut.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()), 3, + normalize=False, range=(0, 255)) + writer.add_image('Groundtruth label', grid_image, global_step) + + print('loss is ', loss.cpu().item(), flush=True) + + # Save the model + if (epoch % snapshot) == snapshot - 1: + torch.save(net_.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth')) + print("Save model at {}\n".format(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))) + + # One testing epoch + if useTest and epoch % nTestInterval == (nTestInterval - 1): + val_pascal(net_=net_, testloader=testloader, testloader_flip=testloader_flip, test_graph=test_graph, 
+ criterion=criterion, epoch=epoch, writer=writer) + + +def val_pascal(net_, testloader, testloader_flip, test_graph, criterion, epoch, writer, classes=7): + running_loss_ts = 0.0 + miou = 0 + adj1_test, adj2_test, adj3_test, adj4_test, adj5_test, adj6_test = test_graph + num_img_ts = len(testloader) + net_.eval() + pred_list = [] + label_list = [] + for ii, sample_batched in enumerate(zip(testloader, testloader_flip)): + # print(ii) + inputs, labels = sample_batched[0]['image'], sample_batched[0]['label'] + inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label'] + inputs = torch.cat((inputs, inputs_f), dim=0) + # Forward pass of the mini-batch + inputs, labels = Variable(inputs, requires_grad=False), Variable(labels) + + with torch.no_grad(): + if gpu_id >= 0: + inputs, labels = inputs.cuda(), labels.cuda() + outputs, _, _ = net_.forward(inputs, input_target=None, input_middle=None, + adj1_target=adj1_test.cuda(), + adj2_source=adj2_test.cuda(), + adj3_transfer_s2t=adj3_test.cuda(), + adj3_transfer_t2s=adj3_test.transpose(2, 3).cuda(), + adj4_middle=adj4_test.cuda(), + adj5_transfer_s2m=adj5_test.transpose(2, 3).cuda(), + adj6_transfer_t2m=adj6_test.transpose(2, 3).cuda(), + adj5_transfer_m2s=adj5_test.cuda(), + adj6_transfer_m2t=adj6_test.cuda(), ) + # pdb.set_trace() + outputs = (outputs[0] + flip(outputs[1], dim=-1)) / 2 + outputs = outputs.unsqueeze(0) + predictions = torch.max(outputs, 1)[1] + pred_list.append(predictions.cpu()) + label_list.append(labels.squeeze(1).cpu()) + loss = criterion(outputs, labels, batch_average=True) + running_loss_ts += loss.item() + + # total_iou += utils.get_iou(predictions, labels) + + # Print stuff + if ii % num_img_ts == num_img_ts - 1: + # if ii == 10: + miou = get_iou_from_list(pred_list, label_list, n_cls=classes) + running_loss_ts = running_loss_ts / num_img_ts + + print('Validation:') + print('[Epoch: %d, numImages: %5d]' % (epoch, ii * 1 + inputs.data.shape[0])) + writer.add_scalar('data/test_loss_epoch', running_loss_ts, epoch) + writer.add_scalar('data/test_miour', miou, epoch) + print('Loss: %f' % running_loss_ts) + print('MIoU: %f\n' % miou) + # return miou + + +if __name__ == '__main__': + opts = get_parser() + main(opts) \ No newline at end of file diff --git a/img/messi.jpg b/img/messi.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eeac0c117335cf1a019d476d5ca2eff484591554 GIT binary patch literal 46306 zcmbTdbyOTr6z4lQ0RlmWpn;%46J&4+5ZoOGNpKk4ouC0GxHGux;DbYgI|Lh?z~Jr# z5+Ix3?w;Lq-e2#%>gvC``kwk!ci&U@-f#b1{JQ~op(Lj$2S7tZ1AO{-0RBD!L}j#O zrF7J_MCffSxgai9HuPRR-0$e$c)Hs;+0ZL0$mz zzr*{x0+0bVLHVyP=`~nl zkd>2HP*l>^(bdy8Fa%pzT3OrJ+CkhsJiWYqeEq`0BfdmNMMIO4Q&Q8?GcsX?Ma3nh zW#tu>4UJ8R=9bpB_TIk!fx)5Skl|qAHjPKl#J|aW@gLg%B>R5{7W)5-?EirMU#?{U0S4N?hlfE7kOVxeF>xu< zeD|)t=jKx-{e#?}+)Zf_Hr9QX=k8e=EvQWgGp8~l>v%i}M`a~z^^0%+Dj!dYu?wG} zIKKzE{Rmj9)a2d*BlX4=7@@6U{0^6*dS4~TE2Y1@PGVzj6&`}Dj+90@tq6nCC53@S zZ#dbjR+6Wg?E)UovLd zlan9{C>B_lyY5{P1zEM$To!7R&o}37j z-xa~SH0lmjUF0W3f;eY`RSnZPZz~1dKLPAzX@m~s-=sie3GGEtBvdU*!6JH|KPZKE zzY^|RuTuF0nCFhcELb=`L^-6aZu)|7^ThAUt%S&iCs>J1ch|FfrLjE7k<96!DLYJT z;L2jEt$xgfG4LZ>A6Shg;MTI>dyR#5NTEfdX$V3f=`;HtVxQ#wfUE^x+#V2~NChMF%$0O~NtrDFad&^Nsf5@-7uY~ey5Pa7w z^Ckug+BX)mb_|O|KqX(-Jf#H}JlIgDNi~Gn5jaMw(afV|ozZtuwQpn2D^w~-Syne_ z7i(Cb2z#$86a&>UhJjPQ3b>qssvIp9QI!UzJSJF4wPatVOVg;tepRN6d`?a@VX_sV zD)9>5V`x&K92=vIt(fNijqnR5U9OZ~WflbOVpeh0882*TM$wk=AUc_AT1{Zj|is+R>nrsDmlM;aWT`F!+TTZ`$k8o zy6Z1MoMzxM3Es*PcSBZ(d7y7@H3YWmF?NAax4tO?ZDQJx-Ju@Xz-q42&0s_*gKYU6 
z$2ygB@|G3zSNs%bPSamg@K=cK<3-dbhUUiVQNfZl^4=`^k>)V^0a#uQ_>mlT@U(Xd zRlkpSYR&kyr(S;0J|(dG9rIgwcSN)DpugL>F0P%kA&>jYpemLeWk~H^czWK-T=IG& zmmMllrFlCZMQ5nLhx{c4o!Y6A@?@Sk8>lCsK7@t;0LO)NdJn`aKMF|kYTA6ZBH||U z?+5QzHXz|!Zc68z91)u4d}Ht@#`asTVY!D~w}r%kVkAm&-)ZZ|rE^kv>S9Kht3l*n ztNCuysZtlPIr@IJ<6@)rn$EIUyZgHzrDS|Tg{Om#7KK}KB$I18CgS(sWu5Hdwv!3_M74nr-oR9 z#)CNv_kHU^+r>~#Iu(-+G5S~0aQaikxK!3HTVBVP#ge5{EQ(2Uw%f9`-kSag(XVMF zQZg|OgZGs1ed(qKBNap8_lO(BdM5cwypUoo!_WbY*GJ{9Yr)JgKU2ljptg(C)2HBm z6S8+wa$2y#?^>-8$4X!TcCH_0hGt1_cT-l=b!vHA&T&A94%F2#>Sle0hO{EI zBwd&wRC`PI;*LZHX|y@bG};{2y2hXaMK(kL`&DuvF`C0PqPa>unJanhZ#QTAaXC|%06s|b+s)jdi1xpgNeNQy`S{jvw zlm$7agbo4DMLnW3bg3Y-bMPAgo7_Q0!EOw`1vC^~>1soccU_yEdYMUOWGi69P<251@#ERKA zXux9zgt0-MIi~3Yv6c~qNwu-aHKvw}yP&Cl%%kZ-q{+36Y%%O=NUfQhBpTO_7_-g^ zsY=G9fr@I9THFlugyS74?QVACsH>P6&N!(zC>+!FXm&Cd-*e*ztP3w{yrMF5Pi7bg z6xB2)W}5@o6fKz^wWb@?Q-blF3VzK6jK;D#^ryvT&nNS%GVgZco}qvl9Ma`LLfU>t zM@ocPVY#cXDZ2 zJZ<9Pt?2$u<*~O&+S#Po@v@9@p1Jm}U*XTePl!Grk5^G?9fqBGY+}5(x|CvNL%ndF z$RmT>BzHCSub3R2!?zUCAdCZ!KT7mz<7!iiic3Sf7|O7Uis*a`ty;CM#M0f8vTrMc z)REX#3v(mnfwX&e_BH7L02{m^cVqD)>NsNZY_7DGuDP*2OexpxmWf5 zXq)e2?cFZ!+gGrL_SR1_+Eom#{SUbBS4ygmoLA0&3H~e%2JI~FgAF#}zH($O<+0#6 z-JB0xe@glqDUw846@x~@aqkLvU2|Bi^4Xl6W+Tp?D&Tv7UXd{0i`9zdKw$ zmG&EkIWJ=E2q(YFqz}@nuk7{VCfgmy_Jz&5{{YJuyl_7-b6%KPe;STiLSvf0X*JZ{ zSE`kinZ)V82K*DDFl&7cB_QW*)UR-(@Nz|VG01^Js0gY8s*V5yfB^KU+=&iFEOD?s zDtV1tHK=_FWd)R)@sG!7m&~YI8`j4%5GaZp+4r@Hh`~9d* zr**jDIa8N39hvh7h<|3U7ilc#Or9N90C)kmK0b!AG(Uu%H+?`sCWkC4#|7l!N4CWHcOqo$^2{4p^vMEmozs$8Rk{$Lz1JqIo|;IG0iJ>I)X*h8>66 zq(_B*Pywp3GR_V89CO~J0l?tqs{TT;Fg)ip(l)R) zRJ4f4Ii(k&4%Y^u%?`!zg_o^Hh{29cRZUe35Dzru)RuNCr{<%XF<7=D=blAM6-xZI z0!#8?s%0TpAk`*>IbUuv4L^7%xTePHz~oZ61Y~e3cNK(bHuN-xW@X2EiD4v$&P7e+ zZX%^717as6mK3s12N|goPAq=+6&&`<8OBXjtQI6mn{EX?7`VYa(<3UQ9@OB3at%$` z$yHfTwOC(zf+sJ*6z1FoUZT*l@7cEP1892uRh~uZ3E_mrpSp#by()&Amlgoufqnb*7U5I|$;M%!-&Mq71m^n)_ckIG|@M@f*Uc;|&MNga*H~kX%}p z92Ol^`w&R_;=VU{CbGGiZWPCG3vb>&W#{?V-D@gD{Pyo&N%5=UO{Tj9+V+(T$#|G+ z%Q!F|bRT%8e~TI8y?Xd8Q;PNydS64^%JEe2^M2E|p1<%|fA)1lJryud+(kXkfAh!|(K7~pqv z*QxfQS+{5_di3@4{Et=pOhngK_WrvWx<$;Ma;RO|?O#!R81WUxr=qr*dafn4P|&*^ z?vFf=z~l4wua_*F$_1lg9AocH-Tj4nKf@1*R{k5;E#!8Y;y=5Qix0YZCm!8@3fmH; zjH2l%-rBEIs=il2VWldyEjwLyZ?|{rqV+zb5p1W4b_1SjwZ5Q}#;&9~Fv^uV1bXJ1 z=I_fM1$n}0mjmMXiM$W&31#NYwy zO(HG4hVmj9UBMGb(9FUGhyX}w8My%T0acii1_pCom~0Ls8z&q*Tekg|tJw)^(hE5d z0$=r{d$d^y9<_FV6JGcNHoTic@b&8l+Z+?d(r#YOyt?Fj?azIY>6|yFyc|1sOXhcWc`Ay}vCl zrS4@q(1Lu)rnTFrR{-nVN9+W0bCZML%=-maFgS^RVgFg=nc$ zcR(b`>rOj(B#Mqw5imN`ZZM-gYv!zNVt+MVaHg8L&nBvcAmkczN&*sTrUaJ~06t2H zKOC{mLL^xaT6pr+NGB$X+&3pi`AMi`T!KX&a0*vBH3P}Da!n>NCA1Q7O;`TO2;27` zrCBnPM$=JCcEf^rqTR=;BbqXYibD&?GJiUH${h3G)}H&74I~9dh?eWpq|VR*G{;tR zm8c4_I3(tk>@v$tK;o)8nGSRHsUw0%;Hd+pGDw*ifNHs&icZ9i0>l98Q3S!xX$mkI zY}95#s(t!Wzc9BQEGxK3PZZNGdQ;?tv`dr6etK0|a)gECnr}jjQOJ&@92%o=+X*04 zh>aSIo=q&#EP!)P_f`Uuv2MSe25qE*Ggns8Hu2QcmdRLT(+iiDXxo55Bds`DsTm+P zd93L^MaUgSYEh|;gsyghjCxV(9%h`DkSt1^WQvA6L;;RQ4;2cznho2&?sCQlKu<5)sYLr<2N_}1Rg-XnH5-5!LUz#?SZ)1?S^ zG$9KjfZQKyR%J)q&1V_qRQY&r)iAr7D8G3g=I&^6T}0Y136?e`hDBDmc&;QE9V)uq zO6ttZv3(6f_c9d-iZXwQR5qBAdK+jF_iU-6wzm?ORmE4dym+8gCwq3yNN!#b3$D`C ztdU#nTXl>kK*^@YNe3ep2)$)tf(=6sqa3Xuc|qEG1JbKTNu(tas2Ck-Mbr`~!*Pl(ZU9lRAkcXv zXv=H|6&wu2lB>-_1I*AcVXz*a^un%K0Q(62Y8g(&{{S{sQe7#@xuV=SKJ`lC?r$hY z!;opB>A~`eD!s_3duTgpa`K~961IUMz&+hEYXV%%_fs7<>E7~oV!No92@!KW?6n_G9^9R(IaW63@){2-d< z-uqV243W(`Zo9CHq`-5zN3XSf`31fQmU5A*hnSAM^cD96voxg+(i{r%Z;ZbRAH-UH z#;K!QB5EXwE}@KL&j57;*ZJ46hQ?mhsz0CkpGlQw5W~t*t<`A%02_StN6r@)zi2yG zD2u`<>5ub@mdfPB;Epj=N3i#=DZ7^w955D5qW-fABJwFQdVSJ3F0iH4RuMF_*+;H7n 
zUBPbBV~fo2uL2*^7wEtA7&u5!SwEG@;8sZv~^@$^QVsJS;^nSIo9G zW0-~GC)%QwpUl7qJ$S07^t5}Ka6i3Uz6D4%pBd;HbnE8FcYh2`8R+Y~EI;p%)#E}g zq?9AOIPG_1+W!D&KZ2KjDAKRIUwvU5;@Mbe*4J&e8;gqCqyN4%sr5JW=r~$6mjat(B~Sj!Dkk zJ`d%QUt#zw!J1#g{{RVEcymdFjXKmy7CBoa`Hs>)eyr`sp|8zdYr_`*01YlYKjHgp zyPK_X%(G6{3I@oX&ytM2LxtOs)6%~2xcG$+!#@OgQ%;&h)I33VX%se=5pX3T23+Tr zMH~}erg0k8p-Yyllx2Qc*(9ykO)al&jtnHZYgAPtu9EkYZujYDw%r+9$6pBk73sbk zn^^Idoo}PtZ`$n*!A0K0?NtM*Bb*O^O7IT}_>17ZuDhz}Q|UUbo|Arl&jzCIWQrF* zahxts-o^;%de_j}?z`dNjb9D5oextm$EjPXxi;`ikmfLV7|wH&zGD70^45)g@T0;W zCKfu^hNi!@M9GVJ-HpSN4J#@}4{$|uLPGI^M_!ff-xj<<@Y`SbX&uLd zE~GK(H_9Wonc1G+Ie2x;9PQp$@x^$|U%rbdz^8I6@yyEyEFLCQl4*3lj^1Z)b1VBi+2cf8T$AKCs3_919XjQZl63^7W@r8pgdsiT>dm@+T%fHZ8|pHI zqBJLGAXEfLB#K;M3ca-uu-lQEiddEK2SZ0Lr4dT@>^@9!PK}hd(l=CgHma8_;O@t2 zmU2<|Cjg$+T&;{`;bR4W$2}?~hjS|g&S~<_F~alSn3BAQY7>At0)~ZhXD5Zg6xpIE z1`|1`S;%mwc1Wa>@>IqYmOafJphrBIIL2zLL}Vl7=e1ryWjF)sO;ny~ZCw8BupJ1b z<6yAXMk+JgprQW*K2|RVbYFX<#`p}Akz;@Wbp1eUJx3y z9k~+Ye@{c!hXJJ#=V%>8T8aXJk(yAbq_3?>E0la-)yHVnc5pf46;wTp@{#H9np=c`q)LzFHq4UAkWOnUCWMg- zUtvJlL!VBVsiQKw1^IaN6=Er3c^A&vk;3g?ym8KdPV}bs1U_2AyRdtak^K!zxD}&G zHvzm3dew<7BxhD2bI^O#u+M6dFPe5IBbEEOq76dNW_QR{fFX90S3KsLi?*cd#Axo= z!WYX2oy&uobIoxYsxTNG#W-pq{HIA#3XJAOCv`UGR=ASc1$;WDao5od?pKu?t}$NpkPAA3Fh-@7S7r zt%F9JO}YTmL}b`7ArbNSO^XAvq&K6Dt~tUo@LE$S7N z6uLsYx&Q+YTvB#Ou4a{s*6A2X({ADqTC99G1X_f%Lmnhyxqv4g)m5RA37O2V^bLZ4 z&S|Z8K4oKaP_b1p?IhrT`sx#k-3FZvWVgI9yol? z#7vR~J5>gF_sKPCOT-qaQCcFS48-t#4Lhxg4aOOkG=RBobLmz7(|Zd$C8wR90_LQV z8B?kl%oc(yHt)!Dqjnus5h5+6>pL)%!>OBb|yP93(#_^ROIBwN)Hg!}k zxKqg8Rb-ylIS|feIb6D+G}$0&SgICy<59J_^y$x~OHDlqE{l0yPu-AE^NjVTLM4uJ zqA*Wf9@P{xt-r{QT5!95m39UQsV5{J8?RHq?cS|j=p!Rmmsm21EUnh$X8{4=8q!pn zTX^J}B9i0gUqe=HuA(0*OR$1?IR}b}U!b*k-Hur94r-T~T-t0&ECT5uC6Afaww@^^ zNTYU)Fm@QrW|rbGrDL@!%m(H-Cm&IojiNtle=<16N6KOu%O1xkwHh%uaqAMFkwIx2 zrVmPF?JQsu7-A0I1vW|E*LTbVHwWLvc5vL4DJRaGl&3yv)9MQ@hT*4Ig z8LXSlZaZ6;?d5Hf%5#z{S{Yj6H1qf5u1*2YJ!!I7+e*gAsf0?nXY5npl#TGW1goK3*1F`nnHGfeuA;*(Gv4f(KX##%H8gCJKJqq6KI8m=H7Ih$Z|kCTp?ck zu&-ASjGZ>2QQdw=tAndLlZ_i*?#t$D_$T2X!w-Yv!cU93)&9NWtCdL`!umUyZEcCc zSmk6Y%)^d10qQH&TjQs|4-i8ZiF4pM{5fXp<-@4y_Ax;5AK_+b!m%0t_V1yul67-= ztZFwJmZftnv&k&aa!6HUE>8u{~Hp2W51yzwJEIpH`)1=Vh}?&gZ-97T?*r#FptI&teDeqyP^wPcgm$INXI> z9kYt+J{oH_eh%?U_>T0V!=hWwe37sR%$H$~G42CM2_C|}ezDg)LGbSBzqPF1`$`S5 znB_SM;DI!!7#O??TKjoUpdiJi2wh)d! 
zpR`tsf1Zco*}hRzF0W|2!M}E2=jZvJh2bCCZM=W*`^0_>@eI!`_Ng1oZ8;z^P8>Wa zy)Zn)j~K=}`scy^5O}^{hkift4w2$Ji;XhNR8@}cuUUjD5rJ_eT_g_}X!f>O=gtl* zobf)3r|EiS-Ts$+3=61Ryi9|bmjzlf!HF636|>JO1${%OS$I$Om-uU^YC8NsW$_d+ z{gUHUwv9~7B8+Zz$PSH+;E-|Ag?_h)sfD9WxYKH!-oM>;TBZ5=Xng!(R*d-^xt+AX zYxn-V&jk2u;jMe)pTvz{#j?vZb`eP}{oAze%V!YfBVK=m0HJu`4wbF(E8_;ZH7gx* zX{Wx{^(ckL0{-UK*8s+dqwatSQVBf9$DG&G8aIWszlMJWweJl0Qq`^|I&|;9XYa3W z8FE~Ko=H3?&ISO^E8uN=Q`Gcjxwmm767585dyUI1YBPn{!ys~NrvZtMI&yKeV+$m$ zt6hHmFX)VF$C9dTUB3Dx@2Ae!*UaA1J}c_FH;U}FZ9i1Gwz!&I?qXs$W9BvV?}|PS zw~TxPrF=}#W|rnTRHgDe64ui9Cmm3hUQa4dans}f015bd{{Y1rg|?feTD{HG?o4YG zOy?L1cu|}WTzzZx1NK<(&a?2t;J@~7jTReMX`VxECMDS$;dZzHgS(D7_Z8~qIfKj8 zE5WXNtvklruh+=Jt`ZoD^1-z1)z?SU*Rx$unEXTVw)a8(pY5-2qLRk<#Fs4IM|6TT zXA)qHl>c{G>Yj*3ar=BBn!^k1o&n~4N)v0zn3 z2^pt6PNp^omF12)n%!iM6-i5nAPfLTJurVNhG_syG-G^+fTSAu-bcGNxu0_bINTgW zagaS}w#m70gu8*A%UADO1b_m#oDgu`cKTIh18kG2*gw^U>61cMVRE#Vr2}S7(i5DX zl-XW5L~w@l8#!mCX`sD<;c#8@R~S5VRphpc8(54_m0kufvH%jZsGF zFvqq!(!`jX%R}XyWr(P>*tf{hwi|HVSe)nBQ|=)au`(fXB!>jII2`(Y59L>*)I*xy zWSp|#x#VY>V!JeM4$#E%O;-juiURVo5ZG+woDP2uX|UWzX8|_?7a+3mea|$TS16H_ zg$7NWWYu?-q{JdmnNE6ksTv68`4(^pB(o8~`qZdaTaA&hJ6Abm9SbZV1Eue)91LJH7H|NLNTB7^{8ff)FY6)PvL`AVANn%jhhXJIOi1a zC?u!vd7b_0ONb?C-#9y0KT}a8(<}LtN{@kzuNgG#G$ipPC+A`q{{S;k3p8$4Ob>dS zW85^7WENkTm*@v-BA05N%B1J7N&uwZV_inSyO}xKe9eW%cpwqLH2al#L@g{)q-vN( z-<+N^jC+&TgK_pK?W2~}ox_xob}@ax^4P}%H54Lf)<%#`3I^S7r2%{-zbu3$yb%L$>?+J zXxZq*mzb|@45T4PgP>-U3NzOL=Y#7=mQzl`-B7xTwrv3Mp5xc4_8zq|PGkX zbL1S8l6^XH?N526N!{(>B$BDSZUXf?m$qRaJGvD(5GT z;|u!H9-Vh{a*kv~JMs6xG9J0f`A4TDRQC64G%Ov|ON?ZkJfLI}J$j7&J?dF*rPHQD zQNsiZ@tBEZZ$dFt;vA)Rx02-BNQjZN*uYcATy!|aOKqgx!a~3znVojBw1DHGA45`! z0?zGhRv{))a1RGK0Y`jQLvVDNELcZ~r?OcaxL zUyug?`u_kbV%bR{V63=}ae~`el5jX|cQ~e7$@VdFRoz5vjHi`m3_f$St^ojN-|0&> zC5ZsLkr3`62!kmY$tAl0K&f%iE!=5|{IrFBV(J_Kr+|G=N*2~exQ=ypb!FU%vjhS? zJLaXbx{ea1y!E+Xo#d6U*c{}Z-kGZ*V}wl+kQeKL_w)4a?NP`pb}CO4vCP|%W9JNJ zSKJs5g99DKRgHvk7~kyCN+k=-zkHrg(|`^St!kk$&vSBOnly>GJ0h0JB%B;|JmVki z^H5sq_FrkaMTjlbBpfsZ?cAp%5<7q~`BUV$aOWc%|1Kv zJZkgBD~QI>v6VO(=Oa1BD?-*_fJpJXZQ!Yl1!4$g2R#YJL2KnfYV%%PJ+yO%3Rn^t z;GL{Tzj|sbOAnU&737Iv+Wtg^UA|u=j>iMnIi{D1)X5&mY1- zIXn(2*1Ea8wsnpLK`IAe`_o{-B(_Hw{{TGVwH`TamfOlknOLi{0!9ZrhI(fQk?d+I z?mVI+RgEKNE@O={$QTjMc^v@9`(H{FS8%jSI(_8VvMio_azGadqFy8&_K}*lZl@C> zOLG)C`4}r@m3tfxc@);uZCoyJ0!XNB-HP$kC)%2}5!=OW8to{$d3|Q z+&p2Kh-gV>z+=Jm=REpiq8?I6*KaJY!S^f1q?|~kVSf;RI=iit3ohW$Pc53PZt{HU zIKaSW0G#>^4tjgk5$IPIkLEw0EyI?Kzy{V*cmxy1GmiD3B$6v9WSl@x%B?WL9q@fm z$_Lc@8kSqPo=aP)3kcmzo8(=qC#c+bI296%Sh-s>+BmeR$!BRKb{HjtgzOG;$UfPr z?QLV5WNCGfON2Q&I6X&f6Q2F5!$$GnBx3oQOk;GPc~o^I4w%Q`ip{$51d&)k(c8S~ z@{&=B3^9y?dfr=}dI$EHS%QF|si zTv^6r(u8iFSOhDvz;!=VkdOYiHSvX(gW@}T=q}*XZlobgW-v}pOJE+@{3_G1v*io=IN%wYy!uea=Thr77MrcJ2E2J-@{N02(!)jq3)XccrG6XKWAIQP7sy z-B_CDE$-*Hx{ZyvblZXzw}v2h!Obny7a9$l$$b=-Hk;6HMIue5ayCczm=5?HS3I67 zis~4mNawJeAdsVtpn@}uo}l&}#eQ>)obeT%R~}pJyDo^IXD+6rTs4@Hx4X3NSz-e! z21vm4!31;Z^r@~a)>}krVrG;`56W2%-h|-raB;{u=i0L4@k_f~TcxD)8-UKhL`sa4 zjsWTEewA-V8rHw2>Ze7ppHYuYMQgJhMb-r}05=|hlemAtPo;9IQs{^5-$LNGihF{- zL$U>CX#)~K10d(8QPZiXErQ*_B1YIx;bkNe-iIT<>shnUt{px-MfCf-T|PMkD|FWG zu*myd?Iesb`E$3?O;^+J^*wkZ*;rjHu}BvXTRWytIcy9M?|u}y9qnNDZuT_oV%s2P z`P*v#@V^buCmnwpY|RWz;ma0Zz5+9K`?$iBk9=aWTTs;P<&Iw@$8wB=e2a!wDi0&S zT#?t-nD-ikPjzt&Xz-O%p;vnnJLGf(l<5ud~Y{g?SXZ22r>b2MQ0} z&!Ow@oKz54!#u97I*T$lEowvN|Aa14k+Mr<(M!vZ>i$T{e9>}n}B zxNZXM8&w~8nOp9Vq?PB?sKD-VM=N`wayy;nt*ij#vn*;!0AqJfPBGYKt3H+zX%S;l zo0 z$EAan>~_&vr`pJlG+bL51d+omyVn_Qgn+m_`*Zy3&(mL++9-}0 zowpVwDGWj9k4)z$^Q(|}ib$YXoI1by;PXBK#z7#SN#Sw-0N16IiV%iv%IK=3Nt3&;81?B? 
zlU22uSshVRsLGR{rE*5rREx=TBU`aCqirfT^#C7wg>2Fvm|X`!$iQwn;C?ji zb9)R?V}SB6EX|CMrag01=eubn`CyuC01HMq&$VLPODT>uS)BQ7%YvsQw{&?M^TSzvX*np8DOQNz*Z`8z~iT{70u528L~xzT zpEK%-xw2+?t#P?`C(Kzhg1mr8>Bn6ARfwiVj^hH^!B7BbfG3=gezl7$Njz&V{{Sqb z002e94&aP)#(z3{CDr0fEJQ~)Qi`E>;AfxniaDAMjfm&p7+{f;ZMZf*L>r0ZgWj5{ zT6A2 zH=5xjGOF$cvNvFK&s_a#IW4aXpao$^Mp*<;_s%#3esyVd_`!oN%ZA+BNiBien@#9V z%8@+UNQxV4>x7E zCBn8B%J~$paksvGYW2OGFt*snHak9YM&6nK06x_-X;DuV%$FArG?M^R7}@|o{c370 zF8FUMo&vPBRjlq^s@4sm|BZ5Xh-^s;T(&c-b z*e7)vxt0_4QMt}IP&{DHc zRtTgo5Kx7PRr4N4D>O4@qz|}~ftRr) zV?1%qBSj)=Q>Yft6NOZkKuo*8iHe@;NbK1HA)WldlZ{9qN{ClM)TYz*aW80C^qmDUR@$P55cA6k9%xJ|}?H_rV@=5Pkt?nAkt9gEO@sjJc z2NhICU7B-lt2<#zg$Hlr{b=QC!E#wvLS>bAq>>V(XCoW}arGSM)}YihX}rh--A8Z? zK|-(u0rO)%`R&*dQZ2NN8%Z91Ni#BW^Kr?}DVL3MZqP*+lM=<1bs&=D{{RsqIL2zd z=ebF@xn9>o+Zwyf&P-%U7;>v2+ye36k`6tp;#dm_xO9<{OodzypywcTBe$hKIc{Qs z{P}#sxGB%dIp>OmrNNhUaYoUV+;Q@j`Vq}ae5jsghb$4DjdG2-LZoDkp)fE9_)}IG zC!frkC|D=Rz_!x(&KRD#JGrp1Jm>$!BYL;9J_fvE%|71_#Q+^TsPeNTf>@ zNSxrt6nTUd_rS+RO1*wF#vSQ zIs9uxPS8g2`9NF9*;w}Q7dZpzRAY&*3G!KDjhK%rIPt+G^)%d?zM{MCH;b!sHf@ke z7>uVuw6M;5le>?hs@E4fbdsby+9qcC<7qO@lY(%4`};6qHc{V$<4Yb&yVI!AzW|MM)z?=dG zSRR0tKT%Kq%diNFt+b<<%(3+ij$Luj9@VKXnG-XrmPVH$3%Mt5ImsBL8e9`P{NR-& zgV235=}Ved9=%M+tfG&0$z>@EmUIVl402EBocq*XMY$IF)FpKvDhRMu6!rrgeLb^R zAQsm#Brj&MGC4bl&hhSg)Vigunj}F{Rw7UC;~R0)9Ii7$lS93VQ|UJHMjy{+xZ*Z` zcM-`PcE&O9lTEqPwDT0PPb_mb&LRtf4;ky%8R_pQvFKZ|)u&D?p^HvzEvhPyyg@aq4}lSexvk zOup#Q0IFPuCjeyQ=~b>Haj^j^uEoyRKQ4Q6f6gg$_8M%>ZwcuJ@nm?_Br%d$909?` zN7tXqv}3Wb5oH%4L2oJCka+4(y;>HD98rj^U}#X2A#9S_^*wRgptw|uX#AUanb}4` z1O@sG({mfB-i?ojKeGs8@<>SowquVcIOFOujHcyIlxuVC@`W`&AiC?tJEu6eDR4 zxnEp?*?FLG79+Q*xI6d@-Xc{q@NuPdsGt&!q}&^(izZ{-u2RRVGCYN{sY9 z4nHCJRa=PeZk7_#C@7mTCOG7r0(y>WY4ZrSJAjM~VSe!ZXl+=D9brxJ2Evwn9DCDJ zVX1FNx{?{tD|8tI_03Clsn7PYz(1!Aye}FqtH^_%IR$@hdEqgiYq77lH7L;E%%IW1a_uFt4};< za7>Oj1?Gw>A3z;_s7YyRWk7~SAO>NYA=Kr#Hq5~RP*`mk=87p@+@7V+w7hbOK^xg} zaxyDc*~qXVKv9559nBO~kEvQff3qT&dj@78;PH&rD@k7ECJe-7&UW$8QAI=eM2BeC z4=}hccNB1{Po+zBBSScm7>4Pb(M1d4K_Ww`&3$_uG8GDo_mp9eJ-WO4`@ z{3xQj7Q2Yfmf2d#_ZDNg43o_yWc{Kz_Yi?H2fhsyRuUx|mR7N?n#qH`&&+eytInb| z45I{dnkcOieG8H{{jxHtW&=4LYD<+X!d{x-js~FmlN-f>q-Twk@OyKPDPWpJc_ZFPSI8%?N+_hZE2B2F#%cV-W;<3g*dDcu4aA>q z;NzXV0n&;p-pG>Kxsb%lPC#r7_86?;sLqAZFx$bRiq1@>v8=Z;L1!~594I&=-n`Ra z@eT3RrI1Uv%K$2`0~z$9ilwVMX{6cIY1-Az-LwqlL1YYejB{6R^E__u#fmNz9)gN1 z89hwxsm6_J`QKoW| zwAdYaX2y3M(_pz~w~*~nap^@B%)TQftjKOy#RItIf{bUiRY}@wcpb60agZpYolVJm zmM?#IFxpCzfRa1$QAr`eQUMI3B#vmJrYt&d#T}zJSa51@s zrIhY<83vJMc|=NFfB_&;MK!Tf(UM-Ejul1WT>ResDl6F;G8Exd3=C04HZG+}iCCDa x+D|7HRpx&+a(8e#(M3Rq-m1xPkd#pvLf8VTMWVr* zr|9DLtLV5J#-)x+x{+%L2^GKH>GS^a{`r31{oh)jy`J@~=Xuui{eBnQ#fc^bC;=D@ zM#|m}a>ZaoPh&75*SCnGGh)|gjWHORdV9$F5VPRpt20sWTDB+nT<+atpG_P9ysp=0 z?^PQBR;{Hk-NW2Er1Gfz+W1OyLEf%Md#&^zX}qP2pho`xpK9g7X{m&0&ynvUcb&Hg zF5=2#lm%{HWinYWwfZt&2#V@_)o}XuAb7-Fh53piE$8{Oa!XdgI!~*P6iXXe>oy zj1UvES|1@KGs_DI6e>h+FxHh5-dt0cmV%qY{(H3lIY@)Gz`CIh z!eM0DVEXf;l5T8pr?w|i2g};az@5_IWgb}M7{A8kau5oYBoTpyTMp2C2n1kY66~Nr zK9X%xQV7UZ4J3;HjE8N(M!8wKQf!IT&v9v=67^C^cly5(V|^3EzgjO@$_e;PWNf&t9M~9LU2^+&S1fK}m!j;xl*MN@(3ABD zoxemf?w-(LwK?+UN2a}uKUEgDzaG*lb-84Z+eNqk&^0!k-Z&9ttzxHhNR`)dr<+?H z<79Y*_6S;{{#LRRODAuCaq5JJ8>5b23&g0Gx+J-K8OeHo4OTkDy=+ua@4#4MT8Cc8-Isxza6XfyP@2JxC91<)RV>{QyAYjK z%GX?o=JUf+5E92_wQgW#HD21N-ZOq_wQI(`isGPl2_YrruGZOq-T8{VS|^0ZZQIBT`yruG-*pa%4fT1wUZulQ3XwvnZ_{WQ!1}ecJ9#^$ZhxI6SMx_w$ z4w?yK9)KpC0=d#( zFT9U4GXt_C0&UC_MRK8Azgg0J73kqw@YH2zZ=x2Q)+@NDNX^T$=O&HHDg0s2{Z1VbcwN{w{(G+tL&Ru>QjyxDuwV7JVPjBn5R^O_S(fhLPuvvp>A&I?hkDayp zCQ{3tT#pogQ9o@SpDmZ}vboc=Ry2AA*Y+NnNTQBBjD8+nL(wA)R+u*P->i=2 
z`Df(Q)C%V=TViwHG!=-}@OyVoGJDuew2GSI$ifp7QlT3dxGx4Bs!Av^CsBX+sh}m|x*UQpjA14`4?X>!0Z26cK zHR$cEi)QwW?mu~g3iyKHQydSNF=Sdpw9H`mTwheU3;Sk@n@E!?FJSz76uqNY?68^k z;PL;A4cGfqZ|>WhpBS=ix%vKFO<+-qWTOTdAEDiPfB%`fl9F?l330J8_EgJ~U9-mo zQmQZ0W+xfuHNT8+RSJt7wZC#sC8nlFKWoBeXZl0p0g-A`ZKbC=%`sNlp`t^hzt?`) zC-q%csKC*fXSljdRnCZ9L?o_sQ&#{TrD^&_E7;10GTMRTnGn`)hg+7AsC7rjxX>;tl9%D^`3VjDuYCGfy;eVw!FI2!KJki`F7&AOowC z8$lm7=-5UCn%Q7S1)rltW!wM66Z)|}x;FthI318z#s7_tLKDtkfqsYp^ktUvQ(+z! zKIKOgMLSQf=Yvmz^YTsVJga@p=0UbOE)cG)&7#c?;wt8yO4y5zKE5GN?UOl+fZYik zk_1+K(6F@?mQdwc8PLc<3Ywx=@#Cb79wV%HcIl*d&M@2vcCx|lYtdXMHaPD-^i1zI zy5rzaz$EQ~oS!qICPu=KkkIhyqIs;!Yl2n{F84#2gKz{5;Kc zepl3vNVYSS_&6%ED*AI?CL-z_nNvDzoIAn>W%6x333BiUUsuIz7jp9<98;5$H9Si> zN&qD7Xt!*j#3N4a(PoeIuNd&3FI9Y&z`QNUUhYrPD>xpCIYhrY$1?f&EyzMLBz|W7 z_w~8YD(j1%?z*~*wckg;gK)Q-`3J!~zJ6@+8rPMBkg(YLoX4kzs`&c5p7qC$On@u| zl*_Z=4G0(yrzfV=SI9*sWQSh}ZP2NU0netBeHo0CmZ1#p48&!p)*Osj`mJ-=DKW&&JAzzDXA@Ns?YjSyNT z#1O)yPi$Kkk9g1FvDlEf4Y0~rmafOr5f@bKVR#~{!g*@~#XK zr^@b^Rq#>zVfBT>=zCzhQ;pZrMm$cty(hhcZ>Ywz@*%f9M&qGKO+1M@HW`3Vg(Wn3 zR@)QJrv?dE*kG?g`r!!6+DX5cG@%kR&7|n9dUgtLioVomf zKeRQ*=nSo^D1L7FT&-`+0H>6bNX0AI&U?X>03(iOh0 zDcx6|UL7{bP!%i^M;=qFU%^z<9DwY4RYR%%WeNjslA*Zo`N*|Y98S>XM8n| z9J6k56KDA4&Y^>{2)OeF`N(wRL{GD3?QMx}`Rm+|D0d5ZRelriIT$)*z!saRzYl6uE$Ij05|UE7?qBTBBxJV`AT_ESalRR`O9^!BqszwL7~?`zU@ zI>SVS?hj^n_Q1OkP;#uFUbFwI^CU)WLJU7P@`FS+PR`0_y*g+3I{T$yeBA%R*h2f? zt8g6J+y#x^YrI?f8>37y@Xv)1ppAL0sL7;;d+tAnzMgKJyXMT(T(0)v{%r<>r8#@j zffce*4NyI@Sx*0A79XuyGkNO5XKd;4iJKXx7AK@5huSFgZR)(XTlxGc^84Vzh4;h0y))myLq4*sZgReP zQJ`rCS=k{_3l8vZzbSgU$ozdUDdX^-vUY~Q7Ei`0JGSYVu)cg4HUJ|L5Vt&9HBsrq z$emogNygX5?V6ul8sFN6g%QHnwKG*d44v1){&RbS9Lxwn=dd)NCDDD0ytZs!p)*iT zmOrk<{Itvp{!UFOQSmpoLjYEdIdp-4VxAAMOc*5s6%^oj0OcA$&e(wWSEfe*Hjc|Ew0$;?oSm4ot$u!Zw!byAH19Q zW6t&q{`=bT@)P?i_{wJ-Zza9c!KfXoxW&<`&2}@b9-qX(6Ck7cg02;=e{jdVoI>$I z6fJw;77C=6nZNZm`CphkKtl;O>!`tnrva4U^OpcVmBPARZjARI4Z2u#uB{SZJUVml($ zw6HA872QCYOoU?6!I!y&P@Kyn`-p%$0y3H`02D@-Xf!nhRiK!jt6mJZU?zH1Eyg;) zIxSxJuUUjDI$9Pk3_orV6BTrIKCt{XEUb{|gUy$L6(Avs@jh3P22%!Zw;ZgH7U@}L z5BV65p(x7QOaR1485BGLIJvtI?eeHBRmkR00ttY6<(;jzY*6wJM3dr&Y4V7ccoqf1 z-SGC5cR(qC?J{cB8uB5KfoPOS*wWP0pKz76uA0ONs!7^epyTe)11{PyjgplXQ6>OD{namgpe 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + +def get_1x_lr_params(model): + """ + This generator returns all the parameters of the net except for + the last classification layer. 
Note that for each batchnorm layer, + requires_grad is set to False in deeplab_resnet.py, therefore this function does not return + any batchnorm parameter + """ + b = [model.xception_features] + for i in range(len(b)): + for k in b[i].parameters(): + if k.requires_grad: + yield k + + +def get_10x_lr_params(model): + """ + This generator returns all the parameters for the last layer of the net, + which does the classification of pixel into classes + """ + b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv] + for j in range(len(b)): + for k in b[j].parameters(): + if k.requires_grad: + yield k + + +if __name__ == "__main__": + model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True) + model.eval() + image = torch.randn(1, 3, 512, 512)*255 + with torch.no_grad(): + output = model.forward(image) + print(output.size()) + # print(output) + + + + + + diff --git a/networks/deeplab_xception_synBN.py b/networks/deeplab_xception_synBN.py new file mode 100644 index 0000000..d68312b --- /dev/null +++ b/networks/deeplab_xception_synBN.py @@ -0,0 +1,596 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo +from torch.nn.parameter import Parameter +from collections import OrderedDict +from sync_batchnorm import SynchronizedBatchNorm1d, DataParallelWithCallback, SynchronizedBatchNorm2d + + +def fixed_padding(inputs, kernel_size, rate): + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end)) + return padded_inputs + +class SeparableConv2d_aspp(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0): + super(SeparableConv2d_aspp, self).__init__() + + self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, + groups=inplanes, bias=bias) + self.depthwise_bn = SynchronizedBatchNorm2d(inplanes) + self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias) + self.pointwise_bn = SynchronizedBatchNorm2d(planes) + self.relu = nn.ReLU() + + def forward(self, x): + # x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0]) + x = self.depthwise(x) + x = self.depthwise_bn(x) + x = self.relu(x) + x = self.pointwise(x) + x = self.pointwise_bn(x) + x = self.relu(x) + return x + +class Decoder_module(nn.Module): + def __init__(self, inplanes, planes, rate=1): + super(Decoder_module, self).__init__() + self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, 3, stride=1, dilation=rate,padding=1) + + def forward(self, x): + x = self.atrous_convolution(x) + return x + +class ASPP_module(nn.Module): + def __init__(self, inplanes, planes, rate): + super(ASPP_module, self).__init__() + if rate == 1: + raise RuntimeError() + else: + kernel_size = 3 + padding = rate + self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, 3, stride=1, dilation=rate, + padding=padding) + + def forward(self, x): + x = self.atrous_convolution(x) + return x + + +class ASPP_module_rate0(nn.Module): + def __init__(self, inplanes, planes, rate=1): + super(ASPP_module_rate0, self).__init__() + if rate == 1: + kernel_size = 1 + padding = 0 + self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, + stride=1, padding=padding, dilation=rate, bias=False) + 
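Aside: the point of this `_synBN` variant is multi-GPU training. The bundled `sync_batchnorm` package is designed to reduce BatchNorm statistics across all replicas, but only when the model is wrapped with its callback-aware DataParallel; under plain `nn.DataParallel` each replica falls back to ordinary per-GPU statistics. A minimal sketch (the device ids are an illustrative assumption; the constructor arguments mirror the test block at the bottom of this file):

```python
from sync_batchnorm import DataParallelWithCallback

# Build the synchronized-BN DeepLab and wrap it so that the
# SynchronizedBatchNorm2d layers share statistics across replicas.
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=False)
model = DataParallelWithCallback(model, device_ids=[0, 1]).cuda()
```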
self.bn = SynchronizedBatchNorm2d(planes, eps=1e-5, affine=True) + self.relu = nn.ReLU() + else: + raise RuntimeError() + + def forward(self, x): + x = self.atrous_convolution(x) + x = self.bn(x) + return self.relu(x) + + +class SeparableConv2d_same(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0): + super(SeparableConv2d_same, self).__init__() + + self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, + groups=inplanes, bias=bias) + self.depthwise_bn = SynchronizedBatchNorm2d(inplanes) + self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias) + self.pointwise_bn = SynchronizedBatchNorm2d(planes) + + def forward(self, x): + x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0]) + x = self.depthwise(x) + x = self.depthwise_bn(x) + x = self.pointwise(x) + x = self.pointwise_bn(x) + return x + + +class Block(nn.Module): + def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False): + super(Block, self).__init__() + + if planes != inplanes or stride != 1: + self.skip = nn.Conv2d(inplanes, planes, 1, stride=2, bias=False) + if is_last: + self.skip = nn.Conv2d(inplanes, planes, 1, stride=1, bias=False) + self.skipbn = SynchronizedBatchNorm2d(planes) + else: + self.skip = None + + self.relu = nn.ReLU(inplace=True) + rep = [] + + filters = inplanes + if grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation)) +# rep.append(nn.BatchNorm2d(planes)) + filters = planes + + for i in range(reps - 1): + rep.append(self.relu) + rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation)) +# rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation)) +# rep.append(nn.BatchNorm2d(planes)) + + if not start_with_relu: + rep = rep[1:] + + if stride != 1: + rep.append(self.relu) + rep.append(SeparableConv2d_same(planes, planes, 3, stride=2,dilation=dilation)) + + if is_last: + rep.append(self.relu) + rep.append(SeparableConv2d_same(planes, planes, 3, stride=1,dilation=dilation)) + + + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + # print(x.size(),skip.size()) + x += skip + + return x + +class Block2(nn.Module): + def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False): + super(Block2, self).__init__() + + if planes != inplanes or stride != 1: + self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False) + self.skipbn = SynchronizedBatchNorm2d(planes) + else: + self.skip = None + + self.relu = nn.ReLU(inplace=True) + rep = [] + + filters = inplanes + if grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation)) +# rep.append(nn.BatchNorm2d(planes)) + filters = planes + + for i in range(reps - 1): + rep.append(self.relu) + rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation)) +# rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation)) +# rep.append(nn.BatchNorm2d(planes)) + + if not start_with_relu: + rep = rep[1:] + + if 
stride != 1: + self.block2_lastconv = nn.Sequential(*[self.relu,SeparableConv2d_same(planes, planes, 3, stride=2,dilation=dilation)]) + + if is_last: + rep.append(SeparableConv2d_same(planes, planes, 3, stride=1)) + + + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + low_middle = x.clone() + x1 = x + x1 = self.block2_lastconv(x1) + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x1 += skip + + return x1,low_middle + +class Xception(nn.Module): + """ + Modified Alighed Xception + """ + def __init__(self, inplanes=3, os=16, pretrained=False): + super(Xception, self).__init__() + + if os == 16: + entry_block3_stride = 2 + middle_block_rate = 1 + exit_block_rates = (1, 2) + elif os == 8: + entry_block3_stride = 1 + middle_block_rate = 2 + exit_block_rates = (2, 4) + else: + raise NotImplementedError + + + # Entry flow + self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False) + self.bn1 = SynchronizedBatchNorm2d(32) + self.relu = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False) + self.bn2 = SynchronizedBatchNorm2d(64) + + self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False) + self.block2 = Block2(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True) + self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True) + + # Middle flow + self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True) + + # Exit flow + self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_rates[0], + start_with_relu=True, grow_first=False, is_last=True) + + self.conv3 = SeparableConv2d_aspp(1024, 1536, 3, 
stride=1, dilation=exit_block_rates[1],padding=exit_block_rates[1]) + # self.bn3 = nn.BatchNorm2d(1536) + + self.conv4 = SeparableConv2d_aspp(1536, 1536, 3, stride=1, dilation=exit_block_rates[1],padding=exit_block_rates[1]) + # self.bn4 = nn.BatchNorm2d(1536) + + self.conv5 = SeparableConv2d_aspp(1536, 2048, 3, stride=1, dilation=exit_block_rates[1],padding=exit_block_rates[1]) + # self.bn5 = nn.BatchNorm2d(2048) + + # Init weights + # self.__init_weight() + + # Load pretrained model + if pretrained: + self.__load_xception_pretrained() + + def forward(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + # print('conv1 ',x.size()) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + # print('block1',x.size()) + # low_level_feat = x + x,low_level_feat = self.block2(x) + # print('block2',x.size()) + x = self.block3(x) + # print('xception block3 ',x.size()) + + # Middle flow + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + x = self.block13(x) + x = self.block14(x) + x = self.block15(x) + x = self.block16(x) + x = self.block17(x) + x = self.block18(x) + x = self.block19(x) + + # Exit flow + x = self.block20(x) + x = self.conv3(x) + # x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + # x = self.bn4(x) + x = self.relu(x) + + x = self.conv5(x) + # x = self.bn5(x) + x = self.relu(x) + + return x, low_level_feat + + def __init_weight(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + # m.weight.data.normal_(0, math.sqrt(2. / n)) + torch.nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def __load_xception_pretrained(self): + pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth') + model_dict = {} + state_dict = self.state_dict() + + for k, v in pretrain_dict.items(): + if k in state_dict: + if 'pointwise' in k: + v = v.unsqueeze(-1).unsqueeze(-1) + if k.startswith('block12'): + model_dict[k.replace('block12', 'block20')] = v + elif k.startswith('block11'): + model_dict[k.replace('block11', 'block12')] = v + model_dict[k.replace('block11', 'block13')] = v + model_dict[k.replace('block11', 'block14')] = v + model_dict[k.replace('block11', 'block15')] = v + model_dict[k.replace('block11', 'block16')] = v + model_dict[k.replace('block11', 'block17')] = v + model_dict[k.replace('block11', 'block18')] = v + model_dict[k.replace('block11', 'block19')] = v + elif k.startswith('conv3'): + model_dict[k] = v + elif k.startswith('bn3'): + model_dict[k] = v + model_dict[k.replace('bn3', 'bn4')] = v + elif k.startswith('conv4'): + model_dict[k.replace('conv4', 'conv5')] = v + elif k.startswith('bn4'): + model_dict[k.replace('bn4', 'bn5')] = v + else: + model_dict[k] = v + state_dict.update(model_dict) + self.load_state_dict(state_dict) + +class DeepLabv3_plus(nn.Module): + def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True): + if _print: + print("Constructing DeepLabv3+ model...") + print("Number of classes: {}".format(n_classes)) + print("Output stride: {}".format(os)) + print("Number of Input Channels: {}".format(nInputChannels)) + super(DeepLabv3_plus, self).__init__() + + # Atrous Conv + self.xception_features = Xception(nInputChannels, os, pretrained) + + # ASPP + 
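The dilation rates chosen just below follow the DeepLabv3 convention of scaling with the output stride, so each 3x3 atrous branch covers roughly the same context measured in input pixels whether the backbone runs at stride 16 or stride 8. A quick arithmetic check of that (approximate) invariance:

```python
# Context of a 3x3 atrous branch in input pixels is roughly
# (dilation rate) x (output stride); halving the stride doubles the rates.
assert [r * 16 for r in (6, 12, 18)] == [r * 8 for r in (12, 24, 36)]
```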
if os == 16: + rates = [1, 6, 12, 18] + elif os == 8: + rates = [1, 12, 24, 36] + else: + raise NotImplementedError + + self.aspp1 = ASPP_module_rate0(2048, 256, rate=rates[0]) + self.aspp2 = ASPP_module(2048, 256, rate=rates[1]) + self.aspp3 = ASPP_module(2048, 256, rate=rates[2]) + self.aspp4 = ASPP_module(2048, 256, rate=rates[3]) + + self.relu = nn.ReLU() + + self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), + nn.Conv2d(2048, 256, 1, stride=1, bias=False), + SynchronizedBatchNorm2d(256), + nn.ReLU() + ) + + self.concat_projection_conv1 = nn.Conv2d(1280, 256, 1, bias=False) + self.concat_projection_bn1 = SynchronizedBatchNorm2d(256) + + # adopt [1x1, 48] for channel reduction. + self.feature_projection_conv1 = nn.Conv2d(256, 48, 1, bias=False) + self.feature_projection_bn1 = SynchronizedBatchNorm2d(48) + + self.decoder = nn.Sequential(Decoder_module(304, 256), + Decoder_module(256, 256) + ) + self.semantic = nn.Conv2d(256, n_classes, kernel_size=1, stride=1) + + def forward(self, input): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + + def freeze_bn(self): + for m in self.xception_features.modules(): + if isinstance(m, nn.BatchNorm2d) or isinstance(m,SynchronizedBatchNorm2d): + m.eval() + + def freeze_aspp_bn(self): + for m in self.aspp1.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + for m in self.aspp2.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + for m in self.aspp3.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + for m in self.aspp4.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + + def learnable_parameters(self): + layer_features_BN = [] + layer_features = [] + layer_aspp = [] + layer_projection =[] + layer_decoder = [] + layer_other = [] + model_para = list(self.named_parameters()) + for name,para in model_para: + if 'xception' in name: + if 'bn' in name or 'downsample.1.weight' in name or 'downsample.1.bias' in name: + layer_features_BN.append(para) + else: + layer_features.append(para) + # print (name) + elif 'aspp' in name: + layer_aspp.append(para) + elif 'projection' in name: + layer_projection.append(para) + elif 'decode' in name: + layer_decoder.append(para) + else: + layer_other.append(para) + return layer_features_BN,layer_features,layer_aspp,layer_projection,layer_decoder,layer_other + + + def __init_weight(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + # torch.nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def load_state_dict_new(self, state_dict): + own_state = self.state_dict() + #for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.','') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print ('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + # raise + # print 'copying %s' %name + # if isinstance(param, own_state): + # backwards compatibility for serialized parameters + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + + + +def get_1x_lr_params(model): + """ + This generator returns all the parameters of the net except for + the last classification layer. Note that for each batchnorm layer, + requires_grad is set to False in deeplab_resnet.py, therefore this function does not return + any batchnorm parameter + """ + b = [model.xception_features] + for i in range(len(b)): + for k in b[i].parameters(): + if k.requires_grad: + yield k + + +def get_10x_lr_params(model): + """ + This generator returns all the parameters for the last layer of the net, + which does the classification of pixel into classes + """ + b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv] + for j in range(len(b)): + for k in b[j].parameters(): + if k.requires_grad: + yield k + + +if __name__ == "__main__": + model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True) + model.eval() + # ckt = torch.load('C:\\Users\gaoyi\code_python\deeplab_v3plus.pth') + # model.load_state_dict_new(ckt) + + + image = torch.randn(1, 3, 512, 512)*255 + with torch.no_grad(): + output = model.forward(image) + print(output.size()) + # print(output) + + + + + + diff --git a/networks/deeplab_xception_transfer.py b/networks/deeplab_xception_transfer.py new file mode 100644 index 0000000..f86e424 --- /dev/null +++ b/networks/deeplab_xception_transfer.py @@ -0,0 +1,1003 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo +from torch.nn.parameter import Parameter +import numpy as np +from collections import OrderedDict +from torch.nn import Parameter +from networks import deeplab_xception,gcn, deeplab_xception_synBN +import pdb + +####################### +# base model +####################### + +class deeplab_xception_transfer_basemodel(deeplab_xception.DeepLabv3_plus): + def __init__(self,nInputChannels=3, n_classes=7, os=16,input_channels=256,hidden_layers=128,out_channels=256): + super(deeplab_xception_transfer_basemodel, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os,) + ### source graph + # self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, 
hidden_layers=hidden_layers, + # nodes=n_classes) + # self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # + # self.source_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, + # hidden_layers=hidden_layers, nodes=n_classes + # ) + # self.source_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + # nn.ReLU(True)]) + + ### target graph + self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=n_classes) + self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.target_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_graph' not in name and 'transpose_graph' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + def get_target_parameter(self): + l = [] + other = [] + for name, k in self.named_parameters(): + if 'target' in name or 'semantic' in name: + l.append(k) + else: + other.append(k) + return l, other + + def get_semantic_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'semantic' in name: + l.append(k) + return l + + def get_source_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'source' in name: + l.append(k) + return l + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = 
self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + # print(graph.size(),x.size()) + # graph = self.gcn_encode.forward(graph,relu=True) + # graph = self.graph_conv2.forward(graph,adj=adj2,relu=True) + # graph = self.gcn_decode.forward(graph,relu=True) + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + +class deeplab_xception_transfer_basemodel_savememory(deeplab_xception.DeepLabv3_plus): + def __init__(self,nInputChannels=3, n_classes=7, os=16,input_channels=256,hidden_layers=128,out_channels=256): + super(deeplab_xception_transfer_basemodel_savememory, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os,) + ### source graph + + ### target graph + self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=n_classes) + self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_graph' not in name and 'transpose_graph' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + 
continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + def get_target_parameter(self): + l = [] + other = [] + for name, k in self.named_parameters(): + if 'target' in name or 'semantic' in name: + l.append(k) + else: + other.append(k) + return l, other + + def get_semantic_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'semantic' in name: + l.append(k) + return l + + def get_source_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'source' in name: + l.append(k) + return l + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + # print(graph.size(),x.size()) + # graph = self.gcn_encode.forward(graph,relu=True) + # graph = self.graph_conv2.forward(graph,adj=adj2,relu=True) + # graph = self.gcn_decode.forward(graph,relu=True) + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + +class deeplab_xception_transfer_basemodel_synBN(deeplab_xception_synBN.DeepLabv3_plus): + def __init__(self,nInputChannels=3, n_classes=7, os=16,input_channels=256,hidden_layers=128,out_channels=256): + super(deeplab_xception_transfer_basemodel_synBN, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os,) + ### source graph + # self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + # nodes=n_classes) + # self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # + # self.source_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, + # hidden_layers=hidden_layers, nodes=n_classes + # ) + # self.source_skip_conv = 
nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + # nn.ReLU(True)]) + + ### target graph + self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=n_classes) + self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.target_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + + if 'graph' in name and 'source' not in name and 'target' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + def get_target_parameter(self): + l = [] + other = [] + for name, k in self.named_parameters(): + if 'target' in name or 'semantic' in name: + l.append(k) + else: + other.append(k) + return l, other + + def get_semantic_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'semantic' in name: + l.append(k) + return l + + def get_source_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'source' in name: + l.append(k) + return l + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + + + # target graph + # print('x size',x.size(),adj1.size()) + graph = 
self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + # print(graph.size(),x.size()) + # graph = self.gcn_encode.forward(graph,relu=True) + # graph = self.graph_conv2.forward(graph,adj=adj2,relu=True) + # graph = self.gcn_decode.forward(graph,relu=True) + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + +class deeplab_xception_transfer_basemodel_synBN_savememory(deeplab_xception_synBN.DeepLabv3_plus): + def __init__(self,nInputChannels=3, n_classes=7, os=16,input_channels=256,hidden_layers=128,out_channels=256): + super(deeplab_xception_transfer_basemodel_synBN_savememory, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os, ) + ### source graph + # self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + # nodes=n_classes) + # self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + # + # self.source_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, + # hidden_layers=hidden_layers, nodes=n_classes + # ) + # self.source_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + # nn.ReLU(True)]) + + ### target graph + self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=n_classes) + self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.BatchNorm2d(input_channels), + nn.ReLU(True)]) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + + if 'graph' in name and 'source' not in name and 'target' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose 
dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + def get_target_parameter(self): + l = [] + other = [] + for name, k in self.named_parameters(): + if 'target' in name or 'semantic' in name: + l.append(k) + else: + other.append(k) + return l, other + + def get_semantic_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'semantic' in name: + l.append(k) + return l + + def get_source_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'source' in name: + l.append(k) + return l + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + # print(graph.size(),x.size()) + # graph = self.gcn_encode.forward(graph,relu=True) + # graph = self.graph_conv2.forward(graph,adj=adj2,relu=True) + # graph = self.gcn_decode.forward(graph,relu=True) + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + +####################### +# transfer model +####################### + +class deeplab_xception_transfer_projection(deeplab_xception_transfer_basemodel): + def __init__(self, nInputChannels=3, n_classes=7, os=16,input_channels=256,hidden_layers=128,out_channels=256, + transfer_graph=None, source_classes=20): + super(deeplab_xception_transfer_projection, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os, input_channels=input_channels, + hidden_layers=hidden_layers, out_channels=out_channels, ) + self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=source_classes) + self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv3 = 
gcn.GraphConvolution(hidden_layers, hidden_layers) + self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers,out_features=hidden_layers,adj=transfer_graph, + begin_nodes=source_classes,end_nodes=n_classes) + self.fc_graph = gcn.GraphConvolution(hidden_layers*3, hidden_layers) + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + # source graph + source_graph = self.source_featuremap_2_graph(x) + source_graph1 = self.source_graph_conv1.forward(source_graph,adj=adj2_source, relu=True) + source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True) + source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True) + + source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True) + source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True) + source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True) + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + source_2_target_graph1 = self.similarity_trans(source_graph1, graph) + # graph combine 1 + # print(graph.size()) + # print(source_2_target_graph1.size()) + # print(source_2_target_graph1_v5.size()) + graph = torch.cat((graph,source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)),dim=-1) + graph = self.fc_graph.forward(graph,relu=True) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + + source_2_target_graph2 = self.similarity_trans(source_graph2, graph) + # graph combine 2 + graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=-1) + graph = self.fc_graph.forward(graph, relu=True) + + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + + source_2_target_graph3 = self.similarity_trans(source_graph3, graph) + # graph combine 3 + graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=-1) + graph = self.fc_graph.forward(graph, relu=True) + + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + + # print(graph.size(),x.size()) + + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + + def similarity_trans(self,source,target): + sim = torch.matmul(F.normalize(target, p=2, dim=-1), F.normalize(source, p=2, dim=-1).transpose(-1, -2)) + sim = F.softmax(sim, dim=-1) + return torch.matmul(sim, source) + + def 
load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_' not in name and 'transpose_graph' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + +class deeplab_xception_transfer_projection_savemem(deeplab_xception_transfer_basemodel_savememory): + def __init__(self, nInputChannels=3, n_classes=7, os=16,input_channels=256,hidden_layers=128,out_channels=256, + transfer_graph=None, source_classes=20): + super(deeplab_xception_transfer_projection_savemem, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os, input_channels=input_channels, + hidden_layers=hidden_layers, out_channels=out_channels, ) + self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=source_classes) + self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers,out_features=hidden_layers,adj=transfer_graph, + begin_nodes=source_classes,end_nodes=n_classes) + self.fc_graph = gcn.GraphConvolution(hidden_layers*3, hidden_layers) + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + # source graph + source_graph = self.source_featuremap_2_graph(x) + source_graph1 = self.source_graph_conv1.forward(source_graph,adj=adj2_source, relu=True) + source_graph2 = 
self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True) + source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True) + + source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True) + source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True) + source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True) + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + source_2_target_graph1 = self.similarity_trans(source_graph1, graph) + # graph combine 1 + graph = torch.cat((graph,source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)),dim=-1) + graph = self.fc_graph.forward(graph,relu=True) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + + source_2_target_graph2 = self.similarity_trans(source_graph2, graph) + # graph combine 2 + graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=-1) + graph = self.fc_graph.forward(graph, relu=True) + + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + + source_2_target_graph3 = self.similarity_trans(source_graph3, graph) + # graph combine 3 + graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=-1) + graph = self.fc_graph.forward(graph, relu=True) + + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + + # print(graph.size(),x.size()) + + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + + def similarity_trans(self,source,target): + sim = torch.matmul(F.normalize(target, p=2, dim=-1), F.normalize(source, p=2, dim=-1).transpose(-1, -2)) + sim = F.softmax(sim, dim=-1) + return torch.matmul(sim, source) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_' not in name and 'transpose_graph' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + +class deeplab_xception_transfer_projection_synBN_savemem(deeplab_xception_transfer_basemodel_synBN_savememory): + def __init__(self, nInputChannels=3, n_classes=7, 
os=16,input_channels=256,hidden_layers=128,out_channels=256, + transfer_graph=None, source_classes=20): + super(deeplab_xception_transfer_projection_synBN_savemem, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os, input_channels=input_channels, + hidden_layers=hidden_layers, out_channels=out_channels, ) + self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, + nodes=source_classes) + self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers,out_features=hidden_layers,adj=transfer_graph, + begin_nodes=source_classes,end_nodes=n_classes) + self.fc_graph = gcn.GraphConvolution(hidden_layers*3 ,hidden_layers) + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + # source graph + source_graph = self.source_featuremap_2_graph(x) + source_graph1 = self.source_graph_conv1.forward(source_graph,adj=adj2_source, relu=True) + source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True) + source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True) + + source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True) + source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True) + source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True) + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + source_2_target_graph1 = self.similarity_trans(source_graph1, graph) + # graph combine 1 + graph = torch.cat((graph,source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)),dim=-1) + graph = self.fc_graph.forward(graph,relu=True) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + + source_2_target_graph2 = self.similarity_trans(source_graph2, graph) + # graph combine 2 + graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=-1) + graph = self.fc_graph.forward(graph, relu=True) + + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + + source_2_target_graph3 = self.similarity_trans(source_graph3, graph) + # graph combine 3 + graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=-1) + graph = self.fc_graph.forward(graph, relu=True) + + graph = 
self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + + # print(graph.size(),x.size()) + + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + + def similarity_trans(self,source,target): + sim = torch.matmul(F.normalize(target, p=2, dim=-1), F.normalize(source, p=2, dim=-1).transpose(-1, -2)) + sim = F.softmax(sim, dim=-1) + return torch.matmul(sim, source) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_' not in name and 'transpose_graph' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + +# if __name__ == '__main__': + # net = deeplab_xception_transfer_projection_v3v5_more_savemem() + # img = torch.rand((2,3,128,128)) + # net.eval() + # a = torch.rand((1,1,7,7)) + # net.forward(img, adj1_target=a) \ No newline at end of file diff --git a/networks/deeplab_xception_universal.py b/networks/deeplab_xception_universal.py new file mode 100644 index 0000000..3545581 --- /dev/null +++ b/networks/deeplab_xception_universal.py @@ -0,0 +1,1077 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from collections import OrderedDict +from torch.nn import Parameter +from networks import deeplab_xception, gcn, deeplab_xception_synBN + + + +class deeplab_xception_transfer_basemodel_savememory(deeplab_xception.DeepLabv3_plus): + def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, + source_classes=20, transfer_graph=None): + super(deeplab_xception_transfer_basemodel_savememory, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os,) + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_graph' not in name \ + and 'transpose_graph' not in name and 'middle' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in 
own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + def get_target_parameter(self): + l = [] + other = [] + for name, k in self.named_parameters(): + if 'target' in name or 'semantic' in name: + l.append(k) + else: + other.append(k) + return l, other + + def get_semantic_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'semantic' in name: + l.append(k) + return l + + def get_source_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'source' in name: + l.append(k) + return l + + def top_forward(self, input, adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### source graph + source_graph = self.source_featuremap_2_graph(x) + + source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True) + source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True) + source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True) + + ### target source + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], 
mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + # print(graph.size(),x.size()) + # graph = self.gcn_encode.forward(graph,relu=True) + # graph = self.graph_conv2.forward(graph,adj=adj2,relu=True) + # graph = self.gcn_decode.forward(graph,relu=True) + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True) + + return x + + +class deeplab_xception_transfer_basemodel_savememory_synbn(deeplab_xception_synBN.DeepLabv3_plus): + def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, + source_classes=20, transfer_graph=None): + super(deeplab_xception_transfer_basemodel_savememory_synbn, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, + os=os,) + + + def load_source_model(self,state_dict): + own_state = self.state_dict() + # for name inshop_cos own_state: + # print name + new_state_dict = OrderedDict() + for name, param in state_dict.items(): + name = name.replace('module.', '') + if 'graph' in name and 'source' not in name and 'target' not in name and 'fc_graph' not in name \ + and 'transpose_graph' not in name and 'middle' not in name: + if 'featuremap_2_graph' in name: + name = name.replace('featuremap_2_graph','source_featuremap_2_graph') + else: + name = name.replace('graph','source_graph') + new_state_dict[name] = 0 + if name not in own_state: + if 'num_batch' in name: + continue + print('unexpected key "{}" in state_dict' + .format(name)) + continue + # if isinstance(param, own_state): + if isinstance(param, Parameter): + # backwards compatibility for serialized parameters + param = param.data + try: + own_state[name].copy_(param) + except: + print('While copying the parameter named {}, whose dimensions in the model are' + ' {} and whose dimensions in the checkpoint are {}, ...'.format( + name, own_state[name].size(), param.size())) + continue # i add inshop_cos 2018/02/01 + own_state[name].copy_(param) + # print 'copying %s' %name + + missing = set(own_state.keys()) - set(new_state_dict.keys()) + if len(missing) > 0: + print('missing keys in state_dict: "{}"'.format(missing)) + + def get_target_parameter(self): + l = [] + other = [] + for name, k in self.named_parameters(): + if 'target' in name or 'semantic' in name: + l.append(k) + else: + other.append(k) + return l, other + + def get_semantic_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'semantic' in name: + l.append(k) + return l + + def get_source_parameter(self): + l = [] + for name, k in self.named_parameters(): + if 'source' in name: + l.append(k) + return l + + def 
top_forward(self, input, adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### source graph + source_graph = self.source_featuremap_2_graph(x) + + source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True) + source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True) + source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True) + + ### target source + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + + + def forward(self, input,adj1_target=None, adj2_source=None,adj3_transfer=None ): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### add graph + + + # target graph + # print('x size',x.size(),adj1.size()) + graph = self.target_featuremap_2_graph(x) + + # graph combine + # print(graph.size(),source_2_target_graph.size()) + # graph = self.fc_graph.forward(graph,relu=True) + # print(graph.size()) + + graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True) + graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True) + # print(graph.size(),x.size()) + # graph = self.gcn_encode.forward(graph,relu=True) + # graph = self.graph_conv2.forward(graph,adj=adj2,relu=True) + # graph = self.gcn_decode.forward(graph,relu=True) + graph = self.target_graph_2_fea.forward(graph, x) + x = self.target_skip_conv(x) + x = x + graph + + ### + x = self.semantic(x) + x = F.upsample(x, size=input.size()[2:], 
mode='bilinear', align_corners=True) + + return x + + +class deeplab_xception_end2end_3d(deeplab_xception_transfer_basemodel_savememory): + def __init__(self, nInputChannels=3, n_classes=20, os=16, input_channels=256, hidden_layers=128, out_channels=256, + source_classes=7, middle_classes=18, transfer_graph=None): + super(deeplab_xception_end2end_3d, self).__init__(nInputChannels=nInputChannels, + n_classes=n_classes, + os=os, ) + ### source graph + self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, + hidden_layers=hidden_layers, + nodes=source_classes) + self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.source_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, + output_channels=out_channels, + hidden_layers=hidden_layers, nodes=source_classes + ) + self.source_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + self.source_semantic = nn.Conv2d(out_channels,source_classes,1) + self.middle_semantic = nn.Conv2d(out_channels, middle_classes, 1) + + ### target graph 1 + self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, + hidden_layers=hidden_layers, + nodes=n_classes) + self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, + output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + ### middle + self.middle_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, + hidden_layers=hidden_layers, + nodes=middle_classes) + self.middle_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.middle_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.middle_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.middle_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, + output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.middle_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + ### multi transpose + self.transpose_graph_source2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=source_classes, end_nodes=n_classes) + self.transpose_graph_target2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=n_classes, end_nodes=source_classes) + + self.transpose_graph_middle2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=middle_classes, end_nodes=source_classes) + self.transpose_graph_middle2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=middle_classes, end_nodes=source_classes) + + self.transpose_graph_source2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + 
adj=transfer_graph, + begin_nodes=source_classes, end_nodes=middle_classes) + self.transpose_graph_target2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=n_classes, end_nodes=middle_classes) + + + self.fc_graph_source = gcn.GraphConvolution(hidden_layers * 5, hidden_layers) + self.fc_graph_target = gcn.GraphConvolution(hidden_layers * 5, hidden_layers) + self.fc_graph_middle = gcn.GraphConvolution(hidden_layers * 5, hidden_layers) + + def freeze_totally_bn(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad = False + m.bias.requires_grad = False + + def freeze_backbone_bn(self): + for m in self.xception_features.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad = False + m.bias.requires_grad = False + + def top_forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer_s2t=None, adj3_transfer_t2s=None, + adj4_middle=None,adj5_transfer_s2m=None,adj6_transfer_t2m=None,adj5_transfer_m2s=None,adj6_transfer_m2t=None,): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### source graph + source_graph = self.source_featuremap_2_graph(x) + ### target source + target_graph = self.target_featuremap_2_graph(x) + ### middle source + middle_graph = self.middle_featuremap_2_graph(x) + + ##### end2end multi task + + ### first task + # print(source_graph.size(),target_graph.size()) + source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True) + target_graph1 = self.target_graph_conv1.forward(target_graph, adj=adj1_target, relu=True) + middle_graph1 = self.target_graph_conv1.forward(middle_graph, adj=adj4_middle, relu=True) + + # source 2 target & middle + source_2_target_graph1_v5 = self.transpose_graph_source2target.forward(source_graph1, adj=adj3_transfer_s2t, + relu=True) + source_2_middle_graph1_v5 = self.transpose_graph_source2middle.forward(source_graph1,adj=adj5_transfer_s2m, + relu=True) + # target 2 source & middle + target_2_source_graph1_v5 = self.transpose_graph_target2source.forward(target_graph1, adj=adj3_transfer_t2s, + relu=True) + target_2_middle_graph1_v5 = self.transpose_graph_target2middle.forward(target_graph1, adj=adj6_transfer_t2m, + relu=True) + # middle 2 source & target + middle_2_source_graph1_v5 = self.transpose_graph_middle2source.forward(middle_graph1, adj=adj5_transfer_m2s, + relu=True) + middle_2_target_graph1_v5 = self.transpose_graph_middle2target.forward(middle_graph1, adj=adj6_transfer_m2t, + relu=True) + # source 2 middle target + source_2_target_graph1 = self.similarity_trans(source_graph1, target_graph1) + source_2_middle_graph1 = self.similarity_trans(source_graph1, middle_graph1) + 
# target 2 source middle + target_2_source_graph1 = self.similarity_trans(target_graph1, source_graph1) + target_2_middle_graph1 = self.similarity_trans(target_graph1, middle_graph1) + # middle 2 source target + middle_2_source_graph1 = self.similarity_trans(middle_graph1, source_graph1) + middle_2_target_graph1 = self.similarity_trans(middle_graph1, target_graph1) + + ## concat + # print(source_graph1.size(), target_2_source_graph1.size(), ) + source_graph1 = torch.cat( + (source_graph1, target_2_source_graph1, target_2_source_graph1_v5, + middle_2_source_graph1, middle_2_source_graph1_v5), dim=-1) + source_graph1 = self.fc_graph_source.forward(source_graph1, relu=True) + # target + target_graph1 = torch.cat( + (target_graph1, source_2_target_graph1, source_2_target_graph1_v5, + middle_2_target_graph1, middle_2_target_graph1_v5), dim=-1) + target_graph1 = self.fc_graph_target.forward(target_graph1, relu=True) + # middle + middle_graph1 = torch.cat((middle_graph1, source_2_middle_graph1, source_2_middle_graph1_v5, + target_2_middle_graph1, target_2_middle_graph1_v5), dim=-1) + middle_graph1 = self.fc_graph_middle.forward(middle_graph1, relu=True) + + + ### seconde task + source_graph2 = self.source_graph_conv1.forward(source_graph1, adj=adj2_source, relu=True) + target_graph2 = self.target_graph_conv1.forward(target_graph1, adj=adj1_target, relu=True) + middle_graph2 = self.target_graph_conv1.forward(middle_graph1, adj=adj4_middle, relu=True) + + # source 2 target & middle + source_2_target_graph2_v5 = self.transpose_graph_source2target.forward(source_graph2, adj=adj3_transfer_s2t, + relu=True) + source_2_middle_graph2_v5 = self.transpose_graph_source2middle.forward(source_graph2, adj=adj5_transfer_s2m, + relu=True) + # target 2 source & middle + target_2_source_graph2_v5 = self.transpose_graph_target2source.forward(target_graph2, adj=adj3_transfer_t2s, + relu=True) + target_2_middle_graph2_v5 = self.transpose_graph_target2middle.forward(target_graph2, adj=adj6_transfer_t2m, + relu=True) + # middle 2 source & target + middle_2_source_graph2_v5 = self.transpose_graph_middle2source.forward(middle_graph2, adj=adj5_transfer_m2s, + relu=True) + middle_2_target_graph2_v5 = self.transpose_graph_middle2target.forward(middle_graph2, adj=adj6_transfer_m2t, + relu=True) + # source 2 middle target + source_2_target_graph2 = self.similarity_trans(source_graph2, target_graph2) + source_2_middle_graph2 = self.similarity_trans(source_graph2, middle_graph2) + # target 2 source middle + target_2_source_graph2 = self.similarity_trans(target_graph2, source_graph2) + target_2_middle_graph2 = self.similarity_trans(target_graph2, middle_graph2) + # middle 2 source target + middle_2_source_graph2 = self.similarity_trans(middle_graph2, source_graph2) + middle_2_target_graph2 = self.similarity_trans(middle_graph2, target_graph2) + + ## concat + # print(source_graph1.size(), target_2_source_graph1.size(), ) + source_graph2 = torch.cat( + (source_graph2, target_2_source_graph2, target_2_source_graph2_v5, + middle_2_source_graph2, middle_2_source_graph2_v5), dim=-1) + source_graph2 = self.fc_graph_source.forward(source_graph2, relu=True) + # target + target_graph2 = torch.cat( + (target_graph2, source_2_target_graph2, source_2_target_graph2_v5, + middle_2_target_graph2, middle_2_target_graph2_v5), dim=-1) + target_graph2 = self.fc_graph_target.forward(target_graph2, relu=True) + # middle + middle_graph2 = torch.cat((middle_graph2, source_2_middle_graph2, source_2_middle_graph2_v5, + target_2_middle_graph2, 
target_2_middle_graph2_v5), dim=-1) + middle_graph2 = self.fc_graph_middle.forward(middle_graph2, relu=True) + + + ### third task + source_graph3 = self.source_graph_conv1.forward(source_graph2, adj=adj2_source, relu=True) + target_graph3 = self.target_graph_conv1.forward(target_graph2, adj=adj1_target, relu=True) + middle_graph3 = self.target_graph_conv1.forward(middle_graph2, adj=adj4_middle, relu=True) + + # source 2 target & middle + source_2_target_graph3_v5 = self.transpose_graph_source2target.forward(source_graph3, adj=adj3_transfer_s2t, + relu=True) + source_2_middle_graph3_v5 = self.transpose_graph_source2middle.forward(source_graph3, adj=adj5_transfer_s2m, + relu=True) + # target 2 source & middle + target_2_source_graph3_v5 = self.transpose_graph_target2source.forward(target_graph3, adj=adj3_transfer_t2s, + relu=True) + target_2_middle_graph3_v5 = self.transpose_graph_target2middle.forward(target_graph3, adj=adj6_transfer_t2m, + relu=True) + # middle 2 source & target + middle_2_source_graph3_v5 = self.transpose_graph_middle2source.forward(middle_graph3, adj=adj5_transfer_m2s, + relu=True) + middle_2_target_graph3_v5 = self.transpose_graph_middle2target.forward(middle_graph3, adj=adj6_transfer_m2t, + relu=True) + # source 2 middle target + source_2_target_graph3 = self.similarity_trans(source_graph3, target_graph3) + source_2_middle_graph3 = self.similarity_trans(source_graph3, middle_graph3) + # target 2 source middle + target_2_source_graph3 = self.similarity_trans(target_graph3, source_graph3) + target_2_middle_graph3 = self.similarity_trans(target_graph3, middle_graph3) + # middle 2 source target + middle_2_source_graph3 = self.similarity_trans(middle_graph3, source_graph3) + middle_2_target_graph3 = self.similarity_trans(middle_graph3, target_graph3) + + ## concat + # print(source_graph1.size(), target_2_source_graph1.size(), ) + source_graph3 = torch.cat( + (source_graph3, target_2_source_graph3, target_2_source_graph3_v5, + middle_2_source_graph3, middle_2_source_graph3_v5), dim=-1) + source_graph3 = self.fc_graph_source.forward(source_graph3, relu=True) + # target + target_graph3 = torch.cat( + (target_graph3, source_2_target_graph3, source_2_target_graph3_v5, + middle_2_target_graph3, middle_2_target_graph3_v5), dim=-1) + target_graph3 = self.fc_graph_target.forward(target_graph3, relu=True) + # middle + middle_graph3 = torch.cat((middle_graph3, source_2_middle_graph3, source_2_middle_graph3_v5, + target_2_middle_graph3, target_2_middle_graph3_v5), dim=-1) + middle_graph3 = self.fc_graph_middle.forward(middle_graph3, relu=True) + + return source_graph3, target_graph3, middle_graph3, x + + def similarity_trans(self,source,target): + sim = torch.matmul(F.normalize(target, p=2, dim=-1), F.normalize(source, p=2, dim=-1).transpose(-1, -2)) + sim = F.softmax(sim, dim=-1) + return torch.matmul(sim, source) + + def bottom_forward_source(self, input, source_graph): + # print('input size') + # print(input.size()) + # print(source_graph.size()) + graph = self.source_graph_2_fea.forward(source_graph, input) + x = self.source_skip_conv(input) + x = x + graph + x = self.source_semantic(x) + return x + + def bottom_forward_target(self, input, target_graph): + graph = self.target_graph_2_fea.forward(target_graph, input) + x = self.target_skip_conv(input) + x = x + graph + x = self.semantic(x) + return x + + def bottom_forward_middle(self, input, target_graph): + graph = self.middle_graph_2_fea.forward(target_graph, input) + x = self.middle_skip_conv(input) + x = x + graph + x = 
self.middle_semantic(x) + return x + + def forward(self, input_source, input_target=None, input_middle=None, adj1_target=None, adj2_source=None, + adj3_transfer_s2t=None, adj3_transfer_t2s=None, adj4_middle=None,adj5_transfer_s2m=None, + adj6_transfer_t2m=None,adj5_transfer_m2s=None,adj6_transfer_m2t=None,): + if input_source is None and input_target is not None and input_middle is None: + # target + target_batch = input_target.size(0) + input = input_target + + source_graph, target_graph, middle_graph, x = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, + adj3_transfer_s2t=adj3_transfer_s2t, + adj3_transfer_t2s=adj3_transfer_t2s, + adj4_middle=adj4_middle, + adj5_transfer_s2m=adj5_transfer_s2m, + adj6_transfer_t2m=adj6_transfer_t2m, + adj5_transfer_m2s=adj5_transfer_m2s, + adj6_transfer_m2t=adj6_transfer_m2t) + + # source_x = self.bottom_forward_source(source_x, source_graph) + target_x = self.bottom_forward_target(x, target_graph) + + target_x = F.upsample(target_x, size=input.size()[2:], mode='bilinear', align_corners=True) + return None, target_x, None + + if input_source is not None and input_target is None and input_middle is None: + # source + source_batch = input_source.size(0) + source_list = range(source_batch) + input = input_source + + source_graph, target_graph, middle_graph, x = self.top_forward(input, adj1_target=adj1_target, + adj2_source=adj2_source, + adj3_transfer_s2t=adj3_transfer_s2t, + adj3_transfer_t2s=adj3_transfer_t2s, + adj4_middle=adj4_middle, + adj5_transfer_s2m=adj5_transfer_s2m, + adj6_transfer_t2m=adj6_transfer_t2m, + adj5_transfer_m2s=adj5_transfer_m2s, + adj6_transfer_m2t=adj6_transfer_m2t) + + source_x = self.bottom_forward_source(x, source_graph) + source_x = F.upsample(source_x, size=input.size()[2:], mode='bilinear', align_corners=True) + return source_x, None, None + + if input_middle is not None and input_source is None and input_target is None: + # middle + input = input_middle + + source_graph, target_graph, middle_graph, x = self.top_forward(input, adj1_target=adj1_target, + adj2_source=adj2_source, + adj3_transfer_s2t=adj3_transfer_s2t, + adj3_transfer_t2s=adj3_transfer_t2s, + adj4_middle=adj4_middle, + adj5_transfer_s2m=adj5_transfer_s2m, + adj6_transfer_t2m=adj6_transfer_t2m, + adj5_transfer_m2s=adj5_transfer_m2s, + adj6_transfer_m2t=adj6_transfer_m2t) + + middle_x = self.bottom_forward_middle(x, source_graph) + middle_x = F.upsample(middle_x, size=input.size()[2:], mode='bilinear', align_corners=True) + return None, None, middle_x + + +class deeplab_xception_end2end_3d_synbn(deeplab_xception_transfer_basemodel_savememory_synbn): + def __init__(self, nInputChannels=3, n_classes=20, os=16, input_channels=256, hidden_layers=128, out_channels=256, + source_classes=7, middle_classes=18, transfer_graph=None): + super(deeplab_xception_end2end_3d_synbn, self).__init__(nInputChannels=nInputChannels, + n_classes=n_classes, + os=os, ) + ### source graph + self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, + hidden_layers=hidden_layers, + nodes=source_classes) + self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.source_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, + output_channels=out_channels, + hidden_layers=hidden_layers, nodes=source_classes + ) + 
self.source_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + self.source_semantic = nn.Conv2d(out_channels,source_classes,1) + self.middle_semantic = nn.Conv2d(out_channels, middle_classes, 1) + + ### target graph 1 + self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, + hidden_layers=hidden_layers, + nodes=n_classes) + self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, + output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + ### middle + self.middle_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, + hidden_layers=hidden_layers, + nodes=middle_classes) + self.middle_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.middle_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers) + self.middle_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers) + + self.middle_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, + output_channels=out_channels, + hidden_layers=hidden_layers, nodes=n_classes + ) + self.middle_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), + nn.ReLU(True)]) + + ### multi transpose + self.transpose_graph_source2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=source_classes, end_nodes=n_classes) + self.transpose_graph_target2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=n_classes, end_nodes=source_classes) + + self.transpose_graph_middle2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=middle_classes, end_nodes=source_classes) + self.transpose_graph_middle2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=middle_classes, end_nodes=source_classes) + + self.transpose_graph_source2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=source_classes, end_nodes=middle_classes) + self.transpose_graph_target2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, + adj=transfer_graph, + begin_nodes=n_classes, end_nodes=middle_classes) + + + self.fc_graph_source = gcn.GraphConvolution(hidden_layers * 5, hidden_layers) + self.fc_graph_target = gcn.GraphConvolution(hidden_layers * 5, hidden_layers) + self.fc_graph_middle = gcn.GraphConvolution(hidden_layers * 5, hidden_layers) + + + def top_forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer_s2t=None, adj3_transfer_t2s=None, + adj4_middle=None,adj5_transfer_s2m=None,adj6_transfer_t2m=None,adj5_transfer_m2s=None,adj6_transfer_m2t=None,): + x, low_level_features = self.xception_features(input) + # print(x.size()) + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + + x = torch.cat((x1, x2, 
x3, x4, x5), dim=1) + + x = self.concat_projection_conv1(x) + x = self.concat_projection_bn1(x) + x = self.relu(x) + # print(x.size()) + x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True) + + low_level_features = self.feature_projection_conv1(low_level_features) + low_level_features = self.feature_projection_bn1(low_level_features) + low_level_features = self.relu(low_level_features) + # print(low_level_features.size()) + # print(x.size()) + x = torch.cat((x, low_level_features), dim=1) + x = self.decoder(x) + + ### source graph + source_graph = self.source_featuremap_2_graph(x) + ### target source + target_graph = self.target_featuremap_2_graph(x) + ### middle source + middle_graph = self.middle_featuremap_2_graph(x) + + ##### end2end multi task + + ### first task + # print(source_graph.size(),target_graph.size()) + source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True) + target_graph1 = self.target_graph_conv1.forward(target_graph, adj=adj1_target, relu=True) + middle_graph1 = self.target_graph_conv1.forward(middle_graph, adj=adj4_middle, relu=True) + + # source 2 target & middle + source_2_target_graph1_v5 = self.transpose_graph_source2target.forward(source_graph1, adj=adj3_transfer_s2t, + relu=True) + source_2_middle_graph1_v5 = self.transpose_graph_source2middle.forward(source_graph1,adj=adj5_transfer_s2m, + relu=True) + # target 2 source & middle + target_2_source_graph1_v5 = self.transpose_graph_target2source.forward(target_graph1, adj=adj3_transfer_t2s, + relu=True) + target_2_middle_graph1_v5 = self.transpose_graph_target2middle.forward(target_graph1, adj=adj6_transfer_t2m, + relu=True) + # middle 2 source & target + middle_2_source_graph1_v5 = self.transpose_graph_middle2source.forward(middle_graph1, adj=adj5_transfer_m2s, + relu=True) + middle_2_target_graph1_v5 = self.transpose_graph_middle2target.forward(middle_graph1, adj=adj6_transfer_m2t, + relu=True) + # source 2 middle target + source_2_target_graph1 = self.similarity_trans(source_graph1, target_graph1) + source_2_middle_graph1 = self.similarity_trans(source_graph1, middle_graph1) + # target 2 source middle + target_2_source_graph1 = self.similarity_trans(target_graph1, source_graph1) + target_2_middle_graph1 = self.similarity_trans(target_graph1, middle_graph1) + # middle 2 source target + middle_2_source_graph1 = self.similarity_trans(middle_graph1, source_graph1) + middle_2_target_graph1 = self.similarity_trans(middle_graph1, target_graph1) + + ## concat + # print(source_graph1.size(), target_2_source_graph1.size(), ) + source_graph1 = torch.cat( + (source_graph1, target_2_source_graph1, target_2_source_graph1_v5, + middle_2_source_graph1, middle_2_source_graph1_v5), dim=-1) + source_graph1 = self.fc_graph_source.forward(source_graph1, relu=True) + # target + target_graph1 = torch.cat( + (target_graph1, source_2_target_graph1, source_2_target_graph1_v5, + middle_2_target_graph1, middle_2_target_graph1_v5), dim=-1) + target_graph1 = self.fc_graph_target.forward(target_graph1, relu=True) + # middle + middle_graph1 = torch.cat((middle_graph1, source_2_middle_graph1, source_2_middle_graph1_v5, + target_2_middle_graph1, target_2_middle_graph1_v5), dim=-1) + middle_graph1 = self.fc_graph_middle.forward(middle_graph1, relu=True) + + + ### seconde task + source_graph2 = self.source_graph_conv1.forward(source_graph1, adj=adj2_source, relu=True) + target_graph2 = self.target_graph_conv1.forward(target_graph1, adj=adj1_target, relu=True) + middle_graph2 = 
self.target_graph_conv1.forward(middle_graph1, adj=adj4_middle, relu=True) + + # source 2 target & middle + source_2_target_graph2_v5 = self.transpose_graph_source2target.forward(source_graph2, adj=adj3_transfer_s2t, + relu=True) + source_2_middle_graph2_v5 = self.transpose_graph_source2middle.forward(source_graph2, adj=adj5_transfer_s2m, + relu=True) + # target 2 source & middle + target_2_source_graph2_v5 = self.transpose_graph_target2source.forward(target_graph2, adj=adj3_transfer_t2s, + relu=True) + target_2_middle_graph2_v5 = self.transpose_graph_target2middle.forward(target_graph2, adj=adj6_transfer_t2m, + relu=True) + # middle 2 source & target + middle_2_source_graph2_v5 = self.transpose_graph_middle2source.forward(middle_graph2, adj=adj5_transfer_m2s, + relu=True) + middle_2_target_graph2_v5 = self.transpose_graph_middle2target.forward(middle_graph2, adj=adj6_transfer_m2t, + relu=True) + # source 2 middle target + source_2_target_graph2 = self.similarity_trans(source_graph2, target_graph2) + source_2_middle_graph2 = self.similarity_trans(source_graph2, middle_graph2) + # target 2 source middle + target_2_source_graph2 = self.similarity_trans(target_graph2, source_graph2) + target_2_middle_graph2 = self.similarity_trans(target_graph2, middle_graph2) + # middle 2 source target + middle_2_source_graph2 = self.similarity_trans(middle_graph2, source_graph2) + middle_2_target_graph2 = self.similarity_trans(middle_graph2, target_graph2) + + ## concat + # print(source_graph1.size(), target_2_source_graph1.size(), ) + source_graph2 = torch.cat( + (source_graph2, target_2_source_graph2, target_2_source_graph2_v5, + middle_2_source_graph2, middle_2_source_graph2_v5), dim=-1) + source_graph2 = self.fc_graph_source.forward(source_graph2, relu=True) + # target + target_graph2 = torch.cat( + (target_graph2, source_2_target_graph2, source_2_target_graph2_v5, + middle_2_target_graph2, middle_2_target_graph2_v5), dim=-1) + target_graph2 = self.fc_graph_target.forward(target_graph2, relu=True) + # middle + middle_graph2 = torch.cat((middle_graph2, source_2_middle_graph2, source_2_middle_graph2_v5, + target_2_middle_graph2, target_2_middle_graph2_v5), dim=-1) + middle_graph2 = self.fc_graph_middle.forward(middle_graph2, relu=True) + + + ### third task + source_graph3 = self.source_graph_conv1.forward(source_graph2, adj=adj2_source, relu=True) + target_graph3 = self.target_graph_conv1.forward(target_graph2, adj=adj1_target, relu=True) + middle_graph3 = self.target_graph_conv1.forward(middle_graph2, adj=adj4_middle, relu=True) + + # source 2 target & middle + source_2_target_graph3_v5 = self.transpose_graph_source2target.forward(source_graph3, adj=adj3_transfer_s2t, + relu=True) + source_2_middle_graph3_v5 = self.transpose_graph_source2middle.forward(source_graph3, adj=adj5_transfer_s2m, + relu=True) + # target 2 source & middle + target_2_source_graph3_v5 = self.transpose_graph_target2source.forward(target_graph3, adj=adj3_transfer_t2s, + relu=True) + target_2_middle_graph3_v5 = self.transpose_graph_target2middle.forward(target_graph3, adj=adj6_transfer_t2m, + relu=True) + # middle 2 source & target + middle_2_source_graph3_v5 = self.transpose_graph_middle2source.forward(middle_graph3, adj=adj5_transfer_m2s, + relu=True) + middle_2_target_graph3_v5 = self.transpose_graph_middle2target.forward(middle_graph3, adj=adj6_transfer_m2t, + relu=True) + # source 2 middle target + source_2_target_graph3 = self.similarity_trans(source_graph3, target_graph3) + source_2_middle_graph3 = 
self.similarity_trans(source_graph3, middle_graph3) + # target 2 source middle + target_2_source_graph3 = self.similarity_trans(target_graph3, source_graph3) + target_2_middle_graph3 = self.similarity_trans(target_graph3, middle_graph3) + # middle 2 source target + middle_2_source_graph3 = self.similarity_trans(middle_graph3, source_graph3) + middle_2_target_graph3 = self.similarity_trans(middle_graph3, target_graph3) + + ## concat + # print(source_graph1.size(), target_2_source_graph1.size(), ) + source_graph3 = torch.cat( + (source_graph3, target_2_source_graph3, target_2_source_graph3_v5, + middle_2_source_graph3, middle_2_source_graph3_v5), dim=-1) + source_graph3 = self.fc_graph_source.forward(source_graph3, relu=True) + # target + target_graph3 = torch.cat( + (target_graph3, source_2_target_graph3, source_2_target_graph3_v5, + middle_2_target_graph3, middle_2_target_graph3_v5), dim=-1) + target_graph3 = self.fc_graph_target.forward(target_graph3, relu=True) + # middle + middle_graph3 = torch.cat((middle_graph3, source_2_middle_graph3, source_2_middle_graph3_v5, + target_2_middle_graph3, target_2_middle_graph3_v5), dim=-1) + middle_graph3 = self.fc_graph_middle.forward(middle_graph3, relu=True) + + return source_graph3, target_graph3, middle_graph3, x + + def similarity_trans(self,source,target): + sim = torch.matmul(F.normalize(target, p=2, dim=-1), F.normalize(source, p=2, dim=-1).transpose(-1, -2)) + sim = F.softmax(sim, dim=-1) + return torch.matmul(sim, source) + + def bottom_forward_source(self, input, source_graph): + # print('input size') + # print(input.size()) + # print(source_graph.size()) + graph = self.source_graph_2_fea.forward(source_graph, input) + x = self.source_skip_conv(input) + x = x + graph + x = self.source_semantic(x) + return x + + def bottom_forward_target(self, input, target_graph): + graph = self.target_graph_2_fea.forward(target_graph, input) + x = self.target_skip_conv(input) + x = x + graph + x = self.semantic(x) + return x + + def bottom_forward_middle(self, input, target_graph): + graph = self.middle_graph_2_fea.forward(target_graph, input) + x = self.middle_skip_conv(input) + x = x + graph + x = self.middle_semantic(x) + return x + + def forward(self, input_source, input_target=None, input_middle=None, adj1_target=None, adj2_source=None, + adj3_transfer_s2t=None, adj3_transfer_t2s=None, adj4_middle=None,adj5_transfer_s2m=None, + adj6_transfer_t2m=None,adj5_transfer_m2s=None,adj6_transfer_m2t=None,): + + if input_source is None and input_target is not None and input_middle is None: + # target + target_batch = input_target.size(0) + input = input_target + + source_graph, target_graph, middle_graph, x = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, + adj3_transfer_s2t=adj3_transfer_s2t, + adj3_transfer_t2s=adj3_transfer_t2s, + adj4_middle=adj4_middle, + adj5_transfer_s2m=adj5_transfer_s2m, + adj6_transfer_t2m=adj6_transfer_t2m, + adj5_transfer_m2s=adj5_transfer_m2s, + adj6_transfer_m2t=adj6_transfer_m2t) + + # source_x = self.bottom_forward_source(source_x, source_graph) + target_x = self.bottom_forward_target(x, target_graph) + + target_x = F.upsample(target_x, size=input.size()[2:], mode='bilinear', align_corners=True) + return None, target_x, None + + if input_source is not None and input_target is None and input_middle is None: + # source + source_batch = input_source.size(0) + source_list = range(source_batch) + input = input_source + + source_graph, target_graph, middle_graph, x = self.top_forward(input, 
adj1_target=adj1_target, + adj2_source=adj2_source, + adj3_transfer_s2t=adj3_transfer_s2t, + adj3_transfer_t2s=adj3_transfer_t2s, + adj4_middle=adj4_middle, + adj5_transfer_s2m=adj5_transfer_s2m, + adj6_transfer_t2m=adj6_transfer_t2m, + adj5_transfer_m2s=adj5_transfer_m2s, + adj6_transfer_m2t=adj6_transfer_m2t) + + source_x = self.bottom_forward_source(x, source_graph) + source_x = F.upsample(source_x, size=input.size()[2:], mode='bilinear', align_corners=True) + return source_x, None, None + + if input_middle is not None and input_source is None and input_target is None: + # middle + input = input_middle + + source_graph, target_graph, middle_graph, x = self.top_forward(input, adj1_target=adj1_target, + adj2_source=adj2_source, + adj3_transfer_s2t=adj3_transfer_s2t, + adj3_transfer_t2s=adj3_transfer_t2s, + adj4_middle=adj4_middle, + adj5_transfer_s2m=adj5_transfer_s2m, + adj6_transfer_t2m=adj6_transfer_t2m, + adj5_transfer_m2s=adj5_transfer_m2s, + adj6_transfer_m2t=adj6_transfer_m2t) + + middle_x = self.bottom_forward_middle(x, source_graph) + middle_x = F.upsample(middle_x, size=input.size()[2:], mode='bilinear', align_corners=True) + return None, None, middle_x + + +if __name__ == '__main__': + net = deeplab_xception_end2end_3d() + net.freeze_totally_bn() + img1 = torch.rand((1,3,128,128)) + img2 = torch.rand((1, 3, 128, 128)) + a1 = torch.ones((1,1,7,20)) + a2 = torch.ones((1,1,20,7)) + net.eval() + net.forward(img1,img2,adj3_transfer_t2s=a2,adj3_transfer_s2t=a1) \ No newline at end of file diff --git a/networks/gcn.py b/networks/gcn.py new file mode 100644 index 0000000..cf65805 --- /dev/null +++ b/networks/gcn.py @@ -0,0 +1,271 @@ +import math +import torch +from torch.nn.parameter import Parameter +import torch.nn as nn +import torch.nn.functional as F +from networks import graph +# import pdb + +class GraphConvolution(nn.Module): + + def __init__(self,in_features,out_features,bias=False): + super(GraphConvolution, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features,out_features)) + if bias: + self.bias = Parameter(torch.FloatTensor(out_features)) + else: + self.register_parameter('bias',None) + # self.reset_parameters() + + def reset_parameters(self): + # stdv = 1./math.sqrt(self.weight(1)) + # self.weight.data.uniform_(-stdv,stdv) + torch.nn.init.xavier_uniform_(self.weight) + # if self.bias is not None: + # self.bias.data.uniform_(-stdv,stdv) + + def forward(self, input,adj=None,relu=False): + support = torch.matmul(input,self.weight) + # print(support.size(),adj.size()) + if adj is not None: + output = torch.matmul(adj,support) + else: + output = support + # print(output.size()) + if self.bias is not None: + return output + self.bias + else: + if relu: + return F.relu(output) + else: + return output + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + str(self.in_features) + ' -> ' \ + + str(self.out_features) + ')' + +class Featuremaps_to_Graph(nn.Module): + + def __init__(self,input_channels,hidden_layers,nodes=7): + super(Featuremaps_to_Graph, self).__init__() + self.pre_fea = Parameter(torch.FloatTensor(input_channels,nodes)) + self.weight = Parameter(torch.FloatTensor(input_channels,hidden_layers)) + # self.reset_parameters() + + def forward(self, input): + n,c,h,w = input.size() + # print('fea input',input.size()) + input1 = input.view(n,c,h*w) + input1 = input1.transpose(1,2) # n x hw x c + # print('fea input1', input1.size()) + ############## Feature maps to node 
################ + fea_node = torch.matmul(input1,self.pre_fea) # n x hw x n_classes + weight_node = torch.matmul(input1,self.weight) # n x hw x hidden_layer + # softmax fea_node + fea_node = F.softmax(fea_node,dim=-1) + # print(fea_node.size(),weight_node.size()) + graph_node = F.relu(torch.matmul(fea_node.transpose(1,2),weight_node)) + return graph_node # n x n_class x hidden_layer + + def reset_parameters(self): + for ww in self.parameters(): + torch.nn.init.xavier_uniform_(ww) + # if self.bias is not None: + # self.bias.data.uniform_(-stdv,stdv) + +class Featuremaps_to_Graph_transfer(nn.Module): + + def __init__(self,input_channels,hidden_layers,nodes=7, source_nodes=20): + super(Featuremaps_to_Graph_transfer, self).__init__() + self.pre_fea = Parameter(torch.FloatTensor(input_channels,nodes)) + self.weight = Parameter(torch.FloatTensor(input_channels,hidden_layers)) + self.pre_fea_transfer = nn.Sequential(*[nn.Linear(source_nodes, source_nodes),nn.LeakyReLU(True), + nn.Linear(source_nodes, nodes), nn.LeakyReLU(True)]) + # self.reset_parameters() + + def forward(self, input, source_pre_fea): + self.pre_fea.data = self.pre_fea_learn(source_pre_fea) + n,c,h,w = input.size() + # print('fea input',input.size()) + input1 = input.view(n,c,h*w) + input1 = input1.transpose(1,2) # n x hw x c + # print('fea input1', input1.size()) + ############## Feature maps to node ################ + fea_node = torch.matmul(input1,self.pre_fea) # n x hw x n_classes + weight_node = torch.matmul(input1,self.weight) # n x hw x hidden_layer + # softmax fea_node + fea_node = F.softmax(fea_node,dim=-1) + # print(fea_node.size(),weight_node.size()) + graph_node = F.relu(torch.matmul(fea_node.transpose(1,2),weight_node)) + return graph_node # n x n_class x hidden_layer + + def pre_fea_learn(self, input): + pre_fea = self.pre_fea_transfer.forward(input.unsqueeze(0)).squeeze(0) + return self.pre_fea.data + pre_fea + +class Graph_to_Featuremaps(nn.Module): + # this is a special version + def __init__(self,input_channels,output_channels,hidden_layers,nodes=7): + super(Graph_to_Featuremaps, self).__init__() + self.node_fea = Parameter(torch.FloatTensor(input_channels+hidden_layers,1)) + self.weight = Parameter(torch.FloatTensor(hidden_layers,output_channels)) + # self.reset_parameters() + + def reset_parameters(self): + for ww in self.parameters(): + torch.nn.init.xavier_uniform_(ww) + + def forward(self, input, res_feature): + ''' + + :param input: 1 x batch x nodes x hidden_layer + :param res_feature: batch x channels x h x w + :return: + ''' + batchi,channeli,hi,wi = res_feature.size() + # print(res_feature.size()) + # print(input.size()) + try: + _,batch,nodes,hidden = input.size() + except: + # print(input.size()) + input = input.unsqueeze(0) + _,batch, nodes, hidden = input.size() + + assert batch == batchi + input1 = input.transpose(0,1).expand(batch,hi*wi,nodes,hidden) + res_feature_after_view = res_feature.view(batch,channeli,hi*wi).transpose(1,2) + res_feature_after_view1 = res_feature_after_view.unsqueeze(2).expand(batch,hi*wi,nodes,channeli) + new_fea = torch.cat((res_feature_after_view1,input1),dim=3) + + # print(self.node_fea.size(),new_fea.size()) + new_node = torch.matmul(new_fea, self.node_fea) # batch x hw x nodes x 1 + new_weight = torch.matmul(input, self.weight) # batch x node x channel + new_node = new_node.view(batch, hi*wi, nodes) + feature_out = torch.matmul(new_node,new_weight) + # print(feature_out.size()) + feature_out = feature_out.transpose(2,3).contiguous().view(res_feature.size()) + return 
F.relu(feature_out) + +class Graph_to_Featuremaps_savemem(nn.Module): + # this is a special version for saving gpu memory. The process is same as Graph_to_Featuremaps. + def __init__(self, input_channels, output_channels, hidden_layers, nodes=7): + super(Graph_to_Featuremaps_savemem, self).__init__() + self.node_fea_for_res = Parameter(torch.FloatTensor(input_channels, 1)) + self.node_fea_for_hidden = Parameter(torch.FloatTensor(hidden_layers, 1)) + self.weight = Parameter(torch.FloatTensor(hidden_layers,output_channels)) + # self.reset_parameters() + + def reset_parameters(self): + for ww in self.parameters(): + torch.nn.init.xavier_uniform_(ww) + + def forward(self, input, res_feature): + ''' + + :param input: 1 x batch x nodes x hidden_layer + :param res_feature: batch x channels x h x w + :return: + ''' + batchi,channeli,hi,wi = res_feature.size() + # print(res_feature.size()) + # print(input.size()) + try: + _,batch,nodes,hidden = input.size() + except: + # print(input.size()) + input = input.unsqueeze(0) + _,batch, nodes, hidden = input.size() + + assert batch == batchi + input1 = input.transpose(0,1).expand(batch,hi*wi,nodes,hidden) + res_feature_after_view = res_feature.view(batch,channeli,hi*wi).transpose(1,2) + res_feature_after_view1 = res_feature_after_view.unsqueeze(2).expand(batch,hi*wi,nodes,channeli) + # new_fea = torch.cat((res_feature_after_view1,input1),dim=3) + ## sim + new_node1 = torch.matmul(res_feature_after_view1, self.node_fea_for_res) + new_node2 = torch.matmul(input1, self.node_fea_for_hidden) + new_node = new_node1 + new_node2 + ## sim end + # print(self.node_fea.size(),new_fea.size()) + # new_node = torch.matmul(new_fea, self.node_fea) # batch x hw x nodes x 1 + new_weight = torch.matmul(input, self.weight) # batch x node x channel + new_node = new_node.view(batch, hi*wi, nodes) + feature_out = torch.matmul(new_node,new_weight) + # print(feature_out.size()) + feature_out = feature_out.transpose(2,3).contiguous().view(res_feature.size()) + return F.relu(feature_out) + + +class Graph_trans(nn.Module): + + def __init__(self,in_features,out_features,begin_nodes=7,end_nodes=2,bias=False,adj=None): + super(Graph_trans, self).__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.FloatTensor(in_features,out_features)) + if adj is not None: + h,w = adj.size() + assert (h == end_nodes) and (w == begin_nodes) + self.adj = torch.autograd.Variable(adj,requires_grad=False) + else: + self.adj = Parameter(torch.FloatTensor(end_nodes,begin_nodes)) + if bias: + self.bias = Parameter(torch.FloatTensor(out_features)) + else: + self.register_parameter('bias',None) + # self.reset_parameters() + + def reset_parameters(self): + # stdv = 1./math.sqrt(self.weight(1)) + # self.weight.data.uniform_(-stdv,stdv) + torch.nn.init.xavier_uniform_(self.weight) + # if self.bias is not None: + # self.bias.data.uniform_(-stdv,stdv) + + def forward(self, input, relu=False, adj_return=False, adj=None): + support = torch.matmul(input,self.weight) + # print(support.size(),self.adj.size()) + if adj is None: + adj = self.adj + adj1 = self.norm_trans_adj(adj) + output = torch.matmul(adj1,support) + if adj_return: + output1 = F.normalize(output,p=2,dim=-1) + self.adj_mat = torch.matmul(output1,output1.transpose(-2,-1)) + if self.bias is not None: + return output + self.bias + else: + if relu: + return F.relu(output) + else: + return output + + def get_adj_mat(self): + adj = graph.normalize_adj_torch(F.relu(self.adj_mat)) + return adj + + def 
get_encode_adj(self): + return self.adj + + def norm_trans_adj(self,adj): # maybe can use softmax + adj = F.relu(adj) + r = F.softmax(adj,dim=-1) + # print(adj.size()) + # row_sum = adj.sum(-1).unsqueeze(-1) + # d_mat = row_sum.expand(adj.size()) + # r = torch.div(row_sum,d_mat) + # r[torch.isnan(r)] = 0 + + return r + + +if __name__ == '__main__': + + graph = torch.randn((7,128)) + pred = (torch.rand((7,7))*7).int() + # a = en.forward(graph,pred) + # print(a.size()) \ No newline at end of file diff --git a/networks/graph.py b/networks/graph.py new file mode 100644 index 0000000..8e49059 --- /dev/null +++ b/networks/graph.py @@ -0,0 +1,261 @@ +import numpy as np +import pickle as pkl +import networkx as nx +import scipy.sparse as sp +import torch + +pascal_graph = {0:[0], + 1:[1, 2], + 2:[1, 2, 3, 5], + 3:[2, 3, 4], + 4:[3, 4], + 5:[2, 5, 6], + 6:[5, 6]} + +cihp_graph = {0: [], + 1: [2, 13], + 2: [1, 13], + 3: [14, 15], + 4: [13], + 5: [6, 7, 9, 10, 11, 12, 14, 15], + 6: [5, 7, 10, 11, 14, 15, 16, 17], + 7: [5, 6, 9, 10, 11, 12, 14, 15], + 8: [16, 17, 18, 19], + 9: [5, 7, 10, 16, 17, 18, 19], + 10:[5, 6, 7, 9, 11, 12, 13, 14, 15, 16, 17], + 11:[5, 6, 7, 10, 13], + 12:[5, 7, 10, 16, 17], + 13:[1, 2, 4, 10, 11], + 14:[3, 5, 6, 7, 10], + 15:[3, 5, 6, 7, 10], + 16:[6, 8, 9, 10, 12, 18], + 17:[6, 8, 9, 10, 12, 19], + 18:[8, 9, 16], + 19:[8, 9, 17]} + +atr_graph = {0: [], + 1: [2, 11], + 2: [1, 11], + 3: [11], + 4: [5, 6, 7, 11, 14, 15, 17], + 5: [4, 6, 7, 8, 12, 13], + 6: [4,5,7,8,9,10,12,13], + 7: [4,11,12,13,14,15], + 8: [5,6], + 9: [6, 12], + 10:[6, 13], + 11:[1,2,3,4,7,14,15,17], + 12:[5,6,7,9], + 13:[5,6,7,10], + 14:[4,7,11,16], + 15:[4,7,11,16], + 16:[14,15], + 17:[4,11], + } + +cihp2pascal_adj = np.array([[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]) + +cihp2pascal_nlp_adj = \ + np.array([[ 1., 0.35333052, 0.32727194, 0.17418084, 0.18757584, + 0.40608522, 0.37503981, 0.35448462, 0.22598555, 0.23893579, + 0.33064262, 0.28923404, 0.27986573, 0.4211553 , 0.36915778, + 0.41377746, 0.32485771, 0.37248222, 0.36865639, 0.41500332], + [ 0.39615879, 0.46201529, 0.52321467, 0.30826114, 0.25669527, + 0.54747773, 0.3670523 , 0.3901983 , 0.27519473, 0.3433325 , + 0.52728509, 0.32771333, 0.34819325, 0.63882953, 0.68042925, + 0.69368576, 0.63395791, 0.65344337, 0.59538781, 0.6071375 ], + [ 0.16373166, 0.21663339, 0.3053872 , 0.28377612, 0.1372435 , + 0.4448808 , 0.29479995, 0.31092595, 0.22703953, 0.33983576, + 0.75778818, 0.2619818 , 0.37069392, 0.35184867, 0.49877512, + 0.49979437, 0.51853277, 0.52517541, 0.32517741, 0.32377309], + [ 0.32687232, 0.38482461, 0.37693463, 0.41610834, 0.20415749, + 0.76749079, 0.35139853, 0.3787411 , 0.28411737, 0.35155421, + 0.58792618, 0.31141718, 0.40585111, 0.51189218, 0.82042737, + 0.8342413 , 0.70732188, 0.72752501, 0.60327325, 0.61431337], + [ 0.34069369, 0.34817292, 0.37525998, 0.36497069, 0.17841617, + 0.69746208, 0.31731463, 0.34628951, 0.25167277, 0.32072379, + 0.56711286, 0.24894776, 0.37000453, 0.52600859, 0.82483993, + 0.84966274, 0.7033991 , 0.73449378, 0.56649608, 0.58888791], + [ 0.28477487, 0.35139564, 0.42742352, 0.41664321, 0.20004676, + 0.78566833, 0.42237487, 0.41048549, 
0.37933812, 0.46542516, + 0.62444759, 0.3274493 , 0.49466009, 0.49314658, 0.71244233, + 0.71497003, 0.8234787 , 0.83566589, 0.62597135, 0.62626812], + [ 0.3011378 , 0.31775977, 0.42922647, 0.36896257, 0.17597556, + 0.72214655, 0.39162804, 0.38137872, 0.34980296, 0.43818419, + 0.60879174, 0.26762545, 0.46271161, 0.51150476, 0.72318109, + 0.73678399, 0.82620388, 0.84942166, 0.5943811 , 0.60607602]]) + +pascal2atr_nlp_adj = \ + np.array([[ 1., 0.35333052, 0.32727194, 0.18757584, 0.40608522, + 0.27986573, 0.23893579, 0.27600672, 0.30964391, 0.36865639, + 0.41500332, 0.4211553 , 0.32485771, 0.37248222, 0.36915778, + 0.41377746, 0.32006291, 0.28923404], + [ 0.39615879, 0.46201529, 0.52321467, 0.25669527, 0.54747773, + 0.34819325, 0.3433325 , 0.26603942, 0.45162929, 0.59538781, + 0.6071375 , 0.63882953, 0.63395791, 0.65344337, 0.68042925, + 0.69368576, 0.44354613, 0.32771333], + [ 0.16373166, 0.21663339, 0.3053872 , 0.1372435 , 0.4448808 , + 0.37069392, 0.33983576, 0.26563416, 0.35443504, 0.32517741, + 0.32377309, 0.35184867, 0.51853277, 0.52517541, 0.49877512, + 0.49979437, 0.21750868, 0.2619818 ], + [ 0.32687232, 0.38482461, 0.37693463, 0.20415749, 0.76749079, + 0.40585111, 0.35155421, 0.28271333, 0.52684576, 0.60327325, + 0.61431337, 0.51189218, 0.70732188, 0.72752501, 0.82042737, + 0.8342413 , 0.40137029, 0.31141718], + [ 0.34069369, 0.34817292, 0.37525998, 0.17841617, 0.69746208, + 0.37000453, 0.32072379, 0.27268885, 0.47426719, 0.56649608, + 0.58888791, 0.52600859, 0.7033991 , 0.73449378, 0.82483993, + 0.84966274, 0.37830796, 0.24894776], + [ 0.28477487, 0.35139564, 0.42742352, 0.20004676, 0.78566833, + 0.49466009, 0.46542516, 0.32662614, 0.55780359, 0.62597135, + 0.62626812, 0.49314658, 0.8234787 , 0.83566589, 0.71244233, + 0.71497003, 0.41223219, 0.3274493 ], + [ 0.3011378 , 0.31775977, 0.42922647, 0.17597556, 0.72214655, + 0.46271161, 0.43818419, 0.3192333 , 0.50979216, 0.5943811 , + 0.60607602, 0.51150476, 0.82620388, 0.84942166, 0.72318109, + 0.73678399, 0.39259827, 0.26762545]]) + +cihp2atr_nlp_adj = np.array([[ 1., 0.35333052, 0.32727194, 0.18757584, 0.40608522, + 0.27986573, 0.23893579, 0.27600672, 0.30964391, 0.36865639, + 0.41500332, 0.4211553 , 0.32485771, 0.37248222, 0.36915778, + 0.41377746, 0.32006291, 0.28923404], + [ 0.35333052, 1. , 0.39206695, 0.42143438, 0.4736689 , + 0.47139544, 0.51999208, 0.38354847, 0.45628529, 0.46514124, + 0.50083501, 0.4310595 , 0.39371443, 0.4319752 , 0.42938598, + 0.46384034, 0.44833757, 0.6153155 ], + [ 0.32727194, 0.39206695, 1. , 0.32836702, 0.52603065, + 0.39543695, 0.3622627 , 0.43575346, 0.33866223, 0.45202552, + 0.48421 , 0.53669903, 0.47266611, 0.50925436, 0.42286557, + 0.45403656, 0.37221304, 0.40999322], + [ 0.17418084, 0.46892601, 0.25774838, 0.31816231, 0.39330317, + 0.34218382, 0.48253904, 0.22084125, 0.41335728, 0.52437572, + 0.5191713 , 0.33576117, 0.44230914, 0.44250678, 0.44330833, + 0.43887264, 0.50693611, 0.39278795], + [ 0.18757584, 0.42143438, 0.32836702, 1. , 0.35030067, + 0.30110947, 0.41055555, 0.34338879, 0.34336307, 0.37704433, + 0.38810141, 0.34702081, 0.24171562, 0.25433078, 0.24696241, + 0.2570884 , 0.4465962 , 0.45263213], + [ 0.40608522, 0.4736689 , 0.52603065, 0.35030067, 1. 
, + 0.54372584, 0.58300258, 0.56674191, 0.555266 , 0.66599594, + 0.68567555, 0.55716359, 0.62997328, 0.65638548, 0.61219615, + 0.63183318, 0.54464151, 0.44293752], + [ 0.37503981, 0.50675565, 0.4761106 , 0.37561813, 0.60419403, + 0.77912403, 0.64595517, 0.85939662, 0.46037144, 0.52348817, + 0.55875094, 0.37741886, 0.455671 , 0.49434392, 0.38479954, + 0.41804074, 0.47285709, 0.57236283], + [ 0.35448462, 0.50576632, 0.51030446, 0.35841033, 0.55106903, + 0.50257274, 0.52591451, 0.4283053 , 0.39991808, 0.42327211, + 0.42853819, 0.42071825, 0.41240559, 0.42259136, 0.38125352, + 0.3868255 , 0.47604934, 0.51811717], + [ 0.22598555, 0.5053299 , 0.36301185, 0.38002282, 0.49700941, + 0.45625243, 0.62876479, 0.4112051 , 0.33944371, 0.48322639, + 0.50318714, 0.29207815, 0.38801966, 0.41119094, 0.29199072, + 0.31021029, 0.41594871, 0.54961962], + [ 0.23893579, 0.51999208, 0.3622627 , 0.41055555, 0.58300258, + 0.68874251, 1. , 0.56977937, 0.49918447, 0.48484363, + 0.51615925, 0.41222306, 0.49535971, 0.53134951, 0.3807616 , + 0.41050298, 0.48675801, 0.51112664], + [ 0.33064262, 0.306412 , 0.60679935, 0.25592294, 0.58738706, + 0.40379627, 0.39679161, 0.33618385, 0.39235148, 0.45474013, + 0.4648476 , 0.59306762, 0.58976007, 0.60778661, 0.55400397, + 0.56551297, 0.3698029 , 0.33860535], + [ 0.28923404, 0.6153155 , 0.40999322, 0.45263213, 0.44293752, + 0.60359359, 0.51112664, 0.46578181, 0.45656936, 0.38142307, + 0.38525582, 0.33327223, 0.35360175, 0.36156453, 0.3384992 , + 0.34261229, 0.49297863, 1. ], + [ 0.27986573, 0.47139544, 0.39543695, 0.30110947, 0.54372584, + 1. , 0.68874251, 0.67765588, 0.48690078, 0.44010641, + 0.44921156, 0.32321099, 0.48311542, 0.4982002 , 0.39378102, + 0.40297733, 0.45309735, 0.60359359], + [ 0.4211553 , 0.4310595 , 0.53669903, 0.34702081, 0.55716359, + 0.32321099, 0.41222306, 0.25721705, 0.36633509, 0.5397475 , + 0.56429928, 1. , 0.55796926, 0.58842844, 0.57930828, + 0.60410597, 0.41615326, 0.33327223], + [ 0.36915778, 0.42938598, 0.42286557, 0.24696241, 0.61219615, + 0.39378102, 0.3807616 , 0.28089866, 0.48450394, 0.77400821, + 0.68813814, 0.57930828, 0.8856886 , 0.81673412, 1. , + 0.92279623, 0.46969152, 0.3384992 ], + [ 0.41377746, 0.46384034, 0.45403656, 0.2570884 , 0.63183318, + 0.40297733, 0.41050298, 0.332879 , 0.48799542, 0.69231828, + 0.77015091, 0.60410597, 0.79788484, 0.88232104, 0.92279623, + 1. , 0.45685017, 0.34261229], + [ 0.32485771, 0.39371443, 0.47266611, 0.24171562, 0.62997328, + 0.48311542, 0.49535971, 0.32477932, 0.51486622, 0.79353556, + 0.69768738, 0.55796926, 1. , 0.92373745, 0.8856886 , + 0.79788484, 0.47883134, 0.35360175], + [ 0.37248222, 0.4319752 , 0.50925436, 0.25433078, 0.65638548, + 0.4982002 , 0.53134951, 0.38057074, 0.52403969, 0.72035243, + 0.78711147, 0.58842844, 0.92373745, 1. , 0.81673412, + 0.88232104, 0.47109935, 0.36156453], + [ 0.36865639, 0.46514124, 0.45202552, 0.37704433, 0.66599594, + 0.44010641, 0.48484363, 0.39636574, 0.50175258, 1. , + 0.91320249, 0.5397475 , 0.79353556, 0.72035243, 0.77400821, + 0.69231828, 0.59087008, 0.38142307], + [ 0.41500332, 0.50083501, 0.48421, 0.38810141, 0.68567555, + 0.44921156, 0.51615925, 0.45156472, 0.50438158, 0.91320249, + 1., 0.56429928, 0.69768738, 0.78711147, 0.68813814, + 0.77015091, 0.57698754, 0.38525582]]) + + + +def normalize_adj(adj): + """Symmetrically normalize adjacency matrix.""" + adj = sp.coo_matrix(adj) + rowsum = np.array(adj.sum(1)) + d_inv_sqrt = np.power(rowsum, -0.5).flatten() + d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. 
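+    # Note: isolated nodes have a zero row sum, so the power above yields inf; those entries
+    # are zeroed out. The two dot products below build D^{-1/2} A^T D^{-1/2}, which for a
+    # symmetric adjacency is the usual symmetric normalization D^{-1/2} A D^{-1/2}, i.e.
+    # each entry a_ij is rescaled by 1/sqrt(d_i * d_j).
+    # e.g. a 2-node fully connected graph with self-loops, A = [[1, 1], [1, 1]], has
+    # rowsum = [2, 2], d_inv_sqrt ~= [0.707, 0.707], and normalizes to [[0.5, 0.5], [0.5, 0.5]].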
+ d_mat_inv_sqrt = sp.diags(d_inv_sqrt) + return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() + +def preprocess_adj(adj): + """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.""" + adj = nx.adjacency_matrix(nx.from_dict_of_lists(adj)) # return a adjacency matrix of adj ( type is numpy) + adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) # + # return sparse_to_tuple(adj_normalized) + return adj_normalized.todense() + +def row_norm(inputs): + outputs = [] + for x in inputs: + xsum = x.sum() + x = x / xsum + outputs.append(x) + return outputs + + +def normalize_adj_torch(adj): + # print(adj.size()) + if len(adj.size()) == 4: + new_r = torch.zeros(adj.size()).type_as(adj) + for i in range(adj.size(1)): + adj_item = adj[0,i] + rowsum = adj_item.sum(1) + d_inv_sqrt = rowsum.pow_(-0.5) + d_inv_sqrt[torch.isnan(d_inv_sqrt)] = 0 + d_mat_inv_sqrt = torch.diag(d_inv_sqrt) + r = torch.matmul(torch.matmul(d_mat_inv_sqrt, adj_item), d_mat_inv_sqrt) + new_r[0,i,...] = r + return new_r + rowsum = adj.sum(1) + d_inv_sqrt = rowsum.pow_(-0.5) + d_inv_sqrt[torch.isnan(d_inv_sqrt)] = 0 + d_mat_inv_sqrt = torch.diag(d_inv_sqrt) + r = torch.matmul(torch.matmul(d_mat_inv_sqrt,adj),d_mat_inv_sqrt) + return r + +# def row_norm(adj): + + + + +if __name__ == '__main__': + a= row_norm(cihp2pascal_adj) + print(a) + print(cihp2pascal_adj) + # print(a.shape) diff --git a/requirements b/requirements new file mode 100644 index 0000000..ba99367 --- /dev/null +++ b/requirements @@ -0,0 +1,7 @@ +torchvision +scipy +tensorboardX +numpy +opencv-python +matplotlib +networkx \ No newline at end of file diff --git a/sync_batchnorm/__init__.py b/sync_batchnorm/__init__.py new file mode 100644 index 0000000..bc8709d --- /dev/null +++ b/sync_batchnorm/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# File : __init__.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d +from .replicate import DataParallelWithCallback, patch_replication_callback diff --git a/sync_batchnorm/batchnorm.py b/sync_batchnorm/batchnorm.py new file mode 100644 index 0000000..5f4e763 --- /dev/null +++ b/sync_batchnorm/batchnorm.py @@ -0,0 +1,315 @@ +# -*- coding: utf-8 -*- +# File : batchnorm.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. 
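+#
+# A minimal usage sketch (illustrative only -- the wrapped layers and device ids below are
+# hypothetical, not part of this repository):
+#
+#   import torch.nn as nn
+#   from sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
+#
+#   block = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), SynchronizedBatchNorm2d(64), nn.ReLU())
+#   block = DataParallelWithCallback(block, device_ids=[0, 1])
+#
+# The replication callback registers every module copy with the master copy, so train-time
+# batch statistics are reduced across both GPUs instead of being computed per device.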
+ +import collections + +import torch +import torch.nn.functional as F + +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast + +from .comm import SyncMaster + +__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d'] + + +def _sum_ft(tensor): + """sum over the first and last dimention""" + return tensor.sum(dim=0).sum(dim=-1) + + +def _unsqueeze_ft(tensor): + """add new dementions at the front and the tail""" + return tensor.unsqueeze(0).unsqueeze(-1) + + +_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size']) +_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std']) + + +class _SynchronizedBatchNorm(_BatchNorm): + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True): + super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine) + + self._sync_master = SyncMaster(self._data_parallel_master) + + self._is_parallel = False + self._parallel_id = None + self._slave_pipe = None + + def forward(self, input): + # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation. + if not (self._is_parallel and self.training): + return F.batch_norm( + input, self.running_mean, self.running_var, self.weight, self.bias, + self.training, self.momentum, self.eps) + + # Resize the input to (B, C, -1). + input_shape = input.size() + input = input.view(input.size(0), self.num_features, -1) + + # Compute the sum and square-sum. + sum_size = input.size(0) * input.size(2) + input_sum = _sum_ft(input) + input_ssum = _sum_ft(input ** 2) + + # Reduce-and-broadcast the statistics. + if self._parallel_id == 0: + mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size)) + else: + mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size)) + + # Compute the output. + if self.affine: + # MJY:: Fuse the multiplication for speed. + output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias) + else: + output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std) + + # Reshape it. + return output.view(input_shape) + + def __data_parallel_replicate__(self, ctx, copy_id): + self._is_parallel = True + self._parallel_id = copy_id + + # parallel_id == 0 means master device. + if self._parallel_id == 0: + ctx.sync_master = self._sync_master + else: + self._slave_pipe = ctx.sync_master.register_slave(copy_id) + + def _data_parallel_master(self, intermediates): + """Reduce the sum and square-sum, compute the statistics, and broadcast it.""" + + # Always using same "device order" makes the ReduceAdd operation faster. 
+ # Thanks to:: Tete Xiao (http://tetexiao.com/) + intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device()) + + to_reduce = [i[1][:2] for i in intermediates] + to_reduce = [j for i in to_reduce for j in i] # flatten + target_gpus = [i[1].sum.get_device() for i in intermediates] + + sum_size = sum([i[1].sum_size for i in intermediates]) + sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce) + mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size) + + broadcasted = Broadcast.apply(target_gpus, mean, inv_std) + + outputs = [] + for i, rec in enumerate(intermediates): + outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2]))) + + return outputs + + def _compute_mean_std(self, sum_, ssum, size): + """Compute the mean and standard-deviation with sum and square-sum. This method + also maintains the moving average on the master device.""" + assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.' + mean = sum_ / size + sumvar = ssum - sum_ * mean + unbias_var = sumvar / (size - 1) + bias_var = sumvar / size + + self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data + self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data + + return mean, bias_var.clamp(self.eps) ** -0.5 + + +class SynchronizedBatchNorm1d(_SynchronizedBatchNorm): + r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a + mini-batch. + + .. math:: + + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta + + This module differs from the built-in PyTorch BatchNorm1d as the mean and + standard-deviation are reduced across all devices during training. + + For example, when one uses `nn.DataParallel` to wrap the network during + training, PyTorch's implementation normalize the tensor on each device using + the statistics only on that device, which accelerated the computation and + is also easy to implement, but the statistics might be inaccurate. + Instead, in this synchronized version, the statistics will be computed + over all training samples distributed on multiple devices. + + Note that, for one-GPU or CPU-only case, this module behaves exactly same + as the built-in PyTorch implementation. + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and gamma and beta are learnable parameter vectors + of size C (where C is the input size). + + During training, this layer keeps a running estimate of its computed mean + and variance. The running sum is kept with a default momentum of 0.1. + + During evaluation, this running mean/variance is used for normalization. + + Because the BatchNorm is done over the `C` dimension, computing statistics + on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm + + Args: + num_features: num_features from an expected input of size + `batch_size x num_features [x width]` + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + affine: a boolean value that when set to ``True``, gives the layer learnable + affine parameters. 
Default: ``True`` + + Shape: + - Input: :math:`(N, C)` or :math:`(N, C, L)` + - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) + + Examples: + >>> # With Learnable Parameters + >>> m = SynchronizedBatchNorm1d(100) + >>> # Without Learnable Parameters + >>> m = SynchronizedBatchNorm1d(100, affine=False) + >>> input = torch.autograd.Variable(torch.randn(20, 100)) + >>> output = m(input) + """ + + def _check_input_dim(self, input): + if input.dim() != 2 and input.dim() != 3: + raise ValueError('expected 2D or 3D input (got {}D input)' + .format(input.dim())) + super(SynchronizedBatchNorm1d, self)._check_input_dim(input) + + +class SynchronizedBatchNorm2d(_SynchronizedBatchNorm): + r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch + of 3d inputs + + .. math:: + + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta + + This module differs from the built-in PyTorch BatchNorm2d as the mean and + standard-deviation are reduced across all devices during training. + + For example, when one uses `nn.DataParallel` to wrap the network during + training, PyTorch's implementation normalize the tensor on each device using + the statistics only on that device, which accelerated the computation and + is also easy to implement, but the statistics might be inaccurate. + Instead, in this synchronized version, the statistics will be computed + over all training samples distributed on multiple devices. + + Note that, for one-GPU or CPU-only case, this module behaves exactly same + as the built-in PyTorch implementation. + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and gamma and beta are learnable parameter vectors + of size C (where C is the input size). + + During training, this layer keeps a running estimate of its computed mean + and variance. The running sum is kept with a default momentum of 0.1. + + During evaluation, this running mean/variance is used for normalization. + + Because the BatchNorm is done over the `C` dimension, computing statistics + on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm + + Args: + num_features: num_features from an expected input of + size batch_size x num_features x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + affine: a boolean value that when set to ``True``, gives the layer learnable + affine parameters. Default: ``True`` + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + + Examples: + >>> # With Learnable Parameters + >>> m = SynchronizedBatchNorm2d(100) + >>> # Without Learnable Parameters + >>> m = SynchronizedBatchNorm2d(100, affine=False) + >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45)) + >>> output = m(input) + """ + + def _check_input_dim(self, input): + if input.dim() != 4: + raise ValueError('expected 4D input (got {}D input)' + .format(input.dim())) + super(SynchronizedBatchNorm2d, self)._check_input_dim(input) + + +class SynchronizedBatchNorm3d(_SynchronizedBatchNorm): + r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch + of 4d inputs + + .. math:: + + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta + + This module differs from the built-in PyTorch BatchNorm3d as the mean and + standard-deviation are reduced across all devices during training. 
+ + For example, when one uses `nn.DataParallel` to wrap the network during + training, PyTorch's implementation normalize the tensor on each device using + the statistics only on that device, which accelerated the computation and + is also easy to implement, but the statistics might be inaccurate. + Instead, in this synchronized version, the statistics will be computed + over all training samples distributed on multiple devices. + + Note that, for one-GPU or CPU-only case, this module behaves exactly same + as the built-in PyTorch implementation. + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and gamma and beta are learnable parameter vectors + of size C (where C is the input size). + + During training, this layer keeps a running estimate of its computed mean + and variance. The running sum is kept with a default momentum of 0.1. + + During evaluation, this running mean/variance is used for normalization. + + Because the BatchNorm is done over the `C` dimension, computing statistics + on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm + or Spatio-temporal BatchNorm + + Args: + num_features: num_features from an expected input of + size batch_size x num_features x depth x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + affine: a boolean value that when set to ``True``, gives the layer learnable + affine parameters. Default: ``True`` + + Shape: + - Input: :math:`(N, C, D, H, W)` + - Output: :math:`(N, C, D, H, W)` (same shape as input) + + Examples: + >>> # With Learnable Parameters + >>> m = SynchronizedBatchNorm3d(100) + >>> # Without Learnable Parameters + >>> m = SynchronizedBatchNorm3d(100, affine=False) + >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)) + >>> output = m(input) + """ + + def _check_input_dim(self, input): + if input.dim() != 5: + raise ValueError('expected 5D input (got {}D input)' + .format(input.dim())) + super(SynchronizedBatchNorm3d, self)._check_input_dim(input) diff --git a/sync_batchnorm/comm.py b/sync_batchnorm/comm.py new file mode 100644 index 0000000..922f8c4 --- /dev/null +++ b/sync_batchnorm/comm.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# File : comm.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +import queue +import collections +import threading + +__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] + + +class FutureResult(object): + """A thread-safe future implementation. Used only as one-to-one pipe.""" + + def __init__(self): + self._result = None + self._lock = threading.Lock() + self._cond = threading.Condition(self._lock) + + def put(self, result): + with self._lock: + assert self._result is None, 'Previous result has\'t been fetched.' 
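+            # store the produced value and wake the single consumer blocked in get();
+            # FutureResult is a strictly one-to-one pipe, so one slot plus notify() suffices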
+ self._result = result + self._cond.notify() + + def get(self): + with self._lock: + if self._result is None: + self._cond.wait() + + res = self._result + self._result = None + return res + + +_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) +_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) + + +class SlavePipe(_SlavePipeBase): + """Pipe for master-slave communication.""" + + def run_slave(self, msg): + self.queue.put((self.identifier, msg)) + ret = self.result.get() + self.queue.put(True) + return ret + + +class SyncMaster(object): + """An abstract `SyncMaster` object. + + - During the replication, as the data parallel will trigger an callback of each module, all slave devices should + call `register(id)` and obtain an `SlavePipe` to communicate with the master. + - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, + and passed to a registered callback. + - After receiving the messages, the master device should gather the information and determine to message passed + back to each slave devices. + """ + + def __init__(self, master_callback): + """ + + Args: + master_callback: a callback to be invoked after having collected messages from slave devices. + """ + self._master_callback = master_callback + self._queue = queue.Queue() + self._registry = collections.OrderedDict() + self._activated = False + + def __getstate__(self): + return {'master_callback': self._master_callback} + + def __setstate__(self, state): + self.__init__(state['master_callback']) + + def register_slave(self, identifier): + """ + Register an slave device. + + Args: + identifier: an identifier, usually is the device id. + + Returns: a `SlavePipe` object which can be used to communicate with the master device. + + """ + if self._activated: + assert self._queue.empty(), 'Queue is not clean before next initialization.' + self._activated = False + self._registry.clear() + future = FutureResult() + self._registry[identifier] = _MasterRegistry(future) + return SlavePipe(identifier, self._queue, future) + + def run_master(self, master_msg): + """ + Main entry for the master device in each forward pass. + The messages were first collected from each devices (including the master device), and then + an callback will be invoked to compute the message to be sent back to each devices + (including the master device). + + Args: + master_msg: the message that the master want to send to itself. This will be placed as the first + message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. + + Returns: the message to be sent back to the master device. + + """ + self._activated = True + + intermediates = [(0, master_msg)] + for i in range(self.nr_slaves): + intermediates.append(self._queue.get()) + + results = self._master_callback(intermediates) + assert results[0][0] == 0, 'The first result should belongs to the master.' 
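+        # results[0] is the master's own message and is returned at the end of this method;
+        # every other entry is pushed back to the corresponding slave through its FutureResult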
+ + for i, res in results: + if i == 0: + continue + self._registry[i].result.put(res) + + for i in range(self.nr_slaves): + assert self._queue.get() is True + + return results[0][1] + + @property + def nr_slaves(self): + return len(self._registry) diff --git a/sync_batchnorm/replicate.py b/sync_batchnorm/replicate.py new file mode 100644 index 0000000..b71c7b8 --- /dev/null +++ b/sync_batchnorm/replicate.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# File : replicate.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +import functools + +from torch.nn.parallel.data_parallel import DataParallel + +__all__ = [ + 'CallbackContext', + 'execute_replication_callbacks', + 'DataParallelWithCallback', + 'patch_replication_callback' +] + + +class CallbackContext(object): + pass + + +def execute_replication_callbacks(modules): + """ + Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. + + The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` + + Note that, as all modules are isomorphism, we assign each sub-module with a context + (shared among multiple copies of this module on different devices). + Through this context, different copies can share some information. + + We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback + of any slave copies. + """ + master_copy = modules[0] + nr_modules = len(list(master_copy.modules())) + ctxs = [CallbackContext() for _ in range(nr_modules)] + + for i, module in enumerate(modules): + for j, m in enumerate(module.modules()): + if hasattr(m, '__data_parallel_replicate__'): + m.__data_parallel_replicate__(ctxs[j], i) + + +class DataParallelWithCallback(DataParallel): + """ + Data Parallel with a replication callback. + + An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by + original `replicate` function. + The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` + + Examples: + > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + # sync_bn.__data_parallel_replicate__ will be invoked. + """ + + def replicate(self, module, device_ids): + modules = super(DataParallelWithCallback, self).replicate(module, device_ids) + execute_replication_callbacks(modules) + return modules + + +def patch_replication_callback(data_parallel): + """ + Monkey-patch an existing `DataParallel` object. Add the replication callback. + Useful when you have customized `DataParallel` implementation. 
+ + Examples: + > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) + > patch_replication_callback(sync_bn) + # this is equivalent to + > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + """ + + assert isinstance(data_parallel, DataParallel) + + old_replicate = data_parallel.replicate + + @functools.wraps(old_replicate) + def new_replicate(module, device_ids): + modules = old_replicate(module, device_ids) + execute_replication_callbacks(modules) + return modules + + data_parallel.replicate = new_replicate diff --git a/sync_batchnorm/unittest.py b/sync_batchnorm/unittest.py new file mode 100644 index 0000000..0675c02 --- /dev/null +++ b/sync_batchnorm/unittest.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# File : unittest.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +import unittest + +import numpy as np +from torch.autograd import Variable + + +def as_numpy(v): + if isinstance(v, Variable): + v = v.data + return v.cpu().numpy() + + +class TorchTestCase(unittest.TestCase): + def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3): + npa, npb = as_numpy(a), as_numpy(b) + self.assertTrue( + np.allclose(npa, npb, atol=atol), + 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max()) + ) diff --git a/train_transfer_cihp.sh b/train_transfer_cihp.sh new file mode 100644 index 0000000..42d2c96 --- /dev/null +++ b/train_transfer_cihp.sh @@ -0,0 +1,2 @@ +python ./exp/transfer/train_cihp_from_pascal.py \ + --batch 24 --gpus 8 --pretrainedModel './pascal_base_trained.pth' \ No newline at end of file diff --git a/train_universal.sh b/train_universal.sh new file mode 100644 index 0000000..f71aff6 --- /dev/null +++ b/train_universal.sh @@ -0,0 +1,3 @@ +python ./exp/universal/pascal_atr_cihp_uni.py \ + --batch 24 --gpus 8 \ + --pretrainedModel './data/pretrained_model/deeplab_v3plus_v3.pth' \ No newline at end of file diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..c83f266 --- /dev/null +++ b/utils/__init__.py @@ -0,0 +1,5 @@ +from .test_human import get_iou_from_list +import utils + + +__all__ = ['get_iou_from_list','utils'] \ No newline at end of file diff --git a/utils/sampler.py b/utils/sampler.py new file mode 100644 index 0000000..754986e --- /dev/null +++ b/utils/sampler.py @@ -0,0 +1,164 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import random +import math +from torchvision import transforms +from PIL import Image + +__all__ = ['cusSampler','Sampler_uni'] + +'''common N-pairs sampler''' +def index_dataset(dataset): + ''' + get the index according to the dataset type(e.g. 
pascal or atr or cihp) + :param dataset: + :return: + ''' + return_dict = {} + for i in range(len(dataset)): + tmp_lbl = dataset.datasets_lbl[i] + if tmp_lbl in return_dict: + return_dict[tmp_lbl].append(i) + else : + return_dict[tmp_lbl] = [i] + return return_dict + +def sample_from_class(dataset,class_id): + return dataset[class_id][random.randrange(len(dataset[class_id]))] + +def sampler_npair_K(batch_size,dataset,K=2,label_random_list = [0,0,1,1,2,2,2]): + images_by_class = index_dataset(dataset) + for batch_idx in range(int(math.ceil(len(dataset) * 1.0 / batch_size))): + example_indices = [sample_from_class(images_by_class, class_label_ind) for _ in range(batch_size) + for class_label_ind in [label_random_list[random.randrange(len(label_random_list))]] + ] + yield example_indices[:batch_size] + +def sampler_(images_by_class,batch_size,dataset,K=2,label_random_list = [0,0,1,1,]): + # images_by_class = index_dataset(dataset) + a = label_random_list[random.randrange(len(label_random_list))] + # print(a) + example_indices = [sample_from_class(images_by_class, a) for _ in range(batch_size) + for class_label_ind in [a] + ] + return example_indices[:batch_size] + +class cusSampler(torch.utils.data.sampler.Sampler): + r"""Samples elements randomly from a given list of indices, without replacement. + + Arguments: + indices (sequence): a sequence of indices + """ + + def __init__(self, dataset, batchsize, label_random_list=[0,1,1,1,2,2,2]): + self.images_by_class = index_dataset(dataset) + self.batch_size = batchsize + self.dataset = dataset + self.label_random_list = label_random_list + self.len = int(math.ceil(len(dataset) * 1.0 / batchsize)) + + def __iter__(self): + # return [sample_from_class(self.images_by_class, class_label_ind) for _ in range(self.batchsize) + # for class_label_ind in [self.label_random_list[random.randrange(len(self.label_random_list))]] + # ] + # print(sampler_(self.images_by_class,self.batch_size,self.dataset)) + return iter(sampler_(self.images_by_class,self.batch_size,self.dataset,self.label_random_list)) + + def __len__(self): + return self.len + +def shuffle_cus(d1=20,d2=10,d3=5,batch=2): + return_list = [] + total_num = d1 + d2 + d3 + list1 = list(range(d1)) + batch1 = d1//batch + list2 = list(range(d1,d1+d2)) + batch2 = d2//batch + list3 = list(range(d1+d2,d1+d2+d3)) + batch3 = d3// batch + random.shuffle(list1) + random.shuffle(list2) + random.shuffle(list3) + random_list = list(range(batch1+batch2+batch3)) + random.shuffle(random_list) + for random_batch_index in random_list: + if random_batch_index < batch1: + random_batch_index1 = random_batch_index + return_list += list1[random_batch_index1*batch : (random_batch_index1+1)*batch] + elif random_batch_index < batch1 + batch2: + random_batch_index1 = random_batch_index - batch1 + return_list += list2[random_batch_index1*batch : (random_batch_index1+1)*batch] + else: + random_batch_index1 = random_batch_index - batch1 - batch2 + return_list += list3[random_batch_index1*batch : (random_batch_index1+1)*batch] + return return_list + +def shuffle_cus_balance(d1=20,d2=10,d3=5,batch=2,balance_index=1): + return_list = [] + total_num = d1 + d2 + d3 + list1 = list(range(d1)) + # batch1 = d1//batch + list2 = list(range(d1,d1+d2)) + # batch2 = d2//batch + list3 = list(range(d1+d2,d1+d2+d3)) + # batch3 = d3// batch + random.shuffle(list1) + random.shuffle(list2) + random.shuffle(list3) + total_list = [list1,list2,list3] + target_list = total_list[balance_index] + for index,list_item in enumerate(total_list): + if index == 
balance_index: + continue + if len(list_item) > len(target_list): + list_item = list_item[:len(target_list)] + total_list[index] = list_item + list1 = total_list[0] + list2 = total_list[1] + list3 = total_list[2] + # list1 = list(range(d1)) + d1 = len(list1) + batch1 = d1 // batch + # list2 = list(range(d1, d1 + d2)) + d2 = len(list2) + batch2 = d2 // batch + # list3 = list(range(d1 + d2, d1 + d2 + d3)) + d3 = len(list3) + batch3 = d3 // batch + + random_list = list(range(batch1+batch2+batch3)) + random.shuffle(random_list) + for random_batch_index in random_list: + if random_batch_index < batch1: + random_batch_index1 = random_batch_index + return_list += list1[random_batch_index1*batch : (random_batch_index1+1)*batch] + elif random_batch_index < batch1 + batch2: + random_batch_index1 = random_batch_index - batch1 + return_list += list2[random_batch_index1*batch : (random_batch_index1+1)*batch] + else: + random_batch_index1 = random_batch_index - batch1 - batch2 + return_list += list3[random_batch_index1*batch : (random_batch_index1+1)*batch] + return return_list + +class Sampler_uni(torch.utils.data.sampler.Sampler): + def __init__(self, num1, num2, num3, batchsize,balance_id=None): + self.num1 = num1 + self.num2 = num2 + self.num3 = num3 + self.batchsize = batchsize + self.balance_id = balance_id + + def __iter__(self): + if self.balance_id is not None: + rlist = shuffle_cus_balance(self.num1, self.num2, self.num3, self.batchsize, balance_index=self.balance_id) + else: + rlist = shuffle_cus(self.num1, self.num2, self.num3, self.batchsize) + return iter(rlist) + + + def __len__(self): + if self.balance_id is not None: + return self.num1*3 + return self.num1+self.num2+self.num3 diff --git a/utils/test_human.py b/utils/test_human.py new file mode 100644 index 0000000..6243544 --- /dev/null +++ b/utils/test_human.py @@ -0,0 +1,167 @@ +import os +import numpy as np +from PIL import Image + + +def main(): + image_paths, label_paths = init_path() + hist = compute_hist(image_paths, label_paths) + show_result(hist) + + +def init_path(): + list_file = './human/list/val_id.txt' + file_names = [] + with open(list_file, 'rb') as f: + for fn in f: + file_names.append(fn.strip()) + + image_dir = './human/features/attention/val/results/' + label_dir = './human/data/labels/' + + image_paths = [] + label_paths = [] + for file_name in file_names: + image_paths.append(os.path.join(image_dir, file_name + '.png')) + label_paths.append(os.path.join(label_dir, file_name + '.png')) + return image_paths, label_paths + + +def fast_hist(lbl, pred, n_cls): + ''' + compute the miou + :param lbl: label + :param pred: output + :param n_cls: num of class + :return: + ''' + # print(n_cls) + k = (lbl >= 0) & (lbl < n_cls) + return np.bincount(n_cls * lbl[k].astype(int) + pred[k], minlength=n_cls ** 2).reshape(n_cls, n_cls) + + +def compute_hist(images, labels,n_cls=20): + hist = np.zeros((n_cls, n_cls)) + for img_path, label_path in zip(images, labels): + label = Image.open(label_path) + label_array = np.array(label, dtype=np.int32) + image = Image.open(img_path) + image_array = np.array(image, dtype=np.int32) + + gtsz = label_array.shape + imgsz = image_array.shape + if not gtsz == imgsz: + image = image.resize((gtsz[1], gtsz[0]), Image.ANTIALIAS) + image_array = np.array(image, dtype=np.int32) + + hist += fast_hist(label_array, image_array, n_cls) + + return hist + + +def show_result(hist): + classes = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes', + 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 
'scarf', 'skirt', + 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', + 'rightShoe'] + # num of correct pixels + num_cor_pix = np.diag(hist) + # num of gt pixels + num_gt_pix = hist.sum(1) + print('=' * 50) + + # @evaluation 1: overall accuracy + acc = num_cor_pix.sum() / hist.sum() + print('>>>', 'overall accuracy', acc) + print('-' * 50) + + # @evaluation 2: mean accuracy & per-class accuracy + print('Accuracy for each class (pixel accuracy):') + for i in range(20): + print('%-15s: %f' % (classes[i], num_cor_pix[i] / num_gt_pix[i])) + acc = num_cor_pix / num_gt_pix + print('>>>', 'mean accuracy', np.nanmean(acc)) + print('-' * 50) + + # @evaluation 3: mean IU & per-class IU + union = num_gt_pix + hist.sum(0) - num_cor_pix + for i in range(20): + print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i])) + iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix) + print('>>>', 'mean IU', np.nanmean(iu)) + print('-' * 50) + + # @evaluation 4: frequency weighted IU + freq = num_gt_pix / hist.sum() + print('>>>', 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum()) + print('=' * 50) + +def get_iou(pred,lbl,n_cls): + ''' + need tensor cpu + :param pred: + :param lbl: + :param n_cls: + :return: + ''' + hist = np.zeros((n_cls,n_cls)) + for i,j in zip(range(pred.size(0)),range(lbl.size(0))): + pred_item = pred[i].data.numpy() + lbl_item = lbl[j].data.numpy() + hist += fast_hist(lbl_item, pred_item, n_cls) + # num of correct pixels + num_cor_pix = np.diag(hist) + # num of gt pixels + num_gt_pix = hist.sum(1) + union = num_gt_pix + hist.sum(0) - num_cor_pix + # for i in range(20): + # print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i])) + iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix) + print('>>>', 'mean IU', np.nanmean(iu)) + miou = np.nanmean(iu) + print('-' * 50) + return miou + +def get_iou_from_list(pred,lbl,n_cls): + ''' + need tensor cpu + :param pred: list + :param lbl: list + :param n_cls: + :return: + ''' + hist = np.zeros((n_cls,n_cls)) + for i,j in zip(range(len(pred)),range(len(lbl))): + pred_item = pred[i].data.numpy() + lbl_item = lbl[j].data.numpy() + # print(pred_item.shape,lbl_item.shape) + hist += fast_hist(lbl_item, pred_item, n_cls) + + # num of correct pixels + num_cor_pix = np.diag(hist) + # num of gt pixels + num_gt_pix = hist.sum(1) + union = num_gt_pix + hist.sum(0) - num_cor_pix + # for i in range(20): + acc = num_cor_pix.sum() / hist.sum() + print('>>>', 'overall accuracy', acc) + print('-' * 50) + # print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i])) + iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix) + print('>>>', 'mean IU', np.nanmean(iu)) + miou = np.nanmean(iu) + print('-' * 50) + + acc = num_cor_pix / num_gt_pix + print('>>>', 'mean accuracy', np.nanmean(acc)) + print('-' * 50) + + return miou + + +if __name__ == '__main__': + import torch + pred = torch.autograd.Variable(torch.ones((2,1,32,32)).int())*20 + pred2 = torch.autograd.Variable(torch.zeros((2,1, 32, 32)).int()) + # lbl = [torch.zeros((32,32)).int() for _ in range(len(pred))] + get_iou(pred,pred2,7) diff --git a/utils/util.py b/utils/util.py new file mode 100644 index 0000000..35c7bb9 --- /dev/null +++ b/utils/util.py @@ -0,0 +1,244 @@ +import os + +import torch +import random +import numpy as np +import torch.nn as nn +import torch.nn.functional as F +import matplotlib.pyplot as plt + + +def recursive_glob(rootdir='.', suffix=''): + """Performs recursive glob with given suffix and rootdir + :param rootdir is the root directory + :param suffix 
is the suffix to be searched + """ + return [os.path.join(looproot, filename) + for looproot, _, filenames in os.walk(rootdir) + for filename in filenames if filename.endswith(suffix)] + +def get_cityscapes_labels(): + return np.array([ + # [ 0, 0, 0], + [128, 64, 128], + [244, 35, 232], + [70, 70, 70], + [102, 102, 156], + [190, 153, 153], + [153, 153, 153], + [250, 170, 30], + [220, 220, 0], + [107, 142, 35], + [152, 251, 152], + [0, 130, 180], + [220, 20, 60], + [255, 0, 0], + [0, 0, 142], + [0, 0, 70], + [0, 60, 100], + [0, 80, 100], + [0, 0, 230], + [119, 11, 32]]) + +def get_pascal_labels(): + """Load the mapping that associates pascal classes with label colors + Returns: + np.ndarray with dimensions (21, 3) + """ + return np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], + [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], + [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], + [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], + [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], + [0, 64, 128]]) + + +def get_mhp_labels(): + """Load the mapping that associates pascal classes with label colors + Returns: + np.ndarray with dimensions (21, 3) + """ + return np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], + [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], + [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], + [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], + [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], + [0, 64, 128], # 21 + [96, 0, 0], [0, 96, 0], [96, 96, 0], + [0, 0, 96], [96, 0, 96], [0, 96, 96], [96, 96, 96], + [32, 0, 0], [160, 0, 0], [32, 96, 0], [160, 96, 0], + [32, 0, 96], [160, 0, 96], [32, 96, 96], [160, 96, 96], + [0, 32, 0], [96, 32, 0], [0, 160, 0], [96, 160, 0], + [0, 32, 96], # 41 + [48, 0, 0], [0, 48, 0], [48, 48, 0], + [0, 0, 96], [48, 0, 48], [0, 48, 48], [48, 48, 48], + [16, 0, 0], [80, 0, 0], [16, 48, 0], [80, 48, 0], + [16, 0, 48], [80, 0, 48], [16, 48, 48], [80, 48, 48], + [0, 16, 0], [48, 16, 0], [0, 80, 0], # 59 + + ]) + +def encode_segmap(mask): + """Encode segmentation label images as pascal classes + Args: + mask (np.ndarray): raw segmentation label image of dimension + (M, N, 3), in which the Pascal classes are encoded as colours. + Returns: + (np.ndarray): class map with dimensions (M,N), where the value at + a given location is the integer denoting the class index. + """ + mask = mask.astype(int) + label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16) + for ii, label in enumerate(get_pascal_labels()): + label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii + label_mask = label_mask.astype(int) + return label_mask + + +def decode_seg_map_sequence(label_masks, dataset='pascal'): + rgb_masks = [] + for label_mask in label_masks: + rgb_mask = decode_segmap(label_mask, dataset) + rgb_masks.append(rgb_mask) + rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2])) + return rgb_masks + +def decode_segmap(label_mask, dataset, plot=False): + """Decode segmentation class labels into a color image + Args: + label_mask (np.ndarray): an (M,N) array of integer values denoting + the class label at each spatial location. + plot (bool, optional): whether to show the resulting color image + in a figure. + Returns: + (np.ndarray, optional): the resulting decoded color image. 
+ """ + if dataset == 'pascal': + n_classes = 21 + label_colours = get_pascal_labels() + elif dataset == 'cityscapes': + n_classes = 19 + label_colours = get_cityscapes_labels() + elif dataset == 'mhp': + n_classes = 59 + label_colours = get_mhp_labels() + else: + raise NotImplementedError + + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + for ll in range(0, n_classes): + r[label_mask == ll] = label_colours[ll, 0] + g[label_mask == ll] = label_colours[ll, 1] + b[label_mask == ll] = label_colours[ll, 2] + rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3)) + rgb[:, :, 0] = r / 255.0 + rgb[:, :, 1] = g / 255.0 + rgb[:, :, 2] = b / 255.0 + if plot: + plt.imshow(rgb) + plt.show() + else: + return rgb + +def generate_param_report(logfile, param): + log_file = open(logfile, 'w') + for key, val in param.items(): + log_file.write(key + ':' + str(val) + '\n') + log_file.close() + +def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True): + n, c, h, w = logit.size() + # logit = logit.permute(0, 2, 3, 1) + target = target.squeeze(1) + if weight is None: + criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index,size_average=size_average) + else: + criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(), ignore_index=ignore_index, size_average=size_average) + loss = criterion(logit, target.long()) + + return loss + +def cross_entropy2d_dataparallel(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True): + n, c, h, w = logit.size() + # logit = logit.permute(0, 2, 3, 1) + target = target.squeeze(1) + if weight is None: + criterion = nn.DataParallel(nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index,size_average=size_average)) + else: + criterion = nn.DataParallel(nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(), ignore_index=ignore_index, size_average=size_average)) + loss = criterion(logit, target.long()) + + return loss.sum() + +def lr_poly(base_lr, iter_, max_iter=100, power=0.9): + return base_lr * ((1 - float(iter_) / max_iter) ** power) + + +def get_iou(pred, gt, n_classes=21): + total_iou = 0.0 + for i in range(len(pred)): + pred_tmp = pred[i] + gt_tmp = gt[i] + + intersect = [0] * n_classes + union = [0] * n_classes + for j in range(n_classes): + match = (pred_tmp == j) + (gt_tmp == j) + + it = torch.sum(match == 2).item() + un = torch.sum(match > 0).item() + + intersect[j] += it + union[j] += un + + iou = [] + for k in range(n_classes): + if union[k] == 0: + continue + iou.append(intersect[k] / union[k]) + + img_iou = (sum(iou) / len(iou)) + total_iou += img_iou + + return total_iou + +def scale_tensor(input,size=512,mode='bilinear'): + print(input.size()) + # b,h,w = input.size() + _, _, h, w = input.size() + if mode == 'nearest': + if h == 512 and w == 512: + return input + return F.upsample_nearest(input,size=(size,size)) + if h>512 and w > 512: + return F.upsample(input, size=(size,size), mode=mode, align_corners=True) + return F.upsample(input, size=(size,size), mode=mode, align_corners=True) + +def scale_tensor_list(input,): + + output = [] + for i in range(len(input)-1): + output_item = [] + for j in range(len(input[i])): + _, _, h, w = input[-1][j].size() + output_item.append(F.upsample(input[i][j], size=(h,w), mode='bilinear', align_corners=True)) + output.append(output_item) + output.append(input[-1]) + return output + +def scale_tensor_list_0(input,base_input): + + output = [] + assert 
len(input) == len(base_input)
+    # upsample each input feature map to the resolution of the matching base map
+    # and accumulate it into base_input in place, then return the modified list
+    for j in range(len(input)):
+        _, _, h, w = base_input[j].size()
+        after_size = F.upsample(input[j], size=(h, w), mode='bilinear', align_corners=True)
+        base_input[j] = base_input[j] + after_size
+    return base_input
+
+if __name__ == '__main__':
+    print(lr_poly(0.007, iter_=99, max_iter=150))
\ No newline at end of file
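For reference, a minimal sketch of how the utilities added in this patch are typically combined at evaluation time. Everything except `decode_seg_map_sequence` (from `utils/util.py`) and `get_iou_from_list` (from `utils/test_human.py`) is a placeholder assumption: `model` stands for any network that returns `N x C x H x W` logits, `val_loader` for a dataloader yielding `{'image', 'label'}` samples, and the `.cuda()` call assumes a GPU is available.

```python
# Hypothetical evaluation loop; only the two utils calls come from this repo.
import torch
from utils.util import decode_seg_map_sequence
from utils.test_human import get_iou_from_list

def evaluate(model, val_loader, n_classes=20):
    model.eval()
    preds, labels = [], []
    with torch.no_grad():
        for sample in val_loader:
            image, label = sample['image'], sample['label']   # assumed sample keys
            logits = model(image.cuda())                       # N x C x H x W
            pred = torch.argmax(logits, dim=1).cpu()           # N x H x W class map
            preds.extend([p for p in pred])
            labels.extend([l.squeeze(0) for l in label])
    # accumulate the confusion matrix over the whole split and print accuracy / mean IoU
    miou = get_iou_from_list(preds, labels, n_cls=n_classes)
    # colorize a few predictions for visualization (e.g. with tensorboardX)
    color_masks = decode_seg_map_sequence([p.numpy() for p in preds[:4]], dataset='pascal')
    return miou, color_masks
```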