Commit 1947dfa (parent 6f0b134): 101 changed files with 121,123 additions and 10 deletions.
@@ -1,2 +1,4 @@
```
# script
git.sh
Allweather/pretrained_models/net_g_best.pth
Allweather/pretrained_models/net_g_real.pth
```
@@ -0,0 +1,23 @@
For training and testing, your directory structure should look like this

`Datasets` <br/>
`├──train` <br/>
     `└──Rain13K` <br/>
          `├──input` <br/>
          `└──target` <br/>
`└──test` <br/>
     `├──Test100` <br/>
          `├──input` <br/>
          `└──target` <br/>
     `├──Rain100H` <br/>
          `├──input` <br/>
          `└──target` <br/>
     `├──Rain100L` <br/>
          `├──input` <br/>
          `└──target` <br/>
     `├──Test1200` <br/>
          `├──input` <br/>
          `└──target` <br/>
     `└──Test2800` <br/>
          `├──input` <br/>
          `└──target`
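The following is a small helper sketch (not part of the repository) that creates this directory skeleton with Python's standard library, so the expected paths exist before you copy data in; the dataset names are taken directly from the tree above.

```python
from pathlib import Path

root = Path("Datasets")
test_sets = ["Test100", "Rain100H", "Rain100L", "Test1200", "Test2800"]

for leaf in ("input", "target"):
    # train split: a single Rain13K set with input/target pairs
    (root / "train" / "Rain13K" / leaf).mkdir(parents=True, exist_ok=True)
    # test split: one directory per benchmark, each with input/target pairs
    for name in test_sets:
        (root / "test" / name / leaf).mkdir(parents=True, exist_ok=True)
```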
@@ -0,0 +1,155 @@
```yaml
# general settings
name: Allweather_Histoformer
model_type: ImageCleanModel
scale: 1
num_gpu: 4  # set num_gpu: 0 for cpu mode
manual_seed: 100

# dataset and data loader settings
datasets:
  train:
    name: TrainSet
    type: Dataset_PairedImage
    dataroot_gt: /home1/ssq/data/allweather/gt/
    dataroot_lq: /home1/ssq/data/allweather/input/
    geometric_augs: true

    filename_tmpl: '{}'
    io_backend:
      type: disk

    # data loader
    use_shuffle: true
    num_worker_per_gpu: 8
    batch_size_per_gpu: 8

    ### ------------- Progressive training --------------------------
    mini_batch_sizes: [8,5,2,1,1]   # Batch size per gpu
    iters: [92000,84000,56000,36000,32000]
    gt_size: 362   # Max patch size for progressive training
    gt_sizes: [128,160,256,320,362]   # Patch sizes for progressive training.
    ### --------------------------------------------------------------

    ### ------- Training on single fixed-patch size 128x128 ---------
    # mini_batch_sizes: [8]
    # iters: [300000]
    # gt_size: 128
    # gt_sizes: [128]
    ### --------------------------------------------------------------

    dataset_enlarge_ratio: 1
    prefetch_mode: ~

  val_snow_s:
    name: ValSet_Snow100K-S
    type: Dataset_PairedImage
    dataroot_gt: /home1/ssq/data/allweather/test/Snow100K-S/gt/
    dataroot_lq: /home1/ssq/data/allweather/test/Snow100K-S/synthetic/
    io_backend:
      type: disk

  val_snow_l:
    name: ValSet_Snow100K-L
    type: Dataset_PairedImage
    dataroot_gt: /home1/ssq/data/allweather/test/Snow100K-L/gt/
    dataroot_lq: /home1/ssq/data/allweather/test/Snow100K-L/synthetic/
    io_backend:
      type: disk

  val_test1:
    name: ValSet_Test1
    type: Dataset_PairedImage
    dataroot_gt: /home1/ssq/data/allweather/test/Test1/gt/
    dataroot_lq: /home1/ssq/data/allweather/test/Test1/input/
    io_backend:
      type: disk

  val_raindrop:
    name: ValSet_RainDrop
    type: Dataset_PairedImage
    dataroot_gt: /home1/ssq/data/allweather/test/RainDrop/gt/
    dataroot_lq: /home1/ssq/data/allweather/test/RainDrop/input/
    io_backend:
      type: disk

# network structures
network_g:
  type: Histoformer
  inp_channels: 3
  out_channels: 3
  dim: 36
  num_blocks: [4,4,6,8]
  num_refinement_blocks: 4
  heads: [1,2,4,8]
  ffn_expansion_factor: 2.667
  bias: False
  LayerNorm_type: WithBias
  dual_pixel_task: False

# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  total_iter: 300000
  warmup_iter: -1  # no warm up
  use_grad_clip: true

  # Split 300k iterations into two cycles.
  # 1st cycle: fixed 3e-4 LR for 92k iters.
  # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
  scheduler:
    type: CosineAnnealingRestartCyclicLR  # ReduceLROnPlateau
    periods: [92000, 208000]
    restart_weights: [1,1]
    eta_mins: [0.0003,0.000001]

  mixing_augs:
    mixup: false
    mixup_beta: 1.2
    use_identity: true

  optim_g:
    type: AdamW
    lr: !!float 3e-4
    weight_decay: !!float 1e-4
    betas: [0.9, 0.999]

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1
    reduction: mean
  seq_opt:
    type: Pearson

# validation settings
val:
  window_size: 8
  val_freq: !!float 1e3
  save_img: true
  rgb2bgr: true
  use_image: true
  max_minibatch: 8

  metrics:
    psnr:  # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: true

# logging settings
logger:
  print_freq: 10
  save_checkpoint_freq: !!float 1e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500
```
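The progressive-training fields above pair each stage length in `iters` with a patch size in `gt_sizes` and a per-GPU batch size in `mini_batch_sizes`: 92k iterations at 128px with batch 8, then 84k at 160px with batch 5, and so on, summing to the 300k `total_iter`. Below is a minimal sketch of that mapping, assuming stages switch exactly at the cumulative iteration boundaries; the repository's actual stage logic lives in its BasicSR-based training loop, so treat this only as an illustration of the schedule.

```python
import bisect
import itertools

iters = [92000, 84000, 56000, 36000, 32000]   # stage lengths (sum = 300000)
gt_sizes = [128, 160, 256, 320, 362]          # patch size per stage
mini_batch_sizes = [8, 5, 2, 1, 1]            # per-GPU batch size per stage

# Cumulative stage boundaries: [92000, 176000, 232000, 268000, 300000]
boundaries = list(itertools.accumulate(iters))

def stage_settings(cur_iter):
    """Return (patch_size, batch_size) in effect at a given training iteration."""
    stage = min(bisect.bisect_right(boundaries, cur_iter), len(gt_sizes) - 1)
    return gt_sizes[stage], mini_batch_sizes[stage]

for it in (0, 92000, 176000, 299999):
    print(it, stage_settings(it))  # (128, 8), (160, 5), (256, 2), (362, 1)
```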
@@ -0,0 +1 @@
Pre-trained models are available [here](https://drive.google.com/drive/folders/1dmPhr8Z5iPRx9lh7TwdUFPSfwGIxp5l0?usp=drive_link)
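If you prefer fetching the shared folder from a script, one possible approach uses the third-party `gdown` package (an assumption, not a tool shipped with this repository); downloading manually from a browser works just as well.

```python
import gdown  # assumed installed via: pip install gdown

url = "https://drive.google.com/drive/folders/1dmPhr8Z5iPRx9lh7TwdUFPSfwGIxp5l0?usp=drive_link"
# Download every file in the shared folder into the expected weights directory.
gdown.download_folder(url, output="Allweather/pretrained_models", quiet=False)
```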
@@ -0,0 +1,95 @@
```python
## Restormer: Efficient Transformer for High-Resolution Image Restoration
## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang
## https://arxiv.org/abs/2111.09881

import argparse
import os
import sys
import time
from glob import glob

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from natsort import natsorted
from skimage import img_as_ubyte
from tqdm import tqdm

import util

sys.path.append("/home1/ssq/proj9_single_derain/histoformer_allweather")
from basicsr.models.archs.histoformer_arch import Histoformer

parser = argparse.ArgumentParser(description='Image Deraining using Restormer')
parser.add_argument('--input_dir', default='./Datasets/', type=str, help='Directory of validation images')
parser.add_argument('--result_dir', default='./results/', type=str, help='Directory for results')
parser.add_argument('--weights', default='./pretrained_models/deraining.pth', type=str, help='Path to weights')
parser.add_argument('--yaml_file', default='Options/Allweather_Histoformer.yml', type=str, help='Path to the YAML config')
args = parser.parse_args()

####### Load yaml #######
import yaml

try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader

x = yaml.load(open(args.yaml_file, mode='r'), Loader=Loader)

# Drop the 'type' key: the architecture class is instantiated directly below.
s = x['network_g'].pop('type')
##########################

model_restoration = Histoformer(**x['network_g'])

checkpoint = torch.load(args.weights)
'''
# Optional: profile FLOPs and parameter count with thop.
from thop import profile
flops, params = profile(model_restoration, inputs=(torch.randn(1, 3, 256, 256),))
print('FLOPs = ' + str(flops / 1000**3) + 'G')
print('Params = ' + str(params / 1000**2) + 'M')
'''
model_restoration.load_state_dict(checkpoint['params'])
print("===> Testing using weights:", args.weights)
model_restoration.cuda()
model_restoration = nn.DataParallel(model_restoration)
model_restoration.eval()

factor = 8  # the network requires spatial sizes that are multiples of 8

result_dir = os.path.join(args.result_dir)
os.makedirs(result_dir, exist_ok=True)
inp_dir = os.path.join(args.input_dir)
files = natsorted(glob(os.path.join(inp_dir, '*.png')) + glob(os.path.join(inp_dir, '*.jpg')))

with torch.no_grad():
    for file_ in tqdm(files):
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()

        img = np.float32(util.load_img(file_)) / 255.
        img = torch.from_numpy(img).permute(2, 0, 1)
        input_ = img.unsqueeze(0).cuda()

        # Pad so height and width become multiples of 8
        h, w = input_.shape[2], input_.shape[3]
        H, W = ((h + factor) // factor) * factor, ((w + factor) // factor) * factor
        padh = H - h if h % factor != 0 else 0
        padw = W - w if w % factor != 0 else 0
        input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')

        time1 = time.time()
        restored = model_restoration(input_)
        time2 = time.time()
        # print(time2 - time1)  # per-image inference time

        # Unpad the result back to the original dimensions
        restored = restored[:, :, :h, :w]

        restored = torch.clamp(restored, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0).numpy()

        util.save_img(os.path.join(result_dir, os.path.splitext(os.path.split(file_)[-1])[0] + '.png'),
                      img_as_ubyte(restored))
```
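The only non-obvious step in this script is the reflect padding: the network downsamples spatially, so inputs are padded up to the next multiple of 8 and the output is cropped back afterwards. A self-contained sketch of that round trip, mirroring the arithmetic above on a dummy tensor:

```python
import torch
import torch.nn.functional as F

factor = 8
x = torch.randn(1, 3, 257, 500)           # H and W are not multiples of 8

h, w = x.shape[-2:]
padh = (factor - h % factor) % factor     # 7: pad 257 -> 264
padw = (factor - w % factor) % factor     # 4: pad 500 -> 504
x_pad = F.pad(x, (0, padw, 0, padh), mode='reflect')
assert x_pad.shape[-2] % factor == 0 and x_pad.shape[-1] % factor == 0

y = x_pad[..., :h, :w]                    # crop back to the original size
assert y.shape == x.shape
```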
@@ -0,0 +1,90 @@
```python
## Restormer: Efficient Transformer for High-Resolution Image Restoration
## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang
## https://arxiv.org/abs/2111.09881

import math

import cv2
import numpy as np


def calculate_psnr(img1, img2, border=0):
    # img1 and img2 have range [0, 255]
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border]
    img2 = img2[border:h - border, border:w - border]

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))


# --------------------------------------------
# SSIM
# --------------------------------------------
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    produces the same output as MATLAB's ssim
    img1, img2: [0, 255]
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border]
    img2 = img2[border:h - border, border:w - border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # average SSIM over the three colour channels
            ssims = []
            for i in range(3):
                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')


def ssim(img1, img2):
    C1 = (0.01 * 255) ** 2
    C2 = (0.03 * 255) ** 2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid region only
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()


def load_img(filepath):
    return cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2RGB)


def save_img(filepath, img):
    cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))


def load_gray_img(filepath):
    return np.expand_dims(cv2.imread(filepath, cv2.IMREAD_GRAYSCALE), axis=2)


def save_gray_img(filepath, img):
    cv2.imwrite(filepath, img)
```
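A usage sketch for these metrics, assuming the functions above are saved as `util.py` (the name the test script imports) and that `gt.png` and `restored.png` are hypothetical same-sized 8-bit images:

```python
import util

gt = util.load_img("gt.png")              # RGB, uint8, range [0, 255]
restored = util.load_img("restored.png")  # must match gt's shape

print("PSNR: %.2f dB" % util.calculate_psnr(gt, restored))
print("SSIM: %.4f" % util.calculate_ssim(gt, restored))
```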