"""
This experiment trains a simple vanilla CNN autoencoder and keeps the weights
of the best validation epoch.
"""

import os

import torch
import tqdm
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau

from models.cnn_generator import CNNAutoencoder
from utils.TorchUtils.training.StatsTracker import StatsTracker


def compute_forward_pass(model, x, optimizer, criterion, update):
    # The autoencoder returns the latent code and the reconstruction of x.
    latent, reconstruction = model(x)

    # Photometric (reconstruction) loss between the reconstruction and the input.
    photometric_loss = criterion(reconstruction, x)
    if update:
        # Training mode: clear stale gradients, backpropagate, and step the optimizer.
        model.zero_grad()
        photometric_loss.backward()
        optimizer.step()
    return photometric_loss


def train(model, train_loader, val_loader, device, epochs, lr, batch_size):
    # Set up the optimizer, LR scheduler, reconstruction loss, and stats tracking.
    optimizer = Adam(params=model.parameters(), lr=lr)
    scheduler = ReduceLROnPlateau(
        optimizer, 'min', factor=0.1, patience=3, min_lr=1e-5, verbose=True)

    # The tracker receives the number of samples seen per epoch so it can
    # average the summed per-batch losses accumulated below.
    statsTracker = StatsTracker(
        batch_size * len(train_loader), batch_size * len(val_loader))
    criterion = MSELoss(reduction="sum")

    for epoch in range(1, epochs + 1):

        # Training pass: update the weights and accumulate the summed train loss.
        model.train()
        for x, _ in tqdm.tqdm(train_loader):
            x = x.to(device=device)
            photometric_loss = compute_forward_pass(
                model, x, optimizer, criterion, update=True)
            statsTracker.update_curr_losses(photometric_loss.item(), None)

        # Validation pass: no gradient tracking and no parameter updates.
        with torch.no_grad():
            model.eval()
            for x, _ in tqdm.tqdm(val_loader):
                x = x.to(device=device)
                photometric_loss_val = compute_forward_pass(
                    model, x, optimizer, criterion, update=False)
                statsTracker.update_curr_losses(
                    None, photometric_loss_val.item())

        # Per-sample epoch means; the asserts sanity-check that compute_means
        # divides the accumulated sums by the sample counts given above.
        train_loss_epoch, val_loss_epoch = statsTracker.compute_means()
        assert (statsTracker.train_loss_curr /
                (batch_size * len(train_loader))) == train_loss_epoch
        assert (statsTracker.val_loss_curr /
                (batch_size * len(val_loader))) == val_loss_epoch

        # Record the epoch losses; passing the model together with the
        # validation loss lets the tracker keep the best-performing weights.
        statsTracker.update_histories(train_loss_epoch, None)
        statsTracker.update_histories(None, val_loss_epoch, model)

        # Reduce the learning rate when the validation loss plateaus.
        scheduler.step(val_loss_epoch)
        print('Student_network, Epoch {}, Train Loss {}, Val Loss {}'.format(
            epoch, round(train_loss_epoch, 6), round(val_loss_epoch, 6)))

        statsTracker.reset()

    return statsTracker.best_model


def run_experiment(fp, training_params, architecture_params, dataset_params, dataloader_func, resume):
    device = (torch.device('cuda') if torch.cuda.is_available()
              else torch.device('cpu'))

    train_loader, val_loader = dataloader_func(**dataset_params["hyperparams"])

    autoencoder = CNNAutoencoder(**architecture_params).to(device=device)

    # Optionally resume from previously saved weights.
    if resume:
        autoencoder.load_state_dict(torch.load(
            os.path.join(fp, "weights/cnn_ae.pt")))

    print(autoencoder)
    best_model = train(autoencoder, train_loader, val_loader,
                       device, **training_params)

    # Make sure the weights directory exists, then persist the best weights.
    # Note: the resume branch above loads a state_dict, so StatsTracker.best_model
    # is assumed to hold the weights in state_dict form.
    os.makedirs(os.path.join(fp, "weights"), exist_ok=True)
    torch.save(best_model, os.path.join(fp, "weights/cnn_ae.pt"))
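

if __name__ == "__main__":
    # Illustrative smoke test only, not part of the experiment pipeline: it
    # exercises compute_forward_pass with a tiny stand-in autoencoder on random
    # data. The real entry point is run_experiment, called with the project's
    # CNNAutoencoder arguments, dataloader function, and config dictionaries,
    # e.g. run_experiment(fp=..., training_params=..., architecture_params=...,
    # dataset_params={"hyperparams": {...}}, dataloader_func=..., resume=False).
    import torch.nn as nn

    class _ToyAutoencoder(nn.Module):
        """Minimal module returning (latent, reconstruction) like CNNAutoencoder."""

        def __init__(self):
            super().__init__()
            self.encoder = nn.Conv2d(1, 4, kernel_size=3, padding=1)
            self.decoder = nn.Conv2d(4, 1, kernel_size=3, padding=1)

        def forward(self, x):
            latent = self.encoder(x)
            return latent, self.decoder(latent)

    toy_model = _ToyAutoencoder()
    toy_optimizer = Adam(toy_model.parameters(), lr=1e-3)
    toy_criterion = MSELoss(reduction="sum")
    dummy_batch = torch.randn(8, 1, 32, 32)

    loss = compute_forward_pass(
        toy_model, dummy_batch, toy_optimizer, toy_criterion, update=True)
    print("Toy reconstruction loss:", round(loss.item(), 6))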