Fix deprecated statement #3307

Merged Dec 9, 2024 · 12 commits
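Context for the change: since PyTorch 2.4, torch.cuda.amp.autocast(args...) is deprecated in favor of the device-generic torch.amp.autocast("cuda", args...), which takes the device type as its first argument. A minimal before/after sketch (the enabled flag is illustrative):

    import torch

    # Deprecated form, emits a FutureWarning on PyTorch >= 2.4:
    #     with torch.cuda.amp.autocast(enabled=True):
    #         ...

    # Replacement: same behavior, device type passed explicitly.
    with torch.amp.autocast("cuda", enabled=True):
        ...  # forward pass runs in mixed precision here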
examples/cifar10/main.py (7 changes: 4 additions & 3 deletions)
@@ -7,7 +7,8 @@
 import torch.nn as nn
 import torch.optim as optim
 import utils
-from torch.cuda.amp import autocast, GradScaler
+from torch.amp import autocast
+from torch.cuda.amp import GradScaler
 
 import ignite
 import ignite.distributed as idist
@@ -299,7 +300,7 @@ def train_step(engine, batch):
 
         model.train()
 
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             y_pred = model(x)
             loss = criterion(y_pred, y)
 
@@ -355,7 +356,7 @@ def evaluate_step(engine: Engine, batch):
             x = x.to(device, non_blocking=True)
             y = y.to(device, non_blocking=True)
 
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             output = model(x)
         return output, y
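The same context manager is used on the evaluation path, where no gradient scaling is involved; a minimal sketch of mixed-precision inference (the toy model and data are placeholders, and a CUDA device is assumed):

    import torch
    from torch.amp import autocast

    model = torch.nn.Linear(8, 2).cuda().eval()
    x = torch.randn(4, 8, device="cuda")

    # No GradScaler is needed without a backward pass.
    with torch.no_grad(), autocast("cuda", enabled=True):
        output = model(x)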
examples/cifar100_amp_benchmark/benchmark_torch_cuda_amp.py (5 changes: 3 additions & 2 deletions)
@@ -1,6 +1,7 @@
 import fire
 import torch
-from torch.cuda.amp import autocast, GradScaler
+from torch.amp import autocast
+from torch.cuda.amp import GradScaler
 from torch.nn import CrossEntropyLoss
 from torch.optim import SGD
 from torchvision.models import wide_resnet50_2
@@ -34,7 +35,7 @@ def train_step(engine, batch):
         optimizer.zero_grad()
 
         # Runs the forward pass with autocasting.
-        with autocast():
+        with autocast("cuda"):
             y_pred = model(x)
             loss = criterion(y_pred, y)
 
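For reference, the train_step above pairs autocast with GradScaler in the standard AMP recipe. A self-contained sketch of that pattern under the new import; the toy model, optimizer, and data are placeholders, and a CUDA device is assumed:

    import torch
    from torch.amp import autocast
    from torch.cuda.amp import GradScaler

    model = torch.nn.Linear(8, 2).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    criterion = torch.nn.CrossEntropyLoss()
    scaler = GradScaler()

    x = torch.randn(4, 8, device="cuda")
    y = torch.randint(0, 2, (4,), device="cuda")

    optimizer.zero_grad()
    with autocast("cuda"):  # forward pass in mixed precision
        y_pred = model(x)
        loss = criterion(y_pred, y)
    scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
    scaler.step(optimizer)         # unscales gradients, then runs optimizer.step()
    scaler.update()                # adjusts the scale factor for the next iteration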
examples/cifar10_qat/main.py (5 changes: 3 additions & 2 deletions)
@@ -6,7 +6,8 @@
 import torch.nn as nn
 import torch.optim as optim
 import utils
-from torch.cuda.amp import autocast, GradScaler
+from torch.amp import autocast
+from torch.cuda.amp import GradScaler
 
 import ignite
 import ignite.distributed as idist
@@ -283,7 +284,7 @@ def train_step(engine, batch):
 
         model.train()
 
-        with autocast(enabled=with_amp):
+        with autocast("cuda", enabled=with_amp):
             y_pred = model(x)
             loss = criterion(y_pred, y)
 
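One related note: torch.cuda.amp.GradScaler(args...) was deprecated alongside autocast, in favor of torch.amp.GradScaler("cuda", args...). The diff keeps the old GradScaler import, presumably for compatibility with PyTorch versions that predate the new location; on PyTorch >= 2.4 it could be migrated the same way:

    from torch.amp import GradScaler

    # Equivalent to the deprecated torch.cuda.amp.GradScaler()
    scaler = GradScaler("cuda")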