RuntimeError: applying transform <monai.transforms.compose.Compose object at 0x7f12920a50a0> #7223
medicalpeppa started this conversation in General
Replies: 1 comment 5 replies
-
Hi @medicalpeppa, could you please paste the whole error message? Thanks!
-
epoch 1/5
1/2, train_loss: 0.8549
2/2, train_loss: 0.7164
epoch 1 average loss: 0.7856
epoch 2/5
1/2, train_loss: 0.8476
2/2, train_loss: 0.6748
epoch 2 average loss: 0.7612
RuntimeError Traceback (most recent call last)
/home/yangzherui/chenjianan/3D语义分割-vnet3D及unet3D/3d分类(2).ipynb Cell 10 line 3
31 y_pred = torch.tensor([], dtype=torch.float32, device=device)
32 y = torch.tensor([], dtype=torch.long, device=device)
---> 33 for val_data in val_loader:
34 val_images, val_labels = val_data["image"].to(device), val_data["label"].to(device)
35 y_pred = torch.cat([y_pred, model(val_images)], dim=0)
File ~/miniconda3/envs/yangzherui/lib/python3.9/site-packages/torch/utils/data/dataloader.py:633, in _BaseDataLoaderIter.__next__(self)
630 if self._sampler_iter is None:
631 # TODO(pytorch/pytorch#76750)
632 self._reset() # type: ignore[call-arg]
--> 633 data = self._next_data()
634 self._num_yielded += 1
635 if self._dataset_kind == _DatasetKind.Iterable and \
636 self._IterableDataset_len_called is not None and \
637 self._num_yielded > self._IterableDataset_len_called:
File ~/miniconda3/envs/yangzherui/lib/python3.9/site-packages/torch/utils/data/dataloader.py:1345, in _MultiProcessingDataLoaderIter._next_data(self)
1343 else:
1344 del self._task_info[idx]
-> 1345 return self._process_data(data)
File ~/miniconda3/envs/yangzherui/lib/python3.9/site-packages/torch/utils/data/dataloader.py:1371, in _MultiProcessingDataLoaderIter._process_data(self, data)
...
return apply_transform(self.transform, data_i) if self.transform is not None else data_i
File "/home/yangzherui/miniconda3/envs/yangzherui/lib/python3.9/site-packages/monai/transforms/transform.py", line 171, in apply_transform
raise RuntimeError(f"applying transform {transform}") from e
RuntimeError: applying transform <monai.transforms.compose.Compose object at 0x7f12920a50a0>
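Note: this RuntimeError only wraps the real exception raised inside the Compose by a DataLoader worker. A minimal debugging sketch (assuming the val_transforms and val_files defined in the code below) that applies the same pipeline to each item in the main process, so the original traceback and the failing file become visible:

# debugging sketch (not part of the original script): run the validation
# transform on each item directly to see which file fails and why
for item in val_files:
    try:
        val_transforms(item)
    except Exception:
        print(f"transform failed for: {item['image']}")
        raise

Rebuilding the loader with num_workers=0 would similarly keep the full worker traceback in the main process.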
This is my code:
# imports (inferred from the usage below; they were not in the original paste)
import os
import random

import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter

import monai
from monai.data import DataLoader, Dataset
from monai.metrics import ROCAUCMetric
from monai.networks.nets import resnet50
from monai.transforms import (
    Activations, AsDiscrete, Compose, EnsureChannelFirstD, LoadImageD,
    OrientationD, RandRotate90D, ResizeD, ScaleIntensityRangeD, SpacingD,
)

data_dir = "/home/yangzherui/chenjianan/10-seg/10-seg-data/images2/pre-image"
class_names = sorted(x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x)))
num_class = len(class_names)
image_files = [
    [os.path.join(data_dir, class_names[i], x) for x in os.listdir(os.path.join(data_dir, class_names[i]))]
    for i in range(num_class)
]
num_each = [len(image_files[i]) for i in range(num_class)]
image_files_list = []
image_class = []
for i in range(num_class):
    image_files_list.extend(image_files[i])
    image_class.extend([i] * num_each[i])
image_class = np.array(image_class, dtype=np.int64)
random.seed(2023)
templist = [i for i in zip(image_files_list, image_class)]
random.shuffle(templist)
image_files_list[:], image_class[:] = zip(*templist)
print(f"Total image count: {len(image_files_list)}")
print(f"Label names: {class_names}")
print(f"Label counts: {num_each}")
num_class = 2
train_transform = Compose(
    [
        LoadImageD(keys="image", image_only=True),
        EnsureChannelFirstD(keys="image"),
        ScaleIntensityRangeD(
            keys="image",
            a_min=-17.3,
            a_max=1280.5,
            b_min=0.0,
            b_max=1.0,
            clip=True,
        ),
        OrientationD(keys="image", axcodes="RAS"),
        SpacingD(keys="image", pixdim=(1, 1, 1), mode="bilinear"),
        ResizeD(keys="image", spatial_size=(96, 96, 96)),
        RandRotate90D(keys="image", prob=0.8, spatial_axes=[0, 2]),
    ]
)
val_transforms = Compose(
    [
        LoadImageD(keys="image", image_only=True),
        EnsureChannelFirstD(keys="image"),
        ScaleIntensityRangeD(
            keys="image",
            a_min=-17.3,
            a_max=1280.5,
            b_min=0.0,
            b_max=1.0,
            clip=True,
        ),
        OrientationD(keys="image", axcodes="RAS"),
        SpacingD(keys="image", pixdim=(1, 1, 1), mode="bilinear"),
        ResizeD(keys="image", spatial_size=(96, 96, 96)),
    ]
)
post_pred = Compose([Activations(softmax=True)])
post_label = Compose([AsDiscrete(to_onehot=num_class)])
val_frac = 0.2
slicer = int(len(image_files_list)*val_frac)
train_files = [{"image": img, "label": label} for img, label in zip(image_files_list[:slicer], image_class[:slicer])]
val_files = [{"image": img, "label": label} for img, label in zip(image_files_list[-slicer:], image_class[-slicer:])]
tra_ds = Dataset(data=train_files, transform=train_transform)
tra_loader = DataLoader(tra_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#model = densenet121(spatial_dims=3, in_channels=1, out_channels=num_class).to(device)
model = resnet50(spatial_dims=3,n_input_channels=1,num_classes=num_class).to(device)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 1e-5)
auc_metric = ROCAUCMetric()
max_epochs = 5
val_interval = 2
best_metric = -1
best_metric_epoch = -1
writer = SummaryWriter()
for epoch in range(max_epochs):
    print("-" * 10)
    print(f"epoch {epoch + 1}/{max_epochs}")
    model.train()
    epoch_loss = 0
    step = 0
    for batch_data in tra_loader:
        step += 1
        inputs, labels = batch_data["image"].to(device), batch_data["label"].to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_len = len(tra_ds) // tra_loader.batch_size
        print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
        writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
    epoch_loss /= step
    print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
writer.close()
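The validation block that the traceback points at (the "for val_data in val_loader:" loop) is not included in the paste above; it would sit inside the epoch loop, after the training step. A minimal sketch of what that step presumably looks like, reconstructed from the traceback lines and the post_pred / post_label / auc_metric defined earlier (the exact body is an assumption, not the original code):

# hypothetical reconstruction of the validation step (not from the original post)
from monai.data import decollate_batch

if (epoch + 1) % val_interval == 0:
    model.eval()
    with torch.no_grad():
        y_pred = torch.tensor([], dtype=torch.float32, device=device)
        y = torch.tensor([], dtype=torch.long, device=device)
        for val_data in val_loader:
            val_images, val_labels = val_data["image"].to(device), val_data["label"].to(device)
            y_pred = torch.cat([y_pred, model(val_images)], dim=0)
            y = torch.cat([y, val_labels], dim=0)
        y_onehot = [post_label(i) for i in decollate_batch(y, detach=False)]
        y_pred_act = [post_pred(i) for i in decollate_batch(y_pred)]
        auc_metric(y_pred_act, y_onehot)
        auc_result = auc_metric.aggregate()
        auc_metric.reset()
        print(f"epoch {epoch + 1} AUC: {auc_result:.4f}")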
What's wrong with my code? My MONAI version is 1.3.0.
Thank you very much.