Commit fbef517

Merge: [nnUNet/PyT] Remove profiling
2 parents 067d21e + 37dd0ff

File tree: 4 files changed, +3 −22 lines

PyTorch/Segmentation/nnUNet/main.py

Lines changed: 1 addition & 11 deletions

@@ -15,7 +15,6 @@
 import ctypes
 import os
 
-import nvidia_dlprof_pytorch_nvtx
 import torch
 from pytorch_lightning import Trainer, seed_everything
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, early_stopping
@@ -30,10 +29,6 @@
 if __name__ == "__main__":
     args = get_main_args()
 
-    if args.profile:
-        nvidia_dlprof_pytorch_nvtx.init()
-        print("Profiling enabled")
-
     if args.affinity != "disabled":
         set_affinity(int(os.getenv("LOCAL_RANK", "0")), args.gpus, mode=args.affinity)
 
@@ -67,7 +62,6 @@
                 mode=args.exec_mode,
                 warmup=args.warmup,
                 dim=args.dim,
-                profile=args.profile,
             )
         ]
     elif args.exec_mode == "train":
@@ -108,11 +102,7 @@
 
     if args.benchmark:
         if args.exec_mode == "train":
-            if args.profile:
-                with torch.autograd.profiler.emit_nvtx():
-                    trainer.fit(model, train_dataloader=data_module.train_dataloader())
-            else:
-                trainer.fit(model, train_dataloader=data_module.train_dataloader())
+            trainer.fit(model, train_dataloader=data_module.train_dataloader())
         else:
             # warmup
             trainer.test(model, test_dataloaders=data_module.test_dataloader())
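For reference, the code removed here followed the standard DLProf pattern: initialize the nvidia_dlprof_pytorch_nvtx plugin once, then run training under torch.autograd.profiler.emit_nvtx() so PyTorch ops emit NVTX ranges that DLProf can correlate with GPU kernels. A minimal, self-contained sketch of that pattern (train_one_epoch is a hypothetical stand-in for trainer.fit; the two profiler calls are the ones deleted above):

import torch
import torch.nn.functional as F

def train_one_epoch(model, loader):
    # Hypothetical stand-in for trainer.fit(), just to make the sketch runnable.
    opt = torch.optim.SGD(model.parameters(), lr=0.01)
    for x, y in loader:
        opt.zero_grad()
        F.mse_loss(model(x), y).backward()
        opt.step()

def fit(model, loader, profile=False):
    if profile:
        # The removed pattern: init the DLProf NVTX plugin once, then run the
        # training loop under emit_nvtx() so each op emits an NVTX range.
        import nvidia_dlprof_pytorch_nvtx  # dependency dropped by this commit
        nvidia_dlprof_pytorch_nvtx.init()
        with torch.autograd.profiler.emit_nvtx():
            train_one_epoch(model, loader)
    else:
        train_one_epoch(model, loader)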

PyTorch/Segmentation/nnUNet/requirements.txt

Lines changed: 1 addition & 2 deletions

@@ -5,5 +5,4 @@ pytorch-lightning==1.3.8
 scikit-learn==1.0
 scikit-image==0.18.3
 pynvml==11.0.0
-numpy==1.21.3
-nvidia_dlprof_pytorch_nvtx==1.7.0
+numpy==1.21.3

PyTorch/Segmentation/nnUNet/scripts/benchmark.py

Lines changed: 0 additions & 2 deletions

@@ -29,7 +29,6 @@
 parser.add_argument("--warmup", type=int, default=50, help="Warmup iterations before collecting statistics")
 parser.add_argument("--results", type=str, default="/results", help="Path to results directory")
 parser.add_argument("--logname", type=str, default="perf.json", help="Name of dlloger output")
-parser.add_argument("--profile", action="store_true", help="Enable dlprof profiling")
 
 if __name__ == "__main__":
     args = parser.parse_args()
@@ -45,7 +44,6 @@
     cmd += f"--test_batches {args.test_batches} "
     cmd += f"--warmup {args.warmup} "
     cmd += "--amp " if args.amp else ""
-    cmd += "--profile " if args.profile else ""
     if args.mode == "train":
         cmd += f"--batch_size {args.batch_size} "
     else:
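The benchmark script forwards its own CLI flags to main.py by string concatenation, so dropping --profile is just one fewer conditional append. A minimal sketch of that flag-forwarding pattern (the subprocess invocation is an assumption about how the assembled cmd is ultimately run; only the --amp and --warmup options shown here are taken from the diff):

import argparse
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument("--amp", action="store_true", help="Enable mixed precision")
parser.add_argument("--warmup", type=int, default=50, help="Warmup iterations before collecting statistics")

if __name__ == "__main__":
    args = parser.parse_args()
    cmd = "python main.py --benchmark "
    cmd += f"--warmup {args.warmup} "
    cmd += "--amp " if args.amp else ""  # boolean flags are appended only when set
    subprocess.run(cmd, shell=True, check=True)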

PyTorch/Segmentation/nnUNet/utils/logger.py

Lines changed: 1 addition & 7 deletions

@@ -17,7 +17,6 @@
 
 import dllogger as logger
 import numpy as np
-import torch.cuda.profiler as profiler
 from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
 from pytorch_lightning import Callback
 
@@ -49,20 +48,17 @@ def flush(self):
 
 
 class LoggingCallback(Callback):
-    def __init__(self, log_dir, filnename, global_batch_size, mode, warmup, dim, profile):
+    def __init__(self, log_dir, filnename, global_batch_size, mode, warmup, dim):
         self.dllogger = DLLogger(log_dir, filnename)
         self.warmup_steps = warmup
         self.global_batch_size = global_batch_size
         self.step = 0
         self.dim = dim
         self.mode = mode
-        self.profile = profile
         self.timestamps = []
 
     def do_step(self):
         self.step += 1
-        if self.profile and self.step == self.warmup_steps:
-            profiler.start()
         if self.step > self.warmup_steps:
             self.timestamps.append(time.time())
 
@@ -96,8 +92,6 @@ def _log(self):
         self.dllogger.flush()
 
     def on_train_end(self, trainer, pl_module):
-        if self.profile:
-            profiler.stop()
         self._log()
 
     def on_test_end(self, trainer, pl_module):
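The deleted logger hooks implemented a warmup-gated capture window: torch.cuda.profiler.start() opened the window once warmup_steps had elapsed, so an externally attached profiler with deferred capture records only steady-state iterations, and torch.cuda.profiler.stop() closed it at the end of training. A minimal sketch of that gating, decoupled from the Lightning callback (class and method names here are illustrative, not the repo's):

import time

import torch.cuda.profiler as profiler

class WarmupGatedTimer:
    # Illustrative sketch of the removed gating; not the repo's LoggingCallback.
    def __init__(self, warmup_steps, profile=True):
        self.warmup_steps = warmup_steps
        self.profile = profile
        self.step = 0
        self.timestamps = []

    def do_step(self):
        self.step += 1
        if self.profile and self.step == self.warmup_steps:
            profiler.start()  # open the capture window once warmup is done
        if self.step > self.warmup_steps:
            self.timestamps.append(time.time())  # time only steady-state steps

    def on_train_end(self):
        if self.profile:
            profiler.stop()  # close the capture window at the end of training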
