Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Created new logger to use instead of root logger #124

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 9 additions & 8 deletions batchgenerators/dataloading/multi_threaded_augmenter.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,11 @@
from queue import Queue as thrQueue
import numpy as np
import sys
import logging
from logging import INFO, DEBUG
from multiprocessing import Event
from time import sleep, time
from threadpoolctl import threadpool_limits
from batchgenerators.utilities.logger import log

try:
import torch
Expand Down Expand Up @@ -72,7 +73,7 @@ def results_loop(in_queues: List[Queue], out_queue: thrQueue, abort_event: Event
do_pin_memory = torch is not None and pin_memory and gpu is not None and torch.cuda.is_available()

if do_pin_memory:
print('using pin_memory on device', gpu)
log(INFO, f'using pin_memory on device {gpu}')
torch.cuda.set_device(gpu)

item = None
Expand Down Expand Up @@ -208,15 +209,15 @@ def __next__(self):
if self._end_ctr == self.num_processes:
self._end_ctr = 0
self._queue_ctr = 0
logging.debug("MultiThreadedGenerator: finished data generation")
log(DEBUG, "MultiThreadedGenerator: finished data generation")
raise StopIteration

item = self.__get_next_item()

return item

except KeyboardInterrupt:
logging.error("MultiThreadedGenerator: caught exception: {}".format(sys.exc_info()))
log(DEBUG, "MultiThreadedGenerator: caught exception: {}".format(sys.exc_info()))
self.abort_event.set()
self._finish()
raise KeyboardInterrupt
Expand All @@ -226,7 +227,7 @@ def _start(self):
self._finish()
self.abort_event.clear()

logging.debug("starting workers")
log(DEBUG, "starting workers")
self._queue_ctr = 0
self._end_ctr = 0

Expand Down Expand Up @@ -258,7 +259,7 @@ def _start(self):

self.was_initialized = True
else:
logging.debug("MultiThreadedGenerator Warning: start() has been called but it has already been "
log(DEBUG, "MultiThreadedGenerator Warning: start() has been called but it has already been "
"initialized previously")

def _finish(self, timeout=10):
Expand All @@ -269,7 +270,7 @@ def _finish(self, timeout=10):
sleep(0.2)

if len(self._processes) != 0:
logging.debug("MultiThreadedGenerator: shutting down workers...")
log(DEBUG, "MultiThreadedGenerator: shutting down workers...")
[i.terminate() for i in self._processes]

for i, p in enumerate(self._processes):
Expand All @@ -290,5 +291,5 @@ def restart(self):
self._start()

def __del__(self):
logging.debug("MultiThreadedGenerator: destructor was called")
log(DEBUG, "MultiThreadedGenerator: destructor was called")
self._finish()
11 changes: 6 additions & 5 deletions batchgenerators/dataloading/nondet_multi_threaded_augmenter.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,12 @@
from multiprocessing import Queue
from queue import Queue as thrQueue
import numpy as np
import logging
from logging import DEBUG, INFO
from multiprocessing import Event
from time import sleep, time

from batchgenerators.dataloading.data_loader import DataLoader
from batchgenerators.utilities.logger import log
from threadpoolctl import threadpool_limits

try:
Expand Down Expand Up @@ -87,7 +88,7 @@ def results_loop(in_queue: Queue, out_queue: thrQueue, abort_event: Event,
do_pin_memory = torch is not None and pin_memory and gpu is not None and torch.cuda.is_available()

if do_pin_memory:
print('using pin_memory on device', gpu)
log(INFO, f'using pin_memory on device {gpu}')
torch.cuda.set_device(gpu)

item = None
Expand Down Expand Up @@ -204,7 +205,7 @@ def _start(self):
self.results_loop_queue = thrQueue(self.num_cached)
self.abort_event = Event()

logging.debug("starting workers")
log(DEBUG, "starting workers")
if isinstance(self.generator, DataLoader):
self.generator.was_initialized = False

Expand Down Expand Up @@ -237,7 +238,7 @@ def _start(self):

self.initialized = True
else:
logging.debug("MultiThreadedGenerator Warning: start() has been called but workers are already running")
log(DEBUG, "MultiThreadedGenerator Warning: start() has been called but workers are already running")

def _finish(self):
if self.initialized:
Expand All @@ -255,7 +256,7 @@ def restart(self):
self._start()

def __del__(self):
logging.debug("MultiThreadedGenerator: destructor was called")
log(DEBUG, "MultiThreadedGenerator: destructor was called")
self._finish()


Expand Down
8 changes: 8 additions & 0 deletions batchgenerators/utilities/logger.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import logging

# Dedicated 'batchgen' logger so the library logs through its own channel
# instead of the root logger, letting applications filter or silence
# batchgenerators output independently.
logger = logging.getLogger('batchgen')

# The logger's own level must be lowered explicitly: a fresh logger inherits
# the root default (WARNING), so without this call every DEBUG/INFO record is
# dropped by the logger BEFORE it ever reaches the handler — handler.setLevel
# alone is not enough.
logger.setLevel(logging.DEBUG)

handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

# Re-export the bound method so call sites can simply do
# `from batchgenerators.utilities.logger import log` and call
# `log(level, msg)` without touching the logger object.
log = logger.log