
Commit a32efdb

Updated .pre-commit-config.yaml file and reformatted a few files for better readability. (#1211)

* Used the pylint automated tool to update from .format to f-strings.
* Updated the pre-commit configuration file.
* Reformatted 2 more files to use f-strings.
* Added a new check that verifies .toml files in the repo, and updated the black repo link.

Co-authored-by: Claudia Comito <[email protected]>
1 parent 724a80b commit a32efdb
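
The bulk of this commit is a mechanical rewrite of str.format() calls into f-strings. As a quick orientation before the per-file diffs, here is a minimal sketch (illustrative only, not code from the commit) of the before/after pattern:

import time

# Illustrative sketch only -- not part of this commit. It shows the
# rewrite pattern applied throughout the diffs below: a str.format()
# call becomes an f-string embedding the same expression directly.
start = time.perf_counter()
sum(range(1_000_000))  # arbitrary stand-in workload
end = time.perf_counter()

old_style = "\t{}s".format(end - start)  # before this commit
new_style = f"\t{end - start}s"          # after this commit
assert old_style == new_style

Both spellings go through the same formatting machinery; the f-string is simply evaluated inline, which is why the automated rewrite is behavior-preserving.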

14 files changed: +49, -59 lines changed

.pre-commit-config.yaml

Lines changed: 7 additions & 3 deletions
@@ -2,17 +2,21 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v2.0.0
+  rev: v4.4.0
   hooks:
   - id: trailing-whitespace
   - id: end-of-file-fixer
   - id: check-yaml
   - id: check-added-large-files
-  - id: flake8
-- repo: https://github.com/psf/black
+  - id: check-toml
+- repo: https://github.com/psf/black-pre-commit-mirror
   rev: 23.9.1
   hooks:
   - id: black
+- repo: https://github.com/PyCQA/flake8
+  rev: 6.1.0
+  hooks:
+  - id: flake8
 - repo: https://github.com/pycqa/pydocstyle
   rev: 6.3.0 # pick a git hash / tag to point to
   hooks:
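
For context, the new check-toml hook fails the commit if any staged .toml file does not parse. The sketch below approximates that behaviour, assuming Python 3.11+ for the stdlib tomllib module; the actual hook is implemented in the pre-commit/pre-commit-hooks repository, not here.

import sys
import tomllib  # stdlib TOML parser, Python 3.11+

def check_toml(paths: list[str]) -> int:
    """Return 1 if any of the given .toml files fails to parse, else 0."""
    retval = 0
    for path in paths:
        try:
            with open(path, "rb") as f:  # tomllib requires binary mode
                tomllib.load(f)
        except tomllib.TOMLDecodeError as exc:
            print(f"{path}: {exc}")
            retval = 1
    return retval

if __name__ == "__main__":
    sys.exit(check_toml(sys.argv[1:]))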

benchmarks/2020/distance_matrix/dask-chunks-cpu.py

Lines changed: 1 addition & 1 deletion
@@ -31,4 +31,4 @@
 start = time.perf_counter()
 dist = dmm.euclidean_distances(data, data).compute()
 end = time.perf_counter()
-print("\t{}s".format(end - start))
+print(f"\t{end - start}s")

benchmarks/2020/generate_jobscripts.py

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ def jobscripts_from(
         if key in SKIP:
             continue

-        parameters.append("--{}".format(key))
+        parameters.append(f"--{key}")
         parameters.append(str(value))

     for script, benchmark in configuration["benchmarks"].items():

examples/nn/imagenet-DASO.py

Lines changed: 9 additions & 9 deletions
@@ -330,8 +330,8 @@ def main():
         print0("Test mode - no DDP, no apex, RN50, 10 iterations")

     args.distributed = True  # TODO: DDDP: if ht.MPI_WORLD.size > 1 else False
-    print0("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
-    print0("\nCUDNN VERSION: {}\n".format(torch.backends.cudnn.version()))
+    print0(f"loss_scale = {args.loss_scale}", type(args.loss_scale))
+    print0(f"\nCUDNN VERSION: {torch.backends.cudnn.version()}\n")

     cudnn.benchmark = True
     best_prec1 = 0
@@ -379,10 +379,10 @@ def main():

     # create model
     if args.pretrained:
-        print0("=> using pre-trained model '{}'".format(args.arch))
+        print0(f"=> using pre-trained model '{args.arch}'")
         model = models.__dict__[args.arch](pretrained=True)
     else:
-        print0("=> creating model '{}'".format(args.arch))
+        print0(f"=> creating model '{args.arch}'")
         model = models.__dict__[args.arch]()

     if (
@@ -426,7 +426,7 @@ def main():
     # Use a local scope to avoid dangling references
     def resume():
         if os.path.isfile(args.resume):
-            print0("=> loading checkpoint '{}'".format(args.resume))
+            print0(f"=> loading checkpoint '{args.resume}'")
             checkpoint = torch.load(
                 args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu)
             )
@@ -440,7 +440,7 @@ def resume():
         else:
             try:
                 resfile = "imgnet-checkpoint-" + str(args.world_size) + ".pth.tar"
-                print0("=> loading checkpoint '{}'".format(resfile))
+                print0(f"=> loading checkpoint '{resfile}'")
                 checkpoint = torch.load(
                     resfile, map_location=lambda storage, loc: storage.cuda(args.gpu)
                 )
@@ -636,11 +636,11 @@ def train(dev, train_loader, model, criterion, optimizer, epoch):
         target = data[0]["label"].squeeze().cuda(dev).long()

         if 0 <= args.prof == i:
-            print("Profiling begun at iteration {}".format(i))
+            print(f"Profiling begun at iteration {i}")
             torch.cuda.cudart().cudaProfilerStart()

         if args.prof >= 0:
-            torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
+            torch.cuda.nvtx.range_push(f"Body of iteration {i}")

         lr_warmup(optimizer, epoch, i, train_loader_len)

@@ -719,7 +719,7 @@ def train(dev, train_loader, model, criterion, optimizer, epoch):
             torch.cuda.nvtx.range_pop()

         if args.prof >= 0 and i == args.prof + 10:
-            print0("Profiling ended at iteration {}".format(i))
+            print0(f"Profiling ended at iteration {i}")
             torch.cuda.cudart().cudaProfilerStop()
             quit()
         # todo average loss, and top1 and top5
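
Several hunks above embed whole call expressions in the replacement field, e.g. torch.backends.cudnn.version(). That works because an f-string field accepts any Python expression, evaluated at runtime. A dependency-free sketch, using platform.python_version() as a stand-in so it runs without torch:

import platform

# An f-string replacement field may contain an arbitrary expression,
# including a function call, so the .format() indirection is unnecessary.
assert (
    "\nPYTHON VERSION: {}\n".format(platform.python_version())
    == f"\nPYTHON VERSION: {platform.python_version()}\n"
)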

examples/nn/imagenet.py

Lines changed: 1 addition & 1 deletion
@@ -324,7 +324,7 @@ def validate(val_loader, model, criterion, args):
             progress.display(i)

     # TODO: this should also be done with the ProgressMeter
-    print(" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5))
+    print(f" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}")

     return top1.avg
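
One subtlety in this hunk: the format spec {top1.avg:.3f} survives the conversion verbatim, because f-strings reuse str.format's format-specification mini-language, including attribute access and precision specifiers. A self-contained sketch, where Meter is a hypothetical stand-in for the script's meter objects:

from dataclasses import dataclass

@dataclass
class Meter:
    """Hypothetical stand-in for the accuracy meters in the script."""
    avg: float

top1, top5 = Meter(76.1284), Meter(92.8571)

# Attribute access and the :.3f precision spec behave identically
# in .format() and in an f-string.
assert (
    " * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
    == f" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}"
)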

heat/cluster/_kcluster.py

Lines changed: 1 addition & 1 deletion
@@ -132,7 +132,7 @@ def _initialize_cluster_centers(self, x: DNDarray):
         elif isinstance(self.init, DNDarray):
             if len(self.init.shape) != 2:
                 raise ValueError(
-                    "passed centroids need to be two-dimensional, but are {}".format(len(self.init))
+                    f"passed centroids need to be two-dimensional, but are {len(self.init)}"
                 )
             if self.init.shape[0] != self.n_clusters or self.init.shape[1] != x.shape[1]:
                 raise ValueError("passed centroids do not match cluster count or data shape")

heat/core/base.py

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ def __repr__(self, indent: int = 1) -> str:
         indent : int, default: 1
             Indicates the indentation for the top-level output.
         """
-        return "{}({})".format(self.__class__.__name__, json.dumps(self.get_params(), indent=4))
+        return f"{self.__class__.__name__}({json.dumps(self.get_params(), indent=4)})"

     def set_params(self, **params: Dict[str, object]) -> self:
         """

heat/core/factories.py

Lines changed: 2 additions & 2 deletions
@@ -323,7 +323,7 @@ def array(
                 else devices.get_device().torch_device,
             )
         except RuntimeError:
-            raise TypeError("invalid data of type {}".format(type(obj)))
+            raise TypeError(f"invalid data of type {type(obj)}")
     else:
         if copy is False and not np.isscalar(obj) and not isinstance(obj, (Tuple, List)):
             # Python array-API compliance, cf. https://data-apis.org/array-api/2022.12/API_specification/generated/array_api.asarray.html
@@ -346,7 +346,7 @@ def array(
                 else devices.get_device().torch_device,
             )
         except RuntimeError:
-            raise TypeError("invalid data of type {}".format(type(obj)))
+            raise TypeError(f"invalid data of type {type(obj)}")

     # infer dtype from obj if not explicitly given
     if dtype is None:

heat/core/io.py

Lines changed: 9 additions & 15 deletions
@@ -110,7 +110,7 @@ def load_hdf5(
     if not isinstance(path, str):
         raise TypeError(f"path must be str, not {type(path)}")
     elif not isinstance(dataset, str):
-        raise TypeError("dataset must be str, not {}".format(type(dataset)))
+        raise TypeError(f"dataset must be str, not {type(dataset)}")
     elif split is not None and not isinstance(split, int):
         raise TypeError(f"split must be None or int, not {type(split)}")

@@ -410,11 +410,11 @@ def save_netcdf(
     >>> ht.save_netcdf(x, 'data.nc', dataset='DATA')
     """
     if not isinstance(data, DNDarray):
-        raise TypeError("data must be heat tensor, not {}".format(type(data)))
+        raise TypeError(f"data must be heat tensor, not {type(data)}")
     if not isinstance(path, str):
-        raise TypeError("path must be str, not {}".format(type(path)))
+        raise TypeError(f"path must be str, not {type(path)}")
     if not isinstance(variable, str):
-        raise TypeError("variable must be str, not {}".format(type(path)))
+        raise TypeError(f"variable must be str, not {type(path)}")
     if dimension_names is None:
         dimension_names = [
             __NETCDF_DIM_TEMPLATE.format(variable, dim) for dim, _ in enumerate(data.shape)
@@ -430,15 +430,11 @@ def save_netcdf(
             )
         )
     elif not len(dimension_names) == len(data.shape):
-        raise ValueError(
-            "{0} names given for {1} dimensions".format(len(dimension_names), len(data.shape))
-        )
+        raise ValueError(f"{len(dimension_names)} names given for {len(data.shape)} dimensions")

     # we only support a subset of possible modes
     if mode not in __VALID_WRITE_MODES:
-        raise ValueError(
-            "mode was {}, not in possible modes {}".format(mode, __VALID_WRITE_MODES)
-        )
+        raise ValueError(f"mode was {mode}, not in possible modes {__VALID_WRITE_MODES}")

     failed = 0
     excep = None
@@ -468,9 +464,7 @@ def __get_expanded_split(
         If resulting shapes do not match.
     """
     if np.prod(shape) != np.prod(expanded_shape):
-        raise ValueError(
-            "Shapes %s and %s do not have the same size" % (shape, expanded_shape)
-        )
+        raise ValueError(f"Shapes {shape} and {expanded_shape} do not have the same size")
     if np.prod(shape) == 1:  # size 1 array
         return split
     if len(shape) == len(expanded_shape):  # actually not expanded at all
@@ -484,7 +478,7 @@ def __get_expanded_split(
     ex_ind_nonempty, sq_ex = list(zip(*enumerated))  # transpose
     if not sq_shape == sq_ex:
         raise ValueError(
-            "Shapes %s and %s differ in non-empty dimensions" % (shape, expanded_shape)
+            f"Shapes {shape} and {expanded_shape} differ in non-empty dimensions"
         )
     if split in ind_nonempty:  # split along non-empty dimension
         split_sq = ind_nonempty.index(split)  # split-axis in squeezed shape
@@ -664,7 +658,7 @@ def __merge_slices(
         raise excep
     elif failed:
         excep = data.comm.bcast(excep, root=failed - 1)
-        excep.args = "raised by process rank {}".format(failed - 1), *excep.args
+        excep.args = f"raised by process rank {failed - 1}", *excep.args
         raise excep from None  # raise the same error but without traceback
         # because that is on a different process
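
The __get_expanded_split hunks above retire printf-style % formatting as well, not just .format(). For these simple %s substitutions all three spellings render identically, as this sketch with made-up shapes shows:

shape, expanded_shape = (2, 3), (2, 3, 1)  # made-up example shapes

# %-style, str.format, and f-string all call str() on each tuple here.
a = "Shapes %s and %s do not have the same size" % (shape, expanded_shape)
b = "Shapes {} and {} do not have the same size".format(shape, expanded_shape)
c = f"Shapes {shape} and {expanded_shape} do not have the same size"
assert a == b == c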

heat/core/linalg/svdtools.py

Lines changed: 2 additions & 2 deletions
@@ -85,7 +85,7 @@ def hsvd_rank(
     [2] Himpe, Leibner, Rave. Hierarchical approximate proper orthogonal decomposition. SIAM J. Sci. Comput., 40 (5), 2018.
     """
     if not isinstance(A, DNDarray):
-        raise TypeError("Argument needs to be a DNDarray but is {}.".format(type(A)))
+        raise TypeError(f"Argument needs to be a DNDarray but is {type(A)}.")
     if not A.ndim == 2:
         raise ValueError("A needs to be a 2D matrix")
     if not A.dtype == types.float32 and not A.dtype == types.float64:
@@ -197,7 +197,7 @@ def hsvd_rtol(
     [2] Himpe, Leibner, Rave. Hierarchical approximate proper orthogonal decomposition. SIAM J. Sci. Comput., 40 (5), 2018.
     """
     if not isinstance(A, DNDarray):
-        raise TypeError("Argument needs to be a DNDarray but is {}.".format(type(A)))
+        raise TypeError(f"Argument needs to be a DNDarray but is {type(A)}.")
     if not A.ndim == 2:
         raise ValueError("A needs to be a 2D matrix")
     if not A.dtype == types.float32 and not A.dtype == types.float64:
