Commit ba2af11: more f-string lint

1 parent d3a8ab4
21 files changed, +51 -69 lines
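Almost every hunk below applies the same mechanical rewrite: old-style % interpolation becomes an f-string with the expression embedded directly in the literal. A minimal sketch of the equivalences involved (variable names here are illustrative, assuming Python 3.6+ where f-strings exist):

    # %-formatting vs f-strings: same output, the expression moves into the literal
    engine_id = 3        # illustrative values, not taken from the diff
    elapsed = 1.23456

    # "%i" / "%s" substitution becomes a plain {...}
    assert "hi from engine %i" % engine_id == f"hi from engine {engine_id}"

    # precision specs carry over after a colon: "%.3f" -> "{:.3f}"
    assert "%.3f" % elapsed == f"{elapsed:.3f}"

    # the trailing "% (...)" argument tuple disappears entirely
    print(f"slept {elapsed:.2f}s on engine {engine_id}")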

docs/source/conf.py (+1 -1)

@@ -95,7 +95,7 @@
 from datetime import date

 project = 'ipyparallel'
-copyright = '%04d, The IPython Development Team' % date.today().year
+copyright = f'{date.today().year}, The IPython Development Team'
 author = 'The IPython Development Team'

 # The version info for the project you're documenting, acts as replacement for
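The dropped %04d spec zero-pads the year to four digits, which is a no-op for any modern calendar year, so the plain f-string renders the same text. A quick illustrative check:

    from datetime import date

    year = date.today().year
    # '%04d' pads to four digits; years are already four digits, so both match
    assert '%04d' % year == f'{year}'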

docs/source/examples/customresults.py (+3 -3)

@@ -30,7 +30,7 @@ def sleep_here(count, t):
     import sys
     import time

-    print("hi from engine %i" % id)
+    print(f"hi from engine {id}")
     sys.stdout.flush()
     time.sleep(t)
     return count, t
@@ -52,12 +52,12 @@ def sleep_here(count, t):
     for msg_id in finished:
         # we know these are done, so don't worry about blocking
         ar = rc.get_result(msg_id)
-        print("job id %s finished on engine %i" % (msg_id, ar.engine_id))
+        print(f"job id {msg_id} finished on engine {ar.engine_id}")
         print("with stdout:")
         print(' ' + ar.stdout.replace('\n', '\n ').rstrip())
         print("and results:")

         # note that each job in a map always returns a list of length chunksize
         # even if chunksize == 1
         for count, t in ar.get():
-            print(" item %i: slept for %.2fs" % (count, t))
+            print(f" item {count}: slept for {t:.2f}s")

docs/source/examples/daVinci Word Count/pwordfreq.py (+3 -3)

@@ -68,18 +68,18 @@ def pwordfreq(view, fnames):
     block = nlines // n
     for i in range(n):
         chunk = lines[i * block : i * (block + 1)]
-        with open('davinci%i.txt' % i, 'w', encoding='utf8') as f:
+        with open(f'davinci{i}.txt', 'w', encoding='utf8') as f:
             f.write('\n'.join(chunk))

     try:  # python2
         cwd = os.path.abspath(os.getcwdu())
     except AttributeError:  # python3
         cwd = os.path.abspath(os.getcwd())
-    fnames = [os.path.join(cwd, 'davinci%i.txt' % i) for i in range(n)]
+    fnames = [os.path.join(cwd, f'davinci{i}.txt') for i in range(n)]
     tic = time.time()
     pfreqs = pwordfreq(view, fnames)
     toc = time.time()
     print_wordfreq(freqs)
-    print("Took %.3f s to calculate on %i engines" % (toc - tic, len(view.targets)))
+    print(f"Took {toc - tic:.3f}s to calculate on {len(view.targets)} engines")
     # cleanup split files
     map(os.remove, fnames)

docs/source/examples/dagdeps.py (+1 -1)

@@ -100,7 +100,7 @@ def main(nodes, edges):

     client = parallel.Client()
     view = client.load_balanced_view()
-    print("submitting %i tasks with %i dependencies" % (nodes, edges))
+    print(f"submitting {nodes} tasks with {edges} dependencies")
     results = submit_jobs(view, G, jobs)
     print("waiting for results")
     client.wait_interactive()

docs/source/examples/interengine/communicator.py (+2 -2)

@@ -21,8 +21,8 @@ def __init__(self, interface='tcp://*', identity=None):
         # bind to ports
         port = self.socket.bind_to_random_port(interface)
         pub_port = self.pub.bind_to_random_port(interface)
-        self.url = interface + ":%i" % port
-        self.pub_url = interface + ":%i" % pub_port
+        self.url = f"{interface}:{port}"
+        self.pub_url = f"{interface}:{pub_port}"
        # guess first public IP from socket
         self.location = socket.gethostbyname_ex(socket.gethostname())[-1][0]
         self.peers = {}
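The old expressions only work because % binds more tightly than +, so interface + ":%i" % port parses as interface + (":%i" % port); the f-string spells out the same concatenation directly. An illustrative check with hypothetical values:

    interface, port = "tcp://*", 54321  # hypothetical values
    # '%' has higher precedence than '+', so both forms build the same URL string
    assert interface + ":%i" % port == f"{interface}:{port}"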

docs/source/examples/itermapresult.py (+8 -8)

@@ -34,10 +34,10 @@
 # create a Reference to `id`. This will be a different value on each engine
 ref = ipp.Reference('id')
 print("sleeping for `id` seconds on each engine")
-tic = time.time()
+tic = time.perf_counter()
 ar = dv.apply(time.sleep, ref)
 for i, r in enumerate(ar):
-    print("%i: %.3f" % (i, time.time() - tic))
+    print(f"{i}: {time.perf_counter() - tic:.3f}")


 def sleep_here(t):
@@ -50,22 +50,22 @@ def sleep_here(t):
 # one call per task
 print("running with one call per task")
 amr = v.map(sleep_here, [0.01 * t for t in range(100)])
-tic = time.time()
+tic = time.perf_counter()
 for i, r in enumerate(amr):
-    print("task %i on engine %i: %.3f" % (i, r[0], time.time() - tic))
+    print(f"task {i} on engine {r[0]}: {time.perf_counter() - tic:.3f}")

 print("running with four calls per task")
 # with chunksize, we can have four calls per task
 amr = v.map(sleep_here, [0.01 * t for t in range(100)], chunksize=4)
-tic = time.time()
+tic = time.perf_counter()
 for i, r in enumerate(amr):
-    print("task %i on engine %i: %.3f" % (i, r[0], time.time() - tic))
+    print(f"task {i} on engine {r[0]}: {time.perf_counter() - tic:.3f}")

 print("running with two calls per task, with unordered results")
 # We can even iterate through faster results first, with ordered=False
 amr = v.map(
     sleep_here, [0.01 * t for t in range(100, 0, -1)], ordered=False, chunksize=2
 )
-tic = time.time()
+tic = time.perf_counter()
 for i, r in enumerate(amr):
-    print("slept %.2fs on engine %i: %.3f" % (r[1], r[0], time.time() - tic))
+    print(f"slept {r[1]:.2f}s on engine {r[0]}: {time.perf_counter() - tic:.3f}")

docs/source/examples/pi/parallelpi.py (+5 -5)

@@ -29,8 +29,8 @@
 import ipyparallel as ipp

 # Files with digits of pi (10m digits each)
-filestring = 'pi200m.ascii.%(i)02dof20'
-files = [filestring % {'i': i} for i in range(1, 21)]
+filestring = 'pi200m.ascii.{:02d}of20'
+files = [filestring.format(i) for i in range(1, 21)]

 # Connect to the IPython cluster
 c = ipp.Client()
@@ -42,7 +42,7 @@
 v = c[:]
 v.block = True
 # fetch the pi-files
-print("downloading %i files of pi" % n)
+print(f"downloading {n} files of pi")
 v.map(fetch_pi_file, files[:n])  # noqa: F821
 print("done")

@@ -60,10 +60,10 @@
 freqs150m = reduce_freqs(freqs_all)
 t2 = clock()
 digits_per_second8 = n * 10.0e6 / (t2 - t1)
-print("Digits per second (%i engines, %i0m digits): " % (n, n), digits_per_second8)
+print(f"Digits per second ({n} engines, {n}0m digits): ", digits_per_second8)

 print("Speedup: ", digits_per_second8 / digits_per_second1)

 plot_two_digit_freqs(freqs150m)
-plt.title("2 digit sequences in %i0m digits of pi" % n)
+plt.title(f"2 digit sequences in {n}0m digits of pi")
 plt.show()
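The original filename template zero-pads the file index ('%(i)02d' yields 01of20 through 20of20), and the str.format / f-string spelling of that same padding is {:02d}. A small illustrative check:

    # '%(i)02d' and '{:02d}' produce the same zero-padded index: 01of20 ... 20of20
    for i in (1, 9, 20):
        old_name = 'pi200m.ascii.%(i)02dof20' % {'i': i}
        new_name = 'pi200m.ascii.{:02d}of20'.format(i)
        assert old_name == new_name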

docs/source/examples/task_profiler.py (+3 -6)

@@ -60,10 +60,7 @@ def main():
     ]
     stime = sum(times)

-    print(
-        "executing %i tasks, totalling %.1f secs on %i engines"
-        % (opts.n, stime, nengines)
-    )
+    print(f"executing {opts.n} tasks, totalling {stime:.1f} secs on {nengines} engines")
     time.sleep(1)
     start = time.perf_counter()
     amr = view.map(time.sleep, times)
@@ -74,8 +71,8 @@ def main():
     scale = stime / ptime

     print(f"executed {stime:.1f} secs in {ptime:.1f} secs")
-    print("%.3fx parallel performance on %i engines" % (scale, nengines))
-    print("%.1f%% of theoretical max" % (100 * scale / nengines))
+    print(f"{scale:.3f}x parallel performance on {nengines} engines")
+    print(f"{scale / nengines:.1%} of theoretical max")


 if __name__ == '__main__':
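One of these conversions is less mechanical than the rest: the % presentation type multiplies by 100 and appends the percent sign itself, so the explicit 100 * factor from the old code drops out. A quick illustrative check:

    # ':.1%' scales by 100 and appends '%', matching the old "%.1f%%" % (100 * x)
    scale, nengines = 3.5, 4  # illustrative values
    old_style = "%.1f%% of theoretical max" % (100 * scale / nengines)
    new_style = f"{scale / nengines:.1%} of theoretical max"
    assert old_style == new_style  # both render '87.5% of theoretical max'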

docs/source/examples/wave2D/RectPartitioner.py (+2 -2)

@@ -61,7 +61,7 @@ def prepare_communication(self):

         nsd_ = self.nsd
         if nsd_ < 1:
-            print('Number of space dimensions is %d, nothing to do' % nsd_)
+            print(f'Number of space dimensions is {nsd_}, nothing to do')
             return

         self.subd_rank = [-1, -1, -1]
@@ -93,7 +93,7 @@ def prepare_communication(self):
             self.subd_rank[1] = (my_id % offsets[2]) / self.num_parts[0]
             self.subd_rank[2] = my_id / offsets[2]

-        print("my_id=%d, subd_rank: " % my_id, self.subd_rank)
+        print(f"my_id={my_id}, subd_rank={self.subd_rank}")
         if my_id == 0:
             print("offsets=", offsets)

docs/source/examples/wave2D/communicator.py (+2 -2)

@@ -27,8 +27,8 @@ def __init__(self, interface='tcp://*', identity=None):
         northport = self.north.bind_to_random_port(interface)
         eastport = self.east.bind_to_random_port(interface)

-        self.north_url = interface + ":%i" % northport
-        self.east_url = interface + ":%i" % eastport
+        self.north_url = f"{interface}:{northport}"
+        self.east_url = f"{interface}:{eastport}"

         # guess first public IP from socket
         self.location = socket.gethostbyname_ex(socket.gethostname())[-1][0]

docs/source/examples/wave2D/parallelwave-mpi.py (+3 -7)

@@ -119,13 +119,9 @@ def wave_saver(u, x, y, t):
     if partition is None:
         partition = [1, num_procs]

-    assert partition[0] * partition[1] == num_procs, (
-        "can't map partition %s to %i engines"
-        % (
-            partition,
-            num_procs,
-        )
-    )
+    assert (
+        partition[0] * partition[1] == num_procs
+    ), f"can't map partition {partition} to {num_procs} engines"

     view = rc[:]
     print(f"Running {grid} system on {partition} processes until {tstop:f}")

docs/source/examples/wave2D/parallelwave.py (+3 -7)

@@ -126,13 +126,9 @@ def wave_saver(u, x, y, t):
     else:
         num_procs = min(num_procs, partition[0] * partition[1])

-    assert partition[0] * partition[1] == num_procs, (
-        "can't map partition %s to %i engines"
-        % (
-            partition,
-            num_procs,
-        )
-    )
+    assert (
+        partition[0] * partition[1] == num_procs
+    ), f"can't map partition {partition} to {num_procs} engines"

     # construct the View:
     view = rc[:num_procs]

docs/source/examples/wave2D/wavesolver.py (+1 -8)

@@ -303,14 +303,7 @@ def solve(self, tstop, dt=-1, user_action=None, verbose=False, final_test=False):

         t1 = time.time()
         print(
-            'my_id=%2d, dt=%g, %s version, slice_copy=%s, net Wtime=%g'
-            % (
-                partitioner.my_id,
-                dt,
-                implementation['inner'],
-                partitioner.slice_copy,
-                t1 - t0,
-            )
+            f"my_id={partitioner.my_id:2}, dt={dt:g}, {implementation['inner']} version, slice_copy={partitioner.slice_copy}, net Wtime={t1 - t0:g}"
         )
         # save the us
         self.us = u, u_1, u_2

ipyparallel/client/client.py (+1 -1)

@@ -153,7 +153,7 @@ def __repr__(self):
         if len(text_out) > 32:
             text_out = text_out[:29] + '...'

-        return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
+        return f"<ExecuteReply[{self.execution_count}]: {text_out}>"

     def _plaintext(self):
         execute_result = self.metadata['execute_result'] or {'data': {}}

ipyparallel/controller/hub.py (+1 -1)

@@ -1246,7 +1246,7 @@ def purge_results(self, client_id, msg):
         for eid in eids:
             if eid not in self.engines:
                 try:
-                    raise IndexError("No such engine: %i" % eid)
+                    raise IndexError(f"No such engine: {eid}")
                 except Exception:
                     reply = error.wrap_exception()
                     self.log.exception("Error dropping records")

ipyparallel/controller/sqlitedb.py (+1 -1)

@@ -274,7 +274,7 @@ def _init_db(self):
         i = 0
         while not self._check_table():
             i += 1
-            self.table = first_table + '_%i' % i
+            self.table = f"{first_table}_{i}"
             self.log.warning(
                 f"Table {previous_table} exists and doesn't match db format, trying {self.table}"
             )

ipyparallel/engine/app.py (+3 -3)

@@ -612,7 +612,7 @@ async def complete_registration(self, msg, connect, maybe_tunnel):

         def url(key):
             """get zmq url for given channel"""
-            return str(info["interface"] + ":%i" % info[key])
+            return f"{info['interface']}:{info[key]}"

         def urls(key):
             return [f'{info["interface"]}:{port}' for port in info[key]]
@@ -777,7 +777,7 @@ def send_with_metadata(
             content['hb_period'],
             identity,
         )
-        self.log.info("Completed registration with id %i" % self.id)
+        self.log.info(f"Completed registration with id {self.id}")

     def start_nanny(self, control_url):
         self.log.info("Starting nanny")
@@ -809,7 +809,7 @@ def start_heartbeat(self, hb_ping, hb_pong, hb_period, identity):
         self._hb_listener = zmqstream.ZMQStream(mon, self.loop)
         self._hb_listener.on_recv(self._report_ping)

-        hb_monitor = "tcp://%s:%i" % (localhost(), mport)
+        hb_monitor = f"tcp://{localhost()}:{mport}"

         heart = Heart(
             hb_ping,

ipyparallel/engine/log.py (+1 -1)

@@ -15,6 +15,6 @@ def root_topic(self):
         """this is a property, in case the handler is created
         before the engine gets registered with an id"""
         if isinstance(getattr(self.engine, 'id', None), int):
-            return "engine.%i" % self.engine.id
+            return f"engine.{self.engine.id}"
         else:
             return "engine"

ipyparallel/error.py (+5 -5)

@@ -161,11 +161,11 @@ def __str__(self):
             engine_str = self._get_engine_str(ei)
             s = s + '\n' + engine_str + en + ': ' + str(ev)
         if len(self.elist) > self.tb_limit:
-            s = s + '\n.... %i more exceptions ...' % (len(self.elist) - self.tb_limit)
+            s = s + f'\n.... {len(self.elist) - self.tb_limit} more exceptions ...'
         return s

     def __repr__(self):
-        return "CompositeError(%i)" % len(self.elist)
+        return f"CompositeError({len(self.elist)})"

     def render_traceback(self, excid=None):
         """render one or all of my tracebacks to a list of lines"""
@@ -177,13 +177,13 @@ def render_traceback(self, excid=None):
                 lines.append('')
             if len(self.elist) > self.tb_limit:
                 lines.append(
-                    '... %i more exceptions ...' % (len(self.elist) - self.tb_limit)
+                    f'... {len(self.elist) - self.tb_limit} more exceptions ...'
                 )
         else:
             try:
                 en, ev, etb, ei = self.elist[excid]
             except Exception:
-                raise IndexError("an exception with index %i does not exist" % excid)
+                raise IndexError(f"an exception with index {excid} does not exist")
             else:
                 lines.append(self._get_engine_str(ei) + ":")
                 lines.extend((etb or 'No traceback available').splitlines())
@@ -197,7 +197,7 @@ def raise_exception(self, excid=0):
         try:
             en, ev, etb, ei = self.elist[excid]
         except Exception:
-            raise IndexError("an exception with index %i does not exist" % excid)
+            raise IndexError(f"an exception with index {excid} does not exist")
         else:
             raise RemoteError(en, ev, etb, ei)

ipyparallel/tests/test_view.py (+1 -1)

@@ -553,7 +553,7 @@ def test_execute_reply(self):
         e0.block = True
         ar = e0.execute("5", silent=False)
         er = ar.get()
-        assert str(er) == "<ExecuteReply[%i]: 5>" % er.execution_count
+        assert str(er) == f"<ExecuteReply[{er.execution_count}]: 5>"
         assert er.execute_result['data']['text/plain'] == '5'

     def test_execute_reply_rich(self):

ipyparallel/util.py (+1 -1)

@@ -364,7 +364,7 @@ def signal_children(children):

     def terminate_children(sig, frame):
         log = get_logger()
-        log.critical("Got signal %i, terminating children..." % sig)
+        log.critical("Got signal %i, terminating children...", sig)
         for child in children:
             child.terminate()

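Unlike every other hunk in this commit, the logging call keeps its %-style placeholder on purpose: passing sig as a separate argument lets the logging module defer interpolation until the record is actually emitted, and keeps the message template constant for handlers and filters. A minimal sketch of the two styles:

    import logging

    log = logging.getLogger("example")  # hypothetical logger name
    sig = 15

    # eager: the f-string is built even if the record ends up filtered out
    log.critical(f"Got signal {sig}, terminating children...")

    # lazy: logging formats the message only when the record is handled
    log.critical("Got signal %i, terminating children...", sig)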