Skip to content

Commit da2004e

Browse files
ezyang authored and facebook-github-bot committed
Upgrade lint. (pytorch#39483)
Summary: Pull Request resolved: pytorch#39483 I fixed all of the new errors that occurred because of the upgrade. Signed-off-by: Edward Z. Yang <[email protected]> Test Plan: Imported from OSS Differential Revision: D21884575 Pulled By: ezyang fbshipit-source-id: 45c8e1f1ecb410c8d7c46dd3922ad70e982a0685
1 parent fe68467 commit da2004e

File tree

12 files changed

+54
-33
lines changed

12 files changed

+54
-33
lines changed

.flake8

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,12 @@ max-line-length = 120
55
# E501 is not flexible enough, we're using B950 instead
66
ignore =
77
E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
8+
# shebang has extra meaning in fbcode lints, so I think it's not worth trying
9+
# to line this up with executable bit
10+
EXE001,
811
# these ignores are from flake8-bugbear; please fix!
912
B007,B008,
1013
# these ignores are from flake8-comprehensions; please fix!
1114
C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
1215
per-file-ignores = __init__.py: F401
13-
exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git
16+
exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git,build,build_test_custom_build,build_code_analyzer

.github/workflows/lint.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ jobs:
6666
- name: Run flake8
6767
run: |
6868
set -eux
69-
pip install flake8==3.7.9 flake8-mypy flake8-bugbear flake8-comprehensions flake8-executable flake8-pyi mccabe pycodestyle==2.5.0 pyflakes==2.1.1
69+
pip install flake8==3.8.2 flake8-mypy flake8-bugbear flake8-comprehensions flake8-executable flake8-pyi==20.5.0 mccabe pycodestyle==2.6.0 pyflakes==2.2.0
7070
flake8 --version
7171
flake8 --exit-zero > ${GITHUB_WORKSPACE}/flake8-output.txt
7272
cat ${GITHUB_WORKSPACE}/flake8-output.txt

benchmarks/operator_benchmark/pt/qpool_test.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,12 +10,12 @@
1010
# 2D pooling will have input matrix of rank 3 or 4
1111
qpool2d_long_configs = op_bench.config_list(
1212
attrs=(
13-
# C H W k s p
14-
( 1, 3, 3, (3, 3), (1, 1), (0, 0)), # dummy # noqa
15-
( 3, 64, 64, (3, 3), (2, 2), (1, 1)), # dummy # noqa
16-
# VGG16 pools with original input shape: (-1, 3, 224, 224)
17-
( 64, 224, 224, (2, 2), (2, 2), (0, 0)), # MaxPool2d-4 # noqa
18-
(256, 56, 56, (2, 2), (2, 2), (0, 0)), # MaxPool2d-16 # noqa
13+
# C H W k s p
14+
( 1, 3, 3, (3, 3), (1, 1), (0, 0)), # dummy # noqa
15+
( 3, 64, 64, (3, 3), (2, 2), (1, 1)), # dummy # noqa
16+
# VGG16 pools with original input shape: (-1, 3, 224, 224)
17+
( 64, 224, 224, (2, 2), (2, 2), (0, 0)), # MaxPool2d-4 # noqa
18+
(256, 56, 56, (2, 2), (2, 2), (0, 0)), # MaxPool2d-16 # noqa
1919
),
2020
attr_names=('C', 'H', 'W', # Input layout
2121
'k', 's', 'p'), # Pooling parameters

test/jit/test_class_type.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -485,6 +485,7 @@ def forward(self, a):
485485

486486
def test_interface(self):
487487
global Foo, Bar, OneTwo, OneTwoThree, OneTwoWrong, NotMember, NotMember2
488+
488489
@torch.jit.script
489490
class Foo(object):
490491
def __init__(self):
@@ -647,6 +648,7 @@ def __init__(self):
647648

648649
def test_overloaded_fn(self):
649650
global Foo, MyClass # see [local resolution in python]
651+
650652
@torch.jit.script
651653
class Foo(object):
652654
def __init__(self, x):
@@ -802,6 +804,7 @@ def test():
802804

803805
def test_cast_overloads(self):
804806
global Foo # see [local resolution in python]
807+
805808
@torch.jit.script
806809
class Foo(object):
807810
def __init__(self, val):

test/print_test_stats.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,11 +47,11 @@ def print_report(self):
4747
print(f" avg_time: {self.total_time/test_count:.2f} seconds")
4848
if test_count > 2:
4949
print(f" mean_time: {sorted_tests[test_count>>1].time:.2f} seconds")
50-
print(f" Three longest tests:")
50+
print(" Three longest tests:")
5151
for idx in [-1, -2, -3]:
5252
print(f" {sorted_tests[idx].name} time: {sorted_tests[idx].time:.2f} seconds")
5353
elif test_count > 0:
54-
print(f" Longest test:")
54+
print(" Longest test:")
5555
print(f" {sorted_tests[-1].name} time: {sorted_tests[-1].time:.2f} seconds")
5656
print("")
5757

test/quantization/test_quantized_op.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1401,7 +1401,8 @@ def test_interpolate(self, X, size, mode, scale_factor, align_corners, nhwc_layo
14011401
mode=mode, align_corners=align_corners)
14021402
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
14031403
self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
1404-
msg="{} results are off".format(name, qX_hat.int_repr(), X_ref))
1404+
msg="{} results are off: qX_hat={} X_ref={}"
1405+
.format(name, qX_hat.int_repr(), X_ref))
14051406
self.assertEqual(scale, qX_hat.q_scale(),
14061407
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
14071408
self.assertEqual(zero_point, qX_hat.q_zero_point(),
@@ -1455,7 +1456,8 @@ def test_interpolate3d(self, X, size, scale_factor, align_corners, nhwc_layout):
14551456
mode=mode, align_corners=align_corners)
14561457
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
14571458
self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
1458-
msg="{} results are off".format(name, qX_hat.int_repr(), X_ref))
1459+
msg="{} results are off: qX_hat={}, X_ref={}"
1460+
.format(name, qX_hat.int_repr(), X_ref))
14591461
self.assertEqual(scale, qX_hat.q_scale(),
14601462
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
14611463
self.assertEqual(zero_point, qX_hat.q_zero_point(),

test/test_autograd.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6072,6 +6072,7 @@ def test_copy_(self, device):
60726072
def test_simple_reentrant_cross_device(self, device):
60736073
class ReentrantFunc(Function):
60746074
_cpu_mode = True
6075+
60756076
@staticmethod
60766077
def forward(ctx, x):
60776078
return x * (x + 2)

test/test_jit.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3796,6 +3796,7 @@ def f(i):
37963796
def test_bailout_loop_carried_deps_name_clash(self):
37973797
with enable_profiling_mode_for_profiling_tests():
37983798
NUM_ITERATIONS = 10
3799+
37993800
@torch.jit.script
38003801
def fct_loop(z, size):
38013802
# type: (int, int) -> Tuple[Tensor, List[int]]
@@ -3817,6 +3818,7 @@ def fct_loop(z, size):
38173818
def test_bailout_loop_counter_transition(self):
38183819
with enable_profiling_mode_for_profiling_tests():
38193820
NUM_ITERATIONS = 10
3821+
38203822
@torch.jit.script
38213823
def fct_loop(z, size):
38223824
# type: (int, int) -> Tuple[Tensor, List[int]]
@@ -7001,9 +7003,10 @@ def test_cast_float(x):
70017003
self.checkScript(test_cast_float, (-1.,))
70027004

70037005
with self.assertRaisesRegex(RuntimeError, r"Could not cast value of type Tuple\[int, int\] to bool"): # noqa: W605
7006+
70047007
@torch.jit.script
70057008
def test_bad_conditional(x):
7006-
if (1, 2):
7009+
if (1, 2): # noqa F634
70077010
return
70087011
else:
70097012
return 0
@@ -7669,6 +7672,7 @@ def bar(c, b):
76697672

76707673
def test_error_stacktrace_interface(self):
76717674
global IFace
7675+
76727676
@torch.jit.script
76737677
def baz(c, b):
76747678
return c + b
@@ -8318,6 +8322,7 @@ def foo(a):
83188322
return 4
83198323
self.assertEqual(foo(4), 7)
83208324
self.assertEqual(foo(None), 4)
8325+
83218326
@torch.jit.script
83228327
def foo2(a, b):
83238328
# type: (Optional[int], Optional[int]) -> int
@@ -16082,7 +16087,7 @@ def fn():
1608216087
def identity(x1): # noqa: F811
1608316088
# type: (str) -> str
1608416089
pass
16085-
#
16090+
1608616091
@torch.jit._overload # noqa: F811
1608716092
def identity(x1): # noqa: F811
1608816093
# type: (float) -> float

test/test_jit_py3.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ def test_joined_str(self):
1616
def func(x):
1717
hello, test = "Hello", "test"
1818
print(f"{hello + ' ' + test}, I'm a {test}") # noqa E999
19-
print(f"format blank")
19+
print(f"format blank") # noqa F541
2020
hi = 'hi'
2121
print(f"stuff before {hi}")
2222
print(f"{hi} stuff after")
@@ -39,6 +39,7 @@ def func(x):
3939
@unittest.skipIf(sys.version_info[:2] < (3, 7), "`dataclasses` module not present on < 3.7")
4040
def test_dataclass_error(self):
4141
from dataclasses import dataclass
42+
4243
@dataclass
4344
class NormalizationInfo(object):
4445
mean: float = 0.0
@@ -256,6 +257,7 @@ def __call__(self, *args) -> str:
256257
return str(type(args[0]))
257258

258259
the_class = MyPythonClass()
260+
259261
@torch.jit.script
260262
def fn(x):
261263
return the_class(x)
@@ -424,6 +426,7 @@ def foo():
424426

425427
def test_export_opnames_interface(self):
426428
global OneTwoModule
429+
427430
@torch.jit.interface
428431
class OneTwoModule(nn.Module):
429432
def one(self, x, y):

test/test_tensorboard.py

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -363,28 +363,32 @@ def test_image_with_boxes(self):
363363
self))
364364

365365
def test_image_with_one_channel(self):
366-
self.assertTrue(compare_image_proto(summary.image('dummy',
367-
tensor_N(shape=(1, 8, 8)),
368-
dataformats='CHW'),
369-
self)) # noqa E127
366+
self.assertTrue(compare_image_proto(
367+
summary.image('dummy',
368+
tensor_N(shape=(1, 8, 8)),
369+
dataformats='CHW'),
370+
self)) # noqa E127
370371

371372
def test_image_with_one_channel_batched(self):
372-
self.assertTrue(compare_image_proto(summary.image('dummy',
373-
tensor_N(shape=(2, 1, 8, 8)),
374-
dataformats='NCHW'),
375-
self)) # noqa E127
373+
self.assertTrue(compare_image_proto(
374+
summary.image('dummy',
375+
tensor_N(shape=(2, 1, 8, 8)),
376+
dataformats='NCHW'),
377+
self)) # noqa E127
376378

377379
def test_image_with_3_channel_batched(self):
378-
self.assertTrue(compare_image_proto(summary.image('dummy',
379-
tensor_N(shape=(2, 3, 8, 8)),
380-
dataformats='NCHW'),
381-
self)) # noqa E127
380+
self.assertTrue(compare_image_proto(
381+
summary.image('dummy',
382+
tensor_N(shape=(2, 3, 8, 8)),
383+
dataformats='NCHW'),
384+
self)) # noqa E127
382385

383386
def test_image_without_channel(self):
384-
self.assertTrue(compare_image_proto(summary.image('dummy',
385-
tensor_N(shape=(8, 8)),
386-
dataformats='HW'),
387-
self)) # noqa E127
387+
self.assertTrue(compare_image_proto(
388+
summary.image('dummy',
389+
tensor_N(shape=(8, 8)),
390+
dataformats='HW'),
391+
self)) # noqa E127
388392

389393
def test_video(self):
390394
try:

0 commit comments

Comments
 (0)