
Commit 0f3f4ec

James Reed authored and soumith committed
Kill hypothesis deadline testing (pytorch#30890)
Summary: Pull Request resolved: pytorch#30890

We've received way too many complaints about this functionality making tests flaky, and it's not providing value to us anyway. Let's cut the shit and kill deadline testing.

Test Plan: Imported from OSS

Differential Revision: D18857597

Pulled By: jamesr66a

fbshipit-source-id: 67e3412795ef2fb7b7ee896169651084e434d2f6
1 parent 509df60 commit 0f3f4ec
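For context, a minimal self-contained sketch contrasting the removed per-test decorator with the process-wide settings profile this commit adopts. The test names are illustrative, not from this commit:

import hypothesis
from hypothesis import given, settings, strategies as st

# Old approach (deleted in this commit): wrap each individual test to opt
# out of deadline enforcement.
def no_deadline(fn):
    try:
        return settings(deadline=None)(fn)
    except hypothesis.errors.InvalidArgument:
        # Very old Hypothesis versions have no deadline setting at all.
        return fn

@no_deadline
@given(st.integers())
def test_old_style(x):
    assert x == x

# New approach: register a profile once and load it; every test collected in
# this process then runs with deadline checking disabled, no decorator needed.
settings.register_profile("no_deadline", deadline=None)
settings.load_profile("no_deadline")

@given(st.integers())
def test_new_style(x):
    assert x == x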

File tree (6 files changed: 16 additions, 48 deletions)

  test/hypothesis_utils.py
  test/test_fake_quant.py
  test/test_qat.py
  test/test_quantization.py
  test/test_quantized.py
  test/test_quantized_nn_mods.py

test/hypothesis_utils.py

Lines changed: 8 additions & 7 deletions
@@ -304,10 +304,11 @@ def tensor_conv(
 
     return X, W, b, groups
 
-# Disable deadline testing if this version of hypthesis supports it, otherwise
-# just return the original function
-def no_deadline(fn):
-    try:
-        return hypothesis.settings(deadline=None)(fn)
-    except hypothesis.errors.InvalidArgument:
-        return fn
+from hypothesis import settings
+settings.register_profile("no_deadline", deadline=None)
+settings.load_profile("no_deadline")
+
+
+# This is really just to get flake8 to not complain when this file
+# is imported purely for the side-effectful stuff above
+def assert_deadline_disabled():
+    assert settings().deadline is None
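Note the design: merely importing hypothesis_utils now disables deadlines for every Hypothesis test in the process, which is why the per-test @no_deadline decorator sites in the files below can simply be deleted. A sketch of the consuming side, mirroring the import changes in the diffs that follow:

import hypothesis_utils as hu  # the import itself registers and loads the "no_deadline" profile
hu.assert_deadline_disabled()  # sanity-checks the profile and keeps flake8 from flagging the import as unused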

test/test_fake_quant.py

Lines changed: 1 addition & 13 deletions
@@ -5,7 +5,7 @@
 from hypothesis import given
 from hypothesis import strategies as st
 import hypothesis_utils as hu
-from hypothesis_utils import no_deadline
+hu.assert_deadline_disabled()
 from common_utils import run_tests, TestCase
 from torch.quantization import FakeQuantize
 from torch.quantization import default_observer, default_per_channel_weight_observer

@@ -64,10 +64,7 @@ def to_tensor(X, device):
 tolerance = 1e-6
 
 class TestFakeQuantizePerTensor(TestCase):
-    # NOTE: Tests in this class are decorated with no_deadline
-    # to prevent spurious failures due to cuda runtime initialization.
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))

@@ -85,7 +82,6 @@ def test_forward_per_tensor(self, device, X):
             X, scale, zero_point, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))

@@ -108,7 +104,6 @@ def test_backward_per_tensor(self, device, X):
         Y_prime.backward(dout)
         np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))

@@ -127,7 +122,6 @@ def test_numerical_consistency_per_tensor(self, device, X):
             X, scale, zero_point, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=[torch.quint8])),

@@ -206,10 +200,7 @@ def test_fake_quant_control(self):
 
 
 class TestFakeQuantizePerChannel(TestCase):
-    # NOTE: Tests in this class are decorated with no_deadline
-    # to prevent spurious failures due to cuda runtime initialization.
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))

@@ -229,7 +220,6 @@ def test_forward_per_channel(self, device, X):
             X, scale, zero_point, axis, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))

@@ -253,7 +243,6 @@ def test_backward_per_channel(self, device, X):
         Y_prime.backward(dout)
         np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))

@@ -275,7 +264,6 @@ def test_numerical_consistency_per_channel(self, device, X):
             X, scale, zero_point, axis, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
-    @no_deadline
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
                                    qparams=hu.qparams(dtypes=torch.qint8)))

test/test_qat.py

Lines changed: 2 additions & 4 deletions
@@ -11,15 +11,13 @@
 from common_utils import TestCase, run_tests
 from hypothesis import given
 from hypothesis import strategies as st
-from hypothesis_utils import no_deadline
+import hypothesis_utils as hu
+hu.assert_deadline_disabled()
 from functools import reduce
 
 
 class IntrinsicQATModuleTest(TestCase):
-    # NOTE: Tests in this class are decorated with no_deadline
-    # to prevent spurious failures due to cuda runtime initialization.
 
-    @no_deadline
     @given(batch_size=st.integers(2, 4),
            input_channels_per_group=st.sampled_from([2, 3, 4]),
            height=st.integers(5, 10),

test/test_quantization.py

Lines changed: 2 additions & 5 deletions
@@ -42,15 +42,15 @@
 
 from hypothesis import given
 from hypothesis import strategies as st
-from hypothesis_utils import no_deadline
+import hypothesis_utils as hu
+hu.assert_deadline_disabled()
 import io
 import copy
 
 @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                      " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
                      " with instruction set support avx2 or newer.")
 class EagerModePostTrainingQuantTest(QuantizationTestCase):
-    @no_deadline
     @given(qconfig=st.sampled_from((torch.quantization.default_qconfig, torch.quantization.default_per_channel_qconfig)))
     def test_single_layer(self, qconfig):
         r"""Quantize SingleLayerLinearModel which has one Linear module, make sure it is swapped

@@ -919,7 +919,6 @@ def test_nested(self):
 
 class FunctionalModuleTest(QuantizationTestCase):
     # Histogram Observers are slow, so have no-deadline to ensure test doesn't time out
-    @no_deadline
     @given(train_mode=st.booleans())
     def test_functional_module(self, train_mode):
         model = ModelWithFunctionals()

@@ -1349,7 +1348,6 @@ def test_record_observer(self):
         self.assertEqual(len(observer_dict['fc1.module.activation_post_process'].get_tensor_value()), 2 * len(self.calib_data))
         self.assertEqual(observer_dict['fc1.module.activation_post_process'].get_tensor_value()[0], model(self.calib_data[0][0]))
 
-    @no_deadline
     @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
            qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)))
     def test_observer_scriptable(self, qdtype, qscheme):

@@ -1366,7 +1364,6 @@ def test_observer_scriptable(self, qdtype, qscheme):
         loaded = torch.jit.load(buf)
         self.assertTrue(torch.equal(obs.get_tensor_value()[0], loaded.get_tensor_value()[0]))
 
-    @no_deadline
     @given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
            qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
            reduce_range=st.booleans())

test/test_quantized.py

Lines changed: 1 addition & 13 deletions
@@ -10,7 +10,7 @@
 from hypothesis import assume, given
 from hypothesis import strategies as st
 import hypothesis_utils as hu
-from hypothesis_utils import no_deadline
+hu.assert_deadline_disabled()
 
 from common_utils import TEST_WITH_UBSAN, TestCase, run_tests, IS_PPC, IS_MACOS
 from common_quantized import _quantize, _dequantize, _calculate_dynamic_qparams, \

@@ -145,7 +145,6 @@ def test_qrelu6(self, X):
                              message="{} relu failed".format(name))
 
     """Tests the correctness of the scalar addition."""
-    @no_deadline
     @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
                        elements=st.floats(-1e6, 1e6, allow_nan=False),
                        qparams=hu.qparams()),

@@ -506,7 +505,6 @@ def test_max_pool2d_nhwc(self, X, kernel, stride, dilation, padding, ceil_mode):
         self.assertEqual(a_ref, a_hat.dequantize(),
                          message="ops.quantized.max_pool2d results are off")
 
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
                                               min_side=5, max_side=10),
                        qparams=hu.qparams(dtypes=torch.quint8)),

@@ -556,7 +554,6 @@ def test_avg_pool2d(self, X, kernel, stride, padding, ceil_mode, count_include_p
                             message=error_message.format(name + '.zero_point', scale,
                                                          qX_hat.q_zero_point()))
 
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                               min_side=5, max_side=10),
                        qparams=hu.qparams(dtypes=torch.qint8)),

@@ -619,7 +616,6 @@ def test_avg_pool2d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_incl
                             message=error_message.format(name + '.zero_point', scale,
                                                          X_hat.q_zero_point()))
 
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                               min_side=1, max_side=10),
                        qparams=hu.qparams(dtypes=torch.quint8)),

@@ -662,7 +658,6 @@ def test_adaptive_avg_pool2d(self, X, output_size_h, output_size_w):
                                                          qX_hat.q_zero_point()))
 
     """Tests adaptive average pool operation on NHWC quantized tensors."""
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                               min_side=1, max_side=10),
                        qparams=hu.qparams(dtypes=torch.qint8)),

@@ -708,7 +703,6 @@ def test_adaptive_avg_pool2d_nhwc(self, X, output_size_h, output_size_w):
                             message=error_message.format(name + '.zero_point', scale,
                                                          X_hat.q_zero_point()))
 
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
                                               min_side=1, max_side=10),
                        qparams=hu.qparams()),

@@ -733,7 +727,6 @@ def test_qtopk(self, X, k, dim, largest, sorted):
         torch.testing.assert_allclose(quantized_out[0].dequantize(), unquantized_out[0])
         torch.testing.assert_allclose(quantized_out[1], unquantized_out[1])
 
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                               min_side=1, max_side=10),
                        qparams=hu.qparams()),

@@ -818,7 +811,6 @@ def test_cat(self, X, num, dim, relu):
         cat_q = q_cat_op(tensors_q, dim=ch_axis, scale=scale,
                          zero_point=zero_point)
 
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                               min_side=5, max_side=10),
                        qparams=hu.qparams()),

@@ -874,7 +866,6 @@ def test_interpolate(self, X, size, mode, scale_factor, align_corners, nhwc_layo
                                                          qX_hat.q_zero_point()))
 
     """Tests quantize concatenation (both fused and not)."""
-    @no_deadline
     @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
                                               min_side=1, max_side=10),
                        qparams=hu.qparams()),

@@ -999,7 +990,6 @@ def equal_ref(qX, qX2):
                     " with instruction set support avx2 or newer.")
 class TestDynamicQuantizedLinear(TestCase):
     """Tests the correctness of the dynamic quantized linear and linear_relu op."""
-    @no_deadline
     @given(
         batch_size=st.integers(1, 4),
         input_channels=st.integers(16, 32),

@@ -1112,7 +1102,6 @@ def test_qlinear(self, batch_size, input_channels, output_channels,
                         message="torch.ops.quantized.linear_dynamic (fbgemm) results are off")
 
     """Tests the correctness of the legacy dynamic quantized linear op."""
-    @no_deadline
     @given(
         batch_size=st.integers(1, 4),
         input_channels=st.integers(16, 32),

@@ -1189,7 +1178,6 @@ def test_qlinear_legacy(self, batch_size, input_channels, output_channels):
 
 class TestQuantizedLinear(unittest.TestCase):
     """Tests the correctness of the quantized linear and linear_relu op."""
-    @no_deadline
     @given(batch_size=st.integers(1, 4),
            input_channels=st.integers(16, 32),
            output_channels=st.integers(4, 8),

test/test_quantized_nn_mods.py

Lines changed: 2 additions & 6 deletions
@@ -13,7 +13,8 @@
 from common_utils import run_tests, IS_PPC, TEST_WITH_UBSAN
 from hypothesis import assume, given
 from hypothesis import strategies as st
-from hypothesis_utils import no_deadline
+import hypothesis_utils as hu
+hu.assert_deadline_disabled()
 
 import io
 import numpy as np

@@ -127,7 +128,6 @@ def _test_conv_api_impl(
 
 
 
-    @no_deadline
     @given(batch_size=st.integers(1, 3),
            in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
            H=st.integers(4, 16),

@@ -181,7 +181,6 @@ def test_conv2d_api(
             W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
             use_channelwise)
 
-    @no_deadline
     @given(batch_size=st.integers(1, 3),
            in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
            D=st.integers(4, 8),

@@ -239,7 +238,6 @@ def test_conv3d_api(
 
 
 class DynamicModuleAPITest(QuantizationTestCase):
-    @no_deadline
     @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                          " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
                          " with instruction set support avx2 or newer.")

@@ -357,7 +355,6 @@ def test_relu(self):
                          message="ReLU6 module API failed")
 
 
-    @no_deadline
     @given(
         batch_size=st.integers(1, 5),
         in_features=st.integers(16, 32),

@@ -646,7 +643,6 @@ def _test_conv_api_impl(
         # Smoke test extra_repr
         self.assertTrue(module_name in str(converted_qconv_module))
 
-    @no_deadline
     @given(batch_size=st.integers(1, 3),
            in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
            H=st.integers(4, 16),
