@@ -65,6 +65,7 @@ def to_tensor(X, device):
 
 class TestFakeQuantizePerTensor(TestCase):
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))
@@ -205,6 +206,7 @@ def test_fake_quant_control(self):
 
 class TestFakeQuantizePerChannel(TestCase):
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))
@@ -224,6 +226,7 @@ def test_forward_per_channel(self, device, X):
             X, scale, zero_point, axis, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
    @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
           X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                   qparams=hu.qparams(dtypes=torch.quint8)))