@@ -82,10 +82,10 @@ def test_forward_per_tensor(self, device, X):
             X, scale, zero_point, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))
-    @unittest.skip("temporarily disable the test")
     def test_backward_per_tensor(self, device, X):
         r"""Tests the backward method.
         """
@@ -105,11 +105,11 @@ def test_backward_per_tensor(self, device, X):
         Y_prime.backward(dout)
         np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
 
+    # https://github.com/pytorch/pytorch/issues/30604
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))
-    # https://github.com/pytorch/pytorch/issues/30604
-    @unittest.skip("temporarily disable the test")
     def test_numerical_consistency_per_tensor(self, device, X):
         r"""Comparing numerical consistency between CPU quantize/dequantize op and the CPU fake quantize op
         """
@@ -125,6 +125,7 @@ def test_numerical_consistency_per_tensor(self, device, X):
             X, scale, zero_point, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=[torch.quint8])),
@@ -246,10 +247,10 @@ def test_backward_per_channel(self, device, X):
         Y_prime.backward(dout)
         np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))
-    @unittest.skip("temporarily disable the test")
     def test_numerical_consistency_per_channel(self, device, X):
         r"""Comparing numerical consistency between CPU quantize/dequantize op and the CPU fake quantize op
         """
@@ -267,6 +268,7 @@ def test_numerical_consistency_per_channel(self, device, X):
             X, scale, zero_point, axis, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
                                    qparams=hu.qparams(dtypes=torch.qint8)))
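Note: the hunks above only move the @unittest.skip decorator so that it sits above (outermost of) Hypothesis's @given decorator. Below is a minimal, self-contained sketch of that ordering pattern, not code from this PR; the class and test names are hypothetical, and the rationale in the comments is an assumption based on how unittest detects skips.

# Hypothetical sketch of the decorator ordering applied in the diff above.
import unittest
from hypothesis import given, strategies as st

class ExampleQuantTest(unittest.TestCase):
    # With @unittest.skip outermost, the final test method carries
    # __unittest_skip__, so the unittest runner skips it without ever
    # invoking the Hypothesis-driven body (no examples are generated).
    @unittest.skip("temporarily disable the test")
    @given(x=st.integers())
    def test_example(self, x):
        self.assertEqual(x, x)

if __name__ == "__main__":
    unittest.main()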