1 parent bea971c commit 3ad3d0b
src/sparseml/modifiers/quantization/gptq/utils/gptq_wrapper.py
@@ -164,9 +164,10 @@ def fasterprune(
 
         elif hasattr(self.layer, "quantization_scheme"):
             quant_scheme = self.layer.quantization_scheme
+            breakpoint()
             actorder = quant_scheme.weights.actorder
-            if quant_scheme.weights is not None:
 
+            if quant_scheme.weights is not None:
                 if actorder:
                     perm = torch.argsort(torch.diag(self.H), descending=True)
                     W = W[:, perm]
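For context, a minimal, self-contained sketch of the control flow this hunk produces. The class and function names (QuantArgs, QuantScheme, Layer, reorder_columns) are illustrative stand-ins, not sparseml code; only the attribute names (quantization_scheme, weights, actorder) and the torch calls come from the diff, and the debugging breakpoint() call is omitted.

# Sketch of the activation-ordering permutation applied in the hunk above.
# Stand-in types only; assumes the same attribute layout the diff shows.
from dataclasses import dataclass
from typing import Optional

import torch


@dataclass
class QuantArgs:
    actorder: bool = False


@dataclass
class QuantScheme:
    weights: Optional[QuantArgs] = None


class Layer:
    def __init__(self, scheme: QuantScheme):
        self.quantization_scheme = scheme


def reorder_columns(layer: Layer, H: torch.Tensor, W: torch.Tensor) -> torch.Tensor:
    """Permute weight columns by activation order when the scheme requests it."""
    if hasattr(layer, "quantization_scheme"):
        quant_scheme = layer.quantization_scheme
        # Mirrors the patched code: .actorder is read before the None check,
        # so this assumes weights is populated whenever a scheme is attached.
        actorder = quant_scheme.weights.actorder

        if quant_scheme.weights is not None:
            if actorder:
                # Sort columns by descending Hessian diagonal (activation order).
                perm = torch.argsort(torch.diag(H), descending=True)
                W = W[:, perm]
    return W


if __name__ == "__main__":
    layer = Layer(QuantScheme(weights=QuantArgs(actorder=True)))
    H = torch.diag(torch.tensor([1.0, 3.0, 2.0]))
    W = torch.arange(6.0).reshape(2, 3)
    print(reorder_columns(layer, H, W))  # columns reordered as [1, 2, 0]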