torchtitan/components/quantization
1 file changed: +3 −2 lines

@@ -119,21 +119,22 @@ def convert(self, model: nn.Module):
         # to perform dynamic float8 rowwise quantization + scaled grouped GEMMs for the target MoE FQNs.
         if self.moe_fqns:
             from torchao.quantization.quant_api import quantize_
+
             try:
                 from torchao.prototype.moe_training.conversion_utils import (
                     MoETrainingConfig,
                 )
             except ImportError as e:
                 raise ImportError(
                     "torchao installation does not have MoE training support. Please install torchao nightly build."
-                )
+                ) from e
 
             def moe_module_filter_fn(mod: nn.Module, cur_fqn: str) -> bool:
                 for target_fqn in self.moe_fqns:
                     if target_fqn in cur_fqn:
                         return True
                 return False
-
+
             config = MoETrainingConfig()
             quantize_(model, config=config, filter_fn=moe_module_filter_fn)
             logger.info("Converted MoE to float8")
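The substantive change in this hunk is exception chaining: re-raising with `) from e` records the original `ImportError` as `__cause__`, so the traceback shows the underlying torchao import failure alongside the friendlier install hint. Without `from e`, Python would still show an implicit chain ("During handling of the above exception, another exception occurred"), but explicit chaining marks the original error as the direct cause, which reads more clearly. A minimal standalone sketch of the pattern, where `some_optional_dep` is a hypothetical stand-in for the torchao prototype import:

```python
# Sketch of the chaining pattern the diff adopts; `some_optional_dep`
# is a hypothetical stand-in for the optional torchao import.
try:
    import some_optional_dep  # noqa: F401
except ImportError as e:
    # `from e` sets __cause__ on the new exception, so Python prints both
    # tracebacks joined by "The above exception was the direct cause of
    # the following exception:" instead of surfacing only the wrapper message.
    raise ImportError("optional feature unavailable; install the nightly build") from e
```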
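For context, here is a hedged sketch of how the converted path is exercised, using only the calls visible in the diff (`quantize_`, `MoETrainingConfig`, and the substring-matching filter function). The target FQN list and the toy model are hypothetical, and a torchao nightly build with MoE training support is assumed to be installed:

```python
import torch.nn as nn
from torchao.quantization.quant_api import quantize_
from torchao.prototype.moe_training.conversion_utils import MoETrainingConfig

# Hypothetical target FQN substrings; in torchtitan these come from self.moe_fqns.
moe_fqns = ["experts"]

def moe_module_filter_fn(mod: nn.Module, cur_fqn: str) -> bool:
    # Same substring match as in the diff: convert any module whose fully
    # qualified name contains one of the target FQNs.
    return any(target_fqn in cur_fqn for target_fqn in moe_fqns)

# Toy stand-in model; real usage targets the MoE expert modules of a full model.
model = nn.Sequential(nn.Linear(8, 8))
quantize_(model, config=MoETrainingConfig(), filter_fn=moe_module_filter_fn)
```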