Skip to content

Commit ae43909

Browse files
committed
Precision issues
1 parent 99a591a commit ae43909

File tree

2 files changed

+3
-4
lines changed

2 files changed

+3
-4
lines changed

unsloth/models/_utils.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "2025.3.12"
15+
__version__ = "2025.3.13"
1616

1717
__all__ = [
1818
"SUPPORTS_BFLOAT16",

unsloth/models/rl.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -238,9 +238,8 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"):
238238
"use_fp16 = getattr(args, 'fp16', False)\n"\
239239
"force_float32 = False\n"\
240240
"if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':\n"\
241-
" if use_bf16 or use_fp16:\n"\
242-
" print('Unsloth: Switching to float32 training since model cannot work with float16')\n"\
243-
" force_float32 = True\n"\
241+
" print('Unsloth: Switching to float32 training since model cannot work with float16')\n"\
242+
" force_float32 = True\n"\
244243
"mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')\n"\
245244
"dtype = getattr(model.config, 'torch_dtype', None)\n"\
246245
"if dtype is None: dtype = model.get_input_embeddings().dtype\n"\

0 commit comments

Comments (0)