@@ -64,6 +64,7 @@ def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream: Optional[Literal[False]] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
@@ -129,6 +130,9 @@ def create(
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
           stop: Up to 4 sequences where the API will stop generating further tokens. The
               returned text will not contain the stop sequence.
 
@@ -193,6 +197,7 @@ def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
         temperature: Optional[float] | Omit = omit,
@@ -260,6 +265,9 @@ def create(
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
           stop: Up to 4 sequences where the API will stop generating further tokens. The
               returned text will not contain the stop sequence.
 
@@ -321,6 +329,7 @@ def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
         temperature: Optional[float] | Omit = omit,
@@ -388,6 +397,9 @@ def create(
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
           stop: Up to 4 sequences where the API will stop generating further tokens. The
               returned text will not contain the stop sequence.
 
@@ -451,6 +463,7 @@ def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
@@ -491,6 +504,7 @@ def create(
                     "metadata": metadata,
                     "n": n,
                     "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
                     "stop": stop,
                     "stream": stream,
                     "stream_options": stream_options,
@@ -557,6 +571,7 @@ async def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream: Optional[Literal[False]] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
@@ -622,6 +637,9 @@ async def create(
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
           stop: Up to 4 sequences where the API will stop generating further tokens. The
               returned text will not contain the stop sequence.
 
@@ -686,6 +704,7 @@ async def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
         temperature: Optional[float] | Omit = omit,
@@ -753,6 +772,9 @@ async def create(
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
           stop: Up to 4 sequences where the API will stop generating further tokens. The
               returned text will not contain the stop sequence.
 
@@ -814,6 +836,7 @@ async def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
         temperature: Optional[float] | Omit = omit,
@@ -881,6 +904,9 @@ async def create(
               whether they appear in the text so far, increasing the model's likelihood to
               talk about new topics.
 
+          reasoning_effort: Constrains effort on reasoning for reasoning models. Reducing reasoning effort
+              can result in faster responses and fewer tokens used on reasoning in a response.
+
           stop: Up to 4 sequences where the API will stop generating further tokens. The
               returned text will not contain the stop sequence.
 
@@ -941,6 +967,7 @@ async def create(
         metadata: Optional[Dict[str, str]] | Omit = omit,
         n: Optional[int] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
+        reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] | Omit = omit,
         stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
         stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
         stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit,
@@ -981,6 +1008,7 @@ async def create(
                     "metadata": metadata,
                     "n": n,
                     "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
                     "stop": stop,
                     "stream": stream,
                     "stream_options": stream_options,