
Commit ed7f356

sasha-git authored and copybara-github committed
chore: update docs
PiperOrigin-RevId: 720733411
1 parent 4f7b3f3 commit ed7f356

File tree: 7 files changed (+4754 −1655 lines)


docs/_sources/index.rst.txt

+122 −72
@@ -68,11 +68,11 @@ with text content
 .. code:: python

     response = client.models.generate_content(
-        model="gemini-2.0-flash-exp", contents="What is your name?"
+        model="gemini-2.0-flash-exp", contents="Why is the sky blue?"
     )
     print(response.text)

-with uploaded file (Google AI only)
+with uploaded file (Gemini API only)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 download the file in console.
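
The snippets in this diff assume a ``client`` constructed earlier in the doc; a minimal sketch of the two constructors, with placeholder key, project, and location values:

.. code:: python

    from google import genai

    # Gemini Developer API (the key is a placeholder).
    client = genai.Client(api_key='YOUR_API_KEY')

    # ...or Vertex AI (project and location are placeholders).
    client = genai.Client(
        vertexai=True, project='your-project-id', location='us-central1'
    )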
@@ -85,9 +85,10 @@ python code.

 .. code:: python

-    file = client.files.upload(path='a11.text')
+    file = client.files.upload(path='a11.txt')
     response = client.models.generate_content(
-        model='gemini-2.0-flash-exp', contents=['Summarize this file', file]
+        model='gemini-2.0-flash-exp',
+        contents=['Could you summarize this file?', file]
     )
     print(response.text)
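
An uploaded file can be removed once the request completes; a small follow-up sketch using the ``name`` the upload returned:

.. code:: python

    # Delete the uploaded file when it is no longer needed.
    client.files.delete(name=file.name)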
@@ -117,7 +118,7 @@ dictionaries. You can get the type from ``google.genai.types``.

     response = client.models.generate_content(
         model="gemini-2.0-flash-exp",
-        contents=types.Part.from_text("Why is the sky blue?"),
+        contents=types.Part.from_text(text="Why is the sky blue?"),
         config=types.GenerateContentConfig(
             temperature=0,
             top_p=0.95,
@@ -131,7 +132,46 @@ dictionaries. You can get the type from ``google.genai.types``.
         ),
     )

-    response
+    print(response.text)
+
+Thinking Model
+--------------
+
+The Gemini 2.0 Flash Thinking model is an experimental model that can return
+"thoughts" as part of its response.
+
+Gemini Developer API
+~~~~~~~~~~~~~~~~~~~~
+
+Thinking config is only available in ``v1alpha`` for the Gemini Developer API.
+
+.. code:: python
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash-thinking-exp',
+        contents='What is the sum of natural numbers from 1 to 100?',
+        config=types.GenerateContentConfig(
+            thinking_config=types.ThinkingConfig(include_thoughts=True),
+            http_options=types.HttpOptions(api_version='v1alpha'),
+        )
+    )
+    for part in response.candidates[0].content.parts:
+        print(part)
+
+Vertex AI API
+~~~~~~~~~~~~~
+
+.. code:: python
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash-thinking-exp-01-21',
+        contents='What is the sum of natural numbers from 1 to 100?',
+        config=types.GenerateContentConfig(
+            thinking_config=types.ThinkingConfig(include_thoughts=True),
+        )
+    )
+    for part in response.candidates[0].content.parts:
+        print(part)

 List Base Models
 ----------------
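
If every request should target ``v1alpha``, the API version can also be pinned once at client construction rather than per call; a minimal sketch, assuming a Gemini Developer API key (placeholder value):

.. code:: python

    from google import genai
    from google.genai import types

    # Pin the API version for all requests made by this client.
    client = genai.Client(
        api_key='YOUR_API_KEY',  # placeholder
        http_options=types.HttpOptions(api_version='v1alpha'),
    )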
@@ -230,10 +270,10 @@ Then you will receive a function call part in the response.
     function = types.FunctionDeclaration(
         name="get_current_weather",
         description="Get the current weather in a given location",
-        parameters=types.FunctionParameters(
+        parameters=types.Schema(
             type="OBJECT",
             properties={
-                "location": types.ParameterType(
+                "location": types.Schema(
                     type="STRING",
                     description="The city and state, e.g. San Francisco, CA",
                 ),
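
The declaration above is then passed to the model as a tool; a minimal sketch of the wiring, following the same config pattern as the surrounding examples:

.. code:: python

    tool = types.Tool(function_declarations=[function])
    response = client.models.generate_content(
        model="gemini-2.0-flash-exp",
        contents="What is the weather like in Boston?",
        config=types.GenerateContentConfig(tools=[tool]),
    )
    # The model should respond with one or more function calls.
    print(response.function_calls)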
@@ -262,10 +302,9 @@ The following example shows how to do it for a simple function invocation.

     user_prompt_content = types.Content(
         role="user",
-        parts=[types.Part.from_text("What is the weather like in Boston?")],
+        parts=[types.Part.from_text(text="What is the weather like in Boston?")],
     )
-    function_call_content = response.candidates[0].content
-    function_call_part = function_call_content.parts[0]
+    function_call_part = response.function_calls[0]


     try:
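
Once the function call is in hand, the locally computed result is sent back to the model as a function response; a hedged sketch, where the weather value is illustrative and ``tool`` is the declaration wiring from the sketch above:

.. code:: python

    # Illustrative local result for the requested call.
    function_response = {"weather": "sunny, 72F"}

    function_response_content = types.Content(
        role="tool",
        parts=[
            types.Part.from_function_response(
                name=function_call_part.name,
                response={"result": function_response},
            )
        ],
    )
    follow_up = client.models.generate_content(
        model="gemini-2.0-flash-exp",
        contents=[
            user_prompt_content,
            response.candidates[0].content,  # the model's function-call turn
            function_response_content,
        ],
        config=types.GenerateContentConfig(tools=[tool]),
    )
    print(follow_up.text)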
@@ -366,6 +405,60 @@ Schemas can be provided as Pydantic Models.
     )
     print(response.text)

+Enum Response Schema
+--------------------
+
+Text Response
+~~~~~~~~~~~~~
+
+You can set ``response_mime_type`` to ``'text/x.enum'`` to return one of the
+enum values as the response.
+
+.. code:: python
+
+    from enum import Enum
+
+    class InstrumentEnum(Enum):
+        PERCUSSION = 'Percussion'
+        STRING = 'String'
+        WOODWIND = 'Woodwind'
+        BRASS = 'Brass'
+        KEYBOARD = 'Keyboard'
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash-exp',
+        contents='What instrument plays multiple notes at once?',
+        config={
+            'response_mime_type': 'text/x.enum',
+            'response_schema': InstrumentEnum,
+        },
+    )
+    print(response.text)
+
+JSON Response
+~~~~~~~~~~~~~
+
+You can also set ``response_mime_type`` to ``'application/json'``; the response
+will be identical but in quotes.
+
+.. code:: python
+
+    class InstrumentEnum(Enum):
+        PERCUSSION = 'Percussion'
+        STRING = 'String'
+        WOODWIND = 'Woodwind'
+        BRASS = 'Brass'
+        KEYBOARD = 'Keyboard'
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash-exp',
+        contents='What instrument plays multiple notes at once?',
+        config={
+            'response_mime_type': 'application/json',
+            'response_schema': InstrumentEnum,
+        },
+    )
+    print(response.text)

 Streaming
 ---------
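
As a usage note, the quoted JSON form round-trips back into the enum; a small sketch assuming the response text is one of the member values:

.. code:: python

    import json

    # 'application/json' returns e.g. '"Keyboard"'; map it back to the enum.
    instrument = InstrumentEnum(json.loads(response.text))
    print(instrument)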
@@ -439,10 +532,10 @@ Streaming

 .. code:: python

-    async for response in client.aio.models.generate_content_stream(
+    async for chunk in await client.aio.models.generate_content_stream(
         model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
     ):
-        print(response.text, end="")
+        print(chunk.text, end="")

 Count Tokens and Compute Tokens
 -------------------------------
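
For readers running the async snippet outside a notebook, it needs an event loop; a minimal sketch:

.. code:: python

    import asyncio

    async def main():
        async for chunk in await client.aio.models.generate_content_stream(
            model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
        ):
            print(chunk.text, end="")

    asyncio.run(main())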
@@ -451,7 +544,7 @@ Count Tokens and Compute Tokens

     response = client.models.count_tokens(
         model="gemini-2.0-flash-exp",
-        contents="What is your name?",
+        contents="why is the sky blue?",
     )
     print(response)
@@ -464,7 +557,7 @@ Compute tokens is only supported in Vertex AI.

     response = client.models.compute_tokens(
         model="gemini-2.0-flash-exp",
-        contents="What is your name?",
+        contents="why is the sky blue?",
     )
     print(response)
@@ -475,7 +568,7 @@ Async

     response = await client.aio.models.count_tokens(
         model="gemini-2.0-flash-exp",
-        contents="What is your name?",
+        contents="why is the sky blue?",
     )
     print(response)
@@ -486,7 +579,7 @@ Embed Content

     response = client.models.embed_content(
         model="text-embedding-004",
-        contents="What is your name?",
+        contents="why is the sky blue?",
     )
     print(response)
@@ -495,7 +588,7 @@ Embed Content
     # multiple contents with config
     response = client.models.embed_content(
         model="text-embedding-004",
-        contents=["What is your name?", "What is your age?"],
+        contents=["why is the sky blue?", "What is your age?"],
         config=types.EmbedContentConfig(output_dimensionality=10),
     )
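
Each returned embedding carries its vector of floats; a hedged sketch, assuming the response exposes an ``embeddings`` list of per-input entries:

.. code:: python

    # One entry per input content; inspect the first few dimensions of each.
    for embedding in response.embeddings:
        print(len(embedding.values), embedding.values[:3])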
@@ -512,10 +605,10 @@ Support for generate image in Gemini Developer API is behind an allowlist
 .. code:: python

     # Generate Image
-    response1 = client.models.generate_image(
-        model="imagen-3.0-generate-001",
+    response1 = client.models.generate_images(
+        model="imagen-3.0-generate-002",
         prompt="An umbrella in the foreground, and a rainy night sky in the background",
-        config=types.GenerateImageConfig(
+        config=types.GenerateImagesConfig(
             negative_prompt="human",
             number_of_images=1,
             include_rai_reason=True,
@@ -533,7 +626,7 @@ Upscale image is only supported in Vertex AI.

     # Upscale the generated image from above
     response2 = client.models.upscale_image(
-        model="imagen-3.0-generate-001",
+        model="imagen-3.0-generate-002",
         image=response1.generated_images[0].image,
         upscale_factor="x2",
         config=types.UpscaleImageConfig(
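
A generated image can be persisted before or after upscaling; a hedged sketch, assuming the returned ``Image`` exposes its raw bytes via ``image_bytes``:

.. code:: python

    # Write the first generated image to disk.
    with open('umbrella.png', 'wb') as f:
        f.write(response1.generated_images[0].image.image_bytes)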
@@ -621,7 +714,7 @@ Async Streaming
 .. code:: python

     chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
-    async for chunk in chat.send_message_stream("tell me a story"):
+    async for chunk in await chat.send_message_stream("tell me a story"):
         print(chunk.text, end="")

 Files
@@ -720,7 +813,7 @@ Tunings
 =======

 ``client.tunings`` contains tuning job APIs and supports supervised fine
-tuning through ``tune`` and distillation through ``distill``
+tuning through ``tune``.

 Tune
 ----
@@ -789,7 +882,7 @@ Use Tuned Model

     response = client.models.generate_content(
         model=tuning_job.tuned_model.endpoint,
-        contents="What is your name?",
+        contents="why is the sky blue?",
     )

     print(response.text)
@@ -852,49 +945,6 @@ Update Tuned Model

     print(model)

-Distillation
-------------
-
-Only supported in Vertex AI. Requires allowlist.
-
-.. code:: python
-
-    distillation_job = client.tunings.distill(
-        student_model="gemma-2b-1.1-it",
-        teacher_model="gemini-1.5-pro-002",
-        training_dataset=genai.types.DistillationDataset(
-            gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
-        ),
-        config=genai.types.CreateDistillationJobConfig(
-            epoch_count=1,
-            pipeline_root_directory=(
-                "gs://vertex-sdk-dev-staging-us-central1/tmp/distillation_pipeline_root"
-            ),
-        ),
-    )
-    print(distillation_job)
-
-.. code:: python
-
-    completed_states = set(
-        [
-            "JOB_STATE_SUCCEEDED",
-            "JOB_STATE_FAILED",
-            "JOB_STATE_CANCELLED",
-            "JOB_STATE_PAUSED",
-        ]
-    )
-
-    while distillation_job.state not in completed_states:
-        print(distillation_job.state)
-        distillation_job = client.tunings.get(name=distillation_job.name)
-        time.sleep(10)
-
-    print(distillation_job)
-
-.. code:: python
-
-    distillation_job

 List Tuning Jobs
 ----------------
@@ -976,12 +1026,12 @@ List

 .. code:: python

-    for job in client.batches.list(config=types.ListBatchJobConfig(page_size=10)):
+    for job in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
         print(job)

 .. code:: python

-    pager = client.batches.list(config=types.ListBatchJobConfig(page_size=10))
+    pager = client.batches.list(config=types.ListBatchJobsConfig(page_size=10))
     print(pager.page_size)
     print(pager[0])
     pager.next_page()
@@ -993,14 +1043,14 @@ Async
 .. code:: python

     async for job in await client.aio.batches.list(
-        config=types.ListBatchJobConfig(page_size=10)
+        config=types.ListBatchJobsConfig(page_size=10)
     ):
         print(job)

 .. code:: python

     async_pager = await client.aio.batches.list(
-        config=types.ListBatchJobConfig(page_size=10)
+        config=types.ListBatchJobsConfig(page_size=10)
     )
     print(async_pager.page_size)
     print(async_pager[0])
