Commit 448fa24

Merge pull request #14 from markmcd/pyink

Run `pyink` over the Python sample code.

Parents: 9c899fe + e4df4d2

13 files changed: +246 -181 lines.
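The change itself is mechanical: every sample file was rewritten by pyink, Google's fork of the Black formatter, so the hunks below are pure style (trailing commas, line wrapping, blank-line normalization) with no behavioral changes. The equivalent command line is `pyink python/`. Below is a minimal sketch of reproducing the pass in-process; it assumes `pip install pyink` and that pyink exposes Black's `format_str`/`Mode` API under its own module name (an assumption based on it being a Black fork, not something this commit documents).

import pyink  # assumed to mirror Black's in-process API

# A deliberately long call, similar to the ones reformatted in this commit.
snippet = (
    "cache = client.caches.create(model=model_name,"
    " config=types.CreateCachedContentConfig(contents=[document]))\n"
)

# format_str returns the reformatted source; print it to compare styles.
print(pyink.format_str(snippet, mode=pyink.Mode()))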

python/cache.py (+33 -40)
@@ -19,6 +19,7 @@
 
 
 class UnitTests(absltest.TestCase):
+
     def test_cache_create(self):
         # [START cache_create]
         from google import genai
@@ -32,17 +33,15 @@ def test_cache_create(self):
             model=model_name,
             config=types.CreateCachedContentConfig(
                 contents=[document],
-                system_instruction="You are an expert analyzing transcripts."
-            )
+                system_instruction="You are an expert analyzing transcripts.",
+            ),
         )
         print(cache)
 
         response = client.models.generate_content(
             model=model_name,
             contents="Please summarize this transcript",
-            config=types.GenerateContentConfig(
-                cached_content=cache.name
-            )
+            config=types.GenerateContentConfig(cached_content=cache.name),
         )
         print(response.text)
         # [END cache_create]
@@ -61,8 +60,8 @@ def test_cache_create_from_name(self):
             model=model_name,
             config=types.CreateCachedContentConfig(
                 contents=[document],
-                system_instruction="You are an expert analyzing transcripts."
-            )
+                system_instruction="You are an expert analyzing transcripts.",
+            ),
         )
         cache_name = cache.name  # Save the name for later
 
@@ -71,9 +70,7 @@ def test_cache_create_from_name(self):
         response = client.models.generate_content(
             model=model_name,
             contents="Find a lighthearted moment from this transcript",
-            config=types.GenerateContentConfig(
-                cached_content=cache.name
-            )
+            config=types.GenerateContentConfig(cached_content=cache.name),
         )
         print(response.text)
         # [END cache_create_from_name]
@@ -91,9 +88,7 @@ def test_cache_create_from_chat(self):
         # Create a chat session with the given system instruction.
         chat = client.chats.create(
             model=model_name,
-            config=types.GenerateContentConfig(
-                system_instruction=system_instruction
-            )
+            config=types.GenerateContentConfig(system_instruction=system_instruction),
         )
         document = client.files.upload(file=media / "a11.txt")
 
@@ -110,16 +105,14 @@ def test_cache_create_from_chat(self):
         cache = client.caches.create(
             model=model_name,
             config={
-                'contents': chat.get_history(),
-                'system_instruction': system_instruction
-            }
+                "contents": chat.get_history(),
+                "system_instruction": system_instruction,
+            },
         )
         # Continue the conversation using the cached content.
         chat = client.chats.create(
             model=model_name,
-            config=types.GenerateContentConfig(
-                cached_content=cache.name
-            )
+            config=types.GenerateContentConfig(cached_content=cache.name),
         )
         response = chat.send_message(
             message="I didn't understand that last part, could you explain it in simpler language?"
@@ -139,9 +132,9 @@ def test_cache_delete(self):
         cache = client.caches.create(
             model=model_name,
             config={
-                'contents': [document],
-                'system_instruction': "You are an expert analyzing transcripts.",
-            }
+                "contents": [document],
+                "system_instruction": "You are an expert analyzing transcripts.",
+            },
         )
         client.caches.delete(name=cache.name)
         # [END cache_delete]
@@ -157,9 +150,9 @@ def test_cache_get(self):
         cache = client.caches.create(
             model=model_name,
             config={
-                'contents': [document],
-                'system_instruction': "You are an expert analyzing transcripts.",
-            }
+                "contents": [document],
+                "system_instruction": "You are an expert analyzing transcripts.",
+            },
         )
         print(client.caches.get(name=cache.name))
         # [END cache_get]
@@ -176,9 +169,9 @@ def test_cache_list(self):
         cache = client.caches.create(
             model=model_name,
             config={
-                'contents': [document],
-                'system_instruction': "You are an expert analyzing transcripts.",
-            }
+                "contents": [document],
+                "system_instruction": "You are an expert analyzing transcripts.",
+            },
         )
         print("My caches:")
         for c in client.caches.list():
@@ -199,31 +192,31 @@ def test_cache_update(self):
         cache = client.caches.create(
             model=model_name,
             config={
-                'contents': [document],
-                'system_instruction': "You are an expert analyzing transcripts.",
-            }
+                "contents": [document],
+                "system_instruction": "You are an expert analyzing transcripts.",
+            },
         )
 
         # Update the cache's time-to-live (ttl)
         ttl = f"{int(datetime.timedelta(hours=2).total_seconds())}s"
         client.caches.update(
-            name=cache.name,
-            config=types.UpdateCachedContentConfig(
-                ttl=ttl
-            )
+            name=cache.name, config=types.UpdateCachedContentConfig(ttl=ttl)
         )
         print(f"After update:\n {cache}")
 
         # Alternatively, update the expire_time directly
         # Update the expire_time directly in valid RFC 3339 format (UTC with a "Z" suffix)
         expire_time = (
-            datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=15)
-        ).isoformat().replace('+00:00', 'Z')
+            (
+                datetime.datetime.now(datetime.timezone.utc)
+                + datetime.timedelta(minutes=15)
+            )
+            .isoformat()
+            .replace("+00:00", "Z")
+        )
         client.caches.update(
             name=cache.name,
-            config=types.UpdateCachedContentConfig(
-                expire_time=expire_time
-            )
+            config=types.UpdateCachedContentConfig(expire_time=expire_time),
         )
         # [END cache_update]
         client.caches.delete(name=cache.name)
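Most hunks above follow from one rule pyink inherits from Black, the "magic trailing comma": a call or literal with no trailing comma that fits within the line limit is collapsed onto a single line, while a trailing comma after the last element pins the exploded one-per-line layout. A self-contained illustration (the function and values are invented for the example):

def describe(name, role, detail):
    return f"{name} ({role}): {detail}"

# No trailing comma and the call fits on one line, so the formatter collapses
# it, as with GenerateContentConfig(cached_content=cache.name) above.
short = describe("cache", "resource", "holds reusable context")

# A trailing comma after the last argument pins one-argument-per-line, which
# is why the exploded hunks above all end their argument lists with ",".
long = describe(
    "cache",
    "resource",
    "holds reusable context",
)

print(short == long)  # True: only the layout differs, not the semantics.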

python/chat.py (+26 -6)
@@ -19,6 +19,7 @@
 
 
 class UnitTests(absltest.TestCase):
+
     def test_chat(self):
         # [START chat]
         from google import genai
@@ -30,8 +31,15 @@ def test_chat(self):
             model="gemini-2.0-flash",
             history=[
                 types.Content(role="user", parts=[types.Part(text="Hello")]),
-                types.Content(role="model", parts=[types.Part(text="Great to meet you. What would you like to know?")])
-            ]
+                types.Content(
+                    role="model",
+                    parts=[
+                        types.Part(
+                            text="Great to meet you. What would you like to know?"
+                        )
+                    ],
+                ),
+            ],
         )
         response = chat.send_message(message="I have 2 dogs in my house.")
         print(response.text)
@@ -49,8 +57,15 @@ def test_chat_streaming(self):
             model="gemini-2.0-flash",
             history=[
                 types.Content(role="user", parts=[types.Part(text="Hello")]),
-                types.Content(role="model", parts=[types.Part(text="Great to meet you. What would you like to know?")])
-            ]
+                types.Content(
+                    role="model",
+                    parts=[
+                        types.Part(
+                            text="Great to meet you. What would you like to know?"
+                        )
+                    ],
+                ),
+            ],
         )
         response = chat.send_message_stream(message="I have 2 dogs in my house.")
         for chunk in response:
@@ -72,15 +87,20 @@ def test_chat_streaming_with_images(self):
         client = genai.Client()
         chat = client.chats.create(model="gemini-2.0-flash")
 
-        response = chat.send_message_stream(message="Hello, I'm interested in learning about musical instruments. Can I show you one?")
+        response = chat.send_message_stream(
+            message="Hello, I'm interested in learning about musical instruments. Can I show you one?"
+        )
         for chunk in response:
             print(chunk.text)
         print("_" * 80)
 
         # Upload image file locally
         image_file = client.files.upload(file=media / "organ.jpg")
         response = chat.send_message_stream(
-            message=["What family of instruments does this instrument belong to?", image_file]
+            message=[
+                "What family of instruments does this instrument belong to?",
+                image_file,
+            ]
         )
         for chunk in response:
             print(chunk.text)

python/code_execution.py (+8 -4)
@@ -14,11 +14,14 @@
 # limitations under the License.
 from absl.testing import absltest
 
+
 class UnitTests(absltest.TestCase):
+
     def test_code_execution_basic(self):
         # [START code_execution_basic]
         from google import genai
         from google.genai import types
+
         client = genai.Client()
         response = client.models.generate_content(
             model="gemini-2.0-flash",
@@ -28,7 +31,7 @@ def test_code_execution_basic(self):
             ),
             config=types.GenerateContentConfig(
                 tools=[types.Tool(code_execution=types.ToolCodeExecution())],
-            )
+            ),
         )
         # Each part may contain text, executable code, or an execution result.
         for part in response.candidates[0].content.parts:
@@ -71,7 +74,6 @@ def test_code_execution_basic(self):
         # print(f'{primes=}')
         # print(f'{sum(primes)=}')
 
-
         # primes=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
         # sum(primes)=5117
         # [END code_execution_basic_return]
@@ -80,6 +82,7 @@ def test_code_execution_request_override(self):
         # [START code_execution_request_override]
         from google import genai
         from google.genai import types
+
         client = genai.Client()
         response = client.models.generate_content(
             model="gemini-2.0-flash",
@@ -89,7 +92,7 @@ def test_code_execution_request_override(self):
             ),
             config=types.GenerateContentConfig(
                 tools=[types.Tool(code_execution=types.ToolCodeExecution())],
-            )
+            ),
         )
         print(response.executable_code)
         print(response.code_execution_result)
@@ -125,12 +128,13 @@ def test_code_execution_chat(self):
         # [START code_execution_chat]
         from google import genai
         from google.genai import types
+
         client = genai.Client()
         chat = client.chats.create(
             model="gemini-2.0-flash",
             config=types.GenerateContentConfig(
                 tools=[types.Tool(code_execution=types.ToolCodeExecution())],
-            )
+            ),
         )
         # First, a simple chat message.
         response = chat.send_message(message='Can you print "Hello world!"?')

python/configure_model_parameters.py (+5 -2)
@@ -14,7 +14,9 @@
 # limitations under the License.
 from absl.testing import absltest
 
+
 class UnitTests(absltest.TestCase):
+
     def test_configure_model(self):
         # [START configure_model_parameters]
         from google import genai
@@ -28,11 +30,12 @@ def test_configure_model(self):
                 candidate_count=1,
                 stop_sequences=["x"],
                 max_output_tokens=20,
-                temperature=1.0
-            )
+                temperature=1.0,
+            ),
         )
         print(response.text)
         # [END configure_model_parameters]
 
+
 if __name__ == "__main__":
     absltest.main()
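A natural follow-up to a one-off formatting pass is guarding it in CI. A hypothetical check is sketched below; the `--check` and `--diff` flags are carried over from Black, which pyink forks, and exit non-zero when a file would be reformatted (treat the exact flags as an assumption, not something this commit adds).

import subprocess
import sys

# --check reports files that would change; --diff prints the would-be changes.
result = subprocess.run(["pyink", "--check", "--diff", "python/"])
sys.exit(result.returncode)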
