1515 * limitations under the License.
1616 */
1717
18- import { GoogleGenAI } from "@google/genai" ;
18+ import {
19+ GoogleGenAI ,
20+ createUserContent ,
21+ createPartFromUri
22+ } from "@google/genai" ;
23+ import path from "path" ;
24+ import { fileURLToPath } from "url" ;
25+
// ES modules do not provide __filename/__dirname; derive them from import.meta.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Directory containing the sample media files used by the snippets below.
const media = path.join(__dirname, '..', 'third_party');

// Resolves after `ms` milliseconds; used when polling uploaded-file state.
const sleep = (ms) =>
  new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
1933
2034export async function textGenTextOnlyPrompt ( ) {
2135 // [START text_gen_text_only_prompt]
2236 // Make sure to include the following import:
2337 // import {GoogleGenAI} from '@google/genai';
24- const GEMINI_API_KEY = process . env . GEMINI_API_KEY ;
25- const ai = new GoogleGenAI ( { apiKey : GEMINI_API_KEY } ) ;
38+ const ai = new GoogleGenAI ( { apiKey : process . env . GEMINI_API_KEY } ) ;
2639
2740 const response = await ai . models . generateContent ( {
2841 model : "gemini-2.0-flash" ,
@@ -32,3 +45,309 @@ export async function textGenTextOnlyPrompt() {
3245 // [END text_gen_text_only_prompt]
3346 return response . text ;
3447}
48+
// Streams a text-only generation, echoing each chunk as it arrives and
// returning the fully accumulated story.
export async function textGenTextOnlyPromptStreaming() {
  // [START text_gen_text_only_prompt_streaming]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const stream = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: "Write a story about a magic backpack.",
  });

  let accumulated = "";
  for await (const chunk of stream) {
    console.log(chunk.text);
    accumulated += chunk.text;
  }
  // [END text_gen_text_only_prompt_streaming]
  return accumulated;
}
67+
// Uploads a single image and asks the model to describe it; returns the
// model's text response.
export async function textGenMultimodalOneImagePrompt() {
  // [START text_gen_multimodal_one_image_prompt]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  // Upload the image first; the prompt then references it by URI.
  const organ = await ai.files.upload({
    file: path.join(media, "organ.jpg"),
  });

  const prompt = createUserContent([
    "Tell me about this instrument",
    createPartFromUri(organ.uri, organ.mimeType),
  ]);

  const response = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: [prompt],
  });
  console.log(response.text);
  // [END text_gen_multimodal_one_image_prompt]
  return response.text;
}
91+
// Streaming variant of the single-image prompt: echoes each chunk and
// returns the concatenated text.
export async function textGenMultimodalOneImagePromptStreaming() {
  // [START text_gen_multimodal_one_image_prompt_streaming]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  // Upload the image first; the prompt then references it by URI.
  const organ = await ai.files.upload({
    file: path.join(media, "organ.jpg"),
  });

  const prompt = createUserContent([
    "Tell me about this instrument",
    createPartFromUri(organ.uri, organ.mimeType),
  ]);

  const stream = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: [prompt],
  });

  let accumulated = "";
  for await (const chunk of stream) {
    console.log(chunk.text);
    accumulated += chunk.text;
  }
  // [END text_gen_multimodal_one_image_prompt_streaming]
  return accumulated;
}
119+
// Uploads two images and asks the model to compare them; returns the
// model's text response.
export async function textGenMultimodalMultiImagePrompt() {
  // [START text_gen_multimodal_multi_image_prompt]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const organ = await ai.files.upload({
    file: path.join(media, "organ.jpg"),
  });

  // The second upload pins its MIME type explicitly.
  const cajun = await ai.files.upload({
    file: path.join(media, "Cajun_instruments.jpg"),
    config: { mimeType: "image/jpeg" },
  });

  const parts = [
    "What is the difference between both of these instruments?",
    createPartFromUri(organ.uri, organ.mimeType),
    createPartFromUri(cajun.uri, cajun.mimeType),
  ];

  const response = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: [createUserContent(parts)],
  });
  console.log(response.text);
  // [END text_gen_multimodal_multi_image_prompt]
  return response.text;
}
149+
// Streaming variant of the two-image comparison prompt: echoes each chunk
// and returns the concatenated text.
export async function textGenMultimodalMultiImagePromptStreaming() {
  // [START text_gen_multimodal_multi_image_prompt_streaming]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const organ = await ai.files.upload({
    file: path.join(media, "organ.jpg"),
  });

  // Pin the MIME type explicitly, matching the non-streaming sample.
  const cajun = await ai.files.upload({
    file: path.join(media, "Cajun_instruments.jpg"),
    config: { mimeType: "image/jpeg" },
  });

  const response = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: [
      createUserContent([
        "What is the difference between both of these instruments?",
        createPartFromUri(organ.uri, organ.mimeType),
        createPartFromUri(cajun.uri, cajun.mimeType),
      ]),
    ],
  });
  let text = "";
  for await (const chunk of response) {
    console.log(chunk.text);
    text += chunk.text;
  }
  // [END text_gen_multimodal_multi_image_prompt_streaming]
  return text;
}
182+
// Uploads an audio file and requests a summary; returns the model's text.
export async function textGenMultimodalAudio() {
  // [START text_gen_multimodal_audio]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const audio = await ai.files.upload({
    file: path.join(media, "sample.mp3"),
  });

  const prompt = createUserContent([
    "Give me a summary of this audio file.",
    createPartFromUri(audio.uri, audio.mimeType),
  ]);

  const response = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: [prompt],
  });
  console.log(response.text);
  // [END text_gen_multimodal_audio]
  return response.text;
}
206+
// Streaming variant of the audio-summary prompt: echoes each chunk and
// returns the concatenated text.
export async function textGenMultimodalAudioStreaming() {
  // [START text_gen_multimodal_audio_streaming]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const audio = await ai.files.upload({
    file: path.join(media, "sample.mp3"),
  });

  const prompt = createUserContent([
    "Give me a summary of this audio file.",
    createPartFromUri(audio.uri, audio.mimeType),
  ]);

  const stream = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: [prompt],
  });

  let accumulated = "";
  for await (const chunk of stream) {
    console.log(chunk.text);
    accumulated += chunk.text;
  }
  // [END text_gen_multimodal_audio_streaming]
  return accumulated;
}
234+
// Uploads a video, waits for server-side processing to finish, then asks
// the model to describe the clip. Returns the model's text response.
// Throws if video processing fails (otherwise the poll loop would never
// terminate, since a FAILED file never becomes ACTIVE).
export async function textGenMultimodalVideoPrompt() {
  // [START text_gen_multimodal_video_prompt]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  let video = await ai.files.upload({
    file: path.join(media, 'Big_Buck_Bunny.mp4'),
  });

  // Poll until the video file is completely processed (state becomes ACTIVE).
  while (!video.state || video.state.toString() !== 'ACTIVE') {
    // Bail out on terminal failure instead of polling forever.
    if (video.state && video.state.toString() === 'FAILED') {
      throw new Error(`Video processing failed for file ${video.name}`);
    }
    console.log('Processing video...');
    console.log('File state: ', video.state);
    await sleep(5000);
    video = await ai.files.get({ name: video.name });
  }

  const response = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: [
      createUserContent([
        "Describe this video clip",
        createPartFromUri(video.uri, video.mimeType),
      ]),
    ],
  });
  console.log(response.text);
  // [END text_gen_multimodal_video_prompt]
  return response.text;
}
266+
// Streaming variant of the video-description prompt: waits for processing,
// echoes each chunk, and returns the concatenated text.
// Throws if video processing fails (otherwise the poll loop would never
// terminate, since a FAILED file never becomes ACTIVE).
export async function textGenMultimodalVideoPromptStreaming() {
  // [START text_gen_multimodal_video_prompt_streaming]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  let video = await ai.files.upload({
    file: path.join(media, 'Big_Buck_Bunny.mp4'),
  });

  // Poll until the video file is completely processed (state becomes ACTIVE).
  while (!video.state || video.state.toString() !== 'ACTIVE') {
    // Bail out on terminal failure instead of polling forever.
    if (video.state && video.state.toString() === 'FAILED') {
      throw new Error(`Video processing failed for file ${video.name}`);
    }
    console.log('Processing video...');
    console.log('File state: ', video.state);
    await sleep(5000);
    video = await ai.files.get({ name: video.name });
  }

  const response = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: [
      createUserContent([
        "Describe this video clip",
        createPartFromUri(video.uri, video.mimeType),
      ]),
    ],
  });
  let text = "";
  for await (const chunk of response) {
    console.log(chunk.text);
    text += chunk.text;
  }
  // [END text_gen_multimodal_video_prompt_streaming]
  return text;
}
302+
// Uploads a PDF and requests a summary; returns the model's text.
export async function textGenMultimodalPdf() {
  // [START text_gen_multimodal_pdf]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const pdf = await ai.files.upload({
    file: path.join(media, "test.pdf"),
  });

  const prompt = createUserContent([
    "Give me a summary of this document:",
    createPartFromUri(pdf.uri, pdf.mimeType),
  ]);

  const response = await ai.models.generateContent({
    model: "gemini-2.0-flash",
    contents: [prompt],
  });
  console.log(response.text);
  // [END text_gen_multimodal_pdf]
  return response.text;
}
326+
// Streaming variant of the PDF-summary prompt: echoes each chunk and
// returns the concatenated text.
export async function textGenMultimodalPdfStreaming() {
  // [START text_gen_multimodal_pdf_streaming]
  // Make sure to include the following import:
  // import {GoogleGenAI} from '@google/genai';
  const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });

  const pdf = await ai.files.upload({
    file: path.join(media, "test.pdf"),
  });

  const prompt = createUserContent([
    "Give me a summary of this document:",
    createPartFromUri(pdf.uri, pdf.mimeType),
  ]);

  const stream = await ai.models.generateContentStream({
    model: "gemini-2.0-flash",
    contents: [prompt],
  });

  let accumulated = "";
  for await (const chunk of stream) {
    console.log(chunk.text);
    accumulated += chunk.text;
  }
  // [END text_gen_multimodal_pdf_streaming]
  return accumulated;
}