diff --git a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ChatViewModel.java b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ChatViewModel.java index b5e15c798..addc9dd07 100644 --- a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ChatViewModel.java +++ b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ChatViewModel.java @@ -29,7 +29,7 @@ public class ChatViewModel extends ViewModel { private GenerativeModelFutures model; void startChatSendMessageStream() { - // [START vertexai_send_message_stream] + // [START chat_streaming] // (optional) Create previous chat history for context Content.Builder userContentBuilder = new Content.Builder(); userContentBuilder.setRole("user"); @@ -84,11 +84,11 @@ public void onError(Throwable t) { } // [END_EXCLUDE] }); - // [END vertexai_send_message_stream] + // [END chat_streaming] } void startChatSendMessage(Executor executor) { - // [START vertexai_send_message] + // [START chat] // (optional) Create previous chat history for context Content.Builder userContentBuilder = new Content.Builder(); userContentBuilder.setRole("user"); @@ -126,12 +126,12 @@ public void onFailure(@NonNull Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_send_message] + // [END chat] } void countTokensChat(Executor executor) { ChatFutures chat = model.startChat(); - // [START vertexai_count_tokens_chat] + // [START count_tokens_chat] List history = chat.getChat().getHistory(); Content messageContent = new Content.Builder() @@ -156,17 +156,19 @@ public void onFailure(@NonNull Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_count_tokens_chat] + // [END count_tokens_chat] } void systemInstructionsText() { - // [START vertexai_si_text] + // [START system_instructions_text] + // Initialize the Vertex AI service and the generative model + // Specify a model that supports system instructions, like a Gemini 1.5 model Content 
systemInstruction = new Content.Builder() .addText("You are a cat. Your name is Neko.") .build(); GenerativeModel model = FirebaseVertexAI.getInstance() .generativeModel( - /* modelName */ "gemini-1.5-pro-preview-0409", + /* modelName */ "gemini-1.5-flash", /* generationConfig (optional) */ null, /* safetySettings (optional) */ null, /* requestOptions (optional) */ new RequestOptions(), @@ -174,6 +176,6 @@ void systemInstructionsText() { /* toolsConfig (optional) */ null, /* systemInstruction (optional) */ systemInstruction ); - // [END vertexai_si_text] + // [END system_instructions_text] } } diff --git a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ConfigurationViewModel.java b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ConfigurationViewModel.java index 8d393b2fe..9f335a351 100644 --- a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ConfigurationViewModel.java +++ b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/ConfigurationViewModel.java @@ -18,7 +18,7 @@ public class ConfigurationViewModel extends ViewModel { void configModelParams() { - // [START vertexai_model_params] + // [START configure_model] GenerationConfig.Builder configBuilder = new GenerationConfig.Builder(); configBuilder.temperature = 0.9f; configBuilder.topK = 16; @@ -29,27 +29,31 @@ void configModelParams() { GenerationConfig generationConfig = configBuilder.build(); GenerativeModel gm = FirebaseVertexAI.Companion.getInstance().generativeModel( - "MODEL_NAME", + "gemini-1.5-flash", generationConfig ); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END vertexai_model_params] + // [END configure_model] } void configSafetySettings() { - SafetySetting harassmentSafety1 = new SafetySetting(HarmCategory.HARASSMENT, + // [START safety_settings] + SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH); - GenerativeModel gm1 = 
FirebaseVertexAI.Companion.getInstance().generativeModel( - "MODEL_NAME", + GenerativeModel gm = FirebaseVertexAI.Companion.getInstance().generativeModel( + "gemini-1.5-flash", /* generationConfig is optional */ null, - Collections.singletonList(harassmentSafety1) + Collections.singletonList(harassmentSafety) ); - GenerativeModelFutures model1 = GenerativeModelFutures.from(gm1); + GenerativeModelFutures model = GenerativeModelFutures.from(gm); + // [END safety_settings] + } - // [START vertexai_safety_settings] + void configMultiSafetySettings() { + // [START multi_safety_settings] SafetySetting harassmentSafety = new SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH); @@ -57,12 +61,12 @@ void configSafetySettings() { BlockThreshold.MEDIUM_AND_ABOVE); GenerativeModel gm = FirebaseVertexAI.Companion.getInstance().generativeModel( - "MODEL_NAME", + "gemini-1.5-flash", /* generationConfig is optional */ null, List.of(harassmentSafety, hateSpeechSafety) ); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END vertexai_safety_settings] + // [END multi_safety_settings] } } diff --git a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/GenerateContentViewModel.java b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/GenerateContentViewModel.java index e5f0f15fa..605dab4f4 100644 --- a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/GenerateContentViewModel.java +++ b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/java/GenerateContentViewModel.java @@ -37,16 +37,16 @@ public class GenerateContentViewModel extends ViewModel { // Only meant to separate the scope of the initialization snippet // so that it doesn't cause a naming clash with the top level declaration static class InitializationSnippet { - // [START vertexai_init] + // [START initialize_model] GenerativeModel gm = FirebaseVertexAI.getInstance() - .generativeModel("gemini-1.5-pro-preview-0409"); + 
.generativeModel("gemini-1.5-flash"); GenerativeModelFutures model = GenerativeModelFutures.from(gm); - // [END vertexai_init] + // [END initialize_model] } void generateContentStream() { - // [START vertexai_textonly_stream] + // [START text_gen_text_only_prompt_streaming] Content prompt = new Content.Builder() .addText("Write a story about a magic backpack.") .build(); @@ -77,11 +77,11 @@ public void onError(Throwable t) { public void onSubscribe(Subscription s) { } }); - // [END vertexai_textonly_stream] + // [END text_gen_text_only_prompt_streaming] } void generateContent(Executor executor) { - // [START vertexai_textonly] + // [START text_gen_text_only_prompt] // Provide a prompt that contains text Content prompt = new Content.Builder() .addText("Write a story about a magic backpack.") @@ -101,7 +101,7 @@ public void onFailure(Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_textonly] + // [END text_gen_text_only_prompt] } // Fake implementation to exemplify Activity.getResources() @@ -115,7 +115,7 @@ Context getApplicationContext() { } void generateContentWithImageStream() { - // [START vertexai_text_and_image_stream] + // [START text_gen_multimodal_one_image_prompt_streaming] Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sparky); Content prompt = new Content.Builder() @@ -149,11 +149,11 @@ public void onError(Throwable t) { public void onSubscribe(Subscription s) { } }); - // [END vertexai_text_and_image_stream] + // [END text_gen_multimodal_one_image_prompt_streaming] } void generateContentWithImage(Executor executor) { - // [START vertexai_text_and_image] + // [START text_gen_multimodal_one_image_prompt] Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sparky); Content content = new Content.Builder() @@ -174,11 +174,11 @@ public void onFailure(Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_text_and_image] + // [END text_gen_multimodal_one_image_prompt] } void 
generateContentWithMultipleImagesStream() { - // [START vertexai_text_and_images_stream] + // [START text_gen_multimodal_multi_image_prompt_streaming] Bitmap bitmap1 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky); Bitmap bitmap2 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky_eats_pizza); @@ -215,11 +215,11 @@ public void onError(Throwable t) { public void onSubscribe(Subscription s) { } }); - // [END vertexai_text_and_images_stream] + // [END text_gen_multimodal_multi_image_prompt_streaming] } void generateContentWithMultipleImages(Executor executor) { - // [START vertexai_text_and_images] + // [START text_gen_multimodal_multi_image_prompt] Bitmap bitmap1 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky); Bitmap bitmap2 = BitmapFactory.decodeResource(getResources(), R.drawable.sparky_eats_pizza); @@ -243,11 +243,11 @@ public void onFailure(Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_text_and_images] + // [END text_gen_multimodal_multi_image_prompt] } void generateContentWithVideo(Executor executor, Uri videoUri) { - // [START vertexai_text_and_video] + // [START text_gen_multimodal_video_prompt] ContentResolver resolver = getApplicationContext().getContentResolver(); try (InputStream stream = resolver.openInputStream(videoUri)) { File videoFile = new File(new URI(videoUri.toString())); @@ -281,13 +281,13 @@ public void onFailure(Throwable t) { } catch (URISyntaxException e) { e.printStackTrace(); } - // [END vertexai_text_and_video] + // [END text_gen_multimodal_video_prompt] } void generateContentWithVideoStream( Uri videoUri ) { - // [START vertexai_text_and_video_stream] + // [START text_gen_multimodal_video_prompt_streaming] ContentResolver resolver = getApplicationContext().getContentResolver(); try (InputStream stream = resolver.openInputStream(videoUri)) { File videoFile = new File(new URI(videoUri.toString())); @@ -334,16 +334,17 @@ public void onSubscribe(Subscription s) { } 
catch (URISyntaxException e) { e.printStackTrace(); } - // [END vertexai_text_and_video_stream] + // [END text_gen_multimodal_video_prompt_streaming] } void countTokensText(Executor executor) { - // [START vertexai_count_tokens_text] - Content text = new Content.Builder() + // [START count_tokens_text] + Content prompt = new Content.Builder() .addText("Write a story about a magic backpack.") .build(); - ListenableFuture countTokensResponse = model.countTokens(text); + // Count tokens and billable characters before calling generateContent + ListenableFuture countTokensResponse = model.countTokens(prompt); Futures.addCallback(countTokensResponse, new FutureCallback() { @Override @@ -352,6 +353,9 @@ public void onSuccess(CountTokensResponse result) { int totalBillableTokens = result.getTotalBillableCharacters(); System.out.println("totalTokens = " + totalTokens + "totalBillableTokens = " + totalBillableTokens); + + // To generate text output, call generateContent with the text input + ListenableFuture response = model.generateContent(prompt); } @Override @@ -359,18 +363,18 @@ public void onFailure(Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_count_tokens_text] + // [END count_tokens_text] } void countTokensMultimodal(Executor executor, Bitmap bitmap) { - // [START vertexai_count_tokens_multimodal] - Content text = new Content.Builder() + // [START count_tokens_text_image] + Content prompt = new Content.Builder() .addImage(bitmap) .addText("Where can I buy this") .build(); - // For text-only input - ListenableFuture countTokensResponse = model.countTokens(text); + // Count tokens and billable characters before calling generateContent + ListenableFuture countTokensResponse = model.countTokens(prompt); Futures.addCallback(countTokensResponse, new FutureCallback() { @Override @@ -379,6 +383,9 @@ public void onSuccess(CountTokensResponse result) { int totalBillableTokens = result.getTotalBillableCharacters(); System.out.println("totalTokens = " + 
totalTokens + "totalBillableTokens = " + totalBillableTokens); + + // To generate text output, call generateContent with the prompt + ListenableFuture response = model.generateContent(prompt); } @Override @@ -386,6 +393,6 @@ public void onFailure(Throwable t) { t.printStackTrace(); } }, executor); - // [END vertexai_count_tokens_multimodal] + // [END count_tokens_text_image] } } diff --git a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ChatViewModel.kt b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ChatViewModel.kt index abe65c958..47433dcee 100644 --- a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ChatViewModel.kt +++ b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ChatViewModel.kt @@ -16,12 +16,12 @@ class ChatViewModel : ViewModel() { private var generativeModel: GenerativeModel init { - generativeModel = Firebase.vertexAI.generativeModel("gemini-1.5-pro-preview-0409") + generativeModel = Firebase.vertexAI.generativeModel("gemini-1.5-flash") } fun startChatSendMessageStream() { viewModelScope.launch { - // [START vertexai_send_message_stream] + // [START chat_streaming] val chat = generativeModel.startChat( history = listOf( content(role = "user") { text("Hello, I have 2 dogs in my house.") }, @@ -32,13 +32,13 @@ class ChatViewModel : ViewModel() { chat.sendMessageStream("How many paws are in my house?").collect { chunk -> Log.d(TAG, chunk.text ?: "") } - // [END vertexai_send_message_stream] + // [END chat_streaming] } } fun startChatSendMessage() { viewModelScope.launch { - // [START vertexai_send_message] + // [START chat] val chat = generativeModel.startChat( history = listOf( content(role = "user") { text("Hello, I have 2 dogs in my house.") }, @@ -48,28 +48,30 @@ class ChatViewModel : ViewModel() { val response = chat.sendMessage("How many paws are in my house?") Log.d(TAG, response.text ?: "") - // [END vertexai_send_message] + // [END chat] } } fun 
countTokensChat() { viewModelScope.launch { val chat = generativeModel.startChat() - // [START vertexai_count_tokens_chat] + // [START count_tokens_chat] // Count tokens for a chat prompt val history = chat.history val messageContent = content { text("This is the message I intend to send") } val (tokens, billableChars) = generativeModel.countTokens(*history.toTypedArray(), messageContent) - // [END vertexai_count_tokens_chat] + // [END count_tokens_chat] } } fun systemInstructionsText() { - // [START vertexai_si_text] + // [START system_instructions_text] + // Initialize the Vertex AI service and the generative model + // Specify a model that supports system instructions, like a Gemini 1.5 model val generativeModel = Firebase.vertexAI.generativeModel( - modelName = "gemini-1.5-pro-preview-0409", + modelName = "gemini-1.5-flash", systemInstruction = content { text("You are a cat. Your name is Neko.") }, ) - // [END vertexai_si_text] + // [END system_instructions_text] } } \ No newline at end of file diff --git a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ConfigurationViewModel.kt b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ConfigurationViewModel.kt index 7476c9d79..32a9eeeb2 100644 --- a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ConfigurationViewModel.kt +++ b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/ConfigurationViewModel.kt @@ -11,7 +11,7 @@ import com.google.firebase.vertexai.vertexAI class ConfigurationViewModel : ViewModel() { fun configModelParams() { - // [START vertexai_model_params] + // [START configure_model] val config = generationConfig { temperature = 0.9f topK = 16 @@ -20,28 +20,35 @@ class ConfigurationViewModel : ViewModel() { stopSequences = listOf("red") } val generativeModel = Firebase.vertexAI.generativeModel( - modelName = "gemini-1.5-pro-preview-0409", + modelName = "gemini-1.5-flash", generationConfig = config ) - // [END 
vertexai_model_params] + // [END configure_model] } - fun configSafetySettings() { - val generativeModel1 = Firebase.vertexAI.generativeModel( - modelName = "MODEL_NAME", + fun configSafetySetting() { + // [START safety_settings] + val harassmentSafety = SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) + val hateSpeechSafety = SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE) + + val generativeModel = Firebase.vertexAI.generativeModel( + modelName = "gemini-1.5-flash", safetySettings = listOf( SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) ) ) + // [END safety_settings] + } - // [START vertexai_safety_settings] + fun configMultiSafetySettings() { + // [START multi_safety_settings] val harassmentSafety = SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.ONLY_HIGH) val hateSpeechSafety = SafetySetting(HarmCategory.HATE_SPEECH, BlockThreshold.MEDIUM_AND_ABOVE) val generativeModel = Firebase.vertexAI.generativeModel( - modelName = "MODEL_NAME", + modelName = "gemini-1.5-flash", safetySettings = listOf(harassmentSafety, hateSpeechSafety) ) - // [END vertexai_safety_settings] + // [END multi_safety_settings] } } \ No newline at end of file diff --git a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/GenerateContentViewModel.kt b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/GenerateContentViewModel.kt index adb613463..3867c32ab 100644 --- a/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/GenerateContentViewModel.kt +++ b/vertexai/app/src/main/java/com/google/firebase/example/vertexai/kotlin/GenerateContentViewModel.kt @@ -23,22 +23,22 @@ class GenerateContentViewModel : ViewModel() { // Only meant to separate the scope of the initialization snippet // so that it doesn't cause a naming clash with the top level generativeModel fun initialize() { - // [START vertexai_init] + // [START initialize_model] val generativeModel = 
Firebase.vertexAI.generativeModel( // Specify a model that supports your use case // Gemini 1.5 Pro is versatile and can accept both text-only and multimodal prompt inputs - modelName = "gemini-1.5-pro-preview-0409" + modelName = "gemini-1.5-flash" ) - // [END vertexai_init] + // [END initialize_model] } init { - generativeModel = Firebase.vertexAI.generativeModel("gemini-1.5-pro-preview-0409") + generativeModel = Firebase.vertexAI.generativeModel("gemini-1.5-flash") } fun generateContentStream() { viewModelScope.launch { - // [START vertexai_textonly_stream] + // [START text_gen_text_only_prompt_streaming] // Provide a prompt that includes only text val prompt = "Write a story about a magic backpack." // To stream generated text output, call generateContentStream and pass in the prompt @@ -47,26 +47,26 @@ class GenerateContentViewModel : ViewModel() { Log.d(TAG, chunk.text ?: "") fullResponse += chunk.text } - // [END vertexai_textonly_stream] + // [END text_gen_text_only_prompt_streaming] } } fun generateContent() { viewModelScope.launch { - // [START vertexai_textonly] + // [START text_gen_text_only_prompt] // Provide a prompt that includes only text val prompt = "Write a story about a magic backpack." 
// To generate text output, call generateContent and pass in the prompt val response = generativeModel.generateContent(prompt) Log.d(TAG, response.text ?: "") - // [END vertexai_textonly] + // [END text_gen_text_only_prompt] } } fun generateContentWithImageStream(resources: Resources) { viewModelScope.launch { - // [START vertexai_text_and_image_stream] + // [START text_gen_multimodal_one_image_prompt_streaming] // Loads an image from the app/res/drawable/ directory val bitmap: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky) @@ -80,13 +80,13 @@ class GenerateContentViewModel : ViewModel() { Log.d(TAG, chunk.text ?: "") fullResponse += chunk.text } - // [END vertexai_text_and_image_stream] + // [END text_gen_multimodal_one_image_prompt_streaming] } } fun generateContentWithImage(resources: Resources) { viewModelScope.launch { - // [START vertexai_text_and_image] + // [START text_gen_multimodal_one_image_prompt] // Loads an image from the app/res/drawable/ directory val bitmap: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky) @@ -97,13 +97,13 @@ class GenerateContentViewModel : ViewModel() { val response = generativeModel.generateContent(prompt) Log.d(TAG, response.text ?: "") - // [END vertexai_text_and_image] + // [END text_gen_multimodal_one_image_prompt] } } fun generateContentWithMultipleImagesStream(resources: Resources) { viewModelScope.launch { - // [START vertexai_text_and_images_stream] + // [START text_gen_multimodal_multi_image_prompt_streaming] // Loads an image from the app/res/drawable/ directory val bitmap1: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky) val bitmap2: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky_eats_pizza) @@ -119,13 +119,13 @@ class GenerateContentViewModel : ViewModel() { Log.d(TAG, chunk.text ?: "") fullResponse += chunk.text } - // [END vertexai_text_and_images_stream] + // [END text_gen_multimodal_multi_image_prompt_streaming] } } fun 
generateContentWithMultipleImages(resources: Resources) { viewModelScope.launch { - // [START vertexai_text_and_images] + // [START text_gen_multimodal_multi_image_prompt] // Loads an image from the app/res/drawable/ directory val bitmap1: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky) val bitmap2: Bitmap = BitmapFactory.decodeResource(resources, R.drawable.sparky_eats_pizza) @@ -138,7 +138,7 @@ class GenerateContentViewModel : ViewModel() { val response = generativeModel.generateContent(prompt) Log.d(TAG, response.text ?: "") - // [END vertexai_text_and_images] + // [END text_gen_multimodal_multi_image_prompt] } } @@ -147,7 +147,7 @@ class GenerateContentViewModel : ViewModel() { videoUri: Uri ) { viewModelScope.launch { - // [START vertexai_text_and_video_stream] + // [START text_gen_multimodal_video_prompt_streaming] val contentResolver = applicationContext.contentResolver contentResolver.openInputStream(videoUri).use { stream -> stream?.let { @@ -165,7 +165,7 @@ class GenerateContentViewModel : ViewModel() { } } } - // [END vertexai_text_and_video_stream] + // [END text_gen_multimodal_video_prompt_streaming] } } @@ -174,7 +174,7 @@ class GenerateContentViewModel : ViewModel() { videoUri: Uri ) { viewModelScope.launch { - // [START vertexai_text_and_video] + // [START text_gen_multimodal_video_prompt] val contentResolver = applicationContext.contentResolver contentResolver.openInputStream(videoUri).use { stream -> stream?.let { @@ -189,27 +189,44 @@ class GenerateContentViewModel : ViewModel() { Log.d(TAG, response.text ?: "") } } - // [END vertexai_text_and_video] + // [END text_gen_multimodal_video_prompt] } } fun countTokensText() { viewModelScope.launch { - // [START vertexai_count_tokens_text] - val (tokens, billableChars) = generativeModel.countTokens("Write a story about a magic backpack.") - // [END vertexai_count_tokens_text] + // [START count_tokens_text] + val prompt = "Write a story about a magic backpack." 
+ + // Count tokens and billable characters before calling generateContent + val (tokens, billableChars) = generativeModel.countTokens(prompt) + Log.d(TAG, "Total Tokens: $tokens") + Log.d(TAG, "Total Billable Characters: $billableChars") + + // To generate text output, call generateContent with the text input + val response = generativeModel.generateContent(prompt) + Log.d(TAG, response.text ?: "") + // [END count_tokens_text] } } fun countTokensMultimodal(bitmap: Bitmap) { viewModelScope.launch { - // [START vertexai_count_tokens_multimodal] + // [START count_tokens_text_image] val prompt = content { image(bitmap) text("Where can I buy this?") } + + // Count tokens and billable characters before calling generateContent + val (tokens, billableChars) = generativeModel.countTokens(prompt) - // [END vertexai_count_tokens_multimodal] + Log.d(TAG, "Total Tokens: $tokens") + Log.d(TAG, "Total Billable Characters: $billableChars") + + // To generate text output, call generateContent with the prompt + val response = generativeModel.generateContent(prompt) + Log.d(TAG, response.text ?: "") + // [END count_tokens_text_image] } } } \ No newline at end of file