From 82258c03e9101c0031796d8a22868a207197fc03 Mon Sep 17 00:00:00 2001
From: Luke Schlangen
Date: Thu, 21 Dec 2023 14:15:13 -0600
Subject: [PATCH 1/2] feat: add model to each genai sample

---
 generative-ai/snippets/countTokens.js         | 20 ++++++-----------
 generative-ai/snippets/nonStreamingChat.js    | 19 ++++++----------
 generative-ai/snippets/nonStreamingContent.js | 19 ++++++----------
 .../snippets/nonStreamingMultipartContent.js  | 20 ++++++-----------
 generative-ai/snippets/safetySettings.js      | 18 ++++++---------
 .../snippets/sendMultiModalPromptWithImage.js |  4 ++--
 .../snippets/sendMultiModalPromptWithVideo.js | 19 ++++++----------
 generative-ai/snippets/streamChat.js          | 19 ++++++----------
 generative-ai/snippets/streamContent.js       | 20 ++++++-----------
 .../snippets/streamMultipartContent.js        | 22 ++++++-------------
 10 files changed, 65 insertions(+), 115 deletions(-)

diff --git a/generative-ai/snippets/countTokens.js b/generative-ai/snippets/countTokens.js
index 83b0f4db57..492be74da3 100644
--- a/generative-ai/snippets/countTokens.js
+++ b/generative-ai/snippets/countTokens.js
@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_token_count]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function countTokens(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_token_count]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertex_ai = new VertexAI({project: projectId, location: location});
 
@@ -42,9 +37,8 @@ async function countTokens(
 
   const countTokensResp = await generativeModel.countTokens(req);
   console.log('count tokens response: ', countTokensResp);
-
-  // [END aiplatform_gemini_token_count]
 }
+// [END aiplatform_gemini_token_count]
 
 countTokens(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/nonStreamingChat.js b/generative-ai/snippets/nonStreamingChat.js
index 0a3ec0e629..0199074f44 100644
--- a/generative-ai/snippets/nonStreamingChat.js
+++ b/generative-ai/snippets/nonStreamingChat.js
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_multiturn_chat_nonstreaming]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingChat(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_multiturn_chat_nonstreaming]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -55,9 +51,8 @@ async function createNonStreamingChat(
   const result3 = await chat.sendMessage(chatInput3);
   const response3 = result3.response.candidates[0].content.parts[0].text;
   console.log('Chat bot: ', response3);
-
-  // [END aiplatform_gemini_multiturn_chat_nonstreaming]
 }
+// [END aiplatform_gemini_multiturn_chat_nonstreaming]
 
 createNonStreamingChat(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/nonStreamingContent.js b/generative-ai/snippets/nonStreamingContent.js
index 1b72588ae4..e1b486582e 100644
--- a/generative-ai/snippets/nonStreamingContent.js
+++ b/generative-ai/snippets/nonStreamingContent.js
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_content_nonstreaming]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro',
 ) {
-  // [START aiplatform_gemini_content_nonstreaming]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
 
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
@@ -55,9 +51,8 @@ async function createNonStreamingContent(
     aggregatedResponse.candidates[0].content.parts[0].text;
 
   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_content_nonstreaming]
 }
+// [END aiplatform_gemini_content_nonstreaming]
 
 createNonStreamingContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/nonStreamingMultipartContent.js b/generative-ai/snippets/nonStreamingMultipartContent.js
index 009779ac6f..3cf74d0344 100644
--- a/generative-ai/snippets/nonStreamingMultipartContent.js
+++ b/generative-ai/snippets/nonStreamingMultipartContent.js
@@ -12,24 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_get_started]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingMultipartContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL',
+  location = 'us-central1',
+  model = 'gemini-pro-vision',
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
-  // [START aiplatform_gemini_get_started]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image
-  // const mimeType = 'image/jpeg';
 
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
@@ -71,9 +66,8 @@ async function createNonStreamingMultipartContent(
     aggregatedResponse.candidates[0].content.parts[0].text;
 
   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_get_started]
 }
+// [END aiplatform_gemini_get_started]
 
 createNonStreamingMultipartContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/safetySettings.js b/generative-ai/snippets/safetySettings.js
index 2ccbc108a4..ac4509f646 100644
--- a/generative-ai/snippets/safetySettings.js
+++ b/generative-ai/snippets/safetySettings.js
@@ -12,25 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_safety_settings]
 const {
   VertexAI,
   HarmCategory,
   HarmBlockThreshold,
 } = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function setSafetySettings(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_safety_settings]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -67,8 +63,8 @@ async function setSafetySettings(
       process.stdout.write(item.candidates[0].content.parts[0].text);
     }
   }
-  // [END aiplatform_gemini_safety_settings]
 }
+// [END aiplatform_gemini_safety_settings]
 
 setSafetySettings(...process.argv.slice(3)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/sendMultiModalPromptWithImage.js b/generative-ai/snippets/sendMultiModalPromptWithImage.js
index 33edbde3a1..b6dcb09aa7 100644
--- a/generative-ai/snippets/sendMultiModalPromptWithImage.js
+++ b/generative-ai/snippets/sendMultiModalPromptWithImage.js
@@ -26,8 +26,8 @@ async function getBase64(url) {
  */
 async function sendMultiModalPromptWithImage(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro-vision'
 ) {
   // For images, the SDK supports base64 strings
   const landmarkImage1 = await getBase64(
diff --git a/generative-ai/snippets/sendMultiModalPromptWithVideo.js b/generative-ai/snippets/sendMultiModalPromptWithVideo.js
index b98709f5c1..cd777cbf12 100644
--- a/generative-ai/snippets/sendMultiModalPromptWithVideo.js
+++ b/generative-ai/snippets/sendMultiModalPromptWithVideo.js
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_single_turn_video]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function sendMultiModalPromptWithVideo(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro-vision'
 ) {
-  // [START aiplatform_gemini_single_turn_video]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -63,9 +59,8 @@ async function sendMultiModalPromptWithVideo(
     aggregatedResponse.candidates[0].content.parts[0].text;
 
   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_single_turn_video]
 }
+// [END aiplatform_gemini_single_turn_video]
 
 sendMultiModalPromptWithVideo(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/streamChat.js b/generative-ai/snippets/streamChat.js
index c33f2be7a6..bf6edcfa40 100644
--- a/generative-ai/snippets/streamChat.js
+++ b/generative-ai/snippets/streamChat.js
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_multiturn_chat]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamChat(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_multiturn_chat]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -44,9 +40,8 @@ async function createStreamChat(
   for await (const item of result1.stream) {
     console.log(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_multiturn_chat]
 }
+// [END aiplatform_gemini_multiturn_chat]
 
 createStreamChat(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/streamContent.js b/generative-ai/snippets/streamContent.js
index 95cae269ba..9004c6b515 100644
--- a/generative-ai/snippets/streamContent.js
+++ b/generative-ai/snippets/streamContent.js
@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_content]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_content]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -51,9 +46,8 @@ async function createStreamContent(
   for await (const item of responseStream.stream) {
     process.stdout.write(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_content]
 }
+// [END aiplatform_gemini_content]
 
 createStreamContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
diff --git a/generative-ai/snippets/streamMultipartContent.js b/generative-ai/snippets/streamMultipartContent.js
index 9d8e29c163..b336258c29 100644
--- a/generative-ai/snippets/streamMultipartContent.js
+++ b/generative-ai/snippets/streamMultipartContent.js
@@ -12,26 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_get_started]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamMultipartContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL',
+  location = 'us-central1',
+  model = 'gemini-pro-vision',
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
-  // [START aiplatform_gemini_get_started]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-  // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image
-  // const mimeType = 'image/jpeg';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -68,9 +61,8 @@ async function createStreamMultipartContent(
   for await (const item of responseStream.stream) {
     process.stdout.write(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_get_started]
 }
+// [END aiplatform_gemini_get_started]
 
 createStreamMultipartContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);

From be61214440971b8a8694023138762b05ab7dfe11 Mon Sep 17 00:00:00 2001
From: Luke Schlangen
Date: Thu, 21 Dec 2023 14:24:37 -0600
Subject: [PATCH 2/2] fix: lint errors

---
 generative-ai/snippets/nonStreamingContent.js          | 3 +--
 generative-ai/snippets/nonStreamingMultipartContent.js | 1 -
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/generative-ai/snippets/nonStreamingContent.js b/generative-ai/snippets/nonStreamingContent.js
index e1b486582e..1d82a7d7bb 100644
--- a/generative-ai/snippets/nonStreamingContent.js
+++ b/generative-ai/snippets/nonStreamingContent.js
@@ -21,9 +21,8 @@ const {VertexAI} = require('@google-cloud/vertexai');
 async function createNonStreamingContent(
   projectId = 'PROJECT_ID',
   location = 'us-central1',
-  model = 'gemini-pro',
+  model = 'gemini-pro'
 ) {
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
diff --git a/generative-ai/snippets/nonStreamingMultipartContent.js b/generative-ai/snippets/nonStreamingMultipartContent.js
index 3cf74d0344..d281919313 100644
--- a/generative-ai/snippets/nonStreamingMultipartContent.js
+++ b/generative-ai/snippets/nonStreamingMultipartContent.js
@@ -25,7 +25,6 @@ async function createNonStreamingMultipartContent(
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
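
Usage sketch (illustrative only, not part of either commit): because the placeholder defaults ('LOCATION_ID', 'MODEL') are replaced with runnable values ('us-central1', 'gemini-pro') and every sample still spreads process.argv.slice(2) into its function, the updated samples can now run with only a project ID. The runner below follows the countTokens.js pattern; the file name, function name, and prompt text are hypothetical, and the preview.getGenerativeModel call is taken from the full sample rather than the hunks shown above.

// countTokensUsage.js - hypothetical runner; assumes the @google-cloud/vertexai
// package is installed and application-default credentials are configured.
const {VertexAI} = require('@google-cloud/vertexai');

async function countTokensExample(
  projectId = 'PROJECT_ID',
  location = 'us-central1', // same defaults the patch introduces
  model = 'gemini-pro'
) {
  // Initialize Vertex AI with the project and location, as the samples do
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model (mirrors the full sample, not shown in this excerpt)
  const generativeModel = vertexAI.preview.getGenerativeModel({model: model});

  // Count tokens for a simple, hypothetical prompt
  const countTokensResp = await generativeModel.countTokens({
    contents: [{role: 'user', parts: [{text: 'Hello, Gemini.'}]}],
  });
  console.log('count tokens response: ', countTokensResp);
}

// With the new defaults, only the project ID is strictly required, e.g.:
//   node countTokensUsage.js my-project-id
//   node countTokensUsage.js my-project-id us-central1 gemini-pro
countTokensExample(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});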