diff --git a/.github/auto-label.yaml b/.github/auto-label.yaml index 2ef6e6a402..450d366f8a 100644 --- a/.github/auto-label.yaml +++ b/.github/auto-label.yaml @@ -45,6 +45,7 @@ path: eventarc: "eventarc" error-reporting: "clouderrorreporting" functions: "cloudfunctions" + generative-ai: "genai" game-servers: "gameservices" healthcare: "healhcare" iam: "iam" diff --git a/.github/workflows/generative-ai-snippets.yaml b/.github/workflows/generative-ai-snippets.yaml new file mode 100644 index 0000000000..1dc64a5b13 --- /dev/null +++ b/.github/workflows/generative-ai-snippets.yaml @@ -0,0 +1,106 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: generative-ai-snippets +on: + push: + branches: + - main + paths: + - 'generative-ai/snippets/**' + - '.github/workflows/generative-ai-snippets.yaml' + pull_request: + paths: + - 'generative-ai/snippets/**' + - '.github/workflows/generative-ai-snippets.yaml' + pull_request_target: + types: [labeled] + paths: + - 'generative-ai/snippets/**' + - '.github/workflows/generative-ai-snippets.yaml' + schedule: + - cron: '0 0 * * 0' +jobs: + test: + if: github.event.action != 'labeled' || github.event.label.name == 'actions:force-run' + runs-on: ubuntu-latest + timeout-minutes: 120 + permissions: + contents: 'read' + id-token: 'write' + defaults: + run: + working-directory: 'generative-ai/snippets' + steps: + - uses: actions/checkout@v4.1.0 + with: + ref: ${{github.event.pull_request.head.sha}} + - uses: 'google-github-actions/auth@v1.1.1' + with: + workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider' + service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com' + create_credentials_file: 'true' + access_token_lifetime: 600s + - id: secrets + uses: 'google-github-actions/get-secretmanager-secrets@v1' + with: + secrets: |- + caip_id:nodejs-docs-samples-tests/nodejs-docs-samples-ai-platform-caip-project-id + location:nodejs-docs-samples-tests/nodejs-docs-samples-ai-platform-location + - uses: actions/setup-node@v4.0.0 + with: + node-version: 16 + - name: Get npm cache directory + id: npm-cache-dir + shell: bash + run: echo "dir=$(npm config get cache)" >> ${GITHUB_OUTPUT} + - uses: actions/cache@v3 + id: npm-cache + with: + path: ${{ steps.npm-cache-dir.outputs.dir }} + key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-node- + - name: install repo dependencies + run: npm install + working-directory: . 
+ - name: install directory dependencies + run: npm install + - run: npm run build --if-present + - name: set env vars for scheduled run + if: github.event_name == 'schedule' + run: | + echo "MOCHA_REPORTER_SUITENAME=generative-ai-snippets" >> $GITHUB_ENV + echo "MOCHA_REPORTER_OUTPUT=${{github.run_id}}_sponge_log.xml" >> $GITHUB_ENV + echo "MOCHA_REPORTER=xunit" >> $GITHUB_ENV + - run: npm test + env: + LOCATION: ${{ steps.secrets.outputs.location }} + CAIP_PROJECT_ID: ${{ steps.secrets.outputs.caip_id }} + - name: upload test results for FlakyBot workflow + if: github.event_name == 'schedule' && always() + uses: actions/upload-artifact@v3 + env: + MOCHA_REPORTER_OUTPUT: "${{github.run_id}}_sponge_log.xml" + with: + name: test-results + path: generative-ai/snippets/${{ env.MOCHA_REPORTER_OUTPUT }} + retention-days: 1 + flakybot: + permissions: + contents: 'read' + id-token: 'write' + if: github.event_name == 'schedule' && always() # always() submits logs even if tests fail + uses: ./.github/workflows/flakybot.yaml + needs: [test] diff --git a/.github/workflows/utils/workflows-secrets.json b/.github/workflows/utils/workflows-secrets.json index e426ce37a9..9c816a1f8b 100644 --- a/.github/workflows/utils/workflows-secrets.json +++ b/.github/workflows/utils/workflows-secrets.json @@ -5,5 +5,6 @@ "iam/deny", "security-center/snippets", "storagetransfer", + "generative-ai/snippets", "vision" ] diff --git a/CODEOWNERS b/CODEOWNERS index 6257ebd48e..d9e0907476 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -50,6 +50,7 @@ monitoring/opencensus @GoogleCloudPlatform/nodejs-samples-reviewers # Data & AI ai-platform @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers +generative-ai @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers automl @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers cloud-language @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers 
contact-center-insights @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers diff --git a/generative-ai/snippets/countTokens.js b/generative-ai/snippets/countTokens.js new file mode 100644 index 0000000000..68847dc62d --- /dev/null +++ b/generative-ai/snippets/countTokens.js @@ -0,0 +1,52 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function countTokens( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_token_count] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + // const model = 'gemini-pro'; + + // Initialize Vertex with your Cloud project and location + const vertex_ai = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertex_ai.preview.getGenerativeModel({ + model: model, + }); + + const req = { + contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}], + }; + + const countTokensResp = await generativeModel.countTokens(req); + console.log('count tokens response: ', countTokensResp); + + // [END aiplatform_gemini_token_count] +} + +countTokens(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/nonStreamingChat.js b/generative-ai/snippets/nonStreamingChat.js new file mode 100644 index 0000000000..199c387498 --- /dev/null +++ b/generative-ai/snippets/nonStreamingChat.js @@ -0,0 +1,73 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +function wait(time) { + return new Promise(resolve => { + setTimeout(resolve, time); + }); +} + +async function createNonStreamingChat( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // TODO: Find better method. 
Setting delay to give api time to respond, otherwise it will 404 + // await wait(10); + + // [START aiplatform_gemini_multiturn_chat] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const chat = generativeModel.startChat({}); + + const chatInput1 = 'Hello'; + console.log(`User: ${chatInput1}`); + + const result1 = await chat.sendMessage(chatInput1); + const response1 = result1.response.candidates[0].content.parts[0].text; + console.log('Chat bot: ', response1); + + const chatInput2 = 'Can you tell me a scientific fun fact?'; + console.log(`User: ${chatInput2}`); + const result2 = await chat.sendMessage(chatInput2); + const response2 = result2.response.candidates[0].content.parts[0].text; + console.log('Chat bot: ', response2); + + const chatInput3 = 'How can I learn more about that?'; + console.log(`User: ${chatInput3}`); + const result3 = await chat.sendMessage(chatInput3); + const response3 = result3.response.candidates[0].content.parts[0].text; + console.log('Chat bot: ', response3); + + // [END aiplatform_gemini_multiturn_chat] +} + +createNonStreamingChat(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/nonStreamingContent.js b/generative-ai/snippets/nonStreamingContent.js new file mode 100644 index 0000000000..21936c9d01 --- /dev/null +++ b/generative-ai/snippets/nonStreamingContent.js @@ -0,0 +1,64 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createNonStreamingContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_function_calling] + + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const request = { + contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}], + }; + + console.log('Prompt:'); + console.log(request.contents[0].parts[0].text); + console.log('Non-Streaming Response Text:'); + + // Create the response stream + const responseStream = await generativeModel.generateContentStream(request); + + // Wait for the response stream to complete + const aggregatedResponse = await responseStream.response; + + // Select the text from the response + const fullTextResponse = + aggregatedResponse.candidates[0].content.parts[0].text; + + console.log(fullTextResponse); + + // [END aiplatform_gemini_function_calling] +} + +createNonStreamingContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/nonStreamingMultipartContent.js b/generative-ai/snippets/nonStreamingMultipartContent.js new file mode 100644 
index 0000000000..e899886d3e --- /dev/null +++ b/generative-ai/snippets/nonStreamingMultipartContent.js @@ -0,0 +1,81 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createNonStreamingMultipartContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL', + image = 'gs://generativeai-downloads/images/scones.jpg', + mimeType = 'image/jpeg' +) { + // [START aiplatform_gemini_get_started] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image + // const mimeType = 'image/jpeg'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeVisionModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + // For images, the SDK supports both Google Cloud Storage URI and base64 strings + const filePart = { + file_data: { + file_uri: image, + mime_type: mimeType, + }, + }; + + const textPart = { + text: 'what is shown in this image?', + }; + + const request = { + contents: [{role: 'user', parts: [textPart, filePart]}], + }; + + console.log('Prompt Text:'); + console.log(request.contents[0].parts[0].text); + console.log('Non-Streaming Response Text:'); + + // Create the response stream + const responseStream = + await generativeVisionModel.generateContentStream(request); + + // Wait for the response stream to complete + const aggregatedResponse = await responseStream.response; + + // Select the text from the response + const fullTextResponse = + aggregatedResponse.candidates[0].content.parts[0].text; + + console.log(fullTextResponse); + + // [END aiplatform_gemini_get_started] +} + +createNonStreamingMultipartContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/package.json b/generative-ai/snippets/package.json new file mode 100644 index 0000000000..765acfa217 --- /dev/null +++ b/generative-ai/snippets/package.json @@ -0,0 +1,27 @@ +{ + "name": "nodejs-generativeai-samples", + "private": true, + "license": "Apache-2.0", + "author": "Google LLC", + "engines": { + "node": ">=16.0.0" + }, + "files": [ + "*.js" + ], + "scripts": { + "test": "c8 mocha -p -j 2 --timeout 2400000 test/*.test.js" + }, + 
"dependencies": { + "@google-cloud/aiplatform": "^3.0.0", + "@google-cloud/vertexai": "github:googleapis/nodejs-vertexai", + "supertest": "^6.3.3" + }, + "devDependencies": { + "c8": "^8.0.0", + "chai": "^4.2.0", + "mocha": "^10.0.0", + "sinon": "^16.0.0", + "uuid": "^9.0.0" + } +} diff --git a/generative-ai/snippets/safetySettings.js b/generative-ai/snippets/safetySettings.js new file mode 100644 index 0000000000..deb3c7f046 --- /dev/null +++ b/generative-ai/snippets/safetySettings.js @@ -0,0 +1,72 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const { + VertexAI, + HarmCategory, + HarmBlockThreshold, +} = require('@google-cloud/vertexai'); + +async function createStreamContent() { + // [START aiplatform_gemini_safety_settings] + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + const projectId = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + // The following parameters are optional + // They can also be passed to individual content generation requests + safety_settings: [ + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + }, + ], + generation_config: {max_output_tokens: 256}, + }); + + const request = { + contents: [{role: 'user', parts: [{text: 'Tell me something dangerous.'}]}], + }; + + console.log('Prompt:'); + console.log(request.contents[0].parts[0].text); + console.log('Streaming Response Text:'); + + // Create the response stream + const responseStream = await generativeModel.generateContentStream(request); + + // Log the text response as it streams + for await (const item of responseStream.stream) { + if (item.candidates[0].finishReason === 'SAFETY') { + console.log('This response stream terminated due to safety concerns.'); + } else { + process.stdout.write(item.candidates[0].content.parts[0].text); + } + } + // [END aiplatform_gemini_safety_settings] +} + +createStreamContent(...process.argv.slice(3)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/sendMultiModalPromptWithImage.js b/generative-ai/snippets/sendMultiModalPromptWithImage.js new file mode 100644 index 0000000000..f9d1d486a4 --- /dev/null +++ b/generative-ai/snippets/sendMultiModalPromptWithImage.js @@ -0,0 +1,29 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function sendMultiModalPromptWithImage( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_single_turn_multi_image] + // [END aiplatform_gemini_single_turn_multi_image] +} + +sendMultiModalPromptWithImage(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/sendMultiModalPromptWithVideo.js b/generative-ai/snippets/sendMultiModalPromptWithVideo.js new file mode 100644 index 0000000000..a7564c7855 --- /dev/null +++ b/generative-ai/snippets/sendMultiModalPromptWithVideo.js @@ -0,0 +1,29 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const {VertexAI} = require('@google-cloud/vertexai'); + +async function sendMultiModalPromptWithVideo( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_single_turn_video] + // [END aiplatform_gemini_single_turn_video] +} + +sendMultiModalPromptWithVideo(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/streamChat.js b/generative-ai/snippets/streamChat.js new file mode 100644 index 0000000000..b28536129c --- /dev/null +++ b/generative-ai/snippets/streamChat.js @@ -0,0 +1,53 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createStreamChat( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_multiturn_chat] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const chat = generativeModel.startChat({}); + + const chatInput1 = 'How can I learn more about that?'; + console.log(`User: ${chatInput1}`); + const result1 = await chat.sendMessageStream(chatInput1); + for await (const item of result1.stream) { + console.log(item.candidates[0].content.parts[0].text); + } + + // [END aiplatform_gemini_multiturn_chat] +} + +createStreamChat(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/streamContent.js b/generative-ai/snippets/streamContent.js new file mode 100644 index 0000000000..6f2ed3e2bc --- /dev/null +++ b/generative-ai/snippets/streamContent.js @@ -0,0 +1,60 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createStreamContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL' +) { + // [START aiplatform_gemini_function_calling] + + /** + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + const request = { + contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}], + }; + + console.log('Prompt:'); + console.log(request.contents[0].parts[0].text); + console.log('Streaming Response Text:'); + + // Create the response stream + const responseStream = await generativeModel.generateContentStream(request); + + // Log the text response as it streams + for await (const item of responseStream.stream) { + process.stdout.write(item.candidates[0].content.parts[0].text); + } + + // [END aiplatform_gemini_function_calling] +} + +createStreamContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/streamMultipartContent.js b/generative-ai/snippets/streamMultipartContent.js new file mode 100644 index 0000000000..aa816baf92 --- /dev/null +++ b/generative-ai/snippets/streamMultipartContent.js @@ -0,0 +1,77 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const {VertexAI} = require('@google-cloud/vertexai'); + +async function createStreamMultipartContent( + projectId = 'PROJECT_ID', + location = 'LOCATION_ID', + model = 'MODEL', + image = 'gs://generativeai-downloads/images/scones.jpg', + mimeType = 'image/jpeg' +) { + // [START aiplatform_gemini_get_started] + + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const projectId = 'your-project-id'; + // const location = 'us-central1'; + // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image + // const mimeType = 'image/jpeg'; + + // Initialize Vertex with your Cloud project and location + const vertexAI = new VertexAI({project: projectId, location: location}); + + // Instantiate the model + const generativeVisionModel = vertexAI.preview.getGenerativeModel({ + model: model, + }); + + // For images, the SDK supports both Google Cloud Storage URI and base64 strings + const filePart = { + file_data: { + file_uri: image, + mime_type: mimeType, + }, + }; + + const textPart = { + text: 'what is shown in this image?', + }; + + const request = { + contents: [{role: 'user', parts: [textPart, filePart]}], + }; + + console.log('Prompt Text:'); + console.log(request.contents[0].parts[0].text); + console.log('Streaming Response Text:'); + + // Create the response stream + const responseStream = + await generativeVisionModel.generateContentStream(request); + + // Log the text response as it streams + for await (const item of responseStream.stream) { + process.stdout.write(item.candidates[0].content.parts[0].text); + } + + // [END aiplatform_gemini_get_started] +} + +createStreamMultipartContent(...process.argv.slice(2)).catch(err => { + console.error(err.message); + process.exitCode = 1; +}); diff --git a/generative-ai/snippets/test/countTokens.test.js b/generative-ai/snippets/test/countTokens.test.js new file mode 100644 index 0000000000..d168b26fe5 --- /dev/null +++ 
b/generative-ai/snippets/test/countTokens.test.js @@ -0,0 +1,36 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Count tokens', async () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should count tokens', async () => { + const output = execSync( + `node ./countTokens.js ${project} ${location} ${model}` + ); + + // Expect 6 tokens + assert(output.match('totalTokens: 6')); + }); +}); diff --git a/generative-ai/snippets/test/nonStreamingChat.test.js b/generative-ai/snippets/test/nonStreamingChat.test.js new file mode 100644 index 0000000000..24ffe5a58b --- /dev/null +++ b/generative-ai/snippets/test/nonStreamingChat.test.js @@ -0,0 +1,38 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI NonStreaming Chat', async () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should create nonstreaming chat and begin the conversation the same in each instance', async () => { + const output = execSync( + `node ./nonStreamingChat.js ${project} ${location} ${model}` + ); + + // Ensure that the beginning of the conversation is consistent + assert(output.match(/User: Hello/)); + assert(output.match(/User: Can you tell me a scientific fun fact?/)); + assert(output.match(/User: How can I learn more about that?/)); + }); +}); diff --git a/generative-ai/snippets/test/nonStreamingContent.test.js b/generative-ai/snippets/test/nonStreamingContent.test.js new file mode 100644 index 0000000000..51134de8b2 --- /dev/null +++ b/generative-ai/snippets/test/nonStreamingContent.test.js @@ -0,0 +1,38 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +'use strict'; + +const {assert} = require('chai'); +const {describe, it} = require('mocha'); +const cp = require('child_process'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +describe('Generative AI NonStreaming Content', () => { + const project = 'cloud-llm-preview1'; + const location = 'us-central1'; + const model = 'gemini-pro'; + + it('should create nonstreaming content and begin the conversation the same in each instance', async () => { + const output = execSync( + `node ./nonStreamingContent.js ${project} ${location} ${model}` + ); + + // Ensure that the beginning of the conversation is consistent + assert(output.match(/Prompt:/)); + assert(output.match(/What is Node.js/)); + assert(output.match(/Non-Streaming Response Text:/)); + }); +}); diff --git a/generative-ai/snippets/test/nonStreamingMultipartContent.test.js b/generative-ai/snippets/test/nonStreamingMultipartContent.test.js new file mode 100644 index 0000000000..5e0888bb86 --- /dev/null +++ b/generative-ai/snippets/test/nonStreamingMultipartContent.test.js @@ -0,0 +1,40 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Helper: run a command synchronously and return its stdout as UTF-8 text.
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

describe('Generative AI NonStreaming Multipart Content', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro-vision';
  const image = 'gs://generativeai-downloads/images/scones.jpg';

  it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {
    // Run the multipart (text + image) snippet as a child process.
    const cmd = `node ./nonStreamingMultipartContent.js ${project} ${location} ${model} ${image}`;
    const output = execSync(cmd);

    // The conversation should reference the prompt, the response marker,
    // and the expected content for this scone image.
    assert.match(output, /Prompt Text:/);
    assert.match(output, /what is shown in this image/);
    assert.match(output, /Non-Streaming Response Text:/);
    assert.match(output, /scone/);
  });
});
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Run a shell command synchronously, capturing stdout as a UTF-8 string.
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

// NOTE: the `describe` callback must be synchronous — Mocha ignores a
// returned Promise from `describe`, so `async` here silently does nothing.
// Only `it` callbacks may be async.
describe('Safety settings', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro';

  it('should reject a dangerous request', async () => {
    // Invoke the snippet under test as a child process and capture its output.
    const output = execSync(
      `node ./safetySettings.js ${project} ${location} ${model}`
    );

    // Expect rejection due to safety concerns
    assert(
      output.match('This response stream terminated due to safety concerns')
    );
  });
});
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Helper: run a command synchronously and return its stdout as UTF-8 text.
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

describe('Generative AI Stream Chat', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro';

  it('should create stream chat and begin the conversation the same in each instance', async () => {
    // Run the streaming-chat snippet as a child process.
    const cmd = `node ./streamChat.js ${project} ${location} ${model}`;
    const output = execSync(cmd);

    // The user's follow-up question should always appear in the transcript.
    assert.match(output, /User: How can I learn more about that/);
  });
});
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Run a shell command synchronously, capturing stdout as a UTF-8 string.
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

describe('Generative AI Stream Content', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro';

  it('should create stream content', async () => {
    // Invoke the snippet under test as a child process and capture its output.
    const output = execSync(
      `node ./streamContent.js ${project} ${location} ${model}`
    );
    // Ensure that the beginning of the conversation is consistent.
    // The `.` in "Node.js" is escaped — unescaped it matches any character
    // (e.g. "Nodexjs"), weakening the assertion.
    assert(output.match(/Prompt:/));
    assert(output.match(/What is Node\.js/));
    assert(output.match(/Streaming Response Text:/));
  });
});
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Run a shell command synchronously, capturing stdout as a UTF-8 string.
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

describe('Generative AI Stream Multipart Content', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro-vision';
  const image = 'gs://generativeai-downloads/images/scones.jpg';

  it('should create stream multipart content', async () => {
    // Invoke the snippet under test as a child process and capture its output.
    const output = execSync(
      `node ./streamMultipartContent.js ${project} ${location} ${model} ${image}`
    );

    // Match against the whole output instead of hard-coded line indices
    // (conversation[0..3]): fixed indices break as soon as the script emits
    // an extra blank line or wraps its output differently.
    const promptIdx = output.indexOf('Prompt Text:');
    const responseIdx = output.indexOf('Streaming Response Text:');
    assert(promptIdx !== -1);
    assert(output.match(/what is shown in this image/));
    assert(responseIdx !== -1);
    assert(output.match(/scones/));
    // Preserve the original ordering check: prompt precedes the response.
    assert(promptIdx < responseIdx);
  });
});