Skip to content

Commit c401daf

Browse files
authored
feat(vertexai): Update sample models to gemini-1.5-flash-001 (#3705)
* feat(vertexai): Update sample models to gemini-1.5-flash-001
* fix: Revert model changes for grounding until feature is ready for release
1 parent d271b5d commit c401daf

34 files changed

+46
-46
lines changed

generative-ai/snippets/count-tokens/countTokens.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2222
async function countTokens(
2323
projectId = 'PROJECT_ID',
2424
location = 'us-central1',
25-
model = 'gemini-1.0-pro-002'
25+
model = 'gemini-1.5-flash-001'
2626
) {
2727
// Initialize Vertex with your Cloud project and location
2828
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/count-tokens/countTokensAdvanced.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function countTokens(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.5-pro-preview-0409'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/function-calling/functionCallingAdvanced.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ const generationConfig = {
6565
async function functionCallingAdvanced(
6666
projectId = 'PROJECT_ID',
6767
location = 'us-central1',
68-
model = 'gemini-1.0-pro-001'
68+
model = 'gemini-1.5-flash-001'
6969
) {
7070
// Initialize Vertex with your Cloud project and location
7171
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/function-calling/functionCallingBasic.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ const functionDeclarations = [
4646
async function functionCallingBasic(
4747
projectId = 'PROJECT_ID',
4848
location = 'us-central1',
49-
model = 'gemini-1.0-pro-001'
49+
model = 'gemini-1.5-flash-001'
5050
) {
5151
// Initialize Vertex with your Cloud project and location
5252
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/function-calling/functionCallingStreamChat.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ const functionResponseParts = [
5555
async function functionCallingStreamChat(
5656
projectId = 'PROJECT_ID',
5757
location = 'us-central1',
58-
model = 'gemini-1.0-pro-002'
58+
model = 'gemini-1.5-flash-001'
5959
) {
6060
// Initialize Vertex with your Cloud project and location
6161
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/function-calling/functionCallingStreamContent.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ const functionResponseParts = [
5555
async function functionCallingStreamContent(
5656
projectId = 'PROJECT_ID',
5757
location = 'us-central1',
58-
model = 'gemini-1.0-pro-002'
58+
model = 'gemini-1.5-flash-001'
5959
) {
6060
// Initialize Vertex with your Cloud project and location
6161
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/gemini-text-input.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ async function generate_from_text_input(projectId = 'PROJECT_ID') {
2222
const vertexAI = new VertexAI({project: projectId, location: 'us-central1'});
2323

2424
const generativeModel = vertexAI.getGenerativeModel({
25-
model: 'gemini-1.0-pro-002',
25+
model: 'gemini-1.5-flash-001',
2626
});
2727

2828
const prompt =

generative-ai/snippets/inference/nonStreamMultiModalityBasic.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function generateContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.5-pro-preview-0409'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex AI
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/inference/nonStreamTextBasic.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function generateContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.0-pro-002'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/inference/streamMultiModalityBasic.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function generateContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.5-pro-preview-0409'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex AI
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/inference/streamTextBasic.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function generateContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.0-pro-002'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/nonStreamingChat.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createNonStreamingChat(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.0-pro-002'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/nonStreamingContent.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createNonStreamingContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.0-pro-002'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/safetySettings.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ const {
2525
async function setSafetySettings(
2626
projectId = 'PROJECT_ID',
2727
location = 'us-central1',
28-
model = 'gemini-1.0-pro-001'
28+
model = 'gemini-1.5-flash-001'
2929
) {
3030
// Initialize Vertex with your Cloud project and location
3131
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/streamChat.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createStreamChat(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.0-pro-002'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/streamContent.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createStreamContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-1.0-pro-002'
24+
model = 'gemini-1.5-flash-001'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/test/count-tokens/countTokens.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Count tokens', async () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Count tokens', async () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should count tokens', async () => {
3636
const output = execSync(

generative-ai/snippets/test/count-tokens/countTokensAdvanced.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.5-pro-preview-0409';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Count tokens advanced', async () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Count tokens advanced', async () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should count tokens in a multimodal prompt', async () => {
3636
const output = execSync(

generative-ai/snippets/test/function-calling/functionCallingAdvanced.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-001';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Function Calling Advanced', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Function Calling Advanced', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should define multiple functions and have the model invoke the specified one', async () => {
3636
const output = execSync(

generative-ai/snippets/test/function-calling/functionCallingBasic.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-001';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Function Calling', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Function Calling', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should define a function and have the model invoke it', async () => {
3636
const output = execSync(

generative-ai/snippets/test/function-calling/functionCallingStreamChat.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.5-pro-preview-0409';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Function Calling Stream Chat', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Function Calling Stream Chat', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.5-pro-preview-0409';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should create stream chat and begin the conversation the same in each instance', async () => {
3636
const output = execSync(

generative-ai/snippets/test/function-calling/functionCallingStreamContent.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Function Calling Stream Content', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Function Calling Stream Content', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should create stream chat and begin the conversation the same in each instance', async () => {
3636
const output = execSync(

generative-ai/snippets/test/gemini-system-instruction.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,8 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323

24-
describe('Set sytem instruction', async () => {
25-
it('should set sytem instruction', async () => {
24+
describe('Set system instruction', async () => {
25+
it('should set system instruction', async () => {
2626
const output = execSync(`node ./gemini-system-instruction.js ${projectId}`);
2727

2828
assert(output.length > 0);

generative-ai/snippets/test/grounding/groundingPrivateDataBasic.test.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ describe('Private data grounding', async () => {
3131
*/
3232
// const projectId = 'YOUR_PROJECT_ID';
3333
// const location = 'YOUR_LOCATION';
34-
// const model = 'gemini-1.0-pro';
34+
// const model = 'gemini-1.0-pro-002';
3535

3636
it('should ground results in private VertexAI search data', async () => {
3737
const output = execSync(

generative-ai/snippets/test/grounding/groundingPublicDataBasic.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,12 +30,12 @@ describe('Google search grounding', async () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.0-pro-002';
3434

3535
it('should ground results in public search data', async () => {
3636
const output = execSync(
3737
`node ./grounding/groundingPublicDataBasic.js ${projectId} ${location} ${model}`
3838
);
39-
assert(output.match(/webSearchQueries.*Why is the sky blue?/));
39+
assert(output.match(/webSearchQueries.*why is the sky blue?/));
4040
});
4141
});

generative-ai/snippets/test/inference/nonStreamMultiModalityBasic.test.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ describe('Generative AI Multimodal Text Inference', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.5-pro-preview-0409';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should generate text based on a prompt containing text, a video, and an image', async () => {
3636
const output = execSync(

generative-ai/snippets/test/inference/nonStreamTextBasic.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Basic Text Inference', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Basic Text Inference', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should create a generative text model and infer text from a prompt', async () => {
3636
const output = execSync(

generative-ai/snippets/test/inference/streamMultiModalityBasic.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.5-pro-preview-0409';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Basic Multimodal Text Inference Streaming', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Basic Multimodal Text Inference Streaming', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.5-pro-preview-0409';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should create a generative text model and infer text from a prompt, streaming the results', async () => {
3636
const output = execSync(

generative-ai/snippets/test/inference/streamTextBasic.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Basic Text Inference Streaming', () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI Basic Text Inference Streaming', () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should create a generative text model and infer text from a prompt, streaming the results', async () => {
3636
const output = execSync(

generative-ai/snippets/test/nonStreamingChat.test.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI NonStreaming Chat', async () => {
2727
/**
@@ -30,7 +30,7 @@ describe('Generative AI NonStreaming Chat', async () => {
3030
*/
3131
// const projectId = 'YOUR_PROJECT_ID';
3232
// const location = 'YOUR_LOCATION';
33-
// const model = 'gemini-1.0-pro';
33+
// const model = 'gemini-1.5-flash-001';
3434

3535
it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
3636
const output = execSync(

generative-ai/snippets/test/nonStreamingContent.test.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI NonStreaming Content', () => {
2727
/**

generative-ai/snippets/test/safetySettings.test.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-001';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Safety settings', async () => {
2727
/**

generative-ai/snippets/test/streamChat.test.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2121

2222
const projectId = process.env.CAIP_PROJECT_ID;
2323
const location = process.env.LOCATION;
24-
const model = 'gemini-1.0-pro-002';
24+
const model = 'gemini-1.5-flash-001';
2525

2626
describe('Generative AI Stream Chat', () => {
2727
/**

0 commit comments

Comments (0)