fix: updating genai tests to run on same gcp project as ai platform samples #3596

Merged: 8 commits, Feb 21, 2024
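
Every file in this PR makes the same change: the hardcoded cloud-llm-preview1 project and us-central1 region are replaced with values read from the environment, so the tests run against whichever GCP project CI injects. A minimal sketch of the resulting pattern, with an illustrative guard that is not part of the diff:

// Shared pattern across the updated tests: resolve configuration from
// the environment instead of hardcoding a project.
const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;

// Illustrative addition (not in the PR): fail fast with a clear message
// instead of interpolating the string "undefined" into the sample command.
if (!projectId || !location) {
  throw new Error('Set CAIP_PROJECT_ID and LOCATION to run these tests');
}
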
generative-ai/snippets/test/countTokens.test.js (5 additions, 6 deletions)

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Count tokens', async () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro';

+describe('Count tokens', async () => {
   it('should count tokens', async () => {
     const output = execSync(
-      `node ./countTokens.js ${project} ${location} ${model}`
+      `node ./countTokens.js ${projectId} ${location} ${model}`
     );

     // Expect 6 tokens
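
On the receiving side, each sample takes these values as positional CLI arguments. A hypothetical sketch of the consuming end, assuming the samples follow the repo's usual main(...process.argv.slice(2)) convention; the function body is a stand-in, not the real sample:

// Hypothetical consuming end in countTokens.js style. The argument
// forwarding is the point here; the body is a placeholder.
async function main(projectId, location, model) {
  console.log(`Counting tokens with ${model} in ${location} for project ${projectId}`);
}

main(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
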
generative-ai/snippets/test/nonStreamingChat.test.js (5 additions, 6 deletions)

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Generative AI NonStreaming Chat', async () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro';

+describe('Generative AI NonStreaming Chat', async () => {
   it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./nonStreamingChat.js ${project} ${location} ${model}`
+      `node ./nonStreamingChat.js ${projectId} ${location} ${model}`
     );

     // Ensure that the beginning of the conversation is consistent
generative-ai/snippets/test/nonStreamingContent.test.js (5 additions, 6 deletions)

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Generative AI NonStreaming Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro';

+describe('Generative AI NonStreaming Content', () => {
   it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./nonStreamingContent.js ${project} ${location} ${model}`
+      `node ./nonStreamingContent.js ${projectId} ${location} ${model}`
     );

     // Ensure that the beginning of the conversation is consistent
generative-ai/snippets/test/nonStreamingMultipartContent.test.js

@@ -17,18 +17,18 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro-vision';
+
 describe('Generative AI NonStreaming Multipart Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro-vision';
   const image = 'gs://generativeai-downloads/images/scones.jpg';

   it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./nonStreamingMultipartContent.js ${project} ${location} ${model} ${image}`
+      `node ./nonStreamingMultipartContent.js ${projectId} ${location} ${model} ${image}`
     );

     // Ensure that the conversation is what we expect for this scone image
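
The multipart tests additionally pass a GCS image URI as a fourth argument. A rough sketch of what the sample under test does with it, assuming the @google-cloud/vertexai SDK shape of the time; the prompt text and mimeType here are illustrative assumptions, not the PR's code:

// Rough sketch of the multipart request shape (not the PR's code).
const {VertexAI} = require('@google-cloud/vertexai');

async function nonStreamingMultipartSketch(projectId, location, model, image) {
  const vertexAI = new VertexAI({project: projectId, location});
  const generativeVisionModel = vertexAI.getGenerativeModel({model});

  // One user turn combining the GCS image and a text prompt.
  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {fileData: {fileUri: image, mimeType: 'image/jpeg'}},
          {text: 'What is shown in this image?'},
        ],
      },
    ],
  };

  const result = await generativeVisionModel.generateContent(request);
  console.log(JSON.stringify(result.response));
}
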
generative-ai/snippets/test/safetySettings.test.js (5 additions, 6 deletions)

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Safety settings', async () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro';

+describe('Safety settings', async () => {
   it('should reject a dangerous request', async () => {
     const output = execSync(
-      `node ./safetySettings.js ${project} ${location} ${model}`
+      `node ./safetySettings.js ${projectId} ${location} ${model}`
     );

     // Expect rejection due to safety concerns
generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Generative AI Stream MultiModal with Image', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro-vision';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro-vision';

+describe('Generative AI Stream MultiModal with Image', () => {
   it('should create stream multimodal content', async () => {
     const output = execSync(
-      `node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
+      `node ./sendMultiModalPromptWithImage.js ${projectId} ${location} ${model}`
     );
     // Ensure that the conversation is what we expect for these images
     assert(output.match(/Paris/));
generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Generative AI Stream MultiModal with Video', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro-vision';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro-vision';

+describe('Generative AI Stream MultiModal with Video', () => {
   it('should create stream multimodal content', async () => {
     const output = execSync(
-      `node ./sendMultiModalPromptWithVideo.js ${project} ${location} ${model}`
+      `node ./sendMultiModalPromptWithVideo.js ${projectId} ${location} ${model}`
     );
     // Ensure that the conversation is what we expect for these images
     assert(output.match(/advertisement/));
generative-ai/snippets/test/streamChat.test.js (5 additions, 6 deletions)

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Generative AI Stream Chat', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro';

+describe('Generative AI Stream Chat', () => {
   it('should create stream chat and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./streamChat.js ${project} ${location} ${model}`
+      `node ./streamChat.js ${projectId} ${location} ${model}`
     );

     // Assert that the advice given for learning is what we expect
generative-ai/snippets/test/streamContent.test.js (5 additions, 6 deletions)

@@ -17,17 +17,16 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

-describe('Generative AI Stream Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro';
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro';

+describe('Generative AI Stream Content', () => {
   it('should create stream content', async () => {
     const output = execSync(
-      `node ./streamContent.js ${project} ${location} ${model}`
+      `node ./streamContent.js ${projectId} ${location} ${model}`
     );
     // Ensure that the beginning of the conversation is consistent
     assert(output.match(/Prompt:/));
generative-ai/snippets/test/streamMultipartContent.test.js (5 additions, 5 deletions)

@@ -17,18 +17,18 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');

 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-pro-vision';
+
 describe('Generative AI Stream Multipart Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-pro-vision';
   const image = 'gs://generativeai-downloads/images/scones.jpg';

   it('should create stream multipart content', async () => {
     const output = execSync(
-      `node ./streamMultipartContent.js ${project} ${location} ${model} ${image}`
+      `node ./streamMultipartContent.js ${projectId} ${location} ${model} ${image}`
     );
     // Split up conversation output
     const conversation = output.split('\n');
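
With the hardcoded project gone, a local run has to supply the same variables CI does. A hypothetical runner in the tests' own execSync style; the project ID is a placeholder, and mocha is assumed to be the runner since the tests require it:

// Hypothetical local runner, executed from generative-ai/snippets.
// 'my-gcp-project' is a placeholder; substitute a project you can use.
const cp = require('child_process');

const env = {
  ...process.env,
  CAIP_PROJECT_ID: process.env.CAIP_PROJECT_ID || 'my-gcp-project',
  LOCATION: process.env.LOCATION || 'us-central1',
};

const output = cp.execSync('npx mocha test/countTokens.test.js', {
  encoding: 'utf-8',
  env,
});
console.log(output);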