
Commit 82258c0

feat: add model to each genai sample
LukeSchlangen committed Dec 21, 2023
1 parent 8f00e33 commit 82258c0
Showing 10 changed files with 65 additions and 115 deletions.
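Every file in this commit gets the same treatment: the placeholder defaults location = 'LOCATION_ID' and model = 'MODEL' are replaced with working values (us-central1 plus gemini-pro or gemini-pro-vision), the commented-out "Uncomment these variables" block is dropped, and the // [START ...] / // [END ...] region tags move so they wrap only the function body and its closing brace instead of the whole file. Below, lines prefixed with - are removed and lines prefixed with + are added. A minimal sketch of the resulting shape, using countTokens.js as the example (the other samples differ only in function name, region tag, model, and extra parameters; the body is elided here):

    const {VertexAI} = require('@google-cloud/vertexai');

    /**
     * TODO(developer): Update these variables before running the sample.
     */
    async function countTokens(
      projectId = 'PROJECT_ID',
      location = 'us-central1',
      model = 'gemini-pro'
    ) {
      // [START aiplatform_gemini_token_count]
      // Initialize Vertex with your Cloud project and location
      const vertex_ai = new VertexAI({project: projectId, location: location});
      // ... rest of the sample body (see the diff below) ...
    }
    // [END aiplatform_gemini_token_count]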
generative-ai/snippets/countTokens.js (20 changes: 7 additions & 13 deletions)
@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_token_count]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function countTokens(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
+  // [START aiplatform_gemini_token_count]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertex_ai = new VertexAI({project: projectId, location: location});

@@ -42,9 +37,8 @@ async function countTokens(

   const countTokensResp = await generativeModel.countTokens(req);
   console.log('count tokens response: ', countTokensResp);
-
-  // [END aiplatform_gemini_token_count]
 }
+// [END aiplatform_gemini_token_count]

 countTokens(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
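Because the sample ends with countTokens(...process.argv.slice(2)), command-line arguments are spread positionally into the function, so with the new defaults it can presumably be run with nothing but a project ID (for example, node countTokens.js my-project-id). A hypothetical programmatic call, with my-project-id standing in for a real Google Cloud project:

    // Falls back to the new defaults: location 'us-central1' and model 'gemini-pro'.
    countTokens('my-project-id').catch(console.error);

    // Or override the defaults explicitly.
    countTokens('my-project-id', 'us-central1', 'gemini-pro').catch(console.error);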
generative-ai/snippets/nonStreamingChat.js (19 changes: 7 additions & 12 deletions)
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_multiturn_chat_nonstreaming]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingChat(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
+  // [START aiplatform_gemini_multiturn_chat_nonstreaming]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});

@@ -55,9 +51,8 @@ async function createNonStreamingChat(
   const result3 = await chat.sendMessage(chatInput3);
   const response3 = result3.response.candidates[0].content.parts[0].text;
   console.log('Chat bot: ', response3);
-
-  // [END aiplatform_gemini_multiturn_chat_nonstreaming]
 }
+// [END aiplatform_gemini_multiturn_chat_nonstreaming]

 createNonStreamingChat(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
generative-ai/snippets/nonStreamingContent.js (19 changes: 7 additions & 12 deletions)
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_content_nonstreaming]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro',
[GitHub Actions / lint annotation for line 24 of generative-ai/snippets/nonStreamingContent.js: Delete `,`]
 ) {
[GitHub Actions / lint annotation for line 25 of generative-ai/snippets/nonStreamingContent.js: Delete `⏎`]
+  // [START aiplatform_gemini_content_nonstreaming]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
@@ -55,9 +51,8 @@ async function createNonStreamingContent(
     aggregatedResponse.candidates[0].content.parts[0].text;

   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_content_nonstreaming]
 }
+// [END aiplatform_gemini_content_nonstreaming]

 createNonStreamingContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
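The two lint annotations above flag formatting introduced by the new signature: a trailing comma after the model default and an extra line break (the ⏎ the linter wants deleted) around the closing parenthesis. The signature the linter is asking for would presumably look like:

    async function createNonStreamingContent(
      projectId = 'PROJECT_ID',
      location = 'us-central1',
      model = 'gemini-pro'
    ) {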
generative-ai/snippets/nonStreamingMultipartContent.js (20 changes: 7 additions & 13 deletions)
@@ -12,24 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_get_started]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingMultipartContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL',
+  location = 'us-central1',
+  model = 'gemini-pro-vision',
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
+  // [START aiplatform_gemini_get_started]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image
-  // const mimeType = 'image/jpeg';

[GitHub Actions / lint annotation for line 28 of generative-ai/snippets/nonStreamingMultipartContent.js: Delete `⏎`]
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
@@ -71,9 +66,8 @@ async function createNonStreamingMultipartContent(
     aggregatedResponse.candidates[0].content.parts[0].text;

   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_get_started]
 }
+// [END aiplatform_gemini_get_started]

 createNonStreamingMultipartContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
generative-ai/snippets/safetySettings.js (18 changes: 7 additions & 11 deletions)
@@ -12,25 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_safety_settings]
 const {
   VertexAI,
   HarmCategory,
   HarmBlockThreshold,
 } = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function setSafetySettings(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
+  // [START aiplatform_gemini_safety_settings]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});

@@ -67,8 +63,8 @@ async function setSafetySettings(
       process.stdout.write(item.candidates[0].content.parts[0].text);
     }
   }
-  // [END aiplatform_gemini_safety_settings]
 }
+// [END aiplatform_gemini_safety_settings]

 setSafetySettings(...process.argv.slice(3)).catch(err => {
   console.error(err.message);
generative-ai/snippets/sendMultiModalPromptWithImage.js (4 changes: 2 additions & 2 deletions)
@@ -26,8 +26,8 @@ async function getBase64(url) {
  */
 async function sendMultiModalPromptWithImage(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro-vision'
 ) {
   // For images, the SDK supports base64 strings
   const landmarkImage1 = await getBase64(
generative-ai/snippets/sendMultiModalPromptWithVideo.js (19 changes: 7 additions & 12 deletions)
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_single_turn_video]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function sendMultiModalPromptWithVideo(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro-vision'
 ) {
+  // [START aiplatform_gemini_single_turn_video]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});

@@ -63,9 +59,8 @@ async function sendMultiModalPromptWithVideo(
     aggregatedResponse.candidates[0].content.parts[0].text;

   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_single_turn_video]
 }
+// [END aiplatform_gemini_single_turn_video]

 sendMultiModalPromptWithVideo(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
generative-ai/snippets/streamChat.js (19 changes: 7 additions & 12 deletions)
@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_multiturn_chat]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamChat(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
+  // [START aiplatform_gemini_multiturn_chat]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});

@@ -44,9 +40,8 @@ async function createStreamChat(
   for await (const item of result1.stream) {
     console.log(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_multiturn_chat]
 }
+// [END aiplatform_gemini_multiturn_chat]

 createStreamChat(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
generative-ai/snippets/streamContent.js (20 changes: 7 additions & 13 deletions)
@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_content]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
+  // [START aiplatform_gemini_content]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});

@@ -51,9 +46,8 @@ async function createStreamContent(
   for await (const item of responseStream.stream) {
     process.stdout.write(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_content]
 }
+// [END aiplatform_gemini_content]

 createStreamContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
generative-ai/snippets/streamMultipartContent.js (22 changes: 7 additions & 15 deletions)
@@ -12,26 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// [START aiplatform_gemini_get_started]
 const {VertexAI} = require('@google-cloud/vertexai');

+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamMultipartContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL',
+  location = 'us-central1',
+  model = 'gemini-pro-vision',
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
+  // [START aiplatform_gemini_get_started]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-  // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image
-  // const mimeType = 'image/jpeg';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});

@@ -68,9 +61,8 @@ async function createStreamMultipartContent(
   for await (const item of responseStream.stream) {
     process.stdout.write(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_get_started]
 }
+// [END aiplatform_gemini_get_started]

 createStreamMultipartContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
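Besides location and model, the multipart samples take image and mimeType parameters that keep their existing defaults (the scones sample image in Cloud Storage, gs://generativeai-downloads/images/scones.jpg, as image/jpeg). A hypothetical call that overrides every default; the bucket path below is a made-up placeholder, not something from this commit:

    createStreamMultipartContent(
      'my-project-id',              // hypothetical project ID
      'us-central1',
      'gemini-pro-vision',
      'gs://my-bucket/photo.jpeg',  // hypothetical Cloud Storage object
      'image/jpeg'
    ).catch(console.error);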
