|
// [START remote_config_server_vertex_init]
// Dependencies: HTTPS-triggered v2 Cloud Functions, structured logging,
// the Admin SDK (app + server-side Remote Config), and the Vertex AI SDK.
const { onRequest } = require("firebase-functions/v2/https");
const logger = require("firebase-functions/logger");

const { initializeApp } = require("firebase-admin/app");
const { VertexAI } = require('@google-cloud/vertexai');
const { getRemoteConfig } = require("firebase-admin/remote-config");

// Set and check environment variables.
// NOTE(review): GCLOUD_PROJECT is assumed to be populated by the Cloud
// Functions runtime; it may be undefined when running locally — confirm
// emulator behavior before relying on it.
const project = process.env.GCLOUD_PROJECT;

// Initialize Firebase (default credentials; no explicit options).
const app = initializeApp();
// [END remote_config_server_vertex_init]
| 15 | + |
// [START remote_config_server_vertex_default_values]
// Define default (fallback) parameter values for Remote Config. These are
// used whenever the server-side template cannot be fetched or a parameter
// is missing/empty at evaluation time.
const defaultConfig = {

  // Default model for Vertex AI.
  model_name: "gemini-1.5-flash-preview-0514",

  // Generation settings, shaped as the plain object the Vertex AI SDK's
  // `generationConfig` expects. (The original wrapped this in a
  // one-element array, which the SDK does not accept.)
  generation_config: {
    "stopSequences": [], "temperature": 0.7,
    "maxOutputTokens": 64, "topP": 0.1, "topK": 20
  },

  prompt: "I'm a developer who wants to learn about Firebase and you are a \
    helpful assistant who knows everything there is to know about Firebase!",

  // Safety settings use the raw enum string values the Vertex AI API
  // accepts. (The original used JS identifier paths such as
  // "HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT", which are not valid
  // wire values.)
  safety_settings: [{
    "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
    "threshold": "BLOCK_MEDIUM_AND_ABOVE"
  }],

  location: 'us-central1',

  // Disable Vertex AI Gemini API access for testing.
  vertex_enabled: false
};
// [END remote_config_server_vertex_default_values]
| 39 | + |
| 40 | +// [START remote_config_server_vertex_create_function] |
| 41 | +// Export the function. |
| 42 | +exports.generateWithVertex = onRequest(async (request, response) => { |
| 43 | + |
| 44 | + try { |
| 45 | + |
| 46 | + // Set up Remote Config. |
| 47 | + const rc = getRemoteConfig(app); |
| 48 | + |
| 49 | + // Get the Remote Config template and assign default values. |
| 50 | + const template = await rc.getServerTemplate({ |
| 51 | + defaultConfig: defaultConfig |
| 52 | + }); |
| 53 | + |
| 54 | + // Add the template evaluation to a constant. |
| 55 | + const config = template.evaluate(); |
| 56 | + |
| 57 | + // Obtain values from Remote Config. |
| 58 | + const textModel = config.getString("model_name") || |
| 59 | + defaultConfig.model_name; |
| 60 | + const textPrompt = config.getString("prompt") || defaultConfig.prompt; |
| 61 | + const generationConfig = config.getString("generation_config") || |
| 62 | + defaultConfig.generation_config; |
| 63 | + const safetySettings = config.getString("safety_settings") || |
| 64 | + defaultConfig.safety_settings; |
| 65 | + const location = config.getString("location") || |
| 66 | + defaultConfig.location; |
| 67 | + const vertexEnabled = config.getBoolean("is_vertex_enabled") || |
| 68 | + defaultConfig.vertex_enabled; |
| 69 | +// [END remote_config_server_vertex_create_function] |
| 70 | + |
| 71 | +// [START remote_config_server_vertex_function_logic] |
| 72 | + // Allow user input. |
| 73 | + const userInput = request.query.prompt || ''; |
| 74 | + |
| 75 | + // Instantiate Vertex AI. |
| 76 | + const vertex_ai = new VertexAI({ project: project, location: location }); |
| 77 | + const generativeModel = vertex_ai.getGenerativeModel({ |
| 78 | + model: textModel, |
| 79 | + safety_settings: safetySettings, |
| 80 | + generation_config: generationConfig, |
| 81 | + }); |
| 82 | + |
| 83 | + // Create the chat; append user input to Remote Config-defined prompt. |
| 84 | + const chat = generativeModel.startChat(); |
| 85 | + const chatInput = textPrompt + " " + userInput; |
| 86 | + |
| 87 | + if (!chatInput) { |
| 88 | + return res.status(400).send('Missing text prompt'); |
| 89 | + } |
| 90 | + // If vertexEnabled isn't true, do not send queries to Vertex AI. |
| 91 | + if (vertexEnabled !== true) { |
| 92 | + response.status(200).send({ |
| 93 | + message: "Vertex AI call skipped. Vertex is not enabled." |
| 94 | + }); |
| 95 | + return; |
| 96 | + } |
| 97 | + |
| 98 | + console.log("\nRunning with model ", textModel, ", prompt: ", textPrompt, |
| 99 | + ", generationConfig: ", generationConfig, ", safetySettings: ", |
| 100 | + safetySettings, " in ", location, "\n"); |
| 101 | + |
| 102 | + const result = await chat.sendMessageStream(chatInput); |
| 103 | + response.writeHead(200, { 'Content-Type': 'text/plain' }); |
| 104 | + |
| 105 | + for await (const item of result.stream) { |
| 106 | + const chunk = item.candidates[0].content.parts[0].text; |
| 107 | + console.log("Received chunk:", chunk); |
| 108 | + response.write(chunk); |
| 109 | + } |
| 110 | + |
| 111 | + response.end(); |
| 112 | + |
| 113 | + } catch (error) { |
| 114 | + console.error(error); |
| 115 | + response.status(500).send('Internal server error'); |
| 116 | + } |
| 117 | +}); |
| 118 | +// [END remote_config_server_vertex_function_logic] |
| 119 | + |
0 commit comments