Commit d314d63

Convert TS enums exports in Firebase AI into const variables.
1 parent 3d44792 commit d314d63

File tree: 4 files changed (+196, -87 lines)

.changeset/shy-yaks-hammer.md

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+---
+'@firebase/ai': major
+'firebase': major
+---
+
+Convert TS enums exports in Firebase AI into const variables.
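
Every export touched by this commit follows the same shape: the `enum` becomes a plain object literal asserted `as const`, and a type alias of the same name derives the union of its string values. A minimal sketch of the pattern, using a hypothetical `ExampleMode` name rather than one of the real exports:

// Hypothetical export used only to illustrate the pattern applied in this commit.
export const ExampleMode = {
  FAST: 'FAST',
  SLOW: 'SLOW'
} as const;

// The alias shares the name, so `ExampleMode` still works in type positions;
// it resolves to the string-literal union 'FAST' | 'SLOW'.
export type ExampleMode = (typeof ExampleMode)[keyof typeof ExampleMode];

Because the value and the type live in separate declaration spaces, the pair is a drop-in replacement for the enum name in both expression and type positions.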

packages/ai/src/types/enums.ts

Lines changed: 115 additions & 60 deletions
@@ -31,229 +31,284 @@ export const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'] as const;
  * Harm categories that would cause prompts or candidates to be blocked.
  * @public
  */
-export enum HarmCategory {
-  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',
-  HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
-  HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',
-  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
-}
+export const HarmCategory = {
+  HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',
+  HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+  HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',
+  HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'
+} as const;
+
+/**
+ * Harm categories that would cause prompts or candidates to be blocked.
+ * @public
+ */
+export type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];

 /**
  * Threshold above which a prompt or candidate will be blocked.
  * @public
  */
-export enum HarmBlockThreshold {
+export const HarmBlockThreshold = {
   /**
    * Content with `NEGLIGIBLE` will be allowed.
    */
-  BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',
+  BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',
   /**
    * Content with `NEGLIGIBLE` and `LOW` will be allowed.
    */
-  BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE',
+  BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',
   /**
    * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
    */
-  BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',
+  BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',
   /**
    * All content will be allowed.
    */
-  BLOCK_NONE = 'BLOCK_NONE',
+  BLOCK_NONE: 'BLOCK_NONE',
   /**
    * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
    * to the {@link HarmCategory} will not be present in the response.
    */
-  OFF = 'OFF'
-}
+  OFF: 'OFF'
+} as const;
+
+/**
+ * Threshold above which a prompt or candidate will be blocked.
+ * @public
+ */
+export type HarmBlockThreshold =
+  (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];

 /**
  * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
  *
  * @public
  */
-export enum HarmBlockMethod {
+export const HarmBlockMethod = {
   /**
    * The harm block method uses both probability and severity scores.
    */
-  SEVERITY = 'SEVERITY',
+  SEVERITY: 'SEVERITY',
   /**
    * The harm block method uses the probability score.
    */
-  PROBABILITY = 'PROBABILITY'
-}
+  PROBABILITY: 'PROBABILITY'
+} as const;
+
+/**
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
+ *
+ * @public
+ */
+export type HarmBlockMethod =
+  (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];

 /**
  * Probability that a prompt or candidate matches a harm category.
  * @public
  */
-export enum HarmProbability {
+export const HarmProbability = {
   /**
    * Content has a negligible chance of being unsafe.
    */
-  NEGLIGIBLE = 'NEGLIGIBLE',
+  NEGLIGIBLE: 'NEGLIGIBLE',
   /**
    * Content has a low chance of being unsafe.
    */
-  LOW = 'LOW',
+  LOW: 'LOW',
   /**
    * Content has a medium chance of being unsafe.
    */
-  MEDIUM = 'MEDIUM',
+  MEDIUM: 'MEDIUM',
   /**
    * Content has a high chance of being unsafe.
    */
-  HIGH = 'HIGH'
-}
+  HIGH: 'HIGH'
+} as const;
+
+/**
+ * Probability that a prompt or candidate matches a harm category.
+ * @public
+ */
+export type HarmProbability =
+  (typeof HarmProbability)[keyof typeof HarmProbability];

 /**
  * Harm severity levels.
  * @public
  */
-export enum HarmSeverity {
+export const HarmSeverity = {
   /**
    * Negligible level of harm severity.
    */
-  HARM_SEVERITY_NEGLIGIBLE = 'HARM_SEVERITY_NEGLIGIBLE',
+  HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',
   /**
    * Low level of harm severity.
    */
-  HARM_SEVERITY_LOW = 'HARM_SEVERITY_LOW',
+  HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',
   /**
    * Medium level of harm severity.
    */
-  HARM_SEVERITY_MEDIUM = 'HARM_SEVERITY_MEDIUM',
+  HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',
   /**
    * High level of harm severity.
    */
-  HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH',
+  HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',
   /**
    * Harm severity is not supported.
    *
    * @remarks
    * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.
    */
-  HARM_SEVERITY_UNSUPPORTED = 'HARM_SEVERITY_UNSUPPORTED'
-}
+  HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'
+} as const;
+
+/**
+ * Harm severity levels.
+ * @public
+ */
+export type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];

 /**
  * Reason that a prompt was blocked.
  * @public
  */
-export enum BlockReason {
+export const BlockReason = {
   /**
    * Content was blocked by safety settings.
    */
-  SAFETY = 'SAFETY',
+  SAFETY: 'SAFETY',
   /**
    * Content was blocked, but the reason is uncategorized.
    */
-  OTHER = 'OTHER',
+  OTHER: 'OTHER',
   /**
    * Content was blocked because it contained terms from the terminology blocklist.
    */
-  BLOCKLIST = 'BLOCKLIST',
+  BLOCKLIST: 'BLOCKLIST',
   /**
    * Content was blocked due to prohibited content.
    */
-  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
-}
+  PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'
+} as const;
+
+/**
+ * Reason that a prompt was blocked.
+ * @public
+ */
+export type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];

 /**
  * Reason that a candidate finished.
  * @public
  */
-export enum FinishReason {
+export const FinishReason = {
   /**
    * Natural stop point of the model or provided stop sequence.
    */
-  STOP = 'STOP',
+  STOP: 'STOP',
   /**
    * The maximum number of tokens as specified in the request was reached.
    */
-  MAX_TOKENS = 'MAX_TOKENS',
+  MAX_TOKENS: 'MAX_TOKENS',
   /**
    * The candidate content was flagged for safety reasons.
    */
-  SAFETY = 'SAFETY',
+  SAFETY: 'SAFETY',
   /**
    * The candidate content was flagged for recitation reasons.
    */
-  RECITATION = 'RECITATION',
+  RECITATION: 'RECITATION',
   /**
    * Unknown reason.
    */
-  OTHER = 'OTHER',
+  OTHER: 'OTHER',
   /**
    * The candidate content contained forbidden terms.
    */
-  BLOCKLIST = 'BLOCKLIST',
+  BLOCKLIST: 'BLOCKLIST',
   /**
    * The candidate content potentially contained prohibited content.
    */
-  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',
+  PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',
   /**
    * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
    */
-  SPII = 'SPII',
+  SPII: 'SPII',
   /**
    * The function call generated by the model was invalid.
    */
-  MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL'
-}
+  MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'
+} as const;
+
+/**
+ * Reason that a candidate finished.
+ * @public
+ */
+export type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];

 /**
  * @public
  */
-export enum FunctionCallingMode {
+export const FunctionCallingMode = {
   /**
    * Default model behavior; model decides to predict either a function call
    * or a natural language response.
    */
-  AUTO = 'AUTO',
+  AUTO: 'AUTO',
   /**
    * Model is constrained to always predicting a function call only.
    * If `allowed_function_names` is set, the predicted function call will be
    * limited to any one of `allowed_function_names`, else the predicted
    * function call will be any one of the provided `function_declarations`.
    */
-  ANY = 'ANY',
+  ANY: 'ANY',
   /**
    * Model will not predict any function call. Model behavior is same as when
    * not passing any function declarations.
    */
-  NONE = 'NONE'
-}
+  NONE: 'NONE'
+} as const;
+
+export type FunctionCallingMode =
+  (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];

 /**
  * Content part modality.
  * @public
  */
-export enum Modality {
+export const Modality = {
   /**
    * Unspecified modality.
    */
-  MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED',
+  MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',
   /**
    * Plain text.
    */
-  TEXT = 'TEXT',
+  TEXT: 'TEXT',
   /**
    * Image.
    */
-  IMAGE = 'IMAGE',
+  IMAGE: 'IMAGE',
   /**
    * Video.
    */
-  VIDEO = 'VIDEO',
+  VIDEO: 'VIDEO',
   /**
    * Audio.
    */
-  AUDIO = 'AUDIO',
+  AUDIO: 'AUDIO',
   /**
    * Document (for example, PDF).
    */
-  DOCUMENT = 'DOCUMENT'
-}
+  DOCUMENT: 'DOCUMENT'
+} as const;
+
+/**
+ * Content part modality.
+ * @public
+ */
+export type Modality = (typeof Modality)[keyof typeof Modality];

 /**
  * Generation modalities to be returned in generation responses.
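
At call sites, the switch from enum to const object is largely transparent. A minimal sketch of consumer code, assuming the `firebase/ai` entry point continues to re-export these names (the inline object type below is illustrative, not the SDK's own interface):

import { HarmBlockThreshold, HarmCategory } from 'firebase/ai';

// Illustrative inline type; the SDK's SafetySetting interface is not part of this diff.
const setting: { category: HarmCategory; threshold: HarmBlockThreshold } = {
  // Enum-style member access still works because the const object carries the same keys.
  category: HarmCategory.HARM_CATEGORY_HARASSMENT,
  // A bare string literal also type-checks, since HarmBlockThreshold is now a string-literal union.
  threshold: 'BLOCK_LOW_AND_ABOVE'
};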
