import asyncio
import json
from pathlib import Path
-from typing import get_args
+from typing import Any, get_args

import click
from pydantic import ValidationError
@@ -27,7 +27,7 @@ def parse_json(ctx, param, value):  # noqa: ARG001
        raise click.BadParameter(f"{param.name} must be a valid JSON string.") from err


-def set_if_not_default(ctx: click.Context, **kwargs):
+def set_if_not_default(ctx: click.Context, **kwargs) -> dict[str, Any]:
    """
    Set the value of a click option if it is not the default value.
    This is useful for setting options that are not None by default.
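(Aside, not part of the diff: the new `-> dict[str, Any]` annotation documents that this helper returns only the options the user explicitly set. A minimal sketch of how such a helper can be written with Click 8's parameter-source tracking; the actual body in this file may differ.)

# Illustrative sketch only -- assumes Click 8, whose
# Context.get_parameter_source() reports where each value came from.
from typing import Any

import click
from click.core import ParameterSource


def set_if_not_default(ctx: click.Context, **kwargs) -> dict[str, Any]:
    # Keep only values the user actually supplied; a source of
    # ParameterSource.DEFAULT means the value came from the option declaration.
    return {
        name: value
        for name, value in kwargs.items()
        if ctx.get_parameter_source(name) != ParameterSource.DEFAULT
    }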
@@ -66,20 +66,20 @@ def cli():
        "The type of backend to use to run requests against. Defaults to 'openai_http'."
        f" Supported types: {', '.join(get_args(BackendType))}"
    ),
-    default=GenerativeTextScenario.model_fields["backend_type"].default,
+    default=GenerativeTextScenario.get_default("backend_type"),
)
@click.option(
    "--backend-args",
    callback=parse_json,
-    default=GenerativeTextScenario.model_fields["backend_args"].default,
+    default=GenerativeTextScenario.get_default("backend_args"),
    help=(
        "A JSON string containing any arguments to pass to the backend as a "
        "dict with **kwargs."
    ),
)
@click.option(
    "--model",
-    default=GenerativeTextScenario.model_fields["model"].default,
+    default=GenerativeTextScenario.get_default("model"),
    type=str,
    help=(
        "The ID of the model to benchmark within the backend. "
@@ -88,7 +88,7 @@ def cli():
)
@click.option(
    "--processor",
-    default=GenerativeTextScenario.model_fields["processor"].default,
+    default=GenerativeTextScenario.get_default("processor"),
    type=str,
    help=(
        "The processor or tokenizer to use to calculate token counts for statistics "
@@ -98,7 +98,7 @@ def cli():
)
@click.option(
    "--processor-args",
-    default=GenerativeTextScenario.model_fields["processor_args"].default,
+    default=GenerativeTextScenario.get_default("processor_args"),
    callback=parse_json,
    help=(
        "A JSON string containing any arguments to pass to the processor constructor "
@@ -116,7 +116,7 @@ def cli():
)
@click.option(
    "--data-args",
-    default=GenerativeTextScenario.model_fields["data_args"].default,
+    default=GenerativeTextScenario.get_default("data_args"),
    callback=parse_json,
    help=(
        "A JSON string containing any arguments to pass to the dataset creation "
@@ -125,7 +125,7 @@ def cli():
)
@click.option(
    "--data-sampler",
-    default=GenerativeTextScenario.model_fields["data_sampler"].default,
+    default=GenerativeTextScenario.get_default("data_sampler"),
    type=click.Choice(["random"]),
    help=(
        "The data sampler type to use. 'random' will add a random shuffle on the data. "
@@ -142,7 +142,7 @@ def cli():
)
@click.option(
    "--rate",
-    default=GenerativeTextScenario.model_fields["rate"].default,
+    default=GenerativeTextScenario.get_default("rate"),
    help=(
        "The rates to run the benchmark at. "
        "Can be a single number or a comma-separated list of numbers. "
@@ -155,7 +155,7 @@ def cli():
@click.option(
    "--max-seconds",
    type=float,
-    default=GenerativeTextScenario.model_fields["max_seconds"].default,
+    default=GenerativeTextScenario.get_default("max_seconds"),
    help=(
        "The maximum number of seconds each benchmark can run for. "
        "If None, will run until max_requests or the data is exhausted."
@@ -164,7 +164,7 @@ def cli():
@click.option(
    "--max-requests",
    type=int,
-    default=GenerativeTextScenario.model_fields["max_requests"].default,
+    default=GenerativeTextScenario.get_default("max_requests"),
    help=(
        "The maximum number of requests each benchmark can run for. "
        "If None, will run until max_seconds or the data is exhausted."
@@ -173,7 +173,7 @@ def cli():
@click.option(
    "--warmup-percent",
    type=float,
-    default=GenerativeTextScenario.model_fields["warmup_percent"].default,
+    default=GenerativeTextScenario.get_default("warmup_percent"),
    help=(
        "The percent of the benchmark (based on max-seconds, max-requests, "
        "or length of dataset) to run as a warmup and not include in the final results. "
@@ -183,7 +183,7 @@ def cli():
@click.option(
    "--cooldown-percent",
    type=float,
-    default=GenerativeTextScenario.model_fields["cooldown_percent"].default,
+    default=GenerativeTextScenario.get_default("cooldown_percent"),
    help=(
        "The percent of the benchmark (based on max-seconds, max-requests, or length "
        "of dataset) to run as a cooldown and not include in the final results. "
@@ -228,11 +228,11 @@ def cli():
        "The number of samples to save in the output file. "
        "If None (default), will save all samples."
    ),
-    default=GenerativeTextScenario.model_fields["output_sampling"].default,
+    default=GenerativeTextScenario.get_default("output_sampling"),
)
@click.option(
    "--random-seed",
-    default=GenerativeTextScenario.model_fields["random_seed"].default,
+    default=GenerativeTextScenario.get_default("random_seed"),
    type=int,
    help="The random seed to use for benchmarking to ensure reproducibility.",
)
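(The option changes above assume GenerativeTextScenario exposes a get_default classmethod. As an illustration only, not the repository's actual code, such a helper on a Pydantic v2 model could be as small as the sketch below; the real method may handle default factories differently.)

# Hypothetical sketch of the get_default helper these options now call.
from typing import Any

from pydantic import BaseModel


class Scenario(BaseModel):
    @classmethod
    def get_default(cls, field: str) -> Any:
        # In Pydantic v2, model_fields maps field names to FieldInfo objects;
        # FieldInfo.default is the declared default value for the field.
        return cls.model_fields[field].default

Wrapping the lookup in one classmethod keeps the Click decorators from reaching into Pydantic's model_fields internals at every option declaration.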