@@ -82,20 +82,20 @@ def cli():
         "The type of backend to use to run requests against. Defaults to 'openai_http'."
         f" Supported types: {', '.join(get_args(BackendType))}"
     ),
-    default=GenerativeTextScenario.backend_type,
+    default=GenerativeTextScenario.model_fields["backend_type"].default,
 )
 @click.option(
     "--backend-args",
     callback=parse_json,
-    default=GenerativeTextScenario.backend_args,
+    default=GenerativeTextScenario.model_fields["backend_args"].default,
     help=(
         "A JSON string containing any arguments to pass to the backend as a "
         "dict with **kwargs."
     ),
 )
 @click.option(
     "--model",
-    default=GenerativeTextScenario.model,
+    default=GenerativeTextScenario.model_fields["model"].default,
     type=str,
     help=(
         "The ID of the model to benchmark within the backend. "
@@ -104,7 +104,7 @@ def cli():
 )
 @click.option(
     "--processor",
-    default=GenerativeTextScenario.processor,
+    default=GenerativeTextScenario.model_fields["processor"].default,
     type=str,
     help=(
         "The processor or tokenizer to use to calculate token counts for statistics "
@@ -114,7 +114,7 @@ def cli():
 )
 @click.option(
     "--processor-args",
-    default=GenerativeTextScenario.processor_args,
+    default=GenerativeTextScenario.model_fields["processor_args"].default,
     callback=parse_json,
     help=(
         "A JSON string containing any arguments to pass to the processor constructor "
@@ -133,7 +133,7 @@ def cli():
 )
 @click.option(
     "--data-args",
-    default=GenerativeTextScenario.data_args,
+    default=GenerativeTextScenario.model_fields["data_args"].default,
     callback=parse_json,
     help=(
         "A JSON string containing any arguments to pass to the dataset creation "
@@ -142,7 +142,7 @@ def cli():
 )
 @click.option(
     "--data-sampler",
-    default=GenerativeTextScenario.data_sampler,
+    default=GenerativeTextScenario.model_fields["data_sampler"].default,
     type=click.Choice(["random"]),
     help=(
         "The data sampler type to use. 'random' will add a random shuffle on the data. "
@@ -160,7 +160,7 @@ def cli():
 )
 @click.option(
     "--rate",
-    default=GenerativeTextScenario.rate,
+    default=GenerativeTextScenario.model_fields["rate"].default,
     callback=parse_number_str,
     help=(
         "The rates to run the benchmark at. "
@@ -174,7 +174,7 @@ def cli():
 @click.option(
     "--max-seconds",
     type=float,
-    default=GenerativeTextScenario.max_seconds,
+    default=GenerativeTextScenario.model_fields["max_seconds"].default,
     help=(
         "The maximum number of seconds each benchmark can run for. "
         "If None, will run until max_requests or the data is exhausted."
@@ -183,7 +183,7 @@ def cli():
 @click.option(
     "--max-requests",
     type=int,
-    default=GenerativeTextScenario.max_requests,
+    default=GenerativeTextScenario.model_fields["max_requests"].default,
     help=(
         "The maximum number of requests each benchmark can run for. "
         "If None, will run until max_seconds or the data is exhausted."
@@ -192,7 +192,7 @@ def cli():
 @click.option(
     "--warmup-percent",
     type=float,
-    default=GenerativeTextScenario.warmup_percent,
+    default=GenerativeTextScenario.model_fields["warmup_percent"].default,
     help=(
         "The percent of the benchmark (based on max-seconds, max-requets, "
         "or lenth of dataset) to run as a warmup and not include in the final results. "
@@ -202,7 +202,7 @@ def cli():
 @click.option(
     "--cooldown-percent",
     type=float,
-    default=GenerativeTextScenario.cooldown_percent,
+    default=GenerativeTextScenario.model_fields["cooldown_percent"].default,
     help=(
         "The percent of the benchmark (based on max-seconds, max-requets, or lenth "
         "of dataset) to run as a cooldown and not include in the final results. "
@@ -212,19 +212,19 @@ def cli():
 @click.option(
     "--disable-progress",
     is_flag=True,
-    default=not GenerativeTextScenario.show_progress,
+    default=not GenerativeTextScenario.model_fields["show_progress"].default,
     help="Set this flag to disable progress updates to the console",
 )
 @click.option(
     "--display-scheduler-stats",
     is_flag=True,
-    default=GenerativeTextScenario.show_progress_scheduler_stats,
+    default=GenerativeTextScenario.model_fields["show_progress_scheduler_stats"].default,
     help="Set this flag to display stats for the processes running the benchmarks",
 )
 @click.option(
     "--disable-console-outputs",
     is_flag=True,
-    default=not GenerativeTextScenario.output_console,
+    default=not GenerativeTextScenario.model_fields["output_console"].default,
     help="Set this flag to disable console output",
 )
 @click.option(
@@ -241,7 +241,7 @@ def cli():
 @click.option(
     "--output-extras",
     callback=parse_json,
-    default=GenerativeTextScenario.output_extras,
+    default=GenerativeTextScenario.model_fields["output_extras"].default,
     help="A JSON string of extra data to save with the output benchmarks",
 )
 @click.option(
@@ -251,11 +251,11 @@ def cli():
         "The number of samples to save in the output file. "
         "If None (default), will save all samples."
     ),
-    default=GenerativeTextScenario.output_sampling,
+    default=GenerativeTextScenario.model_fields["output_sampling"].default,
 )
 @click.option(
     "--random-seed",
-    default=GenerativeTextScenario.random_seed,
+    default=GenerativeTextScenario.model_fields["random_seed"].default,
     type=int,
     help="The random seed to use for benchmarking to ensure reproducibility.",
 )
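For context on the pattern this diff adopts: a minimal sketch, assuming GenerativeTextScenario is a Pydantic v2 BaseModel (as the model_fields access implies), of how a field's declared default is read and reused as a click option default. ExampleScenario and the run command below are illustrative stand-ins, not part of the PR.

# Illustrative only: ExampleScenario stands in for GenerativeTextScenario.
from typing import Optional

import click
from pydantic import BaseModel


class ExampleScenario(BaseModel):
    backend_type: str = "openai_http"
    max_seconds: Optional[float] = None


# In Pydantic v2, Model.model_fields maps each field name to a FieldInfo,
# and FieldInfo.default holds the declared default value.
assert ExampleScenario.model_fields["backend_type"].default == "openai_http"


@click.command()
@click.option(
    "--backend-type",
    default=ExampleScenario.model_fields["backend_type"].default,
)
def run(backend_type):
    # Prints "openai_http" unless --backend-type overrides it.
    click.echo(backend_type)

This keeps the CLI defaults in sync with the scenario model's field defaults instead of duplicating literal values in each @click.option call.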