Skip to content

Commit 7e3be63

Browse files
committed
Fix pydantic model parsing issues
1 parent e799682 commit 7e3be63

File tree

2 files changed: +22 additions, -18 deletions

src/guidellm/__main__.py

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -82,20 +82,20 @@ def cli():
8282
"The type of backend to use to run requests against. Defaults to 'openai_http'."
8383
f" Supported types: {', '.join(get_args(BackendType))}"
8484
),
85-
default=GenerativeTextScenario.backend_type,
85+
default=GenerativeTextScenario.model_fields["backend_type"].default,
8686
)
8787
@click.option(
8888
"--backend-args",
8989
callback=parse_json,
90-
default=GenerativeTextScenario.backend_args,
90+
default=GenerativeTextScenario.model_fields["backend_args"].default,
9191
help=(
9292
"A JSON string containing any arguments to pass to the backend as a "
9393
"dict with **kwargs."
9494
),
9595
)
9696
@click.option(
9797
"--model",
98-
default=GenerativeTextScenario.model,
98+
default=GenerativeTextScenario.model_fields["model"].default,
9999
type=str,
100100
help=(
101101
"The ID of the model to benchmark within the backend. "
@@ -104,7 +104,7 @@ def cli():
104104
)
105105
@click.option(
106106
"--processor",
107-
default=GenerativeTextScenario.processor,
107+
default=GenerativeTextScenario.model_fields["processor"].default,
108108
type=str,
109109
help=(
110110
"The processor or tokenizer to use to calculate token counts for statistics "
@@ -114,7 +114,7 @@ def cli():
114114
)
115115
@click.option(
116116
"--processor-args",
117-
default=GenerativeTextScenario.processor_args,
117+
default=GenerativeTextScenario.model_fields["processor_args"].default,
118118
callback=parse_json,
119119
help=(
120120
"A JSON string containing any arguments to pass to the processor constructor "
@@ -133,7 +133,7 @@ def cli():
133133
)
134134
@click.option(
135135
"--data-args",
136-
default=GenerativeTextScenario.data_args,
136+
default=GenerativeTextScenario.model_fields["data_args"].default,
137137
callback=parse_json,
138138
help=(
139139
"A JSON string containing any arguments to pass to the dataset creation "
@@ -142,7 +142,7 @@ def cli():
142142
)
143143
@click.option(
144144
"--data-sampler",
145-
default=GenerativeTextScenario.data_sampler,
145+
default=GenerativeTextScenario.model_fields["data_sampler"].default,
146146
type=click.Choice(["random"]),
147147
help=(
148148
"The data sampler type to use. 'random' will add a random shuffle on the data. "
@@ -160,7 +160,7 @@ def cli():
160160
)
161161
@click.option(
162162
"--rate",
163-
default=GenerativeTextScenario.rate,
163+
default=GenerativeTextScenario.model_fields["rate"].default,
164164
callback=parse_number_str,
165165
help=(
166166
"The rates to run the benchmark at. "
@@ -174,7 +174,7 @@ def cli():
174174
@click.option(
175175
"--max-seconds",
176176
type=float,
177-
default=GenerativeTextScenario.max_seconds,
177+
default=GenerativeTextScenario.model_fields["max_seconds"].default,
178178
help=(
179179
"The maximum number of seconds each benchmark can run for. "
180180
"If None, will run until max_requests or the data is exhausted."
@@ -183,7 +183,7 @@ def cli():
183183
@click.option(
184184
"--max-requests",
185185
type=int,
186-
default=GenerativeTextScenario.max_requests,
186+
default=GenerativeTextScenario.model_fields["max_requests"].default,
187187
help=(
188188
"The maximum number of requests each benchmark can run for. "
189189
"If None, will run until max_seconds or the data is exhausted."
@@ -192,7 +192,7 @@ def cli():
192192
@click.option(
193193
"--warmup-percent",
194194
type=float,
195-
default=GenerativeTextScenario.warmup_percent,
195+
default=GenerativeTextScenario.model_fields["warmup_percent"].default,
196196
help=(
197197
"The percent of the benchmark (based on max-seconds, max-requets, "
198198
"or lenth of dataset) to run as a warmup and not include in the final results. "
@@ -202,7 +202,7 @@ def cli():
202202
@click.option(
203203
"--cooldown-percent",
204204
type=float,
205-
default=GenerativeTextScenario.cooldown_percent,
205+
default=GenerativeTextScenario.model_fields["cooldown_percent"].default,
206206
help=(
207207
"The percent of the benchmark (based on max-seconds, max-requets, or lenth "
208208
"of dataset) to run as a cooldown and not include in the final results. "
@@ -212,19 +212,19 @@ def cli():
212212
@click.option(
213213
"--disable-progress",
214214
is_flag=True,
215-
default=not GenerativeTextScenario.show_progress,
215+
default=not GenerativeTextScenario.model_fields["show_progress"].default,
216216
help="Set this flag to disable progress updates to the console",
217217
)
218218
@click.option(
219219
"--display-scheduler-stats",
220220
is_flag=True,
221-
default=GenerativeTextScenario.show_progress_scheduler_stats,
221+
default=GenerativeTextScenario.model_fields["show_progress_scheduler_stats"].default,
222222
help="Set this flag to display stats for the processes running the benchmarks",
223223
)
224224
@click.option(
225225
"--disable-console-outputs",
226226
is_flag=True,
227-
default=not GenerativeTextScenario.output_console,
227+
default=not GenerativeTextScenario.model_fields["output_console"].default,
228228
help="Set this flag to disable console output",
229229
)
230230
@click.option(
@@ -241,7 +241,7 @@ def cli():
241241
@click.option(
242242
"--output-extras",
243243
callback=parse_json,
244-
default=GenerativeTextScenario.output_extras,
244+
default=GenerativeTextScenario.model_fields["output_extras"].default,
245245
help="A JSON string of extra data to save with the output benchmarks",
246246
)
247247
@click.option(
@@ -251,11 +251,11 @@ def cli():
251251
"The number of samples to save in the output file. "
252252
"If None (default), will save all samples."
253253
),
254-
default=GenerativeTextScenario.output_sampling,
254+
default=GenerativeTextScenario.model_fields["output_sampling"].default,
255255
)
256256
@click.option(
257257
"--random-seed",
258-
default=GenerativeTextScenario.random_seed,
258+
default=GenerativeTextScenario.model_fields["random_seed"].default,
259259
type=int,
260260
help="The random seed to use for benchmarking to ensure reproducibility.",
261261
)

src/guidellm/benchmark/scenario.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,10 @@ def update(self, **fields: Any) -> Self:
3131

3232

3333
class GenerativeTextScenario(Scenario):
34+
# FIXME: This solves an issue with Pydantic and class types
35+
class Config:
36+
arbitrary_types_allowed = True
37+
3438
backend_type: BackendType = "openai_http"
3539
backend_args: Optional[dict[str, Any]] = None
3640
model: Optional[str] = None

Comments (0)