Skip to content

Commit a15faf6

Browse files
committed
Fix test
1 parent a444677 commit a15faf6

File tree

3 files changed, +5 −5 lines changed

examples/models/llama/export_llama_lib.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -905,7 +905,7 @@ def _to_edge_and_lower_llama_xnnpack(
905905
return builder.to_executorch(passes=additional_passes)
906906

907907

908-
def _to_edge_and_lower_llama(
908+
def _to_edge_and_lower_llama( # noqa: C901
909909
builder_exported,
910910
modelname,
911911
additional_passes,

examples/models/llava/export_llava.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ def forward(self, images):
162162
use_kv_cache=True,
163163
example_inputs=(resized,),
164164
dynamic_shapes=dynamic_shapes,
165-
args=None,
165+
config=None,
166166
)
167167
.export()
168168
.pt2e_quantize([quantizer])

extension/llm/export/builder.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,7 @@ def _export(self, module: Optional[torch.nn.Module] = None) -> ExportedProgram:
204204
# 1. torch.nn.attention.sdpa_kernel([SDPBackend.MATH]) is for bypassing the dynamo error when tracing
205205
# 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
206206
with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
207-
if self.config.backend.qnn.enabled:
207+
if self.config and self.config.backend.qnn.enabled:
208208
# TODO: this is temporary, as qnn flow does not work with new, non-functional export IR.
209209
# See issue: https://github.com/pytorch/executorch/issues/7373
210210

@@ -250,7 +250,7 @@ def export(self) -> "LLMEdgeManager":
250250
# Persisting those changes back to an ExportedProgram will require
251251
# an additional export().
252252
self.pre_autograd_graph_module = exported_module.module()
253-
if self.config.export.export_only:
253+
if self.config and self.config.export.export_only:
254254
torch.export.save(exported_module, self.config.export.output_name)
255255
return self
256256

@@ -415,7 +415,7 @@ def export_to_edge(self) -> "LLMEdgeManager":
415415
self.export()
416416

417417
override_export_behaviour = contextlib.nullcontext()
418-
if self.config.backend.qnn.enabled:
418+
if self.config and self.config.backend.qnn.enabled:
419419
override_export_behaviour = patch.object(
420420
torch._utils_internal,
421421
"export_training_ir_rollout_check",

0 commit comments

Comments (0)