@@ -204,7 +204,7 @@ def _export(self, module: Optional[torch.nn.Module] = None) -> ExportedProgram:
         # 1. torch.nn.attention.sdpa_kernel([SDPBackend.MATH]) is for bypassing the dynamo error when tracing
         # 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
         with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
-            if self.config.backend.qnn.enabled:
+            if self.config and self.config.backend.qnn.enabled:
                 # TODO: this is temporary, as qnn flow does not work with new, non-functional export IR.
                 # See issue: https://github.com/pytorch/executorch/issues/7373
@@ -250,7 +250,7 @@ def export(self) -> "LLMEdgeManager":
         # Persisting those changes back to an ExportedProgram will require
         # an additional export().
         self.pre_autograd_graph_module = exported_module.module()
-        if self.config.export.export_only:
+        if self.config and self.config.export.export_only:
             torch.export.save(exported_module, self.config.export.output_name)
         return self
@@ -415,7 +415,7 @@ def export_to_edge(self) -> "LLMEdgeManager":
         self.export()

         override_export_behaviour = contextlib.nullcontext()
-        if self.config.backend.qnn.enabled:
+        if self.config and self.config.backend.qnn.enabled:
             override_export_behaviour = patch.object(
                 torch._utils_internal,
                 "export_training_ir_rollout_check",
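All three hunks apply the same guard: if self.config is None, the old code raises AttributeError on the first attribute access (e.g. self.config.backend). Prefixing the condition with "self.config and" short-circuits to False instead. A minimal runnable sketch of the pattern; Config, QnnConfig, BackendConfig, and Manager here are hypothetical stand-ins, not the real executorch classes:

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class QnnConfig:
    enabled: bool = False


@dataclass
class BackendConfig:
    qnn: QnnConfig = field(default_factory=QnnConfig)


@dataclass
class Config:
    backend: BackendConfig = field(default_factory=BackendConfig)


class Manager:
    def __init__(self, config: Optional[Config] = None):
        self.config = config

    def export(self) -> None:
        # Without the leading "self.config and", a None config raises:
        # AttributeError: 'NoneType' object has no attribute 'backend'
        if self.config and self.config.backend.qnn.enabled:
            print("taking QNN-specific export path")
        else:
            print("taking default export path")


Manager().export()          # config is None -> default path, no crash
Manager(Config()).export()  # qnn disabled   -> default path

The same short-circuit protects the export_only and qnn.enabled checks in the other two hunks.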