1 parent 98cbd76 commit 39ac60e
examples/dynamo/torch_export_flux_dev.py
@@ -113,6 +113,7 @@
     use_fp32_acc=True,
     use_explicit_typing=True,
     use_python_runtime=True,
+    immutable_weights=False,
 )
 
 # %%
@@ -121,11 +122,11 @@
 
 # Release the GPU memory occupied by the exported program and the pipe.transformer
 # Set the transformer in the Flux pipeline to the Torch-TRT compiled model
-del ep
-backbone.to("cpu")
 pipe.to(DEVICE)
-torch.cuda.empty_cache()
+backbone.to("cpu")
 pipe.transformer = trt_gm
+del ep
+torch.cuda.empty_cache()
 pipe.transformer.config = config
 trt_gm.device = torch.device("cuda")
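
For orientation, here is a minimal sketch of how the patched region of torch_export_flux_dev.py reads after this commit. Only the compilation flags in the first hunk and the reordered release sequence in the second hunk come from the patch itself; the enclosing torch_tensorrt.dynamo.compile(...) call and the dummy_inputs argument are assumptions reconstructed for illustration, while ep, pipe, backbone, trt_gm, config, and DEVICE appear in the diff and are assumed to be defined earlier in the example.

import torch
import torch_tensorrt

# Sketch of the patched region (assumptions noted inline).
trt_gm = torch_tensorrt.dynamo.compile(
    ep,                        # exported program of the Flux transformer (assumed defined earlier)
    inputs=dummy_inputs,       # hypothetical example inputs, not part of this diff
    use_fp32_acc=True,
    use_explicit_typing=True,
    use_python_runtime=True,
    immutable_weights=False,   # added by this commit; keeps the engine weights refittable
)

# Release the GPU memory occupied by the exported program and pipe.transformer,
# in the order this commit introduces: move the pipeline to the GPU and swap in
# the compiled module before deleting the exported program and emptying the cache.
pipe.to(DEVICE)
backbone.to("cpu")
pipe.transformer = trt_gm
del ep
torch.cuda.empty_cache()
pipe.transformer.config = config
trt_gm.device = torch.device("cuda")

The apparent intent of the reordering is to keep the exported program and the original backbone alive until the compiled module has been attached to the pipeline, and to defer emptying the CUDA cache until after that swap.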