@@ -181,7 +181,7 @@ def forward(self, x):
 
             tokenizer = AutoTokenizer.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForCausalLM.from_config(config)
+            model = AutoModelForCausalLM.from_config(config, attn_implementation="eager")
             text = "Replace me by any text you'd like."
             encoded_input = tokenizer(text, return_tensors="pt")
             inputs_dict = dict(encoded_input)
@@ -199,7 +199,7 @@ def forward(self, x):
 
             tokenizer = AutoTokenizer.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForMaskedLM.from_config(config)
+            model = AutoModelForMaskedLM.from_config(config, attn_implementation="eager")
             text = "Replace me by any text you'd like."
             encoded_input = tokenizer(text, return_tensors="pt")
             example = dict(encoded_input)
@@ -209,7 +209,7 @@ def forward(self, x):
 
             processor = AutoProcessor.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForImageClassification.from_config(config)
+            model = AutoModelForImageClassification.from_config(config, attn_implementation="eager")
             encoded_input = processor(images=self.image, return_tensors="pt")
             example = dict(encoded_input)
         elif auto_model == "AutoModelForSeq2SeqLM":
@@ -218,7 +218,7 @@ def forward(self, x):
 
             tokenizer = AutoTokenizer.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForSeq2SeqLM.from_config(config)
+            model = AutoModelForSeq2SeqLM.from_config(config, attn_implementation="eager")
             inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
             decoder_inputs = tokenizer(
                 "<pad> Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen",
@@ -232,7 +232,7 @@ def forward(self, x):
 
             processor = AutoProcessor.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForSpeechSeq2Seq.from_config(config)
+            model = AutoModelForSpeechSeq2Seq.from_config(config, attn_implementation="eager")
             inputs = processor(torch.randn(1000).numpy(), sampling_rate=16000, return_tensors="pt")
             example = dict(inputs)
         elif auto_model == "AutoModelForCTC":
@@ -241,7 +241,7 @@ def forward(self, x):
 
             processor = AutoProcessor.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForCTC.from_config(config)
+            model = AutoModelForCTC.from_config(config, attn_implementation="eager")
             input_values = processor(torch.randn(1000).numpy(), return_tensors="pt")
             example = dict(input_values)
         elif auto_model == "AutoModelForTableQuestionAnswering":
@@ -251,7 +251,7 @@ def forward(self, x):
 
             tokenizer = AutoTokenizer.from_pretrained(name)
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModelForTableQuestionAnswering.from_config(config)
+            model = AutoModelForTableQuestionAnswering.from_config(config, attn_implementation="eager")
             data = {
                 "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
                 "Number of movies": ["87", "53", "69"],
@@ -304,7 +304,7 @@ def forward(self, x):
             from transformers import AutoModel
 
             config = AutoConfig.from_pretrained(name, torchscript=True)
-            model = AutoModel.from_config(config)
+            model = AutoModel.from_config(config, attn_implementation="eager")
             if hasattr(model, "set_default_language"):
                 model.set_default_language("en_XX")
             if example is None:
0 commit comments