-
Notifications
You must be signed in to change notification settings - Fork 6
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add a QuestionAnswering bert model for testing (#19)
Add a QuestionAnswering BERT model for testing (#19):

* Add matching transformers version as current tt-metal
* Remove the type conversion passes and directly call transformers to use bfloat16 when initializing the model
* Convert torch.Tensor.to to ttnn.as_tensor
* Use ttnn.full correctly instead of aten.full for certain cases
* Convert bert model to unittest
* Move transformers installation to dev
* Refactor model input and output print statement for test_bert
- Loading branch information
Showing 9 changed files with 235 additions and 202 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,3 +2,4 @@ | |
pytest==7.2.2 | ||
pytest-timeout==2.2.0 | ||
pre-commit==3.0.4 | ||
transformers==4.38.0 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
import torch | ||
import torch_ttnn | ||
import unittest | ||
from torch_ttnn import ttnn | ||
import collections | ||
|
||
# Load model directly | ||
from transformers import ( | ||
AutoTokenizer, | ||
AutoModelForQuestionAnswering, | ||
) | ||
|
||
|
||
class TestBert(unittest.TestCase):
    """End-to-end check that a BERT question-answering model gives the same
    answer when run eagerly and when compiled with the torch_ttnn backend.
    """

    def setUp(self):
        # Each test gets exclusive use of device 0 for its duration.
        self.device: ttnn.Device = ttnn.open_device(device_id=0)

    def tearDown(self):
        # Hand the device back so subsequent tests can open it again.
        ttnn.close_device(self.device)

    def test_bert(self):
        # Fetch the pretrained QA model and its tokenizer from the hub,
        # loading weights directly in bfloat16.
        model_name = "phiyodr/bert-large-finetuned-squad2"
        tokenizer = AutoTokenizer.from_pretrained(
            model_name, padding_side="left", torch_dtype=torch.bfloat16
        )
        model = AutoModelForQuestionAnswering.from_pretrained(
            model_name, torch_dtype=torch.bfloat16
        )
        model.eval()

        # Sample question/context pair used for the before/after comparison.
        context = 'Johann Joachim Winckelmann was a German art historian and archaeologist. He was a pioneering Hellenist who first articulated the difference between Greek, Greco-Roman and Roman art. "The prophet and founding hero of modern archaeology", Winckelmann was one of the founders of scientific archaeology and first applied the categories of style on a large, systematic basis to the history of art. '
        question = "What discipline did Winkelmann create?"

        encoding = tokenizer.encode_plus(
            question,
            context,
            add_special_tokens=True,
            return_tensors="pt",
            max_length=256,
            padding="max_length",
            truncation=True,
        )

        # Reference run: the uncompiled (eager) model.
        with torch.no_grad():
            eager_outputs = model(**encoding)

        def extract_answer(qa_outputs):
            # Pick the highest-scoring start/end positions and decode the
            # token span between them back into text.
            span_start = qa_outputs.start_logits.argmax()
            span_end = qa_outputs.end_logits.argmax() + 1
            span_tokens = encoding.input_ids[0, span_start:span_end]
            return tokenizer.decode(span_tokens)

        eager_answer = extract_answer(eager_outputs)

        # Recompile the same model through the ttnn backend and rerun.
        opt = torch_ttnn.TorchTtnnOption(device=self.device)
        model = torch.compile(model, backend=torch_ttnn.backend, options=opt)

        with torch.no_grad():
            compiled_outputs = model(**encoding)
            opt._out_fx_graphs[0].print_tabular()

        compiled_answer = extract_answer(compiled_outputs)

        print(
            f"""
        model_name: {model_name}
        input:
            context: {context}
            question: {question}
        answer before: {eager_answer}
        answer after: {compiled_answer}
        """
        )

        # TODO: Add more checks for the compiled graph

        # The compiled model must reproduce the eager model's answer exactly.
        self.assertEqual(eager_answer, compiled_answer)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file was deleted.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.