
Commit aefcc3b

[fix] remove useless code

1 parent 9572d78 commit aefcc3b

File tree

1 file changed: +0 -17 lines

Diff for: EduNLP/ModelZoo/jiuzhang/modeling.py (-17 lines)
@@ -28,18 +28,12 @@
     BaseModelOutput,
     BaseModelOutputWithPastAndCrossAttentions,
     Seq2SeqModelOutput,
-    Seq2SeqLMOutput,
-    Seq2SeqSequenceClassifierOutput,
 )
 from transformers.modeling_utils import PreTrainedModel
-from transformers.utils import logging
 from transformers import BartConfig as CPTConfig
 from transformers import BertModel, BertConfig
 
 
-logger = logging.get_logger(__name__)
-
-
 def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
     """
     Shift input ids one token to the right.
@@ -84,17 +78,6 @@ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]
     return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
 
 
-def attention_mask_func(attention_scores, attention_mask):
-    return attention_scores + attention_mask
-
-
-def init_method(std):
-    def init_(tensor):
-        return torch.nn.init.normal_(tensor, mean=0.0, std=std)
-
-    return init_
-
-
 class CPTLearnedPositionalEmbedding(nn.Embedding):
     """
     This module learns positional embeddings up to a fixed maximum size.
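Since the commit only deletes symbols (two unused imports, the module-level logger, and the attention_mask_func and init_method helpers), a reasonable sanity check is that nothing else in the package still references them. Below is a minimal, illustrative sketch of such a check; it is not part of the commit, and the scanned path and name list are assumptions taken from this diff:

# Illustrative only: coarse textual scan for lingering references to the
# names this commit removes (path and name list are assumptions, not
# anything shipped in the repository).
from pathlib import Path

removed_names = [
    "Seq2SeqLMOutput",
    "Seq2SeqSequenceClassifierOutput",
    "attention_mask_func",
    "init_method",
]

for py_file in Path("EduNLP").rglob("*.py"):
    source = py_file.read_text(encoding="utf-8")
    for name in removed_names:
        if name in source:
            print(f"{py_file}: still mentions {name}")

Run from the repository root, no output would support the "useless code" claim; any hit would need manual review, since plain substring matching cannot tell a real use from a comment or an unrelated definition with the same name.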
