XS-dev committed on
Commit
a5c42f2
1 Parent(s): 7105a67

Created the folder as requested, but no idea what the hell the files in this folder actually are

my-bert-model/config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.6.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
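For reference, the config above describes a standard 12-layer, 768-hidden, 12-head BERT-base checkpoint saved with a masked-LM head. A minimal sketch of reading it with transformers, assuming the committed folder my-bert-model is used as a local checkpoint path (the path and the call site are assumptions, not part of the commit):

from transformers import AutoConfig

# Load the committed config from the local folder added in this commit.
config = AutoConfig.from_pretrained("my-bert-model")
print(config.model_type, config.hidden_size, config.num_hidden_layers, config.num_attention_heads)
# -> bert 768 12 12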
my-bert-model/modeling_bert.py ADDED
@@ -0,0 +1,95 @@
+ from typing import List, Optional, Tuple, Union
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from transformers import BertPreTrainedModel, BertModel
+ from transformers.modeling_outputs import SequenceClassifierOutput
+
+
+ class BertForSequenceClassification(BertPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.config = config
+
+         self.bert = BertModel(config)
+         classifier_dropout = (
+             config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+         )
+         self.dropout = nn.Dropout(classifier_dropout)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         labels: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.bert(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         pooled_output = outputs[1]
+
+         pooled_output = self.dropout(pooled_output)
+         logits = self.classifier(pooled_output)
+
+         loss = None
+         if labels is not None:
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels)
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
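A minimal usage sketch for the class above (not part of the commit). It assumes my-bert-model/ is on sys.path so modeling_bert can be imported, that the committed masked-LM weights are loaded into this classification model (the classifier head itself is then randomly initialized), and that num_labels=2 and the sample sentence are purely illustrative:

import torch
from transformers import AutoConfig, AutoTokenizer
from modeling_bert import BertForSequenceClassification  # assumes my-bert-model/ is on sys.path

config = AutoConfig.from_pretrained("my-bert-model", num_labels=2)  # num_labels is illustrative
tokenizer = AutoTokenizer.from_pretrained("my-bert-model")
model = BertForSequenceClassification.from_pretrained("my-bert-model", config=config)
model.eval()

batch = tokenizer("an example sentence", return_tensors="pt")
with torch.no_grad():
    out = model(**batch, labels=torch.tensor([1]))
print(out.loss, out.logits.shape)  # scalar loss and logits of shape (1, 2)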
my-bert-model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:097417381d6c7230bd9e3557456d726de6e83245ec8b24f529f60198a67b203a
+ size 440473133
my-bert-model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
my-bert-model/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "model_max_length": 512}
my-bert-model/utils_data.py ADDED
@@ -0,0 +1,45 @@
+ from torch.utils.data import Dataset
+ import torch
+ import pandas as pd
+
+ def load_data(args, split):
+     # reads {split}.csv with 'text' and 'target' columns from args.data_root
+     df = pd.read_csv(f"{args.data_root}/{split}.csv")
+     texts = df['text'].values.tolist()
+     labels = df['target'].values.tolist()
+     return texts, labels
+
+ class MyDataset(Dataset):
+     def __init__(self, data, tokenizer, max_length, is_test):
+         self.tokenizer = tokenizer
+         self.max_length = max_length
+         self.texts = data[0]
+         self.labels = data[1]
+         self.is_test = is_test
+
+     def __len__(self):
+         """returns the number of examples"""
+         return len(self.texts)
+
+     def __getitem__(self, index):
+         """return the input ids, attention mask and, outside of test mode, the label"""
+         text = str(self.texts[index])
+         source = self.tokenizer.batch_encode_plus(
+             [text],
+             max_length=self.max_length,
+             truncation=True,
+             padding="max_length",
+             return_tensors="pt",
+         )
+         source_ids = source["input_ids"].squeeze()
+         source_mask = source["attention_mask"].squeeze()
+         data_sample = {
+             "input_ids": source_ids,
+             "attention_mask": source_mask,
+         }
+         if not self.is_test:
+             label = self.labels[index]
+             target_ids = torch.tensor(label).squeeze()
+             data_sample["labels"] = target_ids
+         return data_sample
+
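A minimal sketch of wiring load_data and MyDataset into a DataLoader (not part of the commit). The data_root folder and its train.csv with 'text' and 'target' columns are whatever load_data expects; the Namespace, max_length, and batch size here are illustrative, and the import assumes my-bert-model/ is on sys.path:

from argparse import Namespace
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from utils_data import load_data, MyDataset  # assumes my-bert-model/ is on sys.path

args = Namespace(data_root="data")  # hypothetical folder containing train.csv / test.csv
tokenizer = AutoTokenizer.from_pretrained("my-bert-model")

train_ds = MyDataset(load_data(args, "train"), tokenizer, max_length=128, is_test=False)
train_loader = DataLoader(train_ds, batch_size=16, shuffle=True)

batch = next(iter(train_loader))
print(batch["input_ids"].shape, batch["attention_mask"].shape, batch["labels"].shape)
# -> torch.Size([16, 128]) torch.Size([16, 128]) torch.Size([16])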
my-bert-model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff