trans_example.py
'''
Download the WikiText dataset and train a small Transformer language model on it.
'''
from datasets import load_dataset
from transformers import AutoTokenizer
from torch.utils.data import dataset
from torch import nn
import torch.nn.functional as F
import math
import torch
import copy
import time
#from transformer.model.encoder import encoder
import sys
sys.path.insert(0,'./model')
from encoder import TransformerEncoder as encoder
#===============================================================================
#===============================================================================
class TransformerModel(nn.Module):
    def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int,
                 nlayers: int, dropout: float = 0.5):
        super().__init__()
        self.model_type = 'Transformer'
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        '''
        encoder_layers = TransformerEncoderLayer(d_model, nhead, d_hid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        '''
        self.transformer_encoder = encoder(d_model, nlayers)
        self.encoder = nn.Embedding(ntoken, d_model)
        self.d_model = d_model
        self.decoder = nn.Linear(d_model, ntoken)
        self.init_weights()

    def init_weights(self) -> None:
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
        """
        Args:
            src: Tensor, shape [seq_len, batch_size]
            src_mask: Tensor, shape [seq_len, seq_len]
        Returns:
            output Tensor of shape [seq_len, batch_size, ntoken]
        """
        src = self.encoder(src) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        #output = self.transformer_encoder(src, src_mask)
        output = self.transformer_encoder(src)
        output = self.decoder(output)
        return output
class PositionalEncoding(nn.Module):
    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        x = x + self.pe[:x.size(0)]
        return self.dropout(x)
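# The 'pe' buffer above holds the standard sinusoidal encoding:
#     PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#     PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
# which forward() adds to the incoming embeddings before applying dropout.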
def generate_square_subsequent_mask(sz: int) -> torch.Tensor:
    '''Generates an upper-triangular matrix of -inf, with zeros on diag.'''
    return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)
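# e.g. generate_square_subsequent_mask(3) yields
#     [[0., -inf, -inf],
#      [0.,   0., -inf],
#      [0.,   0.,   0.]]
# so, when the mask is applied, position i can only attend to positions <= i.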
def data_process(raw_text) -> torch.Tensor:
    '''Converts raw text into a flat Tensor.'''
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    #tokenizer.encode(x_txt, max_length=self.max_len*20, padding='max_length', truncation=True, return_tensors='pt')
    data = [tokenizer.encode(item['text'], return_tensors='pt').flatten() for item in raw_text]
    return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
def batchify(data: torch.Tensor, bsz: int) -> torch.Tensor:
    '''
    Divides the data into bsz separate sequences, removing extra elements
    that wouldn't cleanly fit.
    Args:
        data: Tensor, shape [N]
        bsz: int, batch size
    Returns:
        Tensor of shape [N // bsz, bsz]
    '''
    seq_len = data.size(0) // bsz
    data = data[:seq_len * bsz]
    data = data.view(bsz, seq_len).t().contiguous()
    return data.to(device)
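# e.g. a flat tensor of 26 token ids with bsz=4 keeps the first 24 ids and is
# reshaped to shape [6, 4]; each of the 4 columns is then treated as an
# independent length-6 stream, and the 2 leftover ids are dropped.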
def get_batch(source: torch.Tensor, i: int, bptt: int) -> tuple[torch.Tensor, torch.Tensor]:
    '''
    Args:
        source: Tensor, shape [full_seq_len, batch_size]
        i: int, starting row of the slice
        bptt: int, maximum sequence length per batch
    Returns:
        tuple (data, target), where data has shape [seq_len, batch_size] and
        target has shape [seq_len * batch_size]
    '''
    seq_len = min(bptt, len(source) - 1 - i)
    data = source[i:i+seq_len]
    target = source[i+1:i+1+seq_len].reshape(-1)
    return data, target
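# The target is simply the input shifted one step ahead: with i = 0 and rows
# [w0, w1, w2, ...], data covers w0..w_{seq_len-1} while target covers
# w1..w_{seq_len}, flattened so it lines up with output.view(-1, ntokens).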
def train(model: nn.Module) -> None:
    bptt = 35  # Will need to fix
    model.train()  # turn on train mode
    total_loss = 0.
    log_interval = 200
    start_time = time.time()
    src_mask = generate_square_subsequent_mask(bptt).to(device)
    num_batches = len(train_data) // bptt
    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
        data, targets = get_batch(train_data, i, bptt)
        seq_len = data.size(0)
        if seq_len != bptt:  # only on last batch
            src_mask = src_mask[:seq_len, :seq_len]
        output = model(data, src_mask)
        #print(output.shape, targets.shape)
        loss = criterion(output.view(-1, ntokens), targets)
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()
        total_loss += loss.item()
        if batch % log_interval == 0 and batch > 0:
            lr = scheduler.get_last_lr()[0]
            ms_per_batch = (time.time() - start_time) * 1000 / log_interval
            cur_loss = total_loss / log_interval
            ppl = math.exp(cur_loss)
            print(f'| epoch {epoch:3d} | {batch:5d}/{num_batches:5d} batches | '
                  f'lr {lr:02.2f} | ms/batch {ms_per_batch:5.2f} | '
                  f'loss {cur_loss:5.2f} | ppl {ppl:8.2f}')
            total_loss = 0
            start_time = time.time()
def evaluate(model: nn.Module, eval_data: torch.Tensor) -> float:
    model.eval()  # turn on evaluation mode
    bptt = 35  # Will need to fix
    total_loss = 0.
    src_mask = generate_square_subsequent_mask(bptt).to(device)
    with torch.no_grad():
        for i in range(0, eval_data.size(0) - 1, bptt):
            data, targets = get_batch(eval_data, i, bptt)
            seq_len = data.size(0)
            if seq_len != bptt:
                src_mask = src_mask[:seq_len, :seq_len]
            output = model(data, src_mask)
            output_flat = output.view(-1, ntokens)
            total_loss += seq_len * criterion(output_flat, targets).item()
    return total_loss / (len(eval_data) - 1)
#===============================================================================
#===============================================================================
if __name__ == '__main__':
    device = torch.device('cuda:0')
    ntokens = 30522  # Pulled from tokenizer
    batch_size = 20
    eval_batch_size = 10
    #train_ds = load_dataset('wikitext', 'wikitext-103-v1', split='train')
    train_ds = load_dataset('wikitext', 'wikitext-2-v1', split='train')
    val_ds = load_dataset('wikitext', 'wikitext-2-v1', split='validation')
    train_data = batchify(data_process(train_ds), batch_size).to(device)
    val_data = batchify(data_process(val_ds), eval_batch_size).to(device)
    bptt = 35
    emsize = 200   # embedding dimension
    d_hid = 200    # dimension of the feedforward network model in nn.TransformerEncoder
    nlayers = 2    # number of nn.TransformerEncoderLayer in nn.TransformerEncoder
    nhead = 2      # number of heads in nn.MultiheadAttention
    dropout = 0.2  # dropout probability
    model = TransformerModel(ntokens, emsize, nhead, d_hid, nlayers, dropout).to(device)

    # Run Model
    criterion = nn.CrossEntropyLoss()
    lr = 5.0  # learning rate
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
    best_val_loss = float('inf')
    epochs = 200
    best_model = None
    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()
        train(model)
        val_loss = evaluate(model, val_data)
        val_ppl = math.exp(val_loss)
        elapsed = time.time() - epoch_start_time
        print('-' * 89)
        print(f'| end of epoch {epoch:3d} | time: {elapsed:5.2f}s | '
              f'valid loss {val_loss:5.2f} | valid ppl {val_ppl:8.2f}')
        print('-' * 89)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model = copy.deepcopy(model)
        scheduler.step()
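    # A minimal sketch for keeping the result of the run: persist the best
    # checkpoint found above (the filename here is an assumption, adjust as needed).
    if best_model is not None:
        torch.save(best_model.state_dict(), 'trans_example_best.pt')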
'''
batch_size = 20
bptt = 35
train_data = batchify(train_data, batch_size)
num_batches = len(train_data) // bptt
for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
data, targets = get_batch(train_data,i,bptt)
print(data.shape)
'''
'''
a = train_ds[1]['text']
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', )
b = (tokenizer.encode(a, return_tensors='pt')).flatten()
print( b.shape )
'''