Learn an Algorithm Fast: the Transformer
Hi everyone, I'm 小寒.
Today I'd like to introduce a powerful model: the Transformer.
The Transformer, proposed by Vaswani et al. in 2017, is a deep learning model for natural language processing that is particularly good at sequence-to-sequence tasks such as machine translation and text generation.
Today we will walk through it mainly from an implementation (code) perspective.
Transformer Model Architecture
The Transformer consists of an encoder and a decoder, each of which is a stack of multiple identical layers.
Encoder
The encoder is a stack of N identical layers, each containing two sub-layers:
- Multi-Head Self-Attention Mechanism
- Feed-Forward Neural Network
Each sub-layer is wrapped with a residual connection and layer normalization.
Decoder
The decoder is also a stack of N identical layers, each containing three sub-layers:
- Masked Multi-Head Self-Attention Mechanism
- Encoder-Decoder Attention Mechanism (cross-attention)
- Feed-Forward Neural Network
Again, each sub-layer is wrapped with a residual connection and layer normalization.
Main Components of the Transformer
- Token embedding: converts discrete input tokens into continuous vector representations.
- Positional encoding: injects position information into the input sequence; since the Transformer itself has no inherent notion of order, positional encodings help the model recognize where each token sits in the sequence.
- Multi-head self-attention: computes attention scores between tokens so the model can attend to different positions in the sequence and capture global context.
- Feed-forward layer: applies an independent linear transformation and activation to each position, increasing the model's expressive power.
- Encoder-decoder attention: lets every decoder position attend to all encoder positions, passing the encoder's information to the decoder.
- Residual connections: add a skip connection between the input and output of every sub-layer to mitigate vanishing and exploding gradients and make training more stable.
Code Walkthrough
First, we import the necessary libraries.
import numpy as np
import torch
import math
from torch import nn
import torch.nn.functional as F
The embedding layer converts token indices into dense embedding vectors, scaled by the square root of d_model as in the original paper.
class Embeddings(nn.Module):
    def __init__(self, vocab_size, d_model):
        super(Embeddings, self).__init__()
        self.embed = nn.Embedding(vocab_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        # Scale the embeddings by sqrt(d_model), as in the original paper
        return self.embed(x) * math.sqrt(self.d_model)
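As a quick sanity check (the vocabulary size and tensor shapes below are made up for illustration):

emb = Embeddings(vocab_size=1000, d_model=512)
tokens = torch.randint(0, 1000, (2, 10))   # batch of 2 sequences, 10 token ids each
print(emb(tokens).shape)                   # torch.Size([2, 10, 512])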
Positional encoding adds information about the order of the sequence.
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_sequence_length):
        super().__init__()
        self.max_sequence_length = max_sequence_length
        self.d_model = d_model

    def forward(self, x):
        # The encoding depends only on position and dimension index, not on x
        even_i = torch.arange(0, self.d_model, 2).float()
        denominator = torch.pow(10000, even_i / self.d_model)
        position = (torch.arange(self.max_sequence_length)
                    .reshape(self.max_sequence_length, 1))
        even_PE = torch.sin(position / denominator)   # sine on even dimensions
        odd_PE = torch.cos(position / denominator)    # cosine on odd dimensions
        # Interleave sin/cos -> (max_sequence_length, d_model)
        stacked = torch.stack([even_PE, odd_PE], dim=2)
        PE = torch.flatten(stacked, start_dim=1, end_dim=2)
        return PE
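A quick shape check (sizes are illustrative; the input argument is unused here):

pe = PositionalEncoding(d_model=512, max_sequence_length=10)
print(pe(None).shape)   # torch.Size([10, 512])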
The multi-head self-attention layer.
def scaled_dot_product(q, k, v, mask=None):
    # q, k, v: (batch, num_heads, seq_len, head_dim)
    d_k = q.size()[-1]
    scaled = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(d_k)
    if mask is not None:
        # Permute so a (batch, seq_len, seq_len) mask broadcasts across heads
        scaled = scaled.permute(1, 0, 2, 3) + mask
        scaled = scaled.permute(1, 0, 2, 3)
    attention = F.softmax(scaled, dim=-1)
    values = torch.matmul(attention, v)
    return values, attention

class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads
        # A single linear layer produces q, k and v in one pass
        self.qkv_layer = nn.Linear(d_model, 3 * d_model)
        self.linear_layer = nn.Linear(d_model, d_model)

    def forward(self, x, mask):
        batch_size, sequence_length, d_model = x.size()
        qkv = self.qkv_layer(x)
        # Split into heads: (batch, num_heads, seq_len, 3 * head_dim)
        qkv = qkv.reshape(batch_size, sequence_length, self.num_heads, 3 * self.head_dim)
        qkv = qkv.permute(0, 2, 1, 3)
        q, k, v = qkv.chunk(3, dim=-1)
        values, attention = scaled_dot_product(q, k, v, mask)
        # Concatenate the heads back together
        values = values.permute(0, 2, 1, 3).reshape(batch_size, sequence_length, self.num_heads * self.head_dim)
        out = self.linear_layer(values)
        return out
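A quick shape check with illustrative sizes:

mha = MultiHeadAttention(d_model=512, num_heads=8)
x = torch.randn(2, 10, 512)       # (batch, seq_len, d_model)
print(mha(x, mask=None).shape)    # torch.Size([2, 10, 512])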
The position-wise feed-forward layer uses two linear layers with a ReLU activation and dropout.
class PositionwiseFeedForward(nn.Module):
    def __init__(self, d_model, hidden, drop_prob=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.linear1 = nn.Linear(d_model, hidden)
        self.linear2 = nn.Linear(hidden, d_model)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=drop_prob)

    def forward(self, x):
        # Applied independently to every position: d_model -> hidden -> d_model
        x = self.linear1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.linear2(x)
        return x
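Again a quick check (sizes are illustrative):

ffn = PositionwiseFeedForward(d_model=512, hidden=2048)
print(ffn(torch.randn(2, 10, 512)).shape)   # torch.Size([2, 10, 512])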
Layer normalization.
class LayerNormalization(nn.Module):
    def __init__(self, parameters_shape, eps=1e-5):
        super().__init__()
        self.parameters_shape = parameters_shape
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(parameters_shape))   # learnable scale
        self.beta = nn.Parameter(torch.zeros(parameters_shape))   # learnable shift

    def forward(self, inputs):
        # Normalize over the last len(parameters_shape) dimensions
        dims = [-(i + 1) for i in range(len(self.parameters_shape))]
        mean = inputs.mean(dim=dims, keepdim=True)
        var = ((inputs - mean) ** 2).mean(dim=dims, keepdim=True)
        std = (var + self.eps).sqrt()
        y = (inputs - mean) / std
        out = self.gamma * y + self.beta
        return out
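This hand-rolled version should agree with PyTorch's built-in nn.LayerNorm, which uses the same default eps of 1e-5 and initializes its scale to ones and shift to zeros. A quick check with illustrative sizes:

x = torch.randn(2, 10, 512)
ours = LayerNormalization(parameters_shape=[512])(x)
ref = nn.LayerNorm(512)(x)
print(torch.allclose(ours, ref, atol=1e-5))   # True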
The encoder is composed of multiple encoder layers.
class EncoderLayer(nn.Module):
    def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
        super(EncoderLayer, self).__init__()
        self.attention = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
        self.norm1 = LayerNormalization(parameters_shape=[d_model])
        self.dropout1 = nn.Dropout(p=drop_prob)
        self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
        self.norm2 = LayerNormalization(parameters_shape=[d_model])
        self.dropout2 = nn.Dropout(p=drop_prob)

    def forward(self, x, self_attention_mask):
        # Sub-layer 1: multi-head self-attention + residual + layer norm
        residual_x = x.clone()
        x = self.attention(x, mask=self_attention_mask)
        x = self.dropout1(x)
        x = self.norm1(x + residual_x)
        # Sub-layer 2: position-wise feed-forward + residual + layer norm
        residual_x = x.clone()
        x = self.ffn(x)
        x = self.dropout2(x)
        x = self.norm2(x + residual_x)
        return x

class SequentialEncoder(nn.Sequential):
    def forward(self, *inputs):
        # Pass the same mask through every encoder layer
        x, self_attention_mask = inputs
        for module in self._modules.values():
            x = module(x, self_attention_mask)
        return x
class Encoder(nn.Module):
    def __init__(self,
                 d_model,
                 ffn_hidden,
                 num_heads,
                 drop_prob,
                 num_layers,
                 max_sequence_length,
                 language_to_index,
                 START_TOKEN,
                 END_TOKEN,
                 PADDING_TOKEN):
        super().__init__()
        self.sentence_embedding = SentenceEmbedding(max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
        self.layers = SequentialEncoder(*[EncoderLayer(d_model, ffn_hidden, num_heads, drop_prob)
                                          for _ in range(num_layers)])

    def forward(self, x, self_attention_mask, start_token, end_token):
        x = self.sentence_embedding(x, start_token, end_token)
        x = self.layers(x, self_attention_mask)
        return x
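Note that the Encoder (and the Decoder below) rely on a SentenceEmbedding module that is not shown in this article. The sketch below is an assumption about what it could look like, not part of the original code: it assumes the inputs are batches of already tokenized sentences, maps tokens to indices with language_to_index, pads to max_sequence_length, and combines the Embeddings and PositionalEncoding modules defined above.

class SentenceEmbedding(nn.Module):
    # Hypothetical implementation: tokenized sentences -> scaled embeddings + positional encoding
    def __init__(self, max_sequence_length, d_model, language_to_index,
                 START_TOKEN, END_TOKEN, PADDING_TOKEN):
        super().__init__()
        self.max_sequence_length = max_sequence_length
        self.language_to_index = language_to_index
        self.embedding = Embeddings(len(language_to_index), d_model)
        self.position_encoder = PositionalEncoding(d_model, max_sequence_length)
        self.dropout = nn.Dropout(p=0.1)
        self.START_TOKEN = START_TOKEN
        self.END_TOKEN = END_TOKEN
        self.PADDING_TOKEN = PADDING_TOKEN

    def tokenize(self, sentence, start_token, end_token):
        # Map tokens to indices, optionally add start/end tokens, then pad to max length
        indices = [self.language_to_index[token] for token in sentence]
        if start_token:
            indices.insert(0, self.language_to_index[self.START_TOKEN])
        if end_token:
            indices.append(self.language_to_index[self.END_TOKEN])
        indices += [self.language_to_index[self.PADDING_TOKEN]] * (self.max_sequence_length - len(indices))
        return torch.tensor(indices)

    def forward(self, x, start_token, end_token):
        # x: batch of tokenized sentences (lists of tokens)
        token_ids = torch.stack([self.tokenize(s, start_token, end_token) for s in x])
        embedded = self.embedding(token_ids)
        pos = self.position_encoder(token_ids).to(embedded.device)
        return self.dropout(embedded + pos)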
The multi-head cross-attention layer.
class MultiHeadCrossAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads
        self.kv_layer = nn.Linear(d_model, 2 * d_model)   # keys and values come from the encoder
        self.q_layer = nn.Linear(d_model, d_model)         # queries come from the decoder
        self.linear_layer = nn.Linear(d_model, d_model)

    def forward(self, x, y, mask):
        # x: encoder output, y: decoder representation
        # (this tutorial assumes both share the same sequence length)
        batch_size, sequence_length, d_model = x.size()
        kv = self.kv_layer(x)
        q = self.q_layer(y)
        kv = kv.reshape(batch_size, sequence_length, self.num_heads, 2 * self.head_dim)
        q = q.reshape(batch_size, sequence_length, self.num_heads, self.head_dim)
        kv = kv.permute(0, 2, 1, 3)
        q = q.permute(0, 2, 1, 3)
        k, v = kv.chunk(2, dim=-1)
        values, attention = scaled_dot_product(q, k, v, mask)
        values = values.permute(0, 2, 1, 3).reshape(batch_size, sequence_length, d_model)
        out = self.linear_layer(values)
        return out
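A quick shape check, with x standing in for the encoder output and y for the decoder representation (illustrative sizes):

cross = MultiHeadCrossAttention(d_model=512, num_heads=8)
x = torch.randn(2, 10, 512)            # encoder output
y = torch.randn(2, 10, 512)            # decoder representation
print(cross(x, y, mask=None).shape)    # torch.Size([2, 10, 512])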
The decoder is composed of multiple decoder layers.
class DecoderLayer(nn.Module):
    def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
        super(DecoderLayer, self).__init__()
        self.self_attention = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
        self.layer_norm1 = LayerNormalization(parameters_shape=[d_model])
        self.dropout1 = nn.Dropout(p=drop_prob)
        self.encoder_decoder_attention = MultiHeadCrossAttention(d_model=d_model, num_heads=num_heads)
        self.layer_norm2 = LayerNormalization(parameters_shape=[d_model])
        self.dropout2 = nn.Dropout(p=drop_prob)
        self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
        self.layer_norm3 = LayerNormalization(parameters_shape=[d_model])
        self.dropout3 = nn.Dropout(p=drop_prob)

    def forward(self, x, y, self_attention_mask, cross_attention_mask):
        # Sub-layer 1: masked self-attention over the decoder input y
        _y = y.clone()
        y = self.self_attention(y, mask=self_attention_mask)
        y = self.dropout1(y)
        y = self.layer_norm1(y + _y)
        # Sub-layer 2: cross-attention over the encoder output x
        _y = y.clone()
        y = self.encoder_decoder_attention(x, y, mask=cross_attention_mask)
        y = self.dropout2(y)
        y = self.layer_norm2(y + _y)
        # Sub-layer 3: position-wise feed-forward network
        _y = y.clone()
        y = self.ffn(y)
        y = self.dropout3(y)
        y = self.layer_norm3(y + _y)
        return y

class SequentialDecoder(nn.Sequential):
    def forward(self, *inputs):
        x, y, self_attention_mask, cross_attention_mask = inputs
        for module in self._modules.values():
            y = module(x, y, self_attention_mask, cross_attention_mask)
        return y
class Decoder(nn.Module):
    def __init__(self,
                 d_model,
                 ffn_hidden,
                 num_heads,
                 drop_prob,
                 num_layers,
                 max_sequence_length,
                 language_to_index,
                 START_TOKEN,
                 END_TOKEN,
                 PADDING_TOKEN):
        super().__init__()
        self.sentence_embedding = SentenceEmbedding(max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
        self.layers = SequentialDecoder(*[DecoderLayer(d_model, ffn_hidden, num_heads, drop_prob) for _ in range(num_layers)])

    def forward(self, x, y, self_attention_mask, cross_attention_mask, start_token, end_token):
        y = self.sentence_embedding(y, start_token, end_token)
        y = self.layers(x, y, self_attention_mask, cross_attention_mask)
        return y
Finally, the full Transformer model.
class Transformer(nn.Module):
    def __init__(self,
                 d_model,
                 ffn_hidden,
                 num_heads,
                 drop_prob,
                 num_layers,
                 max_sequence_length,
                 spn_vocab_size,
                 english_to_index,
                 spanish_to_index,
                 START_TOKEN,
                 END_TOKEN,
                 PADDING_TOKEN
                 ):
        super().__init__()
        self.encoder = Encoder(d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length, english_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
        self.decoder = Decoder(d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length, spanish_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
        # Project decoder outputs onto the target (Spanish) vocabulary
        self.linear = nn.Linear(d_model, spn_vocab_size)
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    def forward(self,
                x,                          # batch of source (English) sentences
                y,                          # batch of target (Spanish) sentences
                encoder_self_attention_mask=None,
                decoder_self_attention_mask=None,
                decoder_cross_attention_mask=None,
                enc_start_token=False,
                enc_end_token=False,
                dec_start_token=False,      # typically set to True during training
                dec_end_token=False):
        x = self.encoder(x, encoder_self_attention_mask, start_token=enc_start_token, end_token=enc_end_token)
        out = self.decoder(x, y, decoder_self_attention_mask, decoder_cross_attention_mask, start_token=dec_start_token, end_token=dec_end_token)
        out = self.linear(out)
        return out
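To close, here is a hedged sketch of how the model might be instantiated and called, assuming the hypothetical SentenceEmbedding sketch above and a toy English-to-Spanish vocabulary. The tokens, vocabulary, sizes, and the decision to pass no attention masks are illustrative, not from the original article.

# Toy vocabularies and special tokens (purely illustrative)
START_TOKEN, END_TOKEN, PADDING_TOKEN = '<s>', '</s>', '<pad>'
english_to_index = {tok: i for i, tok in enumerate([START_TOKEN, END_TOKEN, PADDING_TOKEN, 'hello', 'world'])}
spanish_to_index = {tok: i for i, tok in enumerate([START_TOKEN, END_TOKEN, PADDING_TOKEN, 'hola', 'mundo'])}

model = Transformer(d_model=64, ffn_hidden=128, num_heads=4, drop_prob=0.1,
                    num_layers=2, max_sequence_length=8,
                    spn_vocab_size=len(spanish_to_index),
                    english_to_index=english_to_index,
                    spanish_to_index=spanish_to_index,
                    START_TOKEN=START_TOKEN, END_TOKEN=END_TOKEN, PADDING_TOKEN=PADDING_TOKEN)

src = [['hello', 'world']]          # batch of tokenized English sentences
tgt = [['hola', 'mundo']]           # batch of tokenized Spanish sentences
logits = model(src, tgt, dec_start_token=True, dec_end_token=True)
print(logits.shape)                 # torch.Size([1, 8, 5]): per-position logits over the Spanish vocabulary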