
Commit 49ff9b5

Merge pull request huggingface#10 from nottombrown/multi-gpu
[WIP] Add multi-gpu support
2 parents: 6a40c7d + db2f819

3 files changed (+66, -37 lines)

Diff for: README.md (-3 lines)

@@ -61,6 +61,3 @@ The single run test accuracy of this PyTorch version is 85.84%, while the author
 The authors implementations uses 8 GPU and can thus accomodate a batch of 64 samples while the present implementation is single GPU and is in consequence limited to 20 instances on a K80 for memory reasons. In our test, increasing the batch size from 8 to 20 samples increased the test accuracy by 2.5 points. A better accuracy may be obtained by using a multi-GPU setting (on the TO-DO list).
 
 The previous SOTA on the ROCStories dataset is 77.6% ("Hidden Coherence Model" of Chaturvedi et al. published in "Story Comprehension for Predicting What Happens Next" EMNLP 2017, which is a very nice paper too!)
-
-### TO-DO list
-- [ ] Add Multi-GPU training logic
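
For orientation (not part of this commit): the batch-size reasoning in the paragraph kept above is what the multi-GPU change targets. With torch.nn.DataParallel the input batch is split along its first dimension across the visible GPUs, so the total batch per step grows roughly linearly with the number of cards. A minimal sketch of that arithmetic, assuming the per-K80 capacity of 20 samples quoted in the README:

# Illustration only: effective batch size under DataParallel, assuming the
# per-GPU capacity of 20 samples on a K80 stated in the README.
per_gpu_batch = 20
for n_gpu in (1, 2, 4, 8):
    total = per_gpu_batch * n_gpu
    print(n_gpu, "GPU(s):", total, "samples per step")  # 8 GPUs -> 160, vs. the authors' 64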

Diff for: model_pytorch.py (+62, -33 lines)

@@ -11,10 +11,12 @@
 
 
 def gelu(x):
-    return 0.5*x*(1+torch.tanh(math.sqrt(2/math.pi)*(x+0.044715*torch.pow(x, 3))))
+    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
+
 
 def swish(x):
-    return x*torch.sigmoid(x)
+    return x * torch.sigmoid(x)
+
 
 ACT_FNS = {
     'relu': nn.ReLU,

@@ -25,6 +27,7 @@ def swish(x):
 
 class LayerNorm(nn.Module):
     "Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
+
     def __init__(self, n_state, e=1e-5):
         super(LayerNorm, self).__init__()
         self.g = nn.Parameter(torch.ones(n_state))

@@ -43,12 +46,12 @@ def __init__(self, nf, rf, nx):
         super(Conv1D, self).__init__()
         self.rf = rf
         self.nf = nf
-        if rf == 1: #faster 1x1 conv
+        if rf == 1:  # faster 1x1 conv
             w = torch.empty(nx, nf)
             nn.init.normal_(w, std=0.02)
             self.w = Parameter(w)
             self.b = Parameter(torch.zeros(nf))
-        else: #was used to train LM
+        else:  # was used to train LM
             raise NotImplementedError
 
     def forward(self, x):

@@ -64,9 +67,9 @@ def forward(self, x):
 class Attention(nn.Module):
     def __init__(self, nx, n_ctx, cfg, scale=False):
         super(Attention, self).__init__()
-        n_state = nx # in Attention: n_state=768 (nx=n_embd)
-        #[switch nx => n_state from Block to Attention to keep identical to TF implem]
-        assert n_state % cfg.n_head==0
+        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
+        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
+        assert n_state % cfg.n_head == 0
         self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
         self.n_head = cfg.n_head
         self.split_size = n_state

@@ -80,19 +83,19 @@ def _attn(self, q, k, v):
         w = torch.matmul(q, k)
         if self.scale:
             w = w / math.sqrt(v.size(-1))
-        w = w * self.b + -1e9*(1-self.b) # TF implem method: mask_attn_weights
+        w = w * self.b + -1e9 * (1 - self.b)  # TF implem method: mask_attn_weights
         w = nn.Softmax(dim=-1)(w)
         w = self.attn_dropout(w)
         return torch.matmul(w, v)
 
     def merge_heads(self, x):
         x = x.permute(0, 2, 1, 3).contiguous()
         new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
-        return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
+        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
 
     def split_heads(self, x, k=False):
-        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1)//self.n_head)
-        x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
+        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
+        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
         if k:
             return x.permute(0, 2, 3, 1)
         else:

@@ -112,7 +115,7 @@ def forward(self, x):
 
 
 class MLP(nn.Module):
-    def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
+    def __init__(self, n_state, cfg):  # in MLP: n_state=3072 (4 * n_embd)
         super(MLP, self).__init__()
         nx = cfg.n_embd
         self.c_fc = Conv1D(n_state, 1, nx)

@@ -132,19 +135,20 @@ def __init__(self, n_ctx, cfg, scale=False):
         nx = cfg.n_embd
         self.attn = Attention(nx, n_ctx, cfg, scale)
         self.ln_1 = LayerNorm(nx)
-        self.mlp = MLP(4*nx, cfg)
+        self.mlp = MLP(4 * nx, cfg)
         self.ln_2 = LayerNorm(nx)
 
     def forward(self, x):
         a = self.attn(x)
-        n = self.ln_1(x+a)
+        n = self.ln_1(x + a)
         m = self.mlp(n)
-        h = self.ln_2(n+m)
+        h = self.ln_2(n + m)
         return h
 
 
 class Model(nn.Module):
     """ Transformer model """
+
     def __init__(self, cfg, vocab=40990, n_ctx=512):
         super(Model, self).__init__()
         self.vocab = vocab

@@ -153,8 +157,8 @@ def __init__(self, cfg, vocab=40990, n_ctx=512):
         block = Block(n_ctx, cfg, scale=True)
         self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
         self.decoder = nn.Linear(cfg.n_embd, vocab, bias=False)
-        self.decoder.weight = self.embed.weight # Tied weights
-        self.clf_dropout = nn.Dropout2d(cfg.clf_pdrop) # To reproduce the noise_shape parameter of TF implementation
+        self.decoder.weight = self.embed.weight  # Tied weights
+        self.clf_dropout = nn.Dropout2d(cfg.clf_pdrop)  # To reproduce the noise_shape parameter of TF implementation
 
         nn.init.normal_(self.embed.weight, std=0.02)
 

@@ -169,25 +173,27 @@ def forward(self, x):
 
 class LMHead(nn.Module):
     """ Language Model Head for the transformer """
+
     def __init__(self, model, cfg):
         super(LMHead, self).__init__()
         self.n_embd = cfg.n_embd
-        self.decoder = lambda x: F.linear(x, model.embed.weight) # Tied weights
+        self.decoder = lambda x: F.linear(x, model.embed.weight)  # Tied weights
 
     def forward(self, h):
         # Truncated Language modeling logits (we remove the last token)
-        h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd) # Shape: 252, 768
+        h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)  # Shape: 252, 768
         lm_logits = self.decoder(h_trunc)
         return lm_logits
 
 
 class ClfHead(nn.Module):
     """ Classifier Head for the transformer """
+
     def __init__(self, clf_token, cfg):
         super(ClfHead, self).__init__()
         self.n_embd = cfg.n_embd
         self.clf_token = clf_token
-        self.dropout = nn.Dropout2d(cfg.clf_pdrop) # To reproduce the noise_shape parameter of TF implementation
+        self.dropout = nn.Dropout2d(cfg.clf_pdrop)  # To reproduce the noise_shape parameter of TF implementation
         self.linear = nn.Linear(cfg.n_embd, 1)
         nn.init.normal_(self.linear.weight, std=0.02)
         nn.init.normal_(self.linear.bias, 0)

@@ -196,17 +202,30 @@ def forward(self, h, x):
         # Classification logits
         clf_h = h.view(-1, self.n_embd)
         flat = x[:, :, :, 0].contiguous().view(-1)
-        #pool_idx = torch.eq(x[:, :, 0].contiguous().view(-1), self.clf_token)
-        clf_h = clf_h[flat == self.clf_token, :] #.index_select(0, pool_idx)
+        # pool_idx = torch.eq(x[:, :, 0].contiguous().view(-1), self.clf_token)
+        clf_h = clf_h[flat == self.clf_token, :]  # .index_select(0, pool_idx)
         clf_h = clf_h.view(-1, 2, self.n_embd, 1)
         clf_h = self.dropout(clf_h)
         clf_h = clf_h.view(-1, self.n_embd)
         clf_logits = self.linear(clf_h)
         return clf_logits.view(-1, 2)
 
 
-def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12, n_embd=768, path='./model/', path_names='./'):
+class DataParallelWithEmbed(torch.nn.DataParallel):
+    """DataParallel that proxies the embed property to the wrapped module"""
+
+    def __init__(self, model):
+        super(DataParallelWithEmbed, self).__init__(model)
+
+    @property
+    def embed(self):
+        return self.module.embed
+
+
+def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12, n_embd=768, path='./model/',
+                                 path_names='./'):
     # Load weights from TF model
+    print("Loading weights...")
     names = json.load(open(path_names + 'parameters_names.json'))
     shapes = json.load(open(path + 'params_shapes.json'))
     offsets = np.cumsum([np.prod(shape) for shape in shapes])

@@ -216,32 +235,40 @@ def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12, n
     if n_ctx > 0:
         init_params[0] = init_params[0][:n_ctx]
     if n_special > 0:
-        init_params[0] = np.concatenate([init_params[1],
-                                         (np.random.randn(n_special, n_embd)*0.02).astype(np.float32),
-                                         init_params[0]
-                                         ], 0)
+        init_params[0] = np.concatenate(
+            [init_params[1],
+             (np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
+             init_params[0]
+             ], 0)
     else:
-        init_params[0] = np.concatenate([init_params[1],
-                                         init_params[0]
-                                         ], 0)
+        init_params[0] = np.concatenate(
+            [init_params[1],
+             init_params[0]
+             ], 0)
     del init_params[1]
     if n_transfer == -1:
         n_transfer = 0
     else:
-        n_transfer = 1+n_transfer*12
+        n_transfer = 1 + n_transfer * 12
     init_params = [arr.squeeze() for arr in init_params]
+
     try:
         assert model.embed.weight.shape == init_params[0].shape
     except AssertionError as e:
         e.args += (model.embed.weight.shape, init_params[0].shape)
         raise
+
     model.embed.weight.data = torch.from_numpy(init_params[0])
+
+    # Load the weights into our torch module
+    module = model.module
+
     for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
-        name = name[6:] # skip "model/"
+        name = name[6:]  # skip "model/"
         assert name[-2:] == ":0"
         name = name[:-2]
         name = name.split('/')
-        pointer = model
+        pointer = module
         for m_name in name:
             if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                 l = re.split(r'(\d+)', m_name)

@@ -258,12 +285,14 @@ def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12, n
             raise
         pointer.data = torch.from_numpy(ip)
 
+
 class dotdict(dict):
     """dot.notation access to dictionary attributes"""
     __getattr__ = dict.get
     __setattr__ = dict.__setitem__
     __delattr__ = dict.__delitem__
 
+
 DEFAULT_CONFIG = dotdict({
     'n_embd': 768,
     'n_head': 12,
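
Why the new wrapper proxies embed (a minimal sketch, not part of the diff): LMHead ties its output projection to model.embed.weight and load_openai_pretrained_model reads the same attribute, but nn.DataParallel keeps the wrapped network under .module, so a plain wrapper would raise AttributeError on model.embed. The @property forwards that lookup, while the loader reaches the remaining parameters through model.module. The TinyModel below is a hypothetical stand-in used only to show the attribute forwarding:

# Illustration only; recent PyTorch builds construct DataParallel even without
# a visible GPU, and no forward pass is needed for this attribute check.
import torch
import torch.nn as nn


class TinyModel(nn.Module):
    """Hypothetical stand-in for Model: just an embedding attribute."""

    def __init__(self):
        super(TinyModel, self).__init__()
        self.embed = nn.Embedding(10, 4)

    def forward(self, x):
        return self.embed(x)


class DataParallelWithEmbed(nn.DataParallel):
    """Same idea as the class added above: forward `embed` to the wrapped module."""

    @property
    def embed(self):
        return self.module.embed


wrapped = DataParallelWithEmbed(TinyModel())
print(wrapped.embed.weight.shape)                      # torch.Size([10, 4]); tied heads keep working
print(hasattr(nn.DataParallel(TinyModel()), "embed"))  # False without the proxy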

Diff for: train.py (+4, -1 lines)

@@ -10,7 +10,7 @@
 
 from analysis import rocstories as rocstories_analysis
 from datasets import rocstories
-from model_pytorch import Model, LMHead, ClfHead, load_openai_pretrained_model
+from model_pytorch import Model, LMHead, ClfHead, load_openai_pretrained_model, DataParallelWithEmbed
 from opt import OpenAIAdam
 from text_utils import TextEncoder
 from utils import (encode_dataset, iter_data,

@@ -237,6 +237,7 @@ def run_epoch():
     encoder = text_encoder.encoder
     n_vocab = len(text_encoder.encoder)
 
+    print("Encoding dataset...")
     (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3) = encode_dataset(
         rocstories(data_dir, n_valid=args.n_valid), encoder=text_encoder)
     n_y = 2

@@ -266,6 +267,8 @@ def run_epoch():
     n_updates_total = (n_train // n_batch_train) * args.n_iter
 
     model = Model(args, vocab, n_ctx)
+    model = DataParallelWithEmbed(model).cuda()
+
     lm_head = LMHead(model, args)
     clf_head = ClfHead(clf_token, args)
 
271274
