import os
import sys
with open(sys.argv[0]) as f:
    code = f.read() # read the code of this file ASAP, for logging
import uuid
import glob
import time
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
# Use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import flex_attention, create_block_mask
flex_attention = torch.compile(flex_attention, dynamic=False)
create_block_mask = torch.compile(create_block_mask, dynamic=False)
# -----------------------------------------------------------------------------
# Muon optimizer
def zeropower_via_svd(G, steps=None):
    U, S, V = G.svd()
    return U @ V.T
@torch.compile
def zeropower_via_newtonschulz5(G, steps=10, eps=1e-7):
    """
    Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
    quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
    of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
    zero even beyond the point where the iteration no longer converges all the way to one everywhere
    on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
    where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
    performance at all relative to UV^T, where USV^T = G is the SVD.
    """
    assert len(G.shape) == 2
    a, b, c = (3.4445, -4.7750, 2.0315)
    X = G.bfloat16()
    X /= (X.norm() + eps) # ensure top singular value <= 1
    if G.size(0) > G.size(1):
        X = X.T
    for _ in range(steps):
        A = X @ X.T
        B = b * A + c * A @ A # adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
        X = a * X + B @ X
    if G.size(0) > G.size(1):
        X = X.T
    return X
zeropower_backends = dict(svd=zeropower_via_svd, newtonschulz5=zeropower_via_newtonschulz5)
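# A minimal sketch (not part of the training run; the `_demo_` helpers here and below are
# illustrative assumptions) of what the newtonschulz5 backend does: starting from a matrix with
# arbitrary spectrum, the quintic iteration drives every singular value toward 1, yielding an
# approximately semi-orthogonal matrix. It runs in float32 on CPU, unlike the bfloat16 GPU path above.
def _demo_newtonschulz_orthogonalizes():
    torch.manual_seed(0)
    G = torch.randn(128, 256)  # wide matrix, so no transpose step is needed
    a, b, c = (3.4445, -4.7750, 2.0315)
    X = G / (G.norm() + 1e-7)  # ensure top singular value <= 1
    for _ in range(10):
        A = X @ X.T
        X = a * X + (b * A + c * A @ A) @ X
    S = torch.linalg.svdvals(X)
    # per the docstring above, singular values land near 1 (roughly in (0.5, 1.5)), not exactly at 1
    assert 0.3 < S.min() and S.max() < 1.6, (S.min().item(), S.max().item())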
class Muon(torch.optim.Optimizer):
    """
    Muon - MomentUm Orthogonalized by Newton-schulz

    Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
    processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
    matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
    the advantage that it can be stably run in bfloat16 on the GPU.

    Some warnings:
    - This optimizer assumes that all parameters passed in are 2D.
    - It should not be used for the embedding layer, the final fully connected layer, or any {0,1}-D
      parameters; those should all be optimized by a standard method (e.g., AdamW).
    - To use it with 4D convolutional filters, it works well to just flatten their last 3 dimensions.
    - We believe it is unlikely to work well for training with small batch size.
    - We believe it may not work well for finetuning pretrained models, but we haven't tested this.
    - We have not yet tried this optimizer for training scenarios larger than NanoGPT (124M).

    Arguments:
        lr: The learning rate used by the internal SGD.
        momentum: The momentum used by the internal SGD.
        nesterov: Whether to use Nesterov-style momentum in the internal SGD. (recommended)
        backend: The chosen backend for the orthogonalization step. (recommended: 'newtonschulz5')
        backend_steps: The number of iteration steps to use in the backend, if it is iterative.
    """
    def __init__(self, params, lr=0.02, momentum=0.95, nesterov=True,
                 backend='newtonschulz5', backend_steps=5):
        defaults = dict(lr=lr, momentum=momentum, nesterov=nesterov, backend=backend, backend_steps=backend_steps)
        super().__init__(params, defaults)

    def step(self):
        for group in self.param_groups:
            lr = group['lr']
            momentum = group['momentum']
            zeropower_backend = zeropower_backends[group['backend']]

            # generate weight updates in distributed fashion
            total_params = sum(p.numel() for p in group['params'])
            updates_flat = torch.zeros(total_params, device='cuda', dtype=torch.bfloat16)
            curr_idx = 0
            for i, p in enumerate(group['params']):
                # luckily this will perfectly distribute a transformer with multiple of 4 layers to 8 GPUs
                if i % int(os.environ['WORLD_SIZE']) == int(os.environ['RANK']):
                    g = p.grad
                    assert g is not None
                    state = self.state[p]
                    if 'momentum_buffer' not in state:
                        state['momentum_buffer'] = torch.zeros_like(g)
                    buf = state['momentum_buffer']
                    buf.mul_(momentum).add_(g)
                    g = g.add(buf, alpha=momentum) if group['nesterov'] else buf
                    g = zeropower_backend(g, steps=group['backend_steps'])
                    g *= max(1, g.size(0)/g.size(1))**0.5
                    updates_flat[curr_idx:curr_idx+p.numel()] = g.flatten()
                curr_idx += p.numel()

            # sync updates across devices. we are not memory-constrained so can do this simple deserialization
            dist.all_reduce(updates_flat, op=dist.ReduceOp.SUM)

            # deserialize and apply updates
            curr_idx = 0
            for p in group['params']:
                g = updates_flat[curr_idx:curr_idx+p.numel()].view_as(p.data).type_as(p.data)
                p.data.add_(g, alpha=-lr)
                curr_idx += p.numel()
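# A single-process sketch (illustrative only, not called anywhere) of the update rule Muon
# applies to each 2D parameter: SGD-momentum, then orthogonalize the update via Newton-Schulz,
# then rescale by sqrt(max(1, rows/cols)) before the learning-rate step.
def _demo_muon_single_param_update(p, grad, buf, lr=0.02, momentum=0.95, nesterov=True):
    buf.mul_(momentum).add_(grad)                       # momentum buffer, as in Muon.step
    g = grad.add(buf, alpha=momentum) if nesterov else buf
    g = zeropower_via_newtonschulz5(g, steps=5)         # replace update with ~orthogonal matrix
    g = g * max(1, g.size(0) / g.size(1))**0.5          # scale correction for tall matrices
    p.data.add_(g, alpha=-lr)
    return p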
# -----------------------------------------------------------------------------
# PyTorch nn.Module definitions for the GPT-2 model
def norm(x):
    return F.rms_norm(x, (x.size(-1),))

class CastedLinear(nn.Linear):
    def __init__(self, in_features, out_features):
        super().__init__(in_features, out_features, bias=False)

    def forward(self, x):
        return F.linear(x, self.weight.to(x.dtype))
class Rotary(torch.nn.Module):
    def __init__(self, dim, base=10000):
        super().__init__()
        self.register_buffer('inv_freq', (1 / base) ** (torch.arange(0, dim, 2) / dim))
        self.seq_len_cached = None
        self.cos_cached = None
        self.sin_cached = None

    def forward(self, x):
        seq_len = x.shape[1]
        if seq_len != self.seq_len_cached:
            t = torch.arange(seq_len, device=x.device)
            freqs = torch.outer(t, self.inv_freq)
            self.seq_len_cached = seq_len
            self.cos_cached = freqs.cos()
            self.sin_cached = freqs.sin()
        cos, sin = self.cos_cached[None, :, None, :], self.sin_cached[None, :, None, :]
        # apply_rotary_emb(x, cos, sin)
        x1, x2 = x.chunk(2, dim=3)
        y1 = x1 * cos + x2 * sin
        y2 = x1 * (-sin) + x2 * cos
        return torch.cat((y1, y2), 3).type_as(x)
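# Sanity sketch (not called during training): each (x1_i, x2_i) channel pair above is rotated
# by angle t * inv_freq_i, so rotary embedding changes phase but preserves the norm of every
# head vector exactly, up to floating-point rounding.
def _demo_rotary_preserves_norm():
    rot = Rotary(dim=64)
    x = torch.randn(1, 16, 4, 64)  # (batch, seq_len, n_head, head_dim)
    y = rot(x)
    assert torch.allclose(x.norm(dim=-1), y.norm(dim=-1), atol=1e-5)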
class CausalSelfAttention(nn.Module):
    def __init__(self, dim, n_head):
        super().__init__()
        assert dim % n_head == 0
        self.n_head = n_head
        self.c_q = CastedLinear(dim, dim)
        self.c_k = CastedLinear(dim, dim)
        self.c_v = CastedLinear(dim, dim)
        # value residual lambda
        self.lamb = nn.Parameter(torch.tensor(0.5)) # @Grad62304977
        # rotary embeddings
        self.rotary = Rotary(dim // n_head) # dim // n_head = head_dim
        # output projection
        self.c_proj = CastedLinear(dim, dim)
        self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977

    def forward(self, x, vi, block_mask):
        B, T = x.size(0), x.size(1) # batch size, sequence length
        assert B == 1, "Must use batch size = 1 for FlexAttention"
        q = self.c_q(x).view(B, T, self.n_head, -1)
        k = self.c_k(x).view(B, T, self.n_head, -1)
        v = self.c_v(x).view(B, T, self.n_head, -1)
        v = (1 - self.lamb) * v + self.lamb * vi.view_as(v) # @Grad62304977
        q, k = norm(q), norm(k) # QK norm suggested by @Grad62304977
        q, k = self.rotary(q), self.rotary(k)
        y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask)
        y = y.transpose(1, 2).contiguous().view_as(x) # re-assemble all head outputs side by side
        y = self.c_proj(y)
        return y
class MLP(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.c_fc = CastedLinear(dim, 4 * dim)
        self.c_proj = CastedLinear(4 * dim, dim)
        self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977

    def forward(self, x):
        x = self.c_fc(x)
        x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
        x = self.c_proj(x)
        return x
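# The activation above is "squared ReLU" (https://arxiv.org/abs/2109.08668v2). A one-line
# sketch of its shape (illustrative, not called during training): zero for x <= 0 and x^2 for
# x > 0, so it is smoother than plain ReLU at the origin while staying cheap to compute.
def _demo_squared_relu(x):
    return torch.where(x > 0, x * x, torch.zeros_like(x))  # equals F.relu(x).square()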
class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attn = CausalSelfAttention(config.n_embd, config.n_head)
        self.mlp = MLP(config.n_embd)
        self.lambdas = nn.Parameter(torch.tensor([1., 0.]))

    def forward(self, x, vi, x0, block_mask):
        x = self.lambdas[0] * x + self.lambdas[1] * x0
        x = x + self.attn(norm(x), vi, block_mask)
        x = x + self.mlp(norm(x))
        return x
# -----------------------------------------------------------------------------
# The main GPT-2 model
@dataclass
class GPTConfig:
    vocab_size : int = 50304
    n_layer : int = 12
    n_head : int = 6 # head dim 128 suggested by @Grad62304977
    n_embd : int = 768
class GPT(nn.Module):
    def __init__(self, config):
        super().__init__()

        # U-net design by @brendanh0gan
        self.num_encoder_layers = config.n_layer // 2 # Half of the layers for encoder
        self.num_decoder_layers = config.n_layer - self.num_encoder_layers # Remaining for decoder
        # Add learnable skip connection weights for decoder layers
        self.skip_weights = nn.Parameter(torch.ones(self.num_decoder_layers))

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),
            # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual learning
            vte = nn.Embedding(config.vocab_size, config.n_embd*12),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
        ))
        self.lm_head = CastedLinear(config.n_embd, config.vocab_size)
        self.lm_head.weight.data.zero_() # @Grad62304977

    def forward(self, idx, target, attn_blocksize):
        docs = (idx == 50256).cumsum(0)
        def document_causal_mask(b, h, q_idx, kv_idx):
            causal_mask = q_idx >= kv_idx
            document_mask = docs[q_idx] == docs[kv_idx]
            window_mask = q_idx - kv_idx < attn_blocksize
            return causal_mask & document_mask & window_mask

        S = len(idx)
        block_mask = create_block_mask(document_causal_mask, None, None, S, S, device="cuda", _compile=True)

        # forward the GPT model itself
        x = self.transformer.wte(idx[None]) # token embeddings of shape (b, t, n_embd)
        x = norm(x) # @Grad62304977
        x0 = x
        vi = self.transformer.vte(idx[None]).chunk(12, dim=-1)

        # Store outputs for U-Net skip connections
        skip_connections = []
        # Encoder pass - process only the first half of the blocks
        for i in range(self.num_encoder_layers):
            x = self.transformer.h[i](x, vi[i], x0, block_mask)
            skip_connections.append(x)
        # Decoder pass - process the remaining blocks with weighted skip connections
        for i in range(self.num_decoder_layers):
            x = x + self.skip_weights[i] * skip_connections.pop()
            x = self.transformer.h[self.num_encoder_layers + i](x, vi[self.num_encoder_layers+i], x0, block_mask)

        x = norm(x)
        logits = self.lm_head(x)
        logits = 30 * torch.tanh(logits / 30) # @Grad62304977
        logits = logits.float()
        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target.view(-1))
        return loss
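# Pure-Python sketch of the mask predicate above (illustrative, not called during training):
# key position kv is visible from query position q only if it is causal (kv <= q), in the same
# document (no attention across a 50256 <|endoftext|> boundary), and within the sliding window.
def _demo_document_causal_mask(tokens, attn_blocksize):
    docs, d = [], 0
    for t in tokens:
        d += int(t == 50256)  # mirrors (idx == 50256).cumsum(0)
        docs.append(d)
    S = len(tokens)
    return [[(q >= kv) and (docs[q] == docs[kv]) and (q - kv < attn_blocksize)
             for kv in range(S)] for q in range(S)]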
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _peek_data_shard(filename):
    # only reads the header, returns header data
    with open(filename, "rb") as f:
        # first read the header, which is 256 int32 integers (4 bytes each)
        header = np.frombuffer(f.read(256*4), dtype=np.int32)
    if header[0] != 20240520:
        print("ERROR: magic number mismatch in the data .bin file!")
        print("---> HINT: Are you passing in a correct file with --input_bin?")
        print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
        print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
        exit(1)
    assert header[1] == 1, "unsupported version"
    ntok = header[2] # number of tokens (claimed)
    return ntok # for now just return the number of tokens
def _load_data_shard(filename):
    with open(filename, "rb") as f:
        # first read the header, which is 256 int32 integers (4 bytes each)
        header = np.frombuffer(f.read(256*4), dtype=np.int32)
        assert header[0] == 20240520, "magic number mismatch in the data .bin file"
        assert header[1] == 1, "unsupported version"
        ntok = header[2] # number of tokens (claimed)
        # the rest of it are tokens, stored as uint16
        tokens = np.frombuffer(f.read(), dtype=np.uint16)
    assert len(tokens) == ntok, "number of tokens read does not match header?"
    return tokens
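# Sketch of the on-disk shard layout these readers expect (the path and token values are made
# up for illustration): a header of 256 int32s holding the magic 20240520, version 1, and the
# token count, followed by the tokens themselves as uint16.
def _demo_write_and_read_tiny_shard(path='/tmp/demo_shard.bin'):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520  # magic
    header[1] = 1         # version
    tokens = np.array([50256, 1, 2, 3], dtype=np.uint16)
    header[2] = len(tokens)
    with open(path, 'wb') as f:
        f.write(header.tobytes())
        f.write(tokens.tobytes())
    assert _peek_data_shard(path) == len(tokens)
    assert (_load_data_shard(path) == tokens).all()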
class DistributedDataLoader:
    def __init__(self, filename_pattern, T, process_rank, num_processes):
        self.process_rank = process_rank
        self.num_processes = num_processes
        self.T = T

        # glob files that match the pattern
        self.files = sorted(glob.glob(filename_pattern))
        assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"

        # load and validate all data shards, count number of tokens in total
        ntok_total = 0
        for fname in self.files:
            shard_ntok = _peek_data_shard(fname)
            assert shard_ntok >= num_processes * T + 1
            ntok_total += int(shard_ntok)
        self.ntok_total = ntok_total

        self.reset()

    def reset(self):
        self.current_shard = -1
        self.advance()

    def advance(self): # advance to next data shard
        self.current_shard = (self.current_shard + 1) % len(self.files)
        self.current_position = self.process_rank * self.T
        self.tokens = _load_data_shard(self.files[self.current_shard])

    def next_batch(self):
        batch_size = self.T * self.num_processes
        buf = self.tokens[self.current_position:self.current_position+self.T+1]
        buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
        x = buf[:-1] # inputs
        y = buf[1:] # targets
        # advance current position and load next shard if necessary
        self.current_position += batch_size
        if self.current_position + batch_size >= len(self.tokens):
            self.advance()
        return x.cuda(), y.cuda()
# -----------------------------------------------------------------------------
# int main
@dataclass
class Hyperparameters:
    # data hyperparams
    input_bin : str = 'data/fineweb10B/fineweb_train_*.bin' # input .bin to train on
    input_val_bin : str = 'data/fineweb10B/fineweb_val_*.bin' # input .bin to eval validation loss on
    # optimization hyperparams
    batch_size : int = 8 # batch size, in sequences, across all devices
    sequence_length : int = 64*1024 # sequence length, in tokens
    num_iterations : int = 1530 # number of iterations to run
    warmup_iters : int = 0
    cooldown_iters : int = 600 # number of iterations of linear warmup/cooldown for triangular or trapezoidal schedule
    weight_decay : float = 0
    # evaluation and logging hyperparams
    val_loss_every : int = 125 # every how many steps to evaluate val loss? 0 for only at the end
    val_tokens : int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
    save_every : int = 0 # every how many steps to save the checkpoint? 0 for only at the end
args = Hyperparameters()
# set up DDP (distributed data parallel). torchrun sets these env variables
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.
# begin logging
logfile = None
if master_process:
    run_id = str(uuid.uuid4())
    logdir = 'logs/%s/' % run_id
    os.makedirs(logdir, exist_ok=True)
    logfile = 'logs/%s.txt' % run_id
    # create the log file
    with open(logfile, "w") as f:
        # begin the log by printing this file (the Python code)
        f.write(code)
        f.write('='*100 + '\n')
def print0(s, logonly=False):
    if master_process:
        with open(logfile, "a") as f:
            if not logonly:
                print(s)
            f.write(s+'\n')
# log information about the hardware/software environment this is running on
# and print the full `nvidia-smi` to file
print0(f"Running pytorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}\nnvidia-smi:")
import subprocess
result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
print0(f'{result.stdout}', logonly=True)
print0('='*100, logonly=True)
# convenience variables
T = args.sequence_length
# calculate the number of steps to take in the val loop.
assert args.val_tokens % (T * ddp_world_size) == 0
val_steps = args.val_tokens // (T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (ddp_world_size) == 0
train_accumulation_steps = args.batch_size // ddp_world_size
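# worked numbers for this run (assuming ddp_world_size == 8, matching the nvidia-smi log below):
# 8 sequences of 64*1024 tokens across 8 ranks gives train_accumulation_steps = 1 and
# 8 * 64 * 1024 = 524288 tokens consumed per optimizer step
assert args.batch_size * T == 524288  # global tokens per training step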
# load tokens
train_loader = DistributedDataLoader(args.input_bin, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, T, ddp_rank, ddp_world_size)
print0(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
print0(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
print0('='*100, logonly=True)
x, y = train_loader.next_batch()
# there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304
model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=12, n_head=6, n_embd=768))
model = model.cuda().bfloat16()
for m in model.modules():
    if isinstance(m, CastedLinear):
        m.float()
if hasattr(config, "coordinate_descent_tuning"):
    config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
# init the optimizer(s)
optimizer1 = torch.optim.Adam([raw_model.transformer.wte.weight, raw_model.transformer.vte.weight], lr=0.6, betas=(0.8, 0.95), fused=True)
optimizer2 = torch.optim.Adam([raw_model.lm_head.weight], lr=0.008, betas=(0.8, 0.95), fused=True)
params = list(raw_model.transformer.h.parameters())
matrix_params = [p for p in params if p.ndim == 2]
scalar_params = [p for p in params if p.ndim < 2] + [raw_model.skip_weights]
optimizer3 = Muon(matrix_params, lr=0.05, momentum=0.95)
optimizer4 = torch.optim.Adam(scalar_params, lr=0.04, betas=(0.8, 0.95), fused=True) # note that this learning rate is neither sensitive nor tuned
optimizers = [optimizer1, optimizer2, optimizer3, optimizer4]
# learning rate decay scheduler (linear warmup and cooldown)
def get_lr(it):
    assert it <= args.num_iterations
    # 1) linear warmup for warmup_iters steps
    if it < args.warmup_iters:
        return (it+1) / args.warmup_iters
    # 2) constant lr for a while
    elif it < args.num_iterations - args.cooldown_iters:
        return 1.0
    # 3) linear cooldown
    else:
        decay_ratio = (args.num_iterations - it) / args.cooldown_iters
        return decay_ratio
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]
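# Sketch of the resulting trapezoidal schedule for this run's hyperparameters (illustrative,
# not called during training): no warmup, a constant multiplier until step 930, then a
# 600-step linear cooldown to zero at step 1530.
def _demo_lr_schedule():
    assert get_lr(0) == 1.0                # warmup_iters=0, so the constant phase starts immediately
    assert get_lr(929) == 1.0              # last step of the constant phase
    assert abs(get_lr(1230) - 0.5) < 1e-6  # halfway through the cooldown
    assert get_lr(1530) == 0.0             # fully decayed at the final step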
# Start training loop
training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
for step in range(args.num_iterations + 1):
    last_step = (step == args.num_iterations)
    # This effectively ignores timing first 10 steps, which are slower for weird reasons.
    # Alternately, and slightly more correctly in terms of benchmarking, we could do 10
    # steps with dummy data first, and then re-initialize the model and reset the loader.
    if step == 10:
        training_time_ms = 0
        t0 = time.time()
    timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val

    # Set the attention blocksize for the current step, in chunks of 64. By @fernbear.bsky.social
    attn_blocksize = torch.tensor(64*((step/args.num_iterations * (1792 - 64) + 64)//64), dtype=torch.int, device='cuda')

    # once in a while evaluate the validation dataset
    if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
        # stop the clock
        torch.cuda.synchronize()
        training_time_ms += 1000 * (time.time() - t0)
        # run validation batches
        model.eval()
        val_loader.reset()
        val_loss = 0.0
        for _ in range(val_steps):
            with torch.no_grad():
                x_val, y_val = val_loader.next_batch()
                val_loss += model(x_val, y_val, attn_blocksize=attn_blocksize)
        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
        val_loss /= val_steps
        # log val loss to console and to logfile
        print0(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
        # start the clock again
        torch.cuda.synchronize()
        t0 = time.time()

    if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
        # stop the clock
        torch.cuda.synchronize()
        training_time_ms += 1000 * (time.time() - t0)
        # save the state of the training process
        log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
        torch.save(log, 'logs/%s/state_step%06d.pt' % (run_id, step))
        # start the clock again
        torch.cuda.synchronize()
        t0 = time.time()

    # bit confusing: we want to make sure to eval on 0th iteration
    # but also after the very last iteration. so we loop for step <= num_iterations
    # instead of just < num_iterations (one extra due to <=), only to do
    # the validation/sampling one last time, and then we break right here as we're done.
    if last_step:
        break

    # --------------- TRAINING SECTION BEGIN -----------------
    model.train()
    for i in range(1, train_accumulation_steps+1):
        ctx = model.no_sync() if i < train_accumulation_steps else contextlib.nullcontext()
        with ctx: # there's no need to sync gradients every accumulation step
            # forward pass
            loss = model(x, y, attn_blocksize=attn_blocksize)
            # advance the dataset for the next batch
            x, y = train_loader.next_batch()
            # backward pass
            loss.backward()
        train_loss = loss.detach()
    for p in model.parameters():
        p.grad /= train_accumulation_steps
    # momentum warmup for Muon
    frac = min(step/300, 1)
    optimizer3.param_groups[0]['momentum'] = (1 - frac) * 0.85 + frac * 0.95
    # step the optimizers and schedulers
    for opt, sched in zip(optimizers, schedulers):
        opt.step()
        sched.step()
    # null the gradients
    model.zero_grad(set_to_none=True)
    # --------------- TRAINING SECTION END -------------------
    # everything that follows now is just diagnostics, prints, logging, etc.

    #dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
    approx_time = training_time_ms + 1000 * (time.time() - t0)
    print0(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")

if master_process:
    print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")
# -------------------------------------------------------------------------
# clean up nice
dist.destroy_process_group()
====================================================================================================
Running pytorch 2.6.0.dev20241203+cu124 compiled for CUDA 12.4
nvidia-smi:
Thu Dec 5 01:16:35 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA H100 80GB HBM3 On | 00000000:19:00.0 Off | 0 |
| N/A 38C P0 75W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA H100 80GB HBM3 On | 00000000:3B:00.0 Off | 0 |
| N/A 30C P0 107W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA H100 80GB HBM3 On | 00000000:4C:00.0 Off | 0 |
| N/A 30C P0 117W / 700W | 41MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 3 NVIDIA H100 80GB HBM3 On | 00000000:5D:00.0 Off | 0 |
| N/A 37C P0 92W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 4 NVIDIA H100 80GB HBM3 On | 00000000:9B:00.0 Off | 0 |
| N/A 38C P0 88W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 5 NVIDIA H100 80GB HBM3 On | 00000000:BB:00.0 Off | 0 |
| N/A 29C P0 109W / 700W | 39MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 6 NVIDIA H100 80GB HBM3 On | 00000000:CB:00.0 Off | 0 |
| N/A 38C P0 110W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 7 NVIDIA H100 80GB HBM3 On | 00000000:DB:00.0 Off | 0 |
| N/A 29C P0 104W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
+---------------------------------------------------------------------------------------+
====================================================================================================
Training DataLoader: total number of tokens: 1100000000 across 11 files
Validation DataLoader: total number of tokens: 100000000 across 1 files
====================================================================================================
step:0/1530 val_loss:10.8258 train_time:0ms step_avg:nanms
step:1/1530 train_loss:10.8258 train_time:32524ms step_avg:nanms
step:2/1530 train_loss:10.0751 train_time:32636ms step_avg:nanms
step:3/1530 train_loss:8.3516 train_time:32796ms step_avg:nanms
step:4/1530 train_loss:7.6118 train_time:32959ms step_avg:nanms
step:5/1530 train_loss:7.5069 train_time:33119ms step_avg:nanms
step:6/1530 train_loss:7.0192 train_time:33280ms step_avg:nanms
step:7/1530 train_loss:7.1690 train_time:33441ms step_avg:nanms
step:8/1530 train_loss:6.7471 train_time:33602ms step_avg:nanms
step:9/1530 train_loss:6.6580 train_time:33763ms step_avg:nanms
step:10/1530 train_loss:6.6250 train_time:33922ms step_avg:nanms
step:11/1530 train_loss:6.4494 train_time:116ms step_avg:nanms
step:12/1530 train_loss:6.3327 train_time:276ms step_avg:nanms
step:13/1530 train_loss:6.2269 train_time:437ms step_avg:145.53ms
step:14/1530 train_loss:6.2111 train_time:597ms step_avg:149.35ms
step:15/1530 train_loss:6.1673 train_time:757ms step_avg:151.42ms
step:16/1530 train_loss:6.1258 train_time:918ms step_avg:152.99ms
step:17/1530 train_loss:6.1546 train_time:1078ms step_avg:154.07ms
step:18/1530 train_loss:5.9418 train_time:1239ms step_avg:154.82ms
step:19/1530 train_loss:5.9595 train_time:1399ms step_avg:155.45ms
step:20/1530 train_loss:5.6948 train_time:1558ms step_avg:155.84ms
step:21/1530 train_loss:5.9505 train_time:1719ms step_avg:156.31ms
step:22/1530 train_loss:6.1680 train_time:1879ms step_avg:156.60ms
step:23/1530 train_loss:5.8572 train_time:2039ms step_avg:156.88ms
step:24/1530 train_loss:6.0206 train_time:2201ms step_avg:157.20ms
step:25/1530 train_loss:5.6700 train_time:2360ms step_avg:157.31ms
step:26/1530 train_loss:5.5931 train_time:2520ms step_avg:157.48ms
step:27/1530 train_loss:5.7516 train_time:2681ms step_avg:157.68ms
step:28/1530 train_loss:5.4139 train_time:2840ms step_avg:157.80ms
step:29/1530 train_loss:5.6570 train_time:3000ms step_avg:157.91ms
step:30/1530 train_loss:5.4594 train_time:3161ms step_avg:158.06ms
step:31/1530 train_loss:5.4344 train_time:3321ms step_avg:158.13ms
step:32/1530 train_loss:5.2915 train_time:3484ms step_avg:158.35ms
step:33/1530 train_loss:5.5739 train_time:3643ms step_avg:158.38ms
step:34/1530 train_loss:5.4857 train_time:3804ms step_avg:158.49ms
step:35/1530 train_loss:5.5961 train_time:3964ms step_avg:158.56ms
step:36/1530 train_loss:5.5413 train_time:4123ms step_avg:158.58ms
step:37/1530 train_loss:5.4507 train_time:4284ms step_avg:158.66ms
step:38/1530 train_loss:5.2959 train_time:4445ms step_avg:158.74ms
step:39/1530 train_loss:5.3059 train_time:4604ms step_avg:158.75ms
step:40/1530 train_loss:5.2314 train_time:4764ms step_avg:158.80ms
step:41/1530 train_loss:5.2101 train_time:4923ms step_avg:158.81ms
step:42/1530 train_loss:5.1635 train_time:5085ms step_avg:158.89ms
step:43/1530 train_loss:5.2685 train_time:5243ms step_avg:158.87ms
step:44/1530 train_loss:5.2279 train_time:5404ms step_avg:158.96ms
step:45/1530 train_loss:5.3812 train_time:5565ms step_avg:158.99ms
step:46/1530 train_loss:5.1668 train_time:5724ms step_avg:159.00ms
step:47/1530 train_loss:5.0469 train_time:5885ms step_avg:159.05ms
step:48/1530 train_loss:5.2052 train_time:6046ms step_avg:159.10ms
step:49/1530 train_loss:5.1382 train_time:6206ms step_avg:159.12ms
step:50/1530 train_loss:5.2369 train_time:6365ms step_avg:159.12ms
step:51/1530 train_loss:5.1316 train_time:6527ms step_avg:159.20ms
step:52/1530 train_loss:5.0178 train_time:6688ms step_avg:159.25ms
step:53/1530 train_loss:5.1516 train_time:6848ms step_avg:159.25ms
step:54/1530 train_loss:4.9787 train_time:7009ms step_avg:159.30ms
step:55/1530 train_loss:5.3834 train_time:7170ms step_avg:159.34ms
step:56/1530 train_loss:5.0122 train_time:7332ms step_avg:159.38ms
step:57/1530 train_loss:4.8737 train_time:7493ms step_avg:159.43ms
step:58/1530 train_loss:5.0304 train_time:7653ms step_avg:159.44ms
step:59/1530 train_loss:5.0165 train_time:7814ms step_avg:159.47ms
step:60/1530 train_loss:5.1480 train_time:7974ms step_avg:159.48ms
step:61/1530 train_loss:4.8535 train_time:8136ms step_avg:159.52ms
step:62/1530 train_loss:4.9701 train_time:8297ms step_avg:159.55ms
step:63/1530 train_loss:4.9618 train_time:8456ms step_avg:159.55ms
step:64/1530 train_loss:4.9433 train_time:8616ms step_avg:159.56ms
step:65/1530 train_loss:4.7815 train_time:8777ms step_avg:159.58ms
step:66/1530 train_loss:4.9012 train_time:8938ms step_avg:159.60ms
step:67/1530 train_loss:4.8068 train_time:9098ms step_avg:159.62ms
step:68/1530 train_loss:5.1025 train_time:9258ms step_avg:159.62ms
step:69/1530 train_loss:4.7249 train_time:9419ms step_avg:159.64ms
step:70/1530 train_loss:4.8476 train_time:9578ms step_avg:159.64ms
step:71/1530 train_loss:4.9746 train_time:9738ms step_avg:159.64ms
step:72/1530 train_loss:4.8950 train_time:9899ms step_avg:159.66ms
step:73/1530 train_loss:4.7548 train_time:10059ms step_avg:159.67ms
step:74/1530 train_loss:4.8930 train_time:10219ms step_avg:159.68ms
step:75/1530 train_loss:4.8338 train_time:10379ms step_avg:159.68ms
step:76/1530 train_loss:4.7975 train_time:10539ms step_avg:159.68ms
step:77/1530 train_loss:4.9071 train_time:10699ms step_avg:159.69ms
step:78/1530 train_loss:5.1314 train_time:10860ms step_avg:159.71ms
step:79/1530 train_loss:4.7924 train_time:11020ms step_avg:159.70ms
step:80/1530 train_loss:4.8415 train_time:11180ms step_avg:159.71ms
step:81/1530 train_loss:4.6377 train_time:11340ms step_avg:159.71ms
step:82/1530 train_loss:4.8089 train_time:11500ms step_avg:159.72ms
step:83/1530 train_loss:4.7607 train_time:11659ms step_avg:159.72ms
step:84/1530 train_loss:4.7552 train_time:11819ms step_avg:159.72ms
step:85/1530 train_loss:4.6118 train_time:11980ms step_avg:159.73ms
step:86/1530 train_loss:4.8222 train_time:12139ms step_avg:159.72ms
step:87/1530 train_loss:4.7359 train_time:12300ms step_avg:159.74ms
step:88/1530 train_loss:4.7273 train_time:12460ms step_avg:159.74ms
step:89/1530 train_loss:4.7102 train_time:12620ms step_avg:159.74ms
step:90/1530 train_loss:4.6515 train_time:12780ms step_avg:159.75ms
step:91/1530 train_loss:4.6406 train_time:12939ms step_avg:159.74ms
step:92/1530 train_loss:4.8004 train_time:13101ms step_avg:159.77ms
step:93/1530 train_loss:4.6138 train_time:13260ms step_avg:159.76ms
step:94/1530 train_loss:4.6391 train_time:13420ms step_avg:159.76ms
step:95/1530 train_loss:4.6914 train_time:13580ms step_avg:159.76ms
step:96/1530 train_loss:4.5856 train_time:13740ms step_avg:159.77ms
step:97/1530 train_loss:4.6196 train_time:13900ms step_avg:159.77ms
step:98/1530 train_loss:4.5702 train_time:14059ms step_avg:159.76ms
step:99/1530 train_loss:4.6651 train_time:14219ms step_avg:159.77ms
step:100/1530 train_loss:4.6749 train_time:14379ms step_avg:159.77ms
step:101/1530 train_loss:4.5163 train_time:14540ms step_avg:159.78ms
step:102/1530 train_loss:4.6938 train_time:14700ms step_avg:159.79ms
step:103/1530 train_loss:4.5772 train_time:14860ms step_avg:159.78ms
step:104/1530 train_loss:4.5368 train_time:15021ms step_avg:159.79ms
step:105/1530 train_loss:4.5460 train_time:15180ms step_avg:159.79ms
step:106/1530 train_loss:4.5834 train_time:15340ms step_avg:159.79ms
step:107/1530 train_loss:4.5055 train_time:15500ms step_avg:159.80ms
step:108/1530 train_loss:4.3749 train_time:15660ms step_avg:159.80ms
step:109/1530 train_loss:4.4818 train_time:15819ms step_avg:159.79ms
step:110/1530 train_loss:4.4768 train_time:15980ms step_avg:159.80ms
step:111/1530 train_loss:4.4272 train_time:16141ms step_avg:159.81ms
step:112/1530 train_loss:4.5966 train_time:16300ms step_avg:159.80ms
step:113/1530 train_loss:4.4851 train_time:16459ms step_avg:159.80ms
step:114/1530 train_loss:4.3585 train_time:16619ms step_avg:159.80ms
step:115/1530 train_loss:4.5108 train_time:16782ms step_avg:159.83ms
step:116/1530 train_loss:4.4703 train_time:16945ms step_avg:159.86ms
step:117/1530 train_loss:4.3654 train_time:17110ms step_avg:159.90ms
step:118/1530 train_loss:4.6011 train_time:17273ms step_avg:159.94ms
step:119/1530 train_loss:4.4507 train_time:17437ms step_avg:159.97ms
step:120/1530 train_loss:4.3301 train_time:17601ms step_avg:160.01ms
step:121/1530 train_loss:4.2981 train_time:17763ms step_avg:160.03ms
step:122/1530 train_loss:4.4468 train_time:17927ms step_avg:160.06ms
step:123/1530 train_loss:4.2793 train_time:18091ms step_avg:160.10ms
step:124/1530 train_loss:4.5840 train_time:18255ms step_avg:160.13ms
step:125/1530 train_loss:4.4426 train_time:18419ms step_avg:160.17ms
step:125/1530 val_loss:4.3918 train_time:18467ms step_avg:160.58ms
step:126/1530 train_loss:4.4044 train_time:18585ms step_avg:160.22ms
step:127/1530 train_loss:4.4321 train_time:18751ms step_avg:160.27ms
step:128/1530 train_loss:4.3778 train_time:18915ms step_avg:160.30ms
step:129/1530 train_loss:4.6998 train_time:19079ms step_avg:160.33ms
step:130/1530 train_loss:4.3570 train_time:19243ms step_avg:160.36ms
step:131/1530 train_loss:4.3881 train_time:19406ms step_avg:160.38ms
step:132/1530 train_loss:4.3493 train_time:19572ms step_avg:160.42ms
step:133/1530 train_loss:4.4554 train_time:19736ms step_avg:160.46ms
step:134/1530 train_loss:4.2601 train_time:19899ms step_avg:160.48ms
step:135/1530 train_loss:4.4513 train_time:20064ms step_avg:160.51ms
step:136/1530 train_loss:4.2186 train_time:20228ms step_avg:160.54ms
step:137/1530 train_loss:4.3899 train_time:20392ms step_avg:160.57ms
step:138/1530 train_loss:4.2862 train_time:20556ms step_avg:160.59ms
step:139/1530 train_loss:4.3851 train_time:20719ms step_avg:160.61ms
step:140/1530 train_loss:4.4800 train_time:20882ms step_avg:160.63ms
step:141/1530 train_loss:4.3122 train_time:21045ms step_avg:160.65ms
step:142/1530 train_loss:4.3144 train_time:21210ms step_avg:160.68ms
step:143/1530 train_loss:4.2683 train_time:21375ms step_avg:160.71ms
step:144/1530 train_loss:4.3576 train_time:21539ms step_avg:160.74ms
step:145/1530 train_loss:4.3236 train_time:21702ms step_avg:160.76ms
step:146/1530 train_loss:4.1796 train_time:21867ms step_avg:160.79ms
step:147/1530 train_loss:4.3328 train_time:22031ms step_avg:160.81ms
step:148/1530 train_loss:4.3719 train_time:22195ms step_avg:160.83ms
step:149/1530 train_loss:4.3128 train_time:22360ms step_avg:160.86ms
step:150/1530 train_loss:4.4524 train_time:22523ms step_avg:160.88ms
step:151/1530 train_loss:4.2775 train_time:22686ms step_avg:160.89ms
step:152/1530 train_loss:4.2706 train_time:22851ms step_avg:160.92ms
step:153/1530 train_loss:4.3700 train_time:23015ms step_avg:160.95ms
step:154/1530 train_loss:4.3626 train_time:23180ms step_avg:160.97ms
step:155/1530 train_loss:4.2753 train_time:23344ms step_avg:160.99ms
step:156/1530 train_loss:4.3565 train_time:23508ms step_avg:161.01ms
step:157/1530 train_loss:4.4051 train_time:23672ms step_avg:161.03ms
step:158/1530 train_loss:4.2470 train_time:23837ms step_avg:161.06ms
step:159/1530 train_loss:4.3104 train_time:24001ms step_avg:161.08ms
step:160/1530 train_loss:4.1351 train_time:24164ms step_avg:161.09ms
step:161/1530 train_loss:4.3552 train_time:24327ms step_avg:161.11ms
step:162/1530 train_loss:4.3682 train_time:24490ms step_avg:161.12ms
step:163/1530 train_loss:4.3541 train_time:24655ms step_avg:161.14ms
step:164/1530 train_loss:4.1937 train_time:24818ms step_avg:161.16ms
step:165/1530 train_loss:4.2918 train_time:24982ms step_avg:161.17ms
step:166/1530 train_loss:4.3503 train_time:25145ms step_avg:161.19ms
step:167/1530 train_loss:4.2115 train_time:25309ms step_avg:161.21ms
step:168/1530 train_loss:4.2976 train_time:25474ms step_avg:161.23ms
step:169/1530 train_loss:4.1729 train_time:25638ms step_avg:161.24ms
step:170/1530 train_loss:4.0211 train_time:25802ms step_avg:161.26ms
step:171/1530 train_loss:4.2141 train_time:25965ms step_avg:161.27ms
step:172/1530 train_loss:4.2126 train_time:26127ms step_avg:161.28ms
step:173/1530 train_loss:4.2754 train_time:26290ms step_avg:161.29ms
step:174/1530 train_loss:4.4159 train_time:26454ms step_avg:161.30ms
step:175/1530 train_loss:4.2416 train_time:26617ms step_avg:161.31ms
step:176/1530 train_loss:4.0965 train_time:26779ms step_avg:161.32ms
step:177/1530 train_loss:4.0712 train_time:26942ms step_avg:161.33ms
step:178/1530 train_loss:4.1900 train_time:27103ms step_avg:161.33ms
step:179/1530 train_loss:4.1290 train_time:27266ms step_avg:161.34ms
step:180/1530 train_loss:4.1228 train_time:27429ms step_avg:161.35ms
step:181/1530 train_loss:4.3158 train_time:27592ms step_avg:161.36ms
step:182/1530 train_loss:4.1639 train_time:27755ms step_avg:161.37ms
step:183/1530 train_loss:4.1254 train_time:27918ms step_avg:161.37ms
step:184/1530 train_loss:4.1231 train_time:28081ms step_avg:161.38ms
step:185/1530 train_loss:4.2124 train_time:28243ms step_avg:161.39ms
step:186/1530 train_loss:4.1842 train_time:28404ms step_avg:161.39ms
step:187/1530 train_loss:4.2501 train_time:28568ms step_avg:161.40ms
step:188/1530 train_loss:4.1721 train_time:28875ms step_avg:162.22ms
step:189/1530 train_loss:4.1167 train_time:29212ms step_avg:163.20ms
step:190/1530 train_loss:4.2108 train_time:29376ms step_avg:163.20ms
step:191/1530 train_loss:4.0876 train_time:29538ms step_avg:163.20ms
step:192/1530 train_loss:4.0560 train_time:29700ms step_avg:163.19ms
step:193/1530 train_loss:4.2577 train_time:29863ms step_avg:163.19ms
step:194/1530 train_loss:4.1809 train_time:30026ms step_avg:163.18ms
step:195/1530 train_loss:4.3663 train_time:30188ms step_avg:163.18ms
step:196/1530 train_loss:4.1825 train_time:30353ms step_avg:163.19ms
step:197/1530 train_loss:4.0516 train_time:30517ms step_avg:163.19ms
step:198/1530 train_loss:4.1881 train_time:30680ms step_avg:163.19ms
step:199/1530 train_loss:4.0353 train_time:30843ms step_avg:163.19ms
step:200/1530 train_loss:4.1164 train_time:31006ms step_avg:163.19ms
step:201/1530 train_loss:4.0126 train_time:31168ms step_avg:163.18ms
step:202/1530 train_loss:4.2633 train_time:31332ms step_avg:163.19ms
step:203/1530 train_loss:4.0727 train_time:31496ms step_avg:163.19ms
step:204/1530 train_loss:4.1936 train_time:31659ms step_avg:163.19ms
step:205/1530 train_loss:4.2559 train_time:31822ms step_avg:163.19ms
step:206/1530 train_loss:3.9552 train_time:31984ms step_avg:163.19ms
step:207/1530 train_loss:4.0912 train_time:32146ms step_avg:163.18ms
step:208/1530 train_loss:4.1175 train_time:32309ms step_avg:163.18ms
step:209/1530 train_loss:4.2425 train_time:32472ms step_avg:163.18ms
step:210/1530 train_loss:4.1850 train_time:32636ms step_avg:163.18ms
step:211/1530 train_loss:4.0671 train_time:32799ms step_avg:163.18ms
step:212/1530 train_loss:4.1276 train_time:32961ms step_avg:163.18ms
step:213/1530 train_loss:4.0455 train_time:33123ms step_avg:163.17ms
step:214/1530 train_loss:4.1181 train_time:33285ms step_avg:163.16ms
step:215/1530 train_loss:3.9567 train_time:33447ms step_avg:163.16ms
step:216/1530 train_loss:4.0130 train_time:33611ms step_avg:163.16ms
step:217/1530 train_loss:4.0134 train_time:33775ms step_avg:163.16ms
step:218/1530 train_loss:4.0834 train_time:33938ms step_avg:163.16ms
step:219/1530 train_loss:4.0776 train_time:34100ms step_avg:163.16ms
step:220/1530 train_loss:4.0933 train_time:34263ms step_avg:163.16ms
step:221/1530 train_loss:4.0976 train_time:34425ms step_avg:163.15ms
step:222/1530 train_loss:4.0027 train_time:34587ms step_avg:163.15ms
step:223/1530 train_loss:3.9913 train_time:34751ms step_avg:163.15ms
step:224/1530 train_loss:4.3082 train_time:34914ms step_avg:163.15ms
step:225/1530 train_loss:3.9238 train_time:35078ms step_avg:163.15ms
step:226/1530 train_loss:3.9945 train_time:35240ms step_avg:163.15ms
step:227/1530 train_loss:3.9982 train_time:35403ms step_avg:163.15ms
step:228/1530 train_loss:4.1439 train_time:35567ms step_avg:163.15ms
step:229/1530 train_loss:3.9272 train_time:35734ms step_avg:163.17ms
step:230/1530 train_loss:4.0509 train_time:35900ms step_avg:163.18ms
step:231/1530 train_loss:3.9147 train_time:36065ms step_avg:163.19ms
step:232/1530 train_loss:3.9777 train_time:36233ms step_avg:163.21ms
step:233/1530 train_loss:4.1014 train_time:36398ms step_avg:163.22ms
step:234/1530 train_loss:4.0477 train_time:36564ms step_avg:163.23ms
step:235/1530 train_loss:3.9097 train_time:36730ms step_avg:163.25ms
step:236/1530 train_loss:4.0880 train_time:36896ms step_avg:163.26ms
step:237/1530 train_loss:4.0881 train_time:37061ms step_avg:163.27ms
step:238/1530 train_loss:3.9514 train_time:37227ms step_avg:163.28ms
step:239/1530 train_loss:4.0952 train_time:37394ms step_avg:163.29ms
step:240/1530 train_loss:4.1229 train_time:37561ms step_avg:163.31ms
step:241/1530 train_loss:3.9711 train_time:37726ms step_avg:163.31ms
step:242/1530 train_loss:4.1477 train_time:37891ms step_avg:163.32ms
step:243/1530 train_loss:4.0198 train_time:38057ms step_avg:163.34ms
step:244/1530 train_loss:4.0919 train_time:38222ms step_avg:163.34ms
step:245/1530 train_loss:4.1479 train_time:38388ms step_avg:163.35ms
step:246/1530 train_loss:4.0651 train_time:38555ms step_avg:163.37ms
step:247/1530 train_loss:4.0073 train_time:38721ms step_avg:163.38ms
step:248/1530 train_loss:4.1066 train_time:38886ms step_avg:163.39ms
step:249/1530 train_loss:3.9335 train_time:39053ms step_avg:163.40ms
step:250/1530 train_loss:3.9801 train_time:39219ms step_avg:163.41ms
step:250/1530 val_loss:4.0077 train_time:39268ms step_avg:163.62ms
step:251/1530 train_loss:4.0798 train_time:39388ms step_avg:163.43ms
step:252/1530 train_loss:4.1737 train_time:39556ms step_avg:163.45ms
step:253/1530 train_loss:3.9391 train_time:39722ms step_avg:163.47ms
step:254/1530 train_loss:3.8832 train_time:39888ms step_avg:163.48ms
step:255/1530 train_loss:4.0868 train_time:40053ms step_avg:163.48ms
step:256/1530 train_loss:3.9991 train_time:40219ms step_avg:163.49ms
step:257/1530 train_loss:3.9987 train_time:40385ms step_avg:163.50ms
step:258/1530 train_loss:3.9961 train_time:40550ms step_avg:163.51ms
step:259/1530 train_loss:4.0429 train_time:40717ms step_avg:163.52ms
step:260/1530 train_loss:4.0716 train_time:40882ms step_avg:163.53ms
step:261/1530 train_loss:4.0281 train_time:41048ms step_avg:163.54ms
step:262/1530 train_loss:3.9950 train_time:41216ms step_avg:163.55ms
step:263/1530 train_loss:3.9019 train_time:41381ms step_avg:163.56ms
step:264/1530 train_loss:3.9961 train_time:41546ms step_avg:163.57ms
step:265/1530 train_loss:3.8707 train_time:41714ms step_avg:163.58ms
step:266/1530 train_loss:3.9258 train_time:41879ms step_avg:163.59ms
step:267/1530 train_loss:3.9396 train_time:42045ms step_avg:163.60ms
step:268/1530 train_loss:3.9609 train_time:42211ms step_avg:163.61ms
step:269/1530 train_loss:3.8610 train_time:42376ms step_avg:163.61ms
step:270/1530 train_loss:4.1037 train_time:42541ms step_avg:163.62ms
step:271/1530 train_loss:3.9814 train_time:42707ms step_avg:163.63ms
step:272/1530 train_loss:3.9382 train_time:42873ms step_avg:163.64ms
step:273/1530 train_loss:3.9453 train_time:43039ms step_avg:163.65ms
step:274/1530 train_loss:4.0510 train_time:43205ms step_avg:163.66ms
step:275/1530 train_loss:4.0645 train_time:43369ms step_avg:163.66ms
step:276/1530 train_loss:4.2491 train_time:43537ms step_avg:163.67ms
step:277/1530 train_loss:4.0503 train_time:43703ms step_avg:163.68ms
step:278/1530 train_loss:4.0902 train_time:43869ms step_avg:163.69ms
step:279/1530 train_loss:4.0075 train_time:44035ms step_avg:163.70ms
step:280/1530 train_loss:4.1972 train_time:44202ms step_avg:163.71ms
step:281/1530 train_loss:3.9747 train_time:44368ms step_avg:163.72ms
step:282/1530 train_loss:3.9536 train_time:44537ms step_avg:163.74ms
step:283/1530 train_loss:3.9240 train_time:44702ms step_avg:163.74ms
step:284/1530 train_loss:4.0558 train_time:44869ms step_avg:163.75ms
step:285/1530 train_loss:4.0597 train_time:45035ms step_avg:163.76ms
step:286/1530 train_loss:4.0941 train_time:45200ms step_avg:163.77ms
step:287/1530 train_loss:3.9060 train_time:45365ms step_avg:163.77ms
step:288/1530 train_loss:4.0129 train_time:45529ms step_avg:163.77ms
step:289/1530 train_loss:3.8747 train_time:45696ms step_avg:163.78ms
step:290/1530 train_loss:3.8675 train_time:45861ms step_avg:163.79ms
step:291/1530 train_loss:3.9148 train_time:46025ms step_avg:163.79ms
step:292/1530 train_loss:3.8695 train_time:46190ms step_avg:163.79ms
step:293/1530 train_loss:3.9125 train_time:46356ms step_avg:163.80ms
step:294/1530 train_loss:3.9456 train_time:46521ms step_avg:163.80ms
step:295/1530 train_loss:3.8457 train_time:46686ms step_avg:163.81ms
step:296/1530 train_loss:3.8706 train_time:46850ms step_avg:163.81ms
step:297/1530 train_loss:3.8702 train_time:47016ms step_avg:163.82ms
step:298/1530 train_loss:3.9740 train_time:47181ms step_avg:163.82ms
step:299/1530 train_loss:3.8279 train_time:47345ms step_avg:163.82ms
step:300/1530 train_loss:3.9807 train_time:47510ms step_avg:163.83ms
step:301/1530 train_loss:3.9724 train_time:47676ms step_avg:163.83ms
step:302/1530 train_loss:3.9431 train_time:47841ms step_avg:163.84ms
step:303/1530 train_loss:3.9881 train_time:48005ms step_avg:163.84ms
step:304/1530 train_loss:3.9776 train_time:48170ms step_avg:163.84ms
step:305/1530 train_loss:4.4647 train_time:48335ms step_avg:163.85ms
step:306/1530 train_loss:3.9403 train_time:48500ms step_avg:163.85ms
step:307/1530 train_loss:3.8428 train_time:48665ms step_avg:163.86ms
step:308/1530 train_loss:3.9851 train_time:48829ms step_avg:163.86ms
step:309/1530 train_loss:3.8788 train_time:48996ms step_avg:163.87ms
step:310/1530 train_loss:4.0988 train_time:49163ms step_avg:163.88ms
step:311/1530 train_loss:3.9409 train_time:49329ms step_avg:163.88ms
step:312/1530 train_loss:3.8680 train_time:49494ms step_avg:163.89ms
step:313/1530 train_loss:3.9373 train_time:49660ms step_avg:163.89ms
step:314/1530 train_loss:4.0707 train_time:49825ms step_avg:163.90ms
step:315/1530 train_loss:3.9572 train_time:49989ms step_avg:163.90ms
step:316/1530 train_loss:3.8017 train_time:50154ms step_avg:163.90ms
step:317/1530 train_loss:3.8844 train_time:50320ms step_avg:163.91ms
step:318/1530 train_loss:3.9270 train_time:50484ms step_avg:163.91ms
step:319/1530 train_loss:3.8926 train_time:50648ms step_avg:163.91ms
step:320/1530 train_loss:4.0166 train_time:50815ms step_avg:163.92ms
step:321/1530 train_loss:3.9599 train_time:50980ms step_avg:163.92ms
step:322/1530 train_loss:3.9333 train_time:51146ms step_avg:163.93ms
step:323/1530 train_loss:4.0156 train_time:51311ms step_avg:163.93ms
step:324/1530 train_loss:3.9588 train_time:51477ms step_avg:163.94ms
step:325/1530 train_loss:4.0239 train_time:51642ms step_avg:163.94ms
step:326/1530 train_loss:3.9046 train_time:51807ms step_avg:163.95ms
step:327/1530 train_loss:4.3946 train_time:51974ms step_avg:163.96ms
step:328/1530 train_loss:4.0803 train_time:52139ms step_avg:163.96ms
step:329/1530 train_loss:3.8002 train_time:52306ms step_avg:163.97ms
step:330/1530 train_loss:3.7598 train_time:52470ms step_avg:163.97ms
step:331/1530 train_loss:3.9837 train_time:52636ms step_avg:163.98ms
step:332/1530 train_loss:3.9184 train_time:52801ms step_avg:163.98ms
step:333/1530 train_loss:3.8938 train_time:52965ms step_avg:163.98ms
step:334/1530 train_loss:3.8532 train_time:53130ms step_avg:163.98ms
step:335/1530 train_loss:4.0175 train_time:53295ms step_avg:163.99ms
step:336/1530 train_loss:3.9657 train_time:53460ms step_avg:163.99ms
step:337/1530 train_loss:4.4350 train_time:53627ms step_avg:164.00ms
step:338/1530 train_loss:3.9404 train_time:53792ms step_avg:164.00ms
step:339/1530 train_loss:3.8739 train_time:53958ms step_avg:164.00ms
step:340/1530 train_loss:3.9397 train_time:54123ms step_avg:164.01ms
step:341/1530 train_loss:3.8559 train_time:54288ms step_avg:164.01ms
step:342/1530 train_loss:3.8131 train_time:54457ms step_avg:164.03ms
step:343/1530 train_loss:3.8458 train_time:54626ms step_avg:164.04ms
step:344/1530 train_loss:4.0091 train_time:54793ms step_avg:164.05ms
step:345/1530 train_loss:3.8244 train_time:54964ms step_avg:164.07ms
step:346/1530 train_loss:3.7744 train_time:55130ms step_avg:164.08ms
step:347/1530 train_loss:3.8097 train_time:55300ms step_avg:164.09ms
step:348/1530 train_loss:3.8640 train_time:55468ms step_avg:164.11ms
step:349/1530 train_loss:3.8304 train_time:55638ms step_avg:164.12ms
step:350/1530 train_loss:3.5736 train_time:55806ms step_avg:164.13ms
step:351/1530 train_loss:3.8289 train_time:55972ms step_avg:164.14ms
step:352/1530 train_loss:4.1969 train_time:56141ms step_avg:164.15ms
step:353/1530 train_loss:3.6734 train_time:56309ms step_avg:164.17ms
step:354/1530 train_loss:3.9305 train_time:56476ms step_avg:164.17ms
step:355/1530 train_loss:3.7913 train_time:56644ms step_avg:164.19ms
step:356/1530 train_loss:3.8845 train_time:56810ms step_avg:164.19ms
step:357/1530 train_loss:3.7683 train_time:56979ms step_avg:164.21ms
step:358/1530 train_loss:3.8698 train_time:57146ms step_avg:164.21ms
step:359/1530 train_loss:3.7833 train_time:57317ms step_avg:164.23ms
step:360/1530 train_loss:3.4359 train_time:57486ms step_avg:164.24ms
step:361/1530 train_loss:4.0212 train_time:57653ms step_avg:164.25ms
step:362/1530 train_loss:3.9216 train_time:57822ms step_avg:164.27ms
step:363/1530 train_loss:3.8446 train_time:57990ms step_avg:164.28ms
step:364/1530 train_loss:3.7502 train_time:58158ms step_avg:164.29ms
step:365/1530 train_loss:3.9201 train_time:58326ms step_avg:164.30ms
step:366/1530 train_loss:3.8640 train_time:58493ms step_avg:164.31ms
step:367/1530 train_loss:3.8569 train_time:58663ms step_avg:164.32ms
step:368/1530 train_loss:3.8566 train_time:58830ms step_avg:164.33ms
step:369/1530 train_loss:3.7507 train_time:58998ms step_avg:164.34ms
step:370/1530 train_loss:3.8739 train_time:59165ms step_avg:164.35ms
step:371/1530 train_loss:3.7373 train_time:59332ms step_avg:164.36ms
step:372/1530 train_loss:3.6987 train_time:59502ms step_avg:164.37ms
step:373/1530 train_loss:3.9219 train_time:59670ms step_avg:164.38ms
step:374/1530 train_loss:3.8363 train_time:59838ms step_avg:164.39ms
step:375/1530 train_loss:3.8077 train_time:60008ms step_avg:164.40ms
step:375/1530 val_loss:3.8286 train_time:60055ms step_avg:164.53ms