Alex-xu committed
Commit: 9dd2df8
Parent: 859032e

Training in progress, step 4000, checkpoint

checkpoint-4000/config.json CHANGED
@@ -16,7 +16,7 @@
   "intermediate_size": 3072,
   "layer_norm_eps": 1e-12,
   "max_blocks": 200,
-  "max_position_embeddings": 1801,
+  "max_position_embeddings": 1805,
   "max_relative_position_embeddings": 8,
   "model_type": "longelm",
   "node_size": 1,
checkpoint-4000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d067815be37b0d22f1bb8db7b997859c8ae75e6abd64c43d7262a388b7dacc65
-size 1010407418
+oid sha256:57472fe8bdd46c789c07db1eb1160b173e99da32e650b26d95aacf8cff139382
+size 1010431994
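
The .pt and .bin entries in this commit are Git LFS pointer files: the repository only stores the sha256 oid and byte size, while the binary itself lives in LFS storage, so each diff simply swaps one pointer for another. A minimal verification sketch, assuming the optimizer.pt binary has been fetched locally (path and values taken from the new pointer above):

import hashlib

def matches_lfs_pointer(path, expected_oid, expected_size):
    # Recompute the sha256 and size of the downloaded binary and compare
    # them against the oid/size recorded in its LFS pointer file.
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

print(matches_lfs_pointer(
    "checkpoint-4000/optimizer.pt",  # assumed local path
    "57472fe8bdd46c789c07db1eb1160b173e99da32e650b26d95aacf8cff139382",
    1010431994,
))
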
checkpoint-4000/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:10624fb85a5ee3ad9a1bcf564c9dcd67d368a8806d3cae82917b9bf7b8f7ec83
-size 505188394
+oid sha256:f3311a436e3159a2877d4322021ab52025ba0fe612f670baf225a43e1b4294f5
+size 505200682
checkpoint-4000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c02e0dac05a0c0f70c4755fbb62dedae14550e9a1a6ce78e1c9454ea12d72501
+oid sha256:c68bc0d70adf6224a01be8cb90ba7fc4b83ea9888a1f90a80537034688b19836
 size 1064
checkpoint-4000/trainer_state.json CHANGED
@@ -9,67 +9,67 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "ep_loss": 4.7343,
+      "ep_loss": 4.4238,
       "epoch": 0.04,
       "learning_rate": 2.5e-05,
-      "loss": 11.6218,
-      "mlm_loss": 6.8875,
+      "loss": 11.1467,
+      "mlm_loss": 6.7229,
       "step": 500
     },
     {
-      "ep_loss": 0.4315,
+      "ep_loss": 0.4305,
       "epoch": 0.08,
       "learning_rate": 5e-05,
-      "loss": 3.0285,
-      "mlm_loss": 2.5969,
+      "loss": 2.9841,
+      "mlm_loss": 2.5537,
       "step": 1000
     },
     {
-      "ep_loss": 0.3811,
+      "ep_loss": 0.3724,
       "epoch": 0.12,
       "learning_rate": 7.5e-05,
-      "loss": 2.2571,
-      "mlm_loss": 1.8761,
+      "loss": 2.2371,
+      "mlm_loss": 1.8647,
       "step": 1500
     },
     {
-      "ep_loss": 0.37,
+      "ep_loss": 0.364,
       "epoch": 0.15,
       "learning_rate": 0.0001,
-      "loss": 1.9695,
-      "mlm_loss": 1.5994,
+      "loss": 1.8477,
+      "mlm_loss": 1.4837,
       "step": 2000
     },
     {
-      "ep_loss": 0.3813,
+      "ep_loss": 0.3678,
       "epoch": 0.19,
       "learning_rate": 0.00012495,
-      "loss": 1.5597,
-      "mlm_loss": 1.1785,
+      "loss": 1.5215,
+      "mlm_loss": 1.1538,
       "step": 2500
     },
     {
-      "ep_loss": 0.3624,
+      "ep_loss": 0.3617,
       "epoch": 0.23,
       "learning_rate": 0.00014995,
-      "loss": 1.4228,
-      "mlm_loss": 1.0604,
+      "loss": 1.4119,
+      "mlm_loss": 1.0501,
       "step": 3000
     },
     {
-      "ep_loss": 0.338,
+      "ep_loss": 0.3336,
       "epoch": 0.27,
-      "learning_rate": 0.00017495,
-      "loss": 1.3108,
-      "mlm_loss": 0.9729,
+      "learning_rate": 0.0001749,
+      "loss": 1.3027,
+      "mlm_loss": 0.9691,
       "step": 3500
     },
     {
-      "ep_loss": 0.3524,
+      "ep_loss": 0.3348,
       "epoch": 0.31,
-      "learning_rate": 0.00019994999999999998,
-      "loss": 1.2734,
-      "mlm_loss": 0.921,
+      "learning_rate": 0.0001999,
+      "loss": 1.2441,
+      "mlm_loss": 0.9093,
       "step": 4000
     }
   ],
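
The log_history array is the easiest way to follow the run: loss falls from about 11.1 at step 500 to about 1.24 at step 4000, while the learning rate is still increasing through step 4000. A minimal sketch to print the curve, assuming the checkpoint is available locally:

import json

# Print the training curve recorded in trainer_state.json (local path is an assumption).
with open("checkpoint-4000/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>5}  lr {entry['learning_rate']:.5f}  "
          f"loss {entry['loss']:.4f}  mlm_loss {entry['mlm_loss']:.4f}  "
          f"ep_loss {entry['ep_loss']:.4f}")
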
checkpoint-4000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cee56d4f38339eac5ecf098381d5222b3783455863547a17ac0df67c8caa7d15
+oid sha256:b26e03be70ebe5d40a81a81be94421c0578a9d8742b065bc029501df48632957
 size 4664