MohamedAhmedAE committed
Commit 7e92098
1 Parent(s): 8004e40

Training in progress, step 101500
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3758f44770210a3b7eacfee847ec1ee149ebf8014305087f3fe99a4b6341aea0
+ oid sha256:2801fc5fd3438e62fb52e2e065d7a41a311c5eb5c8d6526a16ac5ebf45c5eb82
  size 5544997664
last-checkpoint/adapter_config.json CHANGED
@@ -20,12 +20,12 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "k_proj",
- "up_proj",
- "q_proj",
  "v_proj",
- "down_proj",
+ "k_proj",
  "gate_proj",
+ "down_proj",
+ "q_proj",
+ "up_proj",
  "o_proj"
  ],
  "task_type": "CAUSAL_LM",
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3758f44770210a3b7eacfee847ec1ee149ebf8014305087f3fe99a4b6341aea0
+ oid sha256:8ff50e9a0eef14c00f32c5e550257295427f2d666e009aac32472aef43b0c78f
  size 5544997664
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52da7cdb552cc4b1c77028bfc6cb8beaf6f17504eab7f33b338cb73b1bbc098e
+ oid sha256:4b1f3c8b9d1c8514057a760685418307853b2172387d395e371d81516debcc99
  size 674093138
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50720ef23e7b2524db51c1ac5de29d9722efc1e3831e9b3a0b9d8b48b10e49c6
+ oid sha256:3d52e9778ae961a843d4efe5adba669832146332ec663eac9df46d71427724e3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:271ed2db05ddfde92e7d43bc2b4c677805ee9ac9b51633691308f1c378210911
+ oid sha256:b1dd725a3e5295711459643d6e1204a1d04a7f905cc6416544fa87ecdfb18228
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.07540939717103208,
+ "epoch": 0.07503755596210195,
  "eval_steps": 200,
- "global_step": 101400,
+ "global_step": 100900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -7070,41 +7070,6 @@
  "learning_rate": 1.9988888715649357e-05,
  "loss": 1.5441,
  "step": 100900
- },
- {
- "epoch": 0.07511192420388797,
- "grad_norm": 1.068562388420105,
- "learning_rate": 1.998886668338319e-05,
- "loss": 1.4998,
- "step": 101000
- },
- {
- "epoch": 0.075186292445674,
- "grad_norm": 0.6331286430358887,
- "learning_rate": 1.998884462930723e-05,
- "loss": 1.5633,
- "step": 101100
- },
- {
- "epoch": 0.07526066068746003,
- "grad_norm": 1.3566038608551025,
- "learning_rate": 1.998882255342152e-05,
- "loss": 1.4621,
- "step": 101200
- },
- {
- "epoch": 0.07533502892924605,
- "grad_norm": 0.9672004580497742,
- "learning_rate": 1.998880045572611e-05,
- "loss": 1.5249,
- "step": 101300
- },
- {
- "epoch": 0.07540939717103208,
- "grad_norm": 0.36732280254364014,
- "learning_rate": 1.9988778336221045e-05,
- "loss": 1.574,
- "step": 101400
  }
  ],
  "logging_steps": 100,
@@ -7124,7 +7089,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.3814760068425728e+18,
+ "total_flos": 1.3747108667853128e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null