{
  "best_metric": 1.7180215120315552,
  "best_model_checkpoint": "output/linkin-park/checkpoint-276",
  "epoch": 4.0,
  "global_step": 276,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0001354300570714007,
      "loss": 2.895,
      "step": 5
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00013021156058794955,
      "loss": 2.9711,
      "step": 10
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001218137945423232,
      "loss": 3.0184,
      "step": 15
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00011067009906946294,
      "loss": 2.635,
      "step": 20
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.735550930058317e-05,
      "loss": 2.6937,
      "step": 25
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.255708249541069e-05,
      "loss": 2.9303,
      "step": 30
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.703844462707114e-05,
      "loss": 2.5872,
      "step": 35
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.160038588088708e-05,
      "loss": 2.5982,
      "step": 40
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.703953841164296e-05,
      "loss": 2.5227,
      "step": 45
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.410726866285494e-05,
      "loss": 2.6876,
      "step": 50
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.3470905479788493e-05,
      "loss": 2.8587,
      "step": 55
    },
    {
      "epoch": 0.87,
      "learning_rate": 5.679304716725937e-06,
      "loss": 2.566,
      "step": 60
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.1345272679252971e-06,
      "loss": 2.6052,
      "step": 65
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.4942777156829834,
      "eval_runtime": 4.673,
      "eval_samples_per_second": 22.041,
      "eval_steps_per_second": 2.782,
      "step": 69
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0,
      "loss": 1.9438,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.463841438293457,
      "eval_runtime": 4.6348,
      "eval_samples_per_second": 20.929,
      "eval_steps_per_second": 2.805,
      "step": 70
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.7199452243268996e-06,
      "loss": 2.4952,
      "step": 75
    },
    {
      "epoch": 1.14,
      "learning_rate": 6.793535661894047e-06,
      "loss": 2.5481,
      "step": 80
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.4966360302693133e-05,
      "loss": 2.6492,
      "step": 85
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.582859959249087e-05,
      "loss": 2.3078,
      "step": 90
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.88355754965355e-05,
      "loss": 2.6034,
      "step": 95
    },
    {
      "epoch": 1.43,
      "learning_rate": 5.333506393059682e-05,
      "loss": 2.4777,
      "step": 100
    },
    {
      "epoch": 1.5,
      "learning_rate": 6.859999999999999e-05,
      "loss": 2.2842,
      "step": 105
    },
    {
      "epoch": 1.57,
      "learning_rate": 8.386493606940316e-05,
      "loss": 2.577,
      "step": 110
    },
    {
      "epoch": 1.64,
      "learning_rate": 9.836442450346448e-05,
      "loss": 2.3194,
      "step": 115
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00011137140040750911,
      "loss": 2.3364,
      "step": 120
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00012223363969730684,
      "loss": 2.3984,
      "step": 125
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00013040646433810595,
      "loss": 2.2814,
      "step": 130
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.0001354800547756731,
      "loss": 2.2059,
      "step": 135
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001372,
      "loss": 1.9945,
      "step": 140
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.248004913330078,
      "eval_runtime": 4.6064,
      "eval_samples_per_second": 21.058,
      "eval_steps_per_second": 2.822,
      "step": 140
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.0001354800547756731,
      "loss": 2.1018,
      "step": 145
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.00013040646433810595,
      "loss": 1.8963,
      "step": 150
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.00012223363969730686,
      "loss": 2.1895,
      "step": 155
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00011137140040750914,
      "loss": 2.0564,
      "step": 160
    },
    {
      "epoch": 2.36,
      "learning_rate": 9.836442450346452e-05,
      "loss": 2.1397,
      "step": 165
    },
    {
      "epoch": 2.43,
      "learning_rate": 8.386493606940326e-05,
      "loss": 2.3483,
      "step": 170
    },
    {
      "epoch": 2.5,
      "learning_rate": 6.860000000000001e-05,
      "loss": 1.9349,
      "step": 175
    },
    {
      "epoch": 2.57,
      "learning_rate": 5.333506393059685e-05,
      "loss": 1.8439,
      "step": 180
    },
    {
      "epoch": 2.64,
      "learning_rate": 3.8835575496535535e-05,
      "loss": 1.9104,
      "step": 185
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.58285995924909e-05,
      "loss": 1.7206,
      "step": 190
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.496636030269317e-05,
      "loss": 2.2577,
      "step": 195
    },
    {
      "epoch": 2.86,
      "learning_rate": 6.793535661894062e-06,
      "loss": 1.8714,
      "step": 200
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.7199452243269073e-06,
      "loss": 2.2299,
      "step": 205
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0,
      "loss": 1.8604,
      "step": 210
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.0318267345428467,
      "eval_runtime": 4.5539,
      "eval_samples_per_second": 21.081,
      "eval_steps_per_second": 2.635,
      "step": 210
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.500582770777967e-06,
      "loss": 1.8704,
      "step": 215
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.166986896886688e-05,
      "loss": 1.6973,
      "step": 220
    },
    {
      "epoch": 3.26,
      "learning_rate": 2.177685437520025e-05,
      "loss": 1.9541,
      "step": 225
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.4300000000000014e-05,
      "loss": 1.7097,
      "step": 230
    },
    {
      "epoch": 3.41,
      "learning_rate": 4.859308855339902e-05,
      "loss": 1.8994,
      "step": 235
    },
    {
      "epoch": 3.48,
      "learning_rate": 6.391857044318346e-05,
      "loss": 1.772,
      "step": 240
    },
    {
      "epoch": 3.55,
      "learning_rate": 7.948562268689874e-05,
      "loss": 1.7301,
      "step": 245
    },
    {
      "epoch": 3.62,
      "learning_rate": 9.449095682862937e-05,
      "loss": 2.1161,
      "step": 250
    },
    {
      "epoch": 3.7,
      "learning_rate": 0.0001081602700970798,
      "loss": 1.876,
      "step": 255
    },
    {
      "epoch": 3.77,
      "learning_rate": 0.00011978820084915117,
      "loss": 1.9729,
      "step": 260
    },
    {
      "epoch": 3.84,
      "learning_rate": 0.00012877472652481797,
      "loss": 1.7309,
      "step": 265
    },
    {
      "epoch": 3.91,
      "learning_rate": 0.00013465612591205902,
      "loss": 2.0053,
      "step": 270
    },
    {
      "epoch": 3.99,
      "learning_rate": 0.00013712890801216552,
      "loss": 1.7362,
      "step": 275
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.7180215120315552,
      "eval_runtime": 4.7996,
      "eval_samples_per_second": 20.835,
      "eval_steps_per_second": 2.709,
      "step": 276
    }
  ],
  "max_steps": 276,
  "num_train_epochs": 4,
  "total_flos": 286245421056000.0,
  "trial_name": null,
  "trial_params": null
}