{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.7408000000000001,
  "eval_steps": 500,
  "global_step": 136,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "grad_norm": 4.844448880484722,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.7673,
      "step": 10
    },
    {
      "epoch": 0.26,
      "grad_norm": 1.851370146022714,
      "learning_rate": 1.993800990199235e-05,
      "loss": 0.6958,
      "step": 20
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.9773835522231158,
      "learning_rate": 1.944669091607919e-05,
      "loss": 0.6505,
      "step": 30
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.613999907750424,
      "learning_rate": 1.8488342952846074e-05,
      "loss": 0.6289,
      "step": 40
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.5045322898097253,
      "learning_rate": 1.711034517110761e-05,
      "loss": 0.6127,
      "step": 50
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.4665742556940108,
      "learning_rate": 1.5380823531633727e-05,
      "loss": 0.6077,
      "step": 60
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.46191050014663815,
      "learning_rate": 1.3385282760863758e-05,
      "loss": 0.602,
      "step": 70
    },
    {
      "epoch": 1.02,
      "grad_norm": 0.5517829769602872,
      "learning_rate": 1.1222379136816347e-05,
      "loss": 0.5795,
      "step": 80
    },
    {
      "epoch": 1.15,
      "grad_norm": 0.4756581891233078,
      "learning_rate": 8.999043083759016e-06,
      "loss": 0.5125,
      "step": 90
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.5264850481107349,
      "learning_rate": 6.8251927065945755e-06,
      "loss": 0.509,
      "step": 100
    },
    {
      "epoch": 1.41,
      "grad_norm": 0.4493840501728769,
      "learning_rate": 4.808299619194251e-06,
      "loss": 0.5013,
      "step": 110
    },
    {
      "epoch": 1.54,
      "grad_norm": 0.4061406187346213,
      "learning_rate": 3.0480757232535773e-06,
      "loss": 0.501,
      "step": 120
    },
    {
      "epoch": 1.66,
      "grad_norm": 0.4224909726082761,
      "learning_rate": 1.6315436146506702e-06,
      "loss": 0.5023,
      "step": 130
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 17,
  "total_flos": 327624362295296.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}