{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.985781990521327,
  "eval_steps": 100,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 26.563100152564367,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 123.11854553222656,
      "logits/rejected": 97.00198364257812,
      "logps/chosen": -425.18585205078125,
      "logps/rejected": -424.1869201660156,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 27.293107117678087,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": 117.04379272460938,
      "logits/rejected": 135.89993286132812,
      "logps/chosen": -441.55804443359375,
      "logps/rejected": -523.9049682617188,
      "loss": 0.6893,
      "rewards/accuracies": 0.5347222089767456,
      "rewards/chosen": 0.01830328069627285,
      "rewards/margins": 0.016203498467803,
      "rewards/rejected": 0.002099784789606929,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 41.6541290719842,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": 119.21928405761719,
      "logits/rejected": 126.63338470458984,
      "logps/chosen": -430.0658264160156,
      "logps/rejected": -516.3239135742188,
      "loss": 0.6084,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.03801226615905762,
      "rewards/margins": 0.25012144446372986,
      "rewards/rejected": -0.2881337106227875,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 30.808060184454277,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": 109.26795959472656,
      "logits/rejected": 112.9657211303711,
      "logps/chosen": -496.3540954589844,
      "logps/rejected": -602.0138549804688,
      "loss": 0.5794,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.41233962774276733,
      "rewards/margins": 0.5356513857841492,
      "rewards/rejected": -0.9479910731315613,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 30.141976904526587,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": 107.7169418334961,
      "logits/rejected": 102.60658264160156,
      "logps/chosen": -524.6154174804688,
      "logps/rejected": -586.1959838867188,
      "loss": 0.5711,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.5143404603004456,
      "rewards/margins": 0.4999730587005615,
      "rewards/rejected": -1.0143134593963623,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 26.331594229316682,
      "learning_rate": 2.328513490917311e-09,
      "logits/chosen": 116.38276672363281,
      "logits/rejected": 116.3095703125,
      "logps/chosen": -542.1165771484375,
      "logps/rejected": -607.3255615234375,
      "loss": 0.5531,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.5929387807846069,
      "rewards/margins": 0.5678332448005676,
      "rewards/rejected": -1.1607720851898193,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 0.6015224342162793,
      "train_runtime": 574.0149,
      "train_samples_per_second": 11.759,
      "train_steps_per_second": 0.091
    }
  ],
  "logging_steps": 10,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}