{
  "best_metric": 0.8155183792114258,
  "best_model_checkpoint": "./output/training_results/C020_Meta-Llama-3-8B_instruct_20240729_032429/checkpoint-10",
  "epoch": 0.20833333333333334,
  "eval_steps": 5,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020833333333333332,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 1.0187,
      "step": 1
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 0.946,
      "step": 3
    },
    {
      "epoch": 0.10416666666666667,
      "eval_loss": 1.0251731872558594,
      "eval_runtime": 0.8943,
      "eval_samples_per_second": 380.189,
      "eval_steps_per_second": 3.355,
      "step": 5
    },
    {
      "epoch": 0.125,
      "grad_norm": 13.805769787987279,
      "learning_rate": 2.25e-06,
      "loss": 1.0778,
      "step": 6
    },
    {
      "epoch": 0.1875,
      "grad_norm": 5.41697677358006,
      "learning_rate": 2.865909090909091e-06,
      "loss": 0.8781,
      "step": 9
    },
    {
      "epoch": 0.20833333333333334,
      "eval_loss": 0.8155183792114258,
      "eval_runtime": 0.8756,
      "eval_samples_per_second": 388.315,
      "eval_steps_per_second": 3.426,
      "step": 10
    }
  ],
  "logging_steps": 3,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "total_flos": 207641640960.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}