{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.653500552592379, "eval_steps": 500, "global_step": 8500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "ep_loss": 4.4238, "epoch": 0.04, "learning_rate": 2.5e-05, "loss": 11.1467, "mlm_loss": 6.7229, "step": 500 }, { "ep_loss": 0.4305, "epoch": 0.08, "learning_rate": 5e-05, "loss": 2.9841, "mlm_loss": 2.5537, "step": 1000 }, { "ep_loss": 0.3724, "epoch": 0.12, "learning_rate": 7.5e-05, "loss": 2.2371, "mlm_loss": 1.8647, "step": 1500 }, { "ep_loss": 0.364, "epoch": 0.15, "learning_rate": 0.0001, "loss": 1.8477, "mlm_loss": 1.4837, "step": 2000 }, { "ep_loss": 0.3678, "epoch": 0.19, "learning_rate": 0.00012495, "loss": 1.5215, "mlm_loss": 1.1538, "step": 2500 }, { "ep_loss": 0.3617, "epoch": 0.23, "learning_rate": 0.00014995, "loss": 1.4119, "mlm_loss": 1.0501, "step": 3000 }, { "ep_loss": 0.3336, "epoch": 0.27, "learning_rate": 0.0001749, "loss": 1.3027, "mlm_loss": 0.9691, "step": 3500 }, { "ep_loss": 0.3348, "epoch": 0.31, "learning_rate": 0.0001999, "loss": 1.2441, "mlm_loss": 0.9093, "step": 4000 }, { "ep_loss": 0.3348, "epoch": 0.35, "learning_rate": 0.0002249, "loss": 1.1942, "mlm_loss": 0.8594, "step": 4500 }, { "ep_loss": 0.3331, "epoch": 0.38, "learning_rate": 0.0002499, "loss": 1.1466, "mlm_loss": 0.8135, "step": 5000 }, { "ep_loss": 0.3268, "epoch": 0.42, "learning_rate": 0.00027489999999999996, "loss": 1.1067, "mlm_loss": 0.7799, "step": 5500 }, { "ep_loss": 0.3378, "epoch": 0.46, "learning_rate": 0.00029985, "loss": 1.1007, "mlm_loss": 0.7629, "step": 6000 }, { "ep_loss": 0.3033, "epoch": 0.5, "learning_rate": 0.00032485, "loss": 1.028, "mlm_loss": 0.7246, "step": 6500 }, { "ep_loss": 0.2848, "epoch": 0.54, "learning_rate": 0.00034985, "loss": 0.9833, "mlm_loss": 0.6985, "step": 7000 }, { "ep_loss": 0.316, "epoch": 0.58, "learning_rate": 0.00037485000000000003, "loss": 1.0235, "mlm_loss": 0.7076, "step": 7500 }, { "ep_loss": 0.2885, "epoch": 0.62, "learning_rate": 0.00039975, "loss": 0.9673, "mlm_loss": 0.6789, "step": 8000 }, { "ep_loss": 0.274, "epoch": 0.65, "learning_rate": 0.00042475000000000005, "loss": 0.9266, "mlm_loss": 0.6525, "step": 8500 } ], "logging_steps": 500, "max_steps": 520240, "num_train_epochs": 40, "save_steps": 500, "total_flos": 8.115946811576812e+18, "trial_name": null, "trial_params": null }