{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 3756,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 1.9867021276595745e-05,
      "loss": 1.5454,
      "step": 250
    },
    {
      "epoch": 0.4,
      "learning_rate": 2.890828402366864e-05,
      "loss": 1.2714,
      "step": 500
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.6689349112426035e-05,
      "loss": 1.222,
      "step": 750
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.447041420118343e-05,
      "loss": 1.2248,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "learning_rate": 2.225147928994083e-05,
      "loss": 1.1943,
      "step": 1250
    },
    {
      "epoch": 1.2,
      "learning_rate": 2.0032544378698224e-05,
      "loss": 1.1817,
      "step": 1500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.781360946745562e-05,
      "loss": 1.183,
      "step": 1750
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.5594674556213018e-05,
      "loss": 1.1724,
      "step": 2000
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.3384615384615386e-05,
      "loss": 1.1625,
      "step": 2250
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.1165680473372781e-05,
      "loss": 1.1664,
      "step": 2500
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.946745562130178e-06,
      "loss": 1.1486,
      "step": 2750
    },
    {
      "epoch": 2.4,
      "learning_rate": 6.727810650887574e-06,
      "loss": 1.1553,
      "step": 3000
    },
    {
      "epoch": 2.6,
      "learning_rate": 4.508875739644971e-06,
      "loss": 1.1455,
      "step": 3250
    },
    {
      "epoch": 2.8,
      "learning_rate": 2.2899408284023667e-06,
      "loss": 1.1439,
      "step": 3500
    },
    {
      "epoch": 3.0,
      "learning_rate": 7.100591715976332e-08,
      "loss": 1.1479,
      "step": 3750
    }
  ],
  "logging_steps": 250,
  "max_steps": 3756,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.2836685465124864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}