{
  "best_global_step": 5445,
  "best_metric": 3.0573878288269043,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 9900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00202020202020202,
      "grad_norm": 27.369495391845703,
      "learning_rate": 0.0,
      "loss": 10.3689,
      "step": 1
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.4256974458694458,
      "learning_rate": 9.88e-05,
      "loss": 4.8295,
      "step": 495
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.7844455242156982,
      "eval_runtime": 19.6614,
      "eval_samples_per_second": 1616.87,
      "eval_steps_per_second": 6.358,
      "step": 495
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.164453148841858,
      "learning_rate": 9.478678038379531e-05,
      "loss": 3.5371,
      "step": 990
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.4359753131866455,
      "eval_runtime": 19.6774,
      "eval_samples_per_second": 1615.558,
      "eval_steps_per_second": 6.352,
      "step": 990
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.0340933799743652,
      "learning_rate": 8.950959488272922e-05,
      "loss": 3.2797,
      "step": 1485
    },
    {
      "epoch": 3.0,
      "eval_loss": 3.2933497428894043,
      "eval_runtime": 19.6096,
      "eval_samples_per_second": 1621.145,
      "eval_steps_per_second": 6.374,
      "step": 1485
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.068572998046875,
      "learning_rate": 8.42324093816631e-05,
      "loss": 3.1349,
      "step": 1980
    },
    {
      "epoch": 4.0,
      "eval_loss": 3.207876443862915,
      "eval_runtime": 19.9041,
      "eval_samples_per_second": 1597.16,
      "eval_steps_per_second": 6.28,
      "step": 1980
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.0857131481170654,
      "learning_rate": 7.895522388059702e-05,
      "loss": 3.0332,
      "step": 2475
    },
    {
      "epoch": 5.0,
      "eval_loss": 3.1527912616729736,
      "eval_runtime": 19.6429,
      "eval_samples_per_second": 1618.398,
      "eval_steps_per_second": 6.364,
      "step": 2475
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.0869224071502686,
      "learning_rate": 7.367803837953093e-05,
      "loss": 2.9527,
      "step": 2970
    },
    {
      "epoch": 6.0,
      "eval_loss": 3.1138641834259033,
      "eval_runtime": 19.7113,
      "eval_samples_per_second": 1612.783,
      "eval_steps_per_second": 6.342,
      "step": 2970
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.11025869846344,
      "learning_rate": 6.840085287846483e-05,
      "loss": 2.8841,
      "step": 3465
    },
    {
      "epoch": 7.0,
      "eval_loss": 3.091607093811035,
      "eval_runtime": 19.6526,
      "eval_samples_per_second": 1617.6,
      "eval_steps_per_second": 6.36,
      "step": 3465
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.156492829322815,
      "learning_rate": 6.312366737739872e-05,
      "loss": 2.824,
      "step": 3960
    },
    {
      "epoch": 8.0,
      "eval_loss": 3.074742555618286,
      "eval_runtime": 19.9556,
      "eval_samples_per_second": 1593.036,
      "eval_steps_per_second": 6.264,
      "step": 3960
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.2233030796051025,
      "learning_rate": 5.784648187633263e-05,
      "loss": 2.7682,
      "step": 4455
    },
    {
      "epoch": 9.0,
      "eval_loss": 3.062962293624878,
      "eval_runtime": 19.6864,
      "eval_samples_per_second": 1614.821,
      "eval_steps_per_second": 6.35,
      "step": 4455
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.3041876554489136,
      "learning_rate": 5.256929637526653e-05,
      "loss": 2.7163,
      "step": 4950
    },
    {
      "epoch": 10.0,
      "eval_loss": 3.059563636779785,
      "eval_runtime": 19.6977,
      "eval_samples_per_second": 1613.894,
      "eval_steps_per_second": 6.346,
      "step": 4950
    },
    {
      "epoch": 11.0,
      "grad_norm": 1.3490029573440552,
      "learning_rate": 4.7292110874200426e-05,
      "loss": 2.6677,
      "step": 5445
    },
    {
      "epoch": 11.0,
      "eval_loss": 3.0573878288269043,
      "eval_runtime": 19.6953,
      "eval_samples_per_second": 1614.092,
      "eval_steps_per_second": 6.347,
      "step": 5445
    },
    {
      "epoch": 12.0,
      "grad_norm": 1.4894585609436035,
      "learning_rate": 4.201492537313433e-05,
      "loss": 2.6216,
      "step": 5940
    },
    {
      "epoch": 12.0,
      "eval_loss": 3.062603712081909,
      "eval_runtime": 19.9746,
      "eval_samples_per_second": 1591.524,
      "eval_steps_per_second": 6.258,
      "step": 5940
    },
    {
      "epoch": 13.0,
      "grad_norm": 1.5706915855407715,
      "learning_rate": 3.673773987206823e-05,
      "loss": 2.576,
      "step": 6435
    },
    {
      "epoch": 13.0,
      "eval_loss": 3.07055401802063,
      "eval_runtime": 19.7413,
      "eval_samples_per_second": 1610.326,
      "eval_steps_per_second": 6.332,
      "step": 6435
    },
    {
      "epoch": 14.0,
      "grad_norm": 1.7185755968093872,
      "learning_rate": 3.1460554371002134e-05,
      "loss": 2.5352,
      "step": 6930
    },
    {
      "epoch": 14.0,
      "eval_loss": 3.074070453643799,
      "eval_runtime": 19.7238,
      "eval_samples_per_second": 1611.759,
      "eval_steps_per_second": 6.338,
      "step": 6930
    },
    {
      "epoch": 15.0,
      "grad_norm": 1.6921051740646362,
      "learning_rate": 2.6183368869936037e-05,
      "loss": 2.4975,
      "step": 7425
    },
    {
      "epoch": 15.0,
      "eval_loss": 3.085761785507202,
      "eval_runtime": 19.7187,
      "eval_samples_per_second": 1612.175,
      "eval_steps_per_second": 6.339,
      "step": 7425
    },
    {
      "epoch": 16.0,
      "grad_norm": 1.8211185932159424,
      "learning_rate": 2.0906183368869936e-05,
      "loss": 2.4642,
      "step": 7920
    },
    {
      "epoch": 16.0,
      "eval_loss": 3.093783140182495,
      "eval_runtime": 19.6893,
      "eval_samples_per_second": 1614.581,
      "eval_steps_per_second": 6.349,
      "step": 7920
    },
    {
      "epoch": 17.0,
      "grad_norm": 1.937579870223999,
      "learning_rate": 1.562899786780384e-05,
      "loss": 2.435,
      "step": 8415
    },
    {
      "epoch": 17.0,
      "eval_loss": 3.103745698928833,
      "eval_runtime": 19.6907,
      "eval_samples_per_second": 1614.467,
      "eval_steps_per_second": 6.348,
      "step": 8415
    },
    {
      "epoch": 18.0,
      "grad_norm": 1.9639313220977783,
      "learning_rate": 1.035181236673774e-05,
      "loss": 2.4088,
      "step": 8910
    },
    {
      "epoch": 18.0,
      "eval_loss": 3.1111936569213867,
      "eval_runtime": 19.7749,
      "eval_samples_per_second": 1607.595,
      "eval_steps_per_second": 6.321,
      "step": 8910
    },
    {
      "epoch": 19.0,
      "grad_norm": 1.9801260232925415,
      "learning_rate": 5.074626865671642e-06,
      "loss": 2.3869,
      "step": 9405
    },
    {
      "epoch": 19.0,
      "eval_loss": 3.117586135864258,
      "eval_runtime": 19.7606,
      "eval_samples_per_second": 1608.761,
      "eval_steps_per_second": 6.326,
      "step": 9405
    }
  ],
  "logging_steps": 500,
  "max_steps": 9900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6536258183168e+17,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}
|
|