{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 6015,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41562759767248547,
      "grad_norm": 368394.84375,
      "learning_rate": 1.833748960931006e-05,
      "loss": 0.7077,
      "step": 500
    },
    {
      "epoch": 0.8312551953449709,
      "grad_norm": 38091.30078125,
      "learning_rate": 1.6674979218620116e-05,
      "loss": 0.4632,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.3516891300678253,
      "eval_runtime": 35.0969,
      "eval_samples_per_second": 14.075,
      "eval_steps_per_second": 1.767,
      "step": 1203
    },
    {
      "epoch": 1.2468827930174564,
      "grad_norm": 34293.8984375,
      "learning_rate": 1.5012468827930176e-05,
      "loss": 0.4256,
      "step": 1500
    },
    {
      "epoch": 1.6625103906899419,
      "grad_norm": 44778.91015625,
      "learning_rate": 1.3349958437240233e-05,
      "loss": 0.3976,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3173050582408905,
      "eval_runtime": 35.2821,
      "eval_samples_per_second": 14.001,
      "eval_steps_per_second": 1.757,
      "step": 2406
    },
    {
      "epoch": 2.0781379883624274,
      "grad_norm": 33540.33203125,
      "learning_rate": 1.1687448046550292e-05,
      "loss": 0.3783,
      "step": 2500
    },
    {
      "epoch": 2.493765586034913,
      "grad_norm": 42853.05078125,
      "learning_rate": 1.002493765586035e-05,
      "loss": 0.3694,
      "step": 3000
    },
    {
      "epoch": 2.9093931837073983,
      "grad_norm": 26482.623046875,
      "learning_rate": 8.362427265170407e-06,
      "loss": 0.3572,
      "step": 3500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.3047104477882385,
      "eval_runtime": 35.153,
      "eval_samples_per_second": 14.053,
      "eval_steps_per_second": 1.764,
      "step": 3609
    },
    {
      "epoch": 3.3250207813798838,
      "grad_norm": 54536.17578125,
      "learning_rate": 6.699916874480467e-06,
      "loss": 0.3517,
      "step": 4000
    },
    {
      "epoch": 3.7406483790523692,
      "grad_norm": 41206.8203125,
      "learning_rate": 5.037406483790524e-06,
      "loss": 0.3409,
      "step": 4500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.29912063479423523,
      "eval_runtime": 35.2976,
      "eval_samples_per_second": 13.995,
      "eval_steps_per_second": 1.756,
      "step": 4812
    },
    {
      "epoch": 4.156275976724855,
      "grad_norm": 46259.7421875,
      "learning_rate": 3.374896093100582e-06,
      "loss": 0.3315,
      "step": 5000
    },
    {
      "epoch": 4.57190357439734,
      "grad_norm": 53976.30859375,
      "learning_rate": 1.7123857024106402e-06,
      "loss": 0.3383,
      "step": 5500
    },
    {
      "epoch": 4.987531172069826,
      "grad_norm": 48498.83203125,
      "learning_rate": 4.987531172069826e-08,
      "loss": 0.3333,
      "step": 6000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.2967619299888611,
      "eval_runtime": 35.6175,
      "eval_samples_per_second": 13.87,
      "eval_steps_per_second": 1.741,
      "step": 6015
    }
  ],
  "logging_steps": 500,
  "max_steps": 6015,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.93000089042944e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}