{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.061273571176912116,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003063678558845606,
      "grad_norm": 2.937537670135498,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.9979,
      "step": 100
    },
    {
      "epoch": 0.006127357117691212,
      "grad_norm": 2.9786553382873535,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.9223,
      "step": 200
    },
    {
      "epoch": 0.009191035676536818,
      "grad_norm": 2.899183750152588,
      "learning_rate": 9.987820251299121e-06,
      "loss": 2.9009,
      "step": 300
    },
    {
      "epoch": 0.012254714235382424,
      "grad_norm": 2.9656615257263184,
      "learning_rate": 9.890738003669029e-06,
      "loss": 2.8884,
      "step": 400
    },
    {
      "epoch": 0.015318392794228029,
      "grad_norm": 2.785738468170166,
      "learning_rate": 9.698463103929542e-06,
      "loss": 2.8738,
      "step": 500
    },
    {
      "epoch": 0.018382071353073636,
      "grad_norm": 2.8539676666259766,
      "learning_rate": 9.414737964294636e-06,
      "loss": 2.8555,
      "step": 600
    },
    {
      "epoch": 0.02144574991191924,
      "grad_norm": 2.6807861328125,
      "learning_rate": 9.045084971874738e-06,
      "loss": 2.8666,
      "step": 700
    },
    {
      "epoch": 0.024509428470764847,
      "grad_norm": 2.81980037689209,
      "learning_rate": 8.596699001693257e-06,
      "loss": 2.8515,
      "step": 800
    },
    {
      "epoch": 0.027573107029610452,
      "grad_norm": 2.636308193206787,
      "learning_rate": 8.078307376628292e-06,
      "loss": 2.8603,
      "step": 900
    },
    {
      "epoch": 0.030636785588456058,
      "grad_norm": 2.5829219818115234,
      "learning_rate": 7.500000000000001e-06,
      "loss": 2.8555,
      "step": 1000
    },
    {
      "epoch": 0.03370046414730166,
      "grad_norm": 2.622443914413452,
      "learning_rate": 6.873032967079562e-06,
      "loss": 2.8287,
      "step": 1100
    },
    {
      "epoch": 0.03676414270614727,
      "grad_norm": 2.57558274269104,
      "learning_rate": 6.209609477998339e-06,
      "loss": 2.8294,
      "step": 1200
    },
    {
      "epoch": 0.039827821264992874,
      "grad_norm": 2.6264266967773438,
      "learning_rate": 5.522642316338268e-06,
      "loss": 2.8315,
      "step": 1300
    },
    {
      "epoch": 0.04289149982383848,
      "grad_norm": 2.575040578842163,
      "learning_rate": 4.825502516487497e-06,
      "loss": 2.8338,
      "step": 1400
    },
    {
      "epoch": 0.04595517838268409,
      "grad_norm": 2.7474663257598877,
      "learning_rate": 4.131759111665349e-06,
      "loss": 2.8179,
      "step": 1500
    },
    {
      "epoch": 0.049018856941529694,
      "grad_norm": 2.669630527496338,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 2.8021,
      "step": 1600
    },
    {
      "epoch": 0.0520825355003753,
      "grad_norm": 2.631849527359009,
      "learning_rate": 2.8081442660546126e-06,
      "loss": 2.8077,
      "step": 1700
    },
    {
      "epoch": 0.055146214059220905,
      "grad_norm": 2.609309673309326,
      "learning_rate": 2.204035482646267e-06,
      "loss": 2.8177,
      "step": 1800
    },
    {
      "epoch": 0.058209892618066514,
      "grad_norm": 2.68284273147583,
      "learning_rate": 1.6543469682057105e-06,
      "loss": 2.8051,
      "step": 1900
    },
    {
      "epoch": 0.061273571176912116,
      "grad_norm": 2.7649335861206055,
      "learning_rate": 1.1697777844051105e-06,
      "loss": 2.8117,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.91328310788096e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}