|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04595517838268409,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003063678558845606,
      "grad_norm": 3.035543918609619,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.0534,
      "step": 100
    },
    {
      "epoch": 0.006127357117691212,
      "grad_norm": 2.96189546585083,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.9361,
      "step": 200
    },
    {
      "epoch": 0.009191035676536818,
      "grad_norm": 2.852346420288086,
      "learning_rate": 9.987820251299121e-06,
      "loss": 2.9092,
      "step": 300
    },
    {
      "epoch": 0.012254714235382424,
      "grad_norm": 2.963131904602051,
      "learning_rate": 9.890738003669029e-06,
      "loss": 2.8945,
      "step": 400
    },
    {
      "epoch": 0.015318392794228029,
      "grad_norm": 2.8017725944519043,
      "learning_rate": 9.698463103929542e-06,
      "loss": 2.8791,
      "step": 500
    },
    {
      "epoch": 0.018382071353073636,
      "grad_norm": 2.877586841583252,
      "learning_rate": 9.414737964294636e-06,
      "loss": 2.8601,
      "step": 600
    },
    {
      "epoch": 0.02144574991191924,
      "grad_norm": 2.7040741443634033,
      "learning_rate": 9.045084971874738e-06,
      "loss": 2.8706,
      "step": 700
    },
    {
      "epoch": 0.024509428470764847,
      "grad_norm": 2.8546905517578125,
      "learning_rate": 8.596699001693257e-06,
      "loss": 2.8554,
      "step": 800
    },
    {
      "epoch": 0.027573107029610452,
      "grad_norm": 2.643134355545044,
      "learning_rate": 8.078307376628292e-06,
      "loss": 2.8639,
      "step": 900
    },
    {
      "epoch": 0.030636785588456058,
      "grad_norm": 2.577639579772949,
      "learning_rate": 7.500000000000001e-06,
      "loss": 2.8588,
      "step": 1000
    },
    {
      "epoch": 0.03370046414730166,
      "grad_norm": 2.65212082862854,
      "learning_rate": 6.873032967079562e-06,
      "loss": 2.8316,
      "step": 1100
    },
    {
      "epoch": 0.03676414270614727,
      "grad_norm": 2.6047818660736084,
      "learning_rate": 6.209609477998339e-06,
      "loss": 2.8323,
      "step": 1200
    },
    {
      "epoch": 0.039827821264992874,
      "grad_norm": 2.6327316761016846,
      "learning_rate": 5.522642316338268e-06,
      "loss": 2.8346,
      "step": 1300
    },
    {
      "epoch": 0.04289149982383848,
      "grad_norm": 2.5692760944366455,
      "learning_rate": 4.825502516487497e-06,
      "loss": 2.8368,
      "step": 1400
    },
    {
      "epoch": 0.04595517838268409,
      "grad_norm": 2.709742307662964,
      "learning_rate": 4.131759111665349e-06,
      "loss": 2.8207,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.43496233091072e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|