{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07659196397114015,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003063678558845606,
      "grad_norm": 3.0355441570281982,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.0534,
      "step": 100
    },
    {
      "epoch": 0.006127357117691212,
      "grad_norm": 2.961895227432251,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.9361,
      "step": 200
    },
    {
      "epoch": 0.009191035676536818,
      "grad_norm": 2.8523459434509277,
      "learning_rate": 9.987820251299121e-06,
      "loss": 2.9092,
      "step": 300
    },
    {
      "epoch": 0.012254714235382424,
      "grad_norm": 2.9632089138031006,
      "learning_rate": 9.890738003669029e-06,
      "loss": 2.8945,
      "step": 400
    },
    {
      "epoch": 0.015318392794228029,
      "grad_norm": 2.8018319606781006,
      "learning_rate": 9.698463103929542e-06,
      "loss": 2.8791,
      "step": 500
    },
    {
      "epoch": 0.018382071353073636,
      "grad_norm": 2.877758741378784,
      "learning_rate": 9.414737964294636e-06,
      "loss": 2.8601,
      "step": 600
    },
    {
      "epoch": 0.02144574991191924,
      "grad_norm": 2.7049593925476074,
      "learning_rate": 9.045084971874738e-06,
      "loss": 2.8707,
      "step": 700
    },
    {
      "epoch": 0.024509428470764847,
      "grad_norm": 2.8549087047576904,
      "learning_rate": 8.596699001693257e-06,
      "loss": 2.8554,
      "step": 800
    },
    {
      "epoch": 0.027573107029610452,
      "grad_norm": 2.643270969390869,
      "learning_rate": 8.078307376628292e-06,
      "loss": 2.8639,
      "step": 900
    },
    {
      "epoch": 0.030636785588456058,
      "grad_norm": 2.577594757080078,
      "learning_rate": 7.500000000000001e-06,
      "loss": 2.8589,
      "step": 1000
    },
    {
      "epoch": 0.03370046414730166,
      "grad_norm": 2.69144344329834,
      "learning_rate": 6.873032967079562e-06,
      "loss": 2.8316,
      "step": 1100
    },
    {
      "epoch": 0.03676414270614727,
      "grad_norm": 2.6036555767059326,
      "learning_rate": 6.209609477998339e-06,
      "loss": 2.8323,
      "step": 1200
    },
    {
      "epoch": 0.039827821264992874,
      "grad_norm": 2.630338430404663,
      "learning_rate": 5.522642316338268e-06,
      "loss": 2.8346,
      "step": 1300
    },
    {
      "epoch": 0.04289149982383848,
      "grad_norm": 2.5683858394622803,
      "learning_rate": 4.825502516487497e-06,
      "loss": 2.8368,
      "step": 1400
    },
    {
      "epoch": 0.04595517838268409,
      "grad_norm": 2.70560359954834,
      "learning_rate": 4.131759111665349e-06,
      "loss": 2.8206,
      "step": 1500
    },
    {
      "epoch": 0.049018856941529694,
      "grad_norm": 2.657520055770874,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 2.8048,
      "step": 1600
    },
    {
      "epoch": 0.0520825355003753,
      "grad_norm": 2.621300220489502,
      "learning_rate": 2.8081442660546126e-06,
      "loss": 2.8104,
      "step": 1700
    },
    {
      "epoch": 0.055146214059220905,
      "grad_norm": 2.6119415760040283,
      "learning_rate": 2.204035482646267e-06,
      "loss": 2.8202,
      "step": 1800
    },
    {
      "epoch": 0.058209892618066514,
      "grad_norm": 2.7000575065612793,
      "learning_rate": 1.6543469682057105e-06,
      "loss": 2.8078,
      "step": 1900
    },
    {
      "epoch": 0.061273571176912116,
      "grad_norm": 2.788527727127075,
      "learning_rate": 1.1697777844051105e-06,
      "loss": 2.8143,
      "step": 2000
    },
    {
      "epoch": 0.06433724973575772,
      "grad_norm": 2.729123830795288,
      "learning_rate": 7.597595192178702e-07,
      "loss": 2.7989,
      "step": 2100
    },
    {
      "epoch": 0.06740092829460333,
      "grad_norm": 2.7026212215423584,
      "learning_rate": 4.322727117869951e-07,
      "loss": 2.8086,
      "step": 2200
    },
    {
      "epoch": 0.07046460685344894,
      "grad_norm": 2.68709397315979,
      "learning_rate": 1.9369152030840553e-07,
      "loss": 2.807,
      "step": 2300
    },
    {
      "epoch": 0.07352828541229454,
      "grad_norm": 2.7309343814849854,
      "learning_rate": 4.865965629214819e-08,
      "loss": 2.7994,
      "step": 2400
    },
    {
      "epoch": 0.07659196397114015,
      "grad_norm": 2.7638888359069824,
      "learning_rate": 0.0,
      "loss": 2.8006,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.3916038848512e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}