{
"best_metric": 1.7466135025024414,
"best_model_checkpoint": "outputs/checkpoint-244",
"epoch": 6.996415770609319,
"eval_steps": 500,
"global_step": 244,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.5734767025089605,
"grad_norm": 0.8502072691917419,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.925,
"step": 20
},
{
"epoch": 0.974910394265233,
"eval_loss": 2.817981243133545,
"eval_runtime": 11.5812,
"eval_samples_per_second": 32.121,
"eval_steps_per_second": 4.058,
"step": 34
},
{
"epoch": 1.146953405017921,
"grad_norm": 0.916117787361145,
"learning_rate": 4.000000000000001e-06,
"loss": 2.8937,
"step": 40
},
{
"epoch": 1.7204301075268817,
"grad_norm": 1.073569416999817,
"learning_rate": 6e-06,
"loss": 2.8277,
"step": 60
},
{
"epoch": 1.978494623655914,
"eval_loss": 2.588735342025757,
"eval_runtime": 11.5877,
"eval_samples_per_second": 32.103,
"eval_steps_per_second": 4.056,
"step": 69
},
{
"epoch": 2.293906810035842,
"grad_norm": 1.3921942710876465,
"learning_rate": 8.000000000000001e-06,
"loss": 2.6548,
"step": 80
},
{
"epoch": 2.867383512544803,
"grad_norm": 1.2179991006851196,
"learning_rate": 1e-05,
"loss": 2.2871,
"step": 100
},
{
"epoch": 2.982078853046595,
"eval_loss": 1.9897719621658325,
"eval_runtime": 11.5904,
"eval_samples_per_second": 32.096,
"eval_steps_per_second": 4.055,
"step": 104
},
{
"epoch": 3.4408602150537635,
"grad_norm": 0.5500979423522949,
"learning_rate": 9.829629131445342e-06,
"loss": 2.0042,
"step": 120
},
{
"epoch": 3.985663082437276,
"eval_loss": 1.8679059743881226,
"eval_runtime": 11.5852,
"eval_samples_per_second": 32.11,
"eval_steps_per_second": 4.057,
"step": 139
},
{
"epoch": 4.014336917562724,
"grad_norm": 0.46987271308898926,
"learning_rate": 9.330127018922195e-06,
"loss": 1.9362,
"step": 140
},
{
"epoch": 4.587813620071684,
"grad_norm": 0.44649583101272583,
"learning_rate": 8.535533905932739e-06,
"loss": 1.891,
"step": 160
},
{
"epoch": 4.989247311827957,
"eval_loss": 1.8080483675003052,
"eval_runtime": 11.5897,
"eval_samples_per_second": 32.097,
"eval_steps_per_second": 4.055,
"step": 174
},
{
"epoch": 5.161290322580645,
"grad_norm": 0.4501400589942932,
"learning_rate": 7.500000000000001e-06,
"loss": 1.8352,
"step": 180
},
{
"epoch": 5.734767025089606,
"grad_norm": 0.44334864616394043,
"learning_rate": 6.294095225512604e-06,
"loss": 1.8157,
"step": 200
},
{
"epoch": 5.992831541218638,
"eval_loss": 1.7702627182006836,
"eval_runtime": 11.5903,
"eval_samples_per_second": 32.096,
"eval_steps_per_second": 4.055,
"step": 209
},
{
"epoch": 6.308243727598566,
"grad_norm": 0.4709676504135132,
"learning_rate": 5e-06,
"loss": 1.769,
"step": 220
},
{
"epoch": 6.881720430107527,
"grad_norm": 0.45132553577423096,
"learning_rate": 3.705904774487396e-06,
"loss": 1.7697,
"step": 240
},
{
"epoch": 6.996415770609319,
"eval_loss": 1.7466135025024414,
"eval_runtime": 11.5882,
"eval_samples_per_second": 32.102,
"eval_steps_per_second": 4.056,
"step": 244
}
],
"logging_steps": 20,
"max_steps": 340,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 1.2904027978850304e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}