{
  "best_metric": 1.570174217224121,
  "best_model_checkpoint": "./pippa-sharegpt-13b-qlora/checkpoint-100",
  "epoch": 0.6756756756756757,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.01, "learning_rate": 2e-05, "loss": 2.0773, "step": 1 },
    { "epoch": 0.01, "learning_rate": 4e-05, "loss": 2.0879, "step": 2 },
    { "epoch": 0.02, "learning_rate": 6e-05, "loss": 2.0647, "step": 3 },
    { "epoch": 0.03, "learning_rate": 8e-05, "loss": 1.9962, "step": 4 },
    { "epoch": 0.03, "learning_rate": 0.0001, "loss": 1.9106, "step": 5 },
    { "epoch": 0.04, "learning_rate": 0.00012, "loss": 1.8651, "step": 6 },
    { "epoch": 0.05, "learning_rate": 0.00014, "loss": 1.8169, "step": 7 },
    { "epoch": 0.05, "learning_rate": 0.00016, "loss": 1.8535, "step": 8 },
    { "epoch": 0.06, "learning_rate": 0.00018, "loss": 1.8226, "step": 9 },
    { "epoch": 0.07, "learning_rate": 0.0002, "loss": 1.7774, "step": 10 },
    { "epoch": 0.07, "learning_rate": 0.00019999738007780348, "loss": 1.7297, "step": 11 },
    { "epoch": 0.08, "learning_rate": 0.00019998952044849376, "loss": 1.7607, "step": 12 },
    { "epoch": 0.09, "learning_rate": 0.00019997642152390314, "loss": 1.7323, "step": 13 },
    { "epoch": 0.09, "learning_rate": 0.00019995808399039496, "loss": 1.6993, "step": 14 },
    { "epoch": 0.1, "learning_rate": 0.00019993450880882735, "loss": 1.7053, "step": 15 },
    { "epoch": 0.11, "learning_rate": 0.00019990569721450326, "loss": 1.7207, "step": 16 },
    { "epoch": 0.11, "learning_rate": 0.00019987165071710527, "loss": 1.667, "step": 17 },
    { "epoch": 0.12, "learning_rate": 0.00019983237110061697, "loss": 1.7375, "step": 18 },
    { "epoch": 0.13, "learning_rate": 0.0001997878604232291, "loss": 1.725, "step": 19 },
    { "epoch": 0.14, "learning_rate": 0.00019973812101723188, "loss": 1.658, "step": 20 },
    { "epoch": 0.14, "learning_rate": 0.0001996831554888928, "loss": 1.6804, "step": 21 },
    { "epoch": 0.15, "learning_rate": 0.00019962296671832003, "loss": 1.7273, "step": 22 },
    { "epoch": 0.16, "learning_rate": 0.00019955755785931145, "loss": 1.6712, "step": 23 },
    { "epoch": 0.16, "learning_rate": 0.00019948693233918952, "loss": 1.6261, "step": 24 },
    { "epoch": 0.17, "learning_rate": 0.0001994110938586216, "loss": 1.673, "step": 25 },
    { "epoch": 0.18, "learning_rate": 0.00019933004639142605, "loss": 1.6796, "step": 26 },
    { "epoch": 0.18, "learning_rate": 0.00019924379418436404, "loss": 1.6701, "step": 27 },
    { "epoch": 0.19, "learning_rate": 0.000199152341756917, "loss": 1.6619, "step": 28 },
    { "epoch": 0.2, "learning_rate": 0.00019905569390104986, "loss": 1.6282, "step": 29 },
    { "epoch": 0.2, "learning_rate": 0.00019895385568095982, "loss": 1.7152, "step": 30 },
    { "epoch": 0.21, "learning_rate": 0.00019884683243281116, "loss": 1.6431, "step": 31 },
    { "epoch": 0.22, "learning_rate": 0.00019873462976445553, "loss": 1.7584, "step": 32 },
    { "epoch": 0.22, "learning_rate": 0.00019861725355513823, "loss": 1.6342, "step": 33 },
    { "epoch": 0.23, "learning_rate": 0.00019849470995518992, "loss": 1.652, "step": 34 },
    { "epoch": 0.24, "learning_rate": 0.00019836700538570457, "loss": 1.6472, "step": 35 },
    { "epoch": 0.24, "learning_rate": 0.0001982341465382029, "loss": 1.6961, "step": 36 },
    { "epoch": 0.25, "learning_rate": 0.00019809614037428176, "loss": 1.6556, "step": 37 },
    { "epoch": 0.26, "learning_rate": 0.00019795299412524945, "loss": 1.6386, "step": 38 },
    { "epoch": 0.26, "learning_rate": 0.00019780471529174664, "loss": 1.6692, "step": 39 },
    { "epoch": 0.27, "learning_rate": 0.00019765131164335345, "loss": 1.5955, "step": 40 },
    { "epoch": 0.28, "learning_rate": 0.00019749279121818235, "loss": 1.6292, "step": 41 },
    { "epoch": 0.28, "learning_rate": 0.000197329162322457, "loss": 1.648, "step": 42 },
    { "epoch": 0.29, "learning_rate": 0.00019716043353007693, "loss": 1.7011, "step": 43 },
    { "epoch": 0.3, "learning_rate": 0.00019698661368216817, "loss": 1.6319, "step": 44 },
    { "epoch": 0.3, "learning_rate": 0.00019680771188662044, "loss": 1.6143, "step": 45 },
    { "epoch": 0.31, "learning_rate": 0.00019662373751760934, "loss": 1.6526, "step": 46 },
    { "epoch": 0.32, "learning_rate": 0.0001964347002151056, "loss": 1.5946, "step": 47 },
    { "epoch": 0.32, "learning_rate": 0.00019624060988436966, "loss": 1.6474, "step": 48 },
    { "epoch": 0.33, "learning_rate": 0.00019604147669543282, "loss": 1.6756, "step": 49 },
    { "epoch": 0.34, "learning_rate": 0.0001958373110825644, "loss": 1.6447, "step": 50 },
    { "epoch": 0.34, "eval_loss": 1.6321444511413574, "eval_runtime": 311.9756, "eval_samples_per_second": 4.795, "eval_steps_per_second": 1.199, "step": 50 },
    { "epoch": 0.34, "learning_rate": 0.00019562812374372473, "loss": 1.6902, "step": 51 },
    { "epoch": 0.35, "learning_rate": 0.00019541392564000488, "loss": 1.6152, "step": 52 },
    { "epoch": 0.36, "learning_rate": 0.0001951947279950522, "loss": 1.631, "step": 53 },
    { "epoch": 0.36, "learning_rate": 0.00019497054229448223, "loss": 1.6445, "step": 54 },
    { "epoch": 0.37, "learning_rate": 0.00019474138028527675, "loss": 1.6278, "step": 55 },
    { "epoch": 0.38, "learning_rate": 0.0001945072539751685, "loss": 1.6202, "step": 56 },
    { "epoch": 0.39, "learning_rate": 0.00019426817563201177, "loss": 1.6196, "step": 57 },
    { "epoch": 0.39, "learning_rate": 0.00019402415778313977, "loss": 1.6062, "step": 58 },
    { "epoch": 0.4, "learning_rate": 0.00019377521321470805, "loss": 1.5883, "step": 59 },
    { "epoch": 0.41, "learning_rate": 0.00019352135497102463, "loss": 1.6769, "step": 60 },
    { "epoch": 0.41, "learning_rate": 0.00019326259635386644, "loss": 1.6134, "step": 61 },
    { "epoch": 0.42, "learning_rate": 0.0001929989509217824, "loss": 1.5884, "step": 62 },
    { "epoch": 0.43, "learning_rate": 0.00019273043248938288, "loss": 1.6232, "step": 63 },
    { "epoch": 0.43, "learning_rate": 0.0001924570551266159, "loss": 1.6303, "step": 64 },
    { "epoch": 0.44, "learning_rate": 0.00019217883315802991, "loss": 1.6585, "step": 65 },
    { "epoch": 0.45, "learning_rate": 0.00019189578116202307, "loss": 1.6264, "step": 66 },
    { "epoch": 0.45, "learning_rate": 0.00019160791397007957, "loss": 1.5944, "step": 67 },
    { "epoch": 0.46, "learning_rate": 0.00019131524666599233, "loss": 1.6371, "step": 68 },
    { "epoch": 0.47, "learning_rate": 0.00019101779458507263, "loss": 1.592, "step": 69 },
    { "epoch": 0.47, "learning_rate": 0.00019071557331334669, "loss": 1.6369, "step": 70 },
    { "epoch": 0.48, "learning_rate": 0.00019040859868673887, "loss": 1.6355, "step": 71 },
    { "epoch": 0.49, "learning_rate": 0.0001900968867902419, "loss": 1.6435, "step": 72 },
    { "epoch": 0.49, "learning_rate": 0.00018978045395707418, "loss": 1.6074, "step": 73 },
    { "epoch": 0.5, "learning_rate": 0.00018945931676782373, "loss": 1.5909, "step": 74 },
    { "epoch": 0.51, "learning_rate": 0.0001891334920495795, "loss": 1.6151, "step": 75 },
    { "epoch": 0.51, "learning_rate": 0.0001888029968750498, "loss": 1.5976, "step": 76 },
    { "epoch": 0.52, "learning_rate": 0.0001884678485616675, "loss": 1.5716, "step": 77 },
    { "epoch": 0.53, "learning_rate": 0.00018812806467068268, "loss": 1.5879, "step": 78 },
    { "epoch": 0.53, "learning_rate": 0.00018778366300624245, "loss": 1.5664, "step": 79 },
    { "epoch": 0.54, "learning_rate": 0.00018743466161445823, "loss": 1.6096, "step": 80 },
    { "epoch": 0.55, "learning_rate": 0.00018708107878245977, "loss": 1.603, "step": 81 },
    { "epoch": 0.55, "learning_rate": 0.00018672293303743738, "loss": 1.6123, "step": 82 },
    { "epoch": 0.56, "learning_rate": 0.00018636024314567067, "loss": 1.6197, "step": 83 },
    { "epoch": 0.57, "learning_rate": 0.00018599302811154572, "loss": 1.5905, "step": 84 },
    { "epoch": 0.57, "learning_rate": 0.00018562130717655878, "loss": 1.6175, "step": 85 },
    { "epoch": 0.58, "learning_rate": 0.00018524509981830852, "loss": 1.5753, "step": 86 },
    { "epoch": 0.59, "learning_rate": 0.00018486442574947511, "loss": 1.6248, "step": 87 },
    { "epoch": 0.59, "learning_rate": 0.00018447930491678733, "loss": 1.5616, "step": 88 },
    { "epoch": 0.6, "learning_rate": 0.00018408975749997759, "loss": 1.596, "step": 89 },
    { "epoch": 0.61, "learning_rate": 0.00018369580391072433, "loss": 1.5591, "step": 90 },
    { "epoch": 0.61, "learning_rate": 0.00018329746479158265, "loss": 1.6116, "step": 91 },
    { "epoch": 0.62, "learning_rate": 0.00018289476101490256, "loss": 1.5883, "step": 92 },
    { "epoch": 0.63, "learning_rate": 0.00018248771368173524, "loss": 1.6456, "step": 93 },
    { "epoch": 0.64, "learning_rate": 0.00018207634412072764, "loss": 1.5663, "step": 94 },
    { "epoch": 0.64, "learning_rate": 0.0001816606738870046, "loss": 1.6058, "step": 95 },
    { "epoch": 0.65, "learning_rate": 0.00018124072476103956, "loss": 1.5659, "step": 96 },
    { "epoch": 0.66, "learning_rate": 0.00018081651874751327, "loss": 1.5531, "step": 97 },
    { "epoch": 0.66, "learning_rate": 0.00018038807807416068, "loss": 1.5899, "step": 98 },
    { "epoch": 0.67, "learning_rate": 0.00017995542519060647, "loss": 1.5176, "step": 99 },
    { "epoch": 0.68, "learning_rate": 0.00017951858276718844, "loss": 1.6243, "step": 100 },
    { "epoch": 0.68, "eval_loss": 1.570174217224121, "eval_runtime": 311.9281, "eval_samples_per_second": 4.796, "eval_steps_per_second": 1.199, "step": 100 }
  ],
  "logging_steps": 1,
  "max_steps": 444,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 7.922919280261202e+17,
  "trial_name": null,
  "trial_params": null
}