{
"best_metric": 1.514884114265442,
"best_model_checkpoint": "./pippa-sharegpt-13b-qlora/checkpoint-350",
"epoch": 2.364864864864865,
"eval_steps": 50,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2e-05,
"loss": 2.0773,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4e-05,
"loss": 2.0879,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 6e-05,
"loss": 2.0647,
"step": 3
},
{
"epoch": 0.03,
"learning_rate": 8e-05,
"loss": 1.9962,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 0.0001,
"loss": 1.9106,
"step": 5
},
{
"epoch": 0.04,
"learning_rate": 0.00012,
"loss": 1.8651,
"step": 6
},
{
"epoch": 0.05,
"learning_rate": 0.00014,
"loss": 1.8169,
"step": 7
},
{
"epoch": 0.05,
"learning_rate": 0.00016,
"loss": 1.8535,
"step": 8
},
{
"epoch": 0.06,
"learning_rate": 0.00018,
"loss": 1.8226,
"step": 9
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 1.7774,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 0.00019999738007780348,
"loss": 1.7297,
"step": 11
},
{
"epoch": 0.08,
"learning_rate": 0.00019998952044849376,
"loss": 1.7607,
"step": 12
},
{
"epoch": 0.09,
"learning_rate": 0.00019997642152390314,
"loss": 1.7323,
"step": 13
},
{
"epoch": 0.09,
"learning_rate": 0.00019995808399039496,
"loss": 1.6993,
"step": 14
},
{
"epoch": 0.1,
"learning_rate": 0.00019993450880882735,
"loss": 1.7053,
"step": 15
},
{
"epoch": 0.11,
"learning_rate": 0.00019990569721450326,
"loss": 1.7207,
"step": 16
},
{
"epoch": 0.11,
"learning_rate": 0.00019987165071710527,
"loss": 1.667,
"step": 17
},
{
"epoch": 0.12,
"learning_rate": 0.00019983237110061697,
"loss": 1.7375,
"step": 18
},
{
"epoch": 0.13,
"learning_rate": 0.0001997878604232291,
"loss": 1.725,
"step": 19
},
{
"epoch": 0.14,
"learning_rate": 0.00019973812101723188,
"loss": 1.658,
"step": 20
},
{
"epoch": 0.14,
"learning_rate": 0.0001996831554888928,
"loss": 1.6804,
"step": 21
},
{
"epoch": 0.15,
"learning_rate": 0.00019962296671832003,
"loss": 1.7273,
"step": 22
},
{
"epoch": 0.16,
"learning_rate": 0.00019955755785931145,
"loss": 1.6712,
"step": 23
},
{
"epoch": 0.16,
"learning_rate": 0.00019948693233918952,
"loss": 1.6261,
"step": 24
},
{
"epoch": 0.17,
"learning_rate": 0.0001994110938586216,
"loss": 1.673,
"step": 25
},
{
"epoch": 0.18,
"learning_rate": 0.00019933004639142605,
"loss": 1.6796,
"step": 26
},
{
"epoch": 0.18,
"learning_rate": 0.00019924379418436404,
"loss": 1.6701,
"step": 27
},
{
"epoch": 0.19,
"learning_rate": 0.000199152341756917,
"loss": 1.6619,
"step": 28
},
{
"epoch": 0.2,
"learning_rate": 0.00019905569390104986,
"loss": 1.6282,
"step": 29
},
{
"epoch": 0.2,
"learning_rate": 0.00019895385568095982,
"loss": 1.7152,
"step": 30
},
{
"epoch": 0.21,
"learning_rate": 0.00019884683243281116,
"loss": 1.6431,
"step": 31
},
{
"epoch": 0.22,
"learning_rate": 0.00019873462976445553,
"loss": 1.7584,
"step": 32
},
{
"epoch": 0.22,
"learning_rate": 0.00019861725355513823,
"loss": 1.6342,
"step": 33
},
{
"epoch": 0.23,
"learning_rate": 0.00019849470995518992,
"loss": 1.652,
"step": 34
},
{
"epoch": 0.24,
"learning_rate": 0.00019836700538570457,
"loss": 1.6472,
"step": 35
},
{
"epoch": 0.24,
"learning_rate": 0.0001982341465382029,
"loss": 1.6961,
"step": 36
},
{
"epoch": 0.25,
"learning_rate": 0.00019809614037428176,
"loss": 1.6556,
"step": 37
},
{
"epoch": 0.26,
"learning_rate": 0.00019795299412524945,
"loss": 1.6386,
"step": 38
},
{
"epoch": 0.26,
"learning_rate": 0.00019780471529174664,
"loss": 1.6692,
"step": 39
},
{
"epoch": 0.27,
"learning_rate": 0.00019765131164335345,
"loss": 1.5955,
"step": 40
},
{
"epoch": 0.28,
"learning_rate": 0.00019749279121818235,
"loss": 1.6292,
"step": 41
},
{
"epoch": 0.28,
"learning_rate": 0.000197329162322457,
"loss": 1.648,
"step": 42
},
{
"epoch": 0.29,
"learning_rate": 0.00019716043353007693,
"loss": 1.7011,
"step": 43
},
{
"epoch": 0.3,
"learning_rate": 0.00019698661368216817,
"loss": 1.6319,
"step": 44
},
{
"epoch": 0.3,
"learning_rate": 0.00019680771188662044,
"loss": 1.6143,
"step": 45
},
{
"epoch": 0.31,
"learning_rate": 0.00019662373751760934,
"loss": 1.6526,
"step": 46
},
{
"epoch": 0.32,
"learning_rate": 0.0001964347002151056,
"loss": 1.5946,
"step": 47
},
{
"epoch": 0.32,
"learning_rate": 0.00019624060988436966,
"loss": 1.6474,
"step": 48
},
{
"epoch": 0.33,
"learning_rate": 0.00019604147669543282,
"loss": 1.6756,
"step": 49
},
{
"epoch": 0.34,
"learning_rate": 0.0001958373110825644,
"loss": 1.6447,
"step": 50
},
{
"epoch": 0.34,
"eval_loss": 1.6321444511413574,
"eval_runtime": 311.9756,
"eval_samples_per_second": 4.795,
"eval_steps_per_second": 1.199,
"step": 50
},
{
"epoch": 0.34,
"learning_rate": 0.00019562812374372473,
"loss": 1.6902,
"step": 51
},
{
"epoch": 0.35,
"learning_rate": 0.00019541392564000488,
"loss": 1.6152,
"step": 52
},
{
"epoch": 0.36,
"learning_rate": 0.0001951947279950522,
"loss": 1.631,
"step": 53
},
{
"epoch": 0.36,
"learning_rate": 0.00019497054229448223,
"loss": 1.6445,
"step": 54
},
{
"epoch": 0.37,
"learning_rate": 0.00019474138028527675,
"loss": 1.6278,
"step": 55
},
{
"epoch": 0.38,
"learning_rate": 0.0001945072539751685,
"loss": 1.6202,
"step": 56
},
{
"epoch": 0.39,
"learning_rate": 0.00019426817563201177,
"loss": 1.6196,
"step": 57
},
{
"epoch": 0.39,
"learning_rate": 0.00019402415778313977,
"loss": 1.6062,
"step": 58
},
{
"epoch": 0.4,
"learning_rate": 0.00019377521321470805,
"loss": 1.5883,
"step": 59
},
{
"epoch": 0.41,
"learning_rate": 0.00019352135497102463,
"loss": 1.6769,
"step": 60
},
{
"epoch": 0.41,
"learning_rate": 0.00019326259635386644,
"loss": 1.6134,
"step": 61
},
{
"epoch": 0.42,
"learning_rate": 0.0001929989509217824,
"loss": 1.5884,
"step": 62
},
{
"epoch": 0.43,
"learning_rate": 0.00019273043248938288,
"loss": 1.6232,
"step": 63
},
{
"epoch": 0.43,
"learning_rate": 0.0001924570551266159,
"loss": 1.6303,
"step": 64
},
{
"epoch": 0.44,
"learning_rate": 0.00019217883315802991,
"loss": 1.6585,
"step": 65
},
{
"epoch": 0.45,
"learning_rate": 0.00019189578116202307,
"loss": 1.6264,
"step": 66
},
{
"epoch": 0.45,
"learning_rate": 0.00019160791397007957,
"loss": 1.5944,
"step": 67
},
{
"epoch": 0.46,
"learning_rate": 0.00019131524666599233,
"loss": 1.6371,
"step": 68
},
{
"epoch": 0.47,
"learning_rate": 0.00019101779458507263,
"loss": 1.592,
"step": 69
},
{
"epoch": 0.47,
"learning_rate": 0.00019071557331334669,
"loss": 1.6369,
"step": 70
},
{
"epoch": 0.48,
"learning_rate": 0.00019040859868673887,
"loss": 1.6355,
"step": 71
},
{
"epoch": 0.49,
"learning_rate": 0.0001900968867902419,
"loss": 1.6435,
"step": 72
},
{
"epoch": 0.49,
"learning_rate": 0.00018978045395707418,
"loss": 1.6074,
"step": 73
},
{
"epoch": 0.5,
"learning_rate": 0.00018945931676782373,
"loss": 1.5909,
"step": 74
},
{
"epoch": 0.51,
"learning_rate": 0.0001891334920495795,
"loss": 1.6151,
"step": 75
},
{
"epoch": 0.51,
"learning_rate": 0.0001888029968750498,
"loss": 1.5976,
"step": 76
},
{
"epoch": 0.52,
"learning_rate": 0.0001884678485616675,
"loss": 1.5716,
"step": 77
},
{
"epoch": 0.53,
"learning_rate": 0.00018812806467068268,
"loss": 1.5879,
"step": 78
},
{
"epoch": 0.53,
"learning_rate": 0.00018778366300624245,
"loss": 1.5664,
"step": 79
},
{
"epoch": 0.54,
"learning_rate": 0.00018743466161445823,
"loss": 1.6096,
"step": 80
},
{
"epoch": 0.55,
"learning_rate": 0.00018708107878245977,
"loss": 1.603,
"step": 81
},
{
"epoch": 0.55,
"learning_rate": 0.00018672293303743738,
"loss": 1.6123,
"step": 82
},
{
"epoch": 0.56,
"learning_rate": 0.00018636024314567067,
"loss": 1.6197,
"step": 83
},
{
"epoch": 0.57,
"learning_rate": 0.00018599302811154572,
"loss": 1.5905,
"step": 84
},
{
"epoch": 0.57,
"learning_rate": 0.00018562130717655878,
"loss": 1.6175,
"step": 85
},
{
"epoch": 0.58,
"learning_rate": 0.00018524509981830852,
"loss": 1.5753,
"step": 86
},
{
"epoch": 0.59,
"learning_rate": 0.00018486442574947511,
"loss": 1.6248,
"step": 87
},
{
"epoch": 0.59,
"learning_rate": 0.00018447930491678733,
"loss": 1.5616,
"step": 88
},
{
"epoch": 0.6,
"learning_rate": 0.00018408975749997759,
"loss": 1.596,
"step": 89
},
{
"epoch": 0.61,
"learning_rate": 0.00018369580391072433,
"loss": 1.5591,
"step": 90
},
{
"epoch": 0.61,
"learning_rate": 0.00018329746479158265,
"loss": 1.6116,
"step": 91
},
{
"epoch": 0.62,
"learning_rate": 0.00018289476101490256,
"loss": 1.5883,
"step": 92
},
{
"epoch": 0.63,
"learning_rate": 0.00018248771368173524,
"loss": 1.6456,
"step": 93
},
{
"epoch": 0.64,
"learning_rate": 0.00018207634412072764,
"loss": 1.5663,
"step": 94
},
{
"epoch": 0.64,
"learning_rate": 0.0001816606738870046,
"loss": 1.6058,
"step": 95
},
{
"epoch": 0.65,
"learning_rate": 0.00018124072476103956,
"loss": 1.5659,
"step": 96
},
{
"epoch": 0.66,
"learning_rate": 0.00018081651874751327,
"loss": 1.5531,
"step": 97
},
{
"epoch": 0.66,
"learning_rate": 0.00018038807807416068,
"loss": 1.5899,
"step": 98
},
{
"epoch": 0.67,
"learning_rate": 0.00017995542519060647,
"loss": 1.5176,
"step": 99
},
{
"epoch": 0.68,
"learning_rate": 0.00017951858276718844,
"loss": 1.6243,
"step": 100
},
{
"epoch": 0.68,
"eval_loss": 1.570174217224121,
"eval_runtime": 311.9281,
"eval_samples_per_second": 4.796,
"eval_steps_per_second": 1.199,
"step": 100
},
{
"epoch": 0.68,
"learning_rate": 0.00017907757369376985,
"loss": 1.5544,
"step": 101
},
{
"epoch": 0.69,
"learning_rate": 0.00017863242107853995,
"loss": 1.5498,
"step": 102
},
{
"epoch": 0.7,
"learning_rate": 0.000178183148246803,
"loss": 1.5598,
"step": 103
},
{
"epoch": 0.7,
"learning_rate": 0.0001777297787397563,
"loss": 1.526,
"step": 104
},
{
"epoch": 0.71,
"learning_rate": 0.00017727233631325664,
"loss": 1.5929,
"step": 105
},
{
"epoch": 0.72,
"learning_rate": 0.00017681084493657525,
"loss": 1.5623,
"step": 106
},
{
"epoch": 0.72,
"learning_rate": 0.0001763453287911422,
"loss": 1.6259,
"step": 107
},
{
"epoch": 0.73,
"learning_rate": 0.0001758758122692791,
"loss": 1.5729,
"step": 108
},
{
"epoch": 0.74,
"learning_rate": 0.00017540231997292114,
"loss": 1.5769,
"step": 109
},
{
"epoch": 0.74,
"learning_rate": 0.00017492487671232784,
"loss": 1.542,
"step": 110
},
{
"epoch": 0.75,
"learning_rate": 0.00017444350750478316,
"loss": 1.5598,
"step": 111
},
{
"epoch": 0.76,
"learning_rate": 0.00017395823757328444,
"loss": 1.5982,
"step": 112
},
{
"epoch": 0.76,
"learning_rate": 0.00017346909234522109,
"loss": 1.54,
"step": 113
},
{
"epoch": 0.77,
"learning_rate": 0.00017297609745104184,
"loss": 1.5536,
"step": 114
},
{
"epoch": 0.78,
"learning_rate": 0.000172479278722912,
"loss": 1.5671,
"step": 115
},
{
"epoch": 0.78,
"learning_rate": 0.0001719786621933599,
"loss": 1.5779,
"step": 116
},
{
"epoch": 0.79,
"learning_rate": 0.00017147427409391265,
"loss": 1.5637,
"step": 117
},
{
"epoch": 0.8,
"learning_rate": 0.00017096614085372185,
"loss": 1.5705,
"step": 118
},
{
"epoch": 0.8,
"learning_rate": 0.0001704542890981785,
"loss": 1.5621,
"step": 119
},
{
"epoch": 0.81,
"learning_rate": 0.00016993874564751822,
"loss": 1.56,
"step": 120
},
{
"epoch": 0.82,
"learning_rate": 0.00016941953751541553,
"loss": 1.5931,
"step": 121
},
{
"epoch": 0.82,
"learning_rate": 0.00016889669190756868,
"loss": 1.5508,
"step": 122
},
{
"epoch": 0.83,
"learning_rate": 0.00016837023622027388,
"loss": 1.528,
"step": 123
},
{
"epoch": 0.84,
"learning_rate": 0.00016784019803899,
"loss": 1.528,
"step": 124
},
{
"epoch": 0.84,
"learning_rate": 0.00016730660513689292,
"loss": 1.5825,
"step": 125
},
{
"epoch": 0.85,
"learning_rate": 0.0001667694854734204,
"loss": 1.5595,
"step": 126
},
{
"epoch": 0.86,
"learning_rate": 0.00016622886719280705,
"loss": 1.5411,
"step": 127
},
{
"epoch": 0.86,
"learning_rate": 0.0001656847786226095,
"loss": 1.5616,
"step": 128
},
{
"epoch": 0.87,
"learning_rate": 0.00016513724827222227,
"loss": 1.5769,
"step": 129
},
{
"epoch": 0.88,
"learning_rate": 0.00016458630483138356,
"loss": 1.5407,
"step": 130
},
{
"epoch": 0.89,
"learning_rate": 0.0001640319771686725,
"loss": 1.5193,
"step": 131
},
{
"epoch": 0.89,
"learning_rate": 0.00016347429432999602,
"loss": 1.4997,
"step": 132
},
{
"epoch": 0.9,
"learning_rate": 0.00016291328553706704,
"loss": 1.5718,
"step": 133
},
{
"epoch": 0.91,
"learning_rate": 0.00016234898018587337,
"loss": 1.5238,
"step": 134
},
{
"epoch": 0.91,
"learning_rate": 0.0001617814078451373,
"loss": 1.565,
"step": 135
},
{
"epoch": 0.92,
"learning_rate": 0.0001612105982547663,
"loss": 1.5667,
"step": 136
},
{
"epoch": 0.93,
"learning_rate": 0.0001606365813242947,
"loss": 1.5599,
"step": 137
},
{
"epoch": 0.93,
"learning_rate": 0.00016005938713131642,
"loss": 1.5277,
"step": 138
},
{
"epoch": 0.94,
"learning_rate": 0.00015947904591990907,
"loss": 1.4543,
"step": 139
},
{
"epoch": 0.95,
"learning_rate": 0.00015889558809904902,
"loss": 1.5824,
"step": 140
},
{
"epoch": 0.95,
"learning_rate": 0.0001583090442410182,
"loss": 1.561,
"step": 141
},
{
"epoch": 0.96,
"learning_rate": 0.00015771944507980207,
"loss": 1.5249,
"step": 142
},
{
"epoch": 0.97,
"learning_rate": 0.00015712682150947923,
"loss": 1.5219,
"step": 143
},
{
"epoch": 0.97,
"learning_rate": 0.00015653120458260263,
"loss": 1.4947,
"step": 144
},
{
"epoch": 0.98,
"learning_rate": 0.00015593262550857233,
"loss": 1.5432,
"step": 145
},
{
"epoch": 0.99,
"learning_rate": 0.00015533111565200044,
"loss": 1.5583,
"step": 146
},
{
"epoch": 0.99,
"learning_rate": 0.00015472670653106745,
"loss": 1.56,
"step": 147
},
{
"epoch": 1.0,
"learning_rate": 0.0001541194298158708,
"loss": 1.5059,
"step": 148
},
{
"epoch": 1.01,
"learning_rate": 0.0001535093173267654,
"loss": 1.5834,
"step": 149
},
{
"epoch": 1.01,
"learning_rate": 0.00015289640103269625,
"loss": 1.527,
"step": 150
},
{
"epoch": 1.01,
"eval_loss": 1.5405874252319336,
"eval_runtime": 311.8121,
"eval_samples_per_second": 4.798,
"eval_steps_per_second": 1.199,
"step": 150
},
{
"epoch": 1.02,
"learning_rate": 0.0001522807130495235,
"loss": 1.4796,
"step": 151
},
{
"epoch": 1.03,
"learning_rate": 0.00015166228563833934,
"loss": 1.466,
"step": 152
},
{
"epoch": 1.03,
"learning_rate": 0.00015104115120377783,
"loss": 1.469,
"step": 153
},
{
"epoch": 1.04,
"learning_rate": 0.00015041734229231688,
"loss": 1.5355,
"step": 154
},
{
"epoch": 1.05,
"learning_rate": 0.00014979089159057265,
"loss": 1.5019,
"step": 155
},
{
"epoch": 1.05,
"learning_rate": 0.00014916183192358718,
"loss": 1.5015,
"step": 156
},
{
"epoch": 1.06,
"learning_rate": 0.00014853019625310813,
"loss": 1.5418,
"step": 157
},
{
"epoch": 1.07,
"learning_rate": 0.00014789601767586173,
"loss": 1.5265,
"step": 158
},
{
"epoch": 1.07,
"learning_rate": 0.00014725932942181872,
"loss": 1.5189,
"step": 159
},
{
"epoch": 1.08,
"learning_rate": 0.00014662016485245274,
"loss": 1.4945,
"step": 160
},
{
"epoch": 1.09,
"learning_rate": 0.00014597855745899274,
"loss": 1.4742,
"step": 161
},
{
"epoch": 1.09,
"learning_rate": 0.00014533454086066772,
"loss": 1.5178,
"step": 162
},
{
"epoch": 1.1,
"learning_rate": 0.0001446881488029453,
"loss": 1.5304,
"step": 163
},
{
"epoch": 1.11,
"learning_rate": 0.00014403941515576344,
"loss": 1.5476,
"step": 164
},
{
"epoch": 1.11,
"learning_rate": 0.00014338837391175582,
"loss": 1.4911,
"step": 165
},
{
"epoch": 1.12,
"learning_rate": 0.00014273505918447054,
"loss": 1.4941,
"step": 166
},
{
"epoch": 1.13,
"learning_rate": 0.00014207950520658274,
"loss": 1.5374,
"step": 167
},
{
"epoch": 1.14,
"learning_rate": 0.00014142174632810072,
"loss": 1.4581,
"step": 168
},
{
"epoch": 1.14,
"learning_rate": 0.00014076181701456623,
"loss": 1.5601,
"step": 169
},
{
"epoch": 1.15,
"learning_rate": 0.0001400997518452484,
"loss": 1.4741,
"step": 170
},
{
"epoch": 1.16,
"learning_rate": 0.00013943558551133186,
"loss": 1.5178,
"step": 171
},
{
"epoch": 1.16,
"learning_rate": 0.00013876935281409907,
"loss": 1.5491,
"step": 172
},
{
"epoch": 1.17,
"learning_rate": 0.0001381010886631066,
"loss": 1.4954,
"step": 173
},
{
"epoch": 1.18,
"learning_rate": 0.00013743082807435615,
"loss": 1.4939,
"step": 174
},
{
"epoch": 1.18,
"learning_rate": 0.00013675860616845954,
"loss": 1.5415,
"step": 175
},
{
"epoch": 1.19,
"learning_rate": 0.00013608445816879866,
"loss": 1.5166,
"step": 176
},
{
"epoch": 1.2,
"learning_rate": 0.00013540841939967962,
"loss": 1.4965,
"step": 177
},
{
"epoch": 1.2,
"learning_rate": 0.00013473052528448201,
"loss": 1.4909,
"step": 178
},
{
"epoch": 1.21,
"learning_rate": 0.00013405081134380264,
"loss": 1.5002,
"step": 179
},
{
"epoch": 1.22,
"learning_rate": 0.00013336931319359426,
"loss": 1.4465,
"step": 180
},
{
"epoch": 1.22,
"learning_rate": 0.0001326860665432995,
"loss": 1.5397,
"step": 181
},
{
"epoch": 1.23,
"learning_rate": 0.00013200110719397968,
"loss": 1.4682,
"step": 182
},
{
"epoch": 1.24,
"learning_rate": 0.00013131447103643885,
"loss": 1.4864,
"step": 183
},
{
"epoch": 1.24,
"learning_rate": 0.00013062619404934317,
"loss": 1.4745,
"step": 184
},
{
"epoch": 1.25,
"learning_rate": 0.00012993631229733582,
"loss": 1.4745,
"step": 185
},
{
"epoch": 1.26,
"learning_rate": 0.00012924486192914705,
"loss": 1.4671,
"step": 186
},
{
"epoch": 1.26,
"learning_rate": 0.0001285518791757002,
"loss": 1.4875,
"step": 187
},
{
"epoch": 1.27,
"learning_rate": 0.00012785740034821329,
"loss": 1.5469,
"step": 188
},
{
"epoch": 1.28,
"learning_rate": 0.0001271614618362962,
"loss": 1.5117,
"step": 189
},
{
"epoch": 1.28,
"learning_rate": 0.00012646410010604397,
"loss": 1.4958,
"step": 190
},
{
"epoch": 1.29,
"learning_rate": 0.00012576535169812615,
"loss": 1.5226,
"step": 191
},
{
"epoch": 1.3,
"learning_rate": 0.00012506525322587207,
"loss": 1.5602,
"step": 192
},
{
"epoch": 1.3,
"learning_rate": 0.0001243638413733522,
"loss": 1.4868,
"step": 193
},
{
"epoch": 1.31,
"learning_rate": 0.0001236611528934562,
"loss": 1.4798,
"step": 194
},
{
"epoch": 1.32,
"learning_rate": 0.00012295722460596697,
"loss": 1.5437,
"step": 195
},
{
"epoch": 1.32,
"learning_rate": 0.00012225209339563145,
"loss": 1.4851,
"step": 196
},
{
"epoch": 1.33,
"learning_rate": 0.00012154579621022777,
"loss": 1.4799,
"step": 197
},
{
"epoch": 1.34,
"learning_rate": 0.00012083837005862946,
"loss": 1.4948,
"step": 198
},
{
"epoch": 1.34,
"learning_rate": 0.00012012985200886602,
"loss": 1.4851,
"step": 199
},
{
"epoch": 1.35,
"learning_rate": 0.00011942027918618074,
"loss": 1.4873,
"step": 200
},
{
"epoch": 1.35,
"eval_loss": 1.5275379419326782,
"eval_runtime": 311.7793,
"eval_samples_per_second": 4.798,
"eval_steps_per_second": 1.2,
"step": 200
},
{
"epoch": 1.36,
"learning_rate": 0.00011870968877108546,
"loss": 1.4698,
"step": 201
},
{
"epoch": 1.36,
"learning_rate": 0.0001179981179974121,
"loss": 1.5118,
"step": 202
},
{
"epoch": 1.37,
"learning_rate": 0.00011728560415036201,
"loss": 1.4641,
"step": 203
},
{
"epoch": 1.38,
"learning_rate": 0.00011657218456455206,
"loss": 1.4629,
"step": 204
},
{
"epoch": 1.39,
"learning_rate": 0.00011585789662205835,
"loss": 1.4944,
"step": 205
},
{
"epoch": 1.39,
"learning_rate": 0.00011514277775045768,
"loss": 1.4857,
"step": 206
},
{
"epoch": 1.4,
"learning_rate": 0.00011442686542086609,
"loss": 1.4841,
"step": 207
},
{
"epoch": 1.41,
"learning_rate": 0.00011371019714597562,
"loss": 1.4868,
"step": 208
},
{
"epoch": 1.41,
"learning_rate": 0.00011299281047808877,
"loss": 1.4687,
"step": 209
},
{
"epoch": 1.42,
"learning_rate": 0.00011227474300715055,
"loss": 1.4597,
"step": 210
},
{
"epoch": 1.43,
"learning_rate": 0.00011155603235877912,
"loss": 1.5115,
"step": 211
},
{
"epoch": 1.43,
"learning_rate": 0.00011083671619229408,
"loss": 1.525,
"step": 212
},
{
"epoch": 1.44,
"learning_rate": 0.00011011683219874323,
"loss": 1.5116,
"step": 213
},
{
"epoch": 1.45,
"learning_rate": 0.00010939641809892767,
"loss": 1.4964,
"step": 214
},
{
"epoch": 1.45,
"learning_rate": 0.0001086755116414252,
"loss": 1.4583,
"step": 215
},
{
"epoch": 1.46,
"learning_rate": 0.00010795415060061243,
"loss": 1.4918,
"step": 216
},
{
"epoch": 1.47,
"learning_rate": 0.00010723237277468538,
"loss": 1.5259,
"step": 217
},
{
"epoch": 1.47,
"learning_rate": 0.00010651021598367906,
"loss": 1.5294,
"step": 218
},
{
"epoch": 1.48,
"learning_rate": 0.00010578771806748546,
"loss": 1.4972,
"step": 219
},
{
"epoch": 1.49,
"learning_rate": 0.00010506491688387127,
"loss": 1.462,
"step": 220
},
{
"epoch": 1.49,
"learning_rate": 0.00010434185030649372,
"loss": 1.4782,
"step": 221
},
{
"epoch": 1.5,
"learning_rate": 0.00010361855622291637,
"loss": 1.4435,
"step": 222
},
{
"epoch": 1.51,
"learning_rate": 0.00010289507253262358,
"loss": 1.4993,
"step": 223
},
{
"epoch": 1.51,
"learning_rate": 0.00010217143714503508,
"loss": 1.5007,
"step": 224
},
{
"epoch": 1.52,
"learning_rate": 0.00010144768797751905,
"loss": 1.4938,
"step": 225
},
{
"epoch": 1.53,
"learning_rate": 0.00010072386295340572,
"loss": 1.4986,
"step": 226
},
{
"epoch": 1.53,
"learning_rate": 0.0001,
"loss": 1.4512,
"step": 227
},
{
"epoch": 1.54,
"learning_rate": 9.927613704659429e-05,
"loss": 1.5191,
"step": 228
},
{
"epoch": 1.55,
"learning_rate": 9.855231202248097e-05,
"loss": 1.5092,
"step": 229
},
{
"epoch": 1.55,
"learning_rate": 9.782856285496495e-05,
"loss": 1.4555,
"step": 230
},
{
"epoch": 1.56,
"learning_rate": 9.710492746737643e-05,
"loss": 1.4859,
"step": 231
},
{
"epoch": 1.57,
"learning_rate": 9.638144377708367e-05,
"loss": 1.5297,
"step": 232
},
{
"epoch": 1.57,
"learning_rate": 9.565814969350629e-05,
"loss": 1.4883,
"step": 233
},
{
"epoch": 1.58,
"learning_rate": 9.493508311612874e-05,
"loss": 1.4751,
"step": 234
},
{
"epoch": 1.59,
"learning_rate": 9.421228193251452e-05,
"loss": 1.4805,
"step": 235
},
{
"epoch": 1.59,
"learning_rate": 9.348978401632101e-05,
"loss": 1.4906,
"step": 236
},
{
"epoch": 1.6,
"learning_rate": 9.276762722531463e-05,
"loss": 1.4829,
"step": 237
},
{
"epoch": 1.61,
"learning_rate": 9.204584939938762e-05,
"loss": 1.4818,
"step": 238
},
{
"epoch": 1.61,
"learning_rate": 9.132448835857483e-05,
"loss": 1.5235,
"step": 239
},
{
"epoch": 1.62,
"learning_rate": 9.060358190107234e-05,
"loss": 1.4753,
"step": 240
},
{
"epoch": 1.63,
"learning_rate": 8.98831678012568e-05,
"loss": 1.4732,
"step": 241
},
{
"epoch": 1.64,
"learning_rate": 8.916328380770595e-05,
"loss": 1.4651,
"step": 242
},
{
"epoch": 1.64,
"learning_rate": 8.844396764122093e-05,
"loss": 1.4852,
"step": 243
},
{
"epoch": 1.65,
"learning_rate": 8.772525699284946e-05,
"loss": 1.4747,
"step": 244
},
{
"epoch": 1.66,
"learning_rate": 8.700718952191124e-05,
"loss": 1.5208,
"step": 245
},
{
"epoch": 1.66,
"learning_rate": 8.628980285402439e-05,
"loss": 1.4901,
"step": 246
},
{
"epoch": 1.67,
"learning_rate": 8.557313457913394e-05,
"loss": 1.4801,
"step": 247
},
{
"epoch": 1.68,
"learning_rate": 8.485722224954237e-05,
"loss": 1.4781,
"step": 248
},
{
"epoch": 1.68,
"learning_rate": 8.414210337794166e-05,
"loss": 1.4976,
"step": 249
},
{
"epoch": 1.69,
"learning_rate": 8.342781543544798e-05,
"loss": 1.5005,
"step": 250
},
{
"epoch": 1.69,
"eval_loss": 1.5195521116256714,
"eval_runtime": 311.8576,
"eval_samples_per_second": 4.797,
"eval_steps_per_second": 1.199,
"step": 250
},
{
"epoch": 1.7,
"learning_rate": 8.271439584963802e-05,
"loss": 1.48,
"step": 251
},
{
"epoch": 1.7,
"learning_rate": 8.200188200258791e-05,
"loss": 1.5102,
"step": 252
},
{
"epoch": 1.71,
"learning_rate": 8.129031122891459e-05,
"loss": 1.4419,
"step": 253
},
{
"epoch": 1.72,
"learning_rate": 8.057972081381927e-05,
"loss": 1.4944,
"step": 254
},
{
"epoch": 1.72,
"learning_rate": 7.987014799113397e-05,
"loss": 1.4637,
"step": 255
},
{
"epoch": 1.73,
"learning_rate": 7.916162994137056e-05,
"loss": 1.5022,
"step": 256
},
{
"epoch": 1.74,
"learning_rate": 7.845420378977223e-05,
"loss": 1.4504,
"step": 257
},
{
"epoch": 1.74,
"learning_rate": 7.774790660436858e-05,
"loss": 1.466,
"step": 258
},
{
"epoch": 1.75,
"learning_rate": 7.704277539403304e-05,
"loss": 1.4501,
"step": 259
},
{
"epoch": 1.76,
"learning_rate": 7.633884710654383e-05,
"loss": 1.5231,
"step": 260
},
{
"epoch": 1.76,
"learning_rate": 7.56361586266478e-05,
"loss": 1.498,
"step": 261
},
{
"epoch": 1.77,
"learning_rate": 7.493474677412794e-05,
"loss": 1.5227,
"step": 262
},
{
"epoch": 1.78,
"learning_rate": 7.423464830187386e-05,
"loss": 1.5031,
"step": 263
},
{
"epoch": 1.78,
"learning_rate": 7.353589989395604e-05,
"loss": 1.479,
"step": 264
},
{
"epoch": 1.79,
"learning_rate": 7.283853816370386e-05,
"loss": 1.4836,
"step": 265
},
{
"epoch": 1.8,
"learning_rate": 7.214259965178674e-05,
"loss": 1.4626,
"step": 266
},
{
"epoch": 1.8,
"learning_rate": 7.14481208242998e-05,
"loss": 1.4506,
"step": 267
},
{
"epoch": 1.81,
"learning_rate": 7.075513807085299e-05,
"loss": 1.4606,
"step": 268
},
{
"epoch": 1.82,
"learning_rate": 7.006368770266421e-05,
"loss": 1.4762,
"step": 269
},
{
"epoch": 1.82,
"learning_rate": 6.937380595065685e-05,
"loss": 1.5297,
"step": 270
},
{
"epoch": 1.83,
"learning_rate": 6.868552896356117e-05,
"loss": 1.4686,
"step": 271
},
{
"epoch": 1.84,
"learning_rate": 6.799889280602031e-05,
"loss": 1.4208,
"step": 272
},
{
"epoch": 1.84,
"learning_rate": 6.731393345670051e-05,
"loss": 1.4916,
"step": 273
},
{
"epoch": 1.85,
"learning_rate": 6.663068680640574e-05,
"loss": 1.4945,
"step": 274
},
{
"epoch": 1.86,
"learning_rate": 6.59491886561974e-05,
"loss": 1.4625,
"step": 275
},
{
"epoch": 1.86,
"learning_rate": 6.526947471551798e-05,
"loss": 1.4791,
"step": 276
},
{
"epoch": 1.87,
"learning_rate": 6.45915806003204e-05,
"loss": 1.4837,
"step": 277
},
{
"epoch": 1.88,
"learning_rate": 6.391554183120138e-05,
"loss": 1.4536,
"step": 278
},
{
"epoch": 1.89,
"learning_rate": 6.324139383154049e-05,
"loss": 1.4307,
"step": 279
},
{
"epoch": 1.89,
"learning_rate": 6.25691719256439e-05,
"loss": 1.4663,
"step": 280
},
{
"epoch": 1.9,
"learning_rate": 6.189891133689342e-05,
"loss": 1.4668,
"step": 281
},
{
"epoch": 1.91,
"learning_rate": 6.123064718590099e-05,
"loss": 1.4562,
"step": 282
},
{
"epoch": 1.91,
"learning_rate": 6.0564414488668165e-05,
"loss": 1.527,
"step": 283
},
{
"epoch": 1.92,
"learning_rate": 5.9900248154751616e-05,
"loss": 1.5169,
"step": 284
},
{
"epoch": 1.93,
"learning_rate": 5.923818298543379e-05,
"loss": 1.4942,
"step": 285
},
{
"epoch": 1.93,
"learning_rate": 5.857825367189931e-05,
"loss": 1.4828,
"step": 286
},
{
"epoch": 1.94,
"learning_rate": 5.7920494793417326e-05,
"loss": 1.5331,
"step": 287
},
{
"epoch": 1.95,
"learning_rate": 5.7264940815529485e-05,
"loss": 1.4857,
"step": 288
},
{
"epoch": 1.95,
"learning_rate": 5.6611626088244194e-05,
"loss": 1.4651,
"step": 289
},
{
"epoch": 1.96,
"learning_rate": 5.596058484423656e-05,
"loss": 1.4988,
"step": 290
},
{
"epoch": 1.97,
"learning_rate": 5.531185119705474e-05,
"loss": 1.496,
"step": 291
},
{
"epoch": 1.97,
"learning_rate": 5.46654591393323e-05,
"loss": 1.4849,
"step": 292
},
{
"epoch": 1.98,
"learning_rate": 5.402144254100725e-05,
"loss": 1.5382,
"step": 293
},
{
"epoch": 1.99,
"learning_rate": 5.337983514754723e-05,
"loss": 1.4595,
"step": 294
},
{
"epoch": 1.99,
"learning_rate": 5.274067057818131e-05,
"loss": 1.5216,
"step": 295
},
{
"epoch": 2.0,
"learning_rate": 5.2103982324138244e-05,
"loss": 1.472,
"step": 296
},
{
"epoch": 2.01,
"learning_rate": 5.146980374689192e-05,
"loss": 1.4603,
"step": 297
},
{
"epoch": 2.01,
"learning_rate": 5.083816807641284e-05,
"loss": 1.431,
"step": 298
},
{
"epoch": 2.02,
"learning_rate": 5.0209108409427384e-05,
"loss": 1.4643,
"step": 299
},
{
"epoch": 2.03,
"learning_rate": 4.958265770768316e-05,
"loss": 1.4054,
"step": 300
},
{
"epoch": 2.03,
"eval_loss": 1.5153393745422363,
"eval_runtime": 312.7093,
"eval_samples_per_second": 4.784,
"eval_steps_per_second": 1.196,
"step": 300
},
{
"epoch": 2.03,
"learning_rate": 4.895884879622216e-05,
"loss": 1.4389,
"step": 301
},
{
"epoch": 2.04,
"learning_rate": 4.833771436166069e-05,
"loss": 1.4052,
"step": 302
},
{
"epoch": 2.05,
"learning_rate": 4.7719286950476525e-05,
"loss": 1.4469,
"step": 303
},
{
"epoch": 2.05,
"learning_rate": 4.710359896730379e-05,
"loss": 1.4418,
"step": 304
},
{
"epoch": 2.06,
"learning_rate": 4.649068267323465e-05,
"loss": 1.4759,
"step": 305
},
{
"epoch": 2.07,
"learning_rate": 4.5880570184129215e-05,
"loss": 1.4329,
"step": 306
},
{
"epoch": 2.07,
"learning_rate": 4.5273293468932584e-05,
"loss": 1.4015,
"step": 307
},
{
"epoch": 2.08,
"learning_rate": 4.466888434799958e-05,
"loss": 1.4387,
"step": 308
},
{
"epoch": 2.09,
"learning_rate": 4.406737449142769e-05,
"loss": 1.4713,
"step": 309
},
{
"epoch": 2.09,
"learning_rate": 4.34687954173974e-05,
"loss": 1.4665,
"step": 310
},
{
"epoch": 2.1,
"learning_rate": 4.287317849052075e-05,
"loss": 1.4308,
"step": 311
},
{
"epoch": 2.11,
"learning_rate": 4.2280554920197936e-05,
"loss": 1.4307,
"step": 312
},
{
"epoch": 2.11,
"learning_rate": 4.169095575898181e-05,
"loss": 1.4356,
"step": 313
},
{
"epoch": 2.12,
"learning_rate": 4.1104411900951015e-05,
"loss": 1.4521,
"step": 314
},
{
"epoch": 2.13,
"learning_rate": 4.052095408009096e-05,
"loss": 1.4454,
"step": 315
},
{
"epoch": 2.14,
"learning_rate": 3.994061286868361e-05,
"loss": 1.4114,
"step": 316
},
{
"epoch": 2.14,
"learning_rate": 3.9363418675705334e-05,
"loss": 1.4255,
"step": 317
},
{
"epoch": 2.15,
"learning_rate": 3.878940174523371e-05,
"loss": 1.4438,
"step": 318
},
{
"epoch": 2.16,
"learning_rate": 3.821859215486274e-05,
"loss": 1.4493,
"step": 319
},
{
"epoch": 2.16,
"learning_rate": 3.7651019814126654e-05,
"loss": 1.4245,
"step": 320
},
{
"epoch": 2.17,
"learning_rate": 3.7086714462933006e-05,
"loss": 1.4372,
"step": 321
},
{
"epoch": 2.18,
"learning_rate": 3.652570567000402e-05,
"loss": 1.4101,
"step": 322
},
{
"epoch": 2.18,
"learning_rate": 3.5968022831327505e-05,
"loss": 1.4433,
"step": 323
},
{
"epoch": 2.19,
"learning_rate": 3.541369516861648e-05,
"loss": 1.4999,
"step": 324
},
{
"epoch": 2.2,
"learning_rate": 3.4862751727777797e-05,
"loss": 1.4423,
"step": 325
},
{
"epoch": 2.2,
"learning_rate": 3.431522137739049e-05,
"loss": 1.3616,
"step": 326
},
{
"epoch": 2.21,
"learning_rate": 3.377113280719295e-05,
"loss": 1.4282,
"step": 327
},
{
"epoch": 2.22,
"learning_rate": 3.323051452657961e-05,
"loss": 1.4705,
"step": 328
},
{
"epoch": 2.22,
"learning_rate": 3.269339486310711e-05,
"loss": 1.4255,
"step": 329
},
{
"epoch": 2.23,
"learning_rate": 3.215980196101002e-05,
"loss": 1.4869,
"step": 330
},
{
"epoch": 2.24,
"learning_rate": 3.162976377972614e-05,
"loss": 1.4884,
"step": 331
},
{
"epoch": 2.24,
"learning_rate": 3.110330809243134e-05,
"loss": 1.4677,
"step": 332
},
{
"epoch": 2.25,
"learning_rate": 3.058046248458446e-05,
"loss": 1.3935,
"step": 333
},
{
"epoch": 2.26,
"learning_rate": 3.0061254352481804e-05,
"loss": 1.4546,
"step": 334
},
{
"epoch": 2.26,
"learning_rate": 2.954571090182149e-05,
"loss": 1.4612,
"step": 335
},
{
"epoch": 2.27,
"learning_rate": 2.9033859146278197e-05,
"loss": 1.4626,
"step": 336
},
{
"epoch": 2.28,
"learning_rate": 2.852572590608735e-05,
"loss": 1.4729,
"step": 337
},
{
"epoch": 2.28,
"learning_rate": 2.8021337806640135e-05,
"loss": 1.4555,
"step": 338
},
{
"epoch": 2.29,
"learning_rate": 2.7520721277088024e-05,
"loss": 1.4631,
"step": 339
},
{
"epoch": 2.3,
"learning_rate": 2.702390254895819e-05,
"loss": 1.4732,
"step": 340
},
{
"epoch": 2.3,
"learning_rate": 2.6530907654778958e-05,
"loss": 1.419,
"step": 341
},
{
"epoch": 2.31,
"learning_rate": 2.6041762426715566e-05,
"loss": 1.4219,
"step": 342
},
{
"epoch": 2.32,
"learning_rate": 2.5556492495216867e-05,
"loss": 1.4772,
"step": 343
},
{
"epoch": 2.32,
"learning_rate": 2.5075123287672175e-05,
"loss": 1.4066,
"step": 344
},
{
"epoch": 2.33,
"learning_rate": 2.4597680027078873e-05,
"loss": 1.4213,
"step": 345
},
{
"epoch": 2.34,
"learning_rate": 2.4124187730720917e-05,
"loss": 1.4578,
"step": 346
},
{
"epoch": 2.34,
"learning_rate": 2.3654671208857826e-05,
"loss": 1.4273,
"step": 347
},
{
"epoch": 2.35,
"learning_rate": 2.3189155063424782e-05,
"loss": 1.4381,
"step": 348
},
{
"epoch": 2.36,
"learning_rate": 2.2727663686743385e-05,
"loss": 1.4327,
"step": 349
},
{
"epoch": 2.36,
"learning_rate": 2.2270221260243673e-05,
"loss": 1.4145,
"step": 350
},
{
"epoch": 2.36,
"eval_loss": 1.514884114265442,
"eval_runtime": 312.6299,
"eval_samples_per_second": 4.785,
"eval_steps_per_second": 1.196,
"step": 350
}
],
"logging_steps": 1,
"max_steps": 444,
"num_train_epochs": 3,
"save_steps": 50,
"total_flos": 2.7717336710066995e+18,
"trial_name": null,
"trial_params": null
}
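A minimal sketch of how a trainer_state.json like the one above could be inspected offline. It only relies on fields visible in this file (log_history, loss, eval_loss, step, best_metric); the checkpoint path is a placeholder taken from best_model_checkpoint, and matplotlib is assumed to be installed.

import json

import matplotlib.pyplot as plt

# Placeholder path -- point this at the actual checkpoint directory.
STATE_PATH = "pippa-sharegpt-13b-qlora/checkpoint-350/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes per-step training records ("loss") with periodic
# evaluation records ("eval_loss"); split them by which key is present.
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train_points), label="train loss")
plt.plot(*zip(*eval_points), marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.title(f"best eval loss: {state['best_metric']:.4f}")
plt.legend()
plt.show()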