{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 371880,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 0.8108707666397095,
"learning_rate": 3.125e-05,
"loss": 6.2176,
"step": 1000
},
{
"epoch": 0.11,
"grad_norm": 0.8937825560569763,
"learning_rate": 6.25e-05,
"loss": 5.014,
"step": 2000
},
{
"epoch": 0.16,
"grad_norm": 0.8364607691764832,
"learning_rate": 9.375e-05,
"loss": 4.6797,
"step": 3000
},
{
"epoch": 0.22,
"grad_norm": 0.80474454164505,
"learning_rate": 0.000125,
"loss": 4.4554,
"step": 4000
},
{
"epoch": 0.27,
"grad_norm": 0.8272607922554016,
"learning_rate": 0.00015625,
"loss": 4.2966,
"step": 5000
},
{
"epoch": 0.32,
"grad_norm": 0.7503081560134888,
"learning_rate": 0.0001875,
"loss": 4.1799,
"step": 6000
},
{
"epoch": 0.38,
"grad_norm": 0.6826441884040833,
"learning_rate": 0.00021875,
"loss": 4.0754,
"step": 7000
},
{
"epoch": 0.43,
"grad_norm": 0.675743579864502,
"learning_rate": 0.00025,
"loss": 3.9852,
"step": 8000
},
{
"epoch": 0.48,
"grad_norm": 0.6586818099021912,
"learning_rate": 0.00028121875,
"loss": 3.905,
"step": 9000
},
{
"epoch": 0.54,
"grad_norm": 0.6130994558334351,
"learning_rate": 0.00031246875000000003,
"loss": 3.8441,
"step": 10000
},
{
"epoch": 0.59,
"grad_norm": 0.5862373113632202,
"learning_rate": 0.00034368749999999997,
"loss": 3.8003,
"step": 11000
},
{
"epoch": 0.65,
"grad_norm": 0.5662557482719421,
"learning_rate": 0.0003749375,
"loss": 3.7638,
"step": 12000
},
{
"epoch": 0.7,
"grad_norm": 0.5084989666938782,
"learning_rate": 0.0004061875,
"loss": 3.7227,
"step": 13000
},
{
"epoch": 0.75,
"grad_norm": 0.4641186594963074,
"learning_rate": 0.0004374375,
"loss": 3.6876,
"step": 14000
},
{
"epoch": 0.81,
"grad_norm": 0.4646807610988617,
"learning_rate": 0.00046865625,
"loss": 3.6618,
"step": 15000
},
{
"epoch": 0.86,
"grad_norm": 0.4126532971858978,
"learning_rate": 0.00049990625,
"loss": 3.6414,
"step": 16000
},
{
"epoch": 0.91,
"grad_norm": 0.4246925711631775,
"learning_rate": 0.00053109375,
"loss": 3.6218,
"step": 17000
},
{
"epoch": 0.97,
"grad_norm": 0.38441503047943115,
"learning_rate": 0.00056234375,
"loss": 3.6021,
"step": 18000
},
{
"epoch": 1.0,
"eval_accuracy": 0.35802428577743683,
"eval_loss": 3.802968978881836,
"eval_runtime": 153.3684,
"eval_samples_per_second": 377.64,
"eval_steps_per_second": 5.901,
"step": 18594
},
{
"epoch": 1.02,
"grad_norm": 0.3549180030822754,
"learning_rate": 0.0005935625,
"loss": 3.5728,
"step": 19000
},
{
"epoch": 1.08,
"grad_norm": 0.32603904604911804,
"learning_rate": 0.00062478125,
"loss": 3.5442,
"step": 20000
},
{
"epoch": 1.13,
"grad_norm": 0.31714433431625366,
"learning_rate": 0.0006560312499999999,
"loss": 3.5347,
"step": 21000
},
{
"epoch": 1.18,
"grad_norm": 0.3043628931045532,
"learning_rate": 0.00068728125,
"loss": 3.524,
"step": 22000
},
{
"epoch": 1.24,
"grad_norm": 0.29280972480773926,
"learning_rate": 0.00071853125,
"loss": 3.5164,
"step": 23000
},
{
"epoch": 1.29,
"grad_norm": 0.306801974773407,
"learning_rate": 0.0007497500000000001,
"loss": 3.5009,
"step": 24000
},
{
"epoch": 1.34,
"grad_norm": 0.27948838472366333,
"learning_rate": 0.000781,
"loss": 3.4938,
"step": 25000
},
{
"epoch": 1.4,
"grad_norm": 0.3129396438598633,
"learning_rate": 0.00081225,
"loss": 3.4841,
"step": 26000
},
{
"epoch": 1.45,
"grad_norm": 0.24496008455753326,
"learning_rate": 0.00084346875,
"loss": 3.4793,
"step": 27000
},
{
"epoch": 1.51,
"grad_norm": 0.23840215802192688,
"learning_rate": 0.00087471875,
"loss": 3.4676,
"step": 28000
},
{
"epoch": 1.56,
"grad_norm": 0.2514866292476654,
"learning_rate": 0.0009059375,
"loss": 3.4596,
"step": 29000
},
{
"epoch": 1.61,
"grad_norm": 0.24790681898593903,
"learning_rate": 0.0009371875,
"loss": 3.4503,
"step": 30000
},
{
"epoch": 1.67,
"grad_norm": 0.23787342011928558,
"learning_rate": 0.0009684062500000001,
"loss": 3.4442,
"step": 31000
},
{
"epoch": 1.72,
"grad_norm": 0.25654569268226624,
"learning_rate": 0.0009996562500000001,
"loss": 3.4397,
"step": 32000
},
{
"epoch": 1.77,
"grad_norm": 0.2547774016857147,
"learning_rate": 0.0009970901494645169,
"loss": 3.4263,
"step": 33000
},
{
"epoch": 1.83,
"grad_norm": 0.2178904265165329,
"learning_rate": 0.00099415087678004,
"loss": 3.4174,
"step": 34000
},
{
"epoch": 1.88,
"grad_norm": 0.2203870415687561,
"learning_rate": 0.000991208661880664,
"loss": 3.4045,
"step": 35000
},
{
"epoch": 1.94,
"grad_norm": 0.23818820714950562,
"learning_rate": 0.000988269389196187,
"loss": 3.3961,
"step": 36000
},
{
"epoch": 1.99,
"grad_norm": 0.21215203404426575,
"learning_rate": 0.0009853271742968105,
"loss": 3.3886,
"step": 37000
},
{
"epoch": 2.0,
"eval_accuracy": 0.3807623229030497,
"eval_loss": 3.6018991470336914,
"eval_runtime": 154.0265,
"eval_samples_per_second": 376.026,
"eval_steps_per_second": 5.876,
"step": 37188
},
{
"epoch": 2.04,
"grad_norm": 0.21038022637367249,
"learning_rate": 0.0009823849593974345,
"loss": 3.3355,
"step": 38000
},
{
"epoch": 2.1,
"grad_norm": 0.2191259115934372,
"learning_rate": 0.0009794456867129576,
"loss": 3.3288,
"step": 39000
},
{
"epoch": 2.15,
"grad_norm": 0.19959229230880737,
"learning_rate": 0.0009765064140284806,
"loss": 3.3227,
"step": 40000
},
{
"epoch": 2.21,
"grad_norm": 0.2156379371881485,
"learning_rate": 0.0009735641991291045,
"loss": 3.3187,
"step": 41000
},
{
"epoch": 2.26,
"grad_norm": 0.2161584496498108,
"learning_rate": 0.0009706219842297282,
"loss": 3.3161,
"step": 42000
},
{
"epoch": 2.31,
"grad_norm": 0.1998431235551834,
"learning_rate": 0.000967679769330352,
"loss": 3.3137,
"step": 43000
},
{
"epoch": 2.37,
"grad_norm": 0.19886909425258636,
"learning_rate": 0.0009647375544309756,
"loss": 3.3056,
"step": 44000
},
{
"epoch": 2.42,
"grad_norm": 0.196481391787529,
"learning_rate": 0.0009617953395315993,
"loss": 3.304,
"step": 45000
},
{
"epoch": 2.47,
"grad_norm": 0.1846245974302292,
"learning_rate": 0.0009588590090620219,
"loss": 3.2982,
"step": 46000
},
{
"epoch": 2.53,
"grad_norm": 0.26932549476623535,
"learning_rate": 0.0009559167941626457,
"loss": 3.2934,
"step": 47000
},
{
"epoch": 2.58,
"grad_norm": 0.19994695484638214,
"learning_rate": 0.0009529775214781687,
"loss": 3.2856,
"step": 48000
},
{
"epoch": 2.64,
"grad_norm": 0.1982845515012741,
"learning_rate": 0.0009500353065787925,
"loss": 3.2853,
"step": 49000
},
{
"epoch": 2.69,
"grad_norm": 0.22861963510513306,
"learning_rate": 0.0009470930916794163,
"loss": 3.2773,
"step": 50000
},
{
"epoch": 2.74,
"grad_norm": 0.21855035424232483,
"learning_rate": 0.0009441538189949395,
"loss": 3.2775,
"step": 51000
},
{
"epoch": 2.8,
"grad_norm": 0.1985599547624588,
"learning_rate": 0.0009412116040955631,
"loss": 3.2705,
"step": 52000
},
{
"epoch": 2.85,
"grad_norm": 0.21117864549160004,
"learning_rate": 0.0009382723314110863,
"loss": 3.2669,
"step": 53000
},
{
"epoch": 2.9,
"grad_norm": 0.20696263015270233,
"learning_rate": 0.0009353301165117101,
"loss": 3.2612,
"step": 54000
},
{
"epoch": 2.96,
"grad_norm": 0.19961431622505188,
"learning_rate": 0.0009323879016123337,
"loss": 3.2595,
"step": 55000
},
{
"epoch": 3.0,
"eval_accuracy": 0.39251212938512425,
"eval_loss": 3.45461368560791,
"eval_runtime": 154.2675,
"eval_samples_per_second": 375.439,
"eval_steps_per_second": 5.866,
"step": 55782
},
{
"epoch": 3.01,
"grad_norm": 0.2054821103811264,
"learning_rate": 0.0009294456867129574,
"loss": 3.2412,
"step": 56000
},
{
"epoch": 3.07,
"grad_norm": 0.24094636738300323,
"learning_rate": 0.0009265064140284807,
"loss": 3.1894,
"step": 57000
},
{
"epoch": 3.12,
"grad_norm": 0.20816025137901306,
"learning_rate": 0.0009235671413440038,
"loss": 3.1928,
"step": 58000
},
{
"epoch": 3.17,
"grad_norm": 0.19106166064739227,
"learning_rate": 0.0009206278686595268,
"loss": 3.1975,
"step": 59000
},
{
"epoch": 3.23,
"grad_norm": 0.19138582050800323,
"learning_rate": 0.0009176856537601506,
"loss": 3.1966,
"step": 60000
},
{
"epoch": 3.28,
"grad_norm": 0.22713367640972137,
"learning_rate": 0.0009147434388607744,
"loss": 3.1969,
"step": 61000
},
{
"epoch": 3.33,
"grad_norm": 0.21247854828834534,
"learning_rate": 0.0009118012239613982,
"loss": 3.1941,
"step": 62000
},
{
"epoch": 3.39,
"grad_norm": 0.21524560451507568,
"learning_rate": 0.0009088590090620219,
"loss": 3.198,
"step": 63000
},
{
"epoch": 3.44,
"grad_norm": 0.19328753650188446,
"learning_rate": 0.000905919736377545,
"loss": 3.1909,
"step": 64000
},
{
"epoch": 3.5,
"grad_norm": 0.20355555415153503,
"learning_rate": 0.0009029775214781688,
"loss": 3.193,
"step": 65000
},
{
"epoch": 3.55,
"grad_norm": 0.19756613671779633,
"learning_rate": 0.0009000353065787925,
"loss": 3.1905,
"step": 66000
},
{
"epoch": 3.6,
"grad_norm": 0.19862300157546997,
"learning_rate": 0.0008970960338943157,
"loss": 3.194,
"step": 67000
},
{
"epoch": 3.66,
"grad_norm": 0.22642965614795685,
"learning_rate": 0.0008941538189949394,
"loss": 3.1927,
"step": 68000
},
{
"epoch": 3.71,
"grad_norm": 0.23159223794937134,
"learning_rate": 0.0008912116040955631,
"loss": 3.1915,
"step": 69000
},
{
"epoch": 3.76,
"grad_norm": 0.19946345686912537,
"learning_rate": 0.0008882693891961869,
"loss": 3.1871,
"step": 70000
},
{
"epoch": 3.82,
"grad_norm": 0.2760438621044159,
"learning_rate": 0.00088533011651171,
"loss": 3.1881,
"step": 71000
},
{
"epoch": 3.87,
"grad_norm": 0.24984723329544067,
"learning_rate": 0.0008823879016123337,
"loss": 3.1819,
"step": 72000
},
{
"epoch": 3.93,
"grad_norm": 0.20177915692329407,
"learning_rate": 0.000879448628927857,
"loss": 3.1835,
"step": 73000
},
{
"epoch": 3.98,
"grad_norm": 0.20022615790367126,
"learning_rate": 0.0008765064140284806,
"loss": 3.1812,
"step": 74000
},
{
"epoch": 4.0,
"eval_accuracy": 0.3980135339604644,
"eval_loss": 3.4454047679901123,
"eval_runtime": 153.6394,
"eval_samples_per_second": 376.974,
"eval_steps_per_second": 5.89,
"step": 74376
},
{
"epoch": 4.03,
"grad_norm": 0.19454769790172577,
"learning_rate": 0.0008735641991291044,
"loss": 3.133,
"step": 75000
},
{
"epoch": 4.09,
"grad_norm": 0.20843255519866943,
"learning_rate": 0.0008706249264446276,
"loss": 3.116,
"step": 76000
},
{
"epoch": 4.14,
"grad_norm": 0.21121448278427124,
"learning_rate": 0.0008676827115452513,
"loss": 3.1183,
"step": 77000
},
{
"epoch": 4.19,
"grad_norm": 0.2094685435295105,
"learning_rate": 0.0008647434388607744,
"loss": 3.118,
"step": 78000
},
{
"epoch": 4.25,
"grad_norm": 0.2301424890756607,
"learning_rate": 0.0008618012239613982,
"loss": 3.1265,
"step": 79000
},
{
"epoch": 4.3,
"grad_norm": 0.24608786404132843,
"learning_rate": 0.0008588590090620219,
"loss": 3.1284,
"step": 80000
},
{
"epoch": 4.36,
"grad_norm": 0.21532559394836426,
"learning_rate": 0.000855919736377545,
"loss": 3.1279,
"step": 81000
},
{
"epoch": 4.41,
"grad_norm": 0.18981580436229706,
"learning_rate": 0.0008529775214781688,
"loss": 3.1293,
"step": 82000
},
{
"epoch": 4.46,
"grad_norm": 0.2737584412097931,
"learning_rate": 0.0008500353065787926,
"loss": 3.1304,
"step": 83000
},
{
"epoch": 4.52,
"grad_norm": 0.1946648210287094,
"learning_rate": 0.0008470930916794163,
"loss": 3.1229,
"step": 84000
},
{
"epoch": 4.57,
"grad_norm": 0.20237942039966583,
"learning_rate": 0.0008441538189949394,
"loss": 3.1282,
"step": 85000
},
{
"epoch": 4.63,
"grad_norm": 0.20490285754203796,
"learning_rate": 0.0008412116040955632,
"loss": 3.1285,
"step": 86000
},
{
"epoch": 4.68,
"grad_norm": 0.20395901799201965,
"learning_rate": 0.0008382723314110862,
"loss": 3.124,
"step": 87000
},
{
"epoch": 4.73,
"grad_norm": 0.22166697680950165,
"learning_rate": 0.00083533011651171,
"loss": 3.1295,
"step": 88000
},
{
"epoch": 4.79,
"grad_norm": 0.21369241178035736,
"learning_rate": 0.0008323879016123338,
"loss": 3.1255,
"step": 89000
},
{
"epoch": 4.84,
"grad_norm": 0.21132268011569977,
"learning_rate": 0.0008294486289278568,
"loss": 3.1248,
"step": 90000
},
{
"epoch": 4.89,
"grad_norm": 0.2022734135389328,
"learning_rate": 0.0008265064140284807,
"loss": 3.1268,
"step": 91000
},
{
"epoch": 4.95,
"grad_norm": 0.25799113512039185,
"learning_rate": 0.0008235641991291044,
"loss": 3.1282,
"step": 92000
},
{
"epoch": 5.0,
"eval_accuracy": 0.4015370314907847,
"eval_loss": 3.4115054607391357,
"eval_runtime": 153.7951,
"eval_samples_per_second": 376.592,
"eval_steps_per_second": 5.884,
"step": 92970
},
{
"epoch": 5.0,
"grad_norm": 0.21077696979045868,
"learning_rate": 0.0008206219842297281,
"loss": 3.1186,
"step": 93000
},
{
"epoch": 5.06,
"grad_norm": 0.21865037083625793,
"learning_rate": 0.0008176827115452513,
"loss": 3.055,
"step": 94000
},
{
"epoch": 5.11,
"grad_norm": 0.23599696159362793,
"learning_rate": 0.0008147434388607744,
"loss": 3.0637,
"step": 95000
},
{
"epoch": 5.16,
"grad_norm": 0.21938414871692657,
"learning_rate": 0.000811801223961398,
"loss": 3.0664,
"step": 96000
},
{
"epoch": 5.22,
"grad_norm": 0.23077841103076935,
"learning_rate": 0.000808859009062022,
"loss": 3.0707,
"step": 97000
},
{
"epoch": 5.27,
"grad_norm": 0.21069471538066864,
"learning_rate": 0.000805919736377545,
"loss": 3.0729,
"step": 98000
},
{
"epoch": 5.32,
"grad_norm": 0.23917998373508453,
"learning_rate": 0.0008029775214781688,
"loss": 3.0714,
"step": 99000
},
{
"epoch": 5.38,
"grad_norm": 0.18625333905220032,
"learning_rate": 0.0008000353065787926,
"loss": 3.0735,
"step": 100000
},
{
"epoch": 5.43,
"grad_norm": 0.2624528408050537,
"learning_rate": 0.0007970960338943156,
"loss": 3.0791,
"step": 101000
},
{
"epoch": 5.49,
"grad_norm": 0.20559582114219666,
"learning_rate": 0.0007941538189949394,
"loss": 3.0794,
"step": 102000
},
{
"epoch": 5.54,
"grad_norm": 0.20613546669483185,
"learning_rate": 0.0007912116040955632,
"loss": 3.0787,
"step": 103000
},
{
"epoch": 5.59,
"grad_norm": 0.22262121737003326,
"learning_rate": 0.000788269389196187,
"loss": 3.0778,
"step": 104000
},
{
"epoch": 5.65,
"grad_norm": 0.21528062224388123,
"learning_rate": 0.00078533011651171,
"loss": 3.0796,
"step": 105000
},
{
"epoch": 5.7,
"grad_norm": 0.20560766756534576,
"learning_rate": 0.0007823879016123338,
"loss": 3.0832,
"step": 106000
},
{
"epoch": 5.75,
"grad_norm": 0.22400015592575073,
"learning_rate": 0.0007794486289278569,
"loss": 3.0857,
"step": 107000
},
{
"epoch": 5.81,
"grad_norm": 0.22037924826145172,
"learning_rate": 0.0007765064140284806,
"loss": 3.0776,
"step": 108000
},
{
"epoch": 5.86,
"grad_norm": 0.20716436207294464,
"learning_rate": 0.0007735671413440037,
"loss": 3.0792,
"step": 109000
},
{
"epoch": 5.92,
"grad_norm": 0.2654527723789215,
"learning_rate": 0.0007706249264446275,
"loss": 3.0851,
"step": 110000
},
{
"epoch": 5.97,
"grad_norm": 0.20225553214550018,
"learning_rate": 0.0007676856537601507,
"loss": 3.0804,
"step": 111000
},
{
"epoch": 6.0,
"eval_accuracy": 0.40455787284397654,
"eval_loss": 3.3764584064483643,
"eval_runtime": 154.2451,
"eval_samples_per_second": 375.493,
"eval_steps_per_second": 5.867,
"step": 111564
},
{
"epoch": 6.02,
"grad_norm": 0.2055094987154007,
"learning_rate": 0.0007647434388607743,
"loss": 3.0519,
"step": 112000
},
{
"epoch": 6.08,
"grad_norm": 0.2185896337032318,
"learning_rate": 0.0007618041661762976,
"loss": 3.0174,
"step": 113000
},
{
"epoch": 6.13,
"grad_norm": 0.20856788754463196,
"learning_rate": 0.0007588619512769213,
"loss": 3.0198,
"step": 114000
},
{
"epoch": 6.18,
"grad_norm": 0.22500406205654144,
"learning_rate": 0.0007559197363775451,
"loss": 3.0234,
"step": 115000
},
{
"epoch": 6.24,
"grad_norm": 0.21073341369628906,
"learning_rate": 0.0007529775214781687,
"loss": 3.0292,
"step": 116000
},
{
"epoch": 6.29,
"grad_norm": 0.22741083800792694,
"learning_rate": 0.0007500353065787924,
"loss": 3.0311,
"step": 117000
},
{
"epoch": 6.35,
"grad_norm": 0.23282620310783386,
"learning_rate": 0.0007470930916794163,
"loss": 3.036,
"step": 118000
},
{
"epoch": 6.4,
"grad_norm": 0.2192760556936264,
"learning_rate": 0.0007441538189949394,
"loss": 3.0341,
"step": 119000
},
{
"epoch": 6.45,
"grad_norm": 0.2083265483379364,
"learning_rate": 0.0007412116040955633,
"loss": 3.0376,
"step": 120000
},
{
"epoch": 6.51,
"grad_norm": 0.2194899469614029,
"learning_rate": 0.0007382723314110863,
"loss": 3.0377,
"step": 121000
},
{
"epoch": 6.56,
"grad_norm": 0.22579005360603333,
"learning_rate": 0.00073533011651171,
"loss": 3.0399,
"step": 122000
},
{
"epoch": 6.62,
"grad_norm": 0.21945177018642426,
"learning_rate": 0.0007323879016123339,
"loss": 3.038,
"step": 123000
},
{
"epoch": 6.67,
"grad_norm": 0.23311348259449005,
"learning_rate": 0.0007294515711427562,
"loss": 3.0418,
"step": 124000
},
{
"epoch": 6.72,
"grad_norm": 0.20455820858478546,
"learning_rate": 0.00072650935624338,
"loss": 3.0488,
"step": 125000
},
{
"epoch": 6.78,
"grad_norm": 0.21514442563056946,
"learning_rate": 0.0007235671413440038,
"loss": 3.0442,
"step": 126000
},
{
"epoch": 6.83,
"grad_norm": 0.256694495677948,
"learning_rate": 0.0007206249264446275,
"loss": 3.0428,
"step": 127000
},
{
"epoch": 6.88,
"grad_norm": 0.2359844148159027,
"learning_rate": 0.0007176827115452513,
"loss": 3.0464,
"step": 128000
},
{
"epoch": 6.94,
"grad_norm": 0.22172461450099945,
"learning_rate": 0.0007147434388607744,
"loss": 3.0473,
"step": 129000
},
{
"epoch": 6.99,
"grad_norm": 0.32295501232147217,
"learning_rate": 0.0007118012239613981,
"loss": 3.0457,
"step": 130000
},
{
"epoch": 7.0,
"eval_accuracy": 0.4062039005098178,
"eval_loss": 3.3680343627929688,
"eval_runtime": 153.628,
"eval_samples_per_second": 377.002,
"eval_steps_per_second": 5.891,
"step": 130158
},
{
"epoch": 7.05,
"grad_norm": 0.22584497928619385,
"learning_rate": 0.0007088590090620219,
"loss": 2.9831,
"step": 131000
},
{
"epoch": 7.1,
"grad_norm": 0.20827385783195496,
"learning_rate": 0.000705919736377545,
"loss": 2.9797,
"step": 132000
},
{
"epoch": 7.15,
"grad_norm": 0.2678986191749573,
"learning_rate": 0.0007029775214781687,
"loss": 2.9887,
"step": 133000
},
{
"epoch": 7.21,
"grad_norm": 0.25479185581207275,
"learning_rate": 0.0007000353065787925,
"loss": 2.9954,
"step": 134000
},
{
"epoch": 7.26,
"grad_norm": 0.23116907477378845,
"learning_rate": 0.0006970930916794163,
"loss": 2.9968,
"step": 135000
},
{
"epoch": 7.31,
"grad_norm": 0.25440552830696106,
"learning_rate": 0.0006941508767800401,
"loss": 2.9965,
"step": 136000
},
{
"epoch": 7.37,
"grad_norm": 0.23387254774570465,
"learning_rate": 0.0006912116040955631,
"loss": 3.0031,
"step": 137000
},
{
"epoch": 7.42,
"grad_norm": 0.23628811538219452,
"learning_rate": 0.0006882693891961869,
"loss": 3.0035,
"step": 138000
},
{
"epoch": 7.48,
"grad_norm": 0.21515734493732452,
"learning_rate": 0.0006853301165117101,
"loss": 3.0011,
"step": 139000
},
{
"epoch": 7.53,
"grad_norm": 0.28068262338638306,
"learning_rate": 0.0006823879016123337,
"loss": 3.0051,
"step": 140000
},
{
"epoch": 7.58,
"grad_norm": 0.2298477292060852,
"learning_rate": 0.0006794486289278569,
"loss": 3.0091,
"step": 141000
},
{
"epoch": 7.64,
"grad_norm": 0.23959632217884064,
"learning_rate": 0.0006765064140284807,
"loss": 3.0079,
"step": 142000
},
{
"epoch": 7.69,
"grad_norm": 0.21614809334278107,
"learning_rate": 0.0006735700835589031,
"loss": 3.0101,
"step": 143000
},
{
"epoch": 7.74,
"grad_norm": 0.3193395435810089,
"learning_rate": 0.0006706278686595268,
"loss": 3.0085,
"step": 144000
},
{
"epoch": 7.8,
"grad_norm": 0.2261291742324829,
"learning_rate": 0.0006676856537601506,
"loss": 3.0156,
"step": 145000
},
{
"epoch": 7.85,
"grad_norm": 0.31309646368026733,
"learning_rate": 0.0006647463810756738,
"loss": 3.0141,
"step": 146000
},
{
"epoch": 7.91,
"grad_norm": 0.2506345808506012,
"learning_rate": 0.0006618041661762975,
"loss": 3.0153,
"step": 147000
},
{
"epoch": 7.96,
"grad_norm": 0.24304701387882233,
"learning_rate": 0.0006588648934918207,
"loss": 3.0127,
"step": 148000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4069256373864405,
"eval_loss": 3.3718349933624268,
"eval_runtime": 153.6896,
"eval_samples_per_second": 376.851,
"eval_steps_per_second": 5.888,
"step": 148752
},
{
"epoch": 8.01,
"grad_norm": 0.2484602928161621,
"learning_rate": 0.0006559226785924444,
"loss": 2.9984,
"step": 149000
},
{
"epoch": 8.07,
"grad_norm": 0.31583237648010254,
"learning_rate": 0.0006529804636930682,
"loss": 2.9455,
"step": 150000
},
{
"epoch": 8.12,
"grad_norm": 0.24394702911376953,
"learning_rate": 0.0006500382487936918,
"loss": 2.9544,
"step": 151000
},
{
"epoch": 8.17,
"grad_norm": 0.29708966612815857,
"learning_rate": 0.000647098976109215,
"loss": 2.9598,
"step": 152000
},
{
"epoch": 8.23,
"grad_norm": 0.23569166660308838,
"learning_rate": 0.0006441567612098388,
"loss": 2.9661,
"step": 153000
},
{
"epoch": 8.28,
"grad_norm": 0.26388081908226013,
"learning_rate": 0.0006412145463104624,
"loss": 2.9691,
"step": 154000
},
{
"epoch": 8.34,
"grad_norm": 0.22147905826568604,
"learning_rate": 0.0006382752736259857,
"loss": 2.9663,
"step": 155000
},
{
"epoch": 8.39,
"grad_norm": 0.24741463363170624,
"learning_rate": 0.0006353330587266094,
"loss": 2.9733,
"step": 156000
},
{
"epoch": 8.44,
"grad_norm": 0.25748059153556824,
"learning_rate": 0.0006323937860421325,
"loss": 2.9751,
"step": 157000
},
{
"epoch": 8.5,
"grad_norm": 0.2481698840856552,
"learning_rate": 0.0006294515711427563,
"loss": 2.9752,
"step": 158000
},
{
"epoch": 8.55,
"grad_norm": 0.23890063166618347,
"learning_rate": 0.00062650935624338,
"loss": 2.9759,
"step": 159000
},
{
"epoch": 8.6,
"grad_norm": 0.2355976551771164,
"learning_rate": 0.0006235700835589031,
"loss": 2.9747,
"step": 160000
},
{
"epoch": 8.66,
"grad_norm": 0.25961464643478394,
"learning_rate": 0.0006206278686595269,
"loss": 2.9835,
"step": 161000
},
{
"epoch": 8.71,
"grad_norm": 0.2471887469291687,
"learning_rate": 0.00061768859597505,
"loss": 2.9821,
"step": 162000
},
{
"epoch": 8.77,
"grad_norm": 0.2203267514705658,
"learning_rate": 0.0006147463810756738,
"loss": 2.984,
"step": 163000
},
{
"epoch": 8.82,
"grad_norm": 0.23866264522075653,
"learning_rate": 0.0006118041661762975,
"loss": 2.9875,
"step": 164000
},
{
"epoch": 8.87,
"grad_norm": 0.22918641567230225,
"learning_rate": 0.0006088648934918207,
"loss": 2.984,
"step": 165000
},
{
"epoch": 8.93,
"grad_norm": 0.23822970688343048,
"learning_rate": 0.0006059226785924444,
"loss": 2.9854,
"step": 166000
},
{
"epoch": 8.98,
"grad_norm": 0.22895199060440063,
"learning_rate": 0.0006029804636930681,
"loss": 2.9897,
"step": 167000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4080178161450047,
"eval_loss": 3.3722941875457764,
"eval_runtime": 153.9821,
"eval_samples_per_second": 376.135,
"eval_steps_per_second": 5.877,
"step": 167346
},
{
"epoch": 9.04,
"grad_norm": 0.245958149433136,
"learning_rate": 0.0006000411910085913,
"loss": 2.9388,
"step": 168000
},
{
"epoch": 9.09,
"grad_norm": 0.22162730991840363,
"learning_rate": 0.0005970989761092151,
"loss": 2.9246,
"step": 169000
},
{
"epoch": 9.14,
"grad_norm": 0.37472179532051086,
"learning_rate": 0.0005941597034247381,
"loss": 2.9315,
"step": 170000
},
{
"epoch": 9.2,
"grad_norm": 0.22953909635543823,
"learning_rate": 0.000591217488525362,
"loss": 2.9302,
"step": 171000
},
{
"epoch": 9.25,
"grad_norm": 0.2361230105161667,
"learning_rate": 0.0005882752736259857,
"loss": 2.941,
"step": 172000
},
{
"epoch": 9.3,
"grad_norm": 0.25345179438591003,
"learning_rate": 0.0005853330587266094,
"loss": 2.9453,
"step": 173000
},
{
"epoch": 9.36,
"grad_norm": 0.23210594058036804,
"learning_rate": 0.0005823937860421326,
"loss": 2.9429,
"step": 174000
},
{
"epoch": 9.41,
"grad_norm": 0.22280991077423096,
"learning_rate": 0.0005794515711427563,
"loss": 2.9464,
"step": 175000
},
{
"epoch": 9.47,
"grad_norm": 0.24767878651618958,
"learning_rate": 0.0005765122984582793,
"loss": 2.9467,
"step": 176000
},
{
"epoch": 9.52,
"grad_norm": 0.2509325444698334,
"learning_rate": 0.0005735700835589032,
"loss": 2.9484,
"step": 177000
},
{
"epoch": 9.57,
"grad_norm": 0.22943289577960968,
"learning_rate": 0.0005706308108744262,
"loss": 2.9523,
"step": 178000
},
{
"epoch": 9.63,
"grad_norm": 0.2891969084739685,
"learning_rate": 0.00056768859597505,
"loss": 2.9557,
"step": 179000
},
{
"epoch": 9.68,
"grad_norm": 0.242570161819458,
"learning_rate": 0.0005647493232905732,
"loss": 2.9549,
"step": 180000
},
{
"epoch": 9.73,
"grad_norm": 0.26979508996009827,
"learning_rate": 0.0005618071083911968,
"loss": 2.9595,
"step": 181000
},
{
"epoch": 9.79,
"grad_norm": 0.24241873621940613,
"learning_rate": 0.0005588678357067201,
"loss": 2.958,
"step": 182000
},
{
"epoch": 9.84,
"grad_norm": 0.24058844149112701,
"learning_rate": 0.0005559256208073438,
"loss": 2.9619,
"step": 183000
},
{
"epoch": 9.9,
"grad_norm": 0.23146815598011017,
"learning_rate": 0.0005529834059079676,
"loss": 2.9608,
"step": 184000
},
{
"epoch": 9.95,
"grad_norm": 0.22514961659908295,
"learning_rate": 0.0005500441332234907,
"loss": 2.9602,
"step": 185000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4082696816900534,
"eval_loss": 3.3669023513793945,
"eval_runtime": 154.1716,
"eval_samples_per_second": 375.672,
"eval_steps_per_second": 5.87,
"step": 185940
},
{
"epoch": 10.0,
"grad_norm": 0.29674816131591797,
"learning_rate": 0.0005471019183241144,
"loss": 2.9548,
"step": 186000
},
{
"epoch": 10.06,
"grad_norm": 0.2413276582956314,
"learning_rate": 0.0005441597034247382,
"loss": 2.8963,
"step": 187000
},
{
"epoch": 10.11,
"grad_norm": 0.2437901645898819,
"learning_rate": 0.0005412174885253619,
"loss": 2.9023,
"step": 188000
},
{
"epoch": 10.16,
"grad_norm": 0.27865538001060486,
"learning_rate": 0.0005382811580557843,
"loss": 2.9052,
"step": 189000
},
{
"epoch": 10.22,
"grad_norm": 0.26435813307762146,
"learning_rate": 0.0005353389431564082,
"loss": 2.9114,
"step": 190000
},
{
"epoch": 10.27,
"grad_norm": 0.3438023328781128,
"learning_rate": 0.0005323996704719313,
"loss": 2.915,
"step": 191000
},
{
"epoch": 10.33,
"grad_norm": 0.2424338161945343,
"learning_rate": 0.0005294574555725549,
"loss": 2.9198,
"step": 192000
},
{
"epoch": 10.38,
"grad_norm": 0.2525182366371155,
"learning_rate": 0.0005265152406731788,
"loss": 2.9189,
"step": 193000
},
{
"epoch": 10.43,
"grad_norm": 0.23211458325386047,
"learning_rate": 0.0005235730257738025,
"loss": 2.9227,
"step": 194000
},
{
"epoch": 10.49,
"grad_norm": 0.2459993213415146,
"learning_rate": 0.0005206308108744263,
"loss": 2.9274,
"step": 195000
},
{
"epoch": 10.54,
"grad_norm": 0.26037323474884033,
"learning_rate": 0.0005176915381899494,
"loss": 2.9266,
"step": 196000
},
{
"epoch": 10.59,
"grad_norm": 0.23404568433761597,
"learning_rate": 0.0005147522655054725,
"loss": 2.9263,
"step": 197000
},
{
"epoch": 10.65,
"grad_norm": 0.2657169997692108,
"learning_rate": 0.0005118100506060963,
"loss": 2.9336,
"step": 198000
},
{
"epoch": 10.7,
"grad_norm": 0.2722354233264923,
"learning_rate": 0.0005088678357067201,
"loss": 2.9352,
"step": 199000
},
{
"epoch": 10.76,
"grad_norm": 0.2347659170627594,
"learning_rate": 0.0005059256208073439,
"loss": 2.9349,
"step": 200000
},
{
"epoch": 10.81,
"grad_norm": 0.2561778724193573,
"learning_rate": 0.0005029892903377663,
"loss": 2.9333,
"step": 201000
},
{
"epoch": 10.86,
"grad_norm": 0.24672134220600128,
"learning_rate": 0.00050004707543839,
"loss": 2.9346,
"step": 202000
},
{
"epoch": 10.92,
"grad_norm": 0.24908240139484406,
"learning_rate": 0.0004971048605390138,
"loss": 2.9392,
"step": 203000
},
{
"epoch": 10.97,
"grad_norm": 0.24685357511043549,
"learning_rate": 0.0004941655878545369,
"loss": 2.9377,
"step": 204000
},
{
"epoch": 11.0,
"eval_accuracy": 0.4095683109207261,
"eval_loss": 3.3622021675109863,
"eval_runtime": 153.8148,
"eval_samples_per_second": 376.544,
"eval_steps_per_second": 5.884,
"step": 204534
},
{
"epoch": 11.03,
"grad_norm": 0.24828964471817017,
"learning_rate": 0.0004912233729551607,
"loss": 2.9099,
"step": 205000
},
{
"epoch": 11.08,
"grad_norm": 0.2552799880504608,
"learning_rate": 0.00048828115805578436,
"loss": 2.877,
"step": 206000
},
{
"epoch": 11.13,
"grad_norm": 0.26694855093955994,
"learning_rate": 0.00048533894315640815,
"loss": 2.8848,
"step": 207000
},
{
"epoch": 11.19,
"grad_norm": 0.2933233678340912,
"learning_rate": 0.0004823996704719313,
"loss": 2.8884,
"step": 208000
},
{
"epoch": 11.24,
"grad_norm": 0.3140556812286377,
"learning_rate": 0.000479457455572555,
"loss": 2.8935,
"step": 209000
},
{
"epoch": 11.29,
"grad_norm": 0.2629554867744446,
"learning_rate": 0.00047651818288807816,
"loss": 2.8933,
"step": 210000
},
{
"epoch": 11.35,
"grad_norm": 0.2608068585395813,
"learning_rate": 0.0004735759679887019,
"loss": 2.8964,
"step": 211000
},
{
"epoch": 11.4,
"grad_norm": 0.28370392322540283,
"learning_rate": 0.00047063375308932563,
"loss": 2.8947,
"step": 212000
},
{
"epoch": 11.46,
"grad_norm": 0.242896169424057,
"learning_rate": 0.0004676915381899494,
"loss": 2.9034,
"step": 213000
},
{
"epoch": 11.51,
"grad_norm": 0.2785256505012512,
"learning_rate": 0.0004647522655054725,
"loss": 2.9076,
"step": 214000
},
{
"epoch": 11.56,
"grad_norm": 0.3482002019882202,
"learning_rate": 0.00046181005060609624,
"loss": 2.9073,
"step": 215000
},
{
"epoch": 11.62,
"grad_norm": 0.23567426204681396,
"learning_rate": 0.00045887077792161943,
"loss": 2.9085,
"step": 216000
},
{
"epoch": 11.67,
"grad_norm": 0.261733740568161,
"learning_rate": 0.00045592856302224317,
"loss": 2.9073,
"step": 217000
},
{
"epoch": 11.72,
"grad_norm": 0.26738300919532776,
"learning_rate": 0.00045298929033776625,
"loss": 2.9069,
"step": 218000
},
{
"epoch": 11.78,
"grad_norm": 0.23981693387031555,
"learning_rate": 0.00045004707543839004,
"loss": 2.9113,
"step": 219000
},
{
"epoch": 11.83,
"grad_norm": 0.3659937381744385,
"learning_rate": 0.0004471048605390138,
"loss": 2.9166,
"step": 220000
},
{
"epoch": 11.89,
"grad_norm": 0.24524465203285217,
"learning_rate": 0.0004441626456396375,
"loss": 2.9113,
"step": 221000
},
{
"epoch": 11.94,
"grad_norm": 0.24333979189395905,
"learning_rate": 0.00044122337295516065,
"loss": 2.9147,
"step": 222000
},
{
"epoch": 11.99,
"grad_norm": 0.3160707652568817,
"learning_rate": 0.0004382811580557844,
"loss": 2.9136,
"step": 223000
},
{
"epoch": 12.0,
"eval_accuracy": 0.4097532631334546,
"eval_loss": 3.37412428855896,
"eval_runtime": 153.7573,
"eval_samples_per_second": 376.685,
"eval_steps_per_second": 5.886,
"step": 223128
},
{
"epoch": 12.05,
"grad_norm": 0.2786504924297333,
"learning_rate": 0.0004353418853713075,
"loss": 2.8632,
"step": 224000
},
{
"epoch": 12.1,
"grad_norm": 0.2501066327095032,
"learning_rate": 0.00043240261268683066,
"loss": 2.8621,
"step": 225000
},
{
"epoch": 12.15,
"grad_norm": 0.28876030445098877,
"learning_rate": 0.0004294603977874544,
"loss": 2.8624,
"step": 226000
},
{
"epoch": 12.21,
"grad_norm": 0.26900097727775574,
"learning_rate": 0.00042651818288807813,
"loss": 2.8696,
"step": 227000
},
{
"epoch": 12.26,
"grad_norm": 0.32585257291793823,
"learning_rate": 0.0004235759679887019,
"loss": 2.869,
"step": 228000
},
{
"epoch": 12.32,
"grad_norm": 0.2984183728694916,
"learning_rate": 0.00042063669530422506,
"loss": 2.8723,
"step": 229000
},
{
"epoch": 12.37,
"grad_norm": 0.25952082872390747,
"learning_rate": 0.00041769742261974814,
"loss": 2.8813,
"step": 230000
},
{
"epoch": 12.42,
"grad_norm": 0.272550493478775,
"learning_rate": 0.0004147552077203719,
"loss": 2.8817,
"step": 231000
},
{
"epoch": 12.48,
"grad_norm": 0.3032265305519104,
"learning_rate": 0.00041181299282099567,
"loss": 2.8825,
"step": 232000
},
{
"epoch": 12.53,
"grad_norm": 0.26728367805480957,
"learning_rate": 0.0004088707779216194,
"loss": 2.8847,
"step": 233000
},
{
"epoch": 12.58,
"grad_norm": 0.2473539412021637,
"learning_rate": 0.0004059315052371425,
"loss": 2.8873,
"step": 234000
},
{
"epoch": 12.64,
"grad_norm": 0.2527707517147064,
"learning_rate": 0.0004029892903377663,
"loss": 2.888,
"step": 235000
},
{
"epoch": 12.69,
"grad_norm": 0.25239551067352295,
"learning_rate": 0.0004000500176532894,
"loss": 2.8873,
"step": 236000
},
{
"epoch": 12.75,
"grad_norm": 0.2829276919364929,
"learning_rate": 0.00039711074496881255,
"loss": 2.8898,
"step": 237000
},
{
"epoch": 12.8,
"grad_norm": 0.279778391122818,
"learning_rate": 0.0003941685300694363,
"loss": 2.8884,
"step": 238000
},
{
"epoch": 12.85,
"grad_norm": 0.259166419506073,
"learning_rate": 0.00039122631517006,
"loss": 2.8938,
"step": 239000
},
{
"epoch": 12.91,
"grad_norm": 0.25353771448135376,
"learning_rate": 0.00038828410027068376,
"loss": 2.8903,
"step": 240000
},
{
"epoch": 12.96,
"grad_norm": 0.28928855061531067,
"learning_rate": 0.00038534188537130755,
"loss": 2.8939,
"step": 241000
},
{
"epoch": 13.0,
"eval_accuracy": 0.4091496995013613,
"eval_loss": 3.382020950317383,
"eval_runtime": 154.4792,
"eval_samples_per_second": 374.924,
"eval_steps_per_second": 5.858,
"step": 241722
},
{
"epoch": 13.01,
"grad_norm": 0.2669352889060974,
"learning_rate": 0.00038240261268683063,
"loss": 2.8786,
"step": 242000
},
{
"epoch": 13.07,
"grad_norm": 0.34619081020355225,
"learning_rate": 0.00037946334000235377,
"loss": 2.8411,
"step": 243000
},
{
"epoch": 13.12,
"grad_norm": 0.2849779725074768,
"learning_rate": 0.00037652112510297756,
"loss": 2.8399,
"step": 244000
},
{
"epoch": 13.18,
"grad_norm": 0.2858317494392395,
"learning_rate": 0.0003735818524185007,
"loss": 2.8474,
"step": 245000
},
{
"epoch": 13.23,
"grad_norm": 0.3325158953666687,
"learning_rate": 0.0003706396375191244,
"loss": 2.8495,
"step": 246000
},
{
"epoch": 13.28,
"grad_norm": 0.2560212016105652,
"learning_rate": 0.00036769742261974817,
"loss": 2.8569,
"step": 247000
},
{
"epoch": 13.34,
"grad_norm": 0.2676179111003876,
"learning_rate": 0.0003647552077203719,
"loss": 2.8553,
"step": 248000
},
{
"epoch": 13.39,
"grad_norm": 0.2544156610965729,
"learning_rate": 0.00036181593503589504,
"loss": 2.8598,
"step": 249000
},
{
"epoch": 13.45,
"grad_norm": 0.25966116786003113,
"learning_rate": 0.0003588737201365188,
"loss": 2.8591,
"step": 250000
},
{
"epoch": 13.5,
"grad_norm": 0.30520108342170715,
"learning_rate": 0.0003559315052371425,
"loss": 2.8619,
"step": 251000
},
{
"epoch": 13.55,
"grad_norm": 0.24815978109836578,
"learning_rate": 0.00035298929033776626,
"loss": 2.8629,
"step": 252000
},
{
"epoch": 13.61,
"grad_norm": 0.32576534152030945,
"learning_rate": 0.00035005001765328945,
"loss": 2.8695,
"step": 253000
},
{
"epoch": 13.66,
"grad_norm": 0.28603044152259827,
"learning_rate": 0.00034710780275391313,
"loss": 2.8704,
"step": 254000
},
{
"epoch": 13.71,
"grad_norm": 0.25637853145599365,
"learning_rate": 0.00034416558785453687,
"loss": 2.8683,
"step": 255000
},
{
"epoch": 13.77,
"grad_norm": 0.2801949977874756,
"learning_rate": 0.00034122631517006006,
"loss": 2.8667,
"step": 256000
},
{
"epoch": 13.82,
"grad_norm": 0.2888367176055908,
"learning_rate": 0.0003382870424855832,
"loss": 2.8714,
"step": 257000
},
{
"epoch": 13.88,
"grad_norm": 0.25710824131965637,
"learning_rate": 0.0003353448275862069,
"loss": 2.8685,
"step": 258000
},
{
"epoch": 13.93,
"grad_norm": 0.2829296886920929,
"learning_rate": 0.00033240261268683067,
"loss": 2.8743,
"step": 259000
},
{
"epoch": 13.98,
"grad_norm": 0.30661359429359436,
"learning_rate": 0.0003294603977874544,
"loss": 2.8736,
"step": 260000
},
{
"epoch": 14.0,
"eval_accuracy": 0.40986666645168407,
"eval_loss": 3.3767247200012207,
"eval_runtime": 153.89,
"eval_samples_per_second": 376.36,
"eval_steps_per_second": 5.881,
"step": 260316
},
{
"epoch": 14.04,
"grad_norm": 0.2881269156932831,
"learning_rate": 0.00032651818288807814,
"loss": 2.8388,
"step": 261000
},
{
"epoch": 14.09,
"grad_norm": 0.28384995460510254,
"learning_rate": 0.0003235789102036013,
"loss": 2.8223,
"step": 262000
},
{
"epoch": 14.14,
"grad_norm": 0.2660735249519348,
"learning_rate": 0.000320636695304225,
"loss": 2.8256,
"step": 263000
},
{
"epoch": 14.2,
"grad_norm": 0.25953084230422974,
"learning_rate": 0.00031769742261974815,
"loss": 2.8299,
"step": 264000
},
{
"epoch": 14.25,
"grad_norm": 0.29592570662498474,
"learning_rate": 0.0003147581499352713,
"loss": 2.8349,
"step": 265000
},
{
"epoch": 14.31,
"grad_norm": 0.3023082911968231,
"learning_rate": 0.000311815935035895,
"loss": 2.8355,
"step": 266000
},
{
"epoch": 14.36,
"grad_norm": 0.2720430791378021,
"learning_rate": 0.00030887372013651876,
"loss": 2.8371,
"step": 267000
},
{
"epoch": 14.41,
"grad_norm": 0.33669525384902954,
"learning_rate": 0.00030593150523714255,
"loss": 2.8416,
"step": 268000
},
{
"epoch": 14.47,
"grad_norm": 0.27477729320526123,
"learning_rate": 0.0003029922325526657,
"loss": 2.8407,
"step": 269000
},
{
"epoch": 14.52,
"grad_norm": 0.2937850058078766,
"learning_rate": 0.00030005001765328937,
"loss": 2.8447,
"step": 270000
},
{
"epoch": 14.57,
"grad_norm": 0.2728506028652191,
"learning_rate": 0.00029710780275391316,
"loss": 2.8477,
"step": 271000
},
{
"epoch": 14.63,
"grad_norm": 0.29167652130126953,
"learning_rate": 0.0002941685300694363,
"loss": 2.8472,
"step": 272000
},
{
"epoch": 14.68,
"grad_norm": 0.2727662920951843,
"learning_rate": 0.00029122631517006003,
"loss": 2.8476,
"step": 273000
},
{
"epoch": 14.74,
"grad_norm": 0.28426212072372437,
"learning_rate": 0.0002882841002706838,
"loss": 2.8478,
"step": 274000
},
{
"epoch": 14.79,
"grad_norm": 0.28404590487480164,
"learning_rate": 0.0002853418853713075,
"loss": 2.8519,
"step": 275000
},
{
"epoch": 14.84,
"grad_norm": 0.3085533082485199,
"learning_rate": 0.00028240261268683064,
"loss": 2.8547,
"step": 276000
},
{
"epoch": 14.9,
"grad_norm": 0.27339011430740356,
"learning_rate": 0.00027946039778745444,
"loss": 2.8573,
"step": 277000
},
{
"epoch": 14.95,
"grad_norm": 0.2646634876728058,
"learning_rate": 0.0002765211251029775,
"loss": 2.8493,
"step": 278000
},
{
"epoch": 15.0,
"eval_accuracy": 0.4100954213678993,
"eval_loss": 3.3848283290863037,
"eval_runtime": 154.01,
"eval_samples_per_second": 376.067,
"eval_steps_per_second": 5.876,
"step": 278910
},
{
"epoch": 15.0,
"grad_norm": 0.2736656665802002,
"learning_rate": 0.00027357891020360125,
"loss": 2.8501,
"step": 279000
},
{
"epoch": 15.06,
"grad_norm": 0.3071829378604889,
"learning_rate": 0.0002706396375191244,
"loss": 2.8049,
"step": 280000
},
{
"epoch": 15.11,
"grad_norm": 0.26746442914009094,
"learning_rate": 0.0002676974226197482,
"loss": 2.8131,
"step": 281000
},
{
"epoch": 15.17,
"grad_norm": 0.2784716486930847,
"learning_rate": 0.0002647552077203719,
"loss": 2.8127,
"step": 282000
},
{
"epoch": 15.22,
"grad_norm": 0.2832111120223999,
"learning_rate": 0.00026181299282099566,
"loss": 2.8129,
"step": 283000
},
{
"epoch": 15.27,
"grad_norm": 0.26944035291671753,
"learning_rate": 0.0002588737201365188,
"loss": 2.8152,
"step": 284000
},
{
"epoch": 15.33,
"grad_norm": 0.2603321075439453,
"learning_rate": 0.00025593150523714253,
"loss": 2.8193,
"step": 285000
},
{
"epoch": 15.38,
"grad_norm": 0.35142990946769714,
"learning_rate": 0.0002529892903377663,
"loss": 2.8178,
"step": 286000
},
{
"epoch": 15.44,
"grad_norm": 0.2828776240348816,
"learning_rate": 0.0002500500176532894,
"loss": 2.8211,
"step": 287000
},
{
"epoch": 15.49,
"grad_norm": 0.2981540560722351,
"learning_rate": 0.00024711074496881253,
"loss": 2.826,
"step": 288000
},
{
"epoch": 15.54,
"grad_norm": 0.256558358669281,
"learning_rate": 0.00024416853006943627,
"loss": 2.8269,
"step": 289000
},
{
"epoch": 15.6,
"grad_norm": 0.28373146057128906,
"learning_rate": 0.0002412292573849594,
"loss": 2.8261,
"step": 290000
},
{
"epoch": 15.65,
"grad_norm": 0.26360180974006653,
"learning_rate": 0.00023828704248558314,
"loss": 2.8315,
"step": 291000
},
{
"epoch": 15.7,
"grad_norm": 0.3487566113471985,
"learning_rate": 0.0002353448275862069,
"loss": 2.8313,
"step": 292000
},
{
"epoch": 15.76,
"grad_norm": 0.35876941680908203,
"learning_rate": 0.00023240261268683067,
"loss": 2.8315,
"step": 293000
},
{
"epoch": 15.81,
"grad_norm": 0.3193157911300659,
"learning_rate": 0.00022946334000235378,
"loss": 2.835,
"step": 294000
},
{
"epoch": 15.87,
"grad_norm": 0.2774643301963806,
"learning_rate": 0.00022652112510297755,
"loss": 2.836,
"step": 295000
},
{
"epoch": 15.92,
"grad_norm": 0.25371402502059937,
"learning_rate": 0.00022357891020360126,
"loss": 2.8341,
"step": 296000
},
{
"epoch": 15.97,
"grad_norm": 0.3146109879016876,
"learning_rate": 0.00022063669530422502,
"loss": 2.831,
"step": 297000
},
{
"epoch": 16.0,
"eval_accuracy": 0.40995870587465466,
"eval_loss": 3.395188331604004,
"eval_runtime": 153.8316,
"eval_samples_per_second": 376.503,
"eval_steps_per_second": 5.883,
"step": 297504
},
{
"epoch": 16.03,
"grad_norm": 0.320112407207489,
"learning_rate": 0.00021769742261974816,
"loss": 2.8136,
"step": 298000
},
{
"epoch": 16.08,
"grad_norm": 0.3046302795410156,
"learning_rate": 0.00021475520772037192,
"loss": 2.7897,
"step": 299000
},
{
"epoch": 16.13,
"grad_norm": 0.2765304446220398,
"learning_rate": 0.00021181299282099563,
"loss": 2.7962,
"step": 300000
},
{
"epoch": 16.19,
"grad_norm": 0.29775017499923706,
"learning_rate": 0.0002088737201365188,
"loss": 2.7979,
"step": 301000
},
{
"epoch": 16.24,
"grad_norm": 0.2856539189815521,
"learning_rate": 0.00020593150523714253,
"loss": 2.8003,
"step": 302000
},
{
"epoch": 16.3,
"grad_norm": 0.27068325877189636,
"learning_rate": 0.00020299223255266567,
"loss": 2.7995,
"step": 303000
},
{
"epoch": 16.35,
"grad_norm": 0.28066492080688477,
"learning_rate": 0.0002000500176532894,
"loss": 2.8054,
"step": 304000
},
{
"epoch": 16.4,
"grad_norm": 0.3302575349807739,
"learning_rate": 0.00019710780275391317,
"loss": 2.8034,
"step": 305000
},
{
"epoch": 16.46,
"grad_norm": 0.2813708782196045,
"learning_rate": 0.00019416558785453688,
"loss": 2.809,
"step": 306000
},
{
"epoch": 16.51,
"grad_norm": 0.32888713479042053,
"learning_rate": 0.00019122631517006004,
"loss": 2.8106,
"step": 307000
},
{
"epoch": 16.56,
"grad_norm": 0.27354925870895386,
"learning_rate": 0.00018828410027068378,
"loss": 2.8067,
"step": 308000
},
{
"epoch": 16.62,
"grad_norm": 0.2700726389884949,
"learning_rate": 0.0001853448275862069,
"loss": 2.811,
"step": 309000
},
{
"epoch": 16.67,
"grad_norm": 0.27493104338645935,
"learning_rate": 0.00018240555490173002,
"loss": 2.8089,
"step": 310000
},
{
"epoch": 16.73,
"grad_norm": 0.29335400462150574,
"learning_rate": 0.00017946334000235379,
"loss": 2.8103,
"step": 311000
},
{
"epoch": 16.78,
"grad_norm": 0.27957841753959656,
"learning_rate": 0.00017652112510297752,
"loss": 2.8143,
"step": 312000
},
{
"epoch": 16.83,
"grad_norm": 0.27593499422073364,
"learning_rate": 0.0001735789102036013,
"loss": 2.8125,
"step": 313000
},
{
"epoch": 16.89,
"grad_norm": 0.29090675711631775,
"learning_rate": 0.00017063669530422503,
"loss": 2.8127,
"step": 314000
},
{
"epoch": 16.94,
"grad_norm": 0.2894745171070099,
"learning_rate": 0.00016769448040484877,
"loss": 2.8173,
"step": 315000
},
{
"epoch": 16.99,
"grad_norm": 0.3210045397281647,
"learning_rate": 0.0001647552077203719,
"loss": 2.8158,
"step": 316000
},
{
"epoch": 17.0,
"eval_accuracy": 0.41055447638772274,
"eval_loss": 3.401242733001709,
"eval_runtime": 153.8825,
"eval_samples_per_second": 376.378,
"eval_steps_per_second": 5.881,
"step": 316098
},
{
"epoch": 17.05,
"grad_norm": 0.32659468054771423,
"learning_rate": 0.00016181299282099566,
"loss": 2.7802,
"step": 317000
},
{
"epoch": 17.1,
"grad_norm": 0.2894715964794159,
"learning_rate": 0.00015887077792161938,
"loss": 2.7795,
"step": 318000
},
{
"epoch": 17.16,
"grad_norm": 0.29618579149246216,
"learning_rate": 0.00015593150523714254,
"loss": 2.7792,
"step": 319000
},
{
"epoch": 17.21,
"grad_norm": 0.32222601771354675,
"learning_rate": 0.00015298929033776628,
"loss": 2.7825,
"step": 320000
},
{
"epoch": 17.26,
"grad_norm": 0.2724851667881012,
"learning_rate": 0.0001500500176532894,
"loss": 2.7885,
"step": 321000
},
{
"epoch": 17.32,
"grad_norm": 0.3210499584674835,
"learning_rate": 0.00014710780275391315,
"loss": 2.7837,
"step": 322000
},
{
"epoch": 17.37,
"grad_norm": 0.27571025490760803,
"learning_rate": 0.0001441655878545369,
"loss": 2.7831,
"step": 323000
},
{
"epoch": 17.42,
"grad_norm": 0.30725350975990295,
"learning_rate": 0.00014122337295516065,
"loss": 2.7861,
"step": 324000
},
{
"epoch": 17.48,
"grad_norm": 0.3012644052505493,
"learning_rate": 0.00013828410027068378,
"loss": 2.7897,
"step": 325000
},
{
"epoch": 17.53,
"grad_norm": 0.30297723412513733,
"learning_rate": 0.00013534188537130752,
"loss": 2.7938,
"step": 326000
},
{
"epoch": 17.59,
"grad_norm": 0.30885782837867737,
"learning_rate": 0.00013240261268683066,
"loss": 2.7955,
"step": 327000
},
{
"epoch": 17.64,
"grad_norm": 0.294734925031662,
"learning_rate": 0.0001294603977874544,
"loss": 2.7934,
"step": 328000
},
{
"epoch": 17.69,
"grad_norm": 0.28373879194259644,
"learning_rate": 0.00012651818288807816,
"loss": 2.7934,
"step": 329000
},
{
"epoch": 17.75,
"grad_norm": 0.2635454535484314,
"learning_rate": 0.00012357891020360127,
"loss": 2.794,
"step": 330000
},
{
"epoch": 17.8,
"grad_norm": 0.29980969429016113,
"learning_rate": 0.00012063669530422502,
"loss": 2.7911,
"step": 331000
},
{
"epoch": 17.86,
"grad_norm": 0.2912716567516327,
"learning_rate": 0.00011769742261974815,
"loss": 2.7942,
"step": 332000
},
{
"epoch": 17.91,
"grad_norm": 0.2887323498725891,
"learning_rate": 0.0001147552077203719,
"loss": 2.7984,
"step": 333000
},
{
"epoch": 17.96,
"grad_norm": 0.28569164872169495,
"learning_rate": 0.00011181299282099564,
"loss": 2.8018,
"step": 334000
},
{
"epoch": 18.0,
"eval_accuracy": 0.4104207840872034,
"eval_loss": 3.40279221534729,
"eval_runtime": 155.722,
"eval_samples_per_second": 371.932,
"eval_steps_per_second": 5.812,
"step": 334692
},
{
"epoch": 18.02,
"grad_norm": 0.3532341718673706,
"learning_rate": 0.0001088707779216194,
"loss": 2.7902,
"step": 335000
},
{
"epoch": 18.07,
"grad_norm": 0.340742826461792,
"learning_rate": 0.00010593150523714253,
"loss": 2.7657,
"step": 336000
},
{
"epoch": 18.12,
"grad_norm": 0.29802101850509644,
"learning_rate": 0.00010298929033776628,
"loss": 2.7679,
"step": 337000
},
{
"epoch": 18.18,
"grad_norm": 0.2924386262893677,
"learning_rate": 0.00010004707543839002,
"loss": 2.7702,
"step": 338000
},
{
"epoch": 18.23,
"grad_norm": 0.2923070788383484,
"learning_rate": 9.710780275391315e-05,
"loss": 2.7676,
"step": 339000
},
{
"epoch": 18.29,
"grad_norm": 0.2915445566177368,
"learning_rate": 9.41655878545369e-05,
"loss": 2.7725,
"step": 340000
},
{
"epoch": 18.34,
"grad_norm": 0.2909591794013977,
"learning_rate": 9.122631517006002e-05,
"loss": 2.7718,
"step": 341000
},
{
"epoch": 18.39,
"grad_norm": 0.3041108548641205,
"learning_rate": 8.828704248558316e-05,
"loss": 2.7736,
"step": 342000
},
{
"epoch": 18.45,
"grad_norm": 0.297532320022583,
"learning_rate": 8.534482758620691e-05,
"loss": 2.7728,
"step": 343000
},
{
"epoch": 18.5,
"grad_norm": 0.3025277554988861,
"learning_rate": 8.240555490173002e-05,
"loss": 2.776,
"step": 344000
},
{
"epoch": 18.55,
"grad_norm": 0.2893424332141876,
"learning_rate": 7.946334000235378e-05,
"loss": 2.7743,
"step": 345000
},
{
"epoch": 18.61,
"grad_norm": 0.2967279553413391,
"learning_rate": 7.652112510297753e-05,
"loss": 2.7797,
"step": 346000
},
{
"epoch": 18.66,
"grad_norm": 0.3163110017776489,
"learning_rate": 7.357891020360127e-05,
"loss": 2.7776,
"step": 347000
},
{
"epoch": 18.72,
"grad_norm": 0.27627646923065186,
"learning_rate": 7.06396375191244e-05,
"loss": 2.7742,
"step": 348000
},
{
"epoch": 18.77,
"grad_norm": 0.29440227150917053,
"learning_rate": 6.769742261974816e-05,
"loss": 2.7737,
"step": 349000
},
{
"epoch": 18.82,
"grad_norm": 0.28819799423217773,
"learning_rate": 6.47552077203719e-05,
"loss": 2.7803,
"step": 350000
},
{
"epoch": 18.88,
"grad_norm": 0.36465972661972046,
"learning_rate": 6.181299282099565e-05,
"loss": 2.7794,
"step": 351000
},
{
"epoch": 18.93,
"grad_norm": 0.2758486270904541,
"learning_rate": 5.887372013651877e-05,
"loss": 2.7776,
"step": 352000
},
{
"epoch": 18.98,
"grad_norm": 0.35758283734321594,
"learning_rate": 5.593150523714252e-05,
"loss": 2.7777,
"step": 353000
},
{
"epoch": 19.0,
"eval_accuracy": 0.4102629734269421,
"eval_loss": 3.4115071296691895,
"eval_runtime": 155.5477,
"eval_samples_per_second": 372.349,
"eval_steps_per_second": 5.818,
"step": 353286
},
{
"epoch": 19.04,
"grad_norm": 0.30845823884010315,
"learning_rate": 5.299223255266565e-05,
"loss": 2.764,
"step": 354000
},
{
"epoch": 19.09,
"grad_norm": 0.30031487345695496,
"learning_rate": 5.00500176532894e-05,
"loss": 2.76,
"step": 355000
},
{
"epoch": 19.15,
"grad_norm": 0.3072240352630615,
"learning_rate": 4.710780275391314e-05,
"loss": 2.7564,
"step": 356000
},
{
"epoch": 19.2,
"grad_norm": 0.2831727862358093,
"learning_rate": 4.4168530069436276e-05,
"loss": 2.756,
"step": 357000
},
{
"epoch": 19.25,
"grad_norm": 0.29800769686698914,
"learning_rate": 4.122631517006002e-05,
"loss": 2.7556,
"step": 358000
},
{
"epoch": 19.31,
"grad_norm": 0.31122684478759766,
"learning_rate": 3.828410027068377e-05,
"loss": 2.7633,
"step": 359000
},
{
"epoch": 19.36,
"grad_norm": 0.3061014413833618,
"learning_rate": 3.53448275862069e-05,
"loss": 2.7596,
"step": 360000
},
{
"epoch": 19.41,
"grad_norm": 0.29348084330558777,
"learning_rate": 3.2402612686830645e-05,
"loss": 2.7616,
"step": 361000
},
{
"epoch": 19.47,
"grad_norm": 0.2803378403186798,
"learning_rate": 2.9460397787454396e-05,
"loss": 2.7585,
"step": 362000
},
{
"epoch": 19.52,
"grad_norm": 0.31245797872543335,
"learning_rate": 2.6521125102977523e-05,
"loss": 2.7632,
"step": 363000
},
{
"epoch": 19.58,
"grad_norm": 0.28981760144233704,
"learning_rate": 2.357891020360127e-05,
"loss": 2.7584,
"step": 364000
},
{
"epoch": 19.63,
"grad_norm": 0.311199814081192,
"learning_rate": 2.063669530422502e-05,
"loss": 2.7607,
"step": 365000
},
{
"epoch": 19.68,
"grad_norm": 0.28949621319770813,
"learning_rate": 1.7694480404848772e-05,
"loss": 2.7595,
"step": 366000
},
{
"epoch": 19.74,
"grad_norm": 0.2877204716205597,
"learning_rate": 1.4755207720371896e-05,
"loss": 2.7597,
"step": 367000
},
{
"epoch": 19.79,
"grad_norm": 0.28899291157722473,
"learning_rate": 1.1812992820995646e-05,
"loss": 2.7604,
"step": 368000
},
{
"epoch": 19.85,
"grad_norm": 0.29409030079841614,
"learning_rate": 8.873720136518773e-06,
"loss": 2.761,
"step": 369000
},
{
"epoch": 19.9,
"grad_norm": 0.2844576835632324,
"learning_rate": 5.934447452041897e-06,
"loss": 2.7607,
"step": 370000
},
{
"epoch": 19.95,
"grad_norm": 0.30620551109313965,
"learning_rate": 2.992232552665647e-06,
"loss": 2.7582,
"step": 371000
},
{
"epoch": 20.0,
"eval_accuracy": 0.41011443389103847,
"eval_loss": 3.4167680740356445,
"eval_runtime": 156.3007,
"eval_samples_per_second": 370.555,
"eval_steps_per_second": 5.79,
"step": 371880
},
{
"epoch": 20.0,
"step": 371880,
"total_flos": 1.56683622053376e+18,
"train_loss": 3.0287782370153007,
"train_runtime": 81352.6361,
"train_samples_per_second": 146.277,
"train_steps_per_second": 4.571
}
],
"logging_steps": 1000,
"max_steps": 371880,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 5000,
"total_flos": 1.56683622053376e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}