{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9990828492815653,
"eval_steps": 500,
"global_step": 817,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0012228676245796392,
"grad_norm": 0.9656172692131952,
"learning_rate": 1.2195121951219513e-05,
"loss": 1.5985,
"step": 1
},
{
"epoch": 0.006114338122898196,
"grad_norm": 0.8490138044628,
"learning_rate": 6.097560975609756e-05,
"loss": 1.5836,
"step": 5
},
{
"epoch": 0.012228676245796393,
"grad_norm": 0.7251834848572155,
"learning_rate": 0.00012195121951219512,
"loss": 1.5497,
"step": 10
},
{
"epoch": 0.01834301436869459,
"grad_norm": 0.5397663521595886,
"learning_rate": 0.00018292682926829268,
"loss": 1.4557,
"step": 15
},
{
"epoch": 0.024457352491592785,
"grad_norm": 0.48494502338158535,
"learning_rate": 0.00024390243902439024,
"loss": 1.3847,
"step": 20
},
{
"epoch": 0.03057169061449098,
"grad_norm": 0.3802721963418244,
"learning_rate": 0.0003048780487804878,
"loss": 1.349,
"step": 25
},
{
"epoch": 0.03668602873738918,
"grad_norm": 0.1943427476761954,
"learning_rate": 0.00036585365853658537,
"loss": 1.3079,
"step": 30
},
{
"epoch": 0.042800366860287375,
"grad_norm": 0.21858154502076185,
"learning_rate": 0.0004268292682926829,
"loss": 1.3241,
"step": 35
},
{
"epoch": 0.04891470498318557,
"grad_norm": 0.16994532307041646,
"learning_rate": 0.0004878048780487805,
"loss": 1.2754,
"step": 40
},
{
"epoch": 0.055029043106083766,
"grad_norm": 0.2204934202105616,
"learning_rate": 0.0005487804878048781,
"loss": 1.272,
"step": 45
},
{
"epoch": 0.06114338122898196,
"grad_norm": 0.21536784653767524,
"learning_rate": 0.0006097560975609756,
"loss": 1.257,
"step": 50
},
{
"epoch": 0.06725771935188016,
"grad_norm": 0.19219678018040015,
"learning_rate": 0.0006707317073170732,
"loss": 1.2599,
"step": 55
},
{
"epoch": 0.07337205747477836,
"grad_norm": 0.17692073178635473,
"learning_rate": 0.0007317073170731707,
"loss": 1.2478,
"step": 60
},
{
"epoch": 0.07948639559767655,
"grad_norm": 0.19182124802405837,
"learning_rate": 0.0007926829268292683,
"loss": 1.2423,
"step": 65
},
{
"epoch": 0.08560073372057475,
"grad_norm": 0.16009438254198957,
"learning_rate": 0.0008536585365853659,
"loss": 1.2378,
"step": 70
},
{
"epoch": 0.09171507184347294,
"grad_norm": 0.18014008005376794,
"learning_rate": 0.0009146341463414635,
"loss": 1.2335,
"step": 75
},
{
"epoch": 0.09782940996637114,
"grad_norm": 0.2215774427067512,
"learning_rate": 0.000975609756097561,
"loss": 1.2216,
"step": 80
},
{
"epoch": 0.10394374808926933,
"grad_norm": 0.24152505471619,
"learning_rate": 0.0009999588943391596,
"loss": 1.2264,
"step": 85
},
{
"epoch": 0.11005808621216753,
"grad_norm": 0.20887709607992783,
"learning_rate": 0.0009997077175540067,
"loss": 1.2237,
"step": 90
},
{
"epoch": 0.11617242433506574,
"grad_norm": 0.227408391864606,
"learning_rate": 0.0009992283150399447,
"loss": 1.2272,
"step": 95
},
{
"epoch": 0.12228676245796392,
"grad_norm": 0.20420551904342377,
"learning_rate": 0.000998520905748941,
"loss": 1.2199,
"step": 100
},
{
"epoch": 0.12840110058086213,
"grad_norm": 0.18388708677430865,
"learning_rate": 0.0009975858127678633,
"loss": 1.2077,
"step": 105
},
{
"epoch": 0.13451543870376031,
"grad_norm": 0.1761201680401494,
"learning_rate": 0.0009964234631709187,
"loss": 1.209,
"step": 110
},
{
"epoch": 0.1406297768266585,
"grad_norm": 0.1869432830788675,
"learning_rate": 0.0009950343878246009,
"loss": 1.2213,
"step": 115
},
{
"epoch": 0.14674411494955672,
"grad_norm": 0.16454603069227128,
"learning_rate": 0.0009934192211452344,
"loss": 1.2123,
"step": 120
},
{
"epoch": 0.1528584530724549,
"grad_norm": 0.17917229261500017,
"learning_rate": 0.0009915787008092246,
"loss": 1.2061,
"step": 125
},
{
"epoch": 0.1589727911953531,
"grad_norm": 0.18308827320602392,
"learning_rate": 0.0009895136674161465,
"loss": 1.1957,
"step": 130
},
{
"epoch": 0.16508712931825129,
"grad_norm": 0.2523777211829573,
"learning_rate": 0.0009872250641048289,
"loss": 1.2054,
"step": 135
},
{
"epoch": 0.1712014674411495,
"grad_norm": 0.19359747631411936,
"learning_rate": 0.0009847139361226047,
"loss": 1.1985,
"step": 140
},
{
"epoch": 0.1773158055640477,
"grad_norm": 0.23659135039209145,
"learning_rate": 0.0009819814303479266,
"loss": 1.2001,
"step": 145
},
{
"epoch": 0.18343014368694588,
"grad_norm": 0.2132854592433382,
"learning_rate": 0.0009790287947665682,
"loss": 1.1941,
"step": 150
},
{
"epoch": 0.1895444818098441,
"grad_norm": 0.19468221406150724,
"learning_rate": 0.0009758573779016438,
"loss": 1.1951,
"step": 155
},
{
"epoch": 0.19565881993274228,
"grad_norm": 0.20444228050847948,
"learning_rate": 0.0009724686281977146,
"loss": 1.1769,
"step": 160
},
{
"epoch": 0.20177315805564047,
"grad_norm": 0.2084687257234228,
"learning_rate": 0.0009688640933592572,
"loss": 1.1779,
"step": 165
},
{
"epoch": 0.20788749617853866,
"grad_norm": 0.1754600490753915,
"learning_rate": 0.0009650454196437975,
"loss": 1.1789,
"step": 170
},
{
"epoch": 0.21400183430143688,
"grad_norm": 0.18164504355813466,
"learning_rate": 0.0009610143511100353,
"loss": 1.1754,
"step": 175
},
{
"epoch": 0.22011617242433507,
"grad_norm": 0.15925810705098936,
"learning_rate": 0.0009567727288213005,
"loss": 1.1752,
"step": 180
},
{
"epoch": 0.22623051054723325,
"grad_norm": 0.1606766523072054,
"learning_rate": 0.0009523224900047051,
"loss": 1.1626,
"step": 185
},
{
"epoch": 0.23234484867013147,
"grad_norm": 0.16231938577173957,
"learning_rate": 0.0009476656671663766,
"loss": 1.1711,
"step": 190
},
{
"epoch": 0.23845918679302966,
"grad_norm": 0.16604052424339474,
"learning_rate": 0.0009428043871631739,
"loss": 1.1568,
"step": 195
},
{
"epoch": 0.24457352491592785,
"grad_norm": 0.1902768291722177,
"learning_rate": 0.0009377408702313137,
"loss": 1.1697,
"step": 200
},
{
"epoch": 0.25068786303882606,
"grad_norm": 0.17198562148043406,
"learning_rate": 0.0009324774289723468,
"loss": 1.168,
"step": 205
},
{
"epoch": 0.25680220116172425,
"grad_norm": 0.22648890933241356,
"learning_rate": 0.0009270164672969508,
"loss": 1.1651,
"step": 210
},
{
"epoch": 0.26291653928462244,
"grad_norm": 0.1891627168828701,
"learning_rate": 0.0009213604793270196,
"loss": 1.1613,
"step": 215
},
{
"epoch": 0.26903087740752063,
"grad_norm": 0.22234369849486232,
"learning_rate": 0.000915512048256552,
"loss": 1.1624,
"step": 220
},
{
"epoch": 0.2751452155304188,
"grad_norm": 0.1664595846397265,
"learning_rate": 0.0009094738451718594,
"loss": 1.1653,
"step": 225
},
{
"epoch": 0.281259553653317,
"grad_norm": 0.1414580652123752,
"learning_rate": 0.0009032486278316315,
"loss": 1.1664,
"step": 230
},
{
"epoch": 0.28737389177621525,
"grad_norm": 0.18032760970578282,
"learning_rate": 0.0008968392394074163,
"loss": 1.1633,
"step": 235
},
{
"epoch": 0.29348822989911344,
"grad_norm": 0.1818715808401911,
"learning_rate": 0.0008902486071850926,
"loss": 1.1558,
"step": 240
},
{
"epoch": 0.29960256802201163,
"grad_norm": 0.15581643179469015,
"learning_rate": 0.0008834797412279236,
"loss": 1.1474,
"step": 245
},
{
"epoch": 0.3057169061449098,
"grad_norm": 0.18628404047741984,
"learning_rate": 0.0008765357330018055,
"loss": 1.1407,
"step": 250
},
{
"epoch": 0.311831244267808,
"grad_norm": 0.1828690939349481,
"learning_rate": 0.0008694197539633384,
"loss": 1.1503,
"step": 255
},
{
"epoch": 0.3179455823907062,
"grad_norm": 0.18698429489329732,
"learning_rate": 0.0008621350541113637,
"loss": 1.1464,
"step": 260
},
{
"epoch": 0.3240599205136044,
"grad_norm": 0.23750782357098996,
"learning_rate": 0.0008546849605026289,
"loss": 1.1463,
"step": 265
},
{
"epoch": 0.33017425863650257,
"grad_norm": 0.15108943315319773,
"learning_rate": 0.0008470728757322603,
"loss": 1.1403,
"step": 270
},
{
"epoch": 0.3362885967594008,
"grad_norm": 0.17391393351502005,
"learning_rate": 0.0008393022763797346,
"loss": 1.1516,
"step": 275
},
{
"epoch": 0.342402934882299,
"grad_norm": 0.15776911814405858,
"learning_rate": 0.0008313767114210615,
"loss": 1.1475,
"step": 280
},
{
"epoch": 0.3485172730051972,
"grad_norm": 0.14255060003592154,
"learning_rate": 0.0008232998006078997,
"loss": 1.1451,
"step": 285
},
{
"epoch": 0.3546316111280954,
"grad_norm": 0.1378983067954376,
"learning_rate": 0.0008150752328143514,
"loss": 1.1392,
"step": 290
},
{
"epoch": 0.36074594925099357,
"grad_norm": 0.23739900281670415,
"learning_rate": 0.0008067067643521834,
"loss": 1.1143,
"step": 295
},
{
"epoch": 0.36686028737389176,
"grad_norm": 0.22514124514702608,
"learning_rate": 0.0007981982172552517,
"loss": 1.149,
"step": 300
},
{
"epoch": 0.37297462549678995,
"grad_norm": 0.16838851595065868,
"learning_rate": 0.0007895534775339084,
"loss": 1.1305,
"step": 305
},
{
"epoch": 0.3790889636196882,
"grad_norm": 0.21771378993365692,
"learning_rate": 0.0007807764934001874,
"loss": 1.1173,
"step": 310
},
{
"epoch": 0.3852033017425864,
"grad_norm": 0.2105543225329841,
"learning_rate": 0.000771871273464585,
"loss": 1.1364,
"step": 315
},
{
"epoch": 0.39131763986548457,
"grad_norm": 0.1861657230738697,
"learning_rate": 0.0007628418849052523,
"loss": 1.127,
"step": 320
},
{
"epoch": 0.39743197798838276,
"grad_norm": 0.13053611456069847,
"learning_rate": 0.0007536924516104411,
"loss": 1.128,
"step": 325
},
{
"epoch": 0.40354631611128094,
"grad_norm": 0.1859969356442677,
"learning_rate": 0.0007444271522950469,
"loss": 1.1125,
"step": 330
},
{
"epoch": 0.40966065423417913,
"grad_norm": 0.17941469695630224,
"learning_rate": 0.0007350502185921132,
"loss": 1.1245,
"step": 335
},
{
"epoch": 0.4157749923570773,
"grad_norm": 0.15378212114500625,
"learning_rate": 0.0007255659331201672,
"loss": 1.1329,
"step": 340
},
{
"epoch": 0.42188933047997557,
"grad_norm": 0.13386440928296833,
"learning_rate": 0.0007159786275272686,
"loss": 1.1203,
"step": 345
},
{
"epoch": 0.42800366860287375,
"grad_norm": 0.15071278960167503,
"learning_rate": 0.0007062926805126653,
"loss": 1.1286,
"step": 350
},
{
"epoch": 0.43411800672577194,
"grad_norm": 0.14282041282285501,
"learning_rate": 0.0006965125158269618,
"loss": 1.1211,
"step": 355
},
{
"epoch": 0.44023234484867013,
"grad_norm": 0.1480484564790176,
"learning_rate": 0.0006866426002517105,
"loss": 1.1192,
"step": 360
},
{
"epoch": 0.4463466829715683,
"grad_norm": 0.13208648368632173,
"learning_rate": 0.0006766874415593496,
"loss": 1.1227,
"step": 365
},
{
"epoch": 0.4524610210944665,
"grad_norm": 0.1570658350610698,
"learning_rate": 0.0006666515864544209,
"loss": 1.1199,
"step": 370
},
{
"epoch": 0.4585753592173647,
"grad_norm": 0.16013392261730966,
"learning_rate": 0.0006565396184970059,
"loss": 1.1312,
"step": 375
},
{
"epoch": 0.46468969734026294,
"grad_norm": 0.1433005725195342,
"learning_rate": 0.0006463561560093292,
"loss": 1.1181,
"step": 380
},
{
"epoch": 0.47080403546316113,
"grad_norm": 0.13688297258711019,
"learning_rate": 0.0006361058499664855,
"loss": 1.1138,
"step": 385
},
{
"epoch": 0.4769183735860593,
"grad_norm": 0.12936178817677169,
"learning_rate": 0.0006257933818722543,
"loss": 1.1136,
"step": 390
},
{
"epoch": 0.4830327117089575,
"grad_norm": 0.1450123517204614,
"learning_rate": 0.0006154234616209693,
"loss": 1.1123,
"step": 395
},
{
"epoch": 0.4891470498318557,
"grad_norm": 0.19082099550140247,
"learning_rate": 0.0006050008253464246,
"loss": 1.1071,
"step": 400
},
{
"epoch": 0.4952613879547539,
"grad_norm": 0.1633019133744981,
"learning_rate": 0.0005945302332587938,
"loss": 1.1046,
"step": 405
},
{
"epoch": 0.5013757260776521,
"grad_norm": 0.14334565800139673,
"learning_rate": 0.0005840164674705543,
"loss": 1.0991,
"step": 410
},
{
"epoch": 0.5074900642005503,
"grad_norm": 0.1283754985718675,
"learning_rate": 0.000573464329812409,
"loss": 1.1018,
"step": 415
},
{
"epoch": 0.5136044023234485,
"grad_norm": 0.1463492410025938,
"learning_rate": 0.0005628786396402013,
"loss": 1.1037,
"step": 420
},
{
"epoch": 0.5197187404463467,
"grad_norm": 0.17110501996898891,
"learning_rate": 0.0005522642316338268,
"loss": 1.109,
"step": 425
},
{
"epoch": 0.5258330785692449,
"grad_norm": 0.14603146426070193,
"learning_rate": 0.0005416259535891447,
"loss": 1.107,
"step": 430
},
{
"epoch": 0.5319474166921431,
"grad_norm": 0.12977502479364594,
"learning_rate": 0.0005309686642039016,
"loss": 1.0912,
"step": 435
},
{
"epoch": 0.5380617548150413,
"grad_norm": 0.15402365849950914,
"learning_rate": 0.0005202972308586735,
"loss": 1.1141,
"step": 440
},
{
"epoch": 0.5441760929379394,
"grad_norm": 0.1482038633414531,
"learning_rate": 0.0005096165273938436,
"loss": 1.1078,
"step": 445
},
{
"epoch": 0.5502904310608376,
"grad_norm": 0.1582032648284069,
"learning_rate": 0.0004989314318836302,
"loss": 1.0996,
"step": 450
},
{
"epoch": 0.5564047691837358,
"grad_norm": 0.13579614369914994,
"learning_rate": 0.00048824682440817927,
"loss": 1.0987,
"step": 455
},
{
"epoch": 0.562519107306634,
"grad_norm": 0.1300660489532684,
"learning_rate": 0.0004775675848247427,
"loss": 1.1099,
"step": 460
},
{
"epoch": 0.5686334454295322,
"grad_norm": 0.13968338513281628,
"learning_rate": 0.0004668985905389563,
"loss": 1.0937,
"step": 465
},
{
"epoch": 0.5747477835524305,
"grad_norm": 0.16262859924407072,
"learning_rate": 0.0004562447142772404,
"loss": 1.0971,
"step": 470
},
{
"epoch": 0.5808621216753287,
"grad_norm": 0.13724758532244924,
"learning_rate": 0.0004456108218613346,
"loss": 1.0858,
"step": 475
},
{
"epoch": 0.5869764597982269,
"grad_norm": 0.15943293068583117,
"learning_rate": 0.00043500176998598775,
"loss": 1.0849,
"step": 480
},
{
"epoch": 0.5930907979211251,
"grad_norm": 0.13656142679668365,
"learning_rate": 0.0004244224040008156,
"loss": 1.084,
"step": 485
},
{
"epoch": 0.5992051360440233,
"grad_norm": 0.1918987941973429,
"learning_rate": 0.00041387755569734057,
"loss": 1.0952,
"step": 490
},
{
"epoch": 0.6053194741669214,
"grad_norm": 0.13992824168264428,
"learning_rate": 0.0004033720411022235,
"loss": 1.0829,
"step": 495
},
{
"epoch": 0.6114338122898196,
"grad_norm": 0.12770811747765382,
"learning_rate": 0.00039291065827769484,
"loss": 1.0834,
"step": 500
},
{
"epoch": 0.6175481504127178,
"grad_norm": 0.14513522913624471,
"learning_rate": 0.0003824981851301924,
"loss": 1.0759,
"step": 505
},
{
"epoch": 0.623662488535616,
"grad_norm": 0.15906008890579026,
"learning_rate": 0.0003721393772282022,
"loss": 1.0838,
"step": 510
},
{
"epoch": 0.6297768266585142,
"grad_norm": 0.14753359066184585,
"learning_rate": 0.00036183896563030295,
"loss": 1.096,
"step": 515
},
{
"epoch": 0.6358911647814124,
"grad_norm": 0.19625007746221967,
"learning_rate": 0.0003516016547244047,
"loss": 1.0884,
"step": 520
},
{
"epoch": 0.6420055029043106,
"grad_norm": 0.1414937009903846,
"learning_rate": 0.00034143212007916793,
"loss": 1.0891,
"step": 525
},
{
"epoch": 0.6481198410272088,
"grad_norm": 0.12644236847717116,
"learning_rate": 0.00033133500630858504,
"loss": 1.0756,
"step": 530
},
{
"epoch": 0.654234179150107,
"grad_norm": 0.1491455392148122,
"learning_rate": 0.0003213149249506997,
"loss": 1.0861,
"step": 535
},
{
"epoch": 0.6603485172730051,
"grad_norm": 0.14457526186454986,
"learning_rate": 0.00031137645236143204,
"loss": 1.0728,
"step": 540
},
{
"epoch": 0.6664628553959034,
"grad_norm": 0.12012485497625133,
"learning_rate": 0.0003015241276244729,
"loss": 1.0841,
"step": 545
},
{
"epoch": 0.6725771935188016,
"grad_norm": 0.13978822306142538,
"learning_rate": 0.00029176245047820063,
"loss": 1.0621,
"step": 550
},
{
"epoch": 0.6786915316416998,
"grad_norm": 0.12853015004355592,
"learning_rate": 0.0002820958792605669,
"loss": 1.0708,
"step": 555
},
{
"epoch": 0.684805869764598,
"grad_norm": 0.11585186120998929,
"learning_rate": 0.00027252882887289287,
"loss": 1.0711,
"step": 560
},
{
"epoch": 0.6909202078874962,
"grad_norm": 0.14865383342499527,
"learning_rate": 0.0002630656687635007,
"loss": 1.0844,
"step": 565
},
{
"epoch": 0.6970345460103944,
"grad_norm": 0.13508046722758654,
"learning_rate": 0.0002537107209321074,
"loss": 1.0791,
"step": 570
},
{
"epoch": 0.7031488841332926,
"grad_norm": 0.11778899644846881,
"learning_rate": 0.0002444682579558872,
"loss": 1.0785,
"step": 575
},
{
"epoch": 0.7092632222561908,
"grad_norm": 0.15166870043920086,
"learning_rate": 0.00023534250103810628,
"loss": 1.0667,
"step": 580
},
{
"epoch": 0.715377560379089,
"grad_norm": 0.12076822512991668,
"learning_rate": 0.00022633761808022273,
"loss": 1.0687,
"step": 585
},
{
"epoch": 0.7214918985019871,
"grad_norm": 0.11772715196105518,
"learning_rate": 0.00021745772177832756,
"loss": 1.0827,
"step": 590
},
{
"epoch": 0.7276062366248853,
"grad_norm": 0.15073172455203387,
"learning_rate": 0.00020870686774480197,
"loss": 1.0708,
"step": 595
},
{
"epoch": 0.7337205747477835,
"grad_norm": 0.1284498530243719,
"learning_rate": 0.00020008905265604316,
"loss": 1.064,
"step": 600
},
{
"epoch": 0.7398349128706817,
"grad_norm": 0.13233007567855126,
"learning_rate": 0.00019160821242710958,
"loss": 1.0775,
"step": 605
},
{
"epoch": 0.7459492509935799,
"grad_norm": 0.118946525193676,
"learning_rate": 0.00018326822041411523,
"loss": 1.0706,
"step": 610
},
{
"epoch": 0.7520635891164782,
"grad_norm": 0.11579836167906876,
"learning_rate": 0.00017507288564519647,
"loss": 1.065,
"step": 615
},
{
"epoch": 0.7581779272393764,
"grad_norm": 0.1407092750820331,
"learning_rate": 0.00016702595108085945,
"loss": 1.0822,
"step": 620
},
{
"epoch": 0.7642922653622746,
"grad_norm": 0.12439943984469826,
"learning_rate": 0.0001591310919045003,
"loss": 1.0793,
"step": 625
},
{
"epoch": 0.7704066034851728,
"grad_norm": 0.13472018270174022,
"learning_rate": 0.00015139191384388095,
"loss": 1.0572,
"step": 630
},
{
"epoch": 0.776520941608071,
"grad_norm": 0.12796711656400106,
"learning_rate": 0.00014381195152432768,
"loss": 1.0679,
"step": 635
},
{
"epoch": 0.7826352797309691,
"grad_norm": 0.11666652195838892,
"learning_rate": 0.00013639466685440134,
"loss": 1.0668,
"step": 640
},
{
"epoch": 0.7887496178538673,
"grad_norm": 0.14185284679055496,
"learning_rate": 0.00012914344744478112,
"loss": 1.0763,
"step": 645
},
{
"epoch": 0.7948639559767655,
"grad_norm": 0.14426969385622815,
"learning_rate": 0.0001220616050610791,
"loss": 1.0618,
"step": 650
},
{
"epoch": 0.8009782940996637,
"grad_norm": 0.12467918636907535,
"learning_rate": 0.00011515237411129698,
"loss": 1.0667,
"step": 655
},
{
"epoch": 0.8070926322225619,
"grad_norm": 0.13918265869059762,
"learning_rate": 0.00010841891016861154,
"loss": 1.0566,
"step": 660
},
{
"epoch": 0.8132069703454601,
"grad_norm": 0.11731789548520001,
"learning_rate": 0.00010186428853016605,
"loss": 1.0615,
"step": 665
},
{
"epoch": 0.8193213084683583,
"grad_norm": 0.11238429005387958,
"learning_rate": 9.549150281252633e-05,
"loss": 1.0674,
"step": 670
},
{
"epoch": 0.8254356465912565,
"grad_norm": 0.10911112358488025,
"learning_rate": 8.930346358443952e-05,
"loss": 1.0661,
"step": 675
},
{
"epoch": 0.8315499847141546,
"grad_norm": 0.1203925287318847,
"learning_rate": 8.330299703752498e-05,
"loss": 1.0651,
"step": 680
},
{
"epoch": 0.8376643228370529,
"grad_norm": 0.11047748542152162,
"learning_rate": 7.749284369549953e-05,
"loss": 1.0586,
"step": 685
},
{
"epoch": 0.8437786609599511,
"grad_norm": 0.1722568562787882,
"learning_rate": 7.187565716252992e-05,
"loss": 1.0704,
"step": 690
},
{
"epoch": 0.8498929990828493,
"grad_norm": 0.11963193105519696,
"learning_rate": 6.645400291128356e-05,
"loss": 1.066,
"step": 695
},
{
"epoch": 0.8560073372057475,
"grad_norm": 0.11447323495223453,
"learning_rate": 6.123035711122859e-05,
"loss": 1.0584,
"step": 700
},
{
"epoch": 0.8621216753286457,
"grad_norm": 0.10602469302602112,
"learning_rate": 5.6207105497722956e-05,
"loss": 1.0554,
"step": 705
},
{
"epoch": 0.8682360134515439,
"grad_norm": 0.12779086014302005,
"learning_rate": 5.138654228240425e-05,
"loss": 1.06,
"step": 710
},
{
"epoch": 0.8743503515744421,
"grad_norm": 0.10712527024629802,
"learning_rate": 4.677086910538092e-05,
"loss": 1.057,
"step": 715
},
{
"epoch": 0.8804646896973403,
"grad_norm": 0.1110509251221197,
"learning_rate": 4.236219402970326e-05,
"loss": 1.0662,
"step": 720
},
{
"epoch": 0.8865790278202385,
"grad_norm": 0.10627329261644873,
"learning_rate": 3.816253057857144e-05,
"loss": 1.0539,
"step": 725
},
{
"epoch": 0.8926933659431366,
"grad_norm": 0.11117466264985419,
"learning_rate": 3.417379681572297e-05,
"loss": 1.0711,
"step": 730
},
{
"epoch": 0.8988077040660348,
"grad_norm": 0.12734986634092663,
"learning_rate": 3.0397814469416973e-05,
"loss": 1.0684,
"step": 735
},
{
"epoch": 0.904922042188933,
"grad_norm": 0.10919861325825958,
"learning_rate": 2.683630810041787e-05,
"loss": 1.0644,
"step": 740
},
{
"epoch": 0.9110363803118312,
"grad_norm": 0.11597392535446147,
"learning_rate": 2.349090431435641e-05,
"loss": 1.0538,
"step": 745
},
{
"epoch": 0.9171507184347294,
"grad_norm": 0.10441245889752122,
"learning_rate": 2.0363131018828753e-05,
"loss": 1.0599,
"step": 750
},
{
"epoch": 0.9232650565576276,
"grad_norm": 0.11638590802463976,
"learning_rate": 1.7454416725573353e-05,
"loss": 1.0486,
"step": 755
},
{
"epoch": 0.9293793946805259,
"grad_norm": 0.10648348001728884,
"learning_rate": 1.4766089898042678e-05,
"loss": 1.0654,
"step": 760
},
{
"epoch": 0.9354937328034241,
"grad_norm": 0.1110444377849314,
"learning_rate": 1.2299378344669988e-05,
"loss": 1.0588,
"step": 765
},
{
"epoch": 0.9416080709263223,
"grad_norm": 0.12147062781920358,
"learning_rate": 1.0055408658106447e-05,
"loss": 1.0592,
"step": 770
},
{
"epoch": 0.9477224090492204,
"grad_norm": 0.112528959579753,
"learning_rate": 8.035205700685167e-06,
"loss": 1.0713,
"step": 775
},
{
"epoch": 0.9538367471721186,
"grad_norm": 0.10757445583800135,
"learning_rate": 6.239692136348285e-06,
"loss": 1.0676,
"step": 780
},
{
"epoch": 0.9599510852950168,
"grad_norm": 0.10358653956633265,
"learning_rate": 4.669688009248607e-06,
"loss": 1.0485,
"step": 785
},
{
"epoch": 0.966065423417915,
"grad_norm": 0.10626706710404664,
"learning_rate": 3.325910369220975e-06,
"loss": 1.0511,
"step": 790
},
{
"epoch": 0.9721797615408132,
"grad_norm": 0.09843904746519867,
"learning_rate": 2.20897294429212e-06,
"loss": 1.0504,
"step": 795
},
{
"epoch": 0.9782940996637114,
"grad_norm": 0.10693604886979197,
"learning_rate": 1.3193858603794961e-06,
"loss": 1.0729,
"step": 800
},
{
"epoch": 0.9844084377866096,
"grad_norm": 0.10245590009649519,
"learning_rate": 6.575554083078084e-07,
"loss": 1.0542,
"step": 805
},
{
"epoch": 0.9905227759095078,
"grad_norm": 0.10972041398172515,
"learning_rate": 2.2378385824833868e-07,
"loss": 1.0521,
"step": 810
},
{
"epoch": 0.996637114032406,
"grad_norm": 0.10636546627124496,
"learning_rate": 1.8269321666375404e-08,
"loss": 1.0698,
"step": 815
},
{
"epoch": 0.9990828492815653,
"eval_loss": 1.4192770719528198,
"eval_runtime": 103.0721,
"eval_samples_per_second": 203.256,
"eval_steps_per_second": 6.355,
"step": 817
},
{
"epoch": 0.9990828492815653,
"step": 817,
"total_flos": 45136994107392.0,
"train_loss": 1.1321502848091733,
"train_runtime": 1720.7287,
"train_samples_per_second": 60.822,
"train_steps_per_second": 0.475
}
],
"logging_steps": 5,
"max_steps": 817,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 45136994107392.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}