{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.51219512195122,
"eval_steps": 100,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1951219512195122,
"grad_norm": 3.5239181518554688,
"learning_rate": 4.999451708687114e-05,
"loss": 1.1522,
"num_input_tokens_seen": 2231104,
"step": 5
},
{
"epoch": 0.3902439024390244,
"grad_norm": 1.2978184223175049,
"learning_rate": 4.997807075247146e-05,
"loss": 0.8861,
"num_input_tokens_seen": 4468608,
"step": 10
},
{
"epoch": 0.5853658536585366,
"grad_norm": 1.0176712274551392,
"learning_rate": 4.995066821070679e-05,
"loss": 0.8351,
"num_input_tokens_seen": 6779584,
"step": 15
},
{
"epoch": 0.7804878048780488,
"grad_norm": 0.6517723798751831,
"learning_rate": 4.991232148123761e-05,
"loss": 0.7797,
"num_input_tokens_seen": 9106880,
"step": 20
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.6675965785980225,
"learning_rate": 4.9863047384206835e-05,
"loss": 0.7575,
"num_input_tokens_seen": 11435136,
"step": 25
},
{
"epoch": 1.170731707317073,
"grad_norm": 1.225784182548523,
"learning_rate": 4.980286753286195e-05,
"loss": 0.7345,
"num_input_tokens_seen": 13738880,
"step": 30
},
{
"epoch": 1.3658536585365852,
"grad_norm": 0.4991011321544647,
"learning_rate": 4.9731808324074717e-05,
"loss": 0.7443,
"num_input_tokens_seen": 16001920,
"step": 35
},
{
"epoch": 1.5609756097560976,
"grad_norm": 0.7908358573913574,
"learning_rate": 4.964990092676263e-05,
"loss": 0.7211,
"num_input_tokens_seen": 18282944,
"step": 40
},
{
"epoch": 1.7560975609756098,
"grad_norm": 0.9223726391792297,
"learning_rate": 4.9557181268217227e-05,
"loss": 0.7306,
"num_input_tokens_seen": 20575296,
"step": 45
},
{
"epoch": 1.951219512195122,
"grad_norm": 0.8891558051109314,
"learning_rate": 4.9453690018345144e-05,
"loss": 0.7039,
"num_input_tokens_seen": 22849856,
"step": 50
},
{
"epoch": 2.1463414634146343,
"grad_norm": 1.3324096202850342,
"learning_rate": 4.933947257182901e-05,
"loss": 0.7044,
"num_input_tokens_seen": 25176896,
"step": 55
},
{
"epoch": 2.341463414634146,
"grad_norm": 1.0357320308685303,
"learning_rate": 4.9214579028215776e-05,
"loss": 0.6768,
"num_input_tokens_seen": 27457472,
"step": 60
},
{
"epoch": 2.5365853658536586,
"grad_norm": 0.6939280033111572,
"learning_rate": 4.907906416994146e-05,
"loss": 0.6906,
"num_input_tokens_seen": 29772544,
"step": 65
},
{
"epoch": 2.7317073170731705,
"grad_norm": 0.7578678727149963,
"learning_rate": 4.893298743830168e-05,
"loss": 0.6595,
"num_input_tokens_seen": 32013760,
"step": 70
},
{
"epoch": 2.926829268292683,
"grad_norm": 0.548279345035553,
"learning_rate": 4.877641290737884e-05,
"loss": 0.6514,
"num_input_tokens_seen": 34342848,
"step": 75
},
{
"epoch": 3.1219512195121952,
"grad_norm": 1.1982917785644531,
"learning_rate": 4.860940925593703e-05,
"loss": 0.6498,
"num_input_tokens_seen": 36592000,
"step": 80
},
{
"epoch": 3.317073170731707,
"grad_norm": 1.0446078777313232,
"learning_rate": 4.843204973729729e-05,
"loss": 0.6537,
"num_input_tokens_seen": 38878528,
"step": 85
},
{
"epoch": 3.5121951219512195,
"grad_norm": 0.7469108700752258,
"learning_rate": 4.8244412147206284e-05,
"loss": 0.5996,
"num_input_tokens_seen": 41156672,
"step": 90
},
{
"epoch": 3.7073170731707314,
"grad_norm": 0.7562854886054993,
"learning_rate": 4.8046578789712515e-05,
"loss": 0.6289,
"num_input_tokens_seen": 43475456,
"step": 95
},
{
"epoch": 3.902439024390244,
"grad_norm": 0.7673454880714417,
"learning_rate": 4.783863644106502e-05,
"loss": 0.6607,
"num_input_tokens_seen": 45798272,
"step": 100
},
{
"epoch": 3.902439024390244,
"eval_loss": 0.6353042125701904,
"eval_runtime": 174.6543,
"eval_samples_per_second": 12.07,
"eval_steps_per_second": 0.504,
"num_input_tokens_seen": 45798272,
"step": 100
},
{
"epoch": 4.097560975609756,
"grad_norm": 0.6962050199508667,
"learning_rate": 4.762067631165049e-05,
"loss": 0.5924,
"num_input_tokens_seen": 48035456,
"step": 105
},
{
"epoch": 4.2926829268292686,
"grad_norm": 1.080012559890747,
"learning_rate": 4.7392794005985326e-05,
"loss": 0.6107,
"num_input_tokens_seen": 50295680,
"step": 110
},
{
"epoch": 4.487804878048781,
"grad_norm": 0.6142390966415405,
"learning_rate": 4.715508948078037e-05,
"loss": 0.6016,
"num_input_tokens_seen": 52611072,
"step": 115
},
{
"epoch": 4.682926829268292,
"grad_norm": 0.7695103883743286,
"learning_rate": 4.690766700109659e-05,
"loss": 0.5946,
"num_input_tokens_seen": 54817920,
"step": 120
},
{
"epoch": 4.878048780487805,
"grad_norm": 0.7141166925430298,
"learning_rate": 4.665063509461097e-05,
"loss": 0.5926,
"num_input_tokens_seen": 57135552,
"step": 125
},
{
"epoch": 5.073170731707317,
"grad_norm": 0.9515812993049622,
"learning_rate": 4.638410650401267e-05,
"loss": 0.5986,
"num_input_tokens_seen": 59455680,
"step": 130
},
{
"epoch": 5.2682926829268295,
"grad_norm": 1.5192577838897705,
"learning_rate": 4.610819813755038e-05,
"loss": 0.5619,
"num_input_tokens_seen": 61782208,
"step": 135
},
{
"epoch": 5.463414634146342,
"grad_norm": 1.0420284271240234,
"learning_rate": 4.5823031017752485e-05,
"loss": 0.5739,
"num_input_tokens_seen": 64051520,
"step": 140
},
{
"epoch": 5.658536585365853,
"grad_norm": 0.6723448038101196,
"learning_rate": 4.5528730228342605e-05,
"loss": 0.5673,
"num_input_tokens_seen": 66320192,
"step": 145
},
{
"epoch": 5.853658536585366,
"grad_norm": 0.8727495074272156,
"learning_rate": 4.522542485937369e-05,
"loss": 0.5607,
"num_input_tokens_seen": 68535424,
"step": 150
},
{
"epoch": 6.048780487804878,
"grad_norm": 1.1162189245224,
"learning_rate": 4.491324795060491e-05,
"loss": 0.5711,
"num_input_tokens_seen": 70861632,
"step": 155
},
{
"epoch": 6.2439024390243905,
"grad_norm": 1.0553621053695679,
"learning_rate": 4.4592336433146e-05,
"loss": 0.5437,
"num_input_tokens_seen": 73124480,
"step": 160
},
{
"epoch": 6.439024390243903,
"grad_norm": 1.105603575706482,
"learning_rate": 4.426283106939474e-05,
"loss": 0.5437,
"num_input_tokens_seen": 75439232,
"step": 165
},
{
"epoch": 6.634146341463414,
"grad_norm": 0.9295886158943176,
"learning_rate": 4.3924876391293915e-05,
"loss": 0.5321,
"num_input_tokens_seen": 77689536,
"step": 170
},
{
"epoch": 6.829268292682927,
"grad_norm": 0.9514282941818237,
"learning_rate": 4.357862063693486e-05,
"loss": 0.5075,
"num_input_tokens_seen": 80000000,
"step": 175
},
{
"epoch": 7.024390243902439,
"grad_norm": 0.641543447971344,
"learning_rate": 4.3224215685535294e-05,
"loss": 0.5246,
"num_input_tokens_seen": 82294336,
"step": 180
},
{
"epoch": 7.219512195121951,
"grad_norm": 0.6666741967201233,
"learning_rate": 4.2861816990820084e-05,
"loss": 0.5021,
"num_input_tokens_seen": 84609856,
"step": 185
},
{
"epoch": 7.414634146341464,
"grad_norm": 0.9410921931266785,
"learning_rate": 4.249158351283414e-05,
"loss": 0.4766,
"num_input_tokens_seen": 86956608,
"step": 190
},
{
"epoch": 7.609756097560975,
"grad_norm": 0.9510065913200378,
"learning_rate": 4.211367764821722e-05,
"loss": 0.4922,
"num_input_tokens_seen": 89199040,
"step": 195
},
{
"epoch": 7.804878048780488,
"grad_norm": 0.9778364896774292,
"learning_rate": 4.172826515897146e-05,
"loss": 0.5149,
"num_input_tokens_seen": 91464512,
"step": 200
},
{
"epoch": 7.804878048780488,
"eval_loss": 0.6269230246543884,
"eval_runtime": 173.5455,
"eval_samples_per_second": 12.147,
"eval_steps_per_second": 0.507,
"num_input_tokens_seen": 91464512,
"step": 200
},
{
"epoch": 8.0,
"grad_norm": 1.1030495166778564,
"learning_rate": 4.133551509975264e-05,
"loss": 0.5019,
"num_input_tokens_seen": 93698368,
"step": 205
},
{
"epoch": 8.195121951219512,
"grad_norm": 0.8688378930091858,
"learning_rate": 4.093559974371725e-05,
"loss": 0.4275,
"num_input_tokens_seen": 96008256,
"step": 210
},
{
"epoch": 8.390243902439025,
"grad_norm": 0.9601365327835083,
"learning_rate": 4.052869450695776e-05,
"loss": 0.4545,
"num_input_tokens_seen": 98232832,
"step": 215
},
{
"epoch": 8.585365853658537,
"grad_norm": 0.9640222191810608,
"learning_rate": 4.011497787155938e-05,
"loss": 0.4577,
"num_input_tokens_seen": 100520128,
"step": 220
},
{
"epoch": 8.78048780487805,
"grad_norm": 0.7299013137817383,
"learning_rate": 3.969463130731183e-05,
"loss": 0.4589,
"num_input_tokens_seen": 102814976,
"step": 225
},
{
"epoch": 8.975609756097562,
"grad_norm": 0.6459980607032776,
"learning_rate": 3.92678391921108e-05,
"loss": 0.4482,
"num_input_tokens_seen": 105142528,
"step": 230
},
{
"epoch": 9.170731707317072,
"grad_norm": 0.8188865184783936,
"learning_rate": 3.883478873108361e-05,
"loss": 0.4301,
"num_input_tokens_seen": 107394880,
"step": 235
},
{
"epoch": 9.365853658536585,
"grad_norm": 0.7741889953613281,
"learning_rate": 3.8395669874474915e-05,
"loss": 0.3852,
"num_input_tokens_seen": 109706560,
"step": 240
},
{
"epoch": 9.560975609756097,
"grad_norm": 0.7010043263435364,
"learning_rate": 3.795067523432826e-05,
"loss": 0.3886,
"num_input_tokens_seen": 111993664,
"step": 245
},
{
"epoch": 9.75609756097561,
"grad_norm": 0.5523029565811157,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.4028,
"num_input_tokens_seen": 114255232,
"step": 250
},
{
"epoch": 9.951219512195122,
"grad_norm": 0.5837187170982361,
"learning_rate": 3.704384185254288e-05,
"loss": 0.3847,
"num_input_tokens_seen": 116578560,
"step": 255
},
{
"epoch": 10.146341463414634,
"grad_norm": 0.7814375162124634,
"learning_rate": 3.6582400877996546e-05,
"loss": 0.3516,
"num_input_tokens_seen": 118836800,
"step": 260
},
{
"epoch": 10.341463414634147,
"grad_norm": 0.9329513907432556,
"learning_rate": 3.611587947962319e-05,
"loss": 0.3504,
"num_input_tokens_seen": 121148608,
"step": 265
},
{
"epoch": 10.536585365853659,
"grad_norm": 0.5801968574523926,
"learning_rate": 3.564448228912682e-05,
"loss": 0.3303,
"num_input_tokens_seen": 123490176,
"step": 270
},
{
"epoch": 10.731707317073171,
"grad_norm": 0.5544772744178772,
"learning_rate": 3.516841607689501e-05,
"loss": 0.336,
"num_input_tokens_seen": 125741376,
"step": 275
},
{
"epoch": 10.926829268292684,
"grad_norm": 0.5629393458366394,
"learning_rate": 3.4687889661302576e-05,
"loss": 0.337,
"num_input_tokens_seen": 127996480,
"step": 280
},
{
"epoch": 11.121951219512194,
"grad_norm": 0.6132429242134094,
"learning_rate": 3.4203113817116957e-05,
"loss": 0.3102,
"num_input_tokens_seen": 130286208,
"step": 285
},
{
"epoch": 11.317073170731707,
"grad_norm": 0.6265811920166016,
"learning_rate": 3.3714301183045385e-05,
"loss": 0.2777,
"num_input_tokens_seen": 132600256,
"step": 290
},
{
"epoch": 11.512195121951219,
"grad_norm": 0.7565940618515015,
"learning_rate": 3.322166616846458e-05,
"loss": 0.2875,
"num_input_tokens_seen": 134908928,
"step": 295
},
{
"epoch": 11.707317073170731,
"grad_norm": 0.8063022494316101,
"learning_rate": 3.272542485937369e-05,
"loss": 0.2768,
"num_input_tokens_seen": 137181248,
"step": 300
},
{
"epoch": 11.707317073170731,
"eval_loss": 0.718697726726532,
"eval_runtime": 173.4793,
"eval_samples_per_second": 12.151,
"eval_steps_per_second": 0.507,
"num_input_tokens_seen": 137181248,
"step": 300
},
{
"epoch": 11.902439024390244,
"grad_norm": 0.7160292267799377,
"learning_rate": 3.222579492361179e-05,
"loss": 0.2823,
"num_input_tokens_seen": 139385728,
"step": 305
},
{
"epoch": 12.097560975609756,
"grad_norm": 0.6414353251457214,
"learning_rate": 3.172299551538164e-05,
"loss": 0.27,
"num_input_tokens_seen": 141670400,
"step": 310
},
{
"epoch": 12.292682926829269,
"grad_norm": 0.6131883859634399,
"learning_rate": 3.121724717912138e-05,
"loss": 0.2308,
"num_input_tokens_seen": 143944512,
"step": 315
},
{
"epoch": 12.487804878048781,
"grad_norm": 0.46672749519348145,
"learning_rate": 3.0708771752766394e-05,
"loss": 0.2236,
"num_input_tokens_seen": 146200128,
"step": 320
},
{
"epoch": 12.682926829268293,
"grad_norm": 0.48453348875045776,
"learning_rate": 3.0197792270443982e-05,
"loss": 0.2227,
"num_input_tokens_seen": 148494912,
"step": 325
},
{
"epoch": 12.878048780487806,
"grad_norm": 0.5639292597770691,
"learning_rate": 2.9684532864643122e-05,
"loss": 0.2236,
"num_input_tokens_seen": 150765632,
"step": 330
},
{
"epoch": 13.073170731707316,
"grad_norm": 0.45188578963279724,
"learning_rate": 2.916921866790256e-05,
"loss": 0.2193,
"num_input_tokens_seen": 153025984,
"step": 335
},
{
"epoch": 13.268292682926829,
"grad_norm": 0.5362280011177063,
"learning_rate": 2.8652075714060295e-05,
"loss": 0.1804,
"num_input_tokens_seen": 155359744,
"step": 340
},
{
"epoch": 13.463414634146341,
"grad_norm": 0.46796223521232605,
"learning_rate": 2.8133330839107608e-05,
"loss": 0.1763,
"num_input_tokens_seen": 157653760,
"step": 345
},
{
"epoch": 13.658536585365853,
"grad_norm": 0.6022464036941528,
"learning_rate": 2.761321158169134e-05,
"loss": 0.1785,
"num_input_tokens_seen": 159970112,
"step": 350
},
{
"epoch": 13.853658536585366,
"grad_norm": 0.6007694005966187,
"learning_rate": 2.7091946083307896e-05,
"loss": 0.1847,
"num_input_tokens_seen": 162266048,
"step": 355
},
{
"epoch": 14.048780487804878,
"grad_norm": 0.5330171585083008,
"learning_rate": 2.656976298823284e-05,
"loss": 0.1765,
"num_input_tokens_seen": 164502784,
"step": 360
},
{
"epoch": 14.24390243902439,
"grad_norm": 0.4508293569087982,
"learning_rate": 2.604689134322999e-05,
"loss": 0.1433,
"num_input_tokens_seen": 166793792,
"step": 365
},
{
"epoch": 14.439024390243903,
"grad_norm": 0.4898134469985962,
"learning_rate": 2.5523560497083926e-05,
"loss": 0.1436,
"num_input_tokens_seen": 169105536,
"step": 370
},
{
"epoch": 14.634146341463415,
"grad_norm": 0.5252612829208374,
"learning_rate": 2.5e-05,
"loss": 0.1456,
"num_input_tokens_seen": 171380160,
"step": 375
},
{
"epoch": 14.829268292682928,
"grad_norm": 0.45608317852020264,
"learning_rate": 2.447643950291608e-05,
"loss": 0.1361,
"num_input_tokens_seen": 173636544,
"step": 380
},
{
"epoch": 15.024390243902438,
"grad_norm": 0.5009132623672485,
"learning_rate": 2.3953108656770016e-05,
"loss": 0.1381,
"num_input_tokens_seen": 175912832,
"step": 385
},
{
"epoch": 15.21951219512195,
"grad_norm": 0.4505337178707123,
"learning_rate": 2.3430237011767167e-05,
"loss": 0.1121,
"num_input_tokens_seen": 178157824,
"step": 390
},
{
"epoch": 15.414634146341463,
"grad_norm": 0.43384668231010437,
"learning_rate": 2.2908053916692117e-05,
"loss": 0.1103,
"num_input_tokens_seen": 180468736,
"step": 395
},
{
"epoch": 15.609756097560975,
"grad_norm": 0.4433673620223999,
"learning_rate": 2.238678841830867e-05,
"loss": 0.1126,
"num_input_tokens_seen": 182788736,
"step": 400
},
{
"epoch": 15.609756097560975,
"eval_loss": 0.8370733261108398,
"eval_runtime": 173.2462,
"eval_samples_per_second": 12.168,
"eval_steps_per_second": 0.508,
"num_input_tokens_seen": 182788736,
"step": 400
},
{
"epoch": 15.804878048780488,
"grad_norm": 0.39896300435066223,
"learning_rate": 2.186666916089239e-05,
"loss": 0.1114,
"num_input_tokens_seen": 185050944,
"step": 405
},
{
"epoch": 16.0,
"grad_norm": 0.3975840210914612,
"learning_rate": 2.1347924285939714e-05,
"loss": 0.1135,
"num_input_tokens_seen": 187289920,
"step": 410
},
{
"epoch": 16.195121951219512,
"grad_norm": 0.3381832242012024,
"learning_rate": 2.0830781332097446e-05,
"loss": 0.0935,
"num_input_tokens_seen": 189562944,
"step": 415
},
{
"epoch": 16.390243902439025,
"grad_norm": 0.3201667070388794,
"learning_rate": 2.031546713535688e-05,
"loss": 0.0851,
"num_input_tokens_seen": 191813696,
"step": 420
},
{
"epoch": 16.585365853658537,
"grad_norm": 0.38780859112739563,
"learning_rate": 1.980220772955602e-05,
"loss": 0.0897,
"num_input_tokens_seen": 194094144,
"step": 425
},
{
"epoch": 16.78048780487805,
"grad_norm": 0.31540414690971375,
"learning_rate": 1.9291228247233605e-05,
"loss": 0.0886,
"num_input_tokens_seen": 196385984,
"step": 430
},
{
"epoch": 16.975609756097562,
"grad_norm": 0.5678117275238037,
"learning_rate": 1.8782752820878634e-05,
"loss": 0.0899,
"num_input_tokens_seen": 198700032,
"step": 435
},
{
"epoch": 17.170731707317074,
"grad_norm": 0.4425772726535797,
"learning_rate": 1.827700448461836e-05,
"loss": 0.0753,
"num_input_tokens_seen": 201036480,
"step": 440
},
{
"epoch": 17.365853658536587,
"grad_norm": 0.32113131880760193,
"learning_rate": 1.7774205076388206e-05,
"loss": 0.0724,
"num_input_tokens_seen": 203331520,
"step": 445
},
{
"epoch": 17.5609756097561,
"grad_norm": 0.2662774920463562,
"learning_rate": 1.7274575140626318e-05,
"loss": 0.0711,
"num_input_tokens_seen": 205627072,
"step": 450
},
{
"epoch": 17.75609756097561,
"grad_norm": 0.27264896035194397,
"learning_rate": 1.677833383153542e-05,
"loss": 0.0719,
"num_input_tokens_seen": 207845568,
"step": 455
},
{
"epoch": 17.951219512195124,
"grad_norm": 0.29512110352516174,
"learning_rate": 1.6285698816954624e-05,
"loss": 0.0704,
"num_input_tokens_seen": 210180736,
"step": 460
},
{
"epoch": 18.146341463414632,
"grad_norm": 0.252193421125412,
"learning_rate": 1.5796886182883053e-05,
"loss": 0.0666,
"num_input_tokens_seen": 212454016,
"step": 465
},
{
"epoch": 18.341463414634145,
"grad_norm": 0.23161688446998596,
"learning_rate": 1.5312110338697426e-05,
"loss": 0.0564,
"num_input_tokens_seen": 214714240,
"step": 470
},
{
"epoch": 18.536585365853657,
"grad_norm": 0.25446033477783203,
"learning_rate": 1.4831583923104999e-05,
"loss": 0.0617,
"num_input_tokens_seen": 217009664,
"step": 475
},
{
"epoch": 18.73170731707317,
"grad_norm": 0.2264089584350586,
"learning_rate": 1.4355517710873184e-05,
"loss": 0.0587,
"num_input_tokens_seen": 219270336,
"step": 480
},
{
"epoch": 18.926829268292682,
"grad_norm": 0.21758581697940826,
"learning_rate": 1.388412052037682e-05,
"loss": 0.0573,
"num_input_tokens_seen": 221604736,
"step": 485
},
{
"epoch": 19.121951219512194,
"grad_norm": 0.21286001801490784,
"learning_rate": 1.3417599122003464e-05,
"loss": 0.0537,
"num_input_tokens_seen": 223843968,
"step": 490
},
{
"epoch": 19.317073170731707,
"grad_norm": 0.19537770748138428,
"learning_rate": 1.2956158147457115e-05,
"loss": 0.0505,
"num_input_tokens_seen": 226198144,
"step": 495
},
{
"epoch": 19.51219512195122,
"grad_norm": 0.18156716227531433,
"learning_rate": 1.2500000000000006e-05,
"loss": 0.0494,
"num_input_tokens_seen": 228521280,
"step": 500
},
{
"epoch": 19.51219512195122,
"eval_loss": 0.9114091396331787,
"eval_runtime": 173.5404,
"eval_samples_per_second": 12.147,
"eval_steps_per_second": 0.507,
"num_input_tokens_seen": 228521280,
"step": 500
}
],
"logging_steps": 5,
"max_steps": 750,
"num_input_tokens_seen": 228521280,
"num_train_epochs": 30,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.694721607846593e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}