{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9982631930527722,
  "eval_steps": 400,
  "global_step": 467,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01068804275217101,
      "grad_norm": 51.9219704867916,
      "learning_rate": 1.0638297872340425e-07,
      "logits/chosen": -1.0090945959091187,
      "logits/rejected": -0.9798948168754578,
      "logps/chosen": -0.2737821936607361,
      "logps/rejected": -0.2714875340461731,
      "loss": 2.9624,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -2.7378220558166504,
      "rewards/margins": -0.02294684387743473,
      "rewards/rejected": -2.7148749828338623,
      "step": 5
    },
    {
      "epoch": 0.02137608550434202,
      "grad_norm": 44.19632899404353,
      "learning_rate": 2.127659574468085e-07,
      "logits/chosen": -1.0468437671661377,
      "logits/rejected": -0.9794818758964539,
      "logps/chosen": -0.2943094074726105,
      "logps/rejected": -0.2997472286224365,
      "loss": 3.2285,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -2.943093776702881,
      "rewards/margins": 0.054378531873226166,
      "rewards/rejected": -2.9974725246429443,
      "step": 10
    },
    {
      "epoch": 0.03206412825651302,
      "grad_norm": 51.06549177485018,
      "learning_rate": 3.1914893617021275e-07,
      "logits/chosen": -0.9672244191169739,
      "logits/rejected": -0.9858204126358032,
      "logps/chosen": -0.2640294134616852,
      "logps/rejected": -0.30089089274406433,
      "loss": 3.2949,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -2.640293836593628,
      "rewards/margins": 0.3686152398586273,
      "rewards/rejected": -3.008909225463867,
      "step": 15
    },
    {
      "epoch": 0.04275217100868404,
      "grad_norm": 74.83681858154256,
      "learning_rate": 4.25531914893617e-07,
      "logits/chosen": -0.9596344232559204,
      "logits/rejected": -0.933236300945282,
      "logps/chosen": -0.2778920531272888,
      "logps/rejected": -0.29140567779541016,
      "loss": 3.0107,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -2.7789206504821777,
      "rewards/margins": 0.13513638079166412,
      "rewards/rejected": -2.9140570163726807,
      "step": 20
    },
    {
      "epoch": 0.053440213760855046,
      "grad_norm": 53.488412779583946,
      "learning_rate": 5.319148936170212e-07,
      "logits/chosen": -1.0089600086212158,
      "logits/rejected": -0.9799430966377258,
      "logps/chosen": -0.27165931463241577,
      "logps/rejected": -0.2783976197242737,
      "loss": 3.2847,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -2.7165932655334473,
      "rewards/margins": 0.06738300621509552,
      "rewards/rejected": -2.7839760780334473,
      "step": 25
    },
    {
      "epoch": 0.06412825651302605,
      "grad_norm": 49.467165701102346,
      "learning_rate": 6.382978723404255e-07,
      "logits/chosen": -0.9974703788757324,
      "logits/rejected": -0.952576756477356,
      "logps/chosen": -0.2734770178794861,
      "logps/rejected": -0.27953463792800903,
      "loss": 3.0768,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -2.734769821166992,
      "rewards/margins": 0.06057664006948471,
      "rewards/rejected": -2.795346736907959,
      "step": 30
    },
    {
      "epoch": 0.07481629926519706,
      "grad_norm": 62.938723083055315,
      "learning_rate": 7.446808510638297e-07,
      "logits/chosen": -1.055626630783081,
      "logits/rejected": -0.9798073768615723,
      "logps/chosen": -0.29555463790893555,
      "logps/rejected": -0.3219638466835022,
      "loss": 3.0274,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -2.9555466175079346,
      "rewards/margins": 0.26409202814102173,
      "rewards/rejected": -3.2196388244628906,
      "step": 35
    },
    {
      "epoch": 0.08550434201736808,
      "grad_norm": 60.78411434363956,
      "learning_rate": 8.51063829787234e-07,
      "logits/chosen": -0.9993880987167358,
      "logits/rejected": -0.9557031393051147,
      "logps/chosen": -0.2811431288719177,
      "logps/rejected": -0.3281245231628418,
      "loss": 2.953,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -2.8114311695098877,
      "rewards/margins": 0.469813734292984,
      "rewards/rejected": -3.281245470046997,
      "step": 40
    },
    {
      "epoch": 0.09619238476953908,
      "grad_norm": 37.98461592095763,
      "learning_rate": 9.574468085106384e-07,
      "logits/chosen": -1.0411790609359741,
      "logits/rejected": -0.9992967844009399,
      "logps/chosen": -0.30747953057289124,
      "logps/rejected": -0.3615890145301819,
      "loss": 3.0174,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -3.0747952461242676,
      "rewards/margins": 0.5410946607589722,
      "rewards/rejected": -3.6158900260925293,
      "step": 45
    },
    {
      "epoch": 0.10688042752171009,
      "grad_norm": 117.4214402638359,
      "learning_rate": 9.998741174712533e-07,
      "logits/chosen": -1.0310847759246826,
      "logits/rejected": -0.9821462631225586,
      "logps/chosen": -0.31808674335479736,
      "logps/rejected": -0.3582245707511902,
      "loss": 3.0624,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -3.1808676719665527,
      "rewards/margins": 0.4013778567314148,
      "rewards/rejected": -3.5822455883026123,
      "step": 50
    },
    {
      "epoch": 0.11756847027388109,
      "grad_norm": 170.90804995599427,
      "learning_rate": 9.991050648838675e-07,
      "logits/chosen": -1.0685880184173584,
      "logits/rejected": -1.0338609218597412,
      "logps/chosen": -0.310200572013855,
      "logps/rejected": -0.37700629234313965,
      "loss": 2.7117,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -3.10200572013855,
      "rewards/margins": 0.6680572628974915,
      "rewards/rejected": -3.770063877105713,
      "step": 55
    },
    {
      "epoch": 0.1282565130260521,
      "grad_norm": 134.13615502558548,
      "learning_rate": 9.97637968732563e-07,
      "logits/chosen": -1.109356164932251,
      "logits/rejected": -1.0756348371505737,
      "logps/chosen": -0.37418466806411743,
      "logps/rejected": -0.3752634525299072,
      "loss": 2.9555,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -3.7418465614318848,
      "rewards/margins": 0.010788190178573132,
      "rewards/rejected": -3.7526347637176514,
      "step": 60
    },
    {
      "epoch": 0.13894455577822312,
      "grad_norm": 62.988442946717754,
      "learning_rate": 9.954748808839674e-07,
      "logits/chosen": -1.0235508680343628,
      "logits/rejected": -0.9943636059761047,
      "logps/chosen": -0.3913114666938782,
      "logps/rejected": -0.4661448001861572,
      "loss": 2.8601,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -3.9131150245666504,
      "rewards/margins": 0.7483334541320801,
      "rewards/rejected": -4.6614484786987305,
      "step": 65
    },
    {
      "epoch": 0.14963259853039412,
      "grad_norm": 43.4379227945051,
      "learning_rate": 9.926188266120295e-07,
      "logits/chosen": -1.0352689027786255,
      "logits/rejected": -1.0102473497390747,
      "logps/chosen": -0.353306382894516,
      "logps/rejected": -0.4401523172855377,
      "loss": 2.8751,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -3.5330634117126465,
      "rewards/margins": 0.8684590458869934,
      "rewards/rejected": -4.401522636413574,
      "step": 70
    },
    {
      "epoch": 0.16032064128256512,
      "grad_norm": 83.11781382244568,
      "learning_rate": 9.890738003669027e-07,
      "logits/chosen": -0.996098518371582,
      "logits/rejected": -0.9254637956619263,
      "logps/chosen": -0.34757333993911743,
      "logps/rejected": -0.413855642080307,
      "loss": 2.8749,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -3.475733518600464,
      "rewards/margins": 0.6628231406211853,
      "rewards/rejected": -4.138556480407715,
      "step": 75
    },
    {
      "epoch": 0.17100868403473615,
      "grad_norm": 51.202568906876266,
      "learning_rate": 9.848447601883433e-07,
      "logits/chosen": -0.9647027850151062,
      "logits/rejected": -0.950391411781311,
      "logps/chosen": -0.3513404130935669,
      "logps/rejected": -0.46568623185157776,
      "loss": 2.6436,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -3.513404369354248,
      "rewards/margins": 1.1434578895568848,
      "rewards/rejected": -4.656861782073975,
      "step": 80
    },
    {
      "epoch": 0.18169672678690715,
      "grad_norm": 70.97672847293224,
      "learning_rate": 9.799376207714444e-07,
      "logits/chosen": -0.9941811561584473,
      "logits/rejected": -0.9714711308479309,
      "logps/chosen": -0.3401820659637451,
      "logps/rejected": -0.40950268507003784,
      "loss": 2.6659,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -3.401820659637451,
      "rewards/margins": 0.6932064890861511,
      "rewards/rejected": -4.095026969909668,
      "step": 85
    },
    {
      "epoch": 0.19238476953907815,
      "grad_norm": 152.3633388876466,
      "learning_rate": 9.743592451943998e-07,
      "logits/chosen": -1.0390641689300537,
      "logits/rejected": -1.002964735031128,
      "logps/chosen": -0.4347335696220398,
      "logps/rejected": -0.5336366891860962,
      "loss": 2.8761,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -4.347336769104004,
      "rewards/margins": 0.9890311360359192,
      "rewards/rejected": -5.336367130279541,
      "step": 90
    },
    {
      "epoch": 0.20307281229124916,
      "grad_norm": 48.723602234021136,
      "learning_rate": 9.681174353198686e-07,
      "logits/chosen": -1.112140417098999,
      "logits/rejected": -1.0275517702102661,
      "logps/chosen": -0.4523504674434662,
      "logps/rejected": -0.5077233910560608,
      "loss": 2.653,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": -4.523504734039307,
      "rewards/margins": 0.5537286996841431,
      "rewards/rejected": -5.077233791351318,
      "step": 95
    },
    {
      "epoch": 0.21376085504342018,
      "grad_norm": 67.84801804788158,
      "learning_rate": 9.612209208833646e-07,
      "logits/chosen": -1.000867486000061,
      "logits/rejected": -0.9747543334960938,
      "logps/chosen": -0.4416694641113281,
      "logps/rejected": -0.5102046728134155,
      "loss": 2.8531,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -4.4166951179504395,
      "rewards/margins": 0.6853527426719666,
      "rewards/rejected": -5.102047920227051,
      "step": 100
    },
    {
      "epoch": 0.22444889779559118,
      "grad_norm": 73.17857628661973,
      "learning_rate": 9.536793472839324e-07,
      "logits/chosen": -1.0074902772903442,
      "logits/rejected": -0.9537180662155151,
      "logps/chosen": -0.40940943360328674,
      "logps/rejected": -0.5277153849601746,
      "loss": 2.9423,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -4.094094276428223,
      "rewards/margins": 1.1830594539642334,
      "rewards/rejected": -5.277153968811035,
      "step": 105
    },
    {
      "epoch": 0.23513694054776219,
      "grad_norm": 58.188153286610884,
      "learning_rate": 9.455032620941839e-07,
      "logits/chosen": -0.9680612683296204,
      "logits/rejected": -0.9060274362564087,
      "logps/chosen": -0.4779647886753082,
      "logps/rejected": -0.6175049543380737,
      "loss": 2.7583,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -4.7796478271484375,
      "rewards/margins": 1.3954023122787476,
      "rewards/rejected": -6.175050258636475,
      "step": 110
    },
    {
      "epoch": 0.2458249832999332,
      "grad_norm": 55.673813232077116,
      "learning_rate": 9.367041003085648e-07,
      "logits/chosen": -1.0318225622177124,
      "logits/rejected": -0.9690453410148621,
      "logps/chosen": -0.5004153251647949,
      "logps/rejected": -0.564782440662384,
      "loss": 2.6268,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -5.004153251647949,
      "rewards/margins": 0.643670916557312,
      "rewards/rejected": -5.647824287414551,
      "step": 115
    },
    {
      "epoch": 0.2565130260521042,
      "grad_norm": 93.24909470705533,
      "learning_rate": 9.272941683504808e-07,
      "logits/chosen": -0.9859496355056763,
      "logits/rejected": -0.892734169960022,
      "logps/chosen": -0.5100774168968201,
      "logps/rejected": -0.7314041256904602,
      "loss": 2.6175,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -5.100773811340332,
      "rewards/margins": 2.213266372680664,
      "rewards/rejected": -7.314040184020996,
      "step": 120
    },
    {
      "epoch": 0.26720106880427524,
      "grad_norm": 62.49186315009438,
      "learning_rate": 9.172866268606513e-07,
      "logits/chosen": -1.0501500368118286,
      "logits/rejected": -1.0046356916427612,
      "logps/chosen": -0.568703293800354,
      "logps/rejected": -0.6640041470527649,
      "loss": 2.4292,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -5.687033176422119,
      "rewards/margins": 0.9530088305473328,
      "rewards/rejected": -6.640041351318359,
      "step": 125
    },
    {
      "epoch": 0.27788911155644624,
      "grad_norm": 94.37106722966159,
      "learning_rate": 9.066954722907638e-07,
      "logits/chosen": -1.0626311302185059,
      "logits/rejected": -1.0533256530761719,
      "logps/chosen": -0.5523235201835632,
      "logps/rejected": -0.8242877125740051,
      "loss": 2.3709,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -5.5232343673706055,
      "rewards/margins": 2.719642162322998,
      "rewards/rejected": -8.242877006530762,
      "step": 130
    },
    {
      "epoch": 0.28857715430861725,
      "grad_norm": 78.14100135139738,
      "learning_rate": 8.955355173281707e-07,
      "logits/chosen": -1.0362498760223389,
      "logits/rejected": -0.9840585589408875,
      "logps/chosen": -0.5984936952590942,
      "logps/rejected": -0.7128698825836182,
      "loss": 2.355,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -5.984936714172363,
      "rewards/margins": 1.1437618732452393,
      "rewards/rejected": -7.128698825836182,
      "step": 135
    },
    {
      "epoch": 0.29926519706078825,
      "grad_norm": 80.07356057826689,
      "learning_rate": 8.838223701790055e-07,
      "logits/chosen": -1.0999033451080322,
      "logits/rejected": -1.0737004280090332,
      "logps/chosen": -0.719946026802063,
      "logps/rejected": -0.8201924562454224,
      "loss": 2.4387,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -7.199460029602051,
      "rewards/margins": 1.0024646520614624,
      "rewards/rejected": -8.201925277709961,
      "step": 140
    },
    {
      "epoch": 0.30995323981295925,
      "grad_norm": 90.63467648251373,
      "learning_rate": 8.71572412738697e-07,
      "logits/chosen": -1.0016281604766846,
      "logits/rejected": -0.9718937873840332,
      "logps/chosen": -0.6957625150680542,
      "logps/rejected": -0.89495849609375,
      "loss": 2.3174,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -6.957624912261963,
      "rewards/margins": 1.991959810256958,
      "rewards/rejected": -8.9495849609375,
      "step": 145
    },
    {
      "epoch": 0.32064128256513025,
      "grad_norm": 73.56126608282666,
      "learning_rate": 8.588027776804058e-07,
      "logits/chosen": -1.0221322774887085,
      "logits/rejected": -0.9965847134590149,
      "logps/chosen": -0.7128551006317139,
      "logps/rejected": -0.896537184715271,
      "loss": 2.3663,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -7.128551483154297,
      "rewards/margins": 1.8368213176727295,
      "rewards/rejected": -8.965372085571289,
      "step": 150
    },
    {
      "epoch": 0.33132932531730125,
      "grad_norm": 81.6804828395219,
      "learning_rate": 8.455313244934324e-07,
      "logits/chosen": -1.016101598739624,
      "logits/rejected": -0.9916119575500488,
      "logps/chosen": -0.7570836544036865,
      "logps/rejected": -1.0055702924728394,
      "loss": 2.1714,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -7.570836544036865,
      "rewards/margins": 2.4848668575286865,
      "rewards/rejected": -10.055703163146973,
      "step": 155
    },
    {
      "epoch": 0.3420173680694723,
      "grad_norm": 99.4901399654396,
      "learning_rate": 8.317766145051057e-07,
      "logits/chosen": -1.0358034372329712,
      "logits/rejected": -1.015195608139038,
      "logps/chosen": -0.8496212959289551,
      "logps/rejected": -1.2102388143539429,
      "loss": 2.1439,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -8.49621295928955,
      "rewards/margins": 3.606174945831299,
      "rewards/rejected": -12.102388381958008,
      "step": 160
    },
    {
      "epoch": 0.3527054108216433,
      "grad_norm": 70.81537442159531,
      "learning_rate": 8.175578849210894e-07,
      "logits/chosen": -1.025247573852539,
      "logits/rejected": -0.9949723482131958,
      "logps/chosen": -0.9383013844490051,
      "logps/rejected": -1.2926217317581177,
      "loss": 2.1748,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -9.383014678955078,
      "rewards/margins": 3.5432026386260986,
      "rewards/rejected": -12.926218032836914,
      "step": 165
    },
    {
      "epoch": 0.3633934535738143,
      "grad_norm": 87.64514196797495,
      "learning_rate": 8.028950219204099e-07,
      "logits/chosen": -1.0285688638687134,
      "logits/rejected": -1.003366470336914,
      "logps/chosen": -0.8483174443244934,
      "logps/rejected": -1.227891445159912,
      "loss": 1.9588,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -8.483174324035645,
      "rewards/margins": 3.7957394123077393,
      "rewards/rejected": -12.278913497924805,
      "step": 170
    },
    {
      "epoch": 0.3740814963259853,
      "grad_norm": 112.86629209538306,
      "learning_rate": 7.878085328428368e-07,
      "logits/chosen": -1.0398296117782593,
      "logits/rejected": -0.9856590032577515,
      "logps/chosen": -0.975140392780304,
      "logps/rejected": -1.1828876733779907,
      "loss": 1.8418,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -9.75140380859375,
      "rewards/margins": 2.07747220993042,
      "rewards/rejected": -11.828875541687012,
      "step": 175
    },
    {
      "epoch": 0.3847695390781563,
      "grad_norm": 78.96808907768161,
      "learning_rate": 7.723195175075135e-07,
      "logits/chosen": -0.9896231889724731,
      "logits/rejected": -0.9649994969367981,
      "logps/chosen": -0.9527977108955383,
      "logps/rejected": -1.2905272245407104,
      "loss": 1.8762,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -9.527976036071777,
      "rewards/margins": 3.377295732498169,
      "rewards/rejected": -12.9052734375,
      "step": 180
    },
    {
      "epoch": 0.3954575818303273,
      "grad_norm": 108.85803981934055,
      "learning_rate": 7.564496387029531e-07,
      "logits/chosen": -1.0348621606826782,
      "logits/rejected": -0.9732630848884583,
      "logps/chosen": -1.0269982814788818,
      "logps/rejected": -1.4066517353057861,
      "loss": 1.7941,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -10.269983291625977,
      "rewards/margins": 3.7965340614318848,
      "rewards/rejected": -14.06651782989502,
      "step": 185
    },
    {
      "epoch": 0.4061456245824983,
      "grad_norm": 88.17963395634109,
      "learning_rate": 7.402210918896689e-07,
      "logits/chosen": -1.0109108686447144,
      "logits/rejected": -1.016540765762329,
      "logps/chosen": -1.1914116144180298,
      "logps/rejected": -1.6932926177978516,
      "loss": 1.7838,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -11.914116859436035,
      "rewards/margins": 5.018809795379639,
      "rewards/rejected": -16.932926177978516,
      "step": 190
    },
    {
      "epoch": 0.4168336673346693,
      "grad_norm": 93.26710505616924,
      "learning_rate": 7.236565741578162e-07,
      "logits/chosen": -0.9588305354118347,
      "logits/rejected": -0.9351094961166382,
      "logps/chosen": -1.1738094091415405,
      "logps/rejected": -1.507836103439331,
      "loss": 1.7312,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -11.738093376159668,
      "rewards/margins": 3.340266704559326,
      "rewards/rejected": -15.078363418579102,
      "step": 195
    },
    {
      "epoch": 0.42752171008684037,
      "grad_norm": 148.12833430588864,
      "learning_rate": 7.067792524832603e-07,
      "logits/chosen": -0.962164044380188,
      "logits/rejected": -0.946324348449707,
      "logps/chosen": -1.190048098564148,
      "logps/rejected": -1.5737119913101196,
      "loss": 1.8488,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -11.900480270385742,
      "rewards/margins": 3.8366382122039795,
      "rewards/rejected": -15.737119674682617,
      "step": 200
    },
    {
      "epoch": 0.43820975283901137,
      "grad_norm": 88.33539547998033,
      "learning_rate": 6.896127313264642e-07,
      "logits/chosen": -0.9857925176620483,
      "logits/rejected": -0.9326913952827454,
      "logps/chosen": -1.2696114778518677,
      "logps/rejected": -1.6608413457870483,
      "loss": 1.844,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -12.696113586425781,
      "rewards/margins": 3.9123005867004395,
      "rewards/rejected": -16.608415603637695,
      "step": 205
    },
    {
      "epoch": 0.44889779559118237,
      "grad_norm": 96.6188335214616,
      "learning_rate": 6.721810196195174e-07,
      "logits/chosen": -1.0050965547561646,
      "logits/rejected": -0.9934624433517456,
      "logps/chosen": -1.2922513484954834,
      "logps/rejected": -1.710461974143982,
      "loss": 1.6898,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -12.922513961791992,
      "rewards/margins": 4.182106018066406,
      "rewards/rejected": -17.10462188720703,
      "step": 210
    },
    {
      "epoch": 0.45958583834335337,
      "grad_norm": 96.68880124766956,
      "learning_rate": 6.545084971874736e-07,
      "logits/chosen": -0.9563829302787781,
      "logits/rejected": -0.9366313815116882,
      "logps/chosen": -1.364743947982788,
      "logps/rejected": -1.8573719263076782,
      "loss": 1.5133,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -13.647438049316406,
      "rewards/margins": 4.926279067993164,
      "rewards/rejected": -18.573719024658203,
      "step": 215
    },
    {
      "epoch": 0.47027388109552437,
      "grad_norm": 150.34366732566772,
      "learning_rate": 6.3661988065096e-07,
      "logits/chosen": -1.0227452516555786,
      "logits/rejected": -1.0000264644622803,
      "logps/chosen": -1.4215940237045288,
      "logps/rejected": -1.926209807395935,
      "loss": 1.6503,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -14.21593952178955,
      "rewards/margins": 5.0461578369140625,
      "rewards/rejected": -19.262096405029297,
      "step": 220
    },
    {
      "epoch": 0.48096192384769537,
      "grad_norm": 118.50430730198147,
      "learning_rate": 6.185401888577487e-07,
      "logits/chosen": -0.9958481788635254,
      "logits/rejected": -0.9570272564888,
      "logps/chosen": -1.4551509618759155,
      "logps/rejected": -1.8940250873565674,
      "loss": 1.4789,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -14.551508903503418,
      "rewards/margins": 4.388740062713623,
      "rewards/rejected": -18.940250396728516,
      "step": 225
    },
    {
      "epoch": 0.4916499665998664,
      "grad_norm": 97.94256873991337,
      "learning_rate": 6.002947078916364e-07,
      "logits/chosen": -1.0675132274627686,
      "logits/rejected": -1.012401819229126,
      "logps/chosen": -1.4190456867218018,
      "logps/rejected": -1.884321928024292,
      "loss": 1.5692,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -14.190455436706543,
      "rewards/margins": 4.652764320373535,
      "rewards/rejected": -18.843217849731445,
      "step": 230
    },
    {
      "epoch": 0.5023380093520374,
      "grad_norm": 97.69525089331223,
      "learning_rate": 5.819089557075688e-07,
      "logits/chosen": -1.0891209840774536,
      "logits/rejected": -1.0585644245147705,
      "logps/chosen": -1.4393976926803589,
      "logps/rejected": -1.9622634649276733,
      "loss": 1.625,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -14.393977165222168,
      "rewards/margins": 5.228659152984619,
      "rewards/rejected": -19.622636795043945,
      "step": 235
    },
    {
      "epoch": 0.5130260521042084,
      "grad_norm": 75.47943033357323,
      "learning_rate": 5.634086464424742e-07,
      "logits/chosen": -1.0535987615585327,
      "logits/rejected": -1.051977276802063,
      "logps/chosen": -1.3285123109817505,
      "logps/rejected": -1.7905857563018799,
      "loss": 1.559,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -13.285120964050293,
      "rewards/margins": 4.620734691619873,
      "rewards/rejected": -17.90585708618164,
      "step": 240
    },
    {
      "epoch": 0.5237140948563794,
      "grad_norm": 100.04007765445822,
      "learning_rate": 5.448196544517167e-07,
      "logits/chosen": -1.1635222434997559,
      "logits/rejected": -1.0964869260787964,
      "logps/chosen": -1.320629358291626,
      "logps/rejected": -1.8691266775131226,
      "loss": 1.4185,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -13.206293106079102,
      "rewards/margins": 5.484975337982178,
      "rewards/rejected": -18.691268920898438,
      "step": 245
    },
    {
      "epoch": 0.5344021376085505,
      "grad_norm": 90.6563420202814,
      "learning_rate": 5.26167978121472e-07,
      "logits/chosen": -1.084279179573059,
      "logits/rejected": -1.0659908056259155,
      "logps/chosen": -1.389957308769226,
      "logps/rejected": -1.9566434621810913,
      "loss": 1.481,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -13.899574279785156,
      "rewards/margins": 5.666860580444336,
      "rewards/rejected": -19.56643295288086,
      "step": 250
    },
    {
      "epoch": 0.5450901803607214,
      "grad_norm": 182.74327675067556,
      "learning_rate": 5.074797035076318e-07,
      "logits/chosen": -1.1342357397079468,
      "logits/rejected": -1.1009563207626343,
      "logps/chosen": -1.4785124063491821,
      "logps/rejected": -1.9460515975952148,
      "loss": 1.5455,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -14.785125732421875,
      "rewards/margins": 4.67539119720459,
      "rewards/rejected": -19.460514068603516,
      "step": 255
    },
    {
      "epoch": 0.5557782231128925,
      "grad_norm": 107.72294875535913,
      "learning_rate": 4.887809678520975e-07,
      "logits/chosen": -1.0977184772491455,
      "logits/rejected": -1.0647294521331787,
      "logps/chosen": -1.377906322479248,
      "logps/rejected": -1.8358585834503174,
      "loss": 1.4211,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -13.779062271118164,
      "rewards/margins": 4.579524040222168,
      "rewards/rejected": -18.358585357666016,
      "step": 260
    },
    {
      "epoch": 0.5664662658650634,
      "grad_norm": 75.63967900485645,
      "learning_rate": 4.700979230274829e-07,
      "logits/chosen": -1.046972393989563,
      "logits/rejected": -1.027153730392456,
      "logps/chosen": -1.4691593647003174,
      "logps/rejected": -1.9394676685333252,
      "loss": 1.4695,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -14.6915922164917,
      "rewards/margins": 4.703083038330078,
      "rewards/rejected": -19.394678115844727,
      "step": 265
    },
    {
      "epoch": 0.5771543086172345,
      "grad_norm": 193.81178663729204,
      "learning_rate": 4.514566989613559e-07,
      "logits/chosen": -1.0509687662124634,
      "logits/rejected": -1.0184508562088013,
      "logps/chosen": -1.3223508596420288,
      "logps/rejected": -1.8159472942352295,
      "loss": 1.5709,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -13.22350788116455,
      "rewards/margins": 4.935965061187744,
      "rewards/rejected": -18.159473419189453,
      "step": 270
    },
    {
      "epoch": 0.5878423513694054,
      "grad_norm": 93.1736953697589,
      "learning_rate": 4.328833670911724e-07,
      "logits/chosen": -1.0397508144378662,
      "logits/rejected": -0.9964058995246887,
      "logps/chosen": -1.2974085807800293,
      "logps/rejected": -1.7082370519638062,
      "loss": 1.551,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -12.974085807800293,
      "rewards/margins": 4.108286380767822,
      "rewards/rejected": -17.08237075805664,
      "step": 275
    },
    {
      "epoch": 0.5985303941215765,
      "grad_norm": 76.2073903395276,
      "learning_rate": 4.144039039010124e-07,
      "logits/chosen": -1.1180676221847534,
      "logits/rejected": -1.09043550491333,
      "logps/chosen": -1.346557378768921,
      "logps/rejected": -1.8500111103057861,
      "loss": 1.4682,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -13.46557331085205,
      "rewards/margins": 5.034535884857178,
      "rewards/rejected": -18.500110626220703,
      "step": 280
    },
    {
      "epoch": 0.6092184368737475,
      "grad_norm": 94.07989298570106,
      "learning_rate": 3.960441545911204e-07,
      "logits/chosen": -1.0859159231185913,
      "logits/rejected": -1.0468647480010986,
      "logps/chosen": -1.399982213973999,
      "logps/rejected": -1.926845908164978,
      "loss": 1.2667,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -13.999821662902832,
      "rewards/margins": 5.268637657165527,
      "rewards/rejected": -19.26845932006836,
      "step": 285
    },
    {
      "epoch": 0.6199064796259185,
      "grad_norm": 89.07279022649925,
      "learning_rate": 3.778297969310529e-07,
      "logits/chosen": -1.110459327697754,
      "logits/rejected": -1.0649018287658691,
      "logps/chosen": -1.3886842727661133,
      "logps/rejected": -1.8522567749023438,
      "loss": 1.5307,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -13.886842727661133,
      "rewards/margins": 4.635725975036621,
      "rewards/rejected": -18.522567749023438,
      "step": 290
    },
    {
      "epoch": 0.6305945223780896,
      "grad_norm": 104.92194222567683,
      "learning_rate": 3.5978630534699865e-07,
      "logits/chosen": -1.0469499826431274,
      "logits/rejected": -1.0278840065002441,
      "logps/chosen": -1.4743906259536743,
      "logps/rejected": -1.9389221668243408,
      "loss": 1.3919,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -14.743906021118164,
      "rewards/margins": 4.645317077636719,
      "rewards/rejected": -19.38922119140625,
      "step": 295
    },
    {
      "epoch": 0.6412825651302605,
      "grad_norm": 115.157031251591,
      "learning_rate": 3.4193891529348795e-07,
      "logits/chosen": -0.9734314680099487,
      "logits/rejected": -0.9448921084403992,
      "logps/chosen": -1.4543259143829346,
      "logps/rejected": -1.8318170309066772,
      "loss": 1.7971,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -14.543258666992188,
      "rewards/margins": 3.7749099731445312,
      "rewards/rejected": -18.31817054748535,
      "step": 300
    },
    {
      "epoch": 0.6519706078824316,
      "grad_norm": 87.78878822591881,
      "learning_rate": 3.243125879593286e-07,
      "logits/chosen": -1.0921169519424438,
      "logits/rejected": -1.041258692741394,
      "logps/chosen": -1.3990426063537598,
      "logps/rejected": -1.7868198156356812,
      "loss": 1.4931,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -13.990427017211914,
      "rewards/margins": 3.877769947052002,
      "rewards/rejected": -17.86819839477539,
      "step": 305
    },
    {
      "epoch": 0.6626586506346025,
      "grad_norm": 105.06509533230586,
      "learning_rate": 3.069319753571269e-07,
      "logits/chosen": -1.1367733478546143,
      "logits/rejected": -1.1123126745224,
      "logps/chosen": -1.4809437990188599,
      "logps/rejected": -1.9527852535247803,
      "loss": 1.6001,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -14.80943775177002,
      "rewards/margins": 4.718415260314941,
      "rewards/rejected": -19.52785301208496,
      "step": 310
    },
    {
      "epoch": 0.6733466933867736,
      "grad_norm": 100.34303754111261,
      "learning_rate": 2.898213858452173e-07,
      "logits/chosen": -1.1173804998397827,
      "logits/rejected": -1.0558984279632568,
      "logps/chosen": -1.3943215608596802,
      "logps/rejected": -1.851544976234436,
      "loss": 1.4087,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -13.943216323852539,
      "rewards/margins": 4.572234153747559,
      "rewards/rejected": -18.51544761657715,
      "step": 315
    },
    {
      "epoch": 0.6840347361389446,
      "grad_norm": 113.86543674722715,
      "learning_rate": 2.730047501302266e-07,
      "logits/chosen": -1.0984666347503662,
      "logits/rejected": -1.0932129621505737,
      "logps/chosen": -1.4517738819122314,
      "logps/rejected": -2.008024215698242,
      "loss": 1.4795,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -14.517735481262207,
      "rewards/margins": 5.562503814697266,
      "rewards/rejected": -20.080242156982422,
      "step": 320
    },
    {
      "epoch": 0.6947227788911156,
      "grad_norm": 99.61197741979623,
      "learning_rate": 2.5650558779781635e-07,
      "logits/chosen": -1.116877794265747,
      "logits/rejected": -1.0635037422180176,
      "logps/chosen": -1.6024644374847412,
      "logps/rejected": -2.2202725410461426,
      "loss": 1.4448,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -16.02464485168457,
      "rewards/margins": 6.178084373474121,
      "rewards/rejected": -22.202730178833008,
      "step": 325
    },
    {
      "epoch": 0.7054108216432866,
      "grad_norm": 77.71480741385236,
      "learning_rate": 2.403469744184154e-07,
      "logits/chosen": -1.0377768278121948,
      "logits/rejected": -0.9935773611068726,
      "logps/chosen": -1.4898747205734253,
      "logps/rejected": -1.9272472858428955,
      "loss": 1.4709,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -14.898748397827148,
      "rewards/margins": 4.37372350692749,
      "rewards/rejected": -19.272472381591797,
      "step": 330
    },
    {
      "epoch": 0.7160988643954576,
      "grad_norm": 89.73568696356142,
      "learning_rate": 2.2455150927394878e-07,
      "logits/chosen": -1.0675146579742432,
      "logits/rejected": -1.0462026596069336,
      "logps/chosen": -1.4130958318710327,
      "logps/rejected": -1.9325294494628906,
      "loss": 1.39,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -14.130956649780273,
"rewards/margins": 5.194336891174316, | |
"rewards/rejected": -19.32529640197754, | |
"step": 335 | |
}, | |
{ | |
"epoch": 0.7267869071476286, | |
"grad_norm": 110.68879818809171, | |
"learning_rate": 2.0914128375069722e-07, | |
"logits/chosen": -1.0806689262390137, | |
"logits/rejected": -1.042923927307129, | |
"logps/chosen": -1.4482777118682861, | |
"logps/rejected": -1.9667714834213257, | |
"loss": 1.4947, | |
"rewards/accuracies": 0.8125, | |
"rewards/chosen": -14.48277759552002, | |
"rewards/margins": 5.184937000274658, | |
"rewards/rejected": -19.667715072631836, | |
"step": 340 | |
}, | |
{ | |
"epoch": 0.7374749498997996, | |
"grad_norm": 118.37607191091168, | |
"learning_rate": 1.9413785044249676e-07, | |
"logits/chosen": -1.1191717386245728, | |
"logits/rejected": -1.0921493768692017, | |
"logps/chosen": -1.5390311479568481, | |
"logps/rejected": -2.1164662837982178, | |
"loss": 1.6411, | |
"rewards/accuracies": 0.856249988079071, | |
"rewards/chosen": -15.390310287475586, | |
"rewards/margins": 5.774353981018066, | |
"rewards/rejected": -21.164663314819336, | |
"step": 345 | |
}, | |
{ | |
"epoch": 0.7481629926519706, | |
"grad_norm": 177.61167476414326, | |
"learning_rate": 1.7956219300748792e-07, | |
"logits/chosen": -1.1068302392959595, | |
"logits/rejected": -1.1092784404754639, | |
"logps/chosen": -1.4487911462783813, | |
"logps/rejected": -1.9363235235214233, | |
"loss": 1.5257, | |
"rewards/accuracies": 0.831250011920929, | |
"rewards/chosen": -14.487910270690918, | |
"rewards/margins": 4.875324726104736, | |
"rewards/rejected": -19.363237380981445, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.7588510354041417, | |
"grad_norm": 93.0216947858328, | |
"learning_rate": 1.6543469682057104e-07, | |
"logits/chosen": -1.0323774814605713, | |
"logits/rejected": -1.0446354150772095, | |
"logps/chosen": -1.3830714225769043, | |
"logps/rejected": -1.8855358362197876, | |
"loss": 1.352, | |
"rewards/accuracies": 0.856249988079071, | |
"rewards/chosen": -13.830714225769043, | |
"rewards/margins": 5.02464485168457, | |
"rewards/rejected": -18.855358123779297, | |
"step": 355 | |
}, | |
{ | |
"epoch": 0.7695390781563126, | |
"grad_norm": 94.72973814318713, | |
"learning_rate": 1.5177512046261666e-07, | |
"logits/chosen": -1.0865602493286133, | |
"logits/rejected": -1.0826921463012695, | |
"logps/chosen": -1.4152530431747437, | |
"logps/rejected": -2.0346670150756836, | |
"loss": 1.4234, | |
"rewards/accuracies": 0.831250011920929, | |
"rewards/chosen": -14.1525297164917, | |
"rewards/margins": 6.1941399574279785, | |
"rewards/rejected": -20.346668243408203, | |
"step": 360 | |
}, | |
{ | |
"epoch": 0.7802271209084837, | |
"grad_norm": 68.4731130261061, | |
"learning_rate": 1.3860256808630427e-07, | |
"logits/chosen": -1.135087490081787, | |
"logits/rejected": -1.059451937675476, | |
"logps/chosen": -1.4586212635040283, | |
"logps/rejected": -2.061830997467041, | |
"loss": 1.4973, | |
"rewards/accuracies": 0.824999988079071, | |
"rewards/chosen": -14.586212158203125, | |
"rewards/margins": 6.0320963859558105, | |
"rewards/rejected": -20.618310928344727, | |
"step": 365 | |
}, | |
{ | |
"epoch": 0.7909151636606546, | |
"grad_norm": 121.84202884529543, | |
"learning_rate": 1.2593546269723647e-07, | |
"logits/chosen": -1.0569720268249512, | |
"logits/rejected": -1.040205717086792, | |
"logps/chosen": -1.4913088083267212, | |
"logps/rejected": -1.9543384313583374, | |
"loss": 1.5364, | |
"rewards/accuracies": 0.84375, | |
"rewards/chosen": -14.913087844848633, | |
"rewards/margins": 4.630299091339111, | |
"rewards/rejected": -19.543384552001953, | |
"step": 370 | |
}, | |
{ | |
"epoch": 0.8016032064128257, | |
"grad_norm": 132.37754504186287, | |
"learning_rate": 1.1379152038770029e-07, | |
"logits/chosen": -1.0853582620620728, | |
"logits/rejected": -1.0880931615829468, | |
"logps/chosen": -1.561675786972046, | |
"logps/rejected": -2.0865683555603027, | |
"loss": 1.4922, | |
"rewards/accuracies": 0.8187500238418579, | |
"rewards/chosen": -15.6167573928833, | |
"rewards/margins": 5.24892520904541, | |
"rewards/rejected": -20.86568260192871, | |
"step": 375 | |
}, | |
{ | |
"epoch": 0.8122912491649966, | |
"grad_norm": 146.30353813707708, | |
"learning_rate": 1.0218772555910954e-07, | |
"logits/chosen": -1.1082967519760132, | |
"logits/rejected": -1.084113359451294, | |
"logps/chosen": -1.449220895767212, | |
"logps/rejected": -1.9152443408966064, | |
"loss": 1.6124, | |
"rewards/accuracies": 0.831250011920929, | |
"rewards/chosen": -14.492208480834961, | |
"rewards/margins": 4.660233974456787, | |
"rewards/rejected": -19.15244483947754, | |
"step": 380 | |
}, | |
{ | |
"epoch": 0.8229792919171677, | |
"grad_norm": 77.25479077025736, | |
"learning_rate": 9.114030716778432e-08, | |
"logits/chosen": -1.0906922817230225, | |
"logits/rejected": -1.0660183429718018, | |
"logps/chosen": -1.457859992980957, | |
"logps/rejected": -2.104210138320923, | |
"loss": 1.2879, | |
"rewards/accuracies": 0.856249988079071, | |
"rewards/chosen": -14.578600883483887, | |
"rewards/margins": 6.4635009765625, | |
"rewards/rejected": -21.042102813720703, | |
"step": 385 | |
}, | |
{ | |
"epoch": 0.8336673346693386, | |
"grad_norm": 93.07916389497416, | |
"learning_rate": 8.066471602728803e-08, | |
"logits/chosen": -1.1058779954910278, | |
"logits/rejected": -1.0853393077850342, | |
"logps/chosen": -1.526141881942749, | |
"logps/rejected": -2.099813461303711, | |
"loss": 1.3994, | |
"rewards/accuracies": 0.8125, | |
"rewards/chosen": -15.261419296264648, | |
"rewards/margins": 5.736716270446777, | |
"rewards/rejected": -20.99813461303711, | |
"step": 390 | |
}, | |
{ | |
"epoch": 0.8443553774215097, | |
"grad_norm": 101.69355206701071, | |
"learning_rate": 7.077560319906694e-08, | |
"logits/chosen": -1.1104137897491455, | |
"logits/rejected": -1.0867125988006592, | |
"logps/chosen": -1.461976408958435, | |
"logps/rejected": -2.0008349418640137, | |
"loss": 1.5022, | |
"rewards/accuracies": 0.8187500238418579, | |
"rewards/chosen": -14.61976432800293, | |
"rewards/margins": 5.388585090637207, | |
"rewards/rejected": -20.008350372314453, | |
"step": 395 | |
}, | |
{ | |
"epoch": 0.8550434201736807, | |
"grad_norm": 78.27794611977339, | |
"learning_rate": 6.148679950161672e-08, | |
"logits/chosen": -1.1054575443267822, | |
"logits/rejected": -1.086721658706665, | |
"logps/chosen": -1.4834736585617065, | |
"logps/rejected": -1.9632450342178345, | |
"loss": 1.353, | |
"rewards/accuracies": 0.800000011920929, | |
"rewards/chosen": -14.834736824035645, | |
"rewards/margins": 4.797713756561279, | |
"rewards/rejected": -19.6324520111084, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.8550434201736807, | |
"eval_logits/chosen": -1.3153965473175049, | |
"eval_logits/rejected": -1.323797345161438, | |
"eval_logps/chosen": -1.4682824611663818, | |
"eval_logps/rejected": -1.983279824256897, | |
"eval_loss": 1.3837593793869019, | |
"eval_rewards/accuracies": 0.8231707215309143, | |
"eval_rewards/chosen": -14.68282413482666, | |
"eval_rewards/margins": 5.149974822998047, | |
"eval_rewards/rejected": -19.83279800415039, | |
"eval_runtime": 89.1693, | |
"eval_samples_per_second": 21.992, | |
"eval_steps_per_second": 1.379, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.8657314629258517, | |
"grad_norm": 123.22959345953065, | |
"learning_rate": 5.2811296166831666e-08, | |
"logits/chosen": -1.072684645652771, | |
"logits/rejected": -1.0882729291915894, | |
"logps/chosen": -1.541043996810913, | |
"logps/rejected": -1.9995849132537842, | |
"loss": 1.3185, | |
"rewards/accuracies": 0.8687499761581421, | |
"rewards/chosen": -15.410443305969238, | |
"rewards/margins": 4.585409641265869, | |
"rewards/rejected": -19.995851516723633, | |
"step": 405 | |
}, | |
{ | |
"epoch": 0.8764195056780227, | |
"grad_norm": 147.91474233820844, | |
"learning_rate": 4.4761226670592066e-08, | |
"logits/chosen": -1.0857017040252686, | |
"logits/rejected": -1.0723140239715576, | |
"logps/chosen": -1.4827214479446411, | |
"logps/rejected": -1.9837812185287476, | |
"loss": 1.4599, | |
"rewards/accuracies": 0.8187500238418579, | |
"rewards/chosen": -14.827215194702148, | |
"rewards/margins": 5.010597229003906, | |
"rewards/rejected": -19.837810516357422, | |
"step": 410 | |
}, | |
{ | |
"epoch": 0.8871075484301937, | |
"grad_norm": 91.16953725496764, | |
"learning_rate": 3.734784976300165e-08, | |
"logits/chosen": -1.08389413356781, | |
"logits/rejected": -1.025193691253662, | |
"logps/chosen": -1.436957597732544, | |
"logps/rejected": -2.0282580852508545, | |
"loss": 1.5869, | |
"rewards/accuracies": 0.8374999761581421, | |
"rewards/chosen": -14.369573593139648, | |
"rewards/margins": 5.913008213043213, | |
"rewards/rejected": -20.282581329345703, | |
"step": 415 | |
}, | |
{ | |
"epoch": 0.8977955911823647, | |
"grad_norm": 107.28232680425738, | |
"learning_rate": 3.058153372200695e-08, | |
"logits/chosen": -1.1248770952224731, | |
"logits/rejected": -1.0667715072631836, | |
"logps/chosen": -1.3739981651306152, | |
"logps/rejected": -1.912716269493103, | |
"loss": 1.4124, | |
"rewards/accuracies": 0.8500000238418579, | |
"rewards/chosen": -13.739982604980469, | |
"rewards/margins": 5.387181282043457, | |
"rewards/rejected": -19.12716293334961, | |
"step": 420 | |
}, | |
{ | |
"epoch": 0.9084836339345357, | |
"grad_norm": 120.49009743628018, | |
"learning_rate": 2.4471741852423233e-08, | |
"logits/chosen": -1.1182382106781006, | |
"logits/rejected": -1.1063783168792725, | |
"logps/chosen": -1.5358679294586182, | |
"logps/rejected": -1.9908769130706787, | |
"loss": 1.6669, | |
"rewards/accuracies": 0.824999988079071, | |
"rewards/chosen": -15.358678817749023, | |
"rewards/margins": 4.5500898361206055, | |
"rewards/rejected": -19.908771514892578, | |
"step": 425 | |
}, | |
{ | |
"epoch": 0.9191716766867067, | |
"grad_norm": 121.92864479159549, | |
"learning_rate": 1.9027019250647036e-08, | |
"logits/chosen": -1.0952109098434448, | |
"logits/rejected": -1.0773556232452393, | |
"logps/chosen": -1.5638394355773926, | |
"logps/rejected": -2.070910930633545, | |
"loss": 1.4596, | |
"rewards/accuracies": 0.824999988079071, | |
"rewards/chosen": -15.638395309448242, | |
"rewards/margins": 5.070715427398682, | |
"rewards/rejected": -20.709110260009766, | |
"step": 430 | |
}, | |
{ | |
"epoch": 0.9298597194388778, | |
"grad_norm": 105.14729635844223, | |
"learning_rate": 1.4254980853566246e-08, | |
"logits/chosen": -1.0598957538604736, | |
"logits/rejected": -1.0141620635986328, | |
"logps/chosen": -1.4233707189559937, | |
"logps/rejected": -1.9325555562973022, | |
"loss": 1.4575, | |
"rewards/accuracies": 0.875, | |
"rewards/chosen": -14.233708381652832, | |
"rewards/margins": 5.091846466064453, | |
"rewards/rejected": -19.3255558013916, | |
"step": 435 | |
}, | |
{ | |
"epoch": 0.9405477621910487, | |
"grad_norm": 84.89716957923389, | |
"learning_rate": 1.016230078838226e-08, | |
"logits/chosen": -1.0672032833099365, | |
"logits/rejected": -1.004197359085083, | |
"logps/chosen": -1.4600632190704346, | |
"logps/rejected": -1.9247214794158936, | |
"loss": 1.3976, | |
"rewards/accuracies": 0.8187500238418579, | |
"rewards/chosen": -14.60063362121582, | |
"rewards/margins": 4.646584510803223, | |
"rewards/rejected": -19.24721908569336, | |
"step": 440 | |
}, | |
{ | |
"epoch": 0.9512358049432198, | |
"grad_norm": 100.8050659107904, | |
"learning_rate": 6.754703038239329e-09, | |
"logits/chosen": -1.0271015167236328, | |
"logits/rejected": -1.0067023038864136, | |
"logps/chosen": -1.479617953300476, | |
"logps/rejected": -2.0660512447357178, | |
"loss": 1.1916, | |
"rewards/accuracies": 0.862500011920929, | |
"rewards/chosen": -14.796180725097656, | |
"rewards/margins": 5.86433219909668, | |
"rewards/rejected": -20.660511016845703, | |
"step": 445 | |
}, | |
{ | |
"epoch": 0.9619238476953907, | |
"grad_norm": 108.38487580301923, | |
"learning_rate": 4.036953436716895e-09, | |
"logits/chosen": -1.1366522312164307, | |
"logits/rejected": -1.1128257513046265, | |
"logps/chosen": -1.4388145208358765, | |
"logps/rejected": -1.9377214908599854, | |
"loss": 1.5938, | |
"rewards/accuracies": 0.8125, | |
"rewards/chosen": -14.388147354125977, | |
"rewards/margins": 4.989068031311035, | |
"rewards/rejected": -19.377216339111328, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.9726118904475618, | |
"grad_norm": 103.77485773787681, | |
"learning_rate": 2.0128530023804656e-09, | |
"logits/chosen": -1.1026852130889893, | |
"logits/rejected": -1.0658283233642578, | |
"logps/chosen": -1.4252578020095825, | |
"logps/rejected": -2.0259361267089844, | |
"loss": 1.0798, | |
"rewards/accuracies": 0.8999999761581421, | |
"rewards/chosen": -14.25257682800293, | |
"rewards/margins": 6.006783485412598, | |
"rewards/rejected": -20.259363174438477, | |
"step": 455 | |
}, | |
{ | |
"epoch": 0.9832999331997327, | |
"grad_norm": 94.49790728355107, | |
"learning_rate": 6.852326227130833e-10, | |
"logits/chosen": -1.1023436784744263, | |
"logits/rejected": -1.0878626108169556, | |
"logps/chosen": -1.5501340627670288, | |
"logps/rejected": -2.1030094623565674, | |
"loss": 1.389, | |
"rewards/accuracies": 0.8125, | |
"rewards/chosen": -15.50133991241455, | |
"rewards/margins": 5.528753757476807, | |
"rewards/rejected": -21.030094146728516, | |
"step": 460 | |
}, | |
{ | |
"epoch": 0.9939879759519038, | |
"grad_norm": 98.7777667397615, | |
"learning_rate": 5.594909486328348e-11, | |
"logits/chosen": -1.0701709985733032, | |
"logits/rejected": -1.0728482007980347, | |
"logps/chosen": -1.511604905128479, | |
"logps/rejected": -2.0450565814971924, | |
"loss": 1.5678, | |
"rewards/accuracies": 0.8187500238418579, | |
"rewards/chosen": -15.116047859191895, | |
"rewards/margins": 5.334519863128662, | |
"rewards/rejected": -20.4505672454834, | |
"step": 465 | |
}, | |
{ | |
"epoch": 0.9982631930527722, | |
"step": 467, | |
"total_flos": 0.0, | |
"train_loss": 1.957533876696797, | |
"train_runtime": 11480.123, | |
"train_samples_per_second": 5.216, | |
"train_steps_per_second": 0.041 | |
} | |
], | |
"logging_steps": 5, | |
"max_steps": 467, | |
"num_input_tokens_seen": 0, | |
"num_train_epochs": 1, | |
"save_steps": 1000000, | |
"stateful_callbacks": { | |
"TrainerControl": { | |
"args": { | |
"should_epoch_stop": false, | |
"should_evaluate": false, | |
"should_log": false, | |
"should_save": true, | |
"should_training_stop": true | |
}, | |
"attributes": {} | |
} | |
}, | |
"total_flos": 0.0, | |
"train_batch_size": 2, | |
"trial_name": null, | |
"trial_params": null | |
} | |