selmamalak committed on
Commit af30143
1 Parent(s): 02755c3

End of training

Files changed (5)
  1. README.md +5 -5
  2. all_results.json +16 -0
  3. eval_results.json +11 -0
  4. train_results.json +8 -0
  5. trainer_state.json +913 -0
README.md CHANGED
@@ -23,11 +23,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the medmnist-v2 dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6096
- - Accuracy: 0.7727
- - Precision: 0.6427
- - Recall: 0.5346
- - F1: 0.5283
+ - Loss: 0.6240
+ - Accuracy: 0.7561
+ - Precision: 0.5742
+ - Recall: 0.5353
+ - F1: 0.5271
 
  ## Model description
 
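The model card above describes a LoRA fine-tune of BEiT (the checkpoint directory recorded in `trainer_state.json` below is named `beit-base-patch16-224-pt22k-ft22k-finetuned-lora-medmnistv2`). As a minimal sketch of how such an adapter could be loaded for inference with `transformers` + `peft`: the repo id, the class count, and the assumption that the retrained classifier head was saved alongside the LoRA weights are all placeholders, not details confirmed by this commit.

```python
# Minimal sketch, not the author's script: load the base BEiT model and attach the LoRA adapter.
# ADAPTER_ID and NUM_CLASSES are assumptions; adjust them to the actual published artifacts.
from transformers import AutoImageProcessor, AutoModelForImageClassification
from peft import PeftModel
from PIL import Image

BASE_ID = "microsoft/beit-base-patch16-224-pt22k-ft22k"
ADAPTER_ID = "selmamalak/beit-base-patch16-224-pt22k-ft22k-finetuned-lora-medmnistv2"  # hypothetical repo id
NUM_CLASSES = 7  # placeholder: class count of the MedMNIST-v2 subset used for fine-tuning

processor = AutoImageProcessor.from_pretrained(BASE_ID)
base = AutoModelForImageClassification.from_pretrained(
    BASE_ID,
    num_labels=NUM_CLASSES,          # replace the 22k-class head before applying the adapter
    ignore_mismatched_sizes=True,
)
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # loads the LoRA weights (and saved head, if included)
model.eval()

image = Image.open("example.png").convert("RGB")     # any RGB image; the processor resizes to 224x224
inputs = processor(images=image, return_tensors="pt")
pred = model(**inputs).logits.argmax(-1).item()
print("predicted class index:", pred)
```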
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 9.95,
+   "eval_accuracy": 0.7561097256857855,
+   "eval_f1": 0.5270746588062634,
+   "eval_loss": 0.6239711046218872,
+   "eval_precision": 0.5741529640634522,
+   "eval_recall": 0.5353373036113024,
+   "eval_runtime": 11.7344,
+   "eval_samples_per_second": 170.865,
+   "eval_steps_per_second": 10.738,
+   "total_flos": 5.440571948014866e+18,
+   "train_loss": 0.7656277652180523,
+   "train_runtime": 925.5702,
+   "train_samples_per_second": 75.705,
+   "train_steps_per_second": 1.178
+ }
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 9.95,
+   "eval_accuracy": 0.7561097256857855,
+   "eval_f1": 0.5270746588062634,
+   "eval_loss": 0.6239711046218872,
+   "eval_precision": 0.5741529640634522,
+   "eval_recall": 0.5353373036113024,
+   "eval_runtime": 11.7344,
+   "eval_samples_per_second": 170.865,
+   "eval_steps_per_second": 10.738
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.95,
+   "total_flos": 5.440571948014866e+18,
+   "train_loss": 0.7656277652180523,
+   "train_runtime": 925.5702,
+   "train_samples_per_second": 75.705,
+   "train_steps_per_second": 1.178
+ }
trainer_state.json ADDED
@@ -0,0 +1,913 @@
+ {
+   "best_metric": 0.7726819541375872,
+   "best_model_checkpoint": "beit-base-patch16-224-pt22k-ft22k-finetuned-lora-medmnistv2/checkpoint-1090",
+   "epoch": 9.954337899543379,
+   "eval_steps": 500,
+   "global_step": 1090,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.09,
+       "grad_norm": 4.391805171966553,
+       "learning_rate": 0.004954128440366973,
+       "loss": 1.2554,
+       "step": 10
+     },
+     {
+       "epoch": 0.18,
+       "grad_norm": 1.9513263702392578,
+       "learning_rate": 0.004908256880733945,
+       "loss": 1.0074,
+       "step": 20
+     },
+     {
+       "epoch": 0.27,
+       "grad_norm": 3.8831961154937744,
+       "learning_rate": 0.004862385321100918,
+       "loss": 0.9068,
+       "step": 30
+     },
+     {
+       "epoch": 0.37,
+       "grad_norm": 2.7356910705566406,
+       "learning_rate": 0.00481651376146789,
+       "loss": 0.9577,
+       "step": 40
+     },
+     {
+       "epoch": 0.46,
+       "grad_norm": 2.5790963172912598,
+       "learning_rate": 0.0047706422018348625,
+       "loss": 0.9124,
+       "step": 50
+     },
+     {
+       "epoch": 0.55,
+       "grad_norm": 2.810718297958374,
+       "learning_rate": 0.004724770642201835,
+       "loss": 0.8901,
+       "step": 60
+     },
+     {
+       "epoch": 0.64,
+       "grad_norm": 2.3134875297546387,
+       "learning_rate": 0.004678899082568808,
+       "loss": 0.9111,
+       "step": 70
+     },
+     {
+       "epoch": 0.73,
+       "grad_norm": 1.8082305192947388,
+       "learning_rate": 0.00463302752293578,
+       "loss": 0.9059,
+       "step": 80
+     },
+     {
+       "epoch": 0.82,
+       "grad_norm": 1.4370285272598267,
+       "learning_rate": 0.0045871559633027525,
+       "loss": 0.9984,
+       "step": 90
+     },
+     {
+       "epoch": 0.91,
+       "grad_norm": 2.8687102794647217,
+       "learning_rate": 0.004541284403669725,
+       "loss": 0.9135,
+       "step": 100
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7198404785643071,
+       "eval_f1": 0.3049533355905189,
+       "eval_loss": 0.7698342800140381,
+       "eval_precision": 0.5179078489450006,
+       "eval_recall": 0.3103063518754385,
+       "eval_runtime": 5.826,
+       "eval_samples_per_second": 172.159,
+       "eval_steps_per_second": 10.814,
+       "step": 109
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 1.885165810585022,
+       "learning_rate": 0.004495412844036698,
+       "loss": 0.8622,
+       "step": 110
+     },
+     {
+       "epoch": 1.1,
+       "grad_norm": 2.4616353511810303,
+       "learning_rate": 0.0044495412844036695,
+       "loss": 0.9407,
+       "step": 120
+     },
+     {
+       "epoch": 1.19,
+       "grad_norm": 1.2185616493225098,
+       "learning_rate": 0.004403669724770643,
+       "loss": 0.8853,
+       "step": 130
+     },
+     {
+       "epoch": 1.28,
+       "grad_norm": 2.0212063789367676,
+       "learning_rate": 0.004357798165137615,
+       "loss": 0.8534,
+       "step": 140
+     },
+     {
+       "epoch": 1.37,
+       "grad_norm": 1.129385232925415,
+       "learning_rate": 0.004311926605504587,
+       "loss": 0.8613,
+       "step": 150
+     },
+     {
+       "epoch": 1.46,
+       "grad_norm": 1.726841926574707,
+       "learning_rate": 0.0042660550458715595,
+       "loss": 0.8104,
+       "step": 160
+     },
+     {
+       "epoch": 1.55,
+       "grad_norm": 2.272818088531494,
+       "learning_rate": 0.004220183486238533,
+       "loss": 0.915,
+       "step": 170
+     },
+     {
+       "epoch": 1.64,
+       "grad_norm": 1.117639422416687,
+       "learning_rate": 0.004174311926605505,
+       "loss": 0.8753,
+       "step": 180
+     },
+     {
+       "epoch": 1.74,
+       "grad_norm": 0.8981102108955383,
+       "learning_rate": 0.004128440366972477,
+       "loss": 0.8429,
+       "step": 190
+     },
+     {
+       "epoch": 1.83,
+       "grad_norm": 1.282863974571228,
+       "learning_rate": 0.00408256880733945,
+       "loss": 0.8355,
+       "step": 200
+     },
+     {
+       "epoch": 1.92,
+       "grad_norm": 0.9718325734138489,
+       "learning_rate": 0.004036697247706422,
+       "loss": 0.8352,
+       "step": 210
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.7298105682951147,
+       "eval_f1": 0.3883733866690214,
+       "eval_loss": 0.7352049350738525,
+       "eval_precision": 0.536225724752637,
+       "eval_recall": 0.42308831430611293,
+       "eval_runtime": 5.7891,
+       "eval_samples_per_second": 173.258,
+       "eval_steps_per_second": 10.883,
+       "step": 219
+     },
+     {
+       "epoch": 2.01,
+       "grad_norm": 1.3424675464630127,
+       "learning_rate": 0.003990825688073394,
+       "loss": 0.8943,
+       "step": 220
+     },
+     {
+       "epoch": 2.1,
+       "grad_norm": 1.352690577507019,
+       "learning_rate": 0.003944954128440367,
+       "loss": 0.8426,
+       "step": 230
+     },
+     {
+       "epoch": 2.19,
+       "grad_norm": 1.743643879890442,
+       "learning_rate": 0.0038990825688073397,
+       "loss": 0.7838,
+       "step": 240
+     },
+     {
+       "epoch": 2.28,
+       "grad_norm": 0.9074971675872803,
+       "learning_rate": 0.0038532110091743124,
+       "loss": 0.8546,
+       "step": 250
+     },
+     {
+       "epoch": 2.37,
+       "grad_norm": 0.6970580816268921,
+       "learning_rate": 0.0038073394495412843,
+       "loss": 0.8481,
+       "step": 260
+     },
+     {
+       "epoch": 2.47,
+       "grad_norm": 1.3498753309249878,
+       "learning_rate": 0.003761467889908257,
+       "loss": 0.766,
+       "step": 270
+     },
+     {
+       "epoch": 2.56,
+       "grad_norm": 1.2216682434082031,
+       "learning_rate": 0.0037155963302752293,
+       "loss": 0.8654,
+       "step": 280
+     },
+     {
+       "epoch": 2.65,
+       "grad_norm": 1.1758558750152588,
+       "learning_rate": 0.003669724770642202,
+       "loss": 0.8438,
+       "step": 290
+     },
+     {
+       "epoch": 2.74,
+       "grad_norm": 0.8012980222702026,
+       "learning_rate": 0.0036238532110091743,
+       "loss": 0.8363,
+       "step": 300
+     },
+     {
+       "epoch": 2.83,
+       "grad_norm": 1.5546942949295044,
+       "learning_rate": 0.003577981651376147,
+       "loss": 0.8998,
+       "step": 310
+     },
+     {
+       "epoch": 2.92,
+       "grad_norm": 0.9878047704696655,
+       "learning_rate": 0.0035321100917431194,
+       "loss": 0.7891,
+       "step": 320
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7178464606181456,
+       "eval_f1": 0.36668660138047887,
+       "eval_loss": 0.7574967741966248,
+       "eval_precision": 0.3953602798961963,
+       "eval_recall": 0.40001083689608274,
+       "eval_runtime": 5.9834,
+       "eval_samples_per_second": 167.63,
+       "eval_steps_per_second": 10.529,
+       "step": 328
+     },
+     {
+       "epoch": 3.01,
+       "grad_norm": 1.59013032913208,
+       "learning_rate": 0.003486238532110092,
+       "loss": 0.8096,
+       "step": 330
+     },
+     {
+       "epoch": 3.11,
+       "grad_norm": 0.7891358733177185,
+       "learning_rate": 0.0034403669724770644,
+       "loss": 0.8351,
+       "step": 340
+     },
+     {
+       "epoch": 3.2,
+       "grad_norm": 1.1285407543182373,
+       "learning_rate": 0.003394495412844037,
+       "loss": 0.8189,
+       "step": 350
+     },
+     {
+       "epoch": 3.29,
+       "grad_norm": 1.166321873664856,
+       "learning_rate": 0.003348623853211009,
+       "loss": 0.7379,
+       "step": 360
+     },
+     {
+       "epoch": 3.38,
+       "grad_norm": 0.8681693077087402,
+       "learning_rate": 0.0033027522935779817,
+       "loss": 0.7388,
+       "step": 370
+     },
+     {
+       "epoch": 3.47,
+       "grad_norm": 1.2175371646881104,
+       "learning_rate": 0.003256880733944954,
+       "loss": 0.8183,
+       "step": 380
+     },
+     {
+       "epoch": 3.56,
+       "grad_norm": 1.4015443325042725,
+       "learning_rate": 0.003211009174311927,
+       "loss": 0.8545,
+       "step": 390
+     },
+     {
+       "epoch": 3.65,
+       "grad_norm": 1.3555314540863037,
+       "learning_rate": 0.003165137614678899,
+       "loss": 0.8038,
+       "step": 400
+     },
+     {
+       "epoch": 3.74,
+       "grad_norm": 1.3774343729019165,
+       "learning_rate": 0.003119266055045872,
+       "loss": 0.8346,
+       "step": 410
+     },
+     {
+       "epoch": 3.84,
+       "grad_norm": 1.0004535913467407,
+       "learning_rate": 0.003073394495412844,
+       "loss": 0.7919,
+       "step": 420
+     },
+     {
+       "epoch": 3.93,
+       "grad_norm": 0.7557776570320129,
+       "learning_rate": 0.003027522935779817,
+       "loss": 0.7649,
+       "step": 430
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.7417746759720838,
+       "eval_f1": 0.41456723287460584,
+       "eval_loss": 0.6878895163536072,
+       "eval_precision": 0.5009001421261136,
+       "eval_recall": 0.3971855759911965,
+       "eval_runtime": 5.8982,
+       "eval_samples_per_second": 170.052,
+       "eval_steps_per_second": 10.681,
+       "step": 438
+     },
+     {
+       "epoch": 4.02,
+       "grad_norm": 0.8360877633094788,
+       "learning_rate": 0.002981651376146789,
+       "loss": 0.7322,
+       "step": 440
+     },
+     {
+       "epoch": 4.11,
+       "grad_norm": 1.399274468421936,
+       "learning_rate": 0.002935779816513762,
+       "loss": 0.7425,
+       "step": 450
+     },
+     {
+       "epoch": 4.2,
+       "grad_norm": 1.0945734977722168,
+       "learning_rate": 0.0028899082568807338,
+       "loss": 0.7295,
+       "step": 460
+     },
+     {
+       "epoch": 4.29,
+       "grad_norm": 1.5122932195663452,
+       "learning_rate": 0.0028440366972477065,
+       "loss": 0.7892,
+       "step": 470
+     },
+     {
+       "epoch": 4.38,
+       "grad_norm": 0.7687821388244629,
+       "learning_rate": 0.002798165137614679,
+       "loss": 0.7614,
+       "step": 480
+     },
+     {
+       "epoch": 4.47,
+       "grad_norm": 1.25822913646698,
+       "learning_rate": 0.0027522935779816515,
+       "loss": 0.7811,
+       "step": 490
+     },
+     {
+       "epoch": 4.57,
+       "grad_norm": 0.7886181473731995,
+       "learning_rate": 0.002706422018348624,
+       "loss": 0.8093,
+       "step": 500
+     },
+     {
+       "epoch": 4.66,
+       "grad_norm": 0.7840601801872253,
+       "learning_rate": 0.0026605504587155966,
+       "loss": 0.738,
+       "step": 510
+     },
+     {
+       "epoch": 4.75,
+       "grad_norm": 1.1541500091552734,
+       "learning_rate": 0.002614678899082569,
+       "loss": 0.8208,
+       "step": 520
+     },
+     {
+       "epoch": 4.84,
+       "grad_norm": 1.354428768157959,
+       "learning_rate": 0.0025688073394495416,
+       "loss": 0.746,
+       "step": 530
+     },
+     {
+       "epoch": 4.93,
+       "grad_norm": 0.8335555195808411,
+       "learning_rate": 0.0025229357798165135,
+       "loss": 0.8146,
+       "step": 540
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7178464606181456,
+       "eval_f1": 0.36409764256305227,
+       "eval_loss": 0.7471081614494324,
+       "eval_precision": 0.4490225302647663,
+       "eval_recall": 0.4141043546195771,
+       "eval_runtime": 5.7321,
+       "eval_samples_per_second": 174.978,
+       "eval_steps_per_second": 10.991,
+       "step": 547
+     },
+     {
+       "epoch": 5.02,
+       "grad_norm": 1.1929486989974976,
+       "learning_rate": 0.0024770642201834866,
+       "loss": 0.7308,
+       "step": 550
+     },
+     {
+       "epoch": 5.11,
+       "grad_norm": 1.202407717704773,
+       "learning_rate": 0.002431192660550459,
+       "loss": 0.7956,
+       "step": 560
+     },
+     {
+       "epoch": 5.21,
+       "grad_norm": 1.1130154132843018,
+       "learning_rate": 0.0023853211009174312,
+       "loss": 0.7042,
+       "step": 570
+     },
+     {
+       "epoch": 5.3,
+       "grad_norm": 1.1920500993728638,
+       "learning_rate": 0.002339449541284404,
+       "loss": 0.7945,
+       "step": 580
+     },
+     {
+       "epoch": 5.39,
+       "grad_norm": 1.4452427625656128,
+       "learning_rate": 0.0022935779816513763,
+       "loss": 0.7757,
+       "step": 590
+     },
+     {
+       "epoch": 5.48,
+       "grad_norm": 1.1632148027420044,
+       "learning_rate": 0.002247706422018349,
+       "loss": 0.7595,
+       "step": 600
+     },
+     {
+       "epoch": 5.57,
+       "grad_norm": 0.8493008017539978,
+       "learning_rate": 0.0022018348623853213,
+       "loss": 0.7757,
+       "step": 610
+     },
+     {
+       "epoch": 5.66,
+       "grad_norm": 1.3676623106002808,
+       "learning_rate": 0.0021559633027522936,
+       "loss": 0.7664,
+       "step": 620
+     },
+     {
+       "epoch": 5.75,
+       "grad_norm": 0.9162562489509583,
+       "learning_rate": 0.0021100917431192663,
+       "loss": 0.7303,
+       "step": 630
+     },
+     {
+       "epoch": 5.84,
+       "grad_norm": 0.7179450392723083,
+       "learning_rate": 0.0020642201834862386,
+       "loss": 0.7088,
+       "step": 640
+     },
+     {
+       "epoch": 5.94,
+       "grad_norm": 0.7027204632759094,
+       "learning_rate": 0.002018348623853211,
+       "loss": 0.6831,
+       "step": 650
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.7367896311066799,
+       "eval_f1": 0.42520915568023343,
+       "eval_loss": 0.7007002830505371,
+       "eval_precision": 0.47770601644698146,
+       "eval_recall": 0.4148257042870626,
+       "eval_runtime": 5.9496,
+       "eval_samples_per_second": 168.582,
+       "eval_steps_per_second": 10.589,
+       "step": 657
+     },
+     {
+       "epoch": 6.03,
+       "grad_norm": 0.7743313312530518,
+       "learning_rate": 0.0019724770642201837,
+       "loss": 0.7261,
+       "step": 660
+     },
+     {
+       "epoch": 6.12,
+       "grad_norm": 0.6318005323410034,
+       "learning_rate": 0.0019266055045871562,
+       "loss": 0.702,
+       "step": 670
+     },
+     {
+       "epoch": 6.21,
+       "grad_norm": 0.696121096611023,
+       "learning_rate": 0.0018807339449541285,
+       "loss": 0.6966,
+       "step": 680
+     },
+     {
+       "epoch": 6.3,
+       "grad_norm": 0.5956413149833679,
+       "learning_rate": 0.001834862385321101,
+       "loss": 0.7176,
+       "step": 690
+     },
+     {
+       "epoch": 6.39,
+       "grad_norm": 1.6218867301940918,
+       "learning_rate": 0.0017889908256880735,
+       "loss": 0.7223,
+       "step": 700
+     },
+     {
+       "epoch": 6.48,
+       "grad_norm": 1.1353025436401367,
+       "learning_rate": 0.001743119266055046,
+       "loss": 0.7098,
+       "step": 710
+     },
+     {
+       "epoch": 6.58,
+       "grad_norm": 0.7476430535316467,
+       "learning_rate": 0.0016972477064220186,
+       "loss": 0.6411,
+       "step": 720
+     },
+     {
+       "epoch": 6.67,
+       "grad_norm": 1.3945239782333374,
+       "learning_rate": 0.0016513761467889909,
+       "loss": 0.7219,
+       "step": 730
+     },
+     {
+       "epoch": 6.76,
+       "grad_norm": 0.9898785948753357,
+       "learning_rate": 0.0016055045871559634,
+       "loss": 0.7456,
+       "step": 740
+     },
+     {
+       "epoch": 6.85,
+       "grad_norm": 0.7352449893951416,
+       "learning_rate": 0.001559633027522936,
+       "loss": 0.7057,
+       "step": 750
+     },
+     {
+       "epoch": 6.94,
+       "grad_norm": 1.0120513439178467,
+       "learning_rate": 0.0015137614678899084,
+       "loss": 0.695,
+       "step": 760
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.7427716849451645,
+       "eval_f1": 0.48411655206463156,
+       "eval_loss": 0.6797036528587341,
+       "eval_precision": 0.4638223826629623,
+       "eval_recall": 0.5333725550353186,
+       "eval_runtime": 5.7522,
+       "eval_samples_per_second": 174.367,
+       "eval_steps_per_second": 10.952,
+       "step": 766
+     },
+     {
+       "epoch": 7.03,
+       "grad_norm": 1.297319769859314,
+       "learning_rate": 0.001467889908256881,
+       "loss": 0.7541,
+       "step": 770
+     },
+     {
+       "epoch": 7.12,
+       "grad_norm": 1.1115490198135376,
+       "learning_rate": 0.0014220183486238532,
+       "loss": 0.6944,
+       "step": 780
+     },
+     {
+       "epoch": 7.21,
+       "grad_norm": 0.881907045841217,
+       "learning_rate": 0.0013761467889908258,
+       "loss": 0.7047,
+       "step": 790
+     },
+     {
+       "epoch": 7.31,
+       "grad_norm": 0.9110414981842041,
+       "learning_rate": 0.0013302752293577983,
+       "loss": 0.7227,
+       "step": 800
+     },
+     {
+       "epoch": 7.4,
+       "grad_norm": 0.7190865874290466,
+       "learning_rate": 0.0012844036697247708,
+       "loss": 0.7263,
+       "step": 810
+     },
+     {
+       "epoch": 7.49,
+       "grad_norm": 0.9148305654525757,
+       "learning_rate": 0.0012385321100917433,
+       "loss": 0.6336,
+       "step": 820
+     },
+     {
+       "epoch": 7.58,
+       "grad_norm": 0.7972878813743591,
+       "learning_rate": 0.0011926605504587156,
+       "loss": 0.6886,
+       "step": 830
+     },
+     {
+       "epoch": 7.67,
+       "grad_norm": 0.9237717986106873,
+       "learning_rate": 0.0011467889908256881,
+       "loss": 0.6212,
+       "step": 840
+     },
+     {
+       "epoch": 7.76,
+       "grad_norm": 1.1942154169082642,
+       "learning_rate": 0.0011009174311926607,
+       "loss": 0.6202,
+       "step": 850
+     },
+     {
+       "epoch": 7.85,
+       "grad_norm": 1.2370058298110962,
+       "learning_rate": 0.0010550458715596332,
+       "loss": 0.703,
+       "step": 860
+     },
+     {
+       "epoch": 7.95,
+       "grad_norm": 1.2201330661773682,
+       "learning_rate": 0.0010091743119266055,
+       "loss": 0.6646,
+       "step": 870
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.7537387836490529,
+       "eval_f1": 0.4932964122925023,
+       "eval_loss": 0.6534218192100525,
+       "eval_precision": 0.6130395728356862,
+       "eval_recall": 0.5077119285550199,
+       "eval_runtime": 6.1104,
+       "eval_samples_per_second": 164.148,
+       "eval_steps_per_second": 10.31,
+       "step": 876
+     },
+     {
+       "epoch": 8.04,
+       "grad_norm": 0.904547393321991,
+       "learning_rate": 0.0009633027522935781,
+       "loss": 0.6991,
+       "step": 880
+     },
+     {
+       "epoch": 8.13,
+       "grad_norm": 0.7196776270866394,
+       "learning_rate": 0.0009174311926605505,
+       "loss": 0.612,
+       "step": 890
+     },
+     {
+       "epoch": 8.22,
+       "grad_norm": 1.1908409595489502,
+       "learning_rate": 0.000871559633027523,
+       "loss": 0.6583,
+       "step": 900
+     },
+     {
+       "epoch": 8.31,
+       "grad_norm": 1.1398776769638062,
+       "learning_rate": 0.0008256880733944954,
+       "loss": 0.7221,
+       "step": 910
+     },
+     {
+       "epoch": 8.4,
+       "grad_norm": 0.867472767829895,
+       "learning_rate": 0.000779816513761468,
+       "loss": 0.6012,
+       "step": 920
+     },
+     {
+       "epoch": 8.49,
+       "grad_norm": 0.7770695686340332,
+       "learning_rate": 0.0007339449541284405,
+       "loss": 0.6763,
+       "step": 930
+     },
+     {
+       "epoch": 8.58,
+       "grad_norm": 1.4209235906600952,
+       "learning_rate": 0.0006880733944954129,
+       "loss": 0.6451,
+       "step": 940
+     },
+     {
+       "epoch": 8.68,
+       "grad_norm": 1.1582767963409424,
+       "learning_rate": 0.0006422018348623854,
+       "loss": 0.6745,
+       "step": 950
+     },
+     {
+       "epoch": 8.77,
+       "grad_norm": 1.2608932256698608,
+       "learning_rate": 0.0005963302752293578,
+       "loss": 0.6464,
+       "step": 960
+     },
+     {
+       "epoch": 8.86,
+       "grad_norm": 0.7971704006195068,
+       "learning_rate": 0.0005504587155963303,
+       "loss": 0.6251,
+       "step": 970
+     },
+     {
+       "epoch": 8.95,
+       "grad_norm": 1.318163275718689,
+       "learning_rate": 0.0005045871559633027,
+       "loss": 0.675,
+       "step": 980
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.7666999002991027,
+       "eval_f1": 0.5308101410608023,
+       "eval_loss": 0.6237577795982361,
+       "eval_precision": 0.6518028517787527,
+       "eval_recall": 0.5430883155707511,
+       "eval_runtime": 5.972,
+       "eval_samples_per_second": 167.95,
+       "eval_steps_per_second": 10.549,
+       "step": 985
+     },
+     {
+       "epoch": 9.04,
+       "grad_norm": 0.9488193392753601,
+       "learning_rate": 0.00045871559633027525,
+       "loss": 0.6078,
+       "step": 990
+     },
+     {
+       "epoch": 9.13,
+       "grad_norm": 0.8466370105743408,
+       "learning_rate": 0.0004128440366972477,
+       "loss": 0.6368,
+       "step": 1000
+     },
+     {
+       "epoch": 9.22,
+       "grad_norm": 1.245497465133667,
+       "learning_rate": 0.00036697247706422024,
+       "loss": 0.6639,
+       "step": 1010
+     },
+     {
+       "epoch": 9.32,
+       "grad_norm": 1.0719431638717651,
+       "learning_rate": 0.0003211009174311927,
+       "loss": 0.5846,
+       "step": 1020
+     },
+     {
+       "epoch": 9.41,
+       "grad_norm": 0.8983196020126343,
+       "learning_rate": 0.00027522935779816516,
+       "loss": 0.663,
+       "step": 1030
+     },
+     {
+       "epoch": 9.5,
+       "grad_norm": 0.8495015501976013,
+       "learning_rate": 0.00022935779816513763,
+       "loss": 0.5945,
+       "step": 1040
+     },
+     {
+       "epoch": 9.59,
+       "grad_norm": 1.3694357872009277,
+       "learning_rate": 0.00018348623853211012,
+       "loss": 0.6575,
+       "step": 1050
+     },
+     {
+       "epoch": 9.68,
+       "grad_norm": 1.014473557472229,
+       "learning_rate": 0.00013761467889908258,
+       "loss": 0.6433,
+       "step": 1060
+     },
+     {
+       "epoch": 9.77,
+       "grad_norm": 1.0054584741592407,
+       "learning_rate": 9.174311926605506e-05,
+       "loss": 0.6797,
+       "step": 1070
+     },
+     {
+       "epoch": 9.86,
+       "grad_norm": 1.37436044216156,
+       "learning_rate": 4.587155963302753e-05,
+       "loss": 0.6579,
+       "step": 1080
+     },
+     {
+       "epoch": 9.95,
+       "grad_norm": 0.9838040471076965,
+       "learning_rate": 0.0,
+       "loss": 0.6145,
+       "step": 1090
+     },
+     {
+       "epoch": 9.95,
+       "eval_accuracy": 0.7726819541375872,
+       "eval_f1": 0.5282805571871881,
+       "eval_loss": 0.6095667481422424,
+       "eval_precision": 0.6426600715638663,
+       "eval_recall": 0.5346360087577886,
+       "eval_runtime": 6.0311,
+       "eval_samples_per_second": 166.305,
+       "eval_steps_per_second": 10.446,
+       "step": 1090
+     },
+     {
+       "epoch": 9.95,
+       "step": 1090,
+       "total_flos": 5.440571948014866e+18,
+       "train_loss": 0.7656277652180523,
+       "train_runtime": 925.5702,
+       "train_samples_per_second": 75.705,
+       "train_steps_per_second": 1.178
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 1090,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 5.440571948014866e+18,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
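Because `trainer_state.json` keeps every logged record in `log_history`, the per-epoch evaluation curve can be read straight out of the file added above. A minimal sketch, assuming the file has been downloaded locally as `trainer_state.json`:

```python
# Minimal sketch: summarise the evaluation records stored in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the log_history entries that carry eval_* keys.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f"epoch {e['epoch']:>5}  acc={e['eval_accuracy']:.4f}  "
          f"f1={e['eval_f1']:.4f}  loss={e['eval_loss']:.4f}")

best = max(evals, key=lambda e: e["eval_accuracy"])
print("best step:", best["step"], "accuracy:", best["eval_accuracy"])  # matches best_metric above
```

Note that the best checkpoint (step 1090, accuracy 0.7727) is what the previous README reported, while the new README lists the final evaluation in `eval_results.json` (accuracy 0.7561), which explains the metric changes in this commit's README diff.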