{ "best_metric": null, "best_model_checkpoint": null, "epoch": 7.861635220125786, "eval_steps": 500, "global_step": 2500, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "eval_accuracy": 0.6806451612903226, "eval_loss": 0.40702202916145325, "eval_runtime": 1.8726, "eval_samples_per_second": 1655.484, "eval_steps_per_second": 34.712, "step": 318 }, { "epoch": 1.57, "grad_norm": 0.9639246463775635, "learning_rate": 1.650593990216632e-05, "loss": 0.6436, "step": 500 }, { "epoch": 2.0, "eval_accuracy": 0.8480645161290322, "eval_loss": 0.13934274017810822, "eval_runtime": 2.1147, "eval_samples_per_second": 1465.91, "eval_steps_per_second": 30.737, "step": 636 }, { "epoch": 3.0, "eval_accuracy": 0.895483870967742, "eval_loss": 0.07144162803888321, "eval_runtime": 3.2019, "eval_samples_per_second": 968.173, "eval_steps_per_second": 20.3, "step": 954 }, { "epoch": 3.14, "grad_norm": 0.7319316267967224, "learning_rate": 1.3011879804332637e-05, "loss": 0.1636, "step": 1000 }, { "epoch": 4.0, "eval_accuracy": 0.9151612903225806, "eval_loss": 0.0515269860625267, "eval_runtime": 2.0391, "eval_samples_per_second": 1520.288, "eval_steps_per_second": 31.877, "step": 1272 }, { "epoch": 4.72, "grad_norm": 0.5300735831260681, "learning_rate": 9.517819706498952e-06, "loss": 0.0819, "step": 1500 }, { "epoch": 5.0, "eval_accuracy": 0.9229032258064516, "eval_loss": 0.04307929426431656, "eval_runtime": 2.0917, "eval_samples_per_second": 1482.018, "eval_steps_per_second": 31.075, "step": 1590 }, { "epoch": 6.0, "eval_accuracy": 0.9287096774193548, "eval_loss": 0.038923561573028564, "eval_runtime": 2.117, "eval_samples_per_second": 1464.33, "eval_steps_per_second": 30.704, "step": 1908 }, { "epoch": 6.29, "grad_norm": 0.4357357323169708, "learning_rate": 6.02375960866527e-06, "loss": 0.0618, "step": 2000 }, { "epoch": 7.0, "eval_accuracy": 0.9290322580645162, "eval_loss": 0.03594091534614563, "eval_runtime": 2.0849, "eval_samples_per_second": 1486.855, "eval_steps_per_second": 31.176, "step": 2226 }, { "epoch": 7.86, "grad_norm": 0.26694580912590027, "learning_rate": 2.5296995108315863e-06, "loss": 0.0541, "step": 2500 } ], "logging_steps": 500, "max_steps": 2862, "num_input_tokens_seen": 0, "num_train_epochs": 9, "save_steps": 500, "total_flos": 651155886807636.0, "train_batch_size": 48, "trial_name": null, "trial_params": { "alpha": 0.2693940258587564, "num_train_epochs": 9, "temperature": 2 } }