{ "best_metric": null, "best_model_checkpoint": null, "epoch": 7.861635220125786, "eval_steps": 500, "global_step": 2500, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "eval_accuracy": 0.5916129032258064, "eval_loss": 0.19515082240104675, "eval_runtime": 1.8956, "eval_samples_per_second": 1635.33, "eval_steps_per_second": 34.289, "step": 318 }, { "epoch": 1.57, "grad_norm": 0.6071546077728271, "learning_rate": 1.650593990216632e-05, "loss": 0.315, "step": 500 }, { "epoch": 2.0, "eval_accuracy": 0.8209677419354838, "eval_loss": 0.09566613286733627, "eval_runtime": 2.0569, "eval_samples_per_second": 1507.132, "eval_steps_per_second": 31.601, "step": 636 }, { "epoch": 3.0, "eval_accuracy": 0.8745161290322581, "eval_loss": 0.06388552486896515, "eval_runtime": 3.4293, "eval_samples_per_second": 903.986, "eval_steps_per_second": 18.955, "step": 954 }, { "epoch": 3.14, "grad_norm": 0.4936941862106323, "learning_rate": 1.3011879804332637e-05, "loss": 0.1104, "step": 1000 }, { "epoch": 4.0, "eval_accuracy": 0.9, "eval_loss": 0.049174223095178604, "eval_runtime": 3.2604, "eval_samples_per_second": 950.797, "eval_steps_per_second": 19.936, "step": 1272 }, { "epoch": 4.72, "grad_norm": 0.36201611161231995, "learning_rate": 9.517819706498952e-06, "loss": 0.0706, "step": 1500 }, { "epoch": 5.0, "eval_accuracy": 0.9058064516129032, "eval_loss": 0.040870048105716705, "eval_runtime": 2.5708, "eval_samples_per_second": 1205.872, "eval_steps_per_second": 25.284, "step": 1590 }, { "epoch": 6.0, "eval_accuracy": 0.9112903225806451, "eval_loss": 0.035991765558719635, "eval_runtime": 3.5579, "eval_samples_per_second": 871.299, "eval_steps_per_second": 18.269, "step": 1908 }, { "epoch": 6.29, "grad_norm": 0.304591566324234, "learning_rate": 6.02375960866527e-06, "loss": 0.0557, "step": 2000 }, { "epoch": 7.0, "eval_accuracy": 0.9180645161290323, "eval_loss": 0.033146169036626816, "eval_runtime": 1.8785, "eval_samples_per_second": 1650.226, "eval_steps_per_second": 34.602, "step": 2226 }, { "epoch": 7.86, "grad_norm": 0.20553433895111084, "learning_rate": 2.5296995108315863e-06, "loss": 0.0489, "step": 2500 } ], "logging_steps": 500, "max_steps": 2862, "num_input_tokens_seen": 0, "num_train_epochs": 9, "save_steps": 500, "total_flos": 651155886807636.0, "train_batch_size": 48, "trial_name": null, "trial_params": { "alpha": 0.44158831361956363, "num_train_epochs": 9, "temperature": 13 } }