BobaZooba committed
Commit: 04a7142
Parent: 91256fb

Training in progress, step 100

adapter_config.json CHANGED
@@ -14,13 +14,13 @@
   "r": 64,
   "revision": null,
   "target_modules": [
-    "v_proj",
     "o_proj",
+    "q_proj",
     "up_proj",
-    "gate_proj",
     "down_proj",
     "k_proj",
-    "q_proj"
+    "v_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
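The target module set itself is unchanged; the diff only reorders the seven projection matrices of a LLaMA-style decoder block (consistent with raw_lora_target_modules: "all" in training_config.json below). A minimal sketch of the equivalent PEFT configuration, assuming the peft package; r and lora_dropout come from this repo's configs, everything else is left at defaults:

```python
from peft import LoraConfig

# Mirrors the new adapter_config.json: same seven target modules,
# serialized in a different order. Fields not shown in the diff
# (e.g. lora_alpha) are assumptions left at their PEFT defaults.
config = LoraConfig(
    r=64,              # from adapter_config.json
    lora_dropout=0.1,  # from training_config.json
    target_modules=[
        "o_proj", "q_proj", "up_proj", "down_proj",
        "k_proj", "v_proj", "gate_proj",
    ],
    task_type="CAUSAL_LM",
)
```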
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dda825c13ac7b2f848b970b524f518b6456014cb3e473ece417bae2eb6739174
+oid sha256:25e89b84367382ff8f0d486818a7e9a1c72e5409a56e029bc42bf1057d91069d
 size 335605144
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a00d251b9e8778867a0be2ae762151214e219b14ed22e794563e9fffee1931b
+oid sha256:702ddecfce26dfe6b8a1289d399de5c5452d05721418eac40963e864cdcc977f
 size 6264
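adapter_model.safetensors and training_args.bin are stored as Git LFS pointers: three-line text stubs recording the spec version, the SHA-256 of the real object, and its size in bytes. Only the oid changes in both diffs, so each file was rewritten at the same size. A minimal sketch of checking a downloaded object against its pointer, using only the standard library (the helper name and example paths are illustrative, not part of this repo):

```python
import hashlib

def verify_lfs_object(pointer_path: str, object_path: str) -> bool:
    """Compare a downloaded file against the oid/size in its Git LFS pointer."""
    fields = dict(
        line.strip().split(" ", 1)
        for line in open(pointer_path)
        if line.strip()
    )
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    size = 0
    with open(object_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_oid

# e.g. verify_lfs_object("training_args.bin", "downloads/training_args.bin")
```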
training_config.json CHANGED
@@ -58,16 +58,16 @@
   "lora_dropout": 0.1,
   "raw_lora_target_modules": "all",
   "output_dir": "./outputs/",
-  "per_device_train_batch_size": 2,
+  "per_device_train_batch_size": 8,
   "do_eval": false,
   "per_device_eval_batch_size": null,
-  "gradient_accumulation_steps": 4,
+  "gradient_accumulation_steps": 2,
   "eval_accumulation_steps": null,
   "eval_delay": 0,
   "eval_steps": 1000,
   "warmup_steps": 100,
-  "max_steps": 5000,
-  "num_train_epochs": 3,
+  "max_steps": null,
+  "num_train_epochs": 5,
   "learning_rate": 0.0002,
   "max_grad_norm": 1.0,
   "weight_decay": 0.001,