# LoRA-Llama-3-MLP / lora.yml
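# Axolotl LoRA fine-tuning configuration for the L3-8B-Stheno-v3.1 base model.
# A run like this is typically launched with:
#   accelerate launch -m axolotl.cli.train lora.yml

# Base model is loaded in 8-bit to cut VRAM usage; the LoRA adapter weights
# themselves train in higher precision.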
base_model: /mnt/WD/AI/text-generation-webui/models/L3-8B-Stheno-v3.1/
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
load_in_4bit: false
strict: false
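
# Training data: one raw-text completion dataset (stories) mixed with three
# Alpaca-format instruction datasets.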
datasets:
- path: 94_stories.jsonl
  type: completion
- path: Luna_personality_instruct.json
  type: alpaca
- path: alpaca_wiki_6144.json
  type: alpaca
- path: alpaca_Luna.json
  type: alpaca
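
# Preprocessed dataset cache and output paths. val_set_size: 0 means no
# validation split is held out from the training data.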
dataset_prepared_path: last_run_prepared
val_set_size: 0
output_dir: ./lora-out
lora_on_cpu: true
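
# LoRA adapter: rank 256 with alpha 64 (scaling factor alpha/r = 0.25),
# 4% dropout, applied to all linear layers via lora_target_linear.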
adapter: lora
lora_model_dir:
sequence_len: 6144
sample_packing: true
pad_to_sequence_len: true
lora_r: 256
lora_alpha: 64
lora_dropout: 0.04
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:
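
# Weights & Biases logging: fields left empty, so W&B tracking is not
# configured for this run.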
wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
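
# Optimization: effective batch size per GPU is micro_batch_size x
# gradient_accumulation_steps (2 here), with bitsandbytes 8-bit AdamW and a
# cosine learning-rate schedule.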
gradient_accumulation_steps: 1
micro_batch_size: 2
num_epochs: 3
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00033
train_on_inputs: false
group_by_length: false
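
# Precision and memory: bf16 training with TF32 matmuls, plus gradient
# checkpointing to trade extra compute for lower activation memory.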
bf16: true
fp16:
tf32: true
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
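
# Attention backend: FlashAttention enabled; xformers left unset.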
xformers_attention:
flash_attention: true
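
# Schedule and checkpoint cadence: 50 warmup steps, one save per epoch. With
# val_set_size: 0 above, the 4 evals per epoch have no validation split to run on.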
warmup_steps: 50
evals_per_epoch: 4
eval_table_size:
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
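
# Llama 3 tokenizers ship without a dedicated pad token, so <|end_of_text|> is
# reused for padding.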
special_tokens:
  pad_token: "<|end_of_text|>"
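
# After training, the adapter saved in ./lora-out can typically be merged into
# the base model with Axolotl's merge utility, e.g.:
#   python -m axolotl.cli.merge_lora lora.yml --lora_model_dir="./lora-out"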