# you have got to be shitting me
import argparse

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
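# Example invocation (hypothetical paths, just to show the shape of the CLI
# defined below: base model, one or more LoRAs in merge order, output dir):
#
#   python loraize.py ./my-base-model ./lora-a ./lora-b ./merged-out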

parser = argparse.ArgumentParser(
                    prog='loraize',
                    description='Apply one or more LoRAs to a model, and then save the result',
                    epilog='BOTTOM TEXT')
parser.add_argument(
                    'model',
                    type=str,
                    help='path or HF name of a base model',
                    )
parser.add_argument(
                    'lora',
                    type=str,
                    help='one or more LoRAs to apply, in merge order',
                    nargs='+')
parser.add_argument(
                    'output_dir',
                    type=str,
                    help='output directory',
                    )
args = parser.parse_args()

print(f"Loading bassoon model:", args.model)
base_model = AutoModelForCausalLM.from_pretrained(
                args.model,
                return_dict=True,
                torch_dtype=torch.bfloat16,
                device_map="cpu",
    )
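# Everything stays on the CPU: merging only needs system RAM, not VRAM,
# and bf16 keeps the footprint to roughly two bytes per parameter.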

model = base_model
for lora in args.lora:
    print(f"Loading LoRA: {lora}")
    model = PeftModel.from_pretrained(
                model,
                lora,
                device_map="cpu"
            )
    # Merge this adapter into the weights before stacking the next one;
    # re-wrapping base_model on every pass (as this loop used to) silently
    # keeps only the last LoRA.
    print("Good luck, bitches. Unloading.")
    print("This gon' take a sec.")
    model = model.merge_and_unload()
tokenizer = AutoTokenizer.from_pretrained(args.model)

model.save_pretrained(args.output_dir, safe_serialization=True, max_shard_size='10GB')
tokenizer.save_pretrained(args.output_dir)
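# Optional sanity check (a minimal sketch; uncomment to confirm the merged
# weights reload as an ordinary, adapter-free checkpoint):
#
#   reloaded = AutoModelForCausalLM.from_pretrained(
#       args.output_dir, torch_dtype=torch.bfloat16, device_map="cpu")
#   print(type(reloaded).__name__)  # a plain *ForCausalLM, not a PeftModel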