ukim4 committed
Commit 545d769
Duplicate from localmodels/LLM
Browse files
- .gitattributes +35 -0
- README.md +136 -0
- added_tokens.json +3 -0
- config.json +29 -0
- generation_config.json +7 -0
- quantize_config.json +8 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +34 -0
- wizard-vicuna-13b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order.safetensors +3 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,136 @@
+---
+duplicated_from: localmodels/LLM
+---
+# Wizard Vicuna 13B Uncensored GPTQ
+
+From: https://huggingface.co/ehartford/Wizard-Vicuna-13B-Uncensored merged with [SuperHOT 8K](https://huggingface.co/kaiokendev/superhot-13b-8k-no-rlhf-test).
+
+**This is an experimental new GPTQ which offers up to 8K context size.**
+
+The increased context has been tested to work with [ExLlama](https://github.com/turboderp/exllama), via the latest release of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
+
+It has also been tested from Python code using AutoGPTQ with `trust_remote_code=True`.
+
+Please read below carefully to see how to use this model.
+
+## How to use this model in text-generation-webui with ExLlama
+
+Using the latest version of text-generation-webui:
+
+1. Click the **Model tab**.
+2. Under **Download custom model or LoRA**, enter `localmodels/Wizard-Vicuna-13B-Uncensored-SuperHOT-8K-GPTQ`.
+3. Click **Download**.
+4. The model will start downloading. Once it's finished, it will say "Done".
+5. Untick **Autoload the model**.
+6. In the top left, click the refresh icon next to **Model**.
+7. In the **Model** dropdown, choose the model you just downloaded: `Wizard-Vicuna-13B-Uncensored-SuperHOT-8K-GPTQ`.
+8. To use the increased context, set the **Loader** to **ExLlama**, set **max_seq_len** to 8192 or 4096, and set **compress_pos_emb** to **4** for 8192 context or to **2** for 4096 context (see the sketch after this list).
+9. Now click **Save Settings**, followed by **Reload**.
+10. The model will load automatically and is now ready for use!
+11. Once you're ready, click the **Text Generation tab** and enter a prompt to get started!
+
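+As a minimal sketch of the relationship used in step 8 (assuming the base Llama context of 2048 tokens), **compress_pos_emb** is simply the target context divided by 2048:
+
+```python
+# illustrative helper, not part of text-generation-webui:
+# compress_pos_emb scales positions back into the 2048-token
+# range the base Llama model was trained on
+def compress_pos_emb(max_seq_len: int, base_ctx: int = 2048) -> int:
+    return max_seq_len // base_ctx  # 8192 -> 4, 4096 -> 2
+```
+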
+## How to use this GPTQ model from Python code with AutoGPTQ
+
+First make sure you have AutoGPTQ and Einops installed:
+
+```
+pip3 install einops auto-gptq
+```
+
+Then run the following code. Note that to make the 8K context work, `config.json` in this repo has been hardcoded to a sequence length of 8192.
+
+If you want to try 4096 instead, to reduce VRAM usage, manually edit `config.json` and change `"max_position_embeddings": 8192` to `"max_position_embeddings": 4096`.
+
+```python
+from transformers import AutoTokenizer, pipeline, logging
+from auto_gptq import AutoGPTQForCausalLM
+
+model_name_or_path = "localmodels/Wizard-Vicuna-13B-Uncensored-SuperHOT-8K-GPTQ"
+model_basename = "wizard-vicuna-13b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order"
+
+use_triton = False
+
+tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
+
+model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
+        model_basename=model_basename,
+        use_safetensors=True,
+        trust_remote_code=True,
+        device_map='auto',
+        use_triton=use_triton,
+        quantize_config=None)
+
+model.seqlen = 8192
+
+# Note: check that the prompt template is correct for this model.
+prompt = "Tell me about AI"
+prompt_template = f'''USER: {prompt}
+ASSISTANT:'''
+
+print("\n\n*** Generate:")
+
+input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
+output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512)
+print(tokenizer.decode(output[0]))
+
+# Inference can also be done using transformers' pipeline.
+
+# Prevent printing spurious transformers errors when using pipeline with AutoGPTQ.
+logging.set_verbosity(logging.CRITICAL)
+
+print("*** Pipeline:")
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=512,
+    temperature=0.7,
+    top_p=0.95,
+    repetition_penalty=1.15
+)
+
+print(pipe(prompt_template)[0]['generated_text'])
+```
+
+## Model
+
+**wizard-vicuna-13b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order.safetensors**
+
+This will work with AutoGPTQ, ExLlama, and CUDA versions of GPTQ-for-LLaMa. There are reports of issues with the Triton mode of recent GPTQ-for-LLaMa; if you have issues, use AutoGPTQ instead.
+
+It was created with group_size 128 to increase inference accuracy, but without --act-order (desc_act), to increase compatibility and improve inference speed.
+
+* `wizard-vicuna-13b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order.safetensors`
+  * Works with ExLlama with increased context (4096 or 8192).
+  * Works with AutoGPTQ in Python code, including with increased context, if `trust_remote_code=True` is set.
+  * Parameters: group_size = 128. No act-order.
+
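+For reference, these settings match the `quantize_config.json` shipped in this repo. Expressed as an AutoGPTQ quantize config, a minimal sketch (should you want to pass one explicitly instead of `quantize_config=None`) would look like:
+
+```python
+from auto_gptq import BaseQuantizeConfig
+
+# mirrors quantize_config.json in this repo
+quantize_config = BaseQuantizeConfig(
+    bits=4,          # 4-bit quantization
+    group_size=128,  # group size 128 for better accuracy
+    desc_act=False,  # no act-order, for compatibility and speed
+)
+```
+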
+---
+
+### SuperHOT Prototype 2 w/ 8K Context
+
+This is a second prototype of SuperHOT, this time 30B with 8K context and no RLHF, using the same technique described in [the github blog](https://kaiokendev.github.io/til#extending-context-to-8k).
+Tests have shown that the model does indeed leverage the extended context at 8K.
+
|
116 |
+
#### Looking for Merged & Quantized Models?
|
117 |
+
- 30B 4-bit CUDA: [tmpupload/superhot-30b-8k-4bit-safetensors](https://huggingface.co/tmpupload/superhot-30b-8k-4bit-safetensors)
|
118 |
+
- 30B 4-bit CUDA 128g: [tmpupload/superhot-30b-8k-4bit-128g-safetensors](https://huggingface.co/tmpupload/superhot-30b-8k-4bit-128g-safetensors)
|
119 |
+
|
120 |
+
#### Training Details
|
121 |
+
I trained the LoRA with the following configuration:
|
122 |
+
- 1200 samples (~400 samples over 2048 sequence length)
|
123 |
+
- learning rate of 3e-4
|
124 |
+
- 3 epochs
|
125 |
+
- The exported modules are:
|
126 |
+
- q_proj
|
127 |
+
- k_proj
|
128 |
+
- v_proj
|
129 |
+
- o_proj
|
130 |
+
- no bias
|
131 |
+
- Rank = 4
|
132 |
+
- Alpha = 8
|
133 |
+
- no dropout
|
134 |
+
- weight decay of 0.1
|
135 |
+
- AdamW beta1 of 0.9 and beta2 0.99, epsilon of 1e-5
|
136 |
+
- Trained on 4-bit base model
|
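+Expressed with Hugging Face `peft`, a minimal sketch of the LoRA part of those hyperparameters might look like the following (assuming `peft.LoraConfig`; the original training script is not part of this repo, and the optimizer settings — learning rate, weight decay, AdamW betas — would go to the trainer, not this config):
+
+```python
+from peft import LoraConfig
+
+# mirrors the list above; illustrative, not the author's actual script
+lora_config = LoraConfig(
+    r=4,                # Rank = 4
+    lora_alpha=8,       # Alpha = 8
+    lora_dropout=0.0,   # no dropout
+    bias="none",        # no bias
+    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
+    task_type="CAUSAL_LM",
+)
+```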
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "[PAD]": 32000
+}
config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "/content/wizard-vicuna-13B-uncensored/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 13824,
+  "max_position_embeddings": 8192,
+  "max_sequence_length": 2048,
+  "model_type": "llama",
+  "num_attention_heads": 40,
+  "num_hidden_layers": 40,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-06,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.30.0.dev0",
+  "use_cache": true,
+  "vocab_size": 32000,
+  "auto_map": {
+    "AutoModel": "modelling_llama.LlamaModel",
+    "AutoModelForCausalLM": "modelling_llama.LlamaForCausalLM",
+    "AutoModelForSequenceClassification": "modelling_llama.LlamaForSequenceClassification"
+  }
+}
generation_config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.30.0.dev0"
+}
quantize_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "bits": 4,
+  "group_size": 128,
+  "damp_percent": 0.01,
+  "desc_act": false,
+  "sym": true,
+  "true_sequential": true
+}
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 2048,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
wizard-vicuna-13b-uncensored-superhot-8k-GPTQ-4bit-128g.no-act.order.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20e3af98e97aed1584b4302e7932902b99377f5a0c76bf4d342d26595b951f80
+size 7454797160