Dracones committed on
Commit
87a21c7
1 Parent(s): b05c5c2

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: apache-2.0
5
+ base_model: microsoft/WizardLM-2-8x22B
6
+ tags:
7
+ - exl2
8
+ ---
9
+
10
+ # WizardLM-2-8x22B - EXL2 2.25bpw
11
+
12
+ This is a 2.25bpw EXL2 quant of [microsoft/WizardLM-2-8x22B](https://huggingface.co/microsoft/WizardLM-2-8x22B)
13
+
14
+ Details about the model can be found at the above model page.
15
+
16
+ ## EXL2 Version
17
+
18
+ These quants were made with exllamav2 version 0.0.18. Quants made on this version of EXL2 may not work on older versions of the exllamav2 library.
19
+
20
+ If you have problems loading these models, please update Text Generation WebUI to the latest version.
21
+
22
+
23
+
24
+ ## Quant Details
25
+
26
+ This is the script used for quantization.
27
+
28
+ ```bash
29
+ #!/bin/bash
30
+
31
+ # Activate the conda environment
32
+ source ~/miniconda3/etc/profile.d/conda.sh
33
+ conda activate exllamav2
34
+
35
+ # Set the model name and bit size
36
+ MODEL_NAME="WizardLM-2-8x22B"
37
+
38
+ # Define variables
39
+ MODEL_DIR="/mnt/storage/models/$MODEL_NAME"
40
+ OUTPUT_DIR="exl2_$MODEL_NAME"
41
+ MEASUREMENT_FILE="measurements/$MODEL_NAME.json"
42
+
43
+ # Create the measurement file if needed
44
+ if [ ! -f "$MEASUREMENT_FILE" ]; then
45
+ echo "Creating $MEASUREMENT_FILE"
46
+ # Create directories
47
+ if [ -d "$OUTPUT_DIR" ]; then
48
+ rm -r "$OUTPUT_DIR"
49
+ fi
50
+ mkdir "$OUTPUT_DIR"
51
+
52
+ python convert.py -i $MODEL_DIR -o $OUTPUT_DIR -nr -om $MEASUREMENT_FILE
53
+ fi
54
+
55
+ # Choose one of the below. Either create a single quant for testing or a batch of them.
56
+ # BIT_PRECISIONS=(2.25)
57
+ BIT_PRECISIONS=(5.0 4.5 4.0 3.5 3.0 2.75 2.5 2.25)
58
+
59
+ for BIT_PRECISION in "${BIT_PRECISIONS[@]}"
60
+ do
61
+ CONVERTED_FOLDER="models/${MODEL_NAME}_exl2_${BIT_PRECISION}bpw"
62
+
63
+ # If it doesn't already exist, make the quant
64
+ if [ ! -d "$CONVERTED_FOLDER" ]; then
65
+
66
+ echo "Creating $CONVERTED_FOLDER"
67
+
68
+ # Create directories
69
+ if [ -d "$OUTPUT_DIR" ]; then
70
+ rm -r "$OUTPUT_DIR"
71
+ fi
72
+ mkdir "$OUTPUT_DIR"
73
+ mkdir "$CONVERTED_FOLDER"
74
+
75
+ # Run conversion commands
76
+ python convert.py -i $MODEL_DIR -o $OUTPUT_DIR -nr -m $MEASUREMENT_FILE -b $BIT_PRECISION -cf $CONVERTED_FOLDER
77
+
78
+ fi
79
+ done
80
+ ```
config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 6144,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 16384,
13
+ "max_position_embeddings": 65536,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 48,
16
+ "num_experts_per_tok": 2,
17
+ "num_hidden_layers": 56,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "rms_norm_eps": 1e-05,
22
+ "rope_theta": 1000000,
23
+ "router_aux_loss_coef": 0.001,
24
+ "router_jitter_noise": 0.0,
25
+ "sliding_window": null,
26
+ "tie_word_embeddings": false,
27
+ "torch_dtype": "bfloat16",
28
+ "transformers_version": "4.36.2",
29
+ "use_cache": false,
30
+ "vocab_size": 32000,
31
+ "quantization_config": {
32
+ "quant_method": "exl2",
33
+ "version": "0.0.18",
34
+ "bits": 2.25,
35
+ "head_bits": 6,
36
+ "calibration": {
37
+ "rows": 100,
38
+ "length": 2048,
39
+ "dataset": "(default)"
40
+ }
41
+ }
42
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.36.2"
6
+ }
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
output-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:847adf348b47b1cfbf54b3783bda0b96ac9dacb31b2e68c2e988fe42268a5c45
3
+ size 8571116592
output-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95e7a617d36666362964da52e843f05085db421bb31c689e5dc2af0cee153fac
3
+ size 8574236200
output-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:884063495a62dfeaf934a35f9037d2a6e5162e0349d3434d39c65461669ea24a
3
+ size 8562241800
output-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:760aa85e648b65e22174df1ca9b7fb22b9b73b74736b6d3b2f9cf28b14717f06
3
+ size 8589948912
output-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a127d7ae2ee301850fe54b8396c71483c539bbd5e8680df0d3296710c2c8b5e
3
+ size 5697545680
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<unk>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
3
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 1000000000000000019884624838656,
36
+ "pad_token": "<unk>",
37
+ "padding_side": "right",
38
+ "sp_model_kwargs": {},
39
+ "spaces_between_special_tokens": false,
40
+ "tokenizer_class": "LlamaTokenizer",
41
+ "unk_token": "<unk>",
42
+ "use_default_system_prompt": false
43
+ }