lu-vae committed on
Commit
24303d7
1 Parent(s): 8eccb60

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,138 @@
+ ---
+ license: apache-2.0
+ base_model: JackFram/llama-68m
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: data/llama-68m-20240502-0037
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.0`
+ ```yaml
+ base_model: JackFram/llama-68m
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: /data/data/final_set_cleaned/train/
+     type: sharegpt
+     conversation: chatml
+   - path: /data/data/map_coig_cqia.jsonl
+     type: sharegpt
+     conversation: chatml
+   - path: /data/data/ruozhiba.jsonl
+     type: sharegpt
+     conversation: chatml
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0
+ output_dir: ./out
+
+ sequence_len: 4096
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 8
+ micro_batch_size: 4
+ num_epochs: 2
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ learning_rate: 2e-5
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 100
+ evals_per_epoch: 0
+ eval_table_size:
+ saves_per_epoch: 4
+ debug:
+ deepspeed: deepspeed/zero2.json
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ default_system_message: "You are a helpful assistant."
+ special_tokens:
+   eos_token: "<|im_end|>"
+   pad_token: "<|end_of_text|>"
+ tokens:
+   - "<|im_start|>"
+   - "<|im_end|>"
+
+ ```
+
+ </details><br>
+
+ # data/llama-68m-20240502-0037
+
+ This model is a fine-tuned version of [JackFram/llama-68m](https://huggingface.co/JackFram/llama-68m) on the ShareGPT-format datasets listed in the axolotl config above.
+
+ ## Model description
+
+ A ~68M-parameter LLaMA-architecture model (2 hidden layers, hidden size 768) fine-tuned from [JackFram/llama-68m](https://huggingface.co/JackFram/llama-68m) to follow the ChatML conversation format (`<|im_start|>` / `<|im_end|>`), with "You are a helpful assistant." as the default system message.
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ Training used the ShareGPT-format conversation datasets listed in the axolotl config above (`final_set_cleaned`, `map_coig_cqia.jsonl`, `ruozhiba.jsonl`), rendered with the ChatML template. No validation split was held out (`val_set_size: 0`), so no evaluation results are reported.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 6
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 192 (4 per device × 8 accumulation steps × 6 GPUs)
+ - total_eval_batch_size: 24 (4 per device × 6 GPUs)
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 100
+ - num_epochs: 2
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.40.1
+ - PyTorch 2.0.1+cu118
+ - Datasets 2.15.0
+ - Tokenizers 0.19.1
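The usage sections of the card above are still placeholders, so here is a minimal, non-authoritative usage sketch. It assumes a local clone of this repository (the `path` below is a placeholder), builds the ChatML prompt by hand because `tokenizer_config.json` does not ship a `chat_template`, and stops generation at `<|im_end|>` because `generation_config.json` still carries the base model's `eos_token_id: 2`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder path: a local clone of this repository (or its Hub repo id).
path = "./"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)

# Build a ChatML prompt by hand, mirroring the axolotl config
# (conversation: chatml, default_system_message: "You are a helpful assistant.").
prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nHello!<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt")

# generation_config.json still lists eos_token_id 2 (</s>), so stop explicitly
# at the ChatML end-of-turn token instead.
output = model.generate(
    **inputs,
    max_new_tokens=128,
    eos_token_id=tokenizer.convert_tokens_to_ids("<|im_end|>"),
)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```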
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|end_of_text|>": 32001,
+   "<|im_end|>": 32000,
+   "<|im_start|>": 32002
+ }
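A quick way to confirm these IDs line up with the tokenizer files below is to load the tokenizer and look the tokens up (a sketch; the local path is a placeholder for a clone of this repo):

```python
from transformers import AutoTokenizer

# Placeholder path: point this at a local clone of this repository.
tokenizer = AutoTokenizer.from_pretrained("./")

# The three ChatML-related tokens added on top of the 32000-token base vocab.
for tok in ("<|im_end|>", "<|end_of_text|>", "<|im_start|>"):
    print(tok, tokenizer.convert_tokens_to_ids(tok))
# Expected IDs per added_tokens.json: 32000, 32001, 32002
```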
axo.yaml ADDED
@@ -0,0 +1,70 @@
+ base_model: JackFram/llama-68m
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: /data/data/final_set_cleaned/train/
+     type: sharegpt
+     conversation: chatml
+   - path: /data/data/map_coig_cqia.jsonl
+     type: sharegpt
+     conversation: chatml
+   - path: /data/data/ruozhiba.jsonl
+     type: sharegpt
+     conversation: chatml
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0
+ output_dir: ./out
+
+ sequence_len: 4096
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 8
+ micro_batch_size: 4
+ num_epochs: 2
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ learning_rate: 2e-5
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 100
+ evals_per_epoch: 0
+ eval_table_size:
+ saves_per_epoch: 4
+ debug:
+ deepspeed: deepspeed/zero2.json
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ default_system_message: "You are a helpful assistant."
+ special_tokens:
+   eos_token: "<|im_end|>"
+   pad_token: "<|end_of_text|>"
+ tokens:
+   - "<|im_start|>"
+   - "<|im_end|>"
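For reference, the effective global batch size implied by this config is micro_batch_size × gradient_accumulation_steps × number of data-parallel workers. A minimal sketch of computing it from `axo.yaml` (assumes PyYAML is installed and the six GPUs reported in the README; adjust `num_gpus` for other setups):

```python
import yaml  # PyYAML, assumed available

with open("axo.yaml") as f:
    cfg = yaml.safe_load(f)

num_gpus = 6  # from the README ("num_devices: 6"); not stored in axo.yaml itself
global_batch = cfg["micro_batch_size"] * cfg["gradient_accumulation_steps"] * num_gpus
print(f"effective global batch size: {global_batch}")  # 4 * 8 * 6 = 192, matching the README
```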
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "JackFram/llama-68m",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 32000,
+   "hidden_act": "silu",
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 2,
+   "num_key_value_heads": 12,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_cache": false,
+   "vocab_size": 32003
+ }
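These dimensions are what make this a ~68M-parameter model, which also matches the ~136 MB bfloat16 checkpoint below (about 2 bytes per parameter). A rough back-of-the-envelope count from the config values (untied embeddings, no attention biases, standard LLaMA gate/up/down MLP):

```python
vocab, hidden, layers, inter = 32003, 768, 2, 3072

embeddings = vocab * hidden           # input embedding matrix
lm_head = vocab * hidden              # output head (tie_word_embeddings is false)
attention = 4 * hidden * hidden       # q, k, v, o projections per layer
mlp = 3 * hidden * inter              # gate, up, down projections per layer
norms = 2 * hidden * layers + hidden  # per-layer RMSNorms plus final norm

total = embeddings + lm_head + layers * (attention + mlp) + norms
print(f"{total / 1e6:.1f}M parameters")  # ≈ 68.0M; × 2 bytes ≈ 136 MB in bf16
```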
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "do_sample": true,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.40.1"
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a37f641c8e998765e133e138943d55c33dbcc355b3c4b7c1492205579e5ada21
+ size 136072767
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end_of_text|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "32001": {
+       "content": "<|end_of_text|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32002": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|end_of_text|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false,
+   "use_fast": true
+ }
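This tokenizer config defines no `chat_template`, so `apply_chat_template` would not produce the ChatML format the model was trained on out of the box. Below is a sketch of attaching a ChatML template at load time; the template string is an assumption inferred from the axolotl config's `conversation: chatml` and the special tokens above, not something shipped in this commit:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")  # placeholder: local clone of this repo

# Hypothetical ChatML template; not part of the uploaded tokenizer_config.json.
tokenizer.chat_template = (
    "{% for message in messages %}"
    "{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```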
train.log ADDED
The diff for this file is too large to render. See raw diff