Files changed (1)
  1. README.md +26 -13
README.md CHANGED
@@ -1,25 +1,25 @@
 ---
+language:
+- ca
+- es
+- en
 license: apache-2.0
-base_model: openlm-research/open_llama_3b_v2
-datasets:
-- xaviviro/oasst2_ca_gpt
-- xaviviro/oasst2_es_gpt
+library_name: transformers
 tags:
 - finetune
 - chatml
 - gpt4
 - catalan
-model-index:
-- name: FLAMA-0.5-3B
-  results: []
-library_name: transformers
+datasets:
+- xaviviro/oasst2_ca_gpt
+- xaviviro/oasst2_es_gpt
+base_model: openlm-research/open_llama_3b_v2
 widget:
 - text: "<|im_start|>user\n Qui va ser Isaac Newton?<|im_end|>\n<|im_start|>assistant\n"
 - text: "<|im_start|>user\n ¿Quién fue Isaac Newton?<|im_end|>\n<|im_start|>assistant\n"
-language:
-- ca
-- es
-- en
+model-index:
+- name: FLAMA-0.5-3B
+  results: []
 ---
 
 # FLAMA: Model 3B ChatML en Català i Castellà. Versió 0.5
@@ -96,4 +96,17 @@ Quien fué Isaac Newton?<|im_end|>
   journal={arXiv preprint arXiv:2302.13971},
   year={2023}
 }
-```
+```
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_xaviviro__FLAMA-0.5-3B)
+
+| Metric                          |Value|
+|---------------------------------|----:|
+|Avg.                             |39.23|
+|AI2 Reasoning Challenge (25-Shot)|37.97|
+|HellaSwag (10-Shot)              |67.65|
+|MMLU (5-Shot)                    |25.73|
+|TruthfulQA (0-shot)              |41.11|
+|Winogrande (5-shot)              |62.12|
+|GSM8k (5-shot)                   | 0.83|
+
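For reference, the Avg. row in the added table is consistent with the arithmetic mean of the six benchmark scores: (37.97 + 67.65 + 25.73 + 41.11 + 62.12 + 0.83) / 6 ≈ 39.23.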
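The widget prompts in the updated front matter also document the ChatML format the model expects at inference time. Below is a minimal sketch of querying the model with transformers, assuming the checkpoint is published as xaviviro/FLAMA-0.5-3B (as the leaderboard link suggests), that it loads through the standard Auto classes, and that the prompt is built by hand rather than through a tokenizer chat template; the generation settings are illustrative only.

```python
# Minimal sketch: prompting FLAMA-0.5-3B with the ChatML format shown in the
# widget examples. The model id is inferred from the leaderboard details link;
# generation settings are illustrative, not the author's.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "xaviviro/FLAMA-0.5-3B"  # assumption: repo name inferred from the links above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Build the ChatML prompt by hand, mirroring the widget text exactly.
prompt = (
    "<|im_start|>user\n Qui va ser Isaac Newton?<|im_end|>\n"
    "<|im_start|>assistant\n"
)

inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=200)

# Decode only the newly generated tokens, i.e. the assistant's answer.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```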