Vimek committed on
Commit
c9685b9
1 Parent(s): 84b1cd1

opus-mt-ja-pl-pop-tests

README.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ license: apache-2.0
+ base_model: Helsinki-NLP/opus-mt-ja-pl
+ tags:
+ - generated_from_trainer
+ datasets:
+ - tatoeba
+ metrics:
+ - bleu
+ model-index:
+ - name: opus_model
+   results:
+   - task:
+       name: Sequence-to-sequence Language Modeling
+       type: text2text-generation
+     dataset:
+       name: tatoeba
+       type: tatoeba
+       config: ja-pl
+       split: train
+       args: ja-pl
+     metrics:
+     - name: Bleu
+       type: bleu
+       value: 34.4952
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # opus_model
+
+ This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ja-pl](https://huggingface.co/Helsinki-NLP/opus-mt-ja-pl) on the tatoeba dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.1164
+ - Bleu: 34.4952
+ - Gen Len: 9.442
+ - Meteor: 0.5692
+ - Chrf: 53.728
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 8
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | Meteor | Chrf |
+ |:-------------:|:-----:|:------:|:---------------:|:-------:|:-------:|:------:|:-------:|
+ | 2.5658 | 1.0 | 56681 | 1.6196 | 21.6767 | 9.2915 | 0.4586 | 43.4725 |
+ | 2.3419 | 2.0 | 113362 | 1.4667 | 25.4469 | 9.3688 | 0.4916 | 46.3391 |
+ | 2.23 | 3.0 | 170043 | 1.3715 | 27.166 | 9.4895 | 0.5089 | 48.2252 |
+ | 2.1139 | 4.0 | 226724 | 1.2833 | 28.9288 | 9.4581 | 0.5244 | 49.4667 |
+ | 1.9825 | 5.0 | 283405 | 1.2170 | 31.3751 | 9.3229 | 0.5358 | 51.0005 |
+ | 1.8982 | 6.0 | 340086 | 1.1660 | 32.9805 | 9.4976 | 0.5563 | 52.5487 |
+ | 1.8198 | 7.0 | 396767 | 1.1305 | 34.0223 | 9.4436 | 0.5665 | 53.2912 |
+ | 1.7592 | 8.0 | 453448 | 1.1164 | 34.4952 | 9.442 | 0.5692 | 53.728 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.3
+ - Pytorch 2.1.2
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
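
The model card leaves the usage sections empty, so here is a minimal inference sketch for the checkpoint added in this commit. The local path `./opus_model` and the example sentence are placeholders, not part of the repository:

```python
from transformers import MarianMTModel, MarianTokenizer

# Hypothetical local path to this checkpoint (or the repo id once published).
model_dir = "./opus_model"

tokenizer = MarianTokenizer.from_pretrained(model_dir)
model = MarianMTModel.from_pretrained(model_dir)

# Translate a Japanese sentence into Polish.
inputs = tokenizer(["猫はテーブルの上で寝ています。"], return_tensors="pt", padding=True)
outputs = model.generate(**inputs)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```

Generation defaults (6-beam search, max length 512) are read from the `generation_config.json` committed below.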
config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "Helsinki-NLP/opus-mt-ja-pl",
+   "activation_dropout": 0.0,
+   "activation_function": "swish",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "MarianMTModel"
+   ],
+   "attention_dropout": 0.0,
+   "bad_words_ids": [
+     [
+       65000
+     ]
+   ],
+   "bos_token_id": 0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 512,
+   "decoder_attention_heads": 8,
+   "decoder_ffn_dim": 2048,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 65000,
+   "decoder_vocab_size": 65001,
+   "dropout": 0.1,
+   "encoder_attention_heads": 8,
+   "encoder_ffn_dim": 2048,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 0,
+   "extra_pos_embeddings": 65001,
+   "forced_eos_token_id": 0,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": 512,
+   "max_position_embeddings": 512,
+   "model_type": "marian",
+   "normalize_before": false,
+   "normalize_embedding": false,
+   "num_beams": 6,
+   "num_hidden_layers": 6,
+   "pad_token_id": 65000,
+   "scale_embedding": true,
+   "share_encoder_decoder_embeddings": true,
+   "static_position_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.3",
+   "use_cache": true,
+   "vocab_size": 65001
+ }
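
This `config.json` pins the MarianMT architecture: 6 encoder and 6 decoder layers, hidden size 512, a shared 65001-entry vocabulary with the pad token at id 65000. A small sketch for inspecting it programmatically, assuming the hypothetical local path `./opus_model` used above:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./opus_model")  # hypothetical local path

print(config.model_type)                             # "marian"
print(config.encoder_layers, config.decoder_layers)  # 6 6
print(config.d_model, config.encoder_ffn_dim)        # 512 2048
print(config.vocab_size, config.pad_token_id)        # 65001 65000
```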
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "bad_words_ids": [
+     [
+       65000
+     ]
+   ],
+   "bos_token_id": 0,
+   "decoder_start_token_id": 65000,
+   "eos_token_id": 0,
+   "forced_eos_token_id": 0,
+   "max_length": 512,
+   "num_beams": 6,
+   "pad_token_id": 65000,
+   "renormalize_logits": true,
+   "transformers_version": "4.42.3"
+ }
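
These defaults (6-beam search, max length 512, the pad id 65000 banned as a bad word) apply whenever `generate()` is called without explicit arguments. A sketch of inspecting and overriding them, again under the hypothetical local path:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("./opus_model")  # hypothetical local path
print(gen_config.num_beams, gen_config.max_length)  # 6 512

# Individual settings can also be overridden per call, e.g.:
#   model.generate(**inputs, num_beams=4, max_new_tokens=64)
```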
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:077a538f42756d5ae7d258b9e345ef3c58636253ae94b63ad48b0cbc1e221e0c
+ size 309965092
source.spm ADDED
Binary file (836 kB).
 
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
target.spm ADDED
Binary file (843 kB).
 
tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65000": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "separate_vocabs": false,
+   "source_lang": "jpn",
+   "sp_model_kwargs": {},
+   "target_lang": "pol",
+   "tokenizer_class": "MarianTokenizer",
+   "unk_token": "<unk>"
+ }
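
The tokenizer config ties the committed `source.spm` (Japanese) and `target.spm` (Polish) SentencePiece models together under a single `MarianTokenizer` with a shared vocabulary (`separate_vocabs: false`). A sketch of preparing a training-style batch, with a hypothetical path and example sentences:

```python
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("./opus_model")  # hypothetical local path

# Source text is segmented with source.spm; text_target is segmented with target.spm.
batch = tokenizer(
    ["ありがとうございます。"],
    text_target=["Dziękuję bardzo."],
    return_tensors="pt",
    padding=True,
)
print(batch["input_ids"][0])  # Japanese token ids, ending with </s> (id 0)
print(batch["labels"][0])     # Polish token ids used as decoder targets
```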
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47757aa0d6131562a8af6a21ac198866eede03c18262efcef3ae24a3d6199f06
+ size 5240
vocab.json ADDED
The diff for this file is too large to render. See raw diff