File size: 2,429 Bytes
9203553
 
 
 
 
 
 
 
 
 
adc61b7
9203553
04f40cd
9203553
 
5ee35e1
 
 
 
 
 
 
f2f6f8f
9203553
 
 
 
 
 
 
 
 
 
 
 
dba91fa
 
 
 
 
94b3985
dba91fa
 
 
9025860
 
4ff951b
 
 
 
9203553
 
 
 
dba91fa
 
cb0fe4c
8e400bb
dba91fa
aa8b23d
 
9203553
 
f2f6f8f
3728159
9025860
774056a
3728159
a9273cf
 
 
 
f2f6f8f
3728159
9025860
4ff951b
04f40cd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
from typing import List

# Mapping from raw column/metric identifiers (as they appear in submitted
# result files) to the human-readable headers shown in the leaderboard UI.
# NOTE(review): the key "EM commited" is intentionally misspelled — it must
# match the raw key produced upstream; only the displayed value is corrected.
COLUMNS_PRETTY = {
    "bleu": "BLEU",
    "chrf": "ChrF",
    "rouge1": "ROUGE-1",
    "rouge2": "ROUGE-2",
    "rougeL": "ROUGE-L",
    "bertscore": "BERTScore",
    "bertscore_normalized": "BERTScore (Normalized)",
    "model_name": "Model Name",
    "model_availability": "Availability",
    "urls": "Resources",
    "context_size": "Context Size",
    "submitted_by": "Submitted By",
    "EM infile": "EM infile",
    "EM inproject": "EM inproject",
    "EM common": "EM common",
    "EM commited": "EM committed",
    "EM non_informative": "EM non-informative",
    "EM random": "EM random",
    "EM all": "EM all",
    "dataset": "Dataset",
}


# Pretty metric names reported for each leaderboard task, in display order.
# Values match the pretty headers from COLUMNS_PRETTY where applicable.
METRICS_PER_TASK = dict(
    commit_message_generation=[
        "BLEU",
        "ChrF",
        "ROUGE-1",
        "ROUGE-2",
        "ROUGE-L",
        "BERTScore",
        "BERTScore (Normalized)",
    ],
    project_code_completion=[
        "EM infile",
        "EM inproject",
        "EM common",
        "EM committed",
        "EM non-informative",
        "EM random",
        "EM all",
    ],
    bug_localization=[
        "k",
        "P@k",
        "R@k",
        "f1-score",
    ],
)


# Default column used to sort each task's leaderboard table.
SORT_COLUMN_PER_TASK = dict(
    commit_message_generation="ROUGE-1",
    project_code_completion="Dataset",
    bug_localization="Model Name",
)


def get_columns_per_task(task_id: str) -> List[str]:
    """Return the ordered list of display column headers for *task_id*.

    Raises KeyError for a task id not present in METRICS_PER_TASK.
    """
    # Lookup first so an unknown task id raises KeyError, as before.
    metrics = METRICS_PER_TASK[task_id]
    # Per-task (leading columns, trailing columns) surrounding the metrics.
    layouts = {
        "project_code_completion": (
            ["Model Name", "Context Size", "Dataset"],
            ["Availability", "Submitted By", "Resources"],
        ),
        "bug_localization": (
            ["Model Name", "Availability", "Context Size", "Dataset"],
            ["Submitted By", "Resources"],
        ),
    }
    default_layout = (
        ["Model Name", "Context Size", "Availability"],
        ["Submitted By", "Resources"],
    )
    leading, trailing = layouts.get(task_id, default_layout)
    return leading + metrics + trailing


def get_types_per_task(task_id: str) -> List[str]:
    """Return Gradio datatypes aligned with get_columns_per_task's columns.

    For an unknown task id this falls back to the default layout with five
    numeric metric columns.

    NOTE(review): this uses `.get` with a fallback while get_columns_per_task
    raises KeyError for unknown ids — confirm whether the lenient behavior
    here is intentional before unifying.
    """
    # The fallback value is only ever used for its length: it yields five
    # "number" columns when the task has no registered metrics.
    metrics_per_task = METRICS_PER_TASK.get(task_id, (0, 0, 0, 0, 0))
    metric_types = ["number"] * len(metrics_per_task)
    if task_id == 'project_code_completion':
        return ["html", "markdown", "html"] + metric_types + ["markdown", "markdown", "html"]
    if task_id == 'bug_localization':
        return ["html", "markdown", "markdown", "html"] + metric_types + ["markdown", "html"]
    return ["html", "markdown", "markdown"] + metric_types + ["markdown", "html"]