alozowski committed on
Commit
37e5956
1 Parent(s): 36a18e0

Remove unused imports and script

Files changed (2):
  1. app.py +0 -8
  2. src/tools/plots.py +0 -152
app.py CHANGED
@@ -17,9 +17,7 @@ from src.display.about import (
     CITATION_BUTTON_LABEL,
     CITATION_BUTTON_TEXT,
     EVALUATION_QUEUE_TEXT,
-    FAQ_TEXT,
     INTRODUCTION_TEXT,
-    LLM_BENCHMARKS_TEXT,
     TITLE,
 )
 from src.display.css_html_js import custom_css
@@ -48,7 +46,6 @@ from src.envs import (
 )
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
-from src.tools.plots import create_metric_plot_obj, create_plot_df, create_scores_df
 from src.voting.vote_system import VoteManager, run_scheduler
 
 # Configure logging
@@ -169,11 +166,6 @@ LEADERBOARD_DF, eval_queue_dfs = init_space()
 finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = eval_queue_dfs
 
 
-# Data processing for plots now only on demand in the respective Gradio tab
-def load_and_create_plots():
-    plot_df = create_plot_df(create_scores_df(LEADERBOARD_DF))
-    return plot_df
-
 # Function to check if a user is logged in
 def check_login(profile: gr.OAuthProfile | None) -> bool:
     if profile is None:
 
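For context, the dropped load_and_create_plots helper appears to have been the only consumer of src/tools/plots in app.py, chaining create_scores_df and create_plot_df over the module-level LEADERBOARD_DF. Below is a minimal sketch of how that pipeline would have been driven from a Gradio tab before this commit; the tab label, metric list, and gr.Plot call are illustrative assumptions, not code from this diff.

# Sketch of the removed plotting pipeline (pre-commit state).
# Only load_and_create_plots() itself appears in this diff; the rest is
# hypothetical wiring. LEADERBOARD_DF comes from init_space() in app.py.
import gradio as gr

from src.tools.plots import create_metric_plot_obj, create_plot_df, create_scores_df


def load_and_create_plots():
    # create_scores_df: dict of per-metric "best score so far" DataFrames;
    # create_plot_df: one long-format DataFrame keyed by a "task" column.
    plot_df = create_plot_df(create_scores_df(LEADERBOARD_DF))
    return plot_df


with gr.Blocks() as demo:
    with gr.Tab("Plots"):  # hypothetical tab name
        plot_df = load_and_create_plots()
        # Metric names must match the "task" values produced upstream,
        # i.e. the AutoEvalColumn col_names; "Average" is a placeholder.
        gr.Plot(create_metric_plot_obj(plot_df, ["Average"], title="Best score over time"))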
src/tools/plots.py DELETED
@@ -1,152 +0,0 @@
-import numpy as np
-import pandas as pd
-import plotly.express as px
-from plotly.graph_objs import Figure
-
-from src.display.utils import BENCHMARK_COLS, AutoEvalColumn, Task, Tasks
-# from src.display.utils import human_baseline_row as HUMAN_BASELINE
-from src.leaderboard.filter_models import FLAGGED_MODELS
-
-
-def create_scores_df(results_df: list[dict]) -> pd.DataFrame:
-    """
-    Generates a DataFrame containing the maximum scores until each date.
-
-    :param results_df: A DataFrame containing result information including metric scores and dates.
-    :return: A new DataFrame containing the maximum scores until each date for every metric.
-    """
-    # Step 1: Ensure 'date' is in datetime format and sort the DataFrame by it
-    results_df["date"] = pd.to_datetime(results_df["date"], format="mixed", utc=True)
-    results_df.sort_values(by="date", inplace=True)
-
-    # Step 2: Initialize the scores dictionary
-    scores = {k: [] for k in BENCHMARK_COLS + [AutoEvalColumn.average.name]}
-
-    # Step 3: Iterate over the rows of the DataFrame and update the scores dictionary
-    for task in [t.value for t in Tasks] + [Task("Average", "avg", AutoEvalColumn.average.name)]:
-        current_max = 0
-        last_date = ""
-        column = task.col_name
-        for _, row in results_df.iterrows():
-            current_model = row[AutoEvalColumn.fullname.name]
-            # We ignore models that are flagged/no longer on the hub/not finished
-            to_ignore = (
-                not row[AutoEvalColumn.still_on_hub.name]
-                or not row[AutoEvalColumn.not_flagged.name]
-                or current_model in FLAGGED_MODELS
-            )
-            if to_ignore:
-                continue
-
-            current_date = row[AutoEvalColumn.date.name]
-            current_score = row[task.col_name]
-
-            if current_score > current_max:
-                if current_date == last_date and len(scores[column]) > 0:
-                    scores[column][-1] = {"model": current_model, "date": current_date, "score": current_score}
-                else:
-                    scores[column].append({"model": current_model, "date": current_date, "score": current_score})
-                current_max = current_score
-                last_date = current_date
-
-    # Step 4: Return all dictionaries as DataFrames
-    return {k: pd.DataFrame(v) for k, v in scores.items()}
-
-
-def create_plot_df(scores_df: dict[str : pd.DataFrame]) -> pd.DataFrame:
-    """
-    Transforms the scores DataFrame into a new format suitable for plotting.
-
-    :param scores_df: A DataFrame containing metric scores and dates.
-    :return: A new DataFrame reshaped for plotting purposes.
-    """
-    # Initialize the list to store DataFrames
-    dfs = []
-    # Iterate over the cols and create a new DataFrame for each column
-    for col in BENCHMARK_COLS + [AutoEvalColumn.average.name]:
-        d = scores_df[col].reset_index(drop=True)
-        d["task"] = col
-        dfs.append(d)
-
-    # Concatenate all the created DataFrames
-    concat_df = pd.concat(dfs, ignore_index=True)
-
-    # # Sort values by 'date'
-    # concat_df.sort_values(by="date", inplace=True)
-    # concat_df.reset_index(drop=True, inplace=True)
-    # return concat_df
-
-
-def create_metric_plot_obj(df: pd.DataFrame, metrics: list[str], title: str) -> Figure:
-    """
-    Create a Plotly figure object with lines representing different metrics
-    and horizontal dotted lines representing human baselines.
-
-    :param df: The DataFrame containing the metric values, names, and dates.
-    :param metrics: A list of strings representing the names of the metrics
-        to be included in the plot.
-    :param title: A string representing the title of the plot.
-    :return: A Plotly figure object with lines representing metrics and
-        horizontal dotted lines representing human baselines.
-    """
-
-    # Filter the DataFrame based on the specified metrics
-    df = df[df["task"].isin(metrics)]
-
-    # Filter the human baselines based on the specified metrics
-    filtered_human_baselines = {k: v for k, v in HUMAN_BASELINE.items() if k in metrics}
-
-    # Create a line figure using plotly express with specified markers and custom data
-    fig = px.line(
-        df,
-        x="date",
-        y="score",
-        color="task",
-        markers=True,
-        custom_data=["task", "score", "model"],
-        title=title,
-    )
-
-    # Update hovertemplate for better hover interaction experience
-    fig.update_traces(
-        hovertemplate="<br>".join(
-            [
-                "Model Name: %{customdata[2]}",
-                "Metric Name: %{customdata[0]}",
-                "Date: %{x}",
-                "Metric Value: %{y}",
-            ]
-        )
-    )
-
-    # Update the range of the y-axis
-    fig.update_layout(yaxis_range=[0, 100])
-
-    # Create a dictionary to hold the color mapping for each metric
-    metric_color_mapping = {}
-
-    # Map each metric name to its color in the figure
-    for trace in fig.data:
-        metric_color_mapping[trace.name] = trace.line.color
-
-    # Iterate over filtered human baselines and add horizontal lines to the figure
-    for metric, value in filtered_human_baselines.items():
-        color = metric_color_mapping.get(metric, "blue")  # Retrieve color from mapping; default to blue if not found
-        location = "top left" if metric == "HellaSwag" else "bottom left"  # Set annotation position
-        # Add horizontal line with matched color and positioned annotation
-        fig.add_hline(
-            y=value,
-            line_dash="dot",
-            annotation_text=f"{metric} human baseline",
-            annotation_position=location,
-            annotation_font_size=10,
-            annotation_font_color=color,
-            line_color=color,
-        )
-
-    return fig
-
-
-# Example Usage:
-# human_baselines dictionary is defined.
-# chart = create_metric_plot_obj(scores_df, ["ARC", "HellaSwag", "MMLU", "TruthfulQA"], human_baselines, "Graph Title")
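Worth noting for anyone resurrecting this module: the deleted code was already broken in a few places, consistent with it being unused. create_plot_df has its return concat_df commented out, so it returns None; create_metric_plot_obj reads HUMAN_BASELINE, whose import is also commented out, so it would raise NameError; and the trailing example passes four arguments to a three-parameter function. Below is a hedged sketch of the intended end-to-end usage, assuming those commented-out lines are restored and with LEADERBOARD_DF taken from app.py's init_space().

# Hypothetical repaired usage of the deleted module, assuming the
# commented-out "return concat_df" and the HUMAN_BASELINE import are
# restored first; not runnable against the code exactly as deleted.
from src.tools.plots import create_metric_plot_obj, create_plot_df, create_scores_df

scores_df = create_scores_df(LEADERBOARD_DF)  # dict: metric name -> best-score-per-date DataFrame
plot_df = create_plot_df(scores_df)           # long-format DataFrame with a "task" column
chart = create_metric_plot_obj(
    plot_df,
    ["ARC", "HellaSwag", "MMLU", "TruthfulQA"],  # must match "task" values in plot_df
    "Graph Title",                               # title is the third and final parameter
)
chart.show()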