Minseok Bae committed on
Commit
1f26f6c
1 Parent(s): 2c24f05

Fixed the leaderboard filtering functionality. Modified the filter_models() function in app.py.

Files changed (2)
  1. app.py +8 -5
  2. src/populate.py +1 -1
app.py CHANGED
@@ -94,11 +94,14 @@ def filter_models(
     df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
 ) -> pd.DataFrame:
     # Show all models
-    if show_deleted:
-        filtered_df = df
-    else:  # Show only still on the hub models
-        filtered_df = df[df[utils.AutoEvalColumn.still_on_hub.name]]
-
+    filtered_df = df
+    # if show_deleted:
+    #     filtered_df = df
+    # else:  # Show only still on the hub models
+    #     filtered_df = df[df[utils.AutoEvalColumn.still_on_hub.name]]
+
+    filtered_df = df
+
     type_emoji = [t[0] for t in type_query]
     filtered_df = filtered_df.loc[df[utils.AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
     filtered_df = filtered_df.loc[df[utils.AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
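
For context, a minimal standalone sketch of what the patched filter_models() now does: the show_deleted / still_on_hub filter is effectively disabled, while the model-type and precision filters still apply. The column names ("T", "Precision", "still_on_hub") and the sample rows below are hypothetical stand-ins for utils.AutoEvalColumn, not taken from the repository.

# Illustrative sketch only; column names and sample data are assumptions.
import pandas as pd

def filter_models(df: pd.DataFrame, type_query: list, precision_query: list,
                  show_deleted: bool) -> pd.DataFrame:
    # After this commit, every model is kept regardless of show_deleted;
    # the old "still on the hub" check is commented out in app.py.
    filtered_df = df

    # The type and precision filters still behave as before.
    type_emoji = [t[0] for t in type_query]
    filtered_df = filtered_df.loc[df["T"].isin(type_emoji)]
    filtered_df = filtered_df.loc[df["Precision"].isin(precision_query + ["None"])]
    return filtered_df

toy = pd.DataFrame({
    "T": ["🟢", "🔶"],
    "Precision": ["float16", "None"],
    "still_on_hub": [True, False],
})
# Both rows survive, even though the second model is no longer on the hub.
print(filter_models(toy, type_query=["🟢 pretrained", "🔶 fine-tuned"],
                    precision_query=["float16"], show_deleted=False))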
src/populate.py CHANGED
@@ -13,7 +13,7 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     all_data_json = [v.to_dict() for v in raw_data]

     df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[utils.AutoEvalColumn.accuracy.name], ascending=False)
+    df = df.sort_values(by=[utils.AutoEvalColumn.hallucination_rate.name], ascending=True)
     df = df[cols].round(decimals=2)

     # filter out if any of the benchmarks have not been produced
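
For reference, a minimal sketch of the new sort order in get_leaderboard_df(): the leaderboard is now ranked by hallucination rate ascending (lower is better) instead of accuracy descending. The "Hallucination Rate" column label and the sample scores are hypothetical placeholders for utils.AutoEvalColumn.hallucination_rate.name.

# Illustrative sketch only; the column label and values are assumptions.
import pandas as pd

records = [
    {"model": "model-a", "Hallucination Rate": 0.1234},
    {"model": "model-b", "Hallucination Rate": 0.0456},
]
df = pd.DataFrame.from_records(records)

# Ascending sort: the model with the lowest hallucination rate ranks first,
# unlike the old accuracy column, which was sorted descending.
df = df.sort_values(by=["Hallucination Rate"], ascending=True).round(decimals=2)
print(df)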