idolezal committed
Commit ec6e1e5
1 Parent(s): f6916e3

Create lock for global variables

Files changed (2):
  1. app.py +25 -17
  2. server.py +206 -157
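
The diffs below route every access to the server's shared state (submission_ids, submission_id_to_file, tournament_results, the task metadata) through a new reader/writer lock: read-only code paths enter var_lock.ro, while code that mutates the state enters var_lock.rw. The following is a minimal standalone sketch of that pattern; the _ReadLock and ReadWriteLock classes are reproduced from the server.py diff below (docstring translated, comments added), while submission_ids, reader and writer here are illustrative stand-ins and not code from this commit.

import threading
import time
from threading import Lock


class _ReadLock:
    # Counts active readers; shares the writer's mutex (copied from server.py below).
    def __init__(self, lock):
        self._lock = lock
        self.reading = 0

    def __enter__(self):
        with self._lock:
            self.reading += 1

    def __exit__(self, exc_type, exc_value, traceback):
        with self._lock:
            self.reading -= 1


class ReadWriteLock:
    """
    A lock that ensures nobody is reading while a write is in progress and that only one writer writes at a time
    """

    def __init__(self):
        self._lock = Lock()
        self.ro = _ReadLock(self._lock)  # enter for shared, read-only access
        self.rw = self                   # enter for exclusive write access

    def __enter__(self):
        self._lock.acquire()
        while True:
            reading = self.ro.reading
            if reading > 0:
                # Readers are still active: release the mutex, back off, poll again.
                self._lock.release()
                time.sleep(1)
                self._lock.acquire()
            elif reading < 0:
                self._lock.release()
                raise RuntimeError()
            else:
                return  # no readers left; the mutex stays held until __exit__

    def __exit__(self, exc_type, exc_value, traceback):
        self._lock.release()


# Illustrative stand-ins for the LeaderboardServer's shared variables.
var_lock = ReadWriteLock()
submission_ids = set()


def reader(i):
    with var_lock.ro:  # several readers may hold .ro at the same time
        print(f"reader {i} sees {len(submission_ids)} submissions")
        time.sleep(0.2)


def writer():
    with var_lock.rw:  # blocks, polling once per second, until no reader is inside
        submission_ids.add("new-submission")


threads = [threading.Thread(target=reader, args=(i,)) for i in range(3)]
threads.append(threading.Thread(target=writer))
for t in threads:
    t.start()
for t in threads:
    t.join()
print(sorted(submission_ids))  # ['new-submission']

Note that the writer only polls the reader count once per second and releases the mutex while it sleeps, so a steady stream of readers can keep a writer waiting; that trade-off is inherent in the committed implementation.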
app.py CHANGED
@@ -102,6 +102,11 @@ def submit_results():
     leaderboard_server.save_pre_submit()
     leaderboard_server.update_leaderboard()
     gr.Info('Submission successful!')
+
+    with leaderboard_server.var_lock.ro:
+        leaderboard = leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL)
+        submission_ids = leaderboard_server.submission_ids
+
     return (
         gr.update(value='Pre-submit model', visible=True, interactive=True),
         gr.update(visible=False),
@@ -109,10 +114,10 @@ def submit_results():
         gr.update(visible=False),
         gr.update(visible=False),
         gr.update(visible=False),
-        gr.DataFrame(value=leaderboard_server.get_leaderboard(category=leaderboard_server.tasks_category_overall), visible=True),
+        gr.DataFrame(value=leaderboard, visible=True),
         gr.update(visible=False),
-        gr.update(choices=leaderboard_server.submission_ids),
-        gr.update(value=leaderboard_server.tasks_category_overall),
+        gr.update(choices=submission_ids),
+        gr.update(value=leaderboard_server.TASKS_CATEGORY_OVERALL),
     )


@@ -155,11 +160,12 @@ def hide_modal():

 def on_application_load():
     leaderboard_server.update_leaderboard()
-    return (
-        gr.DataFrame(value=leaderboard_server.get_leaderboard(category=leaderboard_server.tasks_category_overall), visible=True),
-        gr.update(choices=leaderboard_server.submission_ids),
-        gr.update(value=leaderboard_server.tasks_category_overall),
-    )
+    with leaderboard_server.var_lock.ro:
+        return (
+            gr.DataFrame(value=leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL), visible=True),
+            gr.update(choices=leaderboard_server.submission_ids),
+            gr.update(value=leaderboard_server.TASKS_CATEGORY_OVERALL),
+        )


 custom_css = """
@@ -228,15 +234,15 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:

     with gr.Row():
         category_of_tasks = gr.Dropdown(
-            choices=[leaderboard_server.tasks_category_overall] + list(leaderboard_server.tasks_categories),
-            value=leaderboard_server.tasks_category_overall,
+            choices=[leaderboard_server.TASKS_CATEGORY_OVERALL] + list(leaderboard_server.TASKS_CATEGORIES),
+            value=leaderboard_server.TASKS_CATEGORY_OVERALL,
             label="Category of benchmarks",
             interactive=True,
         )

     with gr.Row():
         results_table = gr.DataFrame(
-            leaderboard_server.get_leaderboard(category=leaderboard_server.tasks_category_overall),
+            leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL),
             interactive=False,
             label=None,
             visible=True,
@@ -252,11 +258,13 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:

         with gr.TabItem('Model details'):
             gr.Markdown(MORE_DETAILS_MARKDOWN)
-            detail_dropdown = gr.Dropdown(
-                choices=leaderboard_server.submission_ids, # TODO: team_name/model_name
-                label="Select model",
-                interactive=True,
-            )
+
+            with leaderboard_server.var_lock.ro:
+                detail_dropdown = gr.Dropdown(
+                    choices=leaderboard_server.submission_ids, # TODO: team_name/model_name
+                    label="Select model",
+                    interactive=True,
+                )

         with gr.Row():
             model_description = gr.Text(value='', label='Model description', visible=False, interactive=False)
@@ -327,7 +335,7 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:
     pre_submission_btn.click(
         fn=on_submit_pressed,
         outputs=[pre_submission_btn],
-    ).then(
+    ).then( # TODO: Find out why this does not run concurrently.
         fn=process_submission,
         inputs=list(submission_inputs.values()),
         outputs=[
server.py CHANGED
@@ -102,6 +102,46 @@ def check_significance(model_a_path, model_b_path):

 pre_submit_lock = Lock()

+class _ReadLock:
+    def __init__(self, lock):
+        self._lock = lock
+        self.reading = 0
+
+    def __enter__(self):
+        with self._lock:
+            self.reading += 1
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        with self._lock:
+            self.reading -= 1
+
+class ReadWriteLock:
+    """
+    A lock that ensures nobody is reading while a write is in progress and that only one writer writes at a time
+    """
+
+    def __init__(self):
+        self._lock = Lock()
+        self.ro = _ReadLock(self._lock)
+        self.rw = self
+
+    def __enter__(self):
+        self._lock.acquire()
+        while True:
+            reading = self.ro.reading
+            if reading > 0:
+                self._lock.release()
+                time.sleep(1)
+                self._lock.acquire()
+            elif reading < 0:
+                self._lock.release()
+                raise RuntimeError()
+            else:
+                return
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self._lock.release()
+
 class LeaderboardServer:
     def __init__(self):
         self.server_address = REPO
@@ -112,13 +152,16 @@ class LeaderboardServer:
             token=HF_TOKEN,
             local_dir="./",
         )
-        self.submission_id_to_file = {} # Map submission ids to file paths
-        self.tasks_metadata = json.load(open(TASKS_METADATA_PATH))
-        self.tasks_categories = {self.tasks_metadata[task]["category"] for task in self.tasks_metadata}
-        self.tasks_category_overall = "Overall"
+        self.TASKS_METADATA = json.load(open(TASKS_METADATA_PATH))
+        self.TASKS_CATEGORIES = {self.TASKS_METADATA[task]["category"] for task in self.TASKS_METADATA}
+        self.TASKS_CATEGORY_OVERALL = "Overall"
+
+        self.var_lock = ReadWriteLock()
         self.submission_ids = set()
+        self.submission_id_to_file = {} # Map submission ids to file paths
         self.fetch_existing_models()
         self.tournament_results = self.load_tournament_results()
+
         self.pre_submit_lock = pre_submit_lock
         self.pre_submit = None

@@ -130,7 +173,9 @@ class LeaderboardServer:
             local_dir="./",
         )
         self.fetch_existing_models()
-        self.tournament_results = self.load_tournament_results()
+
+        with self.var_lock.rw:
+            self.tournament_results = self.load_tournament_results()

     def load_tournament_results(self):
         metadata_rank_paths = os.path.join(self.local_leaderboard, "tournament.json")
@@ -148,171 +193,174 @@ class LeaderboardServer:
             if metadata is None:
                 continue
             submission_id = metadata["submission_id"]
-            self.submission_ids.add(submission_id)
-
-            self.submission_id_to_file[submission_id] = submission_file
+
+            with self.var_lock.rw:
+                self.submission_ids.add(submission_id)
+                self.submission_id_to_file[submission_id] = submission_file

     def get_leaderboard(self, pre_submit=None, category=None):
-        tournament_results = pre_submit.tournament_results if pre_submit else self.tournament_results
-        category = category if category else self.tasks_category_overall
+        with self.var_lock.ro:
+            tournament_results = pre_submit.tournament_results if pre_submit else self.tournament_results
+            category = category if category else self.TASKS_CATEGORY_OVERALL

-        if len(tournament_results) == 0:
-            return pd.DataFrame(columns=['No submissions yet'])
-        else:
-            processed_results = []
-            for submission_id in tournament_results.keys():
-                path = self.submission_id_to_file.get(submission_id)
-                if path is None:
-                    if pre_submit and submission_id == pre_submit.submission_id:
-                        data = json.load(open(pre_submit.file))
+            if len(tournament_results) == 0:
+                return pd.DataFrame(columns=['No submissions yet'])
+            else:
+                processed_results = []
+                for submission_id in tournament_results.keys():
+                    path = self.submission_id_to_file.get(submission_id)
+                    if path is None:
+                        if pre_submit and submission_id == pre_submit.submission_id:
+                            data = json.load(open(pre_submit.file))
+                        else:
+                            raise gr.Error(f"Internal error: Submission [{submission_id}] not found")
+                    elif path:
+                        data = json.load(open(path))
                     else:
-                        raise gr.Error(f"Internal error: Submission [{submission_id}] not found")
-                elif path:
-                    data = json.load(open(path))
-                else:
-                    raise gr.Error(f"Submission [{submission_id}] not found")
-
-                if submission_id != data["metadata"]["submission_id"]:
-                    raise gr.Error(f"Proper submission [{submission_id}] not found")
-
-                local_results = {}
-                win_score = {}
-                visible_metrics_map_word_to_header = {}
-                for task in self.tasks_metadata.keys():
+                        raise gr.Error(f"Submission [{submission_id}] not found")

-                    task_category = self.tasks_metadata[task]["category"]
-                    if category not in (self.tasks_category_overall, task_category):
-                        continue
-                    else:
-                        # tournament_results
-                        num_of_competitors = 0
-                        num_of_wins = 0
-                        for competitor_id in tournament_results[submission_id].keys() - {submission_id}: # without self
-                            num_of_competitors += 1
-                            if tournament_results[submission_id][competitor_id][task]:
-                                num_of_wins += 1
-                        task_score = num_of_wins / num_of_competitors * 100 if num_of_competitors > 0 else 100
-                        win_score.setdefault(task_category, []).append(task_score)
+                    if submission_id != data["metadata"]["submission_id"]:
+                        raise gr.Error(f"Proper submission [{submission_id}] not found")
+
+                    local_results = {}
+                    win_score = {}
+                    visible_metrics_map_word_to_header = {}
+                    for task in self.TASKS_METADATA.keys():

-                        if category == task_category:
-                            local_results[task] = task_score
-                            for metric in VISIBLE_METRICS:
-                                visible_metrics_map_word_to_header[task + "_" + metric] = self.tasks_metadata[task]["abbreviation"] + " " + metric
-                                metric_value = data['results'][task].get(metric)
-                                if metric_value is not None:
-                                    local_results[task + "_" + metric] = metric_value * 100
-                                    break # Only the first metric of every task
-
-
-                for c in win_score:
-                    win_score[c] = sum(win_score[c]) / len(win_score[c])
-
-                if category == self.tasks_category_overall:
+                        task_category = self.TASKS_METADATA[task]["category"]
+                        if category not in (self.TASKS_CATEGORY_OVERALL, task_category):
+                            continue
+                        else:
+                            # tournament_results
+                            num_of_competitors = 0
+                            num_of_wins = 0
+                            for competitor_id in tournament_results[submission_id].keys() - {submission_id}: # without self
+                                num_of_competitors += 1
+                                if tournament_results[submission_id][competitor_id][task]:
+                                    num_of_wins += 1
+                            task_score = num_of_wins / num_of_competitors * 100 if num_of_competitors > 0 else 100
+                            win_score.setdefault(task_category, []).append(task_score)
+
+                            if category == task_category:
+                                local_results[task] = task_score
+                                for metric in VISIBLE_METRICS:
+                                    visible_metrics_map_word_to_header[task + "_" + metric] = self.TASKS_METADATA[task]["abbreviation"] + " " + metric
+                                    metric_value = data['results'][task].get(metric)
+                                    if metric_value is not None:
+                                        local_results[task + "_" + metric] = metric_value * 100
+                                        break # Only the first metric of every task
+
+
                     for c in win_score:
-                        local_results[c] = win_score[c]
-                    local_results["average_score"] = sum(win_score.values()) / len(win_score)
-                else:
-                    local_results["average_score"] = win_score[category]
-
-                model_link = data["metadata"]["link_to_model"]
-                model_title = data["metadata"]["team_name"] + "/" + data["metadata"]["model_name"]
-                model_title_abbr = self.abbreviate(data["metadata"]["team_name"], 14) + "/" + self.abbreviate(data["metadata"]["model_name"], 14)
-                local_results["model"] = f'<a href={xmlQuoteAttr(model_link)} title={xmlQuoteAttr(model_title)}>{xmlEscape(model_title_abbr, MARKDOWN_SPECIAL_CHARACTERS)}</a>'
-                release = data["metadata"].get("submission_timestamp")
-                release = time.strftime("%Y-%m-%d", time.gmtime(release)) if release else "N/A"
-                local_results["release"] = release
-                local_results["model_type"] = data["metadata"]["model_type"]
-                local_results["parameters"] = data["metadata"]["parameters"]
+                        win_score[c] = sum(win_score[c]) / len(win_score[c])
+
+                    if category == self.TASKS_CATEGORY_OVERALL:
+                        for c in win_score:
+                            local_results[c] = win_score[c]
+                        local_results["average_score"] = sum(win_score.values()) / len(win_score)
+                    else:
+                        local_results["average_score"] = win_score[category]
+
+                    model_link = data["metadata"]["link_to_model"]
+                    model_title = data["metadata"]["team_name"] + "/" + data["metadata"]["model_name"]
+                    model_title_abbr = self.abbreviate(data["metadata"]["team_name"], 14) + "/" + self.abbreviate(data["metadata"]["model_name"], 14)
+                    local_results["model"] = f'<a href={xmlQuoteAttr(model_link)} title={xmlQuoteAttr(model_title)}>{xmlEscape(model_title_abbr, MARKDOWN_SPECIAL_CHARACTERS)}</a>'
+                    release = data["metadata"].get("submission_timestamp")
+                    release = time.strftime("%Y-%m-%d", time.gmtime(release)) if release else "N/A"
+                    local_results["release"] = release
+                    local_results["model_type"] = data["metadata"]["model_type"]
+                    local_results["parameters"] = data["metadata"]["parameters"]

-                if pre_submit and submission_id == pre_submit.submission_id:
-                    processed_results.insert(0, local_results)
-                else:
-                    processed_results.append(local_results)
-            dataframe = pd.DataFrame.from_records(processed_results)
-
-            extra_attributes_map_word_to_header = {
-                "model": "Model",
-                "release": "Release",
-                "average_score": "Average ⬆️",
-                "team_name": "Team name",
-                "model_name": "Model name",
-                "model_type": "Type",
-                "parameters": "# θ (B)",
-                "input_length": "Input length (# tokens)",
-                "precision": "Precision",
-                "description": "Description",
-                "link_to_model": "Link to model"
-            }
-            first_attributes = [
-                "model",
-                "release",
-                "model_type",
-                "parameters",
-                "average_score",
-            ]
-            df_order = [
-                key
-                for key in dict.fromkeys(
-                    first_attributes
-                    + list(self.tasks_metadata.keys())
-                    + list(dataframe.columns)
-                ).keys()
-                if key in dataframe.columns
-            ]
-            dataframe = dataframe[df_order]
-            attributes_map_word_to_header = {key: value["abbreviation"] for key, value in self.tasks_metadata.items()}
-            attributes_map_word_to_header.update(extra_attributes_map_word_to_header)
-            attributes_map_word_to_header.update(visible_metrics_map_word_to_header)
-            dataframe = dataframe.rename(
-                columns=attributes_map_word_to_header
-            )
-            return dataframe
+                    if pre_submit and submission_id == pre_submit.submission_id:
+                        processed_results.insert(0, local_results)
+                    else:
+                        processed_results.append(local_results)
+                dataframe = pd.DataFrame.from_records(processed_results)
+
+                extra_attributes_map_word_to_header = {
+                    "model": "Model",
+                    "release": "Release",
+                    "average_score": "Average ⬆️",
+                    "team_name": "Team name",
+                    "model_name": "Model name",
+                    "model_type": "Type",
+                    "parameters": "# θ (B)",
+                    "input_length": "Input length (# tokens)",
+                    "precision": "Precision",
+                    "description": "Description",
+                    "link_to_model": "Link to model"
+                }
+                first_attributes = [
+                    "model",
+                    "release",
+                    "model_type",
+                    "parameters",
+                    "average_score",
+                ]
+                df_order = [
+                    key
+                    for key in dict.fromkeys(
+                        first_attributes
+                        + list(self.TASKS_METADATA.keys())
+                        + list(dataframe.columns)
+                    ).keys()
+                    if key in dataframe.columns
+                ]
+                dataframe = dataframe[df_order]
+                attributes_map_word_to_header = {key: value["abbreviation"] for key, value in self.TASKS_METADATA.items()}
+                attributes_map_word_to_header.update(extra_attributes_map_word_to_header)
+                attributes_map_word_to_header.update(visible_metrics_map_word_to_header)
+                dataframe = dataframe.rename(
+                    columns=attributes_map_word_to_header
+                )
+                return dataframe

     def start_tournament(self, new_submission_id, new_model_file):
-        new_tournament = copy.deepcopy(self.tournament_results)
-        new_tournament[new_submission_id] = {}
-        new_tournament[new_submission_id][new_submission_id] = {
-            task: False for task in self.tasks_metadata.keys()
-        }
-
-        rest_of_competitors = list(self.submission_ids - {new_submission_id}) # without self
-        num_of_competitors = len(rest_of_competitors)
-
-        result_url = {}
-        result_inverse_url = {}
-
-        while rest_of_competitors:
-            next_competitors = []
-            while rest_of_competitors:
-                if len(next_competitors) < 5: # 5*2==10 tasks
-                    next_competitors.append(rest_of_competitors.pop())
-                else:
-                    break
+        with self.var_lock.ro:
+            new_tournament = copy.deepcopy(self.tournament_results)
+            new_tournament[new_submission_id] = {}
+            new_tournament[new_submission_id][new_submission_id] = {
+                task: False for task in self.TASKS_METADATA.keys()
+            }

-            for competitor_id in next_competitors:
-                result_url[competitor_id] = check_significance_send_task(new_model_file, self.submission_id_to_file[competitor_id])
-                result_inverse_url[competitor_id] = check_significance_send_task(self.submission_id_to_file[competitor_id], new_model_file)
+            rest_of_competitors = list(self.submission_ids - {new_submission_id}) # without self
+            num_of_competitors = len(rest_of_competitors)

-            while next_competitors:
-                competitor_id = next_competitors.pop(0)
-                result = check_significance_wait_for_result(result_url.pop(competitor_id))
-                result_inverse = check_significance_wait_for_result(result_inverse_url.pop(competitor_id))
-
-                if rest_of_competitors:
-                    new_competitor_id = rest_of_competitors.pop()
-                    next_competitors.append(new_competitor_id)
-                    result_url[new_competitor_id] = check_significance_send_task(new_model_file, self.submission_id_to_file[new_competitor_id])
-                    result_inverse_url[new_competitor_id] = check_significance_send_task(self.submission_id_to_file[new_competitor_id], new_model_file)
+            result_url = {}
+            result_inverse_url = {}
+
+            while rest_of_competitors:
+                next_competitors = []
+                while rest_of_competitors:
+                    if len(next_competitors) < 5: # 5*2==10 tasks
+                        next_competitors.append(rest_of_competitors.pop())
+                    else:
+                        break

-                new_tournament[new_submission_id][competitor_id] = {
-                    task: data["significant"] for task, data in result.items()
-                }
-                new_tournament[competitor_id][new_submission_id] = {
-                    task: data["significant"] for task, data in result_inverse.items()
-                }
+                for competitor_id in next_competitors:
+                    result_url[competitor_id] = check_significance_send_task(new_model_file, self.submission_id_to_file[competitor_id])
+                    result_inverse_url[competitor_id] = check_significance_send_task(self.submission_id_to_file[competitor_id], new_model_file)

-                num_of_competitors_done = num_of_competitors - len(next_competitors) - len(rest_of_competitors)
-                gr.Info(f"Tournament: {num_of_competitors_done}/{num_of_competitors} = {(num_of_competitors_done) * 100 // num_of_competitors}% done")
+                while next_competitors:
+                    competitor_id = next_competitors.pop(0)
+                    result = check_significance_wait_for_result(result_url.pop(competitor_id))
+                    result_inverse = check_significance_wait_for_result(result_inverse_url.pop(competitor_id))
+
+                    if rest_of_competitors:
+                        new_competitor_id = rest_of_competitors.pop()
+                        next_competitors.append(new_competitor_id)
+                        result_url[new_competitor_id] = check_significance_send_task(new_model_file, self.submission_id_to_file[new_competitor_id])
+                        result_inverse_url[new_competitor_id] = check_significance_send_task(self.submission_id_to_file[new_competitor_id], new_model_file)
+
+                    new_tournament[new_submission_id][competitor_id] = {
+                        task: data["significant"] for task, data in result.items()
+                    }
+                    new_tournament[competitor_id][new_submission_id] = {
+                        task: data["significant"] for task, data in result_inverse.items()
+                    }
+
+                    num_of_competitors_done = num_of_competitors - len(next_competitors) - len(rest_of_competitors)
+                    gr.Info(f"Tournament: {num_of_competitors_done}/{num_of_competitors} = {(num_of_competitors_done) * 100 // num_of_competitors}% done")

         return new_tournament

@@ -416,7 +464,8 @@ class LeaderboardServer:
         self.pre_submit = None

     def get_model_detail(self, submission_id):
-        path = self.submission_id_to_file.get(submission_id)
+        with self.var_lock.ro:
+            path = self.submission_id_to_file.get(submission_id)
         if path is None:
            raise gr.Error(f"Submission [{submission_id}] not found")
        data = json.load(open(path))