Create lock for global variables
app.py
CHANGED
@@ -102,6 +102,11 @@ def submit_results():
     leaderboard_server.save_pre_submit()
     leaderboard_server.update_leaderboard()
     gr.Info('Submission successful!')
+
+    with leaderboard_server.var_lock.ro:
+        leaderboard = leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL)
+        submission_ids = leaderboard_server.submission_ids
+
     return (
         gr.update(value='Pre-submit model', visible=True, interactive=True),
         gr.update(visible=False),
@@ -109,10 +114,10 @@ def submit_results():
         gr.update(visible=False),
         gr.update(visible=False),
         gr.update(visible=False),
-        gr.DataFrame(value=…
+        gr.DataFrame(value=leaderboard, visible=True),
         gr.update(visible=False),
-        gr.update(choices=…
-        gr.update(value=leaderboard_server.…
+        gr.update(choices=submission_ids),
+        gr.update(value=leaderboard_server.TASKS_CATEGORY_OVERALL),
     )


@@ -155,11 +160,12 @@ def hide_modal():

 def on_application_load():
     leaderboard_server.update_leaderboard()
-…
-…
-…
-…
-…
+    with leaderboard_server.var_lock.ro:
+        return (
+            gr.DataFrame(value=leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL), visible=True),
+            gr.update(choices=leaderboard_server.submission_ids),
+            gr.update(value=leaderboard_server.TASKS_CATEGORY_OVERALL),
+        )


 custom_css = """
@@ -228,15 +234,15 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:

     with gr.Row():
         category_of_tasks = gr.Dropdown(
-            choices=[leaderboard_server.…
-            value=leaderboard_server.…
+            choices=[leaderboard_server.TASKS_CATEGORY_OVERALL] + list(leaderboard_server.TASKS_CATEGORIES),
+            value=leaderboard_server.TASKS_CATEGORY_OVERALL,
             label="Category of benchmarks",
             interactive=True,
         )

     with gr.Row():
         results_table = gr.DataFrame(
-            leaderboard_server.get_leaderboard(category=leaderboard_server.…
+            leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL),
             interactive=False,
             label=None,
             visible=True,
@@ -252,11 +258,13 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:

         with gr.TabItem('Model details'):
             gr.Markdown(MORE_DETAILS_MARKDOWN)
-…
-…
-…
-…
-…
+
+            with leaderboard_server.var_lock.ro:
+                detail_dropdown = gr.Dropdown(
+                    choices=leaderboard_server.submission_ids,  # TODO: team_name/model_name
+                    label="Select model",
+                    interactive=True,
+                )

             with gr.Row():
                 model_description = gr.Text(value='', label='Model description', visible=False, interactive=False)
@@ -327,7 +335,7 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:
     pre_submission_btn.click(
         fn=on_submit_pressed,
         outputs=[pre_submission_btn],
-    ).then(
+    ).then(  # TODO: Find out why this doesn't run concurrently.
         fn=process_submission,
         inputs=list(submission_inputs.values()),
         outputs=[
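
Both submit_results and on_application_load now follow the same shape: snapshot the shared state while holding the read lock, then build the Gradio updates from that snapshot. A minimal sketch of the pattern (a hypothetical refresh_view handler; it assumes the module-level leaderboard_server object that app.py already defines):

import gradio as gr

def refresh_view():
    # Take a consistent snapshot under the read lock; get_leaderboard()
    # acquires var_lock.ro internally too, which is fine because the
    # reader count simply increments again for a nested reader.
    with leaderboard_server.var_lock.ro:
        leaderboard = leaderboard_server.get_leaderboard(category=leaderboard_server.TASKS_CATEGORY_OVERALL)
        submission_ids = list(leaderboard_server.submission_ids)
    # Build the UI updates after the lock is released, so rendering
    # never keeps a pending writer waiting.
    return (
        gr.DataFrame(value=leaderboard, visible=True),
        gr.update(choices=submission_ids),
        gr.update(value=leaderboard_server.TASKS_CATEGORY_OVERALL),
    )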
server.py
CHANGED
@@ -102,6 +102,46 @@ def check_significance(model_a_path, model_b_path):

 pre_submit_lock = Lock()

+class _ReadLock:
+    def __init__(self, lock):
+        self._lock = lock
+        self.reading = 0
+
+    def __enter__(self):
+        with self._lock:
+            self.reading += 1
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        with self._lock:
+            self.reading -= 1
+
+class ReadWriteLock:
+    """
+    A lock that ensures that nobody is reading while a write is in progress and that only one writer writes at a time.
+    """
+
+    def __init__(self):
+        self._lock = Lock()
+        self.ro = _ReadLock(self._lock)
+        self.rw = self
+
+    def __enter__(self):
+        self._lock.acquire()
+        while True:
+            reading = self.ro.reading
+            if reading > 0:
+                self._lock.release()
+                time.sleep(1)
+                self._lock.acquire()
+            elif reading < 0:
+                self._lock.release()
+                raise RuntimeError()
+            else:
+                return
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self._lock.release()
+
 class LeaderboardServer:
     def __init__(self):
         self.server_address = REPO
@@ -112,13 +152,16 @@ class LeaderboardServer:
             token=HF_TOKEN,
             local_dir="./",
         )
-        self.…
-        self.…
-        self.…
-
+        self.TASKS_METADATA = json.load(open(TASKS_METADATA_PATH))
+        self.TASKS_CATEGORIES = {self.TASKS_METADATA[task]["category"] for task in self.TASKS_METADATA}
+        self.TASKS_CATEGORY_OVERALL = "Overall"
+
+        self.var_lock = ReadWriteLock()
         self.submission_ids = set()
+        self.submission_id_to_file = {}  # Map submission ids to file paths
         self.fetch_existing_models()
         self.tournament_results = self.load_tournament_results()
+
         self.pre_submit_lock = pre_submit_lock
         self.pre_submit = None

@@ -130,7 +173,9 @@ class LeaderboardServer:
             local_dir="./",
         )
         self.fetch_existing_models()
-        …
+
+        with self.var_lock.rw:
+            self.tournament_results = self.load_tournament_results()

     def load_tournament_results(self):
         metadata_rank_paths = os.path.join(self.local_leaderboard, "tournament.json")
@@ -148,171 +193,174 @@ class LeaderboardServer:
             if metadata is None:
                 continue
             submission_id = metadata["submission_id"]
-…
-…
-…
+
+            with self.var_lock.rw:
+                self.submission_ids.add(submission_id)
+                self.submission_id_to_file[submission_id] = submission_file

     def get_leaderboard(self, pre_submit=None, category=None):
-…
-…
+        with self.var_lock.ro:
+            tournament_results = pre_submit.tournament_results if pre_submit else self.tournament_results
+            category = category if category else self.TASKS_CATEGORY_OVERALL

-…
-…
-…
-…
-…
-…
-…
-…
-…
+        if len(tournament_results) == 0:
+            return pd.DataFrame(columns=['No submissions yet'])
+        else:
+            processed_results = []
+            for submission_id in tournament_results.keys():
+                path = self.submission_id_to_file.get(submission_id)
+                if path is None:
+                    if pre_submit and submission_id == pre_submit.submission_id:
+                        data = json.load(open(pre_submit.file))
+                    else:
+                        raise gr.Error(f"Internal error: Submission [{submission_id}] not found")
+                elif path:
+                    data = json.load(open(path))
                 else:
-                raise gr.Error(f"…
-            elif path:
-                data = json.load(open(path))
-            else:
-                raise gr.Error(f"Submission [{submission_id}] not found")
-
-            if submission_id != data["metadata"]["submission_id"]:
-                raise gr.Error(f"Proper submission [{submission_id}] not found")
-
-            local_results = {}
-            win_score = {}
-            visible_metrics_map_word_to_header = {}
-            for task in self.tasks_metadata.keys():
+                    raise gr.Error(f"Submission [{submission_id}] not found")

-…
-…
-…
-…
-…
-…
-…
-                for competitor_id in tournament_results[submission_id].keys() - {submission_id}:  # without self
-                    num_of_competitors += 1
-                    if tournament_results[submission_id][competitor_id][task]:
-                        num_of_wins += 1
-                task_score = num_of_wins / num_of_competitors * 100 if num_of_competitors > 0 else 100
-                win_score.setdefault(task_category, []).append(task_score)
+                if submission_id != data["metadata"]["submission_id"]:
+                    raise gr.Error(f"Proper submission [{submission_id}] not found")
+
+                local_results = {}
+                win_score = {}
+                visible_metrics_map_word_to_header = {}
+                for task in self.TASKS_METADATA.keys():

-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
+                    task_category = self.TASKS_METADATA[task]["category"]
+                    if category not in (self.TASKS_CATEGORY_OVERALL, task_category):
+                        continue
+                    else:
+                        # tournament_results
+                        num_of_competitors = 0
+                        num_of_wins = 0
+                        for competitor_id in tournament_results[submission_id].keys() - {submission_id}:  # without self
+                            num_of_competitors += 1
+                            if tournament_results[submission_id][competitor_id][task]:
+                                num_of_wins += 1
+                        task_score = num_of_wins / num_of_competitors * 100 if num_of_competitors > 0 else 100
+                        win_score.setdefault(task_category, []).append(task_score)
+
+                        if category == task_category:
+                            local_results[task] = task_score
+                            for metric in VISIBLE_METRICS:
+                                visible_metrics_map_word_to_header[task + "_" + metric] = self.TASKS_METADATA[task]["abbreviation"] + " " + metric
+                                metric_value = data['results'][task].get(metric)
+                                if metric_value is not None:
+                                    local_results[task + "_" + metric] = metric_value * 100
+                                    break  # Only the first metric of every task
+
+
                 for c in win_score:
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
+                    win_score[c] = sum(win_score[c]) / len(win_score[c])
+
+                if category == self.TASKS_CATEGORY_OVERALL:
+                    for c in win_score:
+                        local_results[c] = win_score[c]
+                    local_results["average_score"] = sum(win_score.values()) / len(win_score)
+                else:
+                    local_results["average_score"] = win_score[category]
+
+                model_link = data["metadata"]["link_to_model"]
+                model_title = data["metadata"]["team_name"] + "/" + data["metadata"]["model_name"]
+                model_title_abbr = self.abbreviate(data["metadata"]["team_name"], 14) + "/" + self.abbreviate(data["metadata"]["model_name"], 14)
+                local_results["model"] = f'<a href={xmlQuoteAttr(model_link)} title={xmlQuoteAttr(model_title)}>{xmlEscape(model_title_abbr, MARKDOWN_SPECIAL_CHARACTERS)}</a>'
+                release = data["metadata"].get("submission_timestamp")
+                release = time.strftime("%Y-%m-%d", time.gmtime(release)) if release else "N/A"
+                local_results["release"] = release
+                local_results["model_type"] = data["metadata"]["model_type"]
+                local_results["parameters"] = data["metadata"]["parameters"]
+
+                if pre_submit and submission_id == pre_submit.submission_id:
+                    processed_results.insert(0, local_results)
+                else:
+                    processed_results.append(local_results)
+            dataframe = pd.DataFrame.from_records(processed_results)

-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-        attributes_map_word_to_header.update(extra_attributes_map_word_to_header)
-        attributes_map_word_to_header.update(visible_metrics_map_word_to_header)
-        dataframe = dataframe.rename(
-            columns=attributes_map_word_to_header
-        )
-        return dataframe
+            extra_attributes_map_word_to_header = {
+                "model": "Model",
+                "release": "Release",
+                "average_score": "Average ⬆️",
+                "team_name": "Team name",
+                "model_name": "Model name",
+                "model_type": "Type",
+                "parameters": "# θ (B)",
+                "input_length": "Input length (# tokens)",
+                "precision": "Precision",
+                "description": "Description",
+                "link_to_model": "Link to model"
+            }
+            first_attributes = [
+                "model",
+                "release",
+                "model_type",
+                "parameters",
+                "average_score",
+            ]
+            df_order = [
+                key
+                for key in dict.fromkeys(
+                    first_attributes
+                    + list(self.TASKS_METADATA.keys())
+                    + list(dataframe.columns)
+                ).keys()
+                if key in dataframe.columns
+            ]
+            dataframe = dataframe[df_order]
+            attributes_map_word_to_header = {key: value["abbreviation"] for key, value in self.TASKS_METADATA.items()}
+            attributes_map_word_to_header.update(extra_attributes_map_word_to_header)
+            attributes_map_word_to_header.update(visible_metrics_map_word_to_header)
+            dataframe = dataframe.rename(
+                columns=attributes_map_word_to_header
+            )
+            return dataframe

     def start_tournament(self, new_submission_id, new_model_file):
-…
-…
-…
-…
-…
-…
-        rest_of_competitors = list(self.submission_ids - {new_submission_id})  # without self
-        num_of_competitors = len(rest_of_competitors)
-
-        result_url = {}
-        result_inverse_url = {}
-
-        while rest_of_competitors:
-            next_competitors = []
-            while rest_of_competitors:
-                if len(next_competitors) < 5:  # 5*2==10 tasks
-                    next_competitors.append(rest_of_competitors.pop())
-                else:
-                    break
+        with self.var_lock.ro:
+            new_tournament = copy.deepcopy(self.tournament_results)
+            new_tournament[new_submission_id] = {}
+            new_tournament[new_submission_id][new_submission_id] = {
+                task: False for task in self.TASKS_METADATA.keys()
+            }

-…
-…
-                result_inverse_url[competitor_id] = check_significance_send_task(self.submission_id_to_file[competitor_id], new_model_file)
+            rest_of_competitors = list(self.submission_ids - {new_submission_id})  # without self
+            num_of_competitors = len(rest_of_competitors)

-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
+        result_url = {}
+        result_inverse_url = {}
+
+        while rest_of_competitors:
+            next_competitors = []
+            while rest_of_competitors:
+                if len(next_competitors) < 5:  # 5*2==10 tasks
+                    next_competitors.append(rest_of_competitors.pop())
+                else:
+                    break

-…
-…
-…
-            new_tournament[competitor_id][new_submission_id] = {
-                task: data["significant"] for task, data in result_inverse.items()
-            }
+            for competitor_id in next_competitors:
+                result_url[competitor_id] = check_significance_send_task(new_model_file, self.submission_id_to_file[competitor_id])
+                result_inverse_url[competitor_id] = check_significance_send_task(self.submission_id_to_file[competitor_id], new_model_file)

-…
-…
+            while next_competitors:
+                competitor_id = next_competitors.pop(0)
+                result = check_significance_wait_for_result(result_url.pop(competitor_id))
+                result_inverse = check_significance_wait_for_result(result_inverse_url.pop(competitor_id))
+
+                if rest_of_competitors:
+                    new_competitor_id = rest_of_competitors.pop()
+                    next_competitors.append(new_competitor_id)
+                    result_url[new_competitor_id] = check_significance_send_task(new_model_file, self.submission_id_to_file[new_competitor_id])
+                    result_inverse_url[new_competitor_id] = check_significance_send_task(self.submission_id_to_file[new_competitor_id], new_model_file)
+
+                new_tournament[new_submission_id][competitor_id] = {
+                    task: data["significant"] for task, data in result.items()
+                }
+                new_tournament[competitor_id][new_submission_id] = {
+                    task: data["significant"] for task, data in result_inverse.items()
+                }
+
+                num_of_competitors_done = num_of_competitors - len(next_competitors) - len(rest_of_competitors)
+                gr.Info(f"Tournament: {num_of_competitors_done}/{num_of_competitors} = {(num_of_competitors_done) * 100 // num_of_competitors}% done")

         return new_tournament

@@ -416,7 +464,8 @@ class LeaderboardServer:
         self.pre_submit = None

     def get_model_detail(self, submission_id):
-        …
+        with self.var_lock.ro:
+            path = self.submission_id_to_file.get(submission_id)
         if path is None:
             raise gr.Error(f"Submission [{submission_id}] not found")
         data = json.load(open(path))
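
For reference, the semantics of the new ReadWriteLock: any number of readers may hold var_lock.ro concurrently, while a writer entering var_lock.rw (the lock object itself) takes the internal Lock and then polls once per second until the reader count drops to zero, so writes are exclusive of both readers and other writers. A self-contained sketch of the two modes (the classes are copied from the diff above; the shared dict and worker threads are illustrative, not from the repo):

import threading
import time
from threading import Lock

class _ReadLock:                      # as added in server.py above
    def __init__(self, lock):
        self._lock = lock
        self.reading = 0

    def __enter__(self):
        with self._lock:
            self.reading += 1

    def __exit__(self, exc_type, exc_value, traceback):
        with self._lock:
            self.reading -= 1

class ReadWriteLock:                  # as added in server.py above
    def __init__(self):
        self._lock = Lock()
        self.ro = _ReadLock(self._lock)
        self.rw = self

    def __enter__(self):
        self._lock.acquire()
        while True:
            reading = self.ro.reading
            if reading > 0:
                self._lock.release()
                time.sleep(1)
                self._lock.acquire()
            elif reading < 0:
                self._lock.release()
                raise RuntimeError()
            else:
                return

    def __exit__(self, exc_type, exc_value, traceback):
        self._lock.release()

var_lock = ReadWriteLock()
shared = {"submission_ids": set()}    # stand-in for LeaderboardServer state

def reader(name):
    with var_lock.ro:                 # readers run concurrently
        snapshot = set(shared["submission_ids"])
    print(name, "saw", snapshot)

def writer(new_id):
    with var_lock.rw:                 # waits until no readers remain; writers exclude each other
        shared["submission_ids"].add(new_id)

threads = [threading.Thread(target=writer, args=(i,)) for i in range(2)]
threads += [threading.Thread(target=reader, args=(f"r{i}",)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()

One consequence of the polling design: the writer releases the internal lock while it sleeps, so readers arriving during the wait are still admitted and a steady stream of readers can in principle starve a writer. For this app, where writes happen only on submission and leaderboard refresh, that appears to be an acceptable trade for simplicity.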
|