Clémentine committed on
Commit
0f4a719
β€’
1 Parent(s): d7daa68

corrected finetuned mentions

Browse files
Files changed (2) hide show
  1. app.py +2 -2
  2. src/assets/text_content.py +1 -1
app.py CHANGED
@@ -328,7 +328,7 @@ with demo:
328
  )
329
  filter_columns = gr.Radio(
330
  label="⏚ Filter model types",
331
- choices = ["all", "🟢 base", "🔶 instruction-tuned", "🟦 RL-tuned"],
332
  value="all",
333
  elem_id="filter-columns"
334
  )
@@ -404,7 +404,7 @@ with demo:
404
  False, label="Private", visible=not IS_PUBLIC
405
  )
406
  model_type = gr.Dropdown(
407
- choices=["pretrained", "fine-tuned", "with RL"],
408
  label="Model type",
409
  multiselect=False,
410
  value=None,
 
328
  )
329
  filter_columns = gr.Radio(
330
  label="⏚ Filter model types",
331
+ choices = ["all", "🟢 base", "🔶 finetuned", "🟦 RL-tuned"],
332
  value="all",
333
  elem_id="filter-columns"
334
  )
 
404
  False, label="Private", visible=not IS_PUBLIC
405
  )
406
  model_type = gr.Dropdown(
407
+ choices=["pretrained", "finetuned", "with RL"],
408
  label="Model type",
409
  multiselect=False,
410
  value=None,
src/assets/text_content.py CHANGED
@@ -63,7 +63,7 @@ INTRODUCTION_TEXT = f"""
63
 
64
 Other cool benchmarks for LLMs are developed at HuggingFace, go check them out: 🙋🤖 [human and GPT4 evals](https://huggingface.co/spaces/HuggingFaceH4/human_eval_llm_leaderboard), 🖥️ [performance benchmarks](https://huggingface.co/spaces/optimum/llm-perf-leaderboard)
65
 
66
- 🟢: Base pretrained model – 🔶: Instruction finetuned model – 🟦: Model finetuned with RL (read more details in "About" tab)
67
  """
68
 
69
  LLM_BENCHMARKS_TEXT = f"""
 
63
 
64
 Other cool benchmarks for LLMs are developed at HuggingFace, go check them out: 🙋🤖 [human and GPT4 evals](https://huggingface.co/spaces/HuggingFaceH4/human_eval_llm_leaderboard), 🖥️ [performance benchmarks](https://huggingface.co/spaces/optimum/llm-perf-leaderboard)
65
 
66
+ 🟢: Base pretrained model – 🔶: Finetuned model – 🟦: Model using RL (read more details in "About" tab)
67
  """
68
 
69
  LLM_BENCHMARKS_TEXT = f"""