Spaces: Runtime error
Refactor init_leaderboard function to improve dropdown UI and add search functionality
app.py CHANGED

@@ -70,23 +70,31 @@ LEADERBOARD_DF, SUBSETS = get_leaderboard_df(RESULTS_REPO)
 
 def init_leaderboard(dataframes, subsets):
     subsets = list(subsets)
-
-    return Leaderboard(
-        value=dataframes,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
-        select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
-        ),
-        search_columns=[AutoEvalColumn.model.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            "Dataset Version",
-        ],
-        interactive=False,
-    )
+
+    with gr.Row():
+        selected_subset = gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=subsets[-1])
+        research_textbox = gr.Textbox(placeholder="🔍 Search Models... [press enter]", label="Filter Models by Name")
+
+    with gr.Row():
+        datatype = [c.type for c in fields(AutoEvalColumn)]
+        dataframe = gr.Dataframe(dataframes, datatype=datatype, type="pandas")
+
+    # return Leaderboard(
+    #     value=dataframes,
+    #     datatype=[c.type for c in fields(AutoEvalColumn)],
+    #     select_columns=SelectColumns(
+    #         default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
+    #         cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
+    #         label="Select Columns to Display:",
+    #     ),
+    #     search_columns=[AutoEvalColumn.model.name],
+    #     hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+    #     filter_columns=[
+    #         "Dataset Version",
+    #     ],
+    #     interactive=False,
+    # )
 
 demo = gr.Blocks(css=custom_css)
 with demo:

@@ -95,7 +103,7 @@ with demo:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏆 LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
-
+            init_leaderboard(LEADERBOARD_DF, SUBSETS)
 
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
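The commit adds the subset dropdown and the model-search textbox, but the hunk does not show the event handlers that make them filter the table. Below is a minimal, self-contained sketch of how that wiring could look in Gradio; it is not the Space's code. The dropdown's change event and the textbox's submit event both recompute the table and re-render the gr.Dataframe. DEMO_DF, SUBSETS, filter_df, and the "model" / "Dataset Version" column names are illustrative assumptions; in app.py the real columns come from AutoEvalColumn and the DataFrame returned by get_leaderboard_df.

import gradio as gr
import pandas as pd

# Toy stand-ins for LEADERBOARD_DF and SUBSETS; the column names are assumptions.
DEMO_DF = pd.DataFrame(
    {
        "model": ["model-a", "model-b", "model-c"],
        "Dataset Version": ["2024-06", "2024-06", "2024-07"],
        "score": [71.2, 65.4, 80.1],
    }
)
SUBSETS = ["2024-06", "2024-07"]

def filter_df(subset, query):
    # Keep rows for the chosen subset, then apply a case-insensitive
    # substring match on the model column when a search string was entered.
    df = DEMO_DF[DEMO_DF["Dataset Version"] == subset]
    if query:
        df = df[df["model"].str.contains(query, case=False, na=False)]
    return df

with gr.Blocks() as demo:
    with gr.Row():
        selected_subset = gr.Dropdown(choices=SUBSETS, label="Select Dataset Subset", value=SUBSETS[-1])
        research_textbox = gr.Textbox(placeholder="🔍 Search Models... [press enter]", label="Filter Models by Name")
    with gr.Row():
        dataframe = gr.Dataframe(filter_df(SUBSETS[-1], ""), type="pandas")

    # Re-render the table when the subset changes or a search is submitted.
    selected_subset.change(filter_df, inputs=[selected_subset, research_textbox], outputs=dataframe)
    research_textbox.submit(filter_df, inputs=[selected_subset, research_textbox], outputs=dataframe)

if __name__ == "__main__":
    demo.launch()

Because filter_df only reads plain Python objects, the same handlers could live inside init_leaderboard and close over its dataframes argument, which keeps the filtering server-side in pandas and matches the type="pandas" Dataframe the commit creates.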