apsys committed on
Commit 2374038
1 Parent(s): d78ed99

more ref, ci, debug

Files changed (1)
  1. app.py +31 -17
app.py CHANGED
@@ -28,6 +28,15 @@ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REP
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
 
+loggedin = False
+
+def check_login(profile: gr.OAuthProfile | None) -> bool:
+    global loggedin
+    if profile is None:
+        loggedin = False
+        return False
+    loggedin = True
+    return True
 
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
@@ -94,6 +103,8 @@ demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+    login_button = gr.LoginButton(elem_id="oauth-button")
+
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
@@ -141,7 +152,7 @@ with demo:
             # headers=EVAL_COLS,
             # datatype=EVAL_TYPES,
            # row_count=5,
-            # )
+
            with gr.Row():
                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
 
@@ -174,22 +185,25 @@ with demo:
                )
                base_model_name_textbox = gr.Textbox(label="Организация")
                ans_file = gr.File(label="Arena Hard Answer File", file_types=["json","jsonl"])
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                    ans_file
-                ],
-                submission_result,
-            )
+            login_button.click(check_login)
+            if not loggedin:
+                gr.Markdown("Please log in to submit an evaluation.")
+            else:
+                submit_button = gr.Button("Submit Eval")
+                submission_result = gr.Markdown()
+                submit_button.click(
+                    add_new_eval,
+                    [
+                        model_name_textbox,
+                        base_model_name_textbox,
+                        revision_name_textbox,
+                        precision,
+                        weight_type,
+                        model_type,
+                        ans_file,
+                    ],
+                    submission_result,
+                )
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
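
Note on the mechanism (not part of the commit): on a Space with hf_oauth enabled, Gradio passes a gr.OAuthProfile (or None) into any handler whose signature declares such a parameter, which is what the new check_login handler relies on. Below is a minimal, self-contained sketch of that pattern for reference; the names toggle_submit and login_hint are illustrative and do not come from app.py.

# Sketch only: assumes a Space with hf_oauth enabled; component names are illustrative.
import gradio as gr

demo = gr.Blocks()
with demo:
    gr.LoginButton()
    login_hint = gr.Markdown("Please log in to submit an evaluation.")
    submit_button = gr.Button("Submit Eval", visible=False)

    def toggle_submit(profile: gr.OAuthProfile | None):
        # Gradio injects the OAuth profile based on the type annotation;
        # profile is None when the visitor has not logged in.
        logged_in = profile is not None
        return gr.update(visible=not logged_in), gr.update(visible=logged_in)

    # Runs on every page load, so the check happens per session rather than
    # once when the Blocks graph is built.
    demo.load(toggle_submit, inputs=None, outputs=[login_hint, submit_button])

demo.launch()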