update
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +0 -35
- .gitignore +1 -6
- .pre-commit-config.yaml +0 -53
- Makefile +0 -13
- README.md +0 -46
- app.py +105 -199
- constants.py +42 -0
- eval-queue/.gitattributes +0 -55
- eval-queue/svbench/.gitattributes +0 -55
- eval-queue/svbench/Flash-VStream.json +0 -14
- eval-queue/svbench/GPT-4V.json +0 -14
- eval-queue/svbench/GPT-4o.json +0 -14
- eval-queue/svbench/Gemini 1.5 Pro.json +0 -14
- eval-queue/svbench/InternLM-XC2.5.json +0 -14
- eval-queue/svbench/InternVL2.json +0 -14
- eval-queue/svbench/LLaVA-NeXT-Video.json +0 -14
- eval-queue/svbench/MiniCPM-V 2.6.json +0 -14
- eval-queue/svbench/MovieChat.json +0 -14
- eval-queue/svbench/Qwen2-VL.json +0 -14
- eval-queue/svbench/ShareGPT4Video.json +0 -14
- eval-queue/svbench/TimeChat.json +0 -14
- eval-queue/svbench/VILA.json +0 -14
- eval-queue/svbench/Video-ChatGPT.json +0 -14
- eval-queue/svbench/Video-LLaVA.json +0 -14
- eval-queue/svbench/VideoLLaMA2.json +0 -14
- eval-results/.gitattributes +0 -55
- eval-results/svbench/.gitattributes +0 -55
- eval-results/svbench/Flash-VStream/results_Flash-VStream.json +0 -45
- eval-results/svbench/GPT-4V/results_GPT-4V.json +0 -45
- eval-results/svbench/GPT-4o/results_GPT-4o.json +0 -45
- eval-results/svbench/Gemini 1.5 Pro/results_Gemini 1.5 Pro.json +0 -45
- eval-results/svbench/InternLM-XC2.5/results_InternLM-XC2.5.json +0 -45
- eval-results/svbench/InternVL2/results_InternVL2.json +0 -45
- eval-results/svbench/LLaVA-NeXT-Video/results_LLaVA-NeXT-Video.json +0 -45
- eval-results/svbench/MiniCPM-V 2.6/results_MiniCPM-V 2.6.json +0 -45
- eval-results/svbench/MovieChat/results_MovieChat.json +0 -45
- eval-results/svbench/Qwen2-VL/results_Qwen2-VL.json +0 -45
- eval-results/svbench/ShareGPT4Video/results_ShareGPT4Video.json +0 -45
- eval-results/svbench/TimeChat/results_TimeChat.json +0 -45
- eval-results/svbench/VILA/results_VILA.json +0 -45
- eval-results/svbench/Video-ChatGPT/results_Video-ChatGPT.json +0 -45
- eval-results/svbench/Video-LLaVA/results_Video-LLaVA.json +0 -45
- eval-results/svbench/VideoLLaMA2/results_VideoLLaMA2.json +0 -45
- pyproject.toml +0 -13
- requirements.txt +0 -16
- src/about.py +0 -90
- src/display/css_html_js.py +0 -105
- src/display/formatting.py +0 -27
- src/display/utils.py +0 -125
- src/envs.py +0 -25
.gitattributes
DELETED
@@ -1,35 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1,11 +1,6 @@
-auto_evals/
 venv/
 __pycache__/
 .env
 .ipynb_checkpoints
 *ipynb
-.vscode/
-
-eval-queue-bk/
-eval-results-bk/
-logs/
+.vscode/
.pre-commit-config.yaml
DELETED
@@ -1,53 +0,0 @@
-# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-default_language_version:
-  python: python3
-
-ci:
-  autofix_prs: true
-  autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
-  autoupdate_schedule: quarterly
-
-repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.3.0
-    hooks:
-      - id: check-yaml
-      - id: check-case-conflict
-      - id: detect-private-key
-      - id: check-added-large-files
-        args: ['--maxkb=1000']
-      - id: requirements-txt-fixer
-      - id: end-of-file-fixer
-      - id: trailing-whitespace
-
-  - repo: https://github.com/PyCQA/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-        name: Format imports
-
-  - repo: https://github.com/psf/black
-    rev: 22.12.0
-    hooks:
-      - id: black
-        name: Format code
-        additional_dependencies: ['click==8.0.2']
-
-  - repo: https://github.com/charliermarsh/ruff-pre-commit
-    # Ruff version.
-    rev: 'v0.0.267'
-    hooks:
-      - id: ruff
Makefile
DELETED
@@ -1,13 +0,0 @@
-.PHONY: style format
-
-
-style:
-	python -m black --line-length 119 .
-	python -m isort .
-	ruff check --fix .
-
-
-quality:
-	python -m black --check --line-length 119 .
-	python -m isort --check-only .
-	ruff check .
README.md
DELETED
@@ -1,46 +0,0 @@
----
-title: SVBench
-emoji: 🥇
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-app_file: app.py
-pinned: true
-license: apache-2.0
-short_description: Leaderboard for SVBench
-sdk_version: 5.19.0
----
-
-# Start the configuration
-
-Most of the variables to change for a default leaderboard are in `src/env.py` (replace the path for your leaderboard) and `src/about.py` (for tasks).
-
-Results files should have the following format and be stored as json files:
-```json
-{
-    "config": {
-        "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
-        "model_name": "path of the model on the hub: org/model",
-        "model_sha": "revision on the hub",
-    },
-    "results": {
-        "task_name": {
-            "metric_name": score,
-        },
-        "task_name2": {
-            "metric_name": score,
-        }
-    }
-}
-```
-
-Request files are created automatically by this tool.
-
-If you encounter problem on the space, don't hesitate to restart it to remove the create eval-queue, eval-queue-bk, eval-results and eval-results-bk created folder.
-
-# Code logic for more complex edits
-
-You'll find
-- the main table' columns names and properties in `src/display/utils.py`
-- the logic to read all results and request files, then convert them in dataframe lines, in `src/leaderboard/read_evals.py`, and `src/populate.py`
-- the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
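For reference, a minimal sketch of producing a results file in the schema the deleted README documented. The model name and scores here are placeholders, not real SVBench results:

```python
import json

# Minimal sketch of a results file per the old README's schema.
# "org/Example-Model" and the scores are placeholders, not real results.
results = {
    "config": {
        "model_dtype": "torch.float16",
        "model_name": "org/Example-Model",  # hypothetical hub path
        "model_sha": "main",
    },
    "results": {
        "Dialogue_OS": {"acc": 0.50},
        "Streaming_OS": {"acc": 0.45},
    },
}

with open("results_Example-Model.json", "w") as f:
    json.dump(results, f, indent=4)
```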
app.py
CHANGED
@@ -1,213 +1,119 @@
+__all__ = ['block', 'make_clickable_model', 'make_clickable_user', 'get_submissions']
+import os
+
 import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
 import pandas as pd
-from apscheduler.schedulers.background import BackgroundScheduler
-from huggingface_hub import snapshot_download
-
-from src.about import (
-    CITATION_BUTTON_LABEL,
-    CITATION_BUTTON_TEXT,
-    EVALUATION_QUEUE_TEXT,
-    INTRODUCTION_TEXT,
-    LLM_BENCHMARKS_TEXT,
-    TITLE,
-)
-from src.display.css_html_js import custom_css
-from src.display.utils import (
-    BENCHMARK_COLS,
-    COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
-    AutoEvalColumn,
-    ModelType,
-    fields,
-    WeightType,
-    Precision
-)
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
-from src.populate import get_evaluation_queue_df, get_leaderboard_df
-from src.submission.submit import add_new_eval
-import pdb
-import os
 
+from constants import *
+
+global data_component, filter_component
+
+def get_baseline_df():
+    df = pd.read_csv(CSV_DIR)
+    df['Average'] = ((df['Streaming_OS'] + df['Dialogue_OS']) / 2).round(2)
+    df = df.sort_values(by="Average", ascending=False)
+    present_columns = ['Model'] + checkbox_group.value
+    df = df[present_columns]
+    return df
+
+def get_all_df():
+    df = pd.read_csv(CSV_DIR)
+    df['Average'] = ((df['Streaming_OS'] + df['Dialogue_OS']) / 2).round(2)
+    df = df.sort_values(by="Average", ascending=False)
+    return df
+
+def on_filter_model_size_method_change(selected_columns):
+    updated_data = get_all_df()
+
+    # columns:
+    selected_columns = [item for item in TASK_INFO if item in selected_columns]
+    present_columns = ['Model'] + selected_columns
+    updated_data = updated_data[present_columns]
+    updated_data = updated_data.sort_values(by=selected_columns[0], ascending=False)
+    updated_headers = present_columns
+    update_datatype = [DATA_TITILE_TYPE[COLUMN_NAMES.index(x)] for x in updated_headers]
-(
-        raise ValueError("Leaderboard DataFrame is empty or None.")
-
-    # Check for None in filter_columns
-    filter_columns = [
-        ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-    ]
-
-    return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
-        select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
-        ),
-        search_columns=[AutoEvalColumn.model.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            # ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            # ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            # ColumnFilter(
-            #     AutoEvalColumn.params.name,
-            #     type="slider",
-            #     min=0.01,
-            #     max=150,
-            #     label="Select the number of parameters (B)",
-            # ),
-            # ColumnFilter(
-            #     AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            # ),
-        ],
-        bool_checkboxgroup_label="Hide models",
+    filter_component = gr.components.Dataframe(
+        value=updated_data,
+        headers=updated_headers,
+        type="pandas",
+        datatype=update_datatype,
         interactive=False,
+        visible=True,
     )
 
+    return filter_component
+
+def search_model(query):
+    df = get_all_df()
+    filtered_df = df[df['Model'].str.contains(query, case=False)]
+    return filtered_df
+
+block = gr.Blocks()
+
+with block:
+    gr.Markdown(
+        LEADERBORAD_INTRODUCTION
+    )
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("
-
-        with gr.Column():
-            with gr.Row():
-                gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-        with gr.Column():
-            with gr.Accordion(
-                f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                open=False,
-            ):
-                with gr.Row():
-                    finished_eval_table = gr.components.Dataframe(
-                        value=finished_eval_queue_df,
-                        headers=EVAL_COLS,
-                        datatype=EVAL_TYPES,
-                        row_count=5,
-                    )
-            with gr.Accordion(
-                f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                open=False,
-            ):
-                with gr.Row():
-                    running_eval_table = gr.components.Dataframe(
-                        value=running_eval_queue_df,
-                        headers=EVAL_COLS,
-                        datatype=EVAL_TYPES,
-                        row_count=5,
-                    )
-
-            with gr.Accordion(
-                f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                open=False,
-            ):
-                with gr.Row():
-                    pending_eval_table = gr.components.Dataframe(
-                        value=pending_eval_queue_df,
-                        headers=EVAL_COLS,
-                        datatype=EVAL_TYPES,
-                        row_count=5,
-                    )
-        with gr.Row():
-            gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-        with gr.Row():
-            with gr.Column():
-                model_name_textbox = gr.Textbox(label="Model name")
-                revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                model_type = gr.Dropdown(
-                    choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                    label="Model type",
-                    multiselect=False,
-                    value=None,
-                    interactive=True,
+        with gr.TabItem("📊 SVBench", elem_id="svbench-tab-table", id=1):
+            with gr.Accordion("Citation", open=False):
+                citation_button = gr.Textbox(
+                    value=CITATION_BUTTON_TEXT,
+                    label=CITATION_BUTTON_LABEL,
+                    elem_id="citation-button",
+                    lines=10,
                 )
+
+            gr.Markdown(
+                TABLE_INTRODUCTION
+            )
 
-                interactive=True,
-                )
-                weight_type = gr.Dropdown(
-                    choices=[i.value.name for i in WeightType],
-                    label="Weights type",
-                    multiselect=False,
-                    value="Original",
-                    interactive=True,
-                )
-                base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-                submit_button = gr.Button("Submit Eval")
-                submission_result = gr.Markdown()
-                submit_button.click(
-                    add_new_eval,
-                    [
-                        model_name_textbox,
-                        base_model_name_textbox,
-                        revision_name_textbox,
-                        precision,
-                        weight_type,
-                        model_type,
-                    ],
-                    submission_result,
+            # selection for column part:
+            checkbox_group = gr.CheckboxGroup(
+                choices=TASK_INFO,
+                value=AVG_INFO,
+                label="Evaluation Dimension",
+                interactive=True,
             )
 
+            search_box = gr.Textbox(
+                label="Search Model",
+                placeholder="Enter model name",
+                interactive=True,
+            )
+
+            data_component = gr.components.Dataframe(
+                value=get_baseline_df,
+                headers=['Model', 'Type', 'Size'] + AVG_INFO,
+                type="pandas",
+                datatype=DATA_TITILE_TYPE,
+                interactive=False,
+                visible=True,
             )
 
+            checkbox_group.change(fn=on_filter_model_size_method_change, inputs=[checkbox_group], outputs=data_component)
+            search_box.change(fn=search_model, inputs=[search_box], outputs=data_component)
+
+        # table 2
+        with gr.TabItem("📝 About", elem_id="svbench-tab-table", id=2):
+            gr.Markdown(LEADERBORAD_INFO, elem_classes="markdown-text")
+
+        # table 3
+        with gr.TabItem("🚀 Submit here! ", elem_id="-tab-table", id=3):
+            gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
+
+    def refresh_data():
+        value1 = get_baseline_df()
+        return value1
+
+    with gr.Row():
+        data_run = gr.Button("Refresh")
+    with gr.Row():
+        result_download = gr.Button("Download Leaderboard")
+        file_download = gr.File(label="download the csv of leaderboard.", visible=False)
+    data_run.click(on_filter_model_size_method_change, inputs=[checkbox_group], outputs=data_component)
+    result_download.click(lambda: (CSV_DIR, gr.update(visible=True)), inputs=None, outputs=[file_download, file_download])
+
+block.launch()
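The rewritten `app.py` expects a flat `./svbench.csv` next to the app, with one row per model and the column names declared in `constants.py` (`Average` is recomputed from `Streaming_OS` and `Dialogue_OS` at load time). A minimal sketch of a compatible file, with placeholder values rather than real scores:

```python
import pandas as pd

# Minimal sketch of a compatible svbench.csv; the row values are placeholders,
# not real SVBench scores. Column names follow constants.py; "Average" is
# omitted because app.py derives it from Streaming_OS and Dialogue_OS.
columns = ["Model", "F/FPS", "Type", "Size",
           "Dialogue_SA", "Dialogue_CC", "Dialogue_LC", "Dialogue_TU",
           "Dialogue_IC", "Dialogue_OS",
           "Streaming_SA", "Streaming_CC", "Streaming_LC", "Streaming_TU",
           "Streaming_IC", "Streaming_OS"]
row = ["Example-Model", "1 FPS", "VideoLLM", "7B",
       0.40, 0.45, 0.50, 0.48, 0.39, 0.44,
       0.36, 0.44, 0.48, 0.39, 0.39, 0.39]
pd.DataFrame([row], columns=columns).to_csv("./svbench.csv", index=False)
```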
constants.py
ADDED
@@ -0,0 +1,42 @@
+import os
+# this is .py for store constants
+MODEL_INFO = ["Model"]
+TASK_INFO = ["F/FPS", "Type", "Size","Dialogue_SA", "Dialogue_CC", "Dialogue_LC", "Dialogue_TU", "Dialogue_IC", "Dialogue_OS", "Streaming_SA", "Streaming_CC", "Streaming_LC", "Streaming_TU", "Streaming_IC", "Streaming_OS", "Average"]
+AVG_INFO = ["Type", "Size", "F/FPS", "Dialogue_OS", "Streaming_OS","Average"]
+
+DATA_TITILE_TYPE = ['str', 'str', 'str', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number', 'number']
+
+CSV_DIR = "./svbench.csv"
+
+COLUMN_NAMES = MODEL_INFO + TASK_INFO
+
+LEADERBORAD_INTRODUCTION = """# SVBench Leaderboard
+
+Welcome to the leaderboard of the SVBench! 🏆
+
+SVBench is a benchmark specifically designed to evaluate the performance of Large Vision-Language Models (LVLMs) in long-context streaming video understanding tasks. This benchmark comprehensively assesses the models' capabilities in handling streaming videos through its unique temporal multi-turn question-answering chains. To facilitate research and development, SVBench provides a detailed leaderboard showcasing the performance results of over a dozen models on this benchmark. By ranking the models based on their performance on SVBench, users can quickly identify models that excel in specific tasks, thereby guiding subsequent research and applications.
+Detailed information about SVBench and the leaderboard can be accessed via the following link: [SVBench Benchmark](https://yzy-bupt.github.io/SVBench). The paper is available at: [SVBench Paper](https://arxiv.org/abs/2502.10810). Additionally, the related dataset is hosted on the Hugging Face platform, and researchers can access it at [SVBench Dataset](https://huggingface.co/datasets/yzy666/SVBench) for further experiments and model development.
+This leaderboard not only provides a fair competitive environment for current models but also serves as an important reference standard for future model improvements and innovations.
+"""
+
+SUBMIT_INTRODUCTION = """
+# Leaderboard submissions can be made through the following link: [Leaderboard Submission](https://docs.google.com/forms/d/e/1FAIpQLSfz62pGaIdKjmDbOP0vw74dXSiG-2ILJI7gdugdx4pfWSc42Q/viewform).
+"""
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = """
+@article{yang2025svbench,
+  title={SVBench: A Benchmark with Temporal Multi-Turn Dialogues for Streaming Video Understanding},
+  author={Yang, Zhenyu and Hu, Yuhang and Du, Zemin and Xue, Dizhan and Qian, Shengsheng and Wu, Jiahong and Yang, Fan and Dong, Weiming and Xu, Changsheng},
+  journal={arXiv preprint arXiv:2502.10810},
+  year={2025}
+}
+"""
+
+TABLE_INTRODUCTION = """
+"""
+
+LEADERBORAD_INFO = """
+Despite the significant advancements of Large Vision-Language Models (LVLMs) on established benchmarks, there remains a notable gap in suitable evaluation regarding their applicability in the emerging domain of long-context streaming video understanding. Current benchmarks for video understanding typically emphasize isolated single-instance text inputs and fail to evaluate the capacity to sustain temporal reasoning throughout the entire duration of video streams. To address these limitations, we introduce SVBench, a pioneering benchmark with temporal multi-turn question-answering chains specifically designed to thoroughly assess the capabilities of streaming video understanding of current LVLMs. We design a semi-automated annotation pipeline to obtain 49,979 Question-Answer (QA) pairs of 1,353 streaming videos, which includes generating QA chains that represent a series of consecutive multi-turn dialogues over video segments and constructing temporal linkages between successive QA chains. Our experimental results, obtained from 14 models in dialogue and streaming evaluations, reveal that while the closed-source GPT-4o outperforms others, most open-source LVLMs struggle with long-context streaming video understanding. We also construct a StreamingChat model, which significantly outperforms open-source LVLMs on our SVBench and achieves comparable performance on diverse vision-language benchmarks. We expect SVBench to advance the research of streaming video understanding by providing a comprehensive and in-depth analysis of current LVLMs.
+"""
+
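One note on the new constants: `app.py` looks up a column's display type via `DATA_TITILE_TYPE[COLUMN_NAMES.index(x)]`, so the two lists must stay index-aligned whenever a column is added or reordered. A suggested guard, not part of this commit:

```python
# Suggested guard (not in the commit): DATA_TITILE_TYPE is indexed through
# COLUMN_NAMES.index(...) in app.py, so the lists must have the same length
# and ordering.
from constants import COLUMN_NAMES, DATA_TITILE_TYPE

assert len(DATA_TITILE_TYPE) == len(COLUMN_NAMES), (
    f"{len(DATA_TITILE_TYPE)} datatypes declared for {len(COLUMN_NAMES)} columns"
)
```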
eval-queue/.gitattributes
DELETED
@@ -1,55 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
eval-queue/svbench/.gitattributes
DELETED
@@ -1,55 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
eval-queue/svbench/Flash-VStream.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "Flash-VStream",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/GPT-4V.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "GPT-4V",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "",
-    "likes": 0,
-    "params": 0,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/GPT-4o.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "GPT-4o",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "",
-    "likes": 0,
-    "params": 0,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/Gemini 1.5 Pro.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "Gemini 1.5 Pro",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "",
-    "likes": 0,
-    "params": 0,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/InternLM-XC2.5.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "InternLM-XC2.5",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/InternVL2.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "InternVL2",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "ImageLLM",
-    "likes": 0,
-    "params": 8,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/LLaVA-NeXT-Video.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "LLaVA-NeXT-Video",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/MiniCPM-V 2.6.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "MiniCPM-V 2.6",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "ImageLLM",
-    "likes": 0,
-    "params": 8,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/MovieChat.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "MovieChat",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/Qwen2-VL.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "Qwen2-VL",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "ImageLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/ShareGPT4Video.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "ShareGPT4Video",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 8,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/TimeChat.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "TimeChat",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/VILA.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "VILA",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "ImageLLM",
-    "likes": 0,
-    "params": 8,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/Video-ChatGPT.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "Video-ChatGPT",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/Video-LLaVA.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "Video-LLaVA",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-queue/svbench/VideoLLaMA2.json
DELETED
@@ -1,14 +0,0 @@
-{
-    "model": "VideoLLaMA2",
-    "base_model": "",
-    "revision": "float16",
-    "precision": "",
-    "weight_type": "",
-    "status": "FINISHED",
-    "submitted_time": "",
-    "model_type": "VideoLLM",
-    "likes": 0,
-    "params": 7,
-    "license": "",
-    "private": false
-}
eval-results/.gitattributes
DELETED
@@ -1,55 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
eval-results/svbench/.gitattributes
DELETED
@@ -1,55 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
eval-results/svbench/Flash-VStream/results_Flash-VStream.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "Flash-VStream",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.3754 },
-        "Dialogue_CC": { "acc": 0.4474 },
-        "Dialogue_LC": { "acc": 0.5102 },
-        "Dialogue_TU": { "acc": 0.4795 },
-        "Dialogue_IC": { "acc": 0.3794 },
-        "Dialogue_OS": { "acc": 0.4272 },
-        "Streaming_SA": { "acc": 0.3571 },
-        "Streaming_CC": { "acc": 0.4424 },
-        "Streaming_LC": { "acc": 0.4849 },
-        "Streaming_TU": { "acc": 0.3895 },
-        "Streaming_IC": { "acc": 0.3900 },
-        "Streaming_OS": { "acc": 0.3880 }
-    }
-}
eval-results/svbench/GPT-4V/results_GPT-4V.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "GPT-4V",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.5603 },
-        "Dialogue_CC": { "acc": 0.6261 },
-        "Dialogue_LC": { "acc": 0.6909 },
-        "Dialogue_TU": { "acc": 0.6536 },
-        "Dialogue_IC": { "acc": 0.5373 },
-        "Dialogue_OS": { "acc": 0.6030 },
-        "Streaming_SA": { "acc": 0.5637 },
-        "Streaming_CC": { "acc": 0.6141 },
-        "Streaming_LC": { "acc": 0.6580 },
-        "Streaming_TU": { "acc": 0.5918 },
-        "Streaming_IC": { "acc": 0.5716 },
-        "Streaming_OS": { "acc": 0.5793 }
-    }
-}
eval-results/svbench/GPT-4o/results_GPT-4o.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "GPT-4o",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.5826 },
-        "Dialogue_CC": { "acc": 0.6476 },
-        "Dialogue_LC": { "acc": 0.7075 },
-        "Dialogue_TU": { "acc": 0.6768 },
-        "Dialogue_IC": { "acc": 0.5582 },
-        "Dialogue_OS": { "acc": 0.6257 },
-        "Streaming_SA": { "acc": 0.5799 },
-        "Streaming_CC": { "acc": 0.6352 },
-        "Streaming_LC": { "acc": 0.6772 },
-        "Streaming_TU": { "acc": 0.6018 },
-        "Streaming_IC": { "acc": 0.5925 },
-        "Streaming_OS": { "acc": 0.5997 }
-    }
-}
eval-results/svbench/Gemini 1.5 Pro/results_Gemini 1.5 Pro.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "Gemini 1.5 Pro",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.4907 },
-        "Dialogue_CC": { "acc": 0.5615 },
-        "Dialogue_LC": { "acc": 0.6224 },
-        "Dialogue_TU": { "acc": 0.5836 },
-        "Dialogue_IC": { "acc": 0.4772 },
-        "Dialogue_OS": { "acc": 0.5368 },
-        "Streaming_SA": { "acc": 0.4935 },
-        "Streaming_CC": { "acc": 0.5577 },
-        "Streaming_LC": { "acc": 0.6041 },
-        "Streaming_TU": { "acc": 0.5289 },
-        "Streaming_IC": { "acc": 0.5111 },
-        "Streaming_OS": { "acc": 0.5155 }
-    }
-}
eval-results/svbench/InternLM-XC2.5/results_InternLM-XC2.5.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "InternLM-XC2.5",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.4651 },
-        "Dialogue_CC": { "acc": 0.5316 },
-        "Dialogue_LC": { "acc": 0.5984 },
-        "Dialogue_TU": { "acc": 0.5294 },
-        "Dialogue_IC": { "acc": 0.4587 },
-        "Dialogue_OS": { "acc": 0.5071 },
-        "Streaming_SA": { "acc": 0.5262 },
-        "Streaming_CC": { "acc": 0.5855 },
-        "Streaming_LC": { "acc": 0.6289 },
-        "Streaming_TU": { "acc": 0.5398 },
-        "Streaming_IC": { "acc": 0.5439 },
-        "Streaming_OS": { "acc": 0.5439 }
-    }
-}
eval-results/svbench/InternVL2/results_InternVL2.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "InternVL2",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.4053 },
-        "Dialogue_CC": { "acc": 0.4677 },
-        "Dialogue_LC": { "acc": 0.5238 },
-        "Dialogue_TU": { "acc": 0.4697 },
-        "Dialogue_IC": { "acc": 0.4035 },
-        "Dialogue_OS": { "acc": 0.4448 },
-        "Streaming_SA": { "acc": 0.3892 },
-        "Streaming_CC": { "acc": 0.4542 },
-        "Streaming_LC": { "acc": 0.5045 },
-        "Streaming_TU": { "acc": 0.4153 },
-        "Streaming_IC": { "acc": 0.4235 },
-        "Streaming_OS": { "acc": 0.4162 }
-    }
-}
eval-results/svbench/LLaVA-NeXT-Video/results_LLaVA-NeXT-Video.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "LLaVA-NeXT-Video",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.3771 },
-        "Dialogue_CC": { "acc": 0.4459 },
-        "Dialogue_LC": { "acc": 0.5205 },
-        "Dialogue_TU": { "acc": 0.4180 },
-        "Dialogue_IC": { "acc": 0.3658 },
-        "Dialogue_OS": { "acc": 0.4140 },
-        "Streaming_SA": { "acc": 0.3429 },
-        "Streaming_CC": { "acc": 0.3968 },
-        "Streaming_LC": { "acc": 0.4765 },
-        "Streaming_TU": { "acc": 0.3533 },
-        "Streaming_IC": { "acc": 0.3668 },
-        "Streaming_OS": { "acc": 0.3612 }
-    }
-}
eval-results/svbench/MiniCPM-V 2.6/results_MiniCPM-V 2.6.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "MiniCPM-V 2.6",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.5170 },
-        "Dialogue_CC": { "acc": 0.5950 },
-        "Dialogue_LC": { "acc": 0.6533 },
-        "Dialogue_TU": { "acc": 0.6172 },
-        "Dialogue_IC": { "acc": 0.5009 },
-        "Dialogue_OS": { "acc": 0.5663 },
-        "Streaming_SA": { "acc": 0.4644 },
-        "Streaming_CC": { "acc": 0.5273 },
-        "Streaming_LC": { "acc": 0.5835 },
-        "Streaming_TU": { "acc": 0.5348 },
-        "Streaming_IC": { "acc": 0.4832 },
-        "Streaming_OS": { "acc": 0.4967 }
-    }
-}
eval-results/svbench/MovieChat/results_MovieChat.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "MovieChat",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.2036 },
-        "Dialogue_CC": { "acc": 0.2374 },
-        "Dialogue_LC": { "acc": 0.2897 },
-        "Dialogue_TU": { "acc": 0.228 },
-        "Dialogue_IC": { "acc": 0.2051 },
-        "Dialogue_OS": { "acc": 0.2272 },
-        "Streaming_SA": { "acc": 0.1892 },
-        "Streaming_CC": { "acc": 0.2238 },
-        "Streaming_LC": { "acc": 0.2677 },
-        "Streaming_TU": { "acc": 0.2046 },
-        "Streaming_IC": { "acc": 0.2098 },
-        "Streaming_OS": { "acc": 0.1964 }
-    }
-}
eval-results/svbench/Qwen2-VL/results_Qwen2-VL.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "Qwen2-VL",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": { "acc": 0.5047 },
-        "Dialogue_CC": { "acc": 0.5771 },
-        "Dialogue_LC": { "acc": 0.6346 },
-        "Dialogue_TU": { "acc": 0.6077 },
-        "Dialogue_IC": { "acc": 0.4944 },
-        "Dialogue_OS": { "acc": 0.5529 },
-        "Streaming_SA": { "acc": 0.4838 },
-        "Streaming_CC": { "acc": 0.5517 },
-        "Streaming_LC": { "acc": 0.5991 },
-        "Streaming_TU": { "acc": 0.5204 },
-        "Streaming_IC": { "acc": 0.5142 },
-        "Streaming_OS": { "acc": 0.5139 }
-    }
-}
eval-results/svbench/ShareGPT4Video/results_ShareGPT4Video.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "ShareGPT4Video",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": {
-            "acc": 0.3626
-        },
-        "Dialogue_CC": {
-            "acc": 0.4368
-        },
-        "Dialogue_LC": {
-            "acc": 0.5012
-        },
-        "Dialogue_TU": {
-            "acc": 0.4733
-        },
-        "Dialogue_IC": {
-            "acc": 0.3725
-        },
-        "Dialogue_OS": {
-            "acc": 0.4176
-        },
-        "Streaming_SA": {
-            "acc": 0.3314
-        },
-        "Streaming_CC": {
-            "acc": 0.4048
-        },
-        "Streaming_LC": {
-            "acc": 0.4601
-        },
-        "Streaming_TU": {
-            "acc": 0.3815
-        },
-        "Streaming_IC": {
-            "acc": 0.3781
-        },
-        "Streaming_OS": {
-            "acc": 0.3710
-        }
-    }
-}
eval-results/svbench/TimeChat/results_TimeChat.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "TimeChat",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": {
-            "acc": 0.3109
-        },
-        "Dialogue_CC": {
-            "acc": 0.3857
-        },
-        "Dialogue_LC": {
-            "acc": 0.4552
-        },
-        "Dialogue_TU": {
-            "acc": 0.4337
-        },
-        "Dialogue_IC": {
-            "acc": 0.3110
-        },
-        "Dialogue_OS": {
-            "acc": 0.3624
-        },
-        "Streaming_SA": {
-            "acc": 0.2714
-        },
-        "Streaming_CC": {
-            "acc": 0.3442
-        },
-        "Streaming_LC": {
-            "acc": 0.3978
-        },
-        "Streaming_TU": {
-            "acc": 0.3680
-        },
-        "Streaming_IC": {
-            "acc": 0.3171
-        },
-        "Streaming_OS": {
-            "acc": 0.3115
-        }
-    }
-}
eval-results/svbench/VILA/results_VILA.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "VILA",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": {
-            "acc": 0.4323
-        },
-        "Dialogue_CC": {
-            "acc": 0.4930
-        },
-        "Dialogue_LC": {
-            "acc": 0.5559
-        },
-        "Dialogue_TU": {
-            "acc": 0.5247
-        },
-        "Dialogue_IC": {
-            "acc": 0.4127
-        },
-        "Dialogue_OS": {
-            "acc": 0.4707
-        },
-        "Streaming_SA": {
-            "acc": 0.3819
-        },
-        "Streaming_CC": {
-            "acc": 0.4427
-        },
-        "Streaming_LC": {
-            "acc": 0.4918
-        },
-        "Streaming_TU": {
-            "acc": 0.4129
-        },
-        "Streaming_IC": {
-            "acc": 0.4055
-        },
-        "Streaming_OS": {
-            "acc": 0.4038
-        }
-    }
-}
eval-results/svbench/Video-ChatGPT/results_Video-ChatGPT.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "Video-ChatGPT",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": {
-            "acc": 0.2801
-        },
-        "Dialogue_CC": {
-            "acc": 0.3404
-        },
-        "Dialogue_LC": {
-            "acc": 0.4089
-        },
-        "Dialogue_TU": {
-            "acc": 0.3566
-        },
-        "Dialogue_IC": {
-            "acc": 0.2959
-        },
-        "Dialogue_OS": {
-            "acc": 0.3224
-        },
-        "Streaming_SA": {
-            "acc": 0.2284
-        },
-        "Streaming_CC": {
-            "acc": 0.2844
-        },
-        "Streaming_LC": {
-            "acc": 0.3393
-        },
-        "Streaming_TU": {
-            "acc": 0.2631
-        },
-        "Streaming_IC": {
-            "acc": 0.2643
-        },
-        "Streaming_OS": {
-            "acc": 0.2502
-        }
-    }
-}
eval-results/svbench/Video-LLaVA/results_Video-LLaVA.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "Video-LLaVA",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": {
-            "acc": 0.3185
-        },
-        "Dialogue_CC": {
-            "acc": 0.3838
-        },
-        "Dialogue_LC": {
-            "acc": 0.4493
-        },
-        "Dialogue_TU": {
-            "acc": 0.4154
-        },
-        "Dialogue_IC": {
-            "acc": 0.3280
-        },
-        "Dialogue_OS": {
-            "acc": 0.3649
-        },
-        "Streaming_SA": {
-            "acc": 0.2695
-        },
-        "Streaming_CC": {
-            "acc": 0.3368
-        },
-        "Streaming_LC": {
-            "acc": 0.3900
-        },
-        "Streaming_TU": {
-            "acc": 0.3183
-        },
-        "Streaming_IC": {
-            "acc": 0.3153
-        },
-        "Streaming_OS": {
-            "acc": 0.2989
-        }
-    }
-}
eval-results/svbench/VideoLLaMA2/results_VideoLLaMA2.json
DELETED
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "model_dtype": "torch.float16",
-        "model_name": "VideoLLaMA2",
-        "model_sha": ""
-    },
-    "results": {
-        "Dialogue_SA": {
-            "acc": 0.4250
-        },
-        "Dialogue_CC": {
-            "acc": 0.4988
-        },
-        "Dialogue_LC": {
-            "acc": 0.5596
-        },
-        "Dialogue_TU": {
-            "acc": 0.5223
-        },
-        "Dialogue_IC": {
-            "acc": 0.4140
-        },
-        "Dialogue_OS": {
-            "acc": 0.4710
-        },
-        "Streaming_SA": {
-            "acc": 0.3895
-        },
-        "Streaming_CC": {
-            "acc": 0.4611
-        },
-        "Streaming_LC": {
-            "acc": 0.5177
-        },
-        "Streaming_TU": {
-            "acc": 0.4369
-        },
-        "Streaming_IC": {
-            "acc": 0.4222
-        },
-        "Streaming_OS": {
-            "acc": 0.4277
-        }
-    }
-}
pyproject.toml
DELETED
@@ -1,13 +0,0 @@
-[tool.ruff]
-# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
-select = ["E", "F"]
-ignore = ["E501"]  # line too long (black is taking care of this)
-line-length = 119
-fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
-
-[tool.isort]
-profile = "black"
-line_length = 119
-
-[tool.black]
-line-length = 119
requirements.txt
DELETED
@@ -1,16 +0,0 @@
-APScheduler
-black
-datasets
-gradio
-gradio[oauth]
-gradio_leaderboard==0.0.13
-gradio_client
-huggingface-hub>=0.18.0
-matplotlib
-numpy
-pandas
-python-dateutil
-tqdm
-transformers
-tokenizers>=0.15.0
-sentencepiece
src/about.py
DELETED
@@ -1,90 +0,0 @@
-from dataclasses import dataclass
-from enum import Enum
-
-@dataclass
-class Task:
-    benchmark: str
-    metric: str
-    col_name: str
-
-
-# Select your tasks here
-# ---------------------------------------------------
-class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("Dialogue_SA", "acc", "Dialogue_SA")
-    task1 = Task("Dialogue_CC", "acc", "Dialogue_CC")
-    task2 = Task("Dialogue_LC", "acc", "Dialogue_LC")
-    task3 = Task("Dialogue_TU", "acc", "Dialogue_TU")
-    task4 = Task("Dialogue_IC", "acc", "Dialogue_IC")
-    task5 = Task("Dialogue_OS", "acc", "Dialogue_OS")
-    task6 = Task("Streaming_SA", "acc", "Streaming_SA")
-    task7 = Task("Streaming_CC", "acc", "Streaming_CC")
-    task8 = Task("Streaming_LC", "acc", "Streaming_LC")
-    task9 = Task("Streaming_TU", "acc", "Streaming_TU")
-    task10 = Task("Streaming_IC", "acc", "Streaming_IC")
-    task11 = Task("Streaming_OS", "acc", "Streaming_OS")
-
-NUM_FEWSHOT = 0  # Change with your few-shot setting
-# ---------------------------------------------------
-
-
-
-# Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
-
-# What does your leaderboard evaluate?
-INTRODUCTION_TEXT = """
-SVBench is a benchmark specifically designed to evaluate the performance of Large Vision-Language Models (LVLMs) in long-context streaming video understanding tasks. This benchmark comprehensively assesses the models' capabilities in handling streaming videos through its unique temporal multi-turn question-answering chains. To facilitate research and development, SVBench provides a detailed leaderboard showcasing the performance results of over a dozen models on this benchmark. By ranking the models based on their performance on SVBench, users can quickly identify models that excel in specific tasks, thereby guiding subsequent research and applications.
-Detailed information about SVBench and the leaderboard is available at [SVBench Benchmark](https://yzy-bupt.github.io/SVBench). The paper is available at [SVBench Paper](https://arxiv.org/abs/2502.10810). Leaderboard submissions can be made through [Leaderboard Submission](https://docs.google.com/forms/d/e/1FAIpQLSfz62pGaIdKjmDbOP0vw74dXSiG-2ILJI7gdugdx4pfWSc42Q/viewform). The related dataset is hosted on the Hugging Face platform at [SVBench Dataset](https://huggingface.co/datasets/yzy666/SVBench) for further experiments and model development.
-This leaderboard not only provides a fair competitive environment for current models but also serves as an important reference standard for future model improvements and innovations.
-"""
-
-# Which evaluations are you running? How can people reproduce what you have?
-LLM_BENCHMARKS_TEXT = f"""
-## How it works
-
-## Reproducibility
-To reproduce our results, here are the commands you can run:
-
-"""
-
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it — stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""
-@article{yang2025svbench,
-  title={SVBench: A Benchmark with Temporal Multi-Turn Dialogues for Streaming Video Understanding},
-  author={Yang, Zhenyu and Hu, Yuhang and Du, Zemin and Xue, Dizhan and Qian, Shengsheng and Wu, Jiahong and Yang, Fan and Dong, Weiming and Xu, Changsheng},
-  journal={arXiv preprint arXiv:2502.10810},
-  year={2025}
-}
-"""
src/display/css_html_js.py
DELETED
@@ -1,105 +0,0 @@
-custom_css = """
-
-.markdown-text {
-    font-size: 16px !important;
-}
-
-#models-to-add-text {
-    font-size: 18px !important;
-}
-
-#citation-button span {
-    font-size: 16px !important;
-}
-
-#citation-button textarea {
-    font-size: 16px !important;
-}
-
-#citation-button > label > button {
-    margin: 6px;
-    transform: scale(1.3);
-}
-
-#leaderboard-table {
-    margin-top: 15px
-}
-
-#leaderboard-table-lite {
-    margin-top: 15px
-}
-
-#search-bar-table-box > div:first-child {
-    background: none;
-    border: none;
-}
-
-#search-bar {
-    padding: 0px;
-}
-
-/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
-#leaderboard-table td:nth-child(2),
-#leaderboard-table th:nth-child(2) {
-    max-width: 400px;
-    overflow: auto;
-    white-space: nowrap;
-}
-
-.tab-buttons button {
-    font-size: 20px;
-}
-
-#scale-logo {
-    border-style: none !important;
-    box-shadow: none;
-    display: block;
-    margin-left: auto;
-    margin-right: auto;
-    max-width: 600px;
-}
-
-#scale-logo .download {
-    display: none;
-}
-#filter_type {
-    border: 0;
-    padding-left: 0;
-    padding-top: 0;
-}
-#filter_type label {
-    display: flex;
-}
-#filter_type label > span {
-    margin-top: var(--spacing-lg);
-    margin-right: 0.5em;
-}
-#filter_type label > .wrap {
-    width: 103px;
-}
-#filter_type label > .wrap .wrap-inner {
-    padding: 2px;
-}
-#filter_type label > .wrap .wrap-inner input {
-    width: 1px
-}
-#filter-columns-type {
-    border: 0;
-    padding: 0.5;
-}
-#filter-columns-size {
-    border: 0;
-    padding: 0.5;
-}
-#box-filter > .form {
-    border: 0
-}
-"""
-
-get_window_url_params = """
-function(url_params) {
-    const params = new URLSearchParams(window.location.search);
-    url_params = Object.fromEntries(params);
-    return url_params;
-}
-"""
src/display/formatting.py
DELETED
@@ -1,27 +0,0 @@
-def model_hyperlink(link, model_name):
-    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
-
-
-def make_clickable_model(model_name):
-    link = f"https://huggingface.co/{model_name}"
-    return model_hyperlink(link, model_name)
-
-
-def styled_error(error):
-    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
-
-
-def styled_warning(warn):
-    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
-
-
-def styled_message(message):
-    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
-
-
-def has_no_nan_values(df, columns):
-    return df[columns].notna().all(axis=1)
-
-
-def has_nan_values(df, columns):
-    return df[columns].isna().any(axis=1)
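
For illustration, the helpers above are used as follows (the model name here is hypothetical):

from src.display.formatting import make_clickable_model, styled_error

# Renders a model name as a dotted-underline link to its Hub page, e.g.
# '<a target="_blank" href="https://huggingface.co/org/some-model" ...>org/some-model</a>'
cell = make_clickable_model("org/some-model")

# Wraps an error message in red, centered 20px text for the submission tab.
message = styled_error("Model not found on the Hub")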
src/display/utils.py
DELETED
@@ -1,125 +0,0 @@
-from dataclasses import dataclass, make_dataclass
-from enum import Enum
-
-import pandas as pd
-
-from src.about import Tasks
-
-def fields(raw_class):
-    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
-
-
-# These classes are for user-facing column names,
-# to avoid having to change them all around the code
-# when a modification is needed
-@dataclass
-class ColumnContent:
-    name: str
-    type: str
-    displayed_by_default: bool
-    hidden: bool = False
-    never_hidden: bool = False
-
-## Leaderboard columns
-auto_eval_column_dict = []
-# Init
-# auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
-auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
-# Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
-for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-# auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-# auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-# auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-# auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-# auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-# auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-# auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
-
-# We use make_dataclass to dynamically fill the scores from Tasks
-AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
-
-## For the queue columns in the submission tab
-@dataclass(frozen=True)
-class EvalQueueColumn:  # Queue column
-    model = ColumnContent("model", "markdown", True)
-    # revision = ColumnContent("revision", "str", True)
-    # private = ColumnContent("private", "bool", True)
-    # precision = ColumnContent("precision", "str", True)
-    # weight_type = ColumnContent("weight_type", "str", "Original")
-    # status = ColumnContent("status", "str", True)
-
-## All the model information that we might need
-@dataclass
-class ModelDetails:
-    name: str
-    display_name: str = ""
-    symbol: str = ""  # emoji
-
-class ModelType(Enum):
-    VideoLLM = ModelDetails(name="VideoLLM", symbol="🎥")
-    ImageLLM = ModelDetails(name="ImageLLM", symbol="🖼️")
-    Unknown = ModelDetails(name="", symbol="?")
-
-    def to_str(self, separator=" "):
-        return f"{self.value.symbol}{separator}{self.value.name}"
-
-    @staticmethod
-    def from_str(type):
-        if "VideoLLM" in type or "🎥" in type:
-            return ModelType.VideoLLM
-        if "ImageLLM" in type or "🖼️" in type:
-            return ModelType.ImageLLM
-        return ModelType.Unknown
-
-# class ModelType(Enum):
-#     PT = ModelDetails(name="pretrained", symbol="🟢")
-#     FT = ModelDetails(name="fine-tuned", symbol="🔶")
-#     IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
-#     RL = ModelDetails(name="RL-tuned", symbol="🟦")
-#     Unknown = ModelDetails(name="", symbol="?")
-
-#     def to_str(self, separator=" "):
-#         return f"{self.value.symbol}{separator}{self.value.name}"
-
-#     @staticmethod
-#     def from_str(type):
-#         if "fine-tuned" in type or "🔶" in type:
-#             return ModelType.FT
-#         if "pretrained" in type or "🟢" in type:
-#             return ModelType.PT
-#         if "RL-tuned" in type or "🟦" in type:
-#             return ModelType.RL
-#         if "instruction-tuned" in type or "⭕" in type:
-#             return ModelType.IFT
-#         return ModelType.Unknown
-
-class WeightType(Enum):
-    Adapter = ModelDetails("Adapter")
-    Original = ModelDetails("Original")
-    Delta = ModelDetails("Delta")
-
-class Precision(Enum):
-    float16 = ModelDetails("float16")
-    bfloat16 = ModelDetails("bfloat16")
-    Unknown = ModelDetails("?")
-
-    def from_str(precision):
-        if precision in ["torch.float16", "float16"]:
-            return Precision.float16
-        if precision in ["torch.bfloat16", "bfloat16"]:
-            return Precision.bfloat16
-        return Precision.Unknown
-
-# Column selection
-COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
-
-EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
-EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
-
-BENCHMARK_COLS = [t.value.col_name for t in Tasks]
-
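
The make_dataclass call above assembles AutoEvalColumn at import time from auto_eval_column_dict. A hand-written sketch of what the resulting class roughly looks like (truncated to two task columns; the class name is hypothetical):

from dataclasses import dataclass

from src.display.utils import ColumnContent

@dataclass(frozen=True)
class AutoEvalColumnSketch:
    model: ColumnContent = ColumnContent("Model", "markdown", True, never_hidden=True)
    average: ColumnContent = ColumnContent("Average ⬆️", "number", True)
    task0: ColumnContent = ColumnContent("Dialogue_SA", "number", True)
    task1: ColumnContent = ColumnContent("Dialogue_CC", "number", True)
    # ... one field per remaining Task, then model_type ("Type") and params ("#Params (B)")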
src/envs.py
DELETED
@@ -1,25 +0,0 @@
-import os
-
-from huggingface_hub import HfApi
-
-# Info to change for your repository
-# ----------------------------------
-TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
-
-OWNER = "demo-leaderboard-backend"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
-# ----------------------------------
-
-REPO_ID = f"{OWNER}/leaderboard"
-QUEUE_REPO = f"{OWNER}/requests"
-RESULTS_REPO = f"{OWNER}/results"
-
-# If you set up a cache later, just change HF_HOME
-CACHE_PATH = os.getenv("HF_HOME", ".")
-
-# Local caches
-EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue/svbench")
-EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results/svbench")
-EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
-EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
-
-API = HfApi(token=TOKEN)