morriszms committed
Commit 1604d4c · verified · 1 parent: 01fcf97

Upload folder using huggingface_hub
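A commit like this is typically produced with `huggingface_hub`'s `upload_folder` API. A minimal sketch, assuming a local folder of GGUF files; the folder path and repository name below are illustrative assumptions, not taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()
# NOTE: folder_path and repo_id are illustrative assumptions.
api.upload_folder(
    folder_path="./QVQ-72B-Preview-GGUF",
    repo_id="tensorblock/QVQ-72B-Preview-GGUF",
    repo_type="model",
)
```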

.gitattributes CHANGED
@@ -33,3 +33,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q5_K_M/QVQ-72B-Preview-Q5_K_M-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q5_K_M/QVQ-72B-Preview-Q5_K_M-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q6_K/QVQ-72B-Preview-Q6_K-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q6_K/QVQ-72B-Preview-Q6_K-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q8_0/QVQ-72B-Preview-Q8_0-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q8_0/QVQ-72B-Preview-Q8_0-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
+ QVQ-72B-Preview-Q8_0/QVQ-72B-Preview-Q8_0-00003-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
QVQ-72B-Preview-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e585bb44f8291c105e1e6ee5cfc49b8e79d5b7a7ada1b7d3301f291d14d8f8
+ size 29811761248
QVQ-72B-Preview-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c3cbb27cc3fa40a273f3eb73f2fc79d708a72d70bb52f8a2101cb275988a8bb
+ size 39505223776
QVQ-72B-Preview-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2f6e06a8eab6684adcdb57160066eb4cb6b403ac7d1c689914ef20456ff8a05
+ size 37698723936
QVQ-72B-Preview-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bc7baeac7efff34d81a759486e61a22a73d5c165380010002ab635a111298da
+ size 34487787616
QVQ-72B-Preview-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c0bdeba15b3996a4aa147be8315efb402477f113cf5d50f9064bf561602d016
+ size 41231736928
QVQ-72B-Preview-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f4c63df02622bd0f33f1b78d775e24be572e475790ea77d3fc5c4706ab15525
+ size 47415713888
QVQ-72B-Preview-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffe05feff7e8bd6579b9496e9c877c272bf8a923a6a480700d4462503ca49bed
+ size 43889221728
QVQ-72B-Preview-Q5_K_M/QVQ-72B-Preview-Q5_K_M-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91cd66fc7dab814d8c1a899e1f293afdf7b339ce38f8676b185b1de7094d442f
+ size 34868195872
QVQ-72B-Preview-Q5_K_M/QVQ-72B-Preview-Q5_K_M-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f349beb171a63008a1398d8f95991e392cce75955bf2c5f7de79876ad0613d0d
+ size 19579268896
QVQ-72B-Preview-Q6_K/QVQ-72B-Preview-Q6_K-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d2efd2fd1bc713c51e08df58a1bda567d89486997381d83cc3c0ea3e63cef92
+ size 34880052320
QVQ-72B-Preview-Q6_K/QVQ-72B-Preview-Q6_K-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47355ca94fbd3a8d82c8bfdf2cef472bda96616583dd293f1a5cc8a3f3f0da60
+ size 29467575520
QVQ-72B-Preview-Q8_0/QVQ-72B-Preview-Q8_0-00001-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62daf5e2e1ec23e389bc5b77abb8e82130e79d2b258e21b86afcaa2be68e0f0e
+ size 34780006176
QVQ-72B-Preview-Q8_0/QVQ-72B-Preview-Q8_0-00002-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1a4df0e4d46fe96f3d11f902aaa6121894024eddf976daced1be064e4ca5052
+ size 34764293216
QVQ-72B-Preview-Q8_0/QVQ-72B-Preview-Q8_0-00003-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46737703735737df34ef5a99b9cdbc2db02e087c5ea77ec119905cb73b64b4b1
+ size 7718311488
README.md ADDED
@@ -0,0 +1,135 @@
+ ---
+ license: other
+ license_name: qwen
+ license_link: https://huggingface.co/Qwen/QVQ-72B-Preview/blob/main/LICENSE
+ language:
+ - en
+ pipeline_tag: image-text-to-text
+ base_model: Qwen/QVQ-72B-Preview
+ tags:
+ - chat
+ - TensorBlock
+ - GGUF
+ library_name: transformers
+ ---
+
+
+ # QVQ-72B-Preview
+
+ ## Introduction
+
+ **QVQ-72B-Preview** is an experimental research model developed by the Qwen team, focusing on enhancing visual reasoning capabilities.
+
+ ## Performance
+
+ | Benchmark         | **QVQ-72B-Preview** | o1-2024-12-17 | gpt-4o-2024-05-13 | Claude 3.5 Sonnet-20241022 | Qwen2-VL-72B |
+ |-------------------|---------------------|---------------|-------------------|----------------------------|--------------|
+ | MMMU (val)        | 70.3                | 77.3          | 69.1              | 70.4                       | 64.5         |
+ | MathVista (mini)  | 71.4                | 71.0          | 63.8              | 65.3                       | 70.5         |
+ | MathVision (full) | 35.9                | –             | 30.4              | 35.6                       | 25.9         |
+ | OlympiadBench     | 20.4                | –             | 25.9              | –                          | 11.2         |
+
+ **QVQ-72B-Preview** achieves strong results across these benchmarks. It scored 70.3% on the Multimodal Massive Multi-task Understanding (MMMU) benchmark, showcasing its ability in multidisciplinary understanding and reasoning. The significant improvement on MathVision (35.9 vs. 25.9 for Qwen2-VL-72B) highlights the model's progress in mathematical reasoning tasks, and its OlympiadBench score likewise demonstrates an enhanced ability to tackle challenging problems.
+
+ ***But It's Not All Perfect: Acknowledging the Limitations***
+
+ While **QVQ-72B-Preview** exhibits promising performance that surpasses expectations, it's important to acknowledge several limitations:
+
+ 1. **Language Mixing and Code-Switching:** The model might occasionally mix different languages or unexpectedly switch between them, potentially affecting the clarity of its responses.
+ 2. **Recursive Reasoning Loops:** There's a risk of the model getting caught in recursive reasoning loops, leading to lengthy responses that may not even arrive at a final answer.
+ 3. **Safety and Ethical Considerations:** Robust safety measures are needed to ensure reliable and safe performance. Users should exercise caution when deploying this model.
+ 4. **Performance and Benchmark Limitations:** Despite the improvements in visual reasoning, QVQ doesn't entirely replace the capabilities of Qwen2-VL-72B. During multi-step visual reasoning, the model might gradually lose focus on the image content, leading to hallucinations. Moreover, QVQ doesn't show significant improvement over Qwen2-VL-72B in basic recognition tasks like identifying people, animals, or plants.
+
+ Note: Currently, the model only supports single-round dialogues and image outputs. It does not support video inputs.
+
+ ## Quickstart
+
+ We offer a toolkit to help you handle various types of visual input more conveniently, including base64-encoded images, URLs, and interleaved images and videos. You can install it with the following command:
+
+ ```bash
+ pip install qwen-vl-utils
+ ```
+
+ Below is a code snippet showing how to use the chat model with `transformers` and `qwen_vl_utils`:
+
+ ```python
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ # default: load the model on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "Qwen/QVQ-72B-Preview", torch_dtype="auto", device_map="auto"
+ )
+
+ # default processor
+ processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview")
+
+ # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
+ # min_pixels = 256*28*28
+ # max_pixels = 1280*28*28
+ # processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview", min_pixels=min_pixels, max_pixels=max_pixels)
+
+ messages = [
+     {
+         "role": "system",
+         "content": [
+             {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
+         ],
+     },
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png",
+             },
+             {"type": "text", "text": "What value should be filled in the blank space?"},
+         ],
+     }
+ ]
+
+ # Preparation for inference
+ text = processor.apply_chat_template(
+     messages, tokenize=False, add_generation_prompt=True
+ )
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text],
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ )
+ inputs = inputs.to("cuda")
+
+ # Inference: generation of the output
+ generated_ids = model.generate(**inputs, max_new_tokens=8192)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_text)
+ ```
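+
+ Note that the files in this repository are GGUF quantizations of QVQ-72B-Preview, intended for llama.cpp-compatible runtimes rather than for the `transformers` snippet above. As a minimal sketch for fetching a single quantization with `huggingface_hub` (the `repo_id` below is an illustrative assumption; replace it with this repository's actual name):
+
+ ```python
+ from huggingface_hub import hf_hub_download
+
+ # Download one quantization file from the repository.
+ # NOTE: repo_id is an illustrative assumption; substitute the real repository name.
+ gguf_path = hf_hub_download(
+     repo_id="tensorblock/QVQ-72B-Preview-GGUF",
+     filename="QVQ-72B-Preview-Q2_K.gguf",
+ )
+ print(gguf_path)
+ ```
+
+ The Q5_K_M, Q6_K, and Q8_0 variants are split into multiple shards (e.g. `QVQ-72B-Preview-Q6_K-00001-of-00002.gguf`). Recent llama.cpp builds can generally load such splits by pointing at the first shard; alternatively, the shards can usually be merged with llama.cpp's `gguf-split` tool before use.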
+
+ ## Citation
+
+ If you find our work helpful, feel free to cite us.
+
+ ```bibtex
+ @misc{qvq-72b-preview,
+     title = {QVQ: To See the World with Wisdom},
+     url = {https://qwenlm.github.io/blog/qvq-72b-preview/},
+     author = {Qwen Team},
+     month = {December},
+     year = {2024}
+ }
+
+ @article{Qwen2VL,
+     title = {Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution},
+     author = {Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang},
+     journal = {arXiv preprint arXiv:2409.12191},
+     year = {2024}
+ }
+ ```