Add model card

#1
by nielsr (HF Staff) - opened
Files changed (1)
README.md +102 -3
README.md CHANGED
@@ -1,3 +1,102 @@
- ---
- license: cc-by-nc-4.0
- ---
+ ---
+ license: cc-by-nc-4.0
+ library_name: transformers
+ pipeline_tag: video-text-to-text
+ ---
+
+ # Slow-Fast Architecture for Video Multi-Modal Large Language Models
+
+ This repository contains the model presented in the paper [Slow-Fast Architecture for Video Multi-Modal Large Language Models](https://huggingface.co/papers/2504.01328).
+
+ Code: https://github.com/SHI-Labs/Slow-Fast-Video-Multimodal-LLM
+
+ ## Introduction
+ This model uses a novel slow-fast architecture to balance temporal resolution and spatial detail in video understanding, overcoming the sequence-length limitations of traditional LLMs (encoding many frames at full spatial detail can easily produce tens of thousands of visual tokens). It employs a dual-token strategy: "fast" tokens give the LLM a compressed overview of the entire video, while "slow" tokens preserve spatial detail and allow instruction-aware extraction of fine-grained information via cross-attention.
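+
+ To make the dual-token idea concrete, below is a minimal PyTorch sketch of one way the two pathways could be wired together: fast tokens from aggressive temporal pooling read directly by the LLM, and slow tokens queried by the text hidden states through cross-attention. All names, shapes, and the pooling scheme (`SlowFastSketch`, `temporal_stride`, etc.) are illustrative assumptions, not the released implementation; see the repository for the actual architecture.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SlowFastSketch(nn.Module):
+     """Illustrative sketch of a slow-fast dual-token design (not the official code)."""
+
+     def __init__(self, dim=1024, num_heads=8, temporal_stride=4):
+         super().__init__()
+         self.temporal_stride = temporal_stride
+         self.cross_attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
+         self.norm_q = nn.LayerNorm(dim)
+         self.norm_kv = nn.LayerNorm(dim)
+
+     def forward(self, video_tokens, text_hidden):
+         # video_tokens: (B, T, N, D) per-frame visual features
+         # text_hidden:  (B, L, D) LLM hidden states for the instruction
+         B, T, N, D = video_tokens.shape
+
+         # "Fast" pathway: temporally strided, spatially pooled tokens the LLM
+         # reads directly -- a short, cheap overview of the whole video.
+         fast = video_tokens[:, :: self.temporal_stride].mean(dim=2)  # (B, T', D)
+
+         # "Slow" pathway: keep every frame at full spatial resolution as
+         # keys/values, so detail is preserved without inflating the LLM input.
+         slow = video_tokens.reshape(B, T * N, D)
+
+         # Instruction-aware extraction: text states attend into the slow tokens.
+         extracted, _ = self.cross_attn(self.norm_q(text_hidden),
+                                        self.norm_kv(slow), self.norm_kv(slow))
+         return fast, text_hidden + extracted
+ ```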
+
+ ## Usage
+ ```python
+ import torch
+ import os
+ import numpy as np
+ from decord import VideoReader, cpu
+
+ from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+ from llava.conversation import conv_templates, SeparatorStyle
+ from llava.model.builder import load_pretrained_model
+ from llava.mm_utils import tokenizer_image_token, get_model_name_from_path
+ from llava.utils import disable_torch_init
+
+ def load_video(video_path, max_frames_num):
+     # Sample max_frames_num frames uniformly across the full video.
+     vr = VideoReader(video_path, num_threads=4)
+     uniform_sampled_frames = np.linspace(0, len(vr) - 1, max_frames_num, dtype=int)
+     frame_idx = uniform_sampled_frames.tolist()
+     frames = vr.get_batch(frame_idx).asnumpy()
+     return frames
+
+ # Model
+ # Ensure you have cloned the code repository: git clone https://github.com/SHI-Labs/Slow-Fast-Video-Multimodal-LLM.git
+ model_path = "shi-labs/slowfast-video-mllm-qwen2-7b-convnext-576-frame64-s1t4"  # or another checkpoint
+ video_path = "Slow-Fast-Video-Multimodal-LLM/assets/catinterrupt.mp4"  # example video from the cloned repo
+ question = "Please describe this video in detail."
+ max_frames = 64  # set according to the specific checkpoint
+
+ disable_torch_init()
+ model_path = os.path.expanduser(model_path)
+ model_name = get_model_name_from_path(model_path)
+ # Make sure to pass trust_remote_code=True
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, None, model_name, use_flash_attn=True, trust_remote_code=True)
+
+ # Wrap the question with the image placeholder token(s) expected by the model.
+ if model.config.mm_use_im_start_end:
+     prompt = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + "\n" + question
+ else:
+     prompt = DEFAULT_IMAGE_TOKEN + "\n" + question
+
+ # Build the chat prompt from the Qwen conversation template.
+ conv = conv_templates["qwen_1_5"].copy()
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ # Read and preprocess the video frames.
+ video = load_video(video_path, max_frames_num=max_frames)
+ video_tensor = image_processor.preprocess(video, return_tensors="pt")["pixel_values"].half().cuda()
+ videos = [video_tensor]
+
+ # Tokenize the prompt, replacing the image placeholder with IMAGE_TOKEN_INDEX.
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
+ input_ids = input_ids.to(device="cuda", non_blocking=True).unsqueeze(dim=0)
+
+ with torch.inference_mode():
+     output_ids = model.generate(
+         input_ids,
+         images=videos,
+         do_sample=True,
+         max_new_tokens=1024,
+         num_beams=1,
+         temperature=0.2,
+         top_p=1.0,
+         use_cache=True,
+     )
+
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+ print(f"User input: {question}\n")
+ print(outputs)
+ ```
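+
+ The `load_video` helper above samples frames uniformly over the whole clip, which matches how the frame64 checkpoints expect their input. A common alternative is sampling roughly one frame per second; a sketch under that assumption follows. The fallback to uniform sampling for long videos is our assumption for illustration, not necessarily how the released loader handles them.
+
+ ```python
+ import numpy as np
+ from decord import VideoReader
+
+ def load_video_one_fps(video_path, max_frames_num):
+     # Sample roughly one frame per second of video.
+     vr = VideoReader(video_path, num_threads=4)
+     fps = round(vr.get_avg_fps())
+     frame_idx = list(range(0, len(vr), fps))
+     # Assumption: cap long videos by falling back to uniform sampling so the
+     # frame count never exceeds what the checkpoint was trained with.
+     if len(frame_idx) > max_frames_num:
+         frame_idx = np.linspace(0, len(vr) - 1, max_frames_num, dtype=int).tolist()
+     return vr.get_batch(frame_idx).asnumpy()
+ ```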
+
+ ## Citation
+
+ ```bibtex
+ @misc{wang2025slowfast,
+       title={Slow-Fast Architecture for Video Multi-Modal Large Language Models},
+       author={Haotian Wang and Zhengyuan Yang and Yue Zhao and Bin Lin and Zhe Chen and Yue Cao and Hongxia Yang},
+       year={2025},
+       eprint={2504.01328},
+       archivePrefix={arXiv},
+       primaryClass={cs.CV},
+       url={https://arxiv.org/abs/2504.01328v1},
+ }
+ ```