DongfuJiang committed (verified)
Commit 8b5ed27 · 1 Parent(s): 6410280

Update README.md

Files changed (1)
  1. README.md +65 -0
README.md CHANGED
@@ -54,3 +54,68 @@ The following hyperparameters were used during training:
  - Pytorch 2.5.1+cu124
  - Datasets 2.18.0
  - Tokenizers 0.21.0
+
+ ```python
+ from transformers import AutoProcessor
+ from mantis.models.siglip_video import SiglipVideoModel
+ import torch
+ import numpy as np
+ import av
+
+ def read_video_pyav(container, indices):
+     '''
+     Decode the video with the PyAV decoder.
+
+     Args:
+         container (av.container.input.InputContainer): PyAV container.
+         indices (List[int]): List of frame indices to decode.
+
+     Returns:
+         np.ndarray: np array of decoded frames of shape (num_frames, height, width, 3).
+     '''
+     frames = []
+     container.seek(0)
+     if len(indices) == 0:
+         # Fall back to the first frame so np.stack below does not fail on an empty list.
+         indices = [0]
+         print("No indices to decode; the video may be empty, please check.")
+     start_index = indices[0]
+     end_index = indices[-1]
+     for i, frame in enumerate(container.decode(video=0)):
+         if i > end_index:
+             break
+         if i >= start_index and i in indices:
+             frames.append(frame)
+     return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+ # model = SiglipVideoModel.from_pretrained("google/siglip-so400m-patch14-384")
+ model = SiglipVideoModel.from_pretrained("Mantis-VL/siglip-video_16384_2fps_128").to("cuda")
+ processor = AutoProcessor.from_pretrained("google/siglip-so400m-patch14-384")
+
+ # Sample frame indices at 2 fps from the source video.
+ container = av.open("../mochi.mp4")
+ total_frames = container.streams.video[0].frames
+ sample_fps = 2
+ ori_fps = container.streams.video[0].average_rate
+ indices = np.arange(0, total_frames, int(ori_fps / sample_fps))
+ frames = read_video_pyav(container, indices)
+
+ text = "Close-up of a chameleon's eye, with its scaly skin changing color. Ultra high resolution 4k."
+
+ print(frames.shape)
+ inputs = processor(text=[text], images=frames, padding="max_length", return_tensors="pt")
+ inputs = {k: v.to(model.device) for k, v in inputs.items()}
+ # The model expects a list of per-video pixel tensors.
+ inputs['pixel_values'] = [inputs['pixel_values']]
+ with torch.no_grad():
+     outputs = model(**inputs)
+ logits_per_video = outputs.logits_per_video
+ print(logits_per_video)
+ probs = torch.sigmoid(logits_per_video)  # SigLIP uses a sigmoid, not a softmax
+ print(f"{probs[0][0]:.1%} the video contains the text: '{text}'")
+ ```
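
A minimal sketch of scoring several candidate captions with the same interface (it assumes the `model`, `processor`, and `frames` variables from the snippet above; the captions are illustrative, not from the original example):

```python
import torch

# Hypothetical candidate captions to rank against the sampled video frames.
candidate_texts = [
    "Close-up of a chameleon's eye, with its scaly skin changing color.",
    "A dog running across a snowy field.",
]
inputs = processor(text=candidate_texts, images=frames, padding="max_length", return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
inputs['pixel_values'] = [inputs['pixel_values']]  # one pixel tensor per video
with torch.no_grad():
    outputs = model(**inputs)
# SigLIP scores each (video, text) pair independently through a sigmoid,
# so the probabilities need not sum to 1 across captions.
probs = torch.sigmoid(outputs.logits_per_video)
for caption, p in zip(candidate_texts, probs[0]):
    print(f"{p:.1%} - {caption}")
```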