import gradio as gr
from transformers.image_utils import load_image
from threading import Thread
import time
import torch
import spaces
from PIL import Image
import requests
from io import BytesIO
import cv2
import numpy as np
from transformers import (
Qwen2VLForConditionalGeneration,
AutoProcessor,
TextIteratorStreamer,
AutoModelForImageTextToText,
)
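# `spaces` supplies the @spaces.GPU decorator that Hugging Face ZeroGPU Spaces
# use to attach a GPU for the duration of each decorated call.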
# Helper function to return a progress bar HTML snippet.
def progress_bar_html(label: str) -> str:
return f'''
<div style="display: flex; align-items: center;">
<span style="margin-right: 10px; font-size: 14px;">{label}</span>
<div style="width: 110px; height: 5px; background-color: #FFB6C1; border-radius: 2px; overflow: hidden;">
<div style="width: 100%; height: 100%; background-color: #FF69B4; animation: loading 1.5s linear infinite;"></div>
</div>
</div>
<style>
@keyframes loading {{
0% {{ transform: translateX(-100%); }}
100% {{ transform: translateX(100%); }}
}}
</style>
'''
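# Yielding this snippet as an interim chat message renders an animated bar in
# the Gradio chat until the first streamed token replaces it.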
# Helper function to downsample a video into 10 evenly spaced frames.
def downsample_video(video_path):
    vidcap = cv2.VideoCapture(video_path)
    if not vidcap.isOpened():
        return []
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Fall back to an assumed 30 FPS if the container reports no frame rate,
    # avoiding a division by zero when computing timestamps.
    fps = vidcap.get(cv2.CAP_PROP_FPS) or 30.0
    frames = []
    # Sample 10 evenly spaced frames across the clip.
    frame_indices = np.linspace(0, max(total_frames - 1, 0), 10, dtype=int)
    for i in frame_indices:
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, int(i))
        success, image = vidcap.read()
        if success:
            # OpenCV decodes to BGR; convert to RGB before handing off to PIL.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(image)
            timestamp = round(i / fps, 2)
            frames.append((pil_image, timestamp))
    vidcap.release()
    return frames
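# Usage sketch (hypothetical path): downsample_video("clip.mp4") returns a
# list like [(PIL.Image, 0.0), (PIL.Image, 1.37), ...] with timestamps in seconds.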
# Model and processor setups
# Setup for Qwen2VL OCR branch (default).
QV_MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct" # or use "prithivMLmods/Qwen2-VL-OCR2-2B-Instruct"
qwen_processor = AutoProcessor.from_pretrained(QV_MODEL_ID, trust_remote_code=True)
qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
QV_MODEL_ID,
trust_remote_code=True,
torch_dtype=torch.float16
).to("cuda").eval()
# Setup for Aya-Vision branch.
AYA_MODEL_ID = "CohereForAI/aya-vision-8b"
aya_processor = AutoProcessor.from_pretrained(AYA_MODEL_ID)
aya_model = AutoModelForImageTextToText.from_pretrained(
AYA_MODEL_ID, device_map="auto", torch_dtype=torch.float16
)
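# Note: the Qwen2VL OCR model is pinned to a single CUDA device, while
# Aya-Vision uses device_map="auto" to place weights across whatever devices
# are available; both load in float16 to halve memory use.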
# ---------------------------
# Main Inference Function
# ---------------------------
@spaces.GPU
def model_inference(input_dict, history):
text = input_dict["text"].strip()
files = input_dict.get("files", [])
# Branch for video inference with Aya-Vision using @video-infer.
if text.lower().startswith("@video-infer"):
prompt = text[len("@video-infer"):].strip()
if not files:
yield "Error: Please provide a video for the @video-infer feature."
return
video_path = files[0]
frames = downsample_video(video_path)
if not frames:
yield "Error: Could not extract frames from the video."
return
        # Build the message: the prompt first, then each frame with its timestamp.
content_list = []
content_list.append({"type": "text", "text": prompt})
for frame, timestamp in frames:
content_list.append({"type": "text", "text": f"Frame {timestamp}:"})
content_list.append({"type": "image", "image": frame})
messages = [{
"role": "user",
"content": content_list,
}]
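        # Interleaving "Frame <t>:" labels with the images gives the model a
        # rough temporal anchor for each sampled frame.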
inputs = aya_processor.apply_chat_template(
messages,
padding=True,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt"
).to(aya_model.device)
streamer = TextIteratorStreamer(aya_processor, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(
inputs,
streamer=streamer,
max_new_tokens=1024,
do_sample=True,
temperature=0.3
)
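        # Run generate() on a worker thread; TextIteratorStreamer then yields
        # decoded tokens incrementally so partial output can be streamed back
        # to the chat as it is produced.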
thread = Thread(target=aya_model.generate, kwargs=generation_kwargs)
thread.start()
buffer = ""
yield progress_bar_html("Processing video with Aya-Vision-8b")
for new_text in streamer:
buffer += new_text
buffer = buffer.replace("<|im_end|>", "")
time.sleep(0.01)
yield buffer
return
    # Branch for single-image inference with Aya-Vision using @aya-vision.
    if text.lower().startswith("@aya-vision"):
        text_prompt = text[len("@aya-vision"):].strip()
        if not files:
            yield "Error: Please provide an image for the @aya-vision feature."
            return
        # Use the first provided image.
        image = load_image(files[0])
        yield progress_bar_html("Processing with Aya-Vision-8b")
messages = [{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": text_prompt},
],
}]
inputs = aya_processor.apply_chat_template(
messages,
padding=True,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt"
).to(aya_model.device)
streamer = TextIteratorStreamer(aya_processor, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(
inputs,
streamer=streamer,
max_new_tokens=1024,
do_sample=True,
temperature=0.3
)
thread = Thread(target=aya_model.generate, kwargs=generation_kwargs)
thread.start()
buffer = ""
for new_text in streamer:
buffer += new_text
buffer = buffer.replace("<|im_end|>", "")
time.sleep(0.01)
yield buffer
return
    # Default branch: Qwen2VL OCR for text queries with optional images.
    images = [load_image(f) for f in files]
    if text == "" and not images:
        yield "Error: Please enter a query and, optionally, image(s)."
        return
    if text == "" and images:
        yield "Error: Please enter a text query along with the image(s)."
        return
messages = [{
"role": "user",
"content": [
*[{"type": "image", "image": image} for image in images],
{"type": "text", "text": text},
],
}]
prompt = qwen_processor.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
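    # Unlike the Aya branches (which tokenize inside apply_chat_template), the
    # Qwen path renders the prompt string first and lets the processor batch
    # text and images together below.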
inputs = qwen_processor(
text=[prompt],
images=images if images else None,
return_tensors="pt",
padding=True,
).to("cuda")
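    # With no files attached, images is empty and the processor above receives
    # images=None, producing a text-only batch.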
streamer = TextIteratorStreamer(qwen_processor, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
thread = Thread(target=qwen_model.generate, kwargs=generation_kwargs)
thread.start()
buffer = ""
yield progress_bar_html("Processing with Qwen2VL OCR")
for new_text in streamer:
buffer += new_text
buffer = buffer.replace("<|im_end|>", "")
time.sleep(0.01)
yield buffer
# Gradio Interface Setup
examples = [
[{"text": "@aya-vision Summarize the letter", "files": ["examples/1.png"]}],
[{"text": "@aya-vision Extract JSON from the image", "files": ["example_images/document.jpg"]}],
[{"text": "@video-infer Explain what is happening in this video ?", "files": ["examples/oreo.mp4"]}],
[{"text": "Extract as JSON table from the table", "files": ["examples/4.jpg"]}],
[{"text": "@aya-vision Describe the photo", "files": ["examples/3.png"]}],
[{"text": "@aya-vision Summarize the full image in detail", "files": ["examples/2.jpg"]}],
[{"text": "@aya-vision Describe this image.", "files": ["example_images/campeones.jpg"]}],
[{"text": "@aya-vision What is this UI about?", "files": ["example_images/s2w_example.png"]}],
[{"text": "Can you describe this image?", "files": ["example_images/newyork.jpg"]}],
[{"text": "Can you describe this image?", "files": ["example_images/dogs.jpg"]}],
[{"text": "@aya-vision Where do the severe droughts happen according to this diagram?", "files": ["example_images/examples_weather_events.png"]}],
]
demo = gr.ChatInterface(
fn=model_inference,
description="# **Multimodal OCR `@aya-vision for image, @video-infer for video`**",
examples=examples,
textbox=gr.MultimodalTextbox(
label="Query Input",
file_types=["image", "video"],
file_count="multiple",
placeholder="Tag @aya-vision for Aya-Vision image infer, @video-infer for Aya-Vision video infer, default runs Qwen2VL OCR"
),
stop_btn="Stop Generation",
multimodal=True,
cache_examples=False,
)
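# debug=True keeps the process in the foreground and prints full tracebacks
# to the console.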
demo.launch(debug=True)