5m4ck3r committed on
Commit bd6df67 · verified · 1 Parent(s): 79cf728

Update app.py

Files changed (1)
  1. app.py (+2, -0)
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from PIL import Image
 import torch
 from transformers import AutoProcessor, LlavaNextForConditionalGeneration
+import spaces
 
 # Load the processor and model
 model_id = "llava-hf/llava-v1.6-mistral-7B-hf"
@@ -12,6 +13,7 @@ model = LlavaNextForConditionalGeneration.from_pretrained(
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)
 
+@spaces.GPU()
 def llava_inference(image: Image.Image, prompt: str):
     # Format the input as a conversation
     conversation = [
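
The two added lines wire the app into Spaces ZeroGPU: import spaces pulls in the Hugging Face spaces package, and @spaces.GPU() requests a GPU allocation only for the duration of each llava_inference call. Below is a minimal sketch of how app.py might look with this change applied; everything past the conversation = [ line (the chat-template call, the generation arguments, and the Gradio Interface wiring) is an assumption, since the diff only shows the top of the file.

import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaNextForConditionalGeneration

# Model id mirrored from app.py
model_id = "llava-hf/llava-v1.6-mistral-7B-hf"
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaNextForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

@spaces.GPU()  # ZeroGPU attaches a GPU only while this function runs
def llava_inference(image: Image.Image, prompt: str) -> str:
    # Build a single-turn conversation with one image and one text prompt
    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    text = processor.apply_chat_template(conversation, add_generation_prompt=True)
    inputs = processor(images=image, text=text, return_tensors="pt").to(device)
    output = model.generate(**inputs, max_new_tokens=256)
    return processor.decode(output[0], skip_special_tokens=True)

demo = gr.Interface(
    fn=llava_inference,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
    outputs=gr.Textbox(label="Response"),
)

if __name__ == "__main__":
    demo.launch()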