captain-awesome committed on
Commit
ab670a9
·
verified ·
1 Parent(s): a48cec3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -17
app.py CHANGED
@@ -1,28 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from transformers.utils import logging
3
- logging.set_verbosity_error()
4
 
 
5
  import warnings
6
  warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")
7
 
8
- from transformers import BlipForQuestionAnswering
9
- from transformers import AutoProcessor
10
-
11
  def qa(image, question):
12
- model = BlipForQuestionAnswering.from_pretrained(
13
- "./models/Salesforce/blip-vqa-base")
14
- processor = AutoProcessor.from_pretrained(
15
- "./models/Salesforce/blip-vqa-base")
 
 
16
 
17
- inputs = processor(image, question, return_tensors="pt")
18
-
19
- out = model.generate(image, question)
20
-
21
- result = processor.decode(out[0], skip_special_tokens=True)
22
  return result
23
 
24
- # def greet(name):
25
- # return "Hello " + name + "!!"
26
-
27
- iface = gr.Interface(fn=qa, inputs=["image","text"], outputs="textbox")
28
  iface.launch()
 
1
+ # import gradio as gr
2
+ # from transformers.utils import logging
3
+ # logging.set_verbosity_error()
4
+
5
+ # import warnings
6
+ # warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")
7
+
8
+ # from transformers import BlipForQuestionAnswering
9
+ # from transformers import AutoProcessor
10
+
11
+ # def qa(image, question):
12
+ # model = BlipForQuestionAnswering.from_pretrained(
13
+ # "./models/Salesforce/blip-vqa-base")
14
+ # processor = AutoProcessor.from_pretrained(
15
+ # "./models/Salesforce/blip-vqa-base")
16
+
17
+ # inputs = processor(image, question, return_tensors="pt")
18
+
19
+ # out = model.generate(image, question)
20
+
21
+ # result = processor.decode(out[0], skip_special_tokens=True)
22
+ # return result
23
+
24
+ # # def greet(name):
25
+ # # return "Hello " + name + "!!"
26
+
27
+ # iface = gr.Interface(fn=qa, inputs=["image","text"], outputs="textbox")
28
+ # iface.launch()
29
+
30
+
31
+
32
  import gradio as gr
33
  from transformers.utils import logging
34
+ from transformers import BlipForQuestionAnswering, AutoProcessor
35
 
36
+ logging.set_verbosity_error()
37
  import warnings
38
  warnings.filterwarnings("ignore", message="Using the model-agnostic default `max_length`")
39
 
 
 
 
40
  def qa(image, question):
41
+ model = BlipForQuestionAnswering.from_pretrained("./models/Salesforce/blip-vqa-base")
42
+ processor = AutoProcessor.from_pretrained("./models/Salesforce/blip-vqa-base")
43
+
44
+ inputs = processor(image=image, question=question, return_tensors="pt")
45
+ out = model.generate(**inputs)
46
+ result = processor.decode(out[0], skip_special_tokens=True)
47
 
 
 
 
 
 
48
  return result
49
 
50
+ iface = gr.Interface(fn=qa, inputs=["image", "text"], outputs="textbox")
 
 
 
51
  iface.launch()