sheraz179 committed
Commit 0941d02 · 1 Parent(s): 4e796e2

Upload handler.py

Files changed (1)
  1. handler.py +49 -0
handler.py ADDED
@@ -0,0 +1,49 @@
+ from typing import Any, Dict
+ from io import BytesIO
+
+ import torch
+ from PIL import Image
+ from transformers import BlipForConditionalGeneration, BlipProcessor
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # Load the BLIP captioning model and its processor once at startup.
+         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+         self.model = BlipForConditionalGeneration.from_pretrained(
+             "Salesforce/blip-image-captioning-base"
+         ).to(device)
+         self.model.eval()
+
+     def __call__(self, data: Any) -> Dict[str, Any]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 Includes the input image bytes and the parameters for the inference.
+         Return:
+             A :obj:`dict` with one list, e.g. {"captions": ["A hugging face at the office"]}, containing:
+             - "captions": a list of strings, one generated caption per input image.
+         """
+         # Pop the parameters first so that, when "inputs" is absent and the
+         # whole payload is treated as the inputs, they are not decoded as images.
+         parameters = data.pop("parameters", {})
+         inputs = data.pop("inputs", data)
+
+         # Decode the raw image bytes; convert to RGB so grayscale or RGBA
+         # inputs do not break the processor.
+         raw_images = [Image.open(BytesIO(_img)).convert("RGB") for _img in inputs]
+
+         processed_images = self.processor(images=raw_images, return_tensors="pt")
+         processed_images["pixel_values"] = processed_images["pixel_values"].to(device)
+         # Merge any generation parameters (e.g. max_new_tokens) into the call.
+         generate_kwargs = {**processed_images, **parameters}
+
+         with torch.no_grad():
+             out = self.model.generate(**generate_kwargs)
+
+         # Postprocess the predictions into plain caption strings.
+         captions = self.processor.batch_decode(out, skip_special_tokens=True)
+         return {"captions": captions}
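
For reference, a minimal sketch of how this handler could be exercised locally once the file is saved as handler.py. The image path "test.jpg" and the max_new_tokens value are hypothetical, chosen only for illustration:

from handler import EndpointHandler

# Hypothetical local smoke test: read an image as raw bytes and request a caption.
handler = EndpointHandler()
with open("test.jpg", "rb") as f:  # hypothetical test image
    image_bytes = f.read()

result = handler({
    "inputs": [image_bytes],
    "parameters": {"max_new_tokens": 20},  # forwarded to model.generate()
})
print(result["captions"])  # a list with one caption string per input image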