nicklorch committed on
Commit a4e73a2 · 1 Parent(s): dd31dc4

added debugging

Files changed (1)
  1. handler.py +24 -19
handler.py CHANGED
@@ -1,5 +1,6 @@
 from io import BytesIO
 import base64
+import traceback
 
 from PIL import Image
 import torch
@@ -14,25 +15,29 @@ class EndpointHandler():
         self.processor = CLIPProcessor.from_pretrained("rbanfield/clip-vit-large-patch14")
 
     def __call__(self, data):
+        try:
 
-        text_input = None
-        if isinstance(data, dict):
-            inputs = data.pop("inputs", None)
-            text_input = inputs.get('text',None)
-            image_data = BytesIO(base64.b64decode(inputs['image'])) if 'image' in inputs else None
-        else:
-            # assuming its an image sent via binary
-            image_data = BytesIO(data)
-
-
-        if text_input:
-            processor = self.processor(text=text_input, return_tensors="pt", padding=True).to(device)
-            with torch.no_grad():
-                return {'embeddings':self.text_model(**processor).pooler_output.tolist()[0]}
-        elif image_data:
-            image = Image.open(image_data)
-            processor = self.processor(images=image, return_tensors="pt").to(device)
-            with torch.no_grad():
-                return {'embeddings':self.image_model(**processor).image_embeds.tolist()[0]}
-        else:
-            return {'embeddings':None}
+            text_input = None
+            if isinstance(data, dict):
+                print('data is a dict: ', data)
+                inputs = data.pop("inputs", None)
+                text_input = inputs.get('text',None)
+                image_data = BytesIO(base64.b64decode(inputs['image'])) if 'image' in inputs else None
+            else:
+                # assuming its an image sent via binary
+                image_data = BytesIO(data)
+
+
+            if text_input:
+                processor = self.processor(text=text_input, return_tensors="pt", padding=True).to(device)
+                with torch.no_grad():
+                    return {'embeddings':self.text_model(**processor).pooler_output.tolist()[0]}
+            elif image_data:
+                image = Image.open(image_data)
+                processor = self.processor(images=image, return_tensors="pt").to(device)
+                with torch.no_grad():
+                    return {'embeddings':self.image_model(**processor).image_embeds.tolist()[0]}
+            else:
+                return {'embeddings':None}
+        except Exception:
+            return {'Error':traceback.format_exc()}
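
For context, a minimal local smoke test of the updated handler might look like the sketch below. The payload shapes ({"inputs": {"text": ...}} and {"inputs": {"image": <base64 string>}}) come from the handler code itself; the no-argument constructor, the module name handler, and the cat.jpg file are assumptions for illustration and are not part of this commit.

# Assumed usage sketch, not part of the commit: exercises the text, image, and error paths.
import base64

from handler import EndpointHandler  # assumes handler.py is importable from the working directory

handler = EndpointHandler()  # assumes __init__ needs no arguments; its signature is not visible in this diff

# Text path: __call__ reads {"inputs": {"text": ...}} and returns the CLIP text embedding.
text_out = handler({"inputs": {"text": "a photo of a cat"}})
print(len(text_out["embeddings"]))

# Image path: the image is sent base64-encoded under {"inputs": {"image": ...}}.
# "cat.jpg" is a placeholder file used only for this example.
with open("cat.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")
image_out = handler({"inputs": {"image": encoded}})
print(len(image_out["embeddings"]))

# With the new try/except, a malformed request (here, invalid base64 for the image)
# now comes back as {"Error": <traceback string>} instead of raising inside the endpoint.
bad_out = handler({"inputs": {"image": "not valid base64"}})
print(list(bad_out.keys()))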