Update handler.py
handler.py  +9 -1
@@ -1,3 +1,4 @@
+import time
 import torch
 from transformers import CLIPProcessor, CLIPModel
 from PIL import Image
@@ -23,6 +24,8 @@ class EndpointHandler:
 
     def __call__(self, data):
         """Processes input and runs inference."""
+        start_time = time.time()  # Start timer
+
         print("📥 Processing input...")
 
         if "inputs" in data:
@@ -61,4 +64,9 @@ class EndpointHandler:
             best_matches = [(text[idx], probs[idx].item()) for idx in sorted_indices[:3]]  # Get top 3 matches
             predictions.append({"image_index": i, "top_matches": best_matches})
 
-
+        total_time = time.time() - start_time  # Calculate time taken
+
+        return {
+            "predictions": predictions,
+            "processing_time_seconds": round(total_time, 4)
+        }
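For context, the change wraps the entire inference pass in a wall-clock timer and surfaces the elapsed time in the response alongside the predictions. A minimal sketch of the pattern is below; the run_inference helper and the payload shape are illustrative assumptions, not part of this diff:

import time

def run_inference(data):
    # Hypothetical stand-in for the CLIP scoring loop in EndpointHandler;
    # the real handler builds top-3 (text, probability) matches per image.
    return [{"image_index": 0, "top_matches": [("a photo of a cat", 0.93)]}]

def timed_call(data):
    # Same timing pattern as the patched __call__: time the whole pass
    # and report it next to the predictions.
    start_time = time.time()
    predictions = run_inference(data)
    total_time = time.time() - start_time
    return {
        "predictions": predictions,
        "processing_time_seconds": round(total_time, 4),
    }

print(timed_call({"inputs": []}))

One design note: time.perf_counter() is generally the safer choice for measuring elapsed intervals, since time.time() can jump if the system clock is adjusted; time.time() is shown here only because it is what the patch uses.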