added a Production Branch
- image_indexer.py  +9 -13
- image_search.py  +11 -13
image_indexer.py
CHANGED
@@ -322,19 +322,15 @@ class ImageIndexer:
         # Load model and processor with proper device handling
         self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
 
-        # Load model
-
-
-
-
-
-
-
-
-        self.model = CLIPModel.from_pretrained(
-            "openai/clip-vit-base-patch16",
-            device_map="cpu"
-        )
+        # Load model with explicit device placement to avoid meta tensor issues
+        self.model = CLIPModel.from_pretrained(
+            "openai/clip-vit-base-patch16",
+            torch_dtype=torch.float32,  # Use float32 for CPU compatibility
+            device_map=None  # Don't use device_map to avoid meta tensor issues
+        )
+
+        # Move model to device explicitly
+        self.model = self.model.to(self.device)
 
         self.model_initialized.set()
         print("Model initialization complete")
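Both files now follow the same loading pattern: materialize the CLIP weights in float32 with no device_map, then move the module to the target device in a separate, explicit step. Below is a minimal, self-contained sketch of that pattern under the public transformers/torch APIs; the device variable stands in for the classes' self.device attribute, which is set elsewhere in the repo.

# Sketch of the patched CLIP initialization (assumes transformers and torch installed).
import torch
from transformers import CLIPModel, CLIPProcessor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")

# Load fully materialized weights (no device_map), then move them explicitly.
# float32 avoids half-precision ops that are unsupported on CPU.
model = CLIPModel.from_pretrained(
    "openai/clip-vit-base-patch16",
    torch_dtype=torch.float32,
    device_map=None,
)
model = model.to(device)
model.eval()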
image_search.py
CHANGED
@@ -16,19 +16,17 @@ class ImageSearch:
         # Load model and processor with proper device handling
         self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
 
-        # Load model
-
-
-
-
-
-
-
-        self.model = CLIPModel.from_pretrained(
-            "openai/clip-vit-base-patch16",
-            device_map="cpu"
-        )
+        # Load model with explicit device placement to avoid meta tensor issues
+        self.model = CLIPModel.from_pretrained(
+            "openai/clip-vit-base-patch16",
+            torch_dtype=torch.float32,  # Use float32 for CPU compatibility
+            device_map=None  # Don't use device_map to avoid meta tensor issues
+        )
+
+        # Move model to device explicitly
+        self.model = self.model.to(self.device)
+
+        print("Model initialization complete")
 
         # Initialize Qdrant client, folder manager and image database
         self.qdrant = QdrantClientSingleton.get_instance()