VesperAI committed on
Commit
f29601a
·
1 Parent(s): fd546cb

added a Production Branch

Browse files
Files changed (2) hide show
  1. image_indexer.py +9 -13
  2. image_search.py +11 -13
image_indexer.py CHANGED
@@ -322,19 +322,15 @@ class ImageIndexer:
322
  # Load model and processor with proper device handling
323
  self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
324
 
325
- # Load model directly to the target device to avoid meta tensor issues
326
- if self.device == "cuda":
327
- self.model = CLIPModel.from_pretrained(
328
- "openai/clip-vit-base-patch16",
329
- torch_dtype=torch.float16,
330
- device_map="auto"
331
- )
332
- else:
333
- # For CPU, use device_map to avoid meta tensor issues
334
- self.model = CLIPModel.from_pretrained(
335
- "openai/clip-vit-base-patch16",
336
- device_map="cpu"
337
- )
338
 
339
  self.model_initialized.set()
340
  print("Model initialization complete")
 
322
  # Load model and processor with proper device handling
323
  self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
324
 
325
+ # Load model with explicit device placement to avoid meta tensor issues
326
+ self.model = CLIPModel.from_pretrained(
327
+ "openai/clip-vit-base-patch16",
328
+ torch_dtype=torch.float32, # Use float32 for CPU compatibility
329
+ device_map=None # Don't use device_map to avoid meta tensor issues
330
+ )
331
+
332
+ # Move model to device explicitly
333
+ self.model = self.model.to(self.device)
 
 
 
 
334
 
335
  self.model_initialized.set()
336
  print("Model initialization complete")
image_search.py CHANGED
@@ -16,19 +16,17 @@ class ImageSearch:
16
  # Load model and processor with proper device handling
17
  self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
18
 
19
- # Load model directly to the target device to avoid meta tensor issues
20
- if self.device == "cuda":
21
- self.model = CLIPModel.from_pretrained(
22
- "openai/clip-vit-base-patch16",
23
- torch_dtype=torch.float16,
24
- device_map="auto"
25
- )
26
- else:
27
- # For CPU, use device_map to avoid meta tensor issues
28
- self.model = CLIPModel.from_pretrained(
29
- "openai/clip-vit-base-patch16",
30
- device_map="cpu"
31
- )
32
 
33
  # Initialize Qdrant client, folder manager and image database
34
  self.qdrant = QdrantClientSingleton.get_instance()
 
16
  # Load model and processor with proper device handling
17
  self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
18
 
19
+ # Load model with explicit device placement to avoid meta tensor issues
20
+ self.model = CLIPModel.from_pretrained(
21
+ "openai/clip-vit-base-patch16",
22
+ torch_dtype=torch.float32, # Use float32 for CPU compatibility
23
+ device_map=None # Don't use device_map to avoid meta tensor issues
24
+ )
25
+
26
+ # Move model to device explicitly
27
+ self.model = self.model.to(self.device)
28
+
29
+ print("Model initialization complete")
 
 
30
 
31
  # Initialize Qdrant client, folder manager and image database
32
  self.qdrant = QdrantClientSingleton.get_instance()