addgbf committed on
Commit
fc8bd63
1 Parent(s): 9fc4cdd

Upload the complete server32 to the Space

Dockerfile CHANGED
@@ -1,32 +1,33 @@
 FROM python:3.11-slim
-ENV PIP_NO_CACHE_DIR=1 PYTHONDONTWRITEBYTECODE=1 PYTHONUNBUFFERED=1
+
+# Basic configuration
+ENV PIP_NO_CACHE_DIR=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1
 
 RUN apt-get update && apt-get install -y --no-install-recommends \
-    build-essential git && rm -rf /var/lib/apt/lists/*
+    build-essential git && \
+    rm -rf /var/lib/apt/lists/*
 
 WORKDIR /app
 
-# === writable caches inside /app ===
-ENV HF_HOME=/app/.cache \
-    HF_HUB_CACHE=/app/.cache \
-    XDG_CACHE_HOME=/app/.cache \
-    OPENCLIP_CACHE_DIR=/app/.cache \
-    TORCH_HOME=/app/.cache/torch
-RUN mkdir -p /app/.cache /app/.cache/torch
-
+# Install dependencies
 COPY requirements.txt ./
 RUN pip install --upgrade pip && pip install -r requirements.txt
 
+# (Optional) download a lighter model so the Render free tier doesn't die
+RUN python - <<'PY'
+import open_clip
+open_clip.create_model_and_transforms('ViT-B-32', pretrained='openai')
+print("Pesos descargados")
+PY
+
+# Copy code and weights
 COPY server1.py .
 COPY text_embeddings_h14.pt .
 COPY text_embeddings_modelos_h14.pt .
 
-# (OPTIONAL) pre-download the weights during the build to avoid downloading at runtime
-RUN python - <<'PY'
-import open_clip
-open_clip.create_model_and_transforms('ViT-H-14', pretrained='laion2b_s32b_b79k', cache_dir="/app/.cache")
-print("Pesos cacheados en /app/.cache")
-PY
+# Render uses PORT
+ENV PORT=8080
 
-# On Spaces you must listen on port 7860
-CMD ["python", "-m", "uvicorn", "server1:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]
+CMD ["uvicorn", "server1:app", "--host", "0.0.0.0", "--port", "8080"]
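One caveat on the last two lines: Render injects the PORT environment variable at runtime, while this CMD hardcodes 8080, so the two stay in sync only as long as the ENV default matches. A minimal sketch of an env-aware launcher, assuming a hypothetical entrypoint.py that is not part of this commit:

# entrypoint.py: hypothetical launcher, not part of this commit.
# Reads the PORT that Render injects instead of hardcoding 8080.
import os

import uvicorn

if __name__ == "__main__":
    port = int(os.environ.get("PORT", "8080"))  # falls back to the Dockerfile default
    uvicorn.run("server1:app", host="0.0.0.0", port=port)

The CMD would then become ["python", "entrypoint.py"], with no port baked into the image.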
 
__pycache__/server1.cpython-311.pyc ADDED
Binary file (7.11 kB).
 
modelos.xlsx ADDED
Binary file (11.9 kB).
 
precalcular_modelos.py ADDED
@@ -0,0 +1,25 @@
+# precalcular_modelos_b16.py
+import torch
+import open_clip
+import pandas as pd
+
+# Make + model only
+df = pd.read_excel("modelos.xlsx")
+textos = (df["Marca"] + " " + df["Modelo"]).tolist()
+
+MODEL_NAME = "ViT-B-16"
+PRETRAINED = "openai"
+
+model, _, _ = open_clip.create_model_and_transforms(MODEL_NAME, pretrained=PRETRAINED)
+tokenizer = open_clip.get_tokenizer(MODEL_NAME)
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(device)
+
+with torch.no_grad():
+    text_inputs = tokenizer(textos).to(device)  # tensor on GPU or CPU
+    text_features = model.encode_text(text_inputs)
+    text_features /= text_features.norm(dim=-1, keepdim=True)
+
+torch.save({"embeddings": text_features.cpu(), "labels": textos}, "text_embeddings_modelos_b16.pt")
+print("Embeddings de modelos guardados en 'text_embeddings_modelos_b16.pt'")
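To sanity-check the artifact this script writes, a quick load-and-assert pass works (a sketch; 512 is the ViT-B-16 embedding width):

# check_modelos_pt.py: hypothetical sanity check, not part of this commit.
import torch

ckpt = torch.load("text_embeddings_modelos_b16.pt", map_location="cpu")
emb, labels = ckpt["embeddings"], ckpt["labels"]

# One row per "Marca Modelo" string, 512 dims for ViT-B-16.
assert emb.shape == (len(labels), 512)
# Rows were normalized before saving, so their norms should be ~1.
assert torch.allclose(emb.norm(dim=-1), torch.ones(len(labels)), atol=1e-3)
print(f"{len(labels)} model embeddings of dim {emb.shape[1]}")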
precalcular_text_embeddings_h14_excel.py ADDED
@@ -0,0 +1,29 @@
+# precalcular_text_embeddings_b16_excel.py
+import torch
+import open_clip
+import pandas as pd
+
+# Read the Excel file
+df = pd.read_excel("versiones_coche.xlsx")
+
+# Build the texts by combining make, model, and version
+def combinar_filas(row):
+    if pd.isna(row["Version"]) or not row["Version"]:
+        return f'{row["Marca"]} {row["Modelo"]}'
+    return f'{row["Marca"]} {row["Modelo"]} {row["Version"]}'
+
+textos = df.apply(combinar_filas, axis=1).tolist()
+
+# Load the model (same checkpoint server1.py uses, so the embedding spaces match)
+model, _, _ = open_clip.create_model_and_transforms('ViT-B-16', pretrained='openai')
+tokenizer = open_clip.get_tokenizer('ViT-B-16')
+
+# Compute the embeddings
+with torch.no_grad():
+    text_inputs = tokenizer(textos)
+    text_features = model.encode_text(text_inputs)
+    text_features /= text_features.norm(dim=-1, keepdim=True)
+
+# Save
+torch.save({'embeddings': text_features, 'labels': textos}, 'text_embeddings_b16.pt')
+print("Embeddings de texto guardados en 'text_embeddings_b16.pt'")
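server1.py assumes both .pt files live in the same embedding space and that every make+model label prefixes at least one version label. A small consistency check can catch a mismatched rebuild early (a sketch, assuming both files were produced by the scripts above):

# check_embedding_spaces.py: hypothetical consistency check, not part of this commit.
import torch

modelos = torch.load("text_embeddings_modelos_b16.pt", map_location="cpu")
versiones = torch.load("text_embeddings_b16.pt", map_location="cpu")

# Both files must share the embedding width to be comparable in server1.py.
assert modelos["embeddings"].shape[1] == versiones["embeddings"].shape[1]

# Every "Marca Modelo" label should prefix some version label; otherwise the
# startswith() filter in server1.py finds no candidates for that model.
version_labels = versiones["labels"]
orphans = [m for m in modelos["labels"]
           if not any(v.startswith(m) for v in version_labels)]
print(f"{len(orphans)} model labels have no matching version")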
requirements.txt CHANGED
@@ -1,12 +1,7 @@
-# PyTorch CPU wheels (no CUDA)
-torch==2.3.0+cpu
-torchvision==0.18.0+cpu
---extra-index-url https://download.pytorch.org/whl/cpu
-
-# Everything else
-fastapi==0.111.0
-uvicorn[standard]==0.30.1
+fastapi
+uvicorn[standard]
+torch
+torchvision
 pillow
 open_clip_torch
-timm
-huggingface-hub
+timm
 
server1.py CHANGED
@@ -9,28 +9,28 @@ from typing import Optional
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Load the CLIP model
+# === 1) Load the CLIP model (B/16) ===
 clip_model, _, preprocess = open_clip.create_model_and_transforms(
-    'ViT-H-14', pretrained='laion2b_s32b_b79k'
+    "ViT-B-16", pretrained="openai"
 )
 clip_model = clip_model.to(DEVICE)
 clip_model.eval()
-for param in clip_model.parameters():
-    param.requires_grad = False
+for p in clip_model.parameters():
+    p.requires_grad = False
 
-# Load model embeddings (make + model)
-model_ckpt = torch.load("text_embeddings_modelos_h14.pt", map_location=DEVICE)
+# === 2) Load the embeddings built with B/16 ===
+# (Make sure these files exist: they were generated as text_embeddings_modelos_b16.pt and text_embeddings_b16.pt)
+model_ckpt = torch.load("text_embeddings_modelos_b16.pt", map_location=DEVICE)
 model_labels = model_ckpt["labels"]
 model_embeddings = model_ckpt["embeddings"].to(DEVICE)
 model_embeddings /= model_embeddings.norm(dim=-1, keepdim=True)
 
-# Load version embeddings (make + model + version)
-version_ckpt = torch.load("text_embeddings_h14.pt", map_location=DEVICE)
+version_ckpt = torch.load("text_embeddings_b16.pt", map_location=DEVICE)
 version_labels = version_ckpt["labels"]
 version_embeddings = version_ckpt["embeddings"].to(DEVICE)
 version_embeddings /= version_embeddings.norm(dim=-1, keepdim=True)
 
-# Image transform
+# Image transform (uses the normalize from the B/16 preprocess)
 normalize = next(t for t in preprocess.transforms if isinstance(t, transforms.Normalize))
 transform = transforms.Compose([
     transforms.Resize((224, 224)),
@@ -47,10 +47,7 @@ def predict_top(text_feats, text_labels, image_tensor, topk=3):
     similarity = (100.0 * image_features @ text_feats.T).softmax(dim=-1)
     topk_result = torch.topk(similarity[0], k=topk)
     return [
-        {
-            "label": text_labels[idx],
-            "confidence": round(conf.item() * 100, 2)
-        }
+        {"label": text_labels[idx], "confidence": round(conf.item() * 100, 2)}
         for conf, idx in zip(topk_result.values, topk_result.indices)
     ]
 
@@ -63,10 +60,12 @@ def process_image(image_bytes: bytes):
     modelo_predecido = top_model["label"]
     confianza_modelo = top_model["confidence"]
 
-    # Split make and model
-    marca, modelo = modelo_predecido.split(" ", 1)
+    # Split make and model carefully (in case there is only one word)
+    partes = modelo_predecido.split(" ", 1)
+    marca = partes[0]
+    modelo = partes[1] if len(partes) > 1 else ""
 
-    # Step 2: find versions that start with that full model
+    # Step 2: filter versions that start with the full model label
     versiones_filtradas = [
         (label, idx) for idx, label in enumerate(version_labels)
         if label.startswith(modelo_predecido)
@@ -80,15 +79,14 @@ def process_image(image_bytes: bytes):
             "version": "No se encontraron versiones para este modelo"
         }
 
-    # Extract the matching embeddings
+    # Step 3: predict the version among this model's versions
     indices_versiones = [idx for _, idx in versiones_filtradas]
     versiones_labels = [label for label, _ in versiones_filtradas]
     versiones_embeds = version_embeddings[indices_versiones]
 
-    # Step 3: predict the version among this model's versions
     top_version = predict_top(versiones_embeds, versiones_labels, img_tensor, topk=1)[0]
     version_predicha = (
         top_version["label"].replace(modelo_predecido + " ", "")
         if top_version["confidence"] >= 25
         else "Versión no identificada con suficiente confianza"
     )
@@ -101,13 +99,10 @@ def process_image(image_bytes: bytes):
         "confianza_version": top_version["confidence"]
     }
 
-
 @app.post("/predict/")
 async def predict(front: UploadFile = File(...), back: Optional[UploadFile] = File(None)):
     front_bytes = await front.read()
     if back:
-        _ = await back.read()
+        _ = await back.read()  # not used for now
     result = process_image(front_bytes)
     return JSONResponse(content=result)
-
-
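For reference, a minimal client call against the /predict/ endpoint defined above (a sketch: host and port assume a local run of this image, and car.jpg is a placeholder path):

# client_example.py: hypothetical client, not part of this commit.
import requests

# /predict/ takes multipart form data: "front" is required, "back" is optional.
with open("car.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:8080/predict/",
        files={"front": ("car.jpg", f, "image/jpeg")},
    )
print(resp.json())  # marca, modelo, confianza_modelo, version, confianza_version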
 
 
text_embeddings_b16.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3665a6bb5b3b58cabbbdc35fbc2bffccb827431a2c1a6c12b28bbb1eda193971
+size 2346749
text_embeddings_modelos_b16.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7600362f35843cc6000e0ec01c296a9674cde62d5eeb25f281017093c6b736e9
+size 843829
versiones_coche.xlsx ADDED
Binary file (26.4 kB).