Commit 9bb2fc2
Parent(s): b083fdc
support product_data values as str or list
Files changed:
- app/schemas/requests.py +2 -2
- app/services/base.py +2 -2
- app/services/service_anthropic.py +2 -2
- app/services/service_openai.py +2 -2
- app/utils/converter.py +26 -1
- clean_for_gradio.sh +1 -1
app/schemas/requests.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
 from pydantic import BaseModel
 
@@ -18,7 +18,7 @@ class ExtractionRequest(BaseModel):
     img_urls: Optional[List[str]] = None
     product_taxonomy: str
     request_meta: Optional[Dict[str, str]] = None
-    product_data: Optional[Dict[str, str]] = None
+    product_data: Optional[Dict[str, Union[str, List]]] = None
     ai_model: str = settings.DEFAULT_MODEL  # type: ignore
     max_attempts: int = settings.DEFAULT_MAX_ATTEMPTS  # type: ignore
 
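For reference, a minimal sketch (not part of the commit) of what the widened annotation accepts; the model is trimmed to the relevant fields and the request values are invented:

from typing import Dict, List, Optional, Union

from pydantic import BaseModel


class ExtractionRequest(BaseModel):  # trimmed to the fields touched by this commit
    product_taxonomy: str
    product_data: Optional[Dict[str, Union[str, List]]] = None


# Both value shapes now validate: plain strings and lists.
ExtractionRequest(
    product_taxonomy="apparel > shirts",
    product_data={"color": "blue", "materials": ["cotton", "elastane"]},
)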
app/services/base.py
CHANGED
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Type
+from typing import Any, Dict, List, Type, Union
 
 from pydantic import BaseModel
 
@@ -34,7 +34,7 @@ class BaseAttributionService(ABC):
         ai_model: str,
         img_urls: List[str],
         product_taxonomy: str,
-        product_data: Dict[str, str],
+        product_data: Dict[str, Union[str, List]],
         pil_images: List[Any] = None,
         img_paths: List[str] = None,
     ) -> Dict[str, Any]:
app/services/service_anthropic.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 import os
-from typing import Any, Dict, List, Type
+from typing import Any, Dict, List, Type, Union
 
 import anthropic
 import weave
@@ -41,7 +41,7 @@ class AnthropicService(BaseAttributionService):
         ai_model: str,
         img_urls: List[str],
         product_taxonomy: str,
-        product_data: Dict[str, str],
+        product_data: Dict[str, Union[str, List]],
         pil_images: List[Any] = None,  # do not remove, this is for weave
         img_paths: List[str] = None,
     ) -> Dict[str, Any]:
app/services/service_openai.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 import os
-from typing import Any, Dict, List, Type
+from typing import Any, Dict, List, Type, Union
 
 import openai
 import weave
@@ -58,7 +58,7 @@ class OpenAIService(BaseAttributionService):
         ai_model: str,
         img_urls: List[str],
         product_taxonomy: str,
-        product_data: Dict[str, str],
+        product_data: Dict[str, Union[str, List]],
         pil_images: List[Any] = None,  # do not remove, this is for weave
         img_paths: List[str] = None,
     ) -> Dict[str, Any]:
app/utils/converter.py
CHANGED
@@ -11,4 +11,29 @@ def product_data_to_str(product_data: dict[str, any]) -> str:
     if product_data is None:
         return ""
 
-
+    data_list = []
+    for k, v in product_data.items():
+        data_line = None
+        if isinstance(v, list):
+            delimiter = ","
+            for sub_v in v:
+                if "," in sub_v:
+                    delimiter = (
+                        ";"  # Use semicolon as delimiter if comma is found in the value
+                    )
+
+            for i, sub_v in enumerate(v):
+                if ";" in sub_v:
+                    v[i] = f'"{sub_v}"'
+
+            data_line = f"{k}: {f'{delimiter} '.join(v)}"
+        elif isinstance(v, str):
+            data_line = f"{k}: {v}"
+        else:
+            raise ValueError(
+                f"Unsupported data type for value of product data: {type(v)}. Only list and str are supported."
+            )
+
+        data_list.append(data_line)
+
+    return "\n".join(data_list)
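A quick usage sketch (not from the commit) of the new list handling in product_data_to_str; the input values are invented for illustration:

from app.utils.converter import product_data_to_str

print(
    product_data_to_str(
        {
            "color": "blue",                            # plain str value
            "sizes": ["S", "M", "L"],                   # list -> joined with ", "
            "features": ["soft, stretchy", "durable"],  # comma in a value -> "; " join
        }
    )
)
# color: blue
# sizes: S, M, L
# features: soft, stretchy; durable

Worth noting: list elements that themselves contain a semicolon are wrapped in double quotes before joining, and that quoting is written back into the caller's list in place.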
clean_for_gradio.sh
CHANGED
@@ -1,4 +1,4 @@
 rm -rf app/api
 rm -rf app/aws
 rm app/main.py
-rm worker.py
+rm app/worker.py