Spaces:
Sleeping
Sleeping
chips
committed on
Commit
·
0390fed
1
Parent(s):
9736f41
adding garment description feature using openai
Browse files- app.py +32 -1
- base_generator.py +10 -0
- requirements.txt +3 -1
app.py
CHANGED
|
@@ -5,9 +5,11 @@ from io import BytesIO
|
|
| 5 |
import fal_client
|
| 6 |
import os
|
| 7 |
import base_generator
|
| 8 |
-
|
|
|
|
| 9 |
|
| 10 |
app = FastAPI()
|
|
|
|
| 11 |
|
| 12 |
@app.get("/")
|
| 13 |
def greet_json():
|
|
@@ -160,3 +162,32 @@ def videoStatus(request_id: str):
|
|
| 160 |
async def videoResult(request_id: str):
    """Return the completed fal.ai Kling v1.6 image-to-video result for a queued request."""
    return await fal_client.result_async(
        "fal-ai/kling-video/v1.6/pro/image-to-video", request_id
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
import fal_client
|
| 6 |
import os
|
| 7 |
import base_generator
|
| 8 |
+
import openai
|
| 9 |
+
import base64
|
| 10 |
|
| 11 |
app = FastAPI()
|
| 12 |
+
openai.api_key = os.getenv("OPENAI_API_KEY")
|
| 13 |
|
| 14 |
@app.get("/")
|
| 15 |
def greet_json():
|
|
|
|
| 162 |
async def videoResult(request_id: str):
    """Await and return the finished Kling image-to-video job identified by *request_id*."""
    model_id = "fal-ai/kling-video/v1.6/pro/image-to-video"
    outcome = await fal_client.result_async(model_id, request_id)
    return outcome
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
#Auxiliary functions
|
| 169 |
+
|
| 170 |
+
@app.post("/describeGarment")
async def describe_garment(image: UploadFile = File(...)):
    """Describe the garment(s) in an uploaded image with an OpenAI vision model.

    The uploaded file is read fully into memory, base64-encoded, and sent to
    gpt-4o as an inline data URL. The model is prompted for a description that
    can be spliced into the template "a man wearing ... in front of a white
    background".

    Returns:
        dict: {"description": <model-generated garment description>}
    """
    image_bytes = await image.read()
    base64_image = base64.b64encode(image_bytes).decode("utf-8")

    # openai>=1.0 (which a bare `openai` requirement installs) removed the
    # module-level `openai.ChatCompletion` API — it raises at runtime.
    # Use the v1 client instead; it reads OPENAI_API_KEY from the environment.
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        # Typo-corrected prompt ("online"/"garmensts" garbled
                        # the instruction and degraded model output).
                        "text": (
                            "Describe this garment or garments to be inserted "
                            "into the sentence: a man wearing ..... in front of "
                            "a white background. Be detailed but only describe "
                            "the garments, not the rest of the image."
                        ),
                    },
                    {
                        # NOTE(review): assumes the upload is JPEG — the data
                        # URL hard-codes image/jpeg. Confirm or sniff the type.
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                    },
                ],
            }
        ],
        max_tokens=300,
    )
    description = response.choices[0].message.content
    return {"description": description}
|
base_generator.py
CHANGED
|
@@ -7,6 +7,16 @@ import requests
|
|
| 7 |
# a way to support loras directly
|
| 8 |
# A way to get a description of the outfit
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
def run_workflow(body):
|
| 12 |
print("run workflow")
|
|
|
|
| 7 |
# a way to support loras directly
|
| 8 |
# A way to get a description of the outfit
|
| 9 |
|
| 10 |
+
def import_lora(lora_name):
    """Stub for loading a LoRA by name.

    Currently only logs the call and hands back a canned placeholder string;
    no actual LoRA loading happens yet.
    """
    print("we are importing a lora")
    return "Tetsok"
|
| 13 |
+
|
| 14 |
+
def return_pose(pose_id: int):
    # Placeholder: logs the request and returns a fixed marker value until
    # real pose lookup is implemented.
    print("we are returning a pose")
    return "Tetsok"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
|
| 21 |
def run_workflow(body):
|
| 22 |
print("run workflow")
|
requirements.txt
CHANGED
|
@@ -3,4 +3,6 @@ uvicorn[standard]
|
|
| 3 |
httpx
|
| 4 |
python-multipart
|
| 5 |
fal-client
|
| 6 |
-
requests
|
|
|
|
|
|
|
|
|
| 3 |
httpx
|
| 4 |
python-multipart
|
| 5 |
fal-client
|
| 6 |
+
requests
|
| 7 |
+
openai
|
| 8 |
+
# base64 is a Python standard-library module — pip cannot install it; this entry should be removed
|