Spaces: Runtime error
Upload app.py
app.py
ADDED
@@ -0,0 +1,60 @@
import gradio as gr
from diffusers import DiffusionPipeline
import torch
from flask import Flask, request, jsonify
import threading
import tempfile
import io
import base64
from PIL import Image

# Load the model pipeline
pipe = DiffusionPipeline.from_pretrained(
    "HiDream-ai/HiDream-E1-Full",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Flask app for the API
app = Flask(__name__)

def image_to_base64(image: Image.Image) -> str:
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()

@app.route("/v1/texttoimage/completions", methods=["POST"])
def text_to_image():
    data = request.json
    prompt = data.get("prompt", "").strip()
    if not prompt:
        return jsonify({"error": "Empty prompt"}), 400
    result = pipe(prompt)
    img_b64 = image_to_base64(result.images[0])
    return jsonify({"image_base64": img_b64})

# Run Flask in a separate thread.
# Flask must listen on a port other than 7860, which Gradio binds below;
# giving both servers the same port makes the second bind fail at startup.
def run_flask():
    app.run(host="0.0.0.0", port=5000)

threading.Thread(target=run_flask, daemon=True).start()

# Gradio interface
def gerar_imagem_gradio(prompt):
    result = pipe(prompt)
    return result.images[0]

with gr.Blocks() as demo:
    gr.Markdown("## Text-to-Image chat with integrated Flask API")
    gr.Markdown("API available at: `http://localhost:5000/v1/texttoimage/completions`")

    chat = gr.Chatbot()
    txt = gr.Textbox(placeholder="Type your prompt here and press Enter", show_label=False)

    def responder(prompt, chat_history):
        img = gerar_imagem_gradio(prompt)
        # Chatbot (tuple format) renders images passed as a (filepath,) tuple,
        # so save the generated PIL image to a temporary file first.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            img.save(tmp, format="PNG")
        chat_history = chat_history + [(prompt, (tmp.name,))]
        return chat_history, ""

    txt.submit(responder, inputs=[txt, chat], outputs=[chat, txt])

demo.launch(server_name="0.0.0.0", server_port=7860)
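
Below is a minimal client sketch for the Flask endpoint defined above, assuming the app is running locally with Flask bound to port 5000; the prompt text and output filename are only illustrative. It POSTs a JSON prompt to /v1/texttoimage/completions, reads the image_base64 field from the response, and decodes it back into an image file.

import base64
import io

import requests
from PIL import Image

# Illustrative endpoint URL; adjust host/port to match your deployment.
API_URL = "http://localhost:5000/v1/texttoimage/completions"

# Send the prompt as JSON, matching the {"prompt": ...} payload the route expects.
resp = requests.post(API_URL, json={"prompt": "a watercolor lighthouse at dawn"})
resp.raise_for_status()

# The route returns {"image_base64": "..."}; decode it and save the image to disk.
img_bytes = base64.b64decode(resp.json()["image_base64"])
Image.open(io.BytesIO(img_bytes)).save("output.png")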