from flask import Flask, request, jsonify
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch
import os

app = Flask(__name__)

# ** Load the lightweight SD Turbo model and the Floor_Plan LoRA **
base_model = "stabilityai/sd-turbo"  # lightweight base model
lora_model = "maria26/Floor_Plan_LoRA"

# Fall back to CPU if no GPU is available; float16 is only reliable on GPU
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    base_model, torch_dtype=dtype, safety_checker=None
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# Load the LoRA weights
pipe.load_lora_weights(lora_model)

pipe.to(device)

# Make sure the output directory exists before saving images into it
os.makedirs("static", exist_ok=True)


@app.route('/generate', methods=['POST'])
def generate():
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt", "a simple architectural floor plan")
    try:
        # SD Turbo is tuned for very few steps and no classifier-free guidance
        image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0]
        image_path = "static/output.png"
        image.save(image_path)
        return jsonify({"status": "success", "image_url": image_path})
    except Exception as e:
        return jsonify({"status": "error", "message": str(e)}), 500


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=5000, debug=True)
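
# --- Example client (a minimal sketch, kept commented out so this file stays a pure server) ---
# This shows how the /generate endpoint above could be exercised from a separate script
# (e.g. a hypothetical client.py). It assumes the Flask server is already running on
# localhost:5000 and that the `requests` package is installed; the prompt text and the
# timeout value are illustrative, not part of the original code.
#
# import requests
#
# resp = requests.post(
#     "http://localhost:5000/generate",
#     json={"prompt": "a simple architectural floor plan, 2 bedrooms, 1 bathroom"},
#     timeout=300,  # generation can take a while, especially on CPU
# )
# print(resp.json())  # e.g. {"status": "success", "image_url": "static/output.png"}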