# compi_phase1_advanced.py
# Enhanced text-to-image generation with batch processing, negative prompts, and style controls
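#
# Example invocations (illustrative; all flags are defined in setup_args below):
#   python compi_phase1_advanced.py "a misty mountain lake at dawn" --steps 40 --seed 42
#   python compi_phase1_advanced.py "cyberpunk city at night" -n "blurry, low quality" -b 4
#   python compi_phase1_advanced.py --interactive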
import os
import sys
import torch
import argparse
from datetime import datetime
from diffusers import StableDiffusionPipeline
from PIL import Image
import json
# Add project root to path for imports
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
# ------------------ 1. SETUP AND ARGUMENT PARSING ------------------
def setup_args():
    parser = argparse.ArgumentParser(description="CompI Phase 1: Advanced Text-to-Image Generation")
    parser.add_argument("prompt", nargs="*", help="Text prompt for image generation")
    parser.add_argument("--negative", "-n", default="", help="Negative prompt (what to avoid)")
    parser.add_argument("--steps", "-s", type=int, default=30, help="Number of inference steps (default: 30)")
    parser.add_argument("--guidance", "-g", type=float, default=7.5, help="Guidance scale (default: 7.5)")
    parser.add_argument("--seed", type=int, default=None, help="Random seed for reproducibility")
    parser.add_argument("--batch", "-b", type=int, default=1, help="Number of images to generate")
    parser.add_argument("--width", "-w", type=int, default=512, help="Image width (default: 512)")
    parser.add_argument("--height", type=int, default=512, help="Image height (default: 512)")
    parser.add_argument("--model", "-m", default="runwayml/stable-diffusion-v1-5", help="Model to use")
    parser.add_argument("--output", "-o", default="outputs", help="Output directory")
    parser.add_argument("--interactive", "-i", action="store_true", help="Interactive mode")
    return parser.parse_args()

# Check for GPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Logging function
def log(msg):
    now = datetime.now().strftime("[%Y-%m-%d %H:%M:%S]")
    print(f"{now} {msg}")

# ------------------ 2. MODEL LOADING ------------------
def load_model(model_name):
log(f"Loading model: {model_name}")
def dummy_safety_checker(images, **kwargs):
return images, [False] * len(images)
try:
pipe = StableDiffusionPipeline.from_pretrained(
model_name,
torch_dtype=torch.float16 if device == "cuda" else torch.float32,
safety_checker=dummy_safety_checker,
)
pipe = pipe.to(device)
# Memory optimizations
pipe.enable_attention_slicing()
# Note: enable_memory_efficient_attention() is deprecated in newer versions
log("Model loaded successfully")
return pipe
except Exception as e:
log(f"Error loading model: {e}")
sys.exit(1)
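# Note: passing a plain callable as safety_checker works with current diffusers
# releases, but the documented way to disable the checker is
# from_pretrained(..., safety_checker=None, requires_safety_checker=False);
# behavior may vary between diffusers versions.
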
# ------------------ 3. GENERATION FUNCTION ------------------
def generate_image(pipe, prompt, negative_prompt="", **kwargs):
"""Generate a single image with given parameters"""
# Set up generator
seed = kwargs.get('seed', torch.seed())
if device == "cuda":
generator = torch.Generator(device).manual_seed(seed)
else:
generator = torch.manual_seed(seed)
# Generation parameters
params = {
'prompt': prompt,
'negative_prompt': negative_prompt if negative_prompt else None,
'height': kwargs.get('height', 512),
'width': kwargs.get('width', 512),
'num_inference_steps': kwargs.get('steps', 30),
'guidance_scale': kwargs.get('guidance', 7.5),
'generator': generator,
}
log(f"Generating: '{prompt[:50]}...' (seed: {seed})")
with torch.autocast(device) if device == "cuda" else torch.no_grad():
result = pipe(**params)
return result.images[0], seed
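# Example (illustrative): generate_image can also be called from other code,
# assuming a pipeline created via load_model above.
#   pipe = load_model("runwayml/stable-diffusion-v1-5")
#   image, seed = generate_image(pipe, "a watercolor fox in the snow",
#                                negative_prompt="blurry, low quality",
#                                steps=25, guidance=8.0, seed=1234)
#   image.save("fox.png")
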
# ------------------ 4. SAVE FUNCTION ------------------
def save_image(image, prompt, seed, output_dir, metadata=None):
"""Save image with descriptive filename and metadata"""
os.makedirs(output_dir, exist_ok=True)
# Create filename
prompt_slug = "_".join(prompt.lower().split()[:6])
prompt_slug = "".join(c for c in prompt_slug if c.isalnum() or c in "_-")[:40]
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"{prompt_slug}_{timestamp}_seed{seed}.png"
filepath = os.path.join(output_dir, filename)
# Save image
image.save(filepath)
# Save metadata
if metadata:
metadata_file = filepath.replace('.png', '_metadata.json')
with open(metadata_file, 'w') as f:
json.dump(metadata, f, indent=2)
log(f"Saved: {filepath}")
return filepath
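# Resulting files look like (illustrative):
#   outputs/a_misty_mountain_lake_at_dawn_20240101_120000_seed42.png
#   outputs/a_misty_mountain_lake_at_dawn_20240101_120000_seed42_metadata.json
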
# ------------------ 5. INTERACTIVE MODE ------------------
def interactive_mode(pipe, output_dir):
"""Interactive prompt mode for experimentation"""
log("Entering interactive mode. Type 'quit' to exit.")
while True:
try:
prompt = input("\nπ¨ Enter prompt: ").strip()
if prompt.lower() in ['quit', 'exit', 'q']:
break
if not prompt:
continue
negative = input("β Negative prompt (optional): ").strip()
# Quick settings
print("βοΈ Quick settings (press Enter for defaults):")
steps = input(f" Steps (30): ").strip()
steps = int(steps) if steps else 30
guidance = input(f" Guidance (7.5): ").strip()
guidance = float(guidance) if guidance else 7.5
# Generate
image, seed = generate_image(
pipe, prompt, negative,
steps=steps, guidance=guidance
)
# Save with metadata
metadata = {
'prompt': prompt,
'negative_prompt': negative,
'steps': steps,
'guidance_scale': guidance,
'seed': seed,
'timestamp': datetime.now().isoformat()
}
save_image(image, prompt, seed, output_dir, metadata)
except KeyboardInterrupt:
print("\nπ Goodbye!")
break
except Exception as e:
log(f"Error: {e}")
# ------------------ 6. MAIN FUNCTION ------------------
def main():
    args = setup_args()

    # Load model
    pipe = load_model(args.model)

    # Interactive mode
    if args.interactive:
        interactive_mode(pipe, args.output)
        return

    # Get prompt
    if args.prompt:
        prompt = " ".join(args.prompt)
    else:
        prompt = input("Enter your prompt: ").strip()
        if not prompt:
            log("No prompt provided. Use --interactive for interactive mode.")
            return

    # Generate batch
    log(f"Generating {args.batch} image(s)")

    for i in range(args.batch):
        try:
            # Use the provided seed (offset per image so batch results differ),
            # or draw a random one; compare against None so --seed 0 is respected
            seed = args.seed + i if args.seed is not None else torch.seed()

            image, actual_seed = generate_image(
                pipe, prompt, args.negative,
                seed=seed, steps=args.steps, guidance=args.guidance,
                width=args.width, height=args.height
            )

            # Prepare metadata
            metadata = {
                'prompt': prompt,
                'negative_prompt': args.negative,
                'steps': args.steps,
                'guidance_scale': args.guidance,
                'seed': actual_seed,
                'width': args.width,
                'height': args.height,
                'model': args.model,
                'batch_index': i + 1,
                'timestamp': datetime.now().isoformat()
            }

            save_image(image, prompt, actual_seed, args.output, metadata)

        except Exception as e:
            log(f"Error generating image {i+1}: {e}")

    log("Generation complete!")

if __name__ == "__main__":
    main()