# -*- coding: utf-8 -*-
"""Diffusion.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1bcJlVBYDIxhySq0b6YHyKsumLgIomNqf

# Diffusion

Setup
"""

# check that the Colab runtime has a GPU attached
!nvidia-smi

!pip install diffusers==0.11.1
!pip install transformers scipy ftfy accelerate

"""pipeline"""

import torch
from diffusers import StableDiffusionPipeline

# download the Stable Diffusion v1-4 weights from the Hugging Face Hub
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")

# move the pipeline to the GPU
pipe = pipe.to("cuda")
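
"""If the float32 pipeline above does not fit on your GPU, a common workaround is to load the weights in half precision and enable attention slicing. This is an optional sketch, not a required step; both options trade a little speed or numerical precision for a lower memory footprint."""

# optional: reload the pipeline in float16 to roughly halve GPU memory usage
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# optional: compute attention in slices to further reduce peak memory
pipe.enable_attention_slicing()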

# Generate an image from a text prompt
prompt = "cute panda eating pizza on a bamboo tree"
image = pipe(prompt).images[0]

# save the result to disk
image.save("Happy_panda.png")

image

# fix the random seed so results are reproducible across runs
generator = torch.Generator("cuda").manual_seed(2048)

image = pipe(prompt, generator=generator).images[0]

image
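
"""The generator fixes the initial latent noise, so re-creating a generator with the same seed reproduces the image above exactly, while a different seed yields a different composition. A quick sketch for comparison; the second seed value is arbitrary:"""

# same prompt, two different seeds -> two different images
image_a = pipe(prompt, generator=torch.Generator("cuda").manual_seed(2048)).images[0]
image_b = pipe(prompt, generator=torch.Generator("cuda").manual_seed(1234)).images[0]
image_b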

# increase the number of inference (denoising) steps; more steps usually
# improve quality at the cost of slower generation
generator = torch.Generator("cuda").manual_seed(2048)

image = pipe(prompt, num_inference_steps=70, generator=generator).images[0]

image
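
"""Besides the step count, the classifier-free guidance scale (the guidance_scale argument, which defaults to 7.5 in this pipeline) controls how strongly generation is pushed toward the prompt: higher values follow the prompt more closely but can reduce diversity. A sketch reusing the same seed so the results are comparable; the value 10 is only an example:"""

generator = torch.Generator("cuda").manual_seed(2048)

image = pipe(prompt, guidance_scale=10, num_inference_steps=70, generator=generator).images[0]
image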

from PIL import Image

def image_grid(imgs, rows, cols):
    """Paste a list of equally sized PIL images into a rows x cols grid."""
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        # image i goes to column i % cols, row i // cols
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

# generate several images at once by passing a list of prompts (batched inference)
num_images = 3
prompt = ["cute panda eating pizza on a bamboo tree"] * num_images

images = pipe(prompt).images

grid = image_grid(images, rows=1, cols=num_images)
grid
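
"""Prompts can also be steered away from unwanted content with the negative_prompt argument. Whether this argument is available in the exact diffusers version pinned above is an assumption here, and the negative prompt text is only an example."""

# one negative prompt per prompt in the batch
images = pipe(prompt, negative_prompt=["blurry, low quality"] * num_images).images

grid = image_grid(images, rows=1, cols=num_images)
grid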

# build a larger grid: each loop iteration generates one row of images,
# so the per-call batch size stays at num_cols
num_cols = 3
num_rows = 4

prompt = ["cute panda eating pizza on a bamboo tree"] * num_cols

all_images = []
for _ in range(num_rows):
  images = pipe(prompt).images
  all_images.extend(images)

grid = image_grid(all_images, rows=num_rows, cols=num_cols)
grid
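
"""If you want to keep the individual images as well as the combined grid, each element of all_images is a regular PIL image and can be saved separately. The filename pattern below is only an example."""

# save every generated image individually
for i, img in enumerate(all_images):
  img.save(f"panda_grid_{i:02d}.png")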

# Generate a non-square image; height and width must be multiples of 8
prompt = "cute panda eating pizza on a bamboo tree"

image = pipe(prompt, height=512, width=752).images[0]
image
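
"""Stable Diffusion v1-4 was trained on 512x512 images, so it generally works best to keep one side at 512 and stretch only the other; making both dimensions much larger tends to produce repeated elements, and going below 512 lowers quality. A portrait-orientation sketch, with the sizes chosen only as an example:"""

# portrait image: keep the width at 512 and extend the height
generator = torch.Generator("cuda").manual_seed(2048)

image = pipe(prompt, height=768, width=512, generator=generator).images[0]
image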