Venkateshwar Reddy committed on
Commit
4ea785f
·
1 Parent(s): d34b9d8

Changed to DDPM pipeline

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -1,18 +1,18 @@
# Previous revision (pre-image of this diff): identical app setup, but the
# checkpoint is loaded through the generic DiffusionPipeline entry point.

import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch

# Pick the compute device once, up front.
device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # Query of the CUDA allocator; the returned value is not used.
    torch.cuda.max_memory_allocated(device=device)
    pipe = DiffusionPipeline.from_pretrained(
        "FrozenScar/cartoon_face",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    pipe = DiffusionPipeline.from_pretrained(
        "FrozenScar/cartoon_face",
        use_safetensors=True,
    )
    pipe = pipe.to(device)

# Upper bound for user-supplied RNG seeds.
MAX_SEED = np.iinfo(np.int32).max
 
# app.py — Gradio demo setup: loads the "FrozenScar/cartoon_face" checkpoint
# as a DDPMPipeline, using fp16 weights + xformers attention when CUDA is
# available, and plain full-precision weights on CPU otherwise.

import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline, DDPMPipeline, DDPMScheduler
import torch

# Pick the compute device once, up front.
device = "cuda" if torch.cuda.is_available() else "cpu"

# DDPM noise scheduler; 1000 is the standard DDPM training-timestep count.
noise_scheduler = DDPMScheduler(num_train_timesteps=1000)

if torch.cuda.is_available():
    # NOTE(review): max_memory_allocated() only *queries* the CUDA allocator
    # and its return value is discarded here, so this line is a no-op —
    # confirm it can simply be removed.
    torch.cuda.max_memory_allocated(device=device)
    # NOTE(review): unlike the CPU branch below, this branch does not pass
    # scheduler=noise_scheduler — confirm whether that asymmetry is intended
    # before unifying the two calls.
    pipe = DDPMPipeline.from_pretrained(
        "FrozenScar/cartoon_face",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    pipe = DDPMPipeline.from_pretrained(
        "FrozenScar/cartoon_face",
        scheduler=noise_scheduler,
        use_safetensors=True,
    )
    pipe = pipe.to(device)

# Upper bound for user-supplied RNG seeds.
MAX_SEED = np.iinfo(np.int32).max