Venkateshwar Reddy committed on
Commit 0111f03 · 1 Parent(s): 15f4fb9

added model

Files changed (1)
  1. app.py +3 -11
app.py CHANGED
@@ -8,11 +8,11 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 if torch.cuda.is_available():
     torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+    pipe = DiffusionPipeline.from_pretrained("FrozenScar/cartoon_face", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
 else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
+    pipe = DiffusionPipeline.from_pretrained("FrozenScar/cartoon_face", use_safetensors=True)
     pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
@@ -25,15 +25,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
 
     generator = torch.Generator().manual_seed(seed)
 
-    image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
+    image = pipe().images[0]
 
     return image
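Note that the added line calls the pipeline with no arguments, so the prompt, negative_prompt, guidance_scale, num_inference_steps, width, height, and generator values collected by infer() are no longer forwarded. For reference, a minimal sketch of that call with the parameters forwarded, assuming FrozenScar/cartoon_face loads as a standard diffusers text-to-image pipeline:

    # Sketch only: restores the keyword arguments that the removed block passed to the pipeline.
    # Assumes "FrozenScar/cartoon_face" resolves to a standard diffusers text-to-image pipeline.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]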