matteomarjanovic committed
Commit b79e3f7 · 1 Parent(s): e1cbdd9

add lora and edit some parameters

Files changed (1)
1. app.py +11 -8
app.py CHANGED
@@ -2,12 +2,14 @@ import gradio as gr
 import numpy as np
 import random
 
-# import spaces #[uncomment to use ZeroGPU]
+import spaces #[uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+model_repo_id = "black-forest-labs/FLUX.1-schnell"  # Replace to the model you would like to use
+lora_path = "matteomarjanovic/flatsketcher"
+weigths_file = "lora.safetensors"
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
@@ -16,12 +18,13 @@ else:
 
 pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
+pipe.load_lora_weights(lora_path, weight_name=weigths_file)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 
-# @spaces.GPU #[uncomment to use ZeroGPU]
+@spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
     prompt,
     negative_prompt,
@@ -41,11 +44,11 @@ def infer(
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
+        guidance_scale=0.,
+        num_inference_steps=4,
+        width=1920,
+        height=1080,
+        max_sequence_length=256,
     ).images[0]
 
     return image, seed
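
For context, the sketch below pieces the changed snippet together into a minimal standalone script: it loads FLUX.1-schnell through diffusers, applies the matteomarjanovic/flatsketcher LoRA, and generates one image with the parameters hard-coded in this commit (4 steps, zero guidance, 1920x1080, max_sequence_length=256). It assumes a recent diffusers release with the peft backend available for load_lora_weights; the prompt, seed, and output filename are illustrative and not part of the commit, and negative_prompt is omitted here because support for it varies across FLUX pipeline versions.

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Base model and LoRA weights exactly as referenced in the diff above.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch_dtype
).to(device)
pipe.load_lora_weights("matteomarjanovic/flatsketcher", weight_name="lora.safetensors")

# FLUX.1-schnell is a few-step distilled model, which is why the commit
# pins guidance_scale to 0 and num_inference_steps to 4.
generator = torch.Generator(device=device).manual_seed(42)  # illustrative seed
image = pipe(
    prompt="flat fashion sketch of a jacket",  # illustrative prompt
    guidance_scale=0.0,
    num_inference_steps=4,
    width=1920,   # width/height mirror the commit; the pipeline may round
    height=1080,  # them to a supported multiple internally
    max_sequence_length=256,
    generator=generator,
).images[0]
image.save("flat_sketch.png")  # illustrative output path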