AlekseyCalvin committed
Commit 9ce3c69 · verified · 1 Parent(s): 1cb1121

Update app.py

Files changed (1)
  1. app.py +6 -5
app.py CHANGED
@@ -30,7 +30,7 @@ os.environ["TRANSFORMERS_CACHE"] = cache_path
 os.environ["HF_HUB_CACHE"] = cache_path
 os.environ["HF_HOME"] = cache_path
 
-torch.set_float32_matmul_precision("medium")
+#torch.set_float32_matmul_precision("medium")
 
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
@@ -38,13 +38,14 @@ with open('loras.json', 'r') as f:
 
 # Initialize the base model
 dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "AlekseyCalvin/Artsy_Lite_Flux_v1_by_jurdn_Diffusers"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
 #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
 torch.cuda.empty_cache()
 
-clipmodel = 'long'
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+clipmodel = 'norm'
 if clipmodel == "long":
     model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
     config = CLIPConfig.from_pretrained(model_id)
@@ -103,7 +104,7 @@ def update_selection(evt: gr.SelectData, width, height):
 )
 
 @spaces.GPU(duration=50)
-def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
+def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
@@ -161,7 +162,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
 )
 # Info blob stating what the app is running
 info_blob = gr.HTML(
-    """<div id="info_blob"> Img. Manufactory Running On: Our 'Historic Color SOON®' Schnell/Pixelwave-base Model (at AlekseyCalvin/HistoricColorSoonr_Schnell). Now testing related LoRAs (#s2-8,11,12,14,16)for merging. </div>"""
+    """<div id="info_blob"> Img. Manufactory Running On: ArtsyLite Flux model. Now testing related LoRAs (#s2-8,11,12,14,16)for merging. </div>"""
 )
 
 # Info blob stating what the app is running
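Note: the commit computes device = "cuda" if torch.cuda.is_available() else "cpu" but still moves the pipeline with a hard-coded .to("cuda"). Below is a minimal sketch of a fully device-aware setup, reusing only the repo id and dtype that appear in the diff; the rearrangement itself is illustrative, not the app's actual code.

import torch
from diffusers import DiffusionPipeline

# Pick the device once and reuse it, instead of hard-coding "cuda".
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16

base_model = "AlekseyCalvin/Artsy_Lite_Flux_v1_by_jurdn_Diffusers"  # repo id from the diff
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)

if device == "cuda":
    torch.cuda.empty_cache()  # only meaningful when a GPU is actually present

This arrangement also guards torch.cuda.empty_cache(), which the committed code calls unconditionally.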
 
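The other functional change is flipping clipmodel from 'long' to 'norm'. Only the 'long' branch (LongCLIP) is visible in the hunks above, so the sketch below is a hedged guess at how the switch might look; the 'norm' checkpoint id openai/clip-vit-large-patch14 is an assumption, not something shown in this commit.

from transformers import CLIPConfig, CLIPModel

clipmodel = "norm"  # value set by this commit

if clipmodel == "long":
    # Branch shown in the diff: extended-context LongCLIP checkpoint.
    model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
else:
    # Assumed stock CLIP-L checkpoint for the 'norm' case (not part of the diff).
    model_id = "openai/clip-vit-large-patch14"

config = CLIPConfig.from_pretrained(model_id)  # mirrors the config load shown in the diff
clip_model = CLIPModel.from_pretrained(model_id, config=config)

How the app consumes the selected CLIP weights is outside the lines shown in this diff.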