alyxsis committed on
Commit
801e638
Β·
verified Β·
1 Parent(s): 024cbad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +302 -385
app.py CHANGED
@@ -10,41 +10,11 @@ from huggingface_hub import InferenceClient
10
 
11
  # Project by Nymbo
12
 
13
- # Base API URL for Hugging Face inference
14
- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
15
- # Retrieve the API token from environment variables
16
- API_TOKEN = os.getenv("HF_READ_TOKEN")
17
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
18
- # Timeout for requests
19
- timeout = 100
20
-
21
- # Provider configurations
22
- PROVIDER_CONFIG = {
23
- "hf-inference": {
24
- "name": "Hugging Face Inference",
25
- "description": "Official HF serverless inference (free tier available)",
26
- "api_key_env": "HF_READ_TOKEN",
27
- "supports_custom_models": True
28
- },
29
- "replicate": {
30
- "name": "Replicate",
31
- "description": "High-performance inference platform",
32
- "api_key_env": "HF_READ_TOKEN", # Using HF token for routing
33
- "supports_custom_models": False
34
- },
35
- "fal-ai": {
36
- "name": "Fal AI",
37
- "description": "Fast AI inference platform",
38
- "api_key_env": "HF_READ_TOKEN", # Using HF token for routing
39
- "supports_custom_models": False
40
- }
41
- }
42
-
43
- def query_with_provider(prompt, model, custom_lora, provider, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
44
  """
45
- Generate images using different inference providers (HF Inference, Replicate, Fal AI)
46
  """
47
- print(f"Starting query with provider: {provider}")
48
  print(f"Prompt: {prompt}")
49
  print(f"Model: {model}")
50
  print(f"Custom LoRA: {custom_lora}")
@@ -64,220 +34,28 @@ def query_with_provider(prompt, model, custom_lora, provider, is_negative=False,
64
  print(f'Generation {key}: {enhanced_prompt}')
65
 
66
  try:
67
- if provider == "hf-inference":
68
- return query_hf_inference(enhanced_prompt, model, custom_lora, is_negative, steps, cfg_scale, sampler, seed, strength, width, height, key)
69
- elif provider == "replicate":
70
- return query_replicate(enhanced_prompt, model, steps, cfg_scale, seed, width, height, key)
71
- elif provider == "fal-ai":
72
- return query_fal_ai(enhanced_prompt, model, steps, cfg_scale, seed, width, height, key)
73
- else:
74
- raise gr.Error(f"Unknown provider: {provider}")
75
- except Exception as e:
76
- print(f"Error with provider {provider}: {e}")
77
- raise gr.Error(f"Failed to generate image with {provider}: {str(e)}")
78
-
79
- def query_hf_inference(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024, key=None):
80
- """
81
- Original HF inference implementation with enhancements
82
- """
83
- print(f"Using HF Inference for generation {key}")
84
-
85
- # Randomly select an API token from available options to distribute the load
86
- available_tokens = [os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")]
87
- available_tokens = [token for token in available_tokens if token is not None]
88
-
89
- if not available_tokens:
90
- raise gr.Error("No valid HF tokens found. Please set HF_READ_TOKEN environment variable.")
91
-
92
- API_TOKEN = random.choice(available_tokens)
93
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
94
- print(f"Selected API token: {API_TOKEN[:10]}...")
95
-
96
- # Set the API URL based on the selected model or custom LoRA
97
- if custom_lora.strip() != "":
98
- API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
99
- else:
100
- # Model mapping for HF inference - comprehensive model list
101
- model_urls = {
102
- 'Stable Diffusion XL': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
103
- 'FLUX.1 [Dev]': "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev",
104
- 'FLUX.1 [Schnell]': "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell",
105
- 'HiDream-I1-Full': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Full",
106
- 'HiDream-I1-Dev': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Dev",
107
- 'HiDream-I1-Fast': "https://api-inference.huggingface.co/models/HiDream-ai/HiDream-I1-Fast",
108
- 'Animagine 4.0': "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-4.0",
109
- 'Flux Icon Kit': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Icon-Kit-LoRA",
110
- 'Pixel Background': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Pixel-Background-LoRA",
111
- 'Meme XD': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Meme-Xd-LoRA",
112
- 'Chill Guy': "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Chill-Guy-Zone",
113
- 'Pepe': "https://api-inference.huggingface.co/models/openfree/pepe",
114
- 'NSFWmodel': "https://api-inference.huggingface.co/models/lexa862/NSFWmodel",
115
- 'Claude Art': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Claude-Art",
116
- 'Open Genmoji': "https://api-inference.huggingface.co/models/EvanZhouDev/open-genmoji",
117
- 'EBook Creative Cover': "https://api-inference.huggingface.co/models/prithivMLmods/EBook-Creative-Cover-Flux-LoRA",
118
- 'Flux Logo Design 2': "https://api-inference.huggingface.co/models/prithivMLmods/Logo-Design-Flux-LoRA",
119
- 'Isometric 3D': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Isometric-3D-LoRA",
120
- 'Flux Condensation': "https://api-inference.huggingface.co/models/fofr/flux-condensation",
121
- 'Flux Handwriting': "https://api-inference.huggingface.co/models/fofr/flux-handwriting",
122
- 'Shou Xin': "https://api-inference.huggingface.co/models/Datou1111/shou_xin",
123
- 'Sketch Smudge': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Sketch-Smudge-LoRA",
124
- '80s Cyberpunk': "https://api-inference.huggingface.co/models/fofr/flux-80s-cyberpunk",
125
- 'Coloring Book Flux': "https://api-inference.huggingface.co/models/renderartist/coloringbookflux",
126
- 'Flux Miniature LoRA': "https://api-inference.huggingface.co/models/gokaygokay/Flux-Miniature-LoRA",
127
- 'Sketch Paint': "https://api-inference.huggingface.co/models/strangerzonehf/Sketch-Paint",
128
- 'Flux UltraRealism 2.0': "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
129
- 'Midjourney Mix': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix-LoRA",
130
- 'Midjourney Mix 2': "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix2-LoRA",
131
- 'Flux Logo Design': "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
132
- 'Flux Uncensored': "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored",
133
- 'Flux Uncensored V2': "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2",
134
- 'Flux Tarot Cards': "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA",
135
- 'Pixel Art Sprites': "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux",
136
- '3D Sketchfab': "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA",
137
- 'Retro Comic Flux': "https://api-inference.huggingface.co/models/renderartist/retrocomicflux",
138
- 'Caricature': "https://api-inference.huggingface.co/models/TheAwakenOne/caricature",
139
- 'Huggieverse': "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse",
140
- 'Stable Diffusion 3.5 Large': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large",
141
- 'Stable Diffusion 3.5 Large Turbo': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo",
142
- 'Stable Diffusion 3 Medium': "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers",
143
- }
144
- API_URL = model_urls.get(model, "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell")
145
 
146
- # Apply model-specific prompt enhancements
147
- if model == 'Animagine 4.0':
148
- prompt = f"masterpiece, high score, great score, absurdres, {prompt}"
149
- elif model == 'Flux Icon Kit':
150
- prompt = f"Icon Kit, {prompt}"
151
- elif model == 'Pixel Background':
152
- prompt = f"Pixel Background, {prompt}"
153
- elif model == 'Meme XD':
154
- prompt = f"meme, {prompt}"
155
- elif model == 'Chill Guy':
156
- prompt = f"chill guy, {prompt}"
157
- elif model == 'Pepe':
158
- prompt = f"pepe, {prompt}"
159
- elif model == 'NSFWmodel':
160
- prompt = f"nude, {prompt}"
161
- elif model == 'Claude Art':
162
- prompt = f"claude art, {prompt}"
163
- elif model == 'Open Genmoji':
164
- prompt = f"emoji, {prompt}"
165
- elif model == 'EBook Creative Cover':
166
- prompt = f"EBook Cover, {prompt}"
167
- elif model == 'Flux Logo Design 2':
168
- prompt = f"Logo Design, {prompt}"
169
- elif model == 'Isometric 3D':
170
- prompt = f"Isometric 3D, {prompt}"
171
- elif model == 'Flux Condensation':
172
- prompt = f"CONDENSATION, {prompt}"
173
- elif model == 'Flux Handwriting':
174
- prompt = f"HWRIT handwriting, {prompt}"
175
- elif model == 'Shou Xin':
176
- prompt = f"shou_xin, pencil sketch, {prompt}"
177
- elif model == 'Sketch Smudge':
178
- prompt = f"Sketch Smudge, {prompt}"
179
- elif model == '80s Cyberpunk':
180
- prompt = f"80s cyberpunk, {prompt}"
181
- elif model == 'Coloring Book Flux':
182
- prompt = f"c0l0ringb00k, coloring book, coloring book page, {prompt}"
183
- elif model == 'Flux Miniature LoRA':
184
- prompt = f"MNTR, miniature drawing, {prompt}"
185
- elif model == 'Sketch Paint':
186
- prompt = f"Sketch paint, {prompt}"
187
- elif model == 'Flux UltraRealism 2.0':
188
- prompt = f"Ultra realistic, {prompt}"
189
- elif model == 'Midjourney Mix':
190
- prompt = f"midjourney mix, {prompt}"
191
- elif model == 'Midjourney Mix 2':
192
- prompt = f"MJ v6, {prompt}"
193
- elif model == 'Flux Logo Design':
194
- prompt = f"wablogo, logo, Minimalist, {prompt}"
195
- elif model == 'Flux Tarot Cards':
196
- prompt = f"Tarot card, {prompt}"
197
- elif model == 'Pixel Art Sprites':
198
- prompt = f"a pixel image, {prompt}"
199
- elif model == '3D Sketchfab':
200
- prompt = f"3D Sketchfab, {prompt}"
201
- elif model == 'Retro Comic Flux':
202
- prompt = f"c0m1c, comic book panel, {prompt}"
203
- elif model == 'Caricature':
204
- prompt = f"CCTUR3, {prompt}"
205
- elif model == 'Huggieverse':
206
- prompt = f"HGGRE, {prompt}"
207
- elif model == 'Stable Diffusion 3 Medium':
208
- prompt = f"A, {prompt}"
209
-
210
- print(f"API URL set to: {API_URL}")
211
-
212
- # Define the payload for the request
213
- payload = {
214
- "inputs": prompt,
215
- "is_negative": is_negative,
216
- "steps": steps,
217
- "cfg_scale": cfg_scale,
218
- "seed": seed if seed != -1 else random.randint(1, 1000000000),
219
- "strength": strength,
220
- "parameters": {
221
- "width": width,
222
- "height": height
223
- }
224
- }
225
-
226
- # Make a request to the API to generate the image
227
- try:
228
- response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
229
- print(f"Response status code: {response.status_code}")
230
- except requests.exceptions.RequestException as e:
231
- print(f"Request failed: {e}")
232
- raise gr.Error(f"Request failed: {e}")
233
-
234
- # Handle response
235
- if response.status_code != 200:
236
- print(f"Error: Failed to retrieve image. Response status: {response.status_code}")
237
- print(f"Response content: {response.text}")
238
- if response.status_code == 503:
239
- raise gr.Error(f"Model is loading. Please try again in a few moments.")
240
- else:
241
- raise gr.Error(f"HTTP {response.status_code}: {response.text}")
242
-
243
- try:
244
- image_bytes = response.content
245
- image = Image.open(io.BytesIO(image_bytes))
246
- print(f'Generation {key} completed with HF Inference!')
247
- return image
248
- except Exception as e:
249
- print(f"Error while trying to open image: {e}")
250
- raise gr.Error(f"Failed to process generated image: {e}")
251
-
252
- def query_replicate(prompt, model, steps=35, cfg_scale=7, seed=-1, width=1024, height=1024, key=None):
253
- """
254
- Query Replicate provider through HF routing
255
- """
256
- print(f"Using Replicate provider for generation {key}")
257
-
258
- api_key = os.getenv("HF_READ_TOKEN")
259
- if not api_key:
260
- raise gr.Error("HF_READ_TOKEN not found. Required for Replicate routing.")
261
-
262
- try:
263
- client = InferenceClient(
264
- provider="replicate",
265
- api_key=api_key,
266
- )
267
 
268
- # Map models to Replicate-compatible model IDs
269
- replicate_models = {
270
- 'FLUX.1 [Dev]': "black-forest-labs/FLUX.1-dev",
271
- 'FLUX.1 [Schnell]': "black-forest-labs/FLUX.1-schnell",
272
- 'Stable Diffusion XL': "stability-ai/sdxl",
273
- }
 
274
 
275
- model_id = replicate_models.get(model, "black-forest-labs/FLUX.1-schnell")
276
- print(f"Using Replicate model: {model_id}")
277
 
278
- # Generate image using Replicate
279
  image = client.text_to_image(
280
- prompt=prompt,
281
  model=model_id,
282
  width=width,
283
  height=height,
@@ -286,59 +64,278 @@ def query_replicate(prompt, model, steps=35, cfg_scale=7, seed=-1, width=1024, h
286
  seed=seed if seed != -1 else None,
287
  )
288
 
289
- print(f'Generation {key} completed with Replicate!')
290
  return image
 
291
  except Exception as e:
292
- print(f"Replicate error: {e}")
293
- raise gr.Error(f"Replicate generation failed: {str(e)}")
294
 
295
- def query_fal_ai(prompt, model, steps=35, cfg_scale=7, seed=-1, width=1024, height=1024, key=None):
296
  """
297
- Query Fal AI provider through HF routing
298
  """
299
- print(f"Using Fal AI provider for generation {key}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
300
 
301
- api_key = os.getenv("HF_READ_TOKEN")
302
- if not api_key:
303
- raise gr.Error("HF_READ_TOKEN not found. Required for Fal AI routing.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
304
 
305
- try:
306
- client = InferenceClient(
307
- provider="fal-ai",
308
- api_key=api_key,
309
- )
310
-
311
- # Map models to Fal AI-compatible model IDs
312
- fal_models = {
313
- 'FLUX.1 [Dev]': "black-forest-labs/FLUX.1-dev",
314
- 'FLUX.1 [Schnell]': "black-forest-labs/FLUX.1-schnell",
315
- }
316
-
317
- model_id = fal_models.get(model, "black-forest-labs/FLUX.1-schnell")
318
- print(f"Using Fal AI model: {model_id}")
319
-
320
- # Generate image using Fal AI
321
- image = client.text_to_image(
322
- prompt=prompt,
323
- model=model_id,
324
- width=width,
325
- height=height,
326
- num_inference_steps=steps,
327
- guidance_scale=cfg_scale,
328
- seed=seed if seed != -1 else None,
329
- )
330
-
331
- print(f'Generation {key} completed with Fal AI!')
332
- return image
333
- except Exception as e:
334
- print(f"Fal AI error: {e}")
335
- raise gr.Error(f"Fal AI generation failed: {str(e)}")
336
 
337
  def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
338
  """
339
- Legacy query function for backward compatibility - uses HF inference by default
340
  """
341
- return query_with_provider(prompt, model, custom_lora, "hf-inference", is_negative, steps, cfg_scale, sampler, seed, strength, width, height)
342
 
343
  # Custom CSS to hide the footer in the interface
344
  css = """
@@ -360,19 +357,6 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
360
  with gr.Row():
361
  # Textbox for custom LoRA input
362
  custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional)", placeholder="multimodalart/vintage-ads-flux")
363
- with gr.Row():
364
- # Provider selection
365
- provider = gr.Radio(
366
- label="Inference Provider",
367
- value="hf-inference",
368
- choices=[
369
- ("πŸ€— Hugging Face Inference (Free Tier)", "hf-inference"),
370
- ("⚑ Replicate (Fast & High Quality)", "replicate"),
371
- ("πŸš€ Fal AI (Optimized Performance)", "fal-ai")
372
- ],
373
- info="Choose your inference provider. HF Inference includes free tier, while Replicate and Fal AI offer faster performance.",
374
- elem_id="provider-radio"
375
- )
376
  with gr.Row():
377
  # Accordion for selecting the model
378
  with gr.Accordion("Featured Models", open=False):
@@ -580,37 +564,32 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
580
  """
581
  )
582
 
583
- # Accordion about inference providers
584
- with gr.Accordion("πŸ”€ Inference Providers", open=False):
585
  gr.Markdown(
586
  """
587
- ## Available Providers
588
 
589
- ### πŸ€— Hugging Face Inference (Default)
590
- - **Free tier available** with rate limits
591
- - **Supports all custom models** and LoRAs
592
- - **Most comprehensive model selection**
593
- - Best for experimentation and testing
594
 
595
- ### ⚑ Replicate
596
- - **High-performance inference** with faster generation
597
- - **Premium quality** optimized models
598
- - Supports popular models like FLUX.1, Stable Diffusion
599
- - Usage billed through HF routing
600
 
601
- ### πŸš€ Fal AI
602
- - **Optimized for speed** and performance
603
- - **Enterprise-grade infrastructure**
604
- - Specialized in computer vision tasks
605
- - Usage billed through HF routing
606
 
607
- ## How Provider Routing Works
 
 
 
608
 
609
- When you select **Replicate** or **Fal AI**, your requests are routed through Hugging Face's
610
- provider system. You only need your HF token - no additional API keys required!
611
-
612
- **Billing**: Replicate and Fal AI usage is charged to your Hugging Face account based on
613
- compute time and provider rates.
614
  """
615
  )
616
 
@@ -650,70 +629,8 @@ with gr.Blocks(theme='Nymbo/Alyx_Theme') as dalle:
650
  with gr.Row():
651
  image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
652
 
653
- # Set up button click event to call the query_with_provider function
654
- text_button.click(query_with_provider, inputs=[text_prompt, model, custom_lora, provider, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
655
-
656
- # Function to get the list of compatible models based on the selected provider
657
- def get_compatible_models(provider):
658
- """
659
- Get list of models compatible with the selected provider
660
- """
661
- if provider == "hf-inference":
662
- # HF Inference supports all models
663
- return None # No filtering needed
664
-
665
- elif provider == "replicate":
666
- # Replicate supports limited set of popular models
667
- return [
668
- "FLUX.1 [Dev]",
669
- "FLUX.1 [Schnell]",
670
- "Stable Diffusion XL",
671
- "Stable Diffusion 3.5 Large",
672
- "Stable Diffusion 3.5 Large Turbo",
673
- "Stable Diffusion 3 Medium"
674
- ]
675
-
676
- elif provider == "fal-ai":
677
- # Fal AI supports FLUX and some Stable Diffusion models
678
- return [
679
- "FLUX.1 [Dev]",
680
- "FLUX.1 [Schnell]",
681
- "Stable Diffusion XL",
682
- "Stable Diffusion 3.5 Large"
683
- ]
684
-
685
- return None
686
-
687
- # Function to update the model choices based on the selected provider
688
- def update_model_choices(provider):
689
- """
690
- Update model choices based on selected provider
691
- """
692
- compatible_models = get_compatible_models(provider)
693
-
694
- if compatible_models is None:
695
- # Return all models for HF inference
696
- return gr.update()
697
- else:
698
- # Filter models for other providers
699
- return gr.update(choices=compatible_models, value=compatible_models[0] if compatible_models else "FLUX.1 [Schnell]")
700
-
701
- # Function to show/hide custom LoRA input based on provider capability
702
- def update_custom_lora_visibility(provider):
703
- """
704
- Show/hide custom LoRA input based on provider capability
705
- """
706
- if provider == "hf-inference":
707
- return gr.update(visible=True, interactive=True)
708
- else:
709
- return gr.update(visible=False, interactive=False)
710
-
711
- # Update model choices and custom LoRA visibility when provider is changed
712
- provider.change(
713
- fn=lambda p: (update_model_choices(p), update_custom_lora_visibility(p)),
714
- inputs=[provider],
715
- outputs=[model, custom_lora],
716
- )
717
 
718
  print("Launching Gradio interface...") # Debug log
719
  # Launch the Gradio interface without showing the API or sharing externally
 
10
 
11
  # Project by Nymbo
12
 
13
+ def query_with_auto_routing(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  """
15
+ Generate images using HF's automatic provider routing
16
  """
17
+ print(f"Starting query with automatic provider routing")
18
  print(f"Prompt: {prompt}")
19
  print(f"Model: {model}")
20
  print(f"Custom LoRA: {custom_lora}")
 
34
  print(f'Generation {key}: {enhanced_prompt}')
35
 
36
  try:
37
+ # Use automatic provider routing
38
+ api_key = os.getenv("HF_READ_TOKEN")
39
+ if not api_key:
40
+ raise gr.Error("HF_READ_TOKEN not found. Please set your Hugging Face API token.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
+ # Initialize client with automatic provider selection (default is "auto")
43
+ client = InferenceClient(api_key=api_key)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
+ # Determine the model to use
46
+ if custom_lora.strip() != "":
47
+ model_id = custom_lora.strip()
48
+ print(f"Using custom LoRA: {model_id}")
49
+ else:
50
+ model_id = get_model_id_from_name(model)
51
+ print(f"Using model: {model_id}")
52
 
53
+ # Apply model-specific prompt enhancements
54
+ enhanced_prompt = apply_model_prompt_enhancements(model, enhanced_prompt)
55
 
56
+ # Generate image using automatic provider routing
57
  image = client.text_to_image(
58
+ prompt=enhanced_prompt,
59
  model=model_id,
60
  width=width,
61
  height=height,
 
64
  seed=seed if seed != -1 else None,
65
  )
66
 
67
+ print(f'Generation {key} completed with automatic routing!')
68
  return image
69
+
70
  except Exception as e:
71
+ print(f"Error with automatic routing: {e}")
72
+ raise gr.Error(f"Failed to generate image: {str(e)}")
73
 
74
# Mapping from the user-facing model names shown in the UI to their actual
# Hugging Face model IDs. Hoisted to module level so the (large) literal dict
# is built once at import time instead of on every generation request.
_MODEL_NAME_TO_ID = {
    'FLUX.1 [Dev]': "black-forest-labs/FLUX.1-dev",
    'FLUX.1 [Schnell]': "black-forest-labs/FLUX.1-schnell",
    'Stable Diffusion XL': "stabilityai/stable-diffusion-xl-base-1.0",
    'Stable Diffusion 3.5 Large': "stabilityai/stable-diffusion-3.5-large",
    'Stable Diffusion 3.5 Large Turbo': "stabilityai/stable-diffusion-3.5-large-turbo",
    'Stable Diffusion 3 Medium': "stabilityai/stable-diffusion-3-medium-diffusers",
    'HiDream-I1-Full': "HiDream-ai/HiDream-I1-Full",
    'HiDream-I1-Dev': "HiDream-ai/HiDream-I1-Dev",
    'HiDream-I1-Fast': "HiDream-ai/HiDream-I1-Fast",
    'Animagine 4.0': "cagliostrolab/animagine-xl-4.0",
    'Flux Icon Kit': "strangerzonehf/Flux-Icon-Kit-LoRA",
    'Pixel Background': "strangerzonehf/Flux-Pixel-Background-LoRA",
    'Meme XD': "prithivMLmods/Flux-Meme-Xd-LoRA",
    'Chill Guy': "prithivMLmods/Flux-Chill-Guy-Zone",
    'Pepe': "openfree/pepe",
    'NSFWmodel': "lexa862/NSFWmodel",
    'Claude Art': "strangerzonehf/Flux-Claude-Art",
    'Open Genmoji': "EvanZhouDev/open-genmoji",
    'EBook Creative Cover': "prithivMLmods/EBook-Creative-Cover-Flux-LoRA",
    'Flux Logo Design 2': "prithivMLmods/Logo-Design-Flux-LoRA",
    'Isometric 3D': "strangerzonehf/Flux-Isometric-3D-LoRA",
    'Flux Condensation': "fofr/flux-condensation",
    'Flux Handwriting': "fofr/flux-handwriting",
    'Shou Xin': "Datou1111/shou_xin",
    'Sketch Smudge': "strangerzonehf/Flux-Sketch-Smudge-LoRA",
    '80s Cyberpunk': "fofr/flux-80s-cyberpunk",
    'Coloring Book Flux': "renderartist/coloringbookflux",
    'Flux Miniature LoRA': "gokaygokay/Flux-Miniature-LoRA",
    'Sketch Paint': "strangerzonehf/Sketch-Paint",
    'Flux UltraRealism 2.0': "prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
    'Midjourney Mix': "strangerzonehf/Flux-Midjourney-Mix-LoRA",
    'Midjourney Mix 2': "strangerzonehf/Flux-Midjourney-Mix2-LoRA",
    'Flux Logo Design': "Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
    'Flux Uncensored': "enhanceaiteam/Flux-uncensored",
    'Flux Uncensored V2': "enhanceaiteam/Flux-Uncensored-V2",
    'Flux Tarot Cards': "prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA",
    'Pixel Art Sprites': "sWizad/pokemon-trainer-sprites-pixelart-flux",
    '3D Sketchfab': "prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA",
    'Retro Comic Flux': "renderartist/retrocomicflux",
    'Caricature': "TheAwakenOne/caricature",
    'Huggieverse': "Chunte/flux-lora-Huggieverse",
    'Propaganda Poster': "AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion",
    'Flux Game Assets V2': "gokaygokay/Flux-Game-Assets-LoRA-v2",
    'SDXL HS Card Style': "Norod78/sdxl-hearthstone-card-style-lora",
    'SLDR FLUX NSFW v2 Studio': "xey/sldr_flux_nsfw_v2-studio",
    'SoftPasty Flux': "alvdansen/softpasty-flux-dev",
    'Flux Stickers': "diabolic6045/Flux_Sticker_Lora",
    'Flux Animex V2': "strangerzonehf/Flux-Animex-v2-LoRA",
    'Flux Animeo V1': "strangerzonehf/Flux-Animeo-v1-LoRA",
    'Movie Board': "prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA",
    'Purple Dreamy': "prithivMLmods/Purple-Dreamy-Flux-LoRA",
    'PS1 Style Flux': "veryVANYA/ps1-style-flux",
    'Softserve Anime': "alvdansen/softserve_anime",
    'Flux Tarot v1': "multimodalart/flux-tarot-v1",
    'Half Illustration': "davisbro/half_illustration",
    'OpenDalle v1.1': "dataautogpt3/OpenDalleV1.1",
    'Flux Ghibsky Illustration': "aleksa-codes/flux-ghibsky-illustration",
    'Flux Koda': "alvdansen/flux-koda",
    'Soviet Diffusion XL': "openskyml/soviet-diffusion-xl",
    'Flux Realism LoRA': "XLabs-AI/flux-RealismLora",
    'Frosting Lane Flux': "alvdansen/frosting_lane_flux",
    'Phantasma Anime': "alvdansen/phantasma-anime",
    'Boreal': "kudzueye/Boreal",
    'How2Draw': "glif/how2draw",
    'Flux AestheticAnime': "dataautogpt3/FLUX-AestheticAnime",
    'Fashion Hut Modeling LoRA': "prithivMLmods/Fashion-Hut-Modeling-LoRA",
    'Flux SyntheticAnime': "dataautogpt3/FLUX-SyntheticAnime",
    'Flux Midjourney Anime': "brushpenbob/flux-midjourney-anime",
    'Coloring Book Generator': "robert123231/coloringbookgenerator",
    'Collage Flux': "prithivMLmods/Castor-Collage-Dim-Flux-LoRA",
    'Flux Product Ad Backdrop': "prithivMLmods/Flux-Product-Ad-Backdrop",
    'Product Design': "multimodalart/product-design",
    '90s Anime Art': "glif/90s-anime-art",
    'Brain Melt Acid Art': "glif/Brain-Melt-Acid-Art",
    'Lustly Flux Uncensored v1': "lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1",
    'NSFW Master Flux': "Keltezaa/NSFW_MASTER_FLUX",
    'Flux Outfit Generator': "tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator",
    'Midjourney': "Jovie/Midjourney",
    'DreamPhotoGASM': "Yntec/DreamPhotoGASM",
    'Flux Super Realism LoRA': "strangerzonehf/Flux-Super-Realism-LoRA",
    'Stable Diffusion 2-1': "stabilityai/stable-diffusion-2-1-base",
    'Duchaiten Real3D NSFW XL': "stablediffusionapi/duchaiten-real3d-nsfw-xl",
    'Pixel Art XL': "nerijs/pixel-art-xl",
    'Character Design': "KappaNeuro/character-design",
    'Sketched Out Manga': "alvdansen/sketchedoutmanga",
    'Archfey Anime': "alvdansen/archfey_anime",
    'Lofi Cuties': "alvdansen/lofi-cuties",
    'YiffyMix': "Yntec/YiffyMix",
    'Analog Madness Realistic v7': "digiplay/AnalogMadness-realistic-model-v7",
    'Selfie Photography': "artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl",
    'Filmgrain': "artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl",
    'Leonardo AI Style Illustration': "goofyai/Leonardo_Ai_Style_Illustration",
    'Cyborg Style XL': "goofyai/cyborg_style_xl",
    'Little Tinies': "alvdansen/littletinies",
    'NSFW XL': "Dremmar/nsfw-xl",
    'Analog Redmond': "artificialguybr/analogredmond",
    'Pixel Art Redmond': "artificialguybr/PixelArtRedmond",
    'Ascii Art': "CiroN2022/ascii-art",
    'Analog': "Yntec/Analog",
    'Maple Syrup': "Yntec/MapleSyrup",
    'Perfect Lewd Fantasy': "digiplay/perfectLewdFantasy_v1.01",
    'AbsoluteReality 1.8.1': "digiplay/AbsoluteReality_v1.8.1",
    'Disney': "goofyai/disney_style_xl",
    'Redmond SDXL': "artificialguybr/LogoRedmond-LogoLoraForSDXL-V2",
    'epiCPhotoGasm': "Yntec/epiCPhotoGasm"
}

def get_model_id_from_name(model_name):
    """
    Convert a user-friendly model name to its Hugging Face model ID.

    Parameters:
        model_name: Display name as shown in the UI model picker
                    (e.g. 'FLUX.1 [Dev]').

    Returns:
        The corresponding "owner/repo" model ID string. Unknown names fall
        back to "black-forest-labs/FLUX.1-schnell" so generation never
        fails on an unmapped selection.
    """
    return _MODEL_NAME_TO_ID.get(model_name, "black-forest-labs/FLUX.1-schnell")
188
+
189
+ def apply_model_prompt_enhancements(model_name, prompt):
190
+ """
191
+ Apply model-specific prompt enhancements
192
+ """
193
+ if model_name == 'Animagine 4.0':
194
+ return f"masterpiece, high score, great score, absurdres, {prompt}"
195
+ elif model_name == 'Flux Icon Kit':
196
+ return f"Icon Kit, {prompt}"
197
+ elif model_name == 'Pixel Background':
198
+ return f"Pixel Background, {prompt}"
199
+ elif model_name == 'Meme XD':
200
+ return f"meme, {prompt}"
201
+ elif model_name == 'Chill Guy':
202
+ return f"chill guy, {prompt}"
203
+ elif model_name == 'Pepe':
204
+ return f"pepe, {prompt}"
205
+ elif model_name == 'NSFWmodel':
206
+ return f"nude, {prompt}"
207
+ elif model_name == 'Claude Art':
208
+ return f"claude art, {prompt}"
209
+ elif model_name == 'Open Genmoji':
210
+ return f"emoji, {prompt}"
211
+ elif model_name == 'EBook Creative Cover':
212
+ return f"EBook Cover, {prompt}"
213
+ elif model_name == 'Flux Logo Design 2':
214
+ return f"Logo Design, {prompt}"
215
+ elif model_name == 'Isometric 3D':
216
+ return f"Isometric 3D, {prompt}"
217
+ elif model_name == 'Flux Condensation':
218
+ return f"CONDENSATION, {prompt}"
219
+ elif model_name == 'Flux Handwriting':
220
+ return f"HWRIT handwriting, {prompt}"
221
+ elif model_name == 'Shou Xin':
222
+ return f"shou_xin, pencil sketch, {prompt}"
223
+ elif model_name == 'Sketch Smudge':
224
+ return f"Sketch Smudge, {prompt}"
225
+ elif model_name == '80s Cyberpunk':
226
+ return f"80s cyberpunk, {prompt}"
227
+ elif model_name == 'Coloring Book Flux':
228
+ return f"c0l0ringb00k, coloring book, coloring book page, {prompt}"
229
+ elif model_name == 'Flux Miniature LoRA':
230
+ return f"MNTR, miniature drawing, {prompt}"
231
+ elif model_name == 'Sketch Paint':
232
+ return f"Sketch paint, {prompt}"
233
+ elif model_name == 'Flux UltraRealism 2.0':
234
+ return f"Ultra realistic, {prompt}"
235
+ elif model_name == 'Midjourney Mix':
236
+ return f"midjourney mix, {prompt}"
237
+ elif model_name == 'Midjourney Mix 2':
238
+ return f"MJ v6, {prompt}"
239
+ elif model_name == 'Flux Logo Design':
240
+ return f"wablogo, logo, Minimalist, {prompt}"
241
+ elif model_name == 'Flux Tarot Cards':
242
+ return f"Tarot card, {prompt}"
243
+ elif model_name == 'Pixel Art Sprites':
244
+ return f"a pixel image, {prompt}"
245
+ elif model_name == '3D Sketchfab':
246
+ return f"3D Sketchfab, {prompt}"
247
+ elif model_name == 'Retro Comic Flux':
248
+ return f"c0m1c, comic book panel, {prompt}"
249
+ elif model_name == 'Caricature':
250
+ return f"CCTUR3, {prompt}"
251
+ elif model_name == 'Huggieverse':
252
+ return f"HGGRE, {prompt}"
253
+ elif model_name == 'Stable Diffusion 3 Medium':
254
+ return f"A, {prompt}"
255
+ elif model_name == 'Propaganda Poster':
256
+ return f"propaganda poster, {prompt}"
257
+ elif model_name == 'Flux Game Assets V2':
258
+ return f"wbgmsst, white background, {prompt}"
259
+ elif model_name == 'SDXL HS Card Style':
260
+ return f"Hearthstone Card, {prompt}"
261
+ elif model_name == 'SoftPasty Flux':
262
+ return f"araminta_illus illustration style, {prompt}"
263
+ elif model_name == 'Flux Stickers':
264
+ return f"5t1cker 5ty1e, {prompt}"
265
+ elif model_name == 'Flux Animex V2':
266
+ return f"Animex, {prompt}"
267
+ elif model_name == 'Flux Animeo V1':
268
+ return f"Animeo, {prompt}"
269
+ elif model_name == 'Movie Board':
270
+ return f"movieboard, {prompt}"
271
+ elif model_name == 'Purple Dreamy':
272
+ return f"Purple Dreamy, {prompt}"
273
+ elif model_name == 'PS1 Style Flux':
274
+ return f"ps1 game screenshot, {prompt}"
275
+ elif model_name == 'Softserve Anime':
276
+ return f"sftsrv style illustration, {prompt}"
277
+ elif model_name == 'Flux Tarot v1':
278
+ return f"in the style of TOK a trtcrd tarot style, {prompt}"
279
+ elif model_name == 'Half Illustration':
280
+ return f"in the style of TOK, {prompt}"
281
+ elif model_name == 'Flux Ghibsky Illustration':
282
+ return f"GHIBSKY style, {prompt}"
283
+ elif model_name == 'Flux Koda':
284
+ return f"flmft style, {prompt}"
285
+ elif model_name == 'Soviet Diffusion XL':
286
+ return f"soviet poster, {prompt}"
287
+ elif model_name == 'Frosting Lane Flux':
288
+ return f"frstingln illustration, {prompt}"
289
+ elif model_name == 'Boreal':
290
+ return f"photo, {prompt}"
291
+ elif model_name == 'How2Draw':
292
+ return f"How2Draw, {prompt}"
293
+ elif model_name == 'Fashion Hut Modeling LoRA':
294
+ return f"Modeling of, {prompt}"
295
+ elif model_name == 'Flux SyntheticAnime':
296
+ return f"1980s anime screengrab, VHS quality, syntheticanime, {prompt}"
297
+ elif model_name == 'Flux Midjourney Anime':
298
+ return f"egmid, {prompt}"
299
+ elif model_name == 'Collage Flux':
300
+ return f"collage, {prompt}"
301
+ elif model_name == 'Flux Product Ad Backdrop':
302
+ return f"Product Ad, {prompt}"
303
+ elif model_name == 'Product Design':
304
+ return f"product designed by prdsgn, {prompt}"
305
+ elif model_name == 'Brain Melt Acid Art':
306
+ return f"maximalism, in an acid surrealism style, {prompt}"
307
+ elif model_name == 'NSFW Master Flux':
308
+ return f"NSFW, {prompt}"
309
+ elif model_name == 'Pixel Art XL':
310
+ return f"pixel art, {prompt}"
311
+ elif model_name == 'Character Design':
312
+ return f"Character Design, {prompt}"
313
+ elif model_name == 'Sketched Out Manga':
314
+ return f"daiton, {prompt}"
315
+ elif model_name == 'Selfie Photography':
316
+ return f"instagram model, discord profile picture, {prompt}"
317
+ elif model_name == 'Filmgrain':
318
+ return f"Film Grain, FilmGrainAF, {prompt}"
319
+ elif model_name == 'Leonardo AI Style Illustration':
320
+ return f"leonardo style, illustration, vector art, {prompt}"
321
+ elif model_name == 'Cyborg Style XL':
322
+ return f"cyborg style, {prompt}"
323
+ elif model_name == 'Analog Redmond':
324
+ return f"timeless style, {prompt}"
325
+ elif model_name == 'Pixel Art Redmond':
326
+ return f"Pixel Art, {prompt}"
327
+ elif model_name == 'Ascii Art':
328
+ return f"ascii art, {prompt}"
329
+ elif model_name == 'Disney':
330
+ return f"Disney style, {prompt}"
331
 
332
+ return prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
333
 
334
def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    """Public image-generation entry point.

    Kept as a thin, backward-compatible wrapper: every argument is forwarded
    unchanged (positionally, so the callee's parameter names need not match)
    to ``query_with_auto_routing``, which lets Hugging Face pick the best
    available inference provider automatically.

    Returns whatever ``query_with_auto_routing`` returns (a PIL image on
    success).
    """
    return query_with_auto_routing(
        prompt, model, custom_lora,
        is_negative, steps, cfg_scale, sampler,
        seed, strength, width, height,
    )
339
 
340
  # Custom CSS to hide the footer in the interface
341
  css = """
 
357
  with gr.Row():
358
  # Textbox for custom LoRA input
359
  custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional)", placeholder="multimodalart/vintage-ads-flux")
 
 
 
 
 
 
 
 
 
 
 
 
 
360
  with gr.Row():
361
  # Accordion for selecting the model
362
  with gr.Accordion("Featured Models", open=False):
 
564
  """
565
  )
566
 
567
    # Accordion about automatic inference routing.
    # NOTE(review): purely informational UI panel — the markdown below is
    # user-facing text rendered at runtime, so its wording is left untouched.
    # NOTE(review): exact indentation of the original triple-quoted string is
    # not recoverable from this diff view — confirm against the rendered page.
    with gr.Accordion("πŸ”€ Automatic Provider Routing", open=False):
        gr.Markdown(
            """
            ## How It Works

            This app uses **Hugging Face's automatic provider routing** to give you the best image generation experience.

            ### ✨ What This Means For You:
            - **No provider selection needed** - the system automatically chooses the best available provider
            - **Optimal performance** - each model is routed to its best-performing infrastructure
            - **Unified experience** - all models work the same way, regardless of the underlying provider
            - **Automatic failover** - if one provider is busy, requests are routed to alternatives

            ### πŸ”„ Behind the Scenes:
            When you generate an image, Hugging Face intelligently routes your request to the most suitable provider:
            - **πŸ€— Hugging Face Inference** - For comprehensive model support and free tier access
            - **⚑ Replicate** - For high-performance inference when available
            - **πŸš€ Fal AI** - For optimized speed and enterprise-grade performance

            ### πŸ’‘ Benefits:
            - **Always available** - automatic routing ensures maximum uptime
            - **Best performance** - each model runs on its optimal infrastructure
            - **Simple experience** - just select your model and generate!

            **Note**: You only need your Hugging Face token - no additional API keys required!
            """
        )
595
 
 
629
  with gr.Row():
630
  image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
631
 
632
+ # Set up button click event to call the main query function
633
+ text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
634
 
635
  print("Launching Gradio interface...") # Debug log
636
  # Launch the Gradio interface without showing the API or sharing externally