import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator
from gradio_client import Client, handle_file
from PIL import Image
from lora_saver import main as backup_loras

os.system('python quent_models.py')  # run the quent_models.py setup script

from loras import loras
from huggingface_hub import login
from themes import IndonesiaTheme  # Import custom IndonesiaTheme
from lorify import Lorify
from css import css2
MAX_SEED = np.iinfo(np.int32).max


HF_TOKEN = os.getenv('HF_TOKEN')
HF_TOKEN_UPSCALER = os.getenv('HF_TOKEN')

qwen_client = Client("K00B404/HugChatWrap", hf_token=HF_TOKEN)
loaded_loras=[]

for lora in loras:
    print(lora.get('repo'))
    loaded_loras.append(lora.get('repo'))
    
# Function to enable LoRA if selected
def enable_lora(lora_add, basemodel):
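    """Return the LoRA repo id when one is selected, otherwise fall back to the base model."""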
    print(f"[-] Determining model: LoRA {'enabled' if lora_add else 'disabled'}, base model: {basemodel}")
    return basemodel if not lora_add else lora_add
    
# Function to generate image
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
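    """Translate the prompt to English, append the LoRA trigger word, and generate an image.

    Uses the Hugging Face serverless Inference API via AsyncInferenceClient.
    Returns (PIL.Image, seed) on success, or (None, None) on failure.
    """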
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)

        print(f"[-] Menerjemahkan prompt: {prompt}")
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
        
        print(f"[-] Generating image with prompt: {text}, model: {model}")
        client = AsyncInferenceClient()
        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
        return image, seed
    except Exception as e:
        print(f"[-] Error generating image: {e}")
        return None, None

# Function to upscale image
def get_upscale_finegrain(prompt, img_path, upscale_factor):
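    """Upscale an image by calling the finegrain/finegrain-image-enhancer Space via gradio_client.

    Returns the path to the upscaled image, or None if the request fails.
    """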
    try:
        print(f"[-] Memulai proses upscaling dengan faktor {upscale_factor} untuk gambar {img_path}")
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN)
        result = client.predict(
            input_image=handle_file(img_path), 
            prompt=prompt, 
            negative_prompt="worst quality, low quality, normal quality",
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            denoise_strength=0.35, 
            num_inference_steps=18,
            solver="DDIM", 
            api_name="/process"
        )
        print(f"[-] Proses upscaling berhasil.")
        return result[1]  # Return upscale image path
    except Exception as e:
        print(f"[-] Error scaling image: {e}")
        return None
        
# Main function to generate images and optionally upscale
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
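    """Top-level pipeline: pick the model (base or LoRA), generate, save, and optionally upscale.

    Returns a list of local image paths for the gallery: [original] or [original, upscaled].
    """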
    print(f"[-] Memulai generasi gambar dengan prompt: {prompt}")
    
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    print(f"[-] Menggunakan model: {model}")

    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    
    if image is None:
        print("[-] Image generation failed.")
        return []

    image_path = "temp_image.jpg"
    print(f"[-] Menyimpan gambar sementara di: {image_path}")
    image.save(image_path, format="JPEG")

    upscale_image_path = None
    if process_upscale:
        print(f"[-] Memproses upscaling dengan faktor: {upscale_factor}")
        upscale_image_path = get_upscale_finegrain(prompt, image_path, upscale_factor)
        if upscale_image_path is not None and os.path.exists(upscale_image_path):
            print(f"[-] Proses upscaling selesai. Gambar tersimpan di: {upscale_image_path}")
            return [image_path, upscale_image_path]  # Return both images
        else:
            print("[-] Upscaling gagal, jalur gambar upscale tidak ditemukan.")

    return [image_path]
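
# Example (sketch, outside Gradio): `gen` is a coroutine and can be driven directly, e.g.
#   import asyncio
#   paths = asyncio.run(gen("a misty forest at dawn", "black-forest-labs/FLUX.1-schnell",
#                           1024, 768, 8, 8, -1, 2, False, "", False))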

base_models=[
    "black-forest-labs/FLUX.1-schnell", 
    "black-forest-labs/FLUX.1-DEV", 
    "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro", 
    #"city96/FLUX.1-dev-gguf"
] 

loras_list_custom=[
    "Keltezaa/anal-riding-missionary",
    "Keltezaa/Fingering",
    "Keltezaa/Spreading",
    "Keltezaa/flux-prone-ass-spread-hd",
    "Keltezaa/Flux_P",
    "Shakker-Labs/FLUX.1-dev-LoRA-add-details", 
    "XLabs-AI/flux-RealismLora",       
]  # + loaded_loras  # add LoRAs loaded from file
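# backup_loras (imported from lora_saver) is assumed to back up / download the listed
# LoRA repos; see lora_saver.py for its actual behaviour.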
 
backup_loras(loras_list_custom)

# Creating Gradio interface
with gr.Blocks(css=css2, theme=IndonesiaTheme()) as WallpaperFluxMaker:
    # Displaying the application title
    gr.HTML('<div id="banner">✨ Flux MultiMode Generator + Upscaler ✨</div>')

    with gr.Column(elem_id="col-container"):
        # Output section (replacing ImageSlider with gr.Gallery)
        with gr.Row():
            output_res = gr.Gallery(label="⚡ Flux / Upscaled Image ⚡", elem_id="output-res", columns=2, height="auto")

        # User input section split into two columns
        with gr.Row():
            # Column 1: Input prompt, LoRA, and base model
            with gr.Column(scale=1, elem_id="col-left"):
                prompt = gr.Textbox(
                    label="📜 Description", 
                    placeholder="Tuliskan prompt Anda dalam bahasa apapun, yang akan langsung diterjemahkan ke bahasa Inggris.",
                    elem_id="textbox-prompt"
                )

                basemodel_choice = gr.Dropdown(
                    label="🖼️ Select Model", 
                    choices=base_models, 
                    value=base_models[0]
                )

                lora_model_choice = gr.Dropdown(
                    label="🎨 Select LoRA", 
                    choices=loras_list_custom, 
                    value=loras_list_custom[0]
                )

                process_lora = gr.Checkbox(label="🎨 Enable LoRA")
                process_upscale = gr.Checkbox(label="🔍 Enable Upscaling")
                upscale_factor = gr.Radio(label="🔍 Upscale Factor", choices=[2, 4, 8], value=2)

            # Column 2: Advanced options (always open)
            with gr.Column(scale=1, elem_id="col-right"):
                with gr.Accordion(label="⚙️ Advanced Options", open=True):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=1, value=8)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=8)
                    seed = gr.Number(label="Seed", value=-1)

        # Button to generate image
        btn = gr.Button("🚀 Generate Image", elem_id="generate-btn")

        # Running the `gen` function when "Generate" button is pressed
        btn.click(fn=gen, inputs=[
            prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora
        ], outputs=output_res)

# Launching the Gradio app
WallpaperFluxMaker.queue(api_open=True).launch(show_api=True)