"""Gradio front-end for the consistent-character Cog image by fofr.

Boots a local Cog HTTP server, then forwards Gradio inputs to its
/predictions endpoint and shows the returned images in a gallery.
"""
import base64
import os
import signal
import subprocess
import tempfile
import time

import gradio as gr
import requests
import spaces
import torch

# ZeroGPU sanity check: outside a @spaces.GPU function this tensor stays on CPU
zero = torch.Tensor([0]).cuda()
print(zero.device)  # <-- 'cpu' πŸ€”

# Input names, in the same order as the Gradio `inputs` list below;
# they must match the Cog predictor's input schema
names = ['prompt', 'negative_prompt', 'subject', 'number_of_outputs', 'number_of_images_per_pose', 'randomise_poses', 'output_format', 'output_quality', 'seed']

def check_cog_server():
    cog_process = None
    try:
        # Start the Cog server in the background
        cog_process = subprocess.Popen(["python3", "-m", "cog.server.http", "--threads=10"], cwd="/src")

        # Wait for the Cog server to start listening on port 5000
        counter1 = 0
        while True:
            try:
                requests.get("http://localhost:5000")
                print("Cog server is running on port 5000.")
                break
            except requests.exceptions.ConnectionError:
                print("Waiting for Cog server to start on port 5000...")
                time.sleep(5)
                counter1 += 1
                if counter1 >= 250:
                    raise Exception("Error: Cog server did not start on port 5000 after 250 attempts.")

        # Wait for the Cog server to finish loading models and report READY
        counter2 = 0
        while True:
            response = requests.get("http://localhost:5000/health-check")
            status = response.json().get("status")
            if status == "READY":
                print("Cog server is fully ready.")
                break
            else:
                print("Waiting for Cog server (models loading) on port 5000...")
                time.sleep(5)
                counter2 += 1
                if counter2 >= 250:
                    raise Exception("Error: Cog server did not become fully ready after 250 attempts.")

    except Exception as e:
        print(f"Error: {str(e)}")
        if cog_process is not None:
            cog_process.send_signal(signal.SIGINT)  # Shut the Cog server down before re-raising
        raise e
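
# NOTE: predict() below calls parse_outputs() and process_outputs(), which were
# missing from this file. The versions here are minimal sketches (assumptions,
# not the original implementations): parse_outputs flattens the Cog JSON output
# into a flat list of values, and process_outputs writes any base64 data-URI
# images to temporary files so the Gradio gallery can display them.

def parse_outputs(data):
    # Recursively flatten dicts/lists in the Cog output into a flat list
    res = []
    if isinstance(data, dict):
        for value in data.values():
            res.extend(parse_outputs(value))
    elif isinstance(data, list):
        for value in data:
            res.extend(parse_outputs(value))
    else:
        res.append(data)
    return res

def process_outputs(outputs):
    # Turn base64 data URIs into temp files; pass URLs and paths through as-is
    processed = []
    for output in outputs:
        if isinstance(output, str) and output.startswith("data:"):
            header, encoded = output.split(",", 1)
            ext = header.split("/")[1].split(";")[0]  # e.g. "webp"
            with tempfile.NamedTemporaryFile(suffix=f".{ext}", delete=False) as f:
                f.write(base64.b64decode(encoded))
                processed.append(f.name)
        else:
            processed.append(output)
    # There is a single Gallery output component, so return one value:
    # the full list of images
    return [processed]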

@spaces.GPU
def predict(request: gr.Request, *args, progress=gr.Progress(track_tqdm=True)):
    print(zero.device)  # <-- 'cuda:0' πŸ€—
    headers = {'Content-Type': 'application/json'}

    payload = {"input": {}}

    # Gradio serves local files at /file=<path>; rewrite any filepath inputs
    # (e.g. the subject image) as URLs the Cog server can fetch
    base_url = "http://0.0.0.0:7860"
    for i, key in enumerate(names):
        value = args[i]
        if value and os.path.exists(str(value)):
            value = f"{base_url}/file=" + value
        if value is not None and value != "":
            payload["input"][key] = value

    response = requests.post("http://0.0.0.0:5000/predictions", headers=headers, json=payload)

    if response.status_code == 201:
        # Poll the prediction endpoint until the job finishes
        follow_up_url = response.json()["urls"]["get"]
        response = requests.get(follow_up_url, headers=headers)
        while True:
            status = response.json()["status"]
            if status == "succeeded":
                break
            if status == "failed":
                raise gr.Error("The submission failed!")
            time.sleep(1)
            response = requests.get(follow_up_url, headers=headers)
    if response.status_code == 200:
        json_response = response.json()
        # If the output component is JSON, return the entire output response
        if outputs[0].get_config()["name"] == "json":
            return json_response["output"]
        predict_outputs = parse_outputs(json_response["output"])
        processed_outputs = process_outputs(predict_outputs)
        return tuple(processed_outputs) if len(processed_outputs) > 1 else processed_outputs[0]
    else:
        if response.status_code == 409:
            raise gr.Error("Sorry, the Cog image is still processing. Try again in a bit.")
        raise gr.Error(f"The submission failed! Error: {response.status_code}")

title = "Demo for consistent-character cog image by fofr"
description = "Create images of a given character in different poses β€’ running cog image by fofr"

css="""
#col-container{
    margin: 0 auto;
    max-width: 1400px;
    text-align: left;
}
"""
with gr.Blocks(css=css) as app:
    with gr.Column(elem_id="col-container"):
        gr.HTML(f"""
        <h2 style="text-align: center;">Consistent Character Workflow</h2>
        <p style="text-align: center;">{description}</p>
        """)

        with gr.Row():
            with gr.Column(scale=2):
                prompt = gr.Textbox(
                    label="Prompt", info='''Describe the subject. Include clothes and hairstyle for more consistency.'''
                )
        
                subject = gr.Image(
                    label="Subject", type="filepath"
                )

                submit_btn = gr.Button("Submit")

                with gr.Accordion(label="Advanced Settings", open=False):
                    
                    negative_prompt = gr.Textbox(
                        label="Negative Prompt", info='''Things you do not want to see in your image''',
                        value="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry"
                    )

                    with gr.Row():

                        number_of_outputs = gr.Slider(
                            label="Number Of Outputs", info='''The number of images to generate.''', value=2,
                            minimum=1, maximum=4, step=1,
                        )
                        
                        number_of_images_per_pose = gr.Slider(
                            label="Number Of Images Per Pose", info='''The number of images to generate for each pose.''', value=1,
                            minimum=1, maximum=4, step=1,
                        )

                    with gr.Row():
                        
                        randomise_poses = gr.Checkbox(
                            label="Randomise Poses", info='''Randomise the poses used.''', value=True
                        )
                        
                        output_format = gr.Dropdown(
                            choices=['webp', 'jpg', 'png'], label="Output Format", info='''Format of the output images''', value="webp"
                        )
                    
                    with gr.Row():
                        
                        output_quality = gr.Number(
                            label="Output Quality", info='''Quality of the output images, from 0 to 100. 100 is best quality, 0 is lowest quality.''', value=80
                        )
                        
                        seed = gr.Number(
                            label="Seed", info='''Set a seed for reproducibility. Random by default.''', value=None
                        )

            with gr.Column(scale=3):  # scale must be an integer in recent Gradio versions; 2:3 keeps the original split
                consistent_results = gr.Gallery(label="Consistent Results")

    inputs = [prompt, negative_prompt, subject, number_of_outputs, number_of_images_per_pose, randomise_poses, output_format, output_quality, seed]
    outputs = [consistent_results]

    submit_btn.click(
        fn=predict,
        inputs=inputs,
        outputs=outputs,
        show_api=False
    )

# Check the Cog server's readiness before launching the Gradio app
check_cog_server()

app.queue(max_size=12, api_open=False).launch(share=False, show_api=False)