Spaces:
Sleeping
Sleeping
merge hf default + github
Browse files- LICENSE +121 -0
- README.md +6 -4
- app.py +252 -0
- config.py +24 -0
- imagemeta.py +230 -0
- requirements.txt +3 -0
LICENSE
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Creative Commons Legal Code
|
2 |
+
|
3 |
+
CC0 1.0 Universal
|
4 |
+
|
5 |
+
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
|
6 |
+
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
|
7 |
+
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
|
8 |
+
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
|
9 |
+
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
|
10 |
+
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
|
11 |
+
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
|
12 |
+
HEREUNDER.
|
13 |
+
|
14 |
+
Statement of Purpose
|
15 |
+
|
16 |
+
The laws of most jurisdictions throughout the world automatically confer
|
17 |
+
exclusive Copyright and Related Rights (defined below) upon the creator
|
18 |
+
and subsequent owner(s) (each and all, an "owner") of an original work of
|
19 |
+
authorship and/or a database (each, a "Work").
|
20 |
+
|
21 |
+
Certain owners wish to permanently relinquish those rights to a Work for
|
22 |
+
the purpose of contributing to a commons of creative, cultural and
|
23 |
+
scientific works ("Commons") that the public can reliably and without fear
|
24 |
+
of later claims of infringement build upon, modify, incorporate in other
|
25 |
+
works, reuse and redistribute as freely as possible in any form whatsoever
|
26 |
+
and for any purposes, including without limitation commercial purposes.
|
27 |
+
These owners may contribute to the Commons to promote the ideal of a free
|
28 |
+
culture and the further production of creative, cultural and scientific
|
29 |
+
works, or to gain reputation or greater distribution for their Work in
|
30 |
+
part through the use and efforts of others.
|
31 |
+
|
32 |
+
For these and/or other purposes and motivations, and without any
|
33 |
+
expectation of additional consideration or compensation, the person
|
34 |
+
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
|
35 |
+
is an owner of Copyright and Related Rights in the Work, voluntarily
|
36 |
+
elects to apply CC0 to the Work and publicly distribute the Work under its
|
37 |
+
terms, with knowledge of his or her Copyright and Related Rights in the
|
38 |
+
Work and the meaning and intended legal effect of CC0 on those rights.
|
39 |
+
|
40 |
+
1. Copyright and Related Rights. A Work made available under CC0 may be
|
41 |
+
protected by copyright and related or neighboring rights ("Copyright and
|
42 |
+
Related Rights"). Copyright and Related Rights include, but are not
|
43 |
+
limited to, the following:
|
44 |
+
|
45 |
+
i. the right to reproduce, adapt, distribute, perform, display,
|
46 |
+
communicate, and translate a Work;
|
47 |
+
ii. moral rights retained by the original author(s) and/or performer(s);
|
48 |
+
iii. publicity and privacy rights pertaining to a person's image or
|
49 |
+
likeness depicted in a Work;
|
50 |
+
iv. rights protecting against unfair competition in regards to a Work,
|
51 |
+
subject to the limitations in paragraph 4(a), below;
|
52 |
+
v. rights protecting the extraction, dissemination, use and reuse of data
|
53 |
+
in a Work;
|
54 |
+
vi. database rights (such as those arising under Directive 96/9/EC of the
|
55 |
+
European Parliament and of the Council of 11 March 1996 on the legal
|
56 |
+
protection of databases, and under any national implementation
|
57 |
+
thereof, including any amended or successor version of such
|
58 |
+
directive); and
|
59 |
+
vii. other similar, equivalent or corresponding rights throughout the
|
60 |
+
world based on applicable law or treaty, and any national
|
61 |
+
implementations thereof.
|
62 |
+
|
63 |
+
2. Waiver. To the greatest extent permitted by, but not in contravention
|
64 |
+
of, applicable law, Affirmer hereby overtly, fully, permanently,
|
65 |
+
irrevocably and unconditionally waives, abandons, and surrenders all of
|
66 |
+
Affirmer's Copyright and Related Rights and associated claims and causes
|
67 |
+
of action, whether now known or unknown (including existing as well as
|
68 |
+
future claims and causes of action), in the Work (i) in all territories
|
69 |
+
worldwide, (ii) for the maximum duration provided by applicable law or
|
70 |
+
treaty (including future time extensions), (iii) in any current or future
|
71 |
+
medium and for any number of copies, and (iv) for any purpose whatsoever,
|
72 |
+
including without limitation commercial, advertising or promotional
|
73 |
+
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
|
74 |
+
member of the public at large and to the detriment of Affirmer's heirs and
|
75 |
+
successors, fully intending that such Waiver shall not be subject to
|
76 |
+
revocation, rescission, cancellation, termination, or any other legal or
|
77 |
+
equitable action to disrupt the quiet enjoyment of the Work by the public
|
78 |
+
as contemplated by Affirmer's express Statement of Purpose.
|
79 |
+
|
80 |
+
3. Public License Fallback. Should any part of the Waiver for any reason
|
81 |
+
be judged legally invalid or ineffective under applicable law, then the
|
82 |
+
Waiver shall be preserved to the maximum extent permitted taking into
|
83 |
+
account Affirmer's express Statement of Purpose. In addition, to the
|
84 |
+
extent the Waiver is so judged Affirmer hereby grants to each affected
|
85 |
+
person a royalty-free, non transferable, non sublicensable, non exclusive,
|
86 |
+
irrevocable and unconditional license to exercise Affirmer's Copyright and
|
87 |
+
Related Rights in the Work (i) in all territories worldwide, (ii) for the
|
88 |
+
maximum duration provided by applicable law or treaty (including future
|
89 |
+
time extensions), (iii) in any current or future medium and for any number
|
90 |
+
of copies, and (iv) for any purpose whatsoever, including without
|
91 |
+
limitation commercial, advertising or promotional purposes (the
|
92 |
+
"License"). The License shall be deemed effective as of the date CC0 was
|
93 |
+
applied by Affirmer to the Work. Should any part of the License for any
|
94 |
+
reason be judged legally invalid or ineffective under applicable law, such
|
95 |
+
partial invalidity or ineffectiveness shall not invalidate the remainder
|
96 |
+
of the License, and in such case Affirmer hereby affirms that he or she
|
97 |
+
will not (i) exercise any of his or her remaining Copyright and Related
|
98 |
+
Rights in the Work or (ii) assert any associated claims and causes of
|
99 |
+
action with respect to the Work, in either case contrary to Affirmer's
|
100 |
+
express Statement of Purpose.
|
101 |
+
|
102 |
+
4. Limitations and Disclaimers.
|
103 |
+
|
104 |
+
a. No trademark or patent rights held by Affirmer are waived, abandoned,
|
105 |
+
surrendered, licensed or otherwise affected by this document.
|
106 |
+
b. Affirmer offers the Work as-is and makes no representations or
|
107 |
+
warranties of any kind concerning the Work, express, implied,
|
108 |
+
statutory or otherwise, including without limitation warranties of
|
109 |
+
title, merchantability, fitness for a particular purpose, non
|
110 |
+
infringement, or the absence of latent or other defects, accuracy, or
|
111 |
+
the present or absence of errors, whether or not discoverable, all to
|
112 |
+
the greatest extent permissible under applicable law.
|
113 |
+
c. Affirmer disclaims responsibility for clearing rights of other persons
|
114 |
+
that may apply to the Work or any use thereof, including without
|
115 |
+
limitation any person's Copyright and Related Rights in the Work.
|
116 |
+
Further, Affirmer disclaims responsibility for obtaining any necessary
|
117 |
+
consents, permissions or other rights required for any use of the
|
118 |
+
Work.
|
119 |
+
d. Affirmer understands and acknowledges that Creative Commons is not a
|
120 |
+
party to this document and has no duty or obligation with respect to
|
121 |
+
this CC0 or use of the Work.
|
README.md
CHANGED
@@ -1,14 +1,16 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
colorFrom: red
|
5 |
colorTo: pink
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: cc0-1.0
|
11 |
short_description: A simple text2image serverless interface
|
12 |
---
|
13 |
|
14 |
-
|
|
|
|
|
|
1 |
---
|
2 |
+
title: Huggingface Hub Text2Image Inference Client
|
3 |
+
emoji: 🖼
|
4 |
colorFrom: red
|
5 |
colorTo: pink
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.42.0
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: cc0-1.0
|
11 |
short_description: A simple text2image serverless interface
|
12 |
---
|
13 |
|
14 |
+
A simple text2image interface for the Hugging Face Hub serverless API,
|
15 |
+
supporting extra generation parameters.
|
16 |
+
|
app.py
ADDED
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections import OrderedDict
|
2 |
+
from pathlib import Path
|
3 |
+
from time import perf_counter
|
4 |
+
|
5 |
+
import gradio as gr
|
6 |
+
from huggingface_hub import (
|
7 |
+
HfApi,
|
8 |
+
InferenceClient,
|
9 |
+
)
|
10 |
+
|
11 |
+
import config as cfg
|
12 |
+
from imagemeta import (
|
13 |
+
get_image_meta_str,
|
14 |
+
add_metadata_to_pil_image,
|
15 |
+
save_image_timestamp,
|
16 |
+
)
|
17 |
+
|
18 |
+
# XXX find out which schedulers are actually supported
# Maps UI sampler names (A1111-style) to diffusers scheduler class names;
# the selected value is sent to the API as the `scheduler` kwarg
# (see extract_params_inference).
scheduler_map = {
    "DDIM": 'DDIMScheduler',
    "DDPM": 'DDPMScheduler',
    "DEIS": 'DEISMultistepScheduler',
    "DPM++ 2M": 'DPMSolverMultistepScheduler',
    "DPM++ 2S": 'DPMSolverSinglestepScheduler',
    "DPM++ SDE": 'DPMSolverSDEScheduler',
    "DPM2 a": 'KDPM2AncestralDiscreteScheduler',
    "DPM2": 'KDPM2DiscreteScheduler',
    "Euler EDM": 'EDMEulerScheduler',
    "Euler a": 'EulerAncestralDiscreteScheduler',
    "Euler": 'EulerDiscreteScheduler',
    "Heun": 'HeunDiscreteScheduler',
    "LCM": 'LCMScheduler',
    "LMS": 'LMSDiscreteScheduler',
    "PNDM": 'PNDMScheduler',
    "TCD": 'TCDScheduler',
    "UniPC": 'UniPCMultistepScheduler',
}
|
38 |
+
|
39 |
+
|
40 |
+
def components_to_parameters(args, ctrl):
    """Pair component values with their registered keys into a parameter dict.

    Plain string keys map directly to their value.  Tuple keys of the form
    (kind, index, prop) are first grouped by (kind, index); each group that
    carries a 'name' prop is then nested under params[kind][name].
    """
    result = {}
    grouped = {}
    for key, val in zip(ctrl, args):
        if type(key) is tuple:
            # (kind, index, prop), val ==> grouped[(kind, index)][prop] = val
            bucket = grouped.setdefault(key[0:2], {})
            bucket[key[2]] = val
        else:
            result[key] = val
    # {(kind, index): {prop: val}} ==> result[kind][name] = {prop: val}
    for (kind, _index), props in grouped.items():
        slot = result.setdefault(kind, {})
        label = props.get('name')
        if label:
            slot.setdefault(label, {}).update(props)
    return result
|
59 |
+
|
60 |
+
# Registries of Gradio components, keyed by parameter name (or tuple key):
# ctra holds generation input components, ctro holds output components.
# Insertion order matters: infer_api_fn receives values in this order.
ctra = OrderedDict()
ctro = OrderedDict()

# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client

from threading import RLock
# Serializes image autosaving (used by call_text_to_image_api) so concurrent
# generations don't race on output filenames.
lock = RLock()
|
68 |
+
|
69 |
+
def extract_params_inference(params):
    """Split UI parameters into API-call kwargs and metadata to be saved.

    Returns (kwargs, save_params): kwargs feeds InferenceClient.text_to_image,
    save_params is embedded into the generated image's metadata.  Falsy
    values (0, '', None) are dropped.  clip_skip is off-by-one between the
    UI (1-based) and the API, hence the -1 adjustment.
    """
    passthrough = ('model', 'prompt', 'negative_prompt', 'num_inference_steps',
                   'guidance_scale', 'width', 'height', 'seed')
    kwargs = {key: params[key] for key in passthrough if params.get(key)}
    save_params = dict(kwargs)

    clip_skip = params.get('clip_skip')
    if clip_skip and clip_skip > 1:
        adjusted = clip_skip - 1
        kwargs['clip_skip'] = adjusted
        save_params['clip_skip'] = adjusted

    # the API requires a prompt, even an empty one
    kwargs.setdefault('prompt', '')

    sampler = params.get('sampler')
    if sampler:
        save_params['sampler'] = sampler
        scheduler = scheduler_map.get(sampler)
        if scheduler:
            kwargs['scheduler'] = scheduler

    return kwargs, save_params
|
97 |
+
|
98 |
+
# Hard limit (seconds) for a single Inference API image generation.
inference_timeout = 300.0

def call_text_to_image_api(params, timeout=inference_timeout, token=None):
    """Generate one image through the HF Inference API and return it in a list.

    params  -- UI parameter dict (see components_to_parameters).
    timeout -- per-request timeout in seconds, forwarded to the client.
    token   -- optional Hugging Face API token.

    The generated PIL image gets the generation metadata embedded, and is
    autosaved to cfg.AUTOSAVE_DIR when that is configured.
    """
    if cfg.DEBUG: print('call_text_to_image_api:', params)
    kwargs, save_params = extract_params_inference(params)
    # BUGFIX: `timeout` was previously accepted but never used; forward it to
    # the client so long generations actually respect the configured limit.
    client = InferenceClient(token=token, timeout=timeout)
    if cfg.DEBUG: print('call_text_to_image_api: calling params:', kwargs)
    result = client.text_to_image(**kwargs)
    image_format = params.get('image_format', cfg.DEFAULT_IMAGE_FORMAT)
    if result:
        add_metadata_to_pil_image(result, save_params)
        if cfg.AUTOSAVE_DIR:
            # lock: avoid filename races when several requests autosave at once
            with lock:
                filename = save_image_timestamp(result, cfg.AUTOSAVE_DIR, format=image_format)
                if cfg.DEBUG: print('call_text_to_image_api: saved to {}'.format(filename))
    return [result]
|
114 |
+
|
115 |
+
|
116 |
+
def infer_api_fn(progress=gr.Progress(), previouslist=None, *args):
    """Gradio event handler: run one API generation and append it to the gallery.

    progress     -- presumably injected by Gradio via the gr.Progress default,
                    not filled from `inputs` — TODO confirm against gradio docs.
    previouslist -- current gallery contents (first wired input component).
    *args        -- remaining input values, in ctra registration order.

    Returns (updated gallery list, timing string), or None when no usable
    model is selected.
    """
    stime = perf_counter()
    params = components_to_parameters(args, ctra)
    model_str = params.get('model')
    # 'NA' acts as a placeholder meaning "no model selected"
    if not model_str or model_str == 'NA':
        return None

    kwargs = {'timeout':inference_timeout}
    if cfg.HF_TOKEN_SD:
        kwargs.update(token=cfg.HF_TOKEN_SD)

    result = call_text_to_image_api(params, **kwargs)

    print('gen_fn returning', result)
    if previouslist is None: previouslist = []
    mtime = 'API inference {:.2f}s'.format(perf_counter() - stime)
    return previouslist + result, mtime
|
134 |
+
|
135 |
+
|
136 |
+
def update_inference_models():
    """Refresh the checkpoint dropdown with deployed text-to-image models,
    plus the authenticated user's own models when a token is available."""
    token = cfg.HF_TOKEN_SD or None
    client = InferenceClient(token=token)
    deployed = client.list_deployed_models().get('text-to-image', [])
    owned = []
    if token and HfApi:
        api = HfApi(token=token)
        username = api.whoami()['name']
        owned = [model.id for model in api.list_models(author=username)]
    return gr.Dropdown(choices=sorted(deployed + owned))
|
148 |
+
|
149 |
+
# JS snippet wired to the dice button: picks a random 32-bit seed client-side.
js_random = '() => Math.floor(Math.random()*(2**32))'

# NOTE(review): appears unused in this file — possibly for a future LoRA UI.
num_loras=5

app = gr.Blocks()
with app:
    # per-session state; 'selected' tracks the gallery image currently selected
    state = gr.State({})
    gr.Markdown('# Huggingface Hub Inference Client')
    with gr.Row():
        with gr.Column():
            with gr.Row():
                print(cfg.EDIT_MODELS)
                # Model selector: editable dropdown, fixed dropdown, or a
                # read-only textbox, depending on cfg.EDIT_MODELS.
                if 'edit' in cfg.EDIT_MODELS or 'download' in cfg.EDIT_MODELS:
                    ctra['model'] = gr.Dropdown(label="Checkpoint", choices=cfg.MODEL_LIST,
                                                value=cfg.MODEL_LIST and cfg.MODEL_LIST[0] or None,
                                                allow_custom_value=True, scale=3)
                elif len(cfg.MODEL_LIST) > 1:
                    ctra['model'] = gr.Dropdown(label="Checkpoint", choices=cfg.MODEL_LIST,
                                                value=cfg.MODEL_LIST[0],
                                                allow_custom_value=False, scale=3)
                else:
                    ctra['model'] = gr.Textbox(label="Checkpoint", value=cfg.MODEL_LIST[0],
                                               interactive=False, scale=3)
                if 'download' in cfg.EDIT_MODELS:
                    ctra_update_infmod = gr.Button("⬇️ Get Model List", scale=1)
            ctra['prompt'] = gr.Textbox(label="Prompt")
            ctra['negative_prompt'] = gr.Textbox(label="Negative prompt")
            with gr.Row():
                ctra['width'] = gr.Number(label="Width", value=512,
                                          minimum=0, maximum=1024, step=8, precision=0)
                ctra['height'] = gr.Number(label="Height", value=512,
                                           minimum=0, maximum=1024, step=8, precision=0)
                ctra['sampler'] = gr.Dropdown(label="Sampler",
                                              choices=sorted(scheduler_map.keys()), value='Euler',
                                              scale=1)
            with gr.Row():
                ctra['seed'] = gr.Number(label="Seed", value=42,
                                         minimum=-1, maximum=2**64-1, step=1, precision=0)
                ctra['num_inference_steps'] = gr.Number(label="Steps",
                                                        minimum=0, maximum=50, value=10, step=1, scale=0)
                ctra['guidance_scale'] = gr.Number(label="CFG Scale",
                                                   minimum=0, maximum=10, value=4.0, step=0.1, scale=0)
                ctra['clip_skip'] = gr.Number(label="CLIP Skip",
                                              minimum=1, maximum=12, value=1, step=1, scale=0)
            with gr.Row():
                # dice button: randomizes the seed via js_random
                ctra_randomize_button = gr.Button(value="\U0001F3B2",
                                                  elem_classes="toolbutton", scale=0, min_width=80)
                ctra_gen_infapi = gr.Button("Generate with Inference API", scale=4)
                ctra_stop_button = gr.Button('Stop', variant='secondary',
                                             interactive=False, scale=1)

        with gr.Column():
            ctro['gallery'] = gr.Gallery(label="Generated images", show_label=False,
                                         #show_fullscreen_button=True,
                                         type='pil', format=cfg.DEFAULT_IMAGE_FORMAT,
                                         show_download_button=True, show_share_button=False)
            ctro['times'] = gr.Textbox(show_label=False, label="Timing")
            ctro['imagemeta'] = gr.Textbox(show_label=False, label="Image Metadata")
            with gr.Row():
                discard_image_button = gr.Button("Discard Image", scale=1)

    # XXX no idea if it's the best way
    selected_image = gr.Image(render=False)

    # Generation event: the gallery is wired as the first input so newly
    # generated images are appended to the existing contents.
    ctra_inference_event = gr.on(fn=infer_api_fn, triggers=[ctra_gen_infapi.click],
        inputs=[ctro['gallery']] + list(ctra.values()), outputs=[ctro['gallery'], ctro['times']])
    # Enable Stop while generating; Stop disables itself and cancels the event.
    ctra_gen_infapi.click(lambda: gr.update(interactive=True), None, ctra_stop_button)
    ctra_stop_button.click(lambda: gr.update(interactive=False), None, ctra_stop_button,
        cancels=[ctra_inference_event])
    ctra_randomize_button.click(None, js=js_random, outputs=ctra['seed'])
    if 'download' in cfg.EDIT_MODELS:
        ctra_update_infmod.click(update_inference_models, inputs=[], outputs=ctra['model'])

    def discard_image(state, gallery):
        # Drop the currently selected image (tracked in state) from the gallery.
        toremove = state.get('selected')
        res = []
        for image in gallery:
            # gallery entries are (image, caption) pairs
            if toremove == image[0]:
                state['selected'] = None
            else:
                res.append(image)
        return res
    discard_image_button.click(discard_image, inputs=[state, ctro['gallery']], outputs=[ctro['gallery']])

    def on_select(value, evt: gr.SelectData, state):
        # Show the selected image's embedded metadata and remember the selection.
        #return f"The {evt.target} component was selected, index {evt.index}, and its value was {value}."
        res = ''
        index = evt.index
        imagelist = value
        if index >= 0 and index < len(imagelist):
            image, caption = imagelist[index]
            res = get_image_meta_str(image)
            state['selected'] = image
        else:
            state['selected'] = None
        return res

    ctro['gallery'].select(on_select, [ctro['gallery'], state], [ctro['imagemeta']])

if __name__ == '__main__':
    app.launch(show_error=True, debug=True)
|
250 |
+
|
251 |
+
|
252 |
+
|
config.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''app config — all values are read from environment variables at import time'''
import os
from pathlib import Path

# Hugging Face API token used for inference and Hub queries (optional)
HF_TOKEN_SD = os.environ.get('HF_TOKEN_SD')

# if set, generated images will be saved here
AUTOSAVE_DIR = os.environ.get('AUTOSAVE_DIR', '')

# 1 enables debug (any non-empty value is treated as enabled)
DEBUG = os.environ.get('DEBUG')

# default image format (png, jpeg, webp)
DEFAULT_IMAGE_FORMAT = os.environ.get('DEFAULT_IMAGE_FORMAT', 'png')

# models available at start (comma separated)
# NOTE: this is [''] when the variable is unset — callers treat '' as "no model"
MODEL_LIST = os.environ.get('MODEL_LIST', '').split(',')

# controls the model list editing (comma separated):
# download: enable fetching available models from the Hub
# edit: enable editing the field
EDIT_MODELS = os.environ.get('EDIT_MODELS', '').split(',')
|
24 |
+
|
imagemeta.py
ADDED
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from PIL import Image, ExifTags, PngImagePlugin
|
2 |
+
from sd_parsers import ParserManager
|
3 |
+
from io import BytesIO
|
4 |
+
from pathlib import Path
|
5 |
+
from os import path
|
6 |
+
import sys
|
7 |
+
import time
|
8 |
+
from typing import Union
|
9 |
+
|
10 |
+
#def format_metadata(params: dict) -> str:
|
11 |
+
def format_metadata(params):
    """Render generation parameters as an A1111-style 'parameters' string.

    Layout: prompt line, optional <lora:name:weight> line, optional negative
    prompt line, then one 'Key: value, ...' info line.  Missing keys are
    skipped instead of raising (the caller only stores truthy values).
    """
    lines = []

    lines.append(params.get('prompt', ''))

    loras = params.get('lora', {})
    lorainfo = []
    for name, props in sorted(loras.items()):
        weight = props.get('weight')
        weight = float(weight) if weight else 0.0
        if name and weight:
            lorainfo.append('<lora:{}:{}>'.format(name, weight))
    if lorainfo:
        lines.append(''.join(lorainfo))

    negative_prompt = params.get('negative_prompt')
    if negative_prompt is not None:
        lines.append('Negative prompt: ' + negative_prompt)

    info = []

    steps = params.get('num_inference_steps')
    if steps is not None:
        info.append(('Steps', steps))

    sampler = params.get('sampler')
    if sampler:
        info.append(('Sampler', sampler))

    guidance_scale = params.get('guidance_scale')
    if guidance_scale is not None:
        info.append(('CFG Scale', guidance_scale))

    seed = params.get('seed', -1)
    if seed != -1:
        info.append(('Seed', seed))

    width, height = params.get('width'), params.get('height')
    if width and height:
        info.append(('Size', '{}x{}'.format(width, height)))

    model = params.get('model')
    if model is not None:
        if model.endswith('.safetensors'):
            # local checkpoint file: keep only the basename without extension
            model = path.basename(model).rsplit('.', 1)[0]
        # else assume it's a model id
        info.append(('Model', model))

    mode = params.get('mode')  # Img2Img, Txt2Img
    if mode is not None:
        info.append(('Mode', mode))

    if mode == 'Img2Img':
        strength = params.get('strength')
        if strength is not None:
            info.append(('Denoising strength', strength))

    clip_skip = params.get('clip_skip', 0)
    if clip_skip >= 1:
        # stored 0-based internally (see extract_params_inference);
        # A1111 metadata uses the 1-based convention
        info.append(('Clip skip', clip_skip + 1))

    controlnet_model = params.get('cnet_model', '')
    controlnet_conditioning_scale = params.get('controlnet_conditioning_scale', 0.0)
    if controlnet_model and controlnet_conditioning_scale >= 0.001:
        # BUGFIX: the 'ControlNet' entry previously repeated the conditioning
        # scale; it should name the ControlNet model.
        info.append(('ControlNet', controlnet_model))
        info.append(('ControlNet Conditioning Scale', controlnet_conditioning_scale))
        if params.get('guess_mode'):
            info.append(('ControlNet Guess Mode', 1))

    vae = params.get('vae')
    if vae is not None:
        info.append(('VAE', vae))

    # boolean feature flags: emitted as '<Name>: 1' when enabled
    for (opt, meta) in [
        ('compel', 'Compel'),
        ('freeu', 'FreeU'),
        ('resadapter_norm', 'ResAdapter Normalization'),
        ('vae_tiling', 'VAE Tiling'),
    ]:
        if params.get(opt):
            info.append((meta, 1))

    # scalar options: emitted only when meaningfully non-zero
    for (opt, meta) in [
        ('eta', 'Eta'),
        ('pag_adaptive_scale', 'PAG Adaptive Scale'),
        ('pag_scale', 'PAG Scale'),
        ('sag_scale', 'SAG Scale'),
        ('token_merging_ratio', 'Token Merging Ratio'),
    ]:
        val = params.get(opt, 0.0)
        if val and val >= 0.01:
            info.append((meta, val))

    lines.append(', '.join(['{}: {}'.format(k, v) for (k, v) in info]))

    return '\n'.join(lines)
|
104 |
+
|
105 |
+
#def add_metadata_to_pil_image(pil_image: Image, params: Union[dict, str])-> None:
|
106 |
+
def add_metadata_to_pil_image(pil_image, params):
    '''add generation parameters to the image info fields, in a Gradio-compatible way

    params may be a preformatted metadata string or a parameter dict
    (rendered via format_metadata).  Mutates pil_image in place: sets the
    'parameters' info key (the PNG text key A1111-style tools read) and an
    EXIF UserComment carrying the same text.
    '''
    if isinstance(params, str):
        metadata = params
    else:
        metadata = format_metadata(params)

    pil_image.info['parameters'] = metadata

    # borrowed from piexif: UserComment is an 8-byte charset marker followed
    # by the text, here UTF-16 big-endian
    usercomment = b'UNICODE\0' + metadata.encode('utf_16_be', errors='replace')
    # The PIL Exif encoder detects both bytes and bytearrays as sequences of
    # integers, so they get encoded with the wrong type, and most tools won't
    # interpret that as text. A list wrapping a bytearray dodges those
    # heuristics, correctly storing the data as a byte sequence.
    usercomment = [bytearray(usercomment)]
    exif = pil_image.getexif()
    exif.setdefault(ExifTags.IFD.Exif, {})[ExifTags.Base.UserComment] = usercomment
    # serialize so save paths that pass info['exif'] straight through keep it
    pil_image.info['exif'] = exif.tobytes()
|
126 |
+
|
127 |
+
def get_image_meta_str(image):
    """Return a human-readable summary of an image's generation metadata.

    Combines basic file info with whatever sd_parsers extracts from the
    embedded metadata (prompts, sampler, seed, steps, model, extras).
    """
    res = []
    # ROBUSTNESS: in-memory PIL images (not loaded via Image.open) may not
    # carry 'filename'/'format' attributes at all, so guard with getattr
    filename = getattr(image, 'filename', '')
    if filename:
        res.append('Filename: {}'.format(filename))
    if getattr(image, 'format', None):
        res.append('Format: {}'.format(image.format))
    res.append('Size: {}x{}'.format(*image.size))
    info = ParserManager().parse(image)
    if info:
        for prompt in info.prompts:
            if prompt:
                res.append('Prompt: {}'.format(prompt))
        for negative_prompt in info.negative_prompts:
            if negative_prompt:
                res.append('Negative Prompt: {}'.format(negative_prompt))
        for sampler in info.samplers:
            if sampler.name:
                res.append('Sampler: {}'.format(sampler.name))
            seed = sampler.parameters.get('seed')
            if seed:
                res.append('Seed: {}'.format(seed))
            steps = sampler.parameters.get('steps')
            if steps:
                res.append('Steps: {}'.format(steps))
            if sampler.model and sampler.model.name:
                res.append('Model: {}'.format(sampler.model.name))
        for (meta, value) in sorted(info.metadata.items()):
            res.append('{}: {}'.format(meta, value))
    return '\n'.join(res)
|
156 |
+
|
157 |
+
#def extract_meta_for_saving(pil_image: Image):
|
158 |
+
def extract_meta_for_saving(pil_image):
    """Collect keyword arguments for Image.save that carry over metadata.

    Returns a dict that may contain 'exif' (raw EXIF bytes) and/or 'pnginfo'
    (a PngInfo holding every string-keyed, string-valued info entry).
    """
    save_kwargs = {}

    exif_bytes = pil_image.info.get('exif')
    if exif_bytes:
        save_kwargs["exif"] = exif_bytes

    text_entries = {k: v for k, v in pil_image.info.items()
                    if isinstance(k, str) and isinstance(v, str)}
    if text_entries:
        png_meta = PngImagePlugin.PngInfo()
        for k, v in text_entries.items():
            png_meta.add_text(k, v)
        save_kwargs["pnginfo"] = png_meta

    return save_kwargs
|
175 |
+
|
176 |
+
def filebytes(pil_image, format):
    """Serialize pil_image (metadata included) to bytes in the given format."""
    save_kwargs = extract_meta_for_saving(pil_image)
    # lossy formats get a fixed quality setting
    if format in ['jpeg', 'jpg', 'webp']:
        save_kwargs['quality'] = 90
    with BytesIO() as buffer:
        pil_image.save(buffer, format=format, **save_kwargs)
        return buffer.getvalue()
|
183 |
+
|
184 |
+
def open_file_timestamp(basename, directory='.', extension='.png'):
    """Open a new '<basename>-NNNN.<extension>' file for exclusive binary write.

    Picks the first free 4-digit counter.  If basename itself ends in
    '-NNNN', that counter is taken as the starting point.  Returns the open
    file object; 'xb' mode guarantees no existing file is ever overwritten.
    """
    output = Path(directory)
    # BUGFIX: normalize so both 'png' and '.png' are accepted — the old
    # default '.png' produced double-dot names like 'img-0000..png'
    extension = extension.lstrip('.')
    count = 0

    optimize = True
    stem, sep, suffix = basename.rpartition('-')
    if sep and len(suffix) == 4 and suffix.isdigit():
        # caller pinned the starting counter; skip the glob optimization
        count = int(suffix)
        basename = stem
        optimize = False

    def getfilepath(count):
        return output / '{}-{:04}.{}'.format(basename, count, extension)
    filepath = getfilepath(count)

    # optimization: jump to the highest existing counter instead of probing
    # every name from zero
    if optimize:
        existing = sorted(output.glob(basename + '*.' + extension))
        if existing:
            count = 1
            # BUGFIX: strip the '.<extension>' suffix — the old prefix slice
            # [:len(extension)] kept only the first few characters, so the
            # counter was never found and the scan always restarted at 1
            last = existing[-1].name[:-(len(extension) + 1)]
            sepnum = (last.rsplit('-', 1) + [''])[1]
            if sepnum.isdigit():
                count = int(sepnum)
            filepath = getfilepath(count)

    while True:
        try:
            return open(filepath, 'xb')
        except FileExistsError:
            count += 1
            filepath = getfilepath(count)
|
217 |
+
|
218 |
+
def save_image_with_meta(pil_image, basename, directory, format="png"):
    """Encode pil_image (metadata included) and write it under a unique
    counter-suffixed name inside directory.  Returns the written path."""
    encoded = filebytes(pil_image, format)
    outfile = open_file_timestamp(basename, directory, format)
    with outfile:
        outfile.write(encoded)
    return outfile.name
|
223 |
+
|
224 |
+
|
225 |
+
def save_image_timestamp(pil_image, directory, format="png"):
    """Save pil_image into directory as YYYYMMDD-NNNN.<format>,
    with the daily counter starting at 0001."""
    dated_basename = time.strftime('%Y%m%d-0001')
    return save_image_with_meta(pil_image, dated_basename, Path(directory), format)
|
229 |
+
|
230 |
+
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
gradio>=4.20.0
|
2 |
+
sd-parsers
|
3 |
+
|