iimmortall committed on
Commit
563d40d
Β·
verified Β·
1 Parent(s): 3285290

support docker

Browse files
Files changed (1) hide show
  1. app_docker.py +207 -0
app_docker.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ import os
3
+ import sys
4
+ import datetime
5
+ import gradio as gr
6
+ import numpy as np
7
+ from PIL import Image
8
+ import spaces #[uncomment to use ZeroGPU]
9
+ import torch
10
+ from torchvision.transforms import ToTensor, ToPILImage
11
+
12
+
13
+ # -------------------------- HuggingFace -------------------------------
14
+ from huggingface_hub import hf_hub_download, snapshot_download
15
+ model_name = "iimmortall/UltraFusion"
16
+ auth_token = os.getenv("HF_AUTH_TOKEN")
17
+ model_folder = snapshot_download(repo_id=model_name, token=auth_token, local_dir="/home/user/app")
18
+
19
+ from ultrafusion_utils import load_model, run_ultrafusion, check_input
20
+ PYCUDA_FLAG = True
21
+ try :
22
+ import pycuda
23
+ except Exception:
24
+ PYCUDA_FLAG = False
25
+ print("No pycuda!!!")
26
+
27
+ RUN_TIMES = 0
28
+
29
+ to_tensor = ToTensor()
30
+ to_pil = ToPILImage()
31
+ ultrafusion_pipe, flow_model = load_model()
32
+
33
+ device = "cuda" if torch.cuda.is_available() else "cpu"
34
+ if torch.cuda.is_available():
35
+ torch_dtype = torch.float16
36
+ else:
37
+ torch_dtype = torch.float32
38
+
39
+ MAX_SEED = np.iinfo(np.int32).max
40
+ MAX_IMAGE_SIZE = 1024
41
+
42
+ # @spaces.GPU(duration=60) #[uncomment to use ZeroGPU]
43
+ def infer(
44
+ under_expo_img,
45
+ over_expo_img,
46
+ num_inference_steps
47
+ ):
48
+ print(under_expo_img.size)
49
+ print("reciving image")
50
+
51
+ under_expo_img_lr, over_expo_img_lr, under_expo_img, over_expo_img, use_bgu = check_input(under_expo_img, over_expo_img, max_l=1500)
52
+
53
+ global PYCUDA_FLAG
54
+ if not PYCUDA_FLAG and use_bgu:
55
+ print("No pycuda, do not run BGU.")
56
+ use_bgu = False
57
+
58
+ ue = to_tensor(under_expo_img_lr).unsqueeze(dim=0).to("cuda")
59
+ oe = to_tensor(over_expo_img_lr).unsqueeze(dim=0).to("cuda")
60
+ ue_hr = to_tensor(under_expo_img).unsqueeze(dim=0).to("cuda")
61
+ oe_hr = to_tensor(over_expo_img).unsqueeze(dim=0).to("cuda")
62
+
63
+ print("num_inference_steps:", num_inference_steps)
64
+ try:
65
+ if num_inference_steps is None:
66
+ num_inference_steps = 20
67
+ num_inference_steps = int(num_inference_steps)
68
+ except Exception as e:
69
+ num_inference_steps = 20
70
+
71
+ out = run_ultrafusion(ue, oe, ue_hr, oe_hr, use_bgu, 'test', flow_model=flow_model, pipe=ultrafusion_pipe, steps=num_inference_steps, consistent_start=None, test_bs=8)
72
+
73
+ out = out.clamp(0, 1).squeeze()
74
+ out_pil = to_pil(out)
75
+
76
+ global RUN_TIMES
77
+ RUN_TIMES = RUN_TIMES + 1
78
+ print("---------------------------- Using Times---------------------------------------")
79
+ print(f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}: Using times: {RUN_TIMES}")
80
+
81
+ return out_pil
82
+
83
+
84
+ def build_demo():
85
+ examples= [
86
+ [os.path.join("examples", img_name, "ue.jpg"),
87
+ os.path.join("examples", img_name, "oe.jpg")] for img_name in sorted(os.listdir("examples"))
88
+ ]
89
+ IMG_W = 320
90
+ IMG_H = 240
91
+ css = """
92
+ #col-container {
93
+ margin: 0 auto;
94
+ max-width: 640px;
95
+ }
96
+ """
97
+ # max-heigh: 1500px;
98
+
99
+ _README_ = r"""
100
+
101
+ - This is an HDR algorithm that fuses two images with different exposures.
102
+
103
+ - This can fuse two images with a very large exposure difference, even up to 9 stops.
104
+
105
+ - The two input images should have the same resolution; otherwise, an error will be reported.
106
+
107
+ - We are committed to not storing any data you upload or the results of its processing.
108
+
109
+ """
110
+ # - The maximum resolution we support is 1500 x 1500. If the images you upload are larger than this, they will be downscaled while maintaining the original aspect ratio.
111
+ # - This is only for internal testing. Do not share it publicly.
112
+ _CITE_ = r"""
113
+ πŸ“ **Citation**
114
+
115
+ If you find our work useful for your research or applications, please cite using this bibtex:
116
+ ```bibtex
117
+ @article{xxx,
118
+ title={xxx},
119
+ author={xxx},
120
+ journal={arXiv preprint arXiv:xx.xx},
121
+ year={2024}
122
+ }
123
+ ```
124
+
125
+ πŸ“‹ **License**
126
+
127
+ CC BY-NC 4.0. LICENSE.
128
+
129
+ πŸ“§ **Contact**
130
+
131
+ If you have any questions, feel free to open a discussion or contact us at <b>[email protected]</b>.
132
+ """
133
+
134
+ with gr.Blocks(css=css) as demo:
135
+ with gr.Column(elem_id="col-container"):
136
+ gr.Markdown("""<h1 style="text-align: center; font-size: 32px;"><b>UltraFusion HDR πŸ“Έβœ¨</b></h1>""")
137
+ # gr.Markdown("""<h1 style="text-align: center; font-size: 32px;"><b>OpenImagingLab</b></h1>""")
138
+ gr.Markdown("""<h1 style="text-align: center; font-size: 24px;"><b>How do I use it?</b></h1>""")
139
+ with gr.Row():
140
+ gr.Image("ui/en-short.png", width=IMG_W//3, show_label=False, interactive=False, show_download_button=False)
141
+ gr.Image("ui/en-long.png", width=IMG_W//3, show_label=False, interactive=False, show_download_button=False)
142
+ gr.Image("ui/en-run.png", width=IMG_W//3, show_label=False, interactive=False, show_download_button=False)
143
+
144
+ with gr.Row():
145
+ gr.Markdown("""<h1 style="text-align: center; font-size: 12px;"><b>βž€ Tap the center of the camera screen, then drag the β˜€οΈŽ icon downward to capture a photo with a shorter exposure.</b></h1>""")
146
+ gr.Markdown("""<h1 style="text-align: center; font-size: 12px;"><b>➁ Tap the center of the camera screen, then drag the β˜€οΈŽ icon upward to capture a photo with a longer exposure.</b></h1>""")
147
+ gr.Markdown("""<h1 style="text-align: center; font-size: 12px;"><b>βž‚ Upload the short and long exposure images, then click the 'Run' button to receive the result. </b></h1>""")
148
+
149
+ gr.Markdown("""<h1 style="text-align: center; font-size: 24px;"><b>Enjoy it!</b></h1>""")
150
+ with gr.Row():
151
+ under_expo_img = gr.Image(label="Short Exposure Image", show_label=True,
152
+ image_mode="RGB",
153
+ sources=["upload", ],
154
+ width=IMG_W,
155
+ height=IMG_H,
156
+ type="pil"
157
+ )
158
+ over_expo_img = gr.Image(label="Long Exposure Image", show_label=True,
159
+ image_mode="RGB",
160
+ sources=["upload", ],
161
+ width=IMG_W,
162
+ height=IMG_H,
163
+ type="pil"
164
+ )
165
+ with gr.Row():
166
+ run_button = gr.Button("Run", variant="primary") # scale=0,
167
+
168
+ result = gr.Image(label="Result", show_label=True,
169
+ type='pil',
170
+ image_mode='RGB',
171
+ format="png",
172
+ width=IMG_W*2,
173
+ height=IMG_H*2,
174
+ )
175
+ gr.Markdown(r"""<h1 style="text-align: center; font-size: 18px;"><b>Like it? Click the button πŸ“₯ on the image to download.</b></h1>""") # width="100" height="100" <img src="ui/download.svg" alt="download">
176
+ with gr.Accordion("Advanced Settings", open=True):
177
+ num_inference_steps = gr.Slider(
178
+ label="Number of inference steps",
179
+ minimum=2,
180
+ maximum=50,
181
+ step=1,
182
+ value=20, # Replace with defaults that work for your model
183
+ interactive=True
184
+ )
185
+
186
+ gr.Examples(
187
+ examples=examples,
188
+ inputs=[under_expo_img, over_expo_img, num_inference_steps],
189
+ label="Examples",
190
+ # examples_per_page=10,
191
+ fn=infer,
192
+ cache_examples=True,
193
+ outputs=[result,],
194
+ )
195
+ gr.Markdown(_README_)
196
+ # gr.Markdown(_CITE_)
197
+ run_button.click(fn=infer,
198
+ inputs=[under_expo_img, over_expo_img, num_inference_steps],
199
+ outputs=[result,],
200
+ )
201
+ return demo
202
+
203
+ if __name__ == "__main__":
204
+ demo = build_demo()
205
+ demo.queue(max_size=10)
206
+ demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
207
+ # demo.launch(server_name="0.0.0.0", debug=True, show_api=True, show_error=True, share=False)