YoBatM committed
Commit 99b955f · verified · Parent(s): f2be1a4

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +6 -0
  2. .gitignore +7 -0
  3. LICENSE +21 -0
  4. Readme.md +673 -0
  5. THIRD-PARTY-LICENSES +143 -0
  6. benchmark-openvino.bat +23 -0
  7. benchmark.bat +23 -0
  8. configs/lcm-lora-models.txt +4 -0
  9. configs/lcm-models.txt +8 -0
  10. configs/openvino-lcm-models.txt +9 -0
  11. configs/stable-diffusion-models.txt +7 -0
  12. controlnet_models/Readme.txt +3 -0
  13. docs/images/2steps-inference.jpg +0 -0
  14. docs/images/ARCGPU.png +0 -0
  15. docs/images/fastcpu-cli.png +0 -0
  16. docs/images/fastcpu-webui.png +3 -0
  17. docs/images/fastsdcpu-android-termux-pixel7.png +3 -0
  18. docs/images/fastsdcpu-api.png +0 -0
  19. docs/images/fastsdcpu-gui.jpg +3 -0
  20. docs/images/fastsdcpu-mac-gui.jpg +0 -0
  21. docs/images/fastsdcpu-screenshot.png +3 -0
  22. docs/images/fastsdcpu-webui.png +3 -0
  23. docs/images/fastsdcpu_flux_on_cpu.png +3 -0
  24. install-mac.sh +31 -0
  25. install.bat +29 -0
  26. install.sh +28 -0
  27. lora_models/HoloEnV2.safetensors +3 -0
  28. lora_models/Readme.txt +3 -0
  29. models/gguf/clip/readme.txt +1 -0
  30. models/gguf/diffusion/readme.txt +1 -0
  31. models/gguf/t5xxl/readme.txt +1 -0
  32. models/gguf/vae/readme.txt +1 -0
  33. requirements.txt +18 -0
  34. src/__init__.py +0 -0
  35. src/app.py +535 -0
  36. src/app_settings.py +124 -0
  37. src/backend/__init__.py +0 -0
  38. src/backend/annotators/canny_control.py +15 -0
  39. src/backend/annotators/control_interface.py +12 -0
  40. src/backend/annotators/depth_control.py +15 -0
  41. src/backend/annotators/image_control_factory.py +31 -0
  42. src/backend/annotators/lineart_control.py +11 -0
  43. src/backend/annotators/mlsd_control.py +10 -0
  44. src/backend/annotators/normal_control.py +10 -0
  45. src/backend/annotators/pose_control.py +10 -0
  46. src/backend/annotators/shuffle_control.py +10 -0
  47. src/backend/annotators/softedge_control.py +10 -0
  48. src/backend/api/models/response.py +16 -0
  49. src/backend/api/web.py +103 -0
  50. src/backend/base64_image.py +21 -0
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ docs/images/fastcpu-webui.png filter=lfs diff=lfs merge=lfs -text
+ docs/images/fastsdcpu-android-termux-pixel7.png filter=lfs diff=lfs merge=lfs -text
+ docs/images/fastsdcpu-gui.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/images/fastsdcpu-screenshot.png filter=lfs diff=lfs merge=lfs -text
+ docs/images/fastsdcpu-webui.png filter=lfs diff=lfs merge=lfs -text
+ docs/images/fastsdcpu_flux_on_cpu.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
env
*.bak
*.pyc
__pycache__
results
# excluding user settings for the GUI frontend
configs/settings.yaml
LICENSE ADDED
MIT License

Copyright (c) 2023 Rupesh Sreeraman

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Readme.md ADDED
# FastSD CPU :sparkles:[![Mentioned in Awesome OpenVINO](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/openvinotoolkit/awesome-openvino)

<div align="center">
  <a href="https://trendshift.io/repositories/3957" target="_blank"><img src="https://trendshift.io/api/badge/repositories/3957" alt="rupeshs%2Ffastsdcpu | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</div>

FastSD CPU is a faster version of Stable Diffusion on CPU. It is based on [Latent Consistency Models](https://github.com/luosiallen/latent-consistency-model) and
[Adversarial Diffusion Distillation](https://nolowiz.com/fast-stable-diffusion-on-cpu-using-fastsd-cpu-and-openvino/).

![FastSD CPU screenshot](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastsdcpu-webui.png)
The following interfaces are available:

- Desktop GUI, basic text-to-image generation (Qt, faster)
- WebUI (advanced features: LoRA, ControlNet, etc.)
- CLI (command-line interface)

🚀 Using __OpenVINO (SDXS-512-0.9)__, it took __0.82 seconds__ (__820 milliseconds__) to create a single 512x512 image on a __Core i7-12700__.

## Table of Contents

- [Supported platforms](#supported-platforms)
- [Dependencies](#dependencies)
- [Memory requirements](#memory-requirements)
- [Features](#features)
- [Benchmarks](#fast-inference-benchmarks)
- [OpenVINO support](#openvino)
- [Installation](#installation)
- [AI PC support - OpenVINO](#ai-pc-support)
- [GGUF support (Flux)](#gguf-support)
- [Real-time text to image (EXPERIMENTAL)](#real-time-text-to-image)
- [Models](#models)
- [How to use LoRA models](#useloramodels)
- [How to use ControlNet](#usecontrolnet)
- [Android](#android)
- [Raspberry Pi 4](#raspberry)
- [Orange Pi 5](#orangepi)
- [API support](#apisupport)
- [License](#license)
- [Contributors](#contributors)

## Supported platforms⚡️

FastSD CPU works on the following platforms:

- Windows
- Linux
- Mac
- Android + Termux
- Raspberry Pi 4

## Dependencies

- Python 3.10 or 3.11 (please ensure that a working Python 3.10 or 3.11 installation is available on the system)

## Memory requirements

Minimum system RAM requirements for FastSD CPU:

Model (LCM, OpenVINO): SD Turbo, 1 step, 512x512

Model (LCM-LoRA): Dreamshaper v8, 3 steps, 512x512

| Mode     | Min RAM |
| -------- | ------- |
| LCM      | 2 GB    |
| LCM-LoRA | 4 GB    |
| OpenVINO | 11 GB   |

Enabling the tiny decoder (TAESD) saves roughly 2 GB of memory; in OpenVINO mode, for example, memory usage drops to about 9 GB.

:exclamation: Please note that a guidance scale greater than 1 increases RAM usage and slows inference.

## Features

- Desktop GUI, web UI and CLI
- Supports 256, 512, 768 and 1024 image sizes
- Supports Windows, Linux and Mac
- Saves the image and the diffusion settings used to generate it
- Settings to control steps, guidance and seed
- Added safety checker setting
- Maximum inference steps increased to 25
- Added [OpenVINO](https://github.com/openvinotoolkit/openvino) support
- Fixed OpenVINO image reproducibility issue
- Fixed OpenVINO high RAM usage, thanks [deinferno](https://github.com/deinferno)
- Added multiple image generation support
- Application settings
- Added Tiny Auto Encoder for SD (TAESD) support, 1.4x speed boost (fast, moderate quality)
- Safety checker disabled by default
- Added SDXL and SSD-1B (1B) LCM models
- Added LCM-LoRA support; works well with fine-tuned Stable Diffusion 1.5 or SDXL models
- Added negative prompt support in LCM-LoRA mode
- LCM-LoRA models can be configured using a text configuration file
- Added support for custom OpenVINO models (LCM-LoRA baked)
- OpenVINO models now support negative prompts (set guidance > 1.0)
- Real-time inference support: generates images while you type (experimental)
- Fast 2-3 step inference
- LCM-LoRA fused models for faster inference
- Supports integrated GPUs (iGPU) using OpenVINO (`export DEVICE=GPU`)
- 5.7x speed using OpenVINO (2 steps, tiny autoencoder)
- Image-to-image support (use the web UI)
- OpenVINO image-to-image support
- Fast 1-step inference (SDXL Turbo)
- Added SD Turbo support
- Added image-to-image support for Turbo models (PyTorch and OpenVINO)
- Added image variations support
- Added 2x upscaler (EDSR and tiled SD upscale (experimental)); thanks [monstruosoft](https://github.com/monstruosoft) for SD upscale
- Works on Android + Termux + PRoot
- Added interactive CLI, thanks [monstruosoft](https://github.com/monstruosoft)
- Added basic LoRA support to CLI and web UI
- ONNX EDSR 2x upscale
- Added SDXL-Lightning support
- Added SDXL-Lightning OpenVINO support (int8)
- Added multi-LoRA support, thanks [monstruosoft](https://github.com/monstruosoft)
- Added basic ControlNet v1.1 support (LCM-LoRA mode), thanks [monstruosoft](https://github.com/monstruosoft)
- Added ControlNet annotators (Canny, Depth, LineArt, MLSD, NormalBAE, Pose, SoftEdge, Shuffle)
- Added SDXS-512-0.9 support
- Added SDXS-512-0.9 OpenVINO, fast 1-step inference (0.8 seconds to generate a 512x512 image)
- Default model changed to SDXS-512-0.9
- Faster real-time image generation
- Added NPU device check
- Reverted default model to SD Turbo
- Updated real-time UI
- Added Hyper-SD support
- 1-step fast inference support for SDXL and SD 1.5
- Experimental support for single-file Safetensors SD 1.5 models (Civitai models): simply add the local model path to the `configs/stable-diffusion-models.txt` file
- Added REST API support
- Added Aura SR (4x)/GigaGAN-based upscaler support
- Added Aura SR v2 upscaler support
- Added FLUX.1-schnell OpenVINO int4 support
- Added CLIP skip support
- Added token merging support
- Added Intel AI PC support
- AI PC NPU (power-efficient inference using OpenVINO) supports text to image, image to image and image variations
- Added [TAEF1 (Tiny autoencoder for FLUX.1) OpenVINO](https://huggingface.co/rupeshs/taef1-openvino) support
- Added image-to-image and image variations Qt GUI support, thanks [monstruosoft](https://github.com/monstruosoft)

<a id="fast-inference-benchmarks"></a>

## Fast Inference Benchmarks

### 🚀 Fast 1-step inference with Hyper-SD

#### Stable Diffusion 1.5

Works with LCM-LoRA mode.
Fast 1-step inference is supported on the `runwayml/stable-diffusion-v1-5` model; select the `rupeshs/hypersd-sd1-5-1-step-lora` LCM-LoRA model from the settings.

#### Stable Diffusion XL

Works with LCM and LCM-OpenVINO mode.

- *Hyper-SD SDXL 1 step* - [rupeshs/hyper-sd-sdxl-1-step](https://huggingface.co/rupeshs/hyper-sd-sdxl-1-step)

- *Hyper-SD SDXL 1 step OpenVINO* - [rupeshs/hyper-sd-sdxl-1-step-openvino-int8](https://huggingface.co/rupeshs/hyper-sd-sdxl-1-step-openvino-int8)

#### Inference Speed

Tested on a Core i7-12700, generating a __768x768__ image (1 step).

| Diffusion Pipeline | Latency |
| ------------------ | ------- |
| PyTorch            | 19s     |
| OpenVINO           | 13s     |
| OpenVINO + TAESDXL | 6.3s    |

### Fastest 1-step inference (SDXS-512-0.9)

:exclamation: This is an experimental model; only the text-to-image workflow is supported.

#### Inference Speed

Tested on a Core i7-12700, generating a __512x512__ image (1 step).

__SDXS-512-0.9__

| Diffusion Pipeline | Latency   |
| ------------------ | --------- |
| PyTorch            | 4.8s      |
| OpenVINO           | 3.8s      |
| OpenVINO + TAESD   | __0.82s__ |

### 🚀 Fast 1-step inference (SD/SDXL Turbo - Adversarial Diffusion Distillation, ADD)

Added support for ultra-fast 1-step inference using the [sdxl-turbo](https://huggingface.co/stabilityai/sdxl-turbo) model.

:exclamation: These SD Turbo models are intended for research purposes only.

#### Inference Speed

Tested on a Core i7-12700, generating a __512x512__ image (1 step).

__SD Turbo__

| Diffusion Pipeline | Latency |
| ------------------ | ------- |
| PyTorch            | 7.8s    |
| OpenVINO           | 5s      |
| OpenVINO + TAESD   | 1.7s    |

__SDXL Turbo__

| Diffusion Pipeline | Latency |
| ------------------ | ------- |
| PyTorch            | 10s     |
| OpenVINO           | 5.6s    |
| OpenVINO + TAESDXL | 2.5s    |

### 🚀 Fast 2-step inference (SDXL-Lightning - Adversarial Diffusion Distillation)

SDXL-Lightning works with LCM and LCM-OpenVINO mode. You can select these models from the app settings.

Tested on a Core i7-12700, generating a __768x768__ image (2 steps).

| Diffusion Pipeline | Latency |
| ------------------ | ------- |
| PyTorch            | 18s     |
| OpenVINO           | 12s     |
| OpenVINO + TAESDXL | 10s     |

- *SDXL-Lightning* - [rupeshs/SDXL-Lightning-2steps](https://huggingface.co/rupeshs/SDXL-Lightning-2steps)

- *SDXL-Lightning OpenVINO* - [rupeshs/SDXL-Lightning-2steps-openvino-int8](https://huggingface.co/rupeshs/SDXL-Lightning-2steps-openvino-int8)

### Fast 2-3 step inference (LCM)

FastSD CPU supports 2 to 3 step fast inference using the LCM-LoRA workflow. It works well with SD 1.5 models.

![2 Steps inference](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/2steps-inference.jpg)

### FLUX.1-schnell OpenVINO support

![FLUX Schnell OpenVINO](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastsdcpu_flux_on_cpu.png)

:exclamation: Important - please note the following points for the FLUX workflow:

- As of now only the text-to-image generation mode is supported
- Use OpenVINO mode
- Use the int4 model - *rupeshs/FLUX.1-schnell-openvino-int4*
- 512x512 image generation needs around __30 GB__ of system RAM

Tested on an Intel Core i7-12700, generating a __512x512__ image (3 steps).

| Diffusion Pipeline | Latency      |
| ------------------ | ------------ |
| OpenVINO           | 4 min 30 sec |

### Benchmark scripts

To benchmark, run the following batch files on Windows:

- `benchmark.bat` - to benchmark PyTorch
- `benchmark-openvino.bat` - to benchmark OpenVINO

Alternatively, you can run benchmarks by passing the `-b` command-line argument in CLI mode, as sketched below.
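On Linux/macOS, the same benchmarks can be invoked directly from an activated environment. A minimal sketch; the flags mirror the shipped `benchmark.bat` and `benchmark-openvino.bat` scripts included in this commit:

```bash
# Assumes the virtual environment is already activated (see "CLI mode" below).
# Benchmark the PyTorch pipeline:
python src/app.py -b

# Benchmark the OpenVINO pipeline with the SD Turbo OpenVINO model,
# mirroring benchmark-openvino.bat:
python src/app.py -b --use_openvino --openvino_lcm_model_id "rupeshs/sd-turbo-openvino"
```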
<a id="openvino"></a>

## OpenVINO support

FastSD CPU uses [OpenVINO](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html) to speed up inference.
Thanks [deinferno](https://github.com/deinferno) for the OpenVINO model contribution.
We can get a 2x speed improvement when using OpenVINO.
Thanks [Disty0](https://github.com/Disty0) for the conversion script.

### OpenVINO SDXL models

These models are converted for direct use with FastSD CPU. They are compressed to int8 to reduce the file size (from 10 GB to 4.4 GB) using [NNCF](https://github.com/openvinotoolkit/nncf).

- Hyper-SD SDXL 1 step - [rupeshs/hyper-sd-sdxl-1-step-openvino-int8](https://huggingface.co/rupeshs/hyper-sd-sdxl-1-step-openvino-int8)
- SDXL Lightning 2 steps - [rupeshs/SDXL-Lightning-2steps-openvino-int8](https://huggingface.co/rupeshs/SDXL-Lightning-2steps-openvino-int8)

### OpenVINO SD Turbo models

We have converted SD/SDXL Turbo models to OpenVINO for fast inference on CPU. These models are intended for research purposes only. We also converted the TAESDXL model to OpenVINO.

- *SD Turbo OpenVINO* - [rupeshs/sd-turbo-openvino](https://huggingface.co/rupeshs/sd-turbo-openvino)
- *SDXL Turbo OpenVINO int8* - [rupeshs/sdxl-turbo-openvino-int8](https://huggingface.co/rupeshs/sdxl-turbo-openvino-int8)
- *TAESDXL OpenVINO* - [rupeshs/taesdxl-openvino](https://huggingface.co/rupeshs/taesdxl-openvino)

You can use these models directly in FastSD CPU.

### Convert SD 1.5 models to OpenVINO LCM-LoRA fused models

The converter first creates an LCM-LoRA baked-in model, replaces the scheduler with LCM, and then converts it into an OpenVINO model. For more details, check [LCM OpenVINO Converter](https://github.com/rupeshs/lcm-openvino-converter); you can use this tool to convert any Stable Diffusion 1.5 fine-tuned model to OpenVINO.

<a id="ai-pc-support"></a>

## Intel AI PC support - OpenVINO (CPU, GPU, NPU)

FastSD now supports AI PCs with Intel® Core™ Ultra processors. [To learn more about AI PC and OpenVINO](https://nolowiz.com/ai-pc-and-openvino-quick-and-simple-guide/).

### GPU

For GPU mode, set the `DEVICE=GPU` environment variable and run the web UI (see the sketch after the benchmark image below). A FastSD GPU benchmark on an AI PC is shown below.

![FastSD AI PC Arc GPU benchmark](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/ARCGPU.png)
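A minimal sketch for Linux/macOS shells; on Windows, use `set DEVICE=GPU` before running `start-webui.bat`. The `DEVICE` variable is the one documented in the features list (`export DEVICE=GPU`):

```bash
# Run the web UI with OpenVINO on an integrated or discrete Intel GPU.
export DEVICE=GPU
./start-webui.sh
```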
### NPU

FastSD CPU now supports the power-efficient NPU (Neural Processing Unit) that comes with Intel Core Ultra processors.

FastSD has been tested with the NPUs of the following Intel processors:

- Intel Core Ultra Series 1 (Meteor Lake)
- Intel Core Ultra Series 2 (Lunar Lake)

Currently FastSD supports this model on the NPU: [rupeshs/sd15-lcm-square-openvino-int8](https://huggingface.co/rupeshs/sd15-lcm-square-openvino-int8).

The following modes are supported on the NPU:

- Text to image
- Image to image
- Image variations

To run a model on the NPU, follow these steps (please make sure that your AI PC's NPU driver is up to date); see the sketch after this list:

- Start the web UI
- Select LCM-OpenVINO mode
- Select the models settings tab and select the OpenVINO model `rupeshs/sd15-lcm-square-openvino-int8`
- Set the device environment variable: `set DEVICE=NPU`
- Now it will run on the NPU
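A minimal sketch of the same steps in shell form, assuming the web UI start script shipped with the release (the README itself only shows the Windows `set DEVICE=NPU` form):

```bash
# Select rupeshs/sd15-lcm-square-openvino-int8 in the web UI model settings first.
# On Windows use: set DEVICE=NPU
export DEVICE=NPU
./start-webui.sh
```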
This is heterogeneous computing: the text encoder and UNet run on the NPU while the VAE runs on the GPU, thanks to OpenVINO.

Please note that the tiny autoencoder will not work in NPU mode.

*Thanks to Intel for providing an AI PC dev kit and Tiber cloud access to test FastSD; special thanks to [Pooja Baraskar](https://github.com/Pooja-B) and [Dmitriy Pastushenkov](https://github.com/DimaPastushenkov).*

<a id="gguf-support"></a>

## GGUF support - Flux

[GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) Flux models are supported via the [stablediffusion.cpp](https://github.com/leejet/stable-diffusion.cpp) shared library. Currently the Flux Schnell model is supported.

To use a GGUF model, use the web UI and select GGUF mode.

Tested on Windows and Linux.

:exclamation: The main advantage here is that the minimum system RAM required for the Flux workflow is reduced to around __12 GB__.

Supported mode - text to image

### How to run a Flux GGUF model

- Download the stablediffusion.cpp prebuilt shared library and place it inside the fastsdcpu folder.
  For Windows users, download [stable-diffusion.dll](https://huggingface.co/rupeshs/FastSD-Flux-GGUF/blob/main/stable-diffusion.dll)

  For Linux users, download [libstable-diffusion.so](https://huggingface.co/rupeshs/FastSD-Flux-GGUF/blob/main/libstable-diffusion.so)

  You can also build the library manually by following the guide *"Build stablediffusion.cpp shared library for GGUF flux model support"*

- Download the __diffusion model__ from [flux1-schnell-q4_0.gguf](https://huggingface.co/rupeshs/FastSD-Flux-GGUF/blob/main/flux1-schnell-q4_0.gguf) and place it inside the `models/gguf/diffusion` directory
- Download the __clip model__ from [clip_l_q4_0.gguf](https://huggingface.co/rupeshs/FastSD-Flux-GGUF/blob/main/clip_l_q4_0.gguf) and place it inside the `models/gguf/clip` directory
- Download the __T5-XXL model__ from [t5xxl_q4_0.gguf](https://huggingface.co/rupeshs/FastSD-Flux-GGUF/blob/main/t5xxl_q4_0.gguf) and place it inside the `models/gguf/t5xxl` directory
- Download the __VAE model__ from [ae.safetensors](https://huggingface.co/black-forest-labs/FLUX.1-schnell/blob/main/ae.safetensors) and place it inside the `models/gguf/vae` directory (a scripted download sketch follows this list)
- Start the web UI and select GGUF mode
- Select the models settings tab and select the GGUF diffusion, clip_l, t5xxl and VAE models
- Enter your prompt and generate an image
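The downloads above can also be scripted. A minimal sketch using the `huggingface-cli download` command from the `huggingface_hub` package (assumed to be installed separately); the repo and file names are the ones linked in the list above:

```bash
# Fetch the GGUF components into the folders FastSD CPU expects.
huggingface-cli download rupeshs/FastSD-Flux-GGUF flux1-schnell-q4_0.gguf --local-dir models/gguf/diffusion
huggingface-cli download rupeshs/FastSD-Flux-GGUF clip_l_q4_0.gguf --local-dir models/gguf/clip
huggingface-cli download rupeshs/FastSD-Flux-GGUF t5xxl_q4_0.gguf --local-dir models/gguf/t5xxl
huggingface-cli download black-forest-labs/FLUX.1-schnell ae.safetensors --local-dir models/gguf/vae
```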
### Build stablediffusion.cpp shared library for GGUF flux model support (optional)

To build the stablediffusion.cpp library, follow these steps:

- `git clone https://github.com/leejet/stable-diffusion.cpp`
- `cd stable-diffusion.cpp`
- `git pull origin master`
- `git submodule init`
- `git submodule update`
- `git checkout 14206fd48832ab600d9db75f15acb5062ae2c296`
- `cmake . -DSD_BUILD_SHARED_LIBS=ON`
- `cmake --build . --config Release`
- Copy the stablediffusion dll/so file to the fastsdcpu folder

<a id="real-time-text-to-image"></a>

## Real-time text to image (EXPERIMENTAL)

We can generate images in near real time with FastSD CPU.

__CPU (OpenVINO)__

For near real-time inference on CPU using OpenVINO, run the `start-realtime.bat` batch file and open the link in a browser (resolution: 512x512, latency: 0.82s on an Intel Core i7).

Watch the YouTube video:

[![IMAGE_ALT](https://img.youtube.com/vi/0XMiLc_vsyI/0.jpg)](https://www.youtube.com/watch?v=0XMiLc_vsyI)

## Models

To use single-file [Safetensors](https://huggingface.co/docs/safetensors/en/index) SD 1.5 models (Civitai), follow this [YouTube tutorial](https://www.youtube.com/watch?v=zZTfUZnXJVk). Use LCM-LoRA mode for single-file Safetensors; a sample config entry is sketched below.
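For example, `configs/stable-diffusion-models.txt` can mix Hugging Face model IDs with a local single-file path; the local path below is illustrative, not shipped with the repo:

```
Lykon/dreamshaper-8
D:\models\my-civitai-model.safetensors
```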
FastSD supports LCM models and LCM-LoRA models.

### LCM Models

These models can be configured in the `configs/lcm-models.txt` file.

### OpenVINO models

These are LCM-LoRA baked-in models. They can be configured in the `configs/openvino-lcm-models.txt` file.

### LCM-LoRA models

These models can be configured in the `configs/lcm-lora-models.txt` file.

- *lcm-lora-sdv1-5* - distilled consistency adapter for [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)
- *lcm-lora-sdxl* - distilled consistency adapter for [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
- *lcm-lora-ssd-1b* - distilled consistency adapter for [segmind/SSD-1B](https://huggingface.co/segmind/SSD-1B)

These models are used together with the Stable Diffusion base models in `configs/stable-diffusion-models.txt`.

:exclamation: There is currently no support for OpenVINO LCM-LoRA models.

### How to add new LCM-LoRA models

To add a new model, follow these steps.
As an example we will add `wavymulder/collage-diffusion`; you can use Stable Diffusion 1.5 or SDXL/SSD-1B fine-tuned models.

1. Open the `configs/stable-diffusion-models.txt` file in a text editor.
2. Add the model ID `wavymulder/collage-diffusion` or a locally cloned path.

The updated file is shown below:

```
Lykon/dreamshaper-8
Fictiverse/Stable_Diffusion_PaperCut_Model
stabilityai/stable-diffusion-xl-base-1.0
runwayml/stable-diffusion-v1-5
segmind/SSD-1B
stablediffusionapi/anything-v5
wavymulder/collage-diffusion
```

Similarly, we can update the `configs/lcm-lora-models.txt` file with an LCM-LoRA ID.

### How to use LCM-LoRA models offline

Please follow these steps to run LCM-LoRA models offline:

- In the settings, ensure that the "Use locally cached model" setting is ticked.
- Download the model, for example `latent-consistency/lcm-lora-sdv1-5`, by running the following commands:

```
git lfs install
git clone https://huggingface.co/latent-consistency/lcm-lora-sdv1-5
```

Copy the cloned model folder path, for example "D:\demo\lcm-lora-sdv1-5", and update the `configs/lcm-lora-models.txt` file as shown below:

```
D:\demo\lcm-lora-sdv1-5
latent-consistency/lcm-lora-sdxl
latent-consistency/lcm-lora-ssd-1b
```

- Open the app and select the newly added local folder in the combo box menu.
- That's all!

<a id="useloramodels"></a>

## How to use LoRA models

Place your LoRA models in the "lora_models" folder. Use LCM or LCM-LoRA mode.
You can download LoRA models (.safetensors) from [Civitai](https://civitai.com/) or [Hugging Face](https://huggingface.co/).
E.g.: [cutecartoonredmond](https://civitai.com/models/207984/cutecartoonredmond-15v-cute-cartoon-lora-for-liberteredmond-sd-15?modelVersionId=234192)

<a id="usecontrolnet"></a>

## ControlNet support

We can use ControlNet in LCM-LoRA mode.

Download ControlNet models from [ControlNet-v1-1](https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/tree/main) and place them in the "controlnet_models" folder.

Use the medium-size models (723 MB) (for example: <https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/blob/main/control_v11p_sd15_canny_fp16.safetensors>).
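As with the GGUF files, this download can be scripted. A minimal sketch, assuming the `huggingface-cli` tool is installed; the repo and file name are the ones linked above:

```bash
# Fetch the fp16 Canny ControlNet into the folder FastSD CPU scans.
huggingface-cli download comfyanonymous/ControlNet-v1-1_fp16_safetensors \
  control_v11p_sd15_canny_fp16.safetensors --local-dir controlnet_models
```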
## Installation

### FastSD CPU on Windows

![FastSD CPU Desktop GUI Screenshot](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastsdcpu-gui.jpg)

:exclamation: __You must have a working Python installation (recommended: Python 3.10 or 3.11).__

To install FastSD CPU on Windows, follow these steps:

- Clone/download this repo or download a [release](https://github.com/rupeshs/fastsdcpu/releases).
- Double-click `install.bat` (it will take some time to install, depending on your internet speed).
- You can run in desktop GUI mode or web UI mode.

#### Desktop GUI

- To start the desktop GUI, double-click `start.bat`

#### Web UI

- To start the web UI, double-click `start-webui.bat`

### FastSD CPU on Linux

:exclamation: __Ensure that you have Python 3.9, 3.10 or 3.11 installed.__

- Clone/download this repo or download a [release](https://github.com/rupeshs/fastsdcpu/releases).
- In a terminal, enter the fastsdcpu directory
- Run the following commands:

`chmod +x install.sh`

`./install.sh`

#### To start the desktop GUI

`./start.sh`

#### To start the web UI

`./start-webui.sh`

### FastSD CPU on Mac

![FastSD CPU running on Mac](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastsdcpu-mac-gui.jpg)

:exclamation: __Ensure that you have Python 3.9, 3.10 or 3.11 installed.__

Run the following commands to install FastSD CPU on Mac:

- Clone/download this repo or download a [release](https://github.com/rupeshs/fastsdcpu/releases).
- In a terminal, enter the fastsdcpu directory
- Run the following commands:

`chmod +x install-mac.sh`

`./install-mac.sh`

#### To start the desktop GUI

`./start.sh`

#### To start the web UI

`./start-webui.sh`

Thanks [Autantpourmoi](https://github.com/Autantpourmoi) for Mac testing.

:exclamation: OpenVINO is not supported on Apple Silicon Macs (M1/M2/M3 chips), but it *does* work on Intel-based Macs.

If you want to increase image generation speed on a Mac (M1/M2 chip), try this:

`export DEVICE=mps` and start the app with `start.sh`

#### Web UI screenshot

![FastSD CPU WebUI Screenshot](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastcpu-webui.png)

### Google Colab

Due to the limitations of using CPU/OpenVINO inside Colab, we use a GPU with Colab.
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1SuAqskB-_gjWLYNRFENAkIXZ1aoyINqL?usp=sharing)

### CLI mode (advanced users)

![FastSD CPU CLI Screenshot](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastcpu-cli.png)

Open a terminal and enter the fastsdcpu folder.
Activate the virtual environment using the command:

##### Windows users

(Assuming FastSD CPU is available in the directory "D:\fastsdcpu")
`D:\fastsdcpu\env\Scripts\activate.bat`

##### Linux users

`source env/bin/activate`

Start the CLI with `src/app.py -h`

<a id="android"></a>

## Android (Termux + PRoot)

FastSD CPU running on a Google Pixel 7 Pro.

![FastSD CPU Android Termux Screenshot](https://raw.githubusercontent.com/rupeshs/fastsdcpu/main/docs/images/fastsdcpu-android-termux-pixel7.png)

### 1. Prerequisites

First you have to [install Termux](https://wiki.termux.com/wiki/Installing_from_F-Droid) and [install PRoot](https://wiki.termux.com/wiki/PRoot). Then install and log in to Ubuntu in PRoot, for example as sketched below.
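Inside Termux, that typically looks like the following. A minimal sketch; the commands assume the stock `proot-distro` tool from Termux's package repository, which also provides the `proot-distro login ubuntu` step used in the next section:

```bash
# In the Termux shell: install proot-distro, then set up and enter Ubuntu.
pkg install proot-distro
proot-distro install ubuntu
proot-distro login ubuntu
```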
### 2. Install FastSD CPU

Run the following commands to install without the Qt GUI:

`proot-distro login ubuntu`

`./install.sh --disable-gui`

After the installation you can use the web UI:

`./start-webui.sh`

Note: If you get a `libgl.so.1` import error, run `apt-get install ffmpeg`.

Thanks [patienx](https://github.com/patientx) for this guide: [Step by step guide to installing FASTSDCPU on ANDROID](https://github.com/rupeshs/fastsdcpu/discussions/123)

Another step-by-step guide to running FastSD on Android is [here](https://nolowiz.com/how-to-install-and-run-fastsd-cpu-on-android-temux-step-by-step-guide/).

<a id="raspberry"></a>

## Raspberry Pi 4 support

Thanks [WGNW_MGM] for Raspberry Pi 4 testing. FastSD CPU worked without problems.
System configuration: Raspberry Pi 4 with 4 GB RAM and 8 GB of swap memory.

<a id="orangepi"></a>

## Orange Pi 5 support

Thanks [khanumballz](https://github.com/khanumballz) for testing FastSD CPU with the Orange Pi 5.
[Here is a video of FastSD CPU running on an Orange Pi 5](https://www.youtube.com/watch?v=KEJiCU0aK8o).

<a id="apisupport"></a>

## API support

![FastSD CPU API documentation](https://raw.githubusercontent.com/rupeshs/fastsdcpu/add-basic-api-support/docs/images/fastsdcpu-api.png)

FastSD CPU supports basic API endpoints. The following API endpoints are available:

- /api/info - get system information
- /api/config - get the configuration
- /api/models - list all available models
- /api/generate - generate images (text to image, image to image)

To start FastAPI in web-server mode, run:
``python src/app.py --api``

or use `start-webserver.sh` on Linux and `start-webserver.bat` on Windows.

Access the API documentation locally at <http://localhost:8000/api/docs>.

The generated image is a JPEG image encoded as a base64 string.
In image-to-image mode, the input image should also be encoded as a base64 string.

To generate an image, send a minimal `POST /api/generate` request with the body:

```
{
    "prompt": "a cute cat",
    "use_openvino": true
}
```
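With the server running, the same request can be issued from the command line. A minimal sketch: the endpoint and body are from the docs above, but the shape of the JSON response (here assumed to carry the base64 image in an `images` array) is an assumption; check <http://localhost:8000/api/docs> for the actual schema.

```bash
# Generate an image and decode the base64 payload to a JPEG file.
# NOTE: the ".images[0]" path is assumed, not confirmed by this README.
curl -s -X POST http://localhost:8000/api/generate \
  -H "Content-Type: application/json" \
  -d '{"prompt": "a cute cat", "use_openvino": true}' \
  | jq -r '.images[0]' | base64 -d > cat.jpg

# Other endpoints, e.g. system information:
curl -s http://localhost:8000/api/info
```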
## Known issues

- TAESD will not work with the OpenVINO image-to-image workflow

## License

The fastsdcpu project is available as open source under the terms of the [MIT license](https://github.com/rupeshs/fastsdcpu/blob/main/LICENSE).

## Disclaimer

Users are granted the freedom to create images using this tool, but they are obligated to comply with local laws and to use it responsibly. The developers will not assume any responsibility for potential misuse by users.

<a id="contributors"></a>

## Thanks to all our contributors

Original author & maintainer - [Rupesh Sreeraman](https://github.com/rupeshs)

We thank all contributors for their time and hard work!

<a href="https://github.com/rupeshs/fastsdcpu/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=rupeshs/fastsdcpu" />
</a>
THIRD-PARTY-LICENSES ADDED
stablediffusion.cpp - MIT

OpenVINO stablediffusion engine - Apache 2

SD Turbo - STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT

MIT License

Copyright (c) 2023 leejet

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

STABILITY AI NON-COMMERCIAL RESEARCH COMMUNITY LICENSE AGREEMENT
Dated: November 28, 2023

By using or distributing any portion or element of the Models, Software, Software Products or Derivative Works, you agree to be bound by this Agreement.

"Agreement" means this Stable Non-Commercial Research Community License Agreement.

“AUP” means the Stability AI Acceptable Use Policy available at <https://stability.ai/use-policy>, as may be updated from time to time.

“Derivative Work(s)” means (a) any derivative work of the Software Products as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output. For clarity, Derivative Works do not include the output of any Model.

“Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software.

"Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.

“Model(s)” means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing, made available under this Agreement.

“Non-Commercial Uses” means exercising any of the rights granted herein for the purpose of research or non-commercial purposes. Non-Commercial Uses does not include any production use of the Software Products or any Derivative Works.

"Stability AI" or "we" means Stability AI Ltd. and its affiliates.

"Software" means Stability AI’s proprietary software made available under this Agreement.

“Software Products” means the Models, Software and Documentation, individually or in any combination.

1. License Rights and Redistribution.

a. Subject to your compliance with this Agreement, the AUP (which is hereby incorporated herein by reference), and the Documentation, Stability AI grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty free and limited license under Stability AI’s intellectual property or other rights owned or controlled by Stability AI embodied in the Software Products to use, reproduce, distribute, and create Derivative Works of, the Software Products, in each case for Non-Commercial Uses only.

b. You may not use the Software Products or Derivative Works to enable third parties to use the Software Products or Derivative Works as part of your hosted service or via your APIs, whether you are adding substantial additional functionality thereto or not. Merely distributing the Software Products or Derivative Works for download online without offering any related service (ex. by distributing the Models on HuggingFace) is not a violation of this subsection. If you wish to use the Software Products or any Derivative Works for commercial or production use or you wish to make the Software Products or any Derivative Works available to third parties via your hosted service or your APIs, contact Stability AI at <https://stability.ai/contact>.

c. If you distribute or make the Software Products, or any Derivative Works thereof, available to a third party, the Software Products, Derivative Works, or any portion thereof, respectively, will remain subject to this Agreement and you must (i) provide a copy of this Agreement to such third party, and (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Non-Commercial Research Community License, Copyright (c) Stability AI Ltd. All Rights Reserved.” If you create a Derivative Work of a Software Product, you may add your own attribution notices to the Notice file included with the Software Product, provided that you clearly indicate which attributions apply to the Software Product and you must state in the NOTICE file that you changed the Software Product and how it was modified.

2. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SOFTWARE PRODUCTS AND ANY OUTPUT AND RESULTS THERE FROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SOFTWARE PRODUCTS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SOFTWARE PRODUCTS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS.

3. Limitation of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

4. Intellectual Property.

a. No trademark licenses are granted under this Agreement, and in connection with the Software Products or Derivative Works, neither Stability AI nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Software Products or Derivative Works.

b. Subject to Stability AI’s ownership of the Software Products and Derivative Works made by or for Stability AI, with respect to any Derivative Works that are made by you, as between you and Stability AI, you are and will be the owner of such Derivative Works.

c. If you institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Software Products, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to your use or distribution of the Software Products or Derivative Works in violation of this Agreement.

5. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Software Products and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of any Software Products or Derivative Works. Sections 2-4 shall survive the termination of this Agreement.
benchmark-openvino.bat ADDED
@echo off
setlocal

set "PYTHON_COMMAND=python"

call python --version > nul 2>&1
if %errorlevel% equ 0 (
    echo Python command check :OK
) else (
    echo "Error: Python command not found, please install Python (Recommended : Python 3.10 or Python 3.11) and try again"
    pause
    exit /b 1
)

:check_python_version
for /f "tokens=2" %%I in ('%PYTHON_COMMAND% --version 2^>^&1') do (
    set "python_version=%%I"
)

echo Python version: %python_version%

call "%~dp0env\Scripts\activate.bat" && %PYTHON_COMMAND% src/app.py -b --use_openvino --openvino_lcm_model_id "rupeshs/sd-turbo-openvino"
benchmark.bat ADDED
@echo off
setlocal

set "PYTHON_COMMAND=python"

call python --version > nul 2>&1
if %errorlevel% equ 0 (
    echo Python command check :OK
) else (
    echo "Error: Python command not found, please install Python (Recommended : Python 3.10 or Python 3.11) and try again"
    pause
    exit /b 1
)

:check_python_version
for /f "tokens=2" %%I in ('%PYTHON_COMMAND% --version 2^>^&1') do (
    set "python_version=%%I"
)

echo Python version: %python_version%

call "%~dp0env\Scripts\activate.bat" && %PYTHON_COMMAND% src/app.py -b
configs/lcm-lora-models.txt ADDED
latent-consistency/lcm-lora-sdv1-5
latent-consistency/lcm-lora-sdxl
latent-consistency/lcm-lora-ssd-1b
rupeshs/hypersd-sd1-5-1-step-lora
configs/lcm-models.txt ADDED
stabilityai/sd-turbo
rupeshs/sdxs-512-0.9-orig-vae
rupeshs/hyper-sd-sdxl-1-step
rupeshs/SDXL-Lightning-2steps
stabilityai/sdxl-turbo
SimianLuo/LCM_Dreamshaper_v7
latent-consistency/lcm-sdxl
latent-consistency/lcm-ssd-1b
configs/openvino-lcm-models.txt ADDED
rupeshs/sd-turbo-openvino
rupeshs/sdxs-512-0.9-openvino
rupeshs/hyper-sd-sdxl-1-step-openvino-int8
rupeshs/SDXL-Lightning-2steps-openvino-int8
rupeshs/sdxl-turbo-openvino-int8
rupeshs/LCM-dreamshaper-v7-openvino
Disty0/LCM_SoteMix
rupeshs/FLUX.1-schnell-openvino-int4
rupeshs/sd15-lcm-square-openvino-int8
configs/stable-diffusion-models.txt ADDED
@@ -0,0 +1,7 @@
+ Lykon/dreamshaper-8
+ Fictiverse/Stable_Diffusion_PaperCut_Model
+ stabilityai/stable-diffusion-xl-base-1.0
+ runwayml/stable-diffusion-v1-5
+ segmind/SSD-1B
+ stablediffusionapi/anything-v5
+ prompthero/openjourney-v4
controlnet_models/Readme.txt ADDED
@@ -0,0 +1,3 @@
+ Place your ControlNet models in this folder.
+ You can download ControlNet models (.safetensors) from https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/tree/main
+ E.g.: https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/blob/main/control_v11p_sd15_canny_fp16.safetensors
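
The download can also be scripted; a minimal sketch using huggingface_hub (installed as a dependency of diffusers), fetching the example Canny model above into this folder:

from huggingface_hub import hf_hub_download

# Fetch the example ControlNet model referenced above into controlnet_models/
hf_hub_download(
    repo_id="comfyanonymous/ControlNet-v1-1_fp16_safetensors",
    filename="control_v11p_sd15_canny_fp16.safetensors",
    local_dir="controlnet_models",
)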
docs/images/2steps-inference.jpg ADDED
docs/images/ARCGPU.png ADDED
docs/images/fastcpu-cli.png ADDED
docs/images/fastcpu-webui.png ADDED

Git LFS Details: SHA256 d26b4e4bc41d515a730b757a6a984724aa59497235bf771b25e5a7d9c2d5680f, remote file size 263 kB
docs/images/fastsdcpu-android-termux-pixel7.png ADDED

Git LFS Details: SHA256 0e18187cb43b8e905971fd607e6640a66c2877d4dc135647425f3cb66d7f22ae, remote file size 299 kB
docs/images/fastsdcpu-api.png ADDED
docs/images/fastsdcpu-gui.jpg ADDED

Git LFS Details: SHA256 03c1fe3b5ea4dfcc25654c4fc76fc392a59bdf668b1c45e5e6fc14edcf2fac5c, remote file size 207 kB
docs/images/fastsdcpu-mac-gui.jpg ADDED
docs/images/fastsdcpu-screenshot.png ADDED

Git LFS Details: SHA256 3729a8a87629800c63ca98ed36c1a48c7c0bc64c02a144e396aca05e7c529ee6, remote file size 293 kB
docs/images/fastsdcpu-webui.png ADDED

Git LFS Details: SHA256 b1378a52fab340ad566e69074f93af40a82f92afb29d8a3b27cbe218b5ee4bff, remote file size 380 kB
docs/images/fastsdcpu_flux_on_cpu.png ADDED

Git LFS Details: SHA256 5e42851d654dc88a75e479cb6dcf25bc1e7a7463f7c2eb2a98ec77e2b3c74e06, remote file size 383 kB
install-mac.sh ADDED
@@ -0,0 +1,31 @@
+ #!/usr/bin/env bash
+ echo "Starting FastSD CPU env installation..."
+ set -e
+ PYTHON_COMMAND="python3"
+
+ if ! command -v python3 &>/dev/null; then
+     if ! command -v python &>/dev/null; then
+         echo "Error: Python not found, please install Python 3.8 or higher and try again"
+         exit 1
+     fi
+ fi
+
+ if command -v python &>/dev/null; then
+     PYTHON_COMMAND="python"
+ fi
+
+ echo "Found $PYTHON_COMMAND command"
+
+ python_version=$($PYTHON_COMMAND --version 2>&1 | awk '{print $2}')
+ echo "Python version: $python_version"
+
+ BASEDIR=$(pwd)
+
+ $PYTHON_COMMAND -m venv "$BASEDIR/env"
+ # shellcheck disable=SC1091
+ source "$BASEDIR/env/bin/activate"
+ pip install torch
+ pip install -r "$BASEDIR/requirements.txt"
+ chmod +x "start.sh"
+ chmod +x "start-webui.sh"
+ read -n1 -r -p "FastSD CPU installation completed, press any key to continue..." key
install.bat ADDED
@@ -0,0 +1,27 @@
+ @echo off
+ setlocal
+ echo Starting FastSD CPU env installation...
+
+ set "PYTHON_COMMAND=python"
+
+ call python --version > nul 2>&1
+ if %errorlevel% equ 0 (
+     echo Python command check: OK
+ ) else (
+     echo "Error: Python command not found, please install Python (Recommended: Python 3.10 or Python 3.11) and try again."
+     pause
+     exit /b 1
+ )
+
+ :check_python_version
+ for /f "tokens=2" %%I in ('%PYTHON_COMMAND% --version 2^>^&1') do (
+     set "python_version=%%I"
+ )
+
+ echo Python version: %python_version%
+
+ %PYTHON_COMMAND% -m venv "%~dp0env"
+ call "%~dp0env\Scripts\activate.bat" && pip install torch==2.2.2 --index-url https://download.pytorch.org/whl/cpu
+ call "%~dp0env\Scripts\activate.bat" && pip install -r "%~dp0requirements.txt"
+ echo FastSD CPU env installation completed.
+ pause
install.sh ADDED
@@ -0,0 +1,32 @@
+ #!/usr/bin/env bash
+ echo "Starting FastSD CPU env installation..."
+ set -e
+ PYTHON_COMMAND="python3"
+
+ if ! command -v python3 &>/dev/null; then
+     if ! command -v python &>/dev/null; then
+         echo "Error: Python not found, please install Python 3.8 or higher and try again"
+         exit 1
+     fi
+ fi
+
+ if command -v python &>/dev/null; then
+     PYTHON_COMMAND="python"
+ fi
+
+ echo "Found $PYTHON_COMMAND command"
+
+ python_version=$($PYTHON_COMMAND --version 2>&1 | awk '{print $2}')
+ echo "Python version: $python_version"
+
+ BASEDIR=$(pwd)
+
+ # Create and activate the env used by the start scripts (mirrors install-mac.sh)
+ $PYTHON_COMMAND -m venv "$BASEDIR/env"
+ # shellcheck disable=SC1091
+ source "$BASEDIR/env/bin/activate"
+ pip install torch==2.2.2 --index-url https://download.pytorch.org/whl/cpu
+ pip install -r "$BASEDIR/requirements.txt"
+
+ chmod +x "start.sh"
+ chmod +x "start-webui.sh"
lora_models/HoloEnV2.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b30945096a4f2ff4a52fcafaa8481c6a5d52073be323674a16b189ceb66d39a6
+ size 151111711
lora_models/Readme.txt ADDED
@@ -0,0 +1,3 @@
+ Place your LoRA models in this folder.
+ You can download LoRA models (.safetensors) from Civitai (https://civitai.com/) or Hugging Face (https://huggingface.co/)
+ E.g.: https://civitai.com/models/207984/cutecartoonredmond-15v-cute-cartoon-lora-for-liberteredmond-sd-15?modelVersionId=234192
models/gguf/clip/readme.txt ADDED
@@ -0,0 +1 @@
+ Place CLIP model files here
models/gguf/diffusion/readme.txt ADDED
@@ -0,0 +1 @@
+ Place your diffusion GGUF model files here
models/gguf/t5xxl/readme.txt ADDED
@@ -0,0 +1 @@
+ Place T5-XXL model files here
models/gguf/vae/readme.txt ADDED
@@ -0,0 +1 @@
+ Place VAE model files here
requirements.txt ADDED
@@ -0,0 +1,18 @@
+ accelerate==0.33.0
+ diffusers==0.30.0
+ transformers==4.41.2
+ Pillow==9.4.0
+ openvino==2024.4.0
+ optimum-intel==1.18.2
+ onnx==1.16.0
+ onnxruntime==1.17.3
+ pydantic==2.4.2
+ typing-extensions==4.8.0
+ pyyaml==6.0.1
+ gradio==5.6.0
+ peft==0.6.1
+ opencv-python==4.8.1.78
+ omegaconf==2.3.0
+ controlnet-aux==0.0.7
+ mediapipe==0.10.9
+ tomesd==0.1.3
src/__init__.py ADDED
File without changes
src/app.py ADDED
@@ -0,0 +1,528 @@
+ import json
+ from argparse import ArgumentParser
+
+ import constants
+ from backend.controlnet import controlnet_settings_from_dict
+ from backend.models.gen_images import ImageFormat
+ from backend.models.lcmdiffusion_setting import DiffusionTask
+ from backend.upscale.tiled_upscale import generate_upscaled_image
+ from constants import APP_VERSION, DEVICE
+ from frontend.webui.image_variations_ui import generate_image_variations
+ from models.interface_types import InterfaceType
+ from paths import FastStableDiffusionPaths
+ from PIL import Image
+ from state import get_context, get_settings
+ from utils import show_system_info
+ from backend.device import get_device_name
+
+ parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
+ parser.add_argument(
+     "-s",
+     "--share",
+     action="store_true",
+     help="Create shareable link (Web UI)",
+     required=False,
+ )
+ group = parser.add_mutually_exclusive_group(required=False)
+ group.add_argument(
+     "-g",
+     "--gui",
+     action="store_true",
+     help="Start desktop GUI",
+ )
+ group.add_argument(
+     "-w",
+     "--webui",
+     action="store_true",
+     help="Start Web UI",
+ )
+ group.add_argument(
+     "-a",
+     "--api",
+     action="store_true",
+     help="Start Web API server",
+ )
+ group.add_argument(
+     "-r",
+     "--realtime",
+     action="store_true",
+     help="Start realtime inference UI (experimental)",
+ )
+ group.add_argument(
+     "-v",
+     "--version",
+     action="store_true",
+     help="Version",
+ )
+
+ parser.add_argument(
+     "-b",
+     "--benchmark",
+     action="store_true",
+     help="Run inference benchmark on the selected device",
+ )
+ parser.add_argument(
+     "--lcm_model_id",
+     type=str,
+     help="Model ID or path, default: stabilityai/sd-turbo",
+     default="stabilityai/sd-turbo",
+ )
+ parser.add_argument(
+     "--openvino_lcm_model_id",
+     type=str,
+     help="OpenVINO model ID or path, default: rupeshs/sd-turbo-openvino",
+     default="rupeshs/sd-turbo-openvino",
+ )
+ parser.add_argument(
+     "--prompt",
+     type=str,
+     help="Describe the image you want to generate",
+     default="",
+ )
+ parser.add_argument(
+     "--negative_prompt",
+     type=str,
+     help="Describe what you want to exclude from the generation",
+     default="",
+ )
+ parser.add_argument(
+     "--image_height",
+     type=int,
+     help="Height of the image",
+     default=512,
+ )
+ parser.add_argument(
+     "--image_width",
+     type=int,
+     help="Width of the image",
+     default=512,
+ )
+ parser.add_argument(
+     "--inference_steps",
+     type=int,
+     help="Number of steps, default: 1",
+     default=1,
+ )
+ parser.add_argument(
+     "--guidance_scale",
+     type=float,
+     help="Guidance scale, default: 1.0",
+     default=1.0,
+ )
+
+ parser.add_argument(
+     "--number_of_images",
+     type=int,
+     help="Number of images to generate, default: 1",
+     default=1,
+ )
+ parser.add_argument(
+     "--seed",
+     type=int,
+     help="Seed, default: -1 (disabled)",
+     default=-1,
+ )
+ parser.add_argument(
+     "--use_openvino",
+     action="store_true",
+     help="Use OpenVINO model",
+ )
+
+ parser.add_argument(
+     "--use_offline_model",
+     action="store_true",
+     help="Use offline model",
+ )
+ parser.add_argument(
+     "--clip_skip",
+     type=int,
+     help="CLIP Skip (1-12), default: 1 (disabled)",
+     default=1,
+ )
+ parser.add_argument(
+     "--token_merging",
+     type=float,
+     help="Token merging scale, 0.0 - 1.0, default: 0.0",
+     default=0.0,
+ )
+
+ parser.add_argument(
+     "--use_safety_checker",
+     action="store_true",
+     help="Use safety checker",
+ )
+ parser.add_argument(
+     "--use_lcm_lora",
+     action="store_true",
+     help="Use LCM-LoRA",
+ )
+ parser.add_argument(
+     "--base_model_id",
+     type=str,
+     help="LCM LoRA base model ID, default: Lykon/dreamshaper-8",
+     default="Lykon/dreamshaper-8",
+ )
+ parser.add_argument(
+     "--lcm_lora_id",
+     type=str,
+     help="LCM LoRA model ID, default: latent-consistency/lcm-lora-sdv1-5",
+     default="latent-consistency/lcm-lora-sdv1-5",
+ )
+ parser.add_argument(
+     "-i",
+     "--interactive",
+     action="store_true",
+     help="Interactive CLI mode",
+ )
+ parser.add_argument(
+     "-t",
+     "--use_tiny_auto_encoder",
+     action="store_true",
+     help="Use tiny auto encoder for SD (TAESD)",
+ )
+ parser.add_argument(
+     "-f",
+     "--file",
+     type=str,
+     help="Input image for img2img mode",
+     default="",
+ )
+ parser.add_argument(
+     "--img2img",
+     action="store_true",
+     help="img2img mode; requires input file via -f argument",
+ )
+ parser.add_argument(
+     "--batch_count",
+     type=int,
+     help="Number of sequential generations",
+     default=1,
+ )
+ parser.add_argument(
+     "--strength",
+     type=float,
+     help="Denoising strength for img2img and image variations",
+     default=0.3,
+ )
+ parser.add_argument(
+     "--sdupscale",
+     action="store_true",
+     help="Tiled SD upscale (2x), works only for the resolution 512x512",
+ )
+ parser.add_argument(
+     "--upscale",
+     action="store_true",
+     help="EDSR SD upscale",
+ )
+ parser.add_argument(
+     "--custom_settings",
+     type=str,
+     help="JSON file containing custom generation settings",
+     default=None,
+ )
+ parser.add_argument(
+     "--usejpeg",
+     action="store_true",
+     help="Images will be saved in JPEG format",
+ )
+ parser.add_argument(
+     "--noimagesave",
+     action="store_true",
+     help="Disable image saving",
+ )
+ parser.add_argument(
+     "--lora",
+     type=str,
+     help=r"LoRA model full path e.g. D:\lora_models\CuteCartoon15V-LiberteRedmodModel-Cartoon-CuteCartoonAF.safetensors",
+     default=None,
+ )
+ parser.add_argument(
+     "--lora_weight",
+     type=float,
+     help="LoRA adapter weight [0 to 1.0]",
+     default=0.5,
+ )
+ parser.add_argument(
+     "--port",
+     type=int,
+     help="Web server port",
+     default=8000,
+ )
+
+ args = parser.parse_args()
+
+ if args.version:
+     print(APP_VERSION)
+     exit()
+
+ # parser.print_help()
+ print("FastSD CPU - ", APP_VERSION)
+ show_system_info()
+ print(f"Using device: {constants.DEVICE}")
+
+ app_settings = get_settings()
+
+ print(f"Found {len(app_settings.lcm_models)} LCM models in configs/lcm-models.txt")
+ print(
+     f"Found {len(app_settings.stable_diffsuion_models)} stable diffusion models in configs/stable-diffusion-models.txt"
+ )
+ print(
+     f"Found {len(app_settings.lcm_lora_models)} LCM-LoRA models in configs/lcm-lora-models.txt"
+ )
+ print(
+     f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in configs/openvino-lcm-models.txt"
+ )
+
+ app_settings.settings.generated_images.save_image = not args.noimagesave
+
+ if not args.realtime:
+     # To minimize realtime mode dependencies
+     from backend.upscale.upscaler import upscale_image
+     from frontend.cli_interactive import interactive_mode
+
+ if args.gui:
+     from frontend.gui.ui import start_gui
+
+     print("Starting desktop GUI mode (Qt)")
+     start_gui(
+         [],
+         app_settings,
+     )
+ elif args.webui:
+     from frontend.webui.ui import start_webui
+
+     print("Starting web UI mode")
+     start_webui(
+         args.share,
+     )
+ elif args.realtime:
+     from frontend.webui.realtime_ui import start_realtime_text_to_image
+
+     print("Starting realtime text to image (EXPERIMENTAL)")
+     start_realtime_text_to_image(args.share)
+ elif args.api:
+     from backend.api.web import start_web_server
+
+     start_web_server(args.port)
+
+ else:
+     context = get_context(InterfaceType.CLI)
+     config = app_settings.settings
+
+     if args.use_openvino:
+         config.lcm_diffusion_setting.openvino_lcm_model_id = args.openvino_lcm_model_id
+     else:
+         config.lcm_diffusion_setting.lcm_model_id = args.lcm_model_id
+
+     config.lcm_diffusion_setting.prompt = args.prompt
+     config.lcm_diffusion_setting.negative_prompt = args.negative_prompt
+     config.lcm_diffusion_setting.image_height = args.image_height
+     config.lcm_diffusion_setting.image_width = args.image_width
+     config.lcm_diffusion_setting.guidance_scale = args.guidance_scale
+     config.lcm_diffusion_setting.number_of_images = args.number_of_images
+     config.lcm_diffusion_setting.inference_steps = args.inference_steps
+     config.lcm_diffusion_setting.strength = args.strength
+     config.lcm_diffusion_setting.seed = args.seed
+     config.lcm_diffusion_setting.use_openvino = args.use_openvino
+     config.lcm_diffusion_setting.use_tiny_auto_encoder = args.use_tiny_auto_encoder
+     config.lcm_diffusion_setting.use_lcm_lora = args.use_lcm_lora
+     config.lcm_diffusion_setting.lcm_lora.base_model_id = args.base_model_id
+     config.lcm_diffusion_setting.lcm_lora.lcm_lora_id = args.lcm_lora_id
+     config.lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
+     config.lcm_diffusion_setting.lora.enabled = False
+     config.lcm_diffusion_setting.lora.path = args.lora
+     config.lcm_diffusion_setting.lora.weight = args.lora_weight
+     config.lcm_diffusion_setting.lora.fuse = True
+     if config.lcm_diffusion_setting.lora.path:
+         config.lcm_diffusion_setting.lora.enabled = True
+     if args.usejpeg:
+         config.generated_images.format = ImageFormat.JPEG.value.upper()
+     if args.seed > -1:
+         config.lcm_diffusion_setting.use_seed = True
+     else:
+         config.lcm_diffusion_setting.use_seed = False
+     config.lcm_diffusion_setting.use_offline_model = args.use_offline_model
+     config.lcm_diffusion_setting.clip_skip = args.clip_skip
+     config.lcm_diffusion_setting.token_merging = args.token_merging
+     config.lcm_diffusion_setting.use_safety_checker = args.use_safety_checker
+
+     # Read custom settings from JSON file
+     custom_settings = {}
+     if args.custom_settings:
+         with open(args.custom_settings) as f:
+             custom_settings = json.load(f)
+
+     # Basic ControlNet settings; if ControlNet is enabled, an image is
+     # required even in txt2img mode
+     config.lcm_diffusion_setting.controlnet = None
+     controlnet_settings_from_dict(
+         config.lcm_diffusion_setting,
+         custom_settings,
+     )
+
+     # Interactive mode
+     if args.interactive:
+         # wrapper(interactive_mode, config, context)
+         config.lcm_diffusion_setting.lora.fuse = False
+         interactive_mode(config, context)
+
+     # Start of non-interactive CLI image generation
+     if args.img2img and args.file != "":
+         config.lcm_diffusion_setting.init_image = Image.open(args.file)
+         config.lcm_diffusion_setting.diffusion_task = DiffusionTask.image_to_image.value
+     elif args.img2img and args.file == "":
+         print("Error: You need to specify a file in img2img mode")
+         exit()
+     elif args.upscale and args.file == "" and args.custom_settings is None:
+         print("Error: You need to specify a file in SD upscale mode")
+         exit()
+     elif (
+         args.prompt == ""
+         and args.file == ""
+         and args.custom_settings is None
+         and not args.benchmark
+     ):
+         print("Error: You need to provide a prompt")
+         exit()
+
+     if args.upscale:
+         # image = Image.open(args.file)
+         output_path = FastStableDiffusionPaths.get_upscale_filepath(
+             args.file,
+             2,
+             config.generated_images.format,
+         )
+         result = upscale_image(
+             context,
+             args.file,
+             output_path,
+             2,
+         )
+     # Perform Tiled SD upscale (EXPERIMENTAL)
+     elif args.sdupscale:
+         if args.use_openvino:
+             config.lcm_diffusion_setting.strength = 0.3
+         upscale_settings = None
+         if custom_settings != {}:
+             upscale_settings = custom_settings
+         filepath = args.file
+         output_format = config.generated_images.format
+         if upscale_settings:
+             filepath = upscale_settings["source_file"]
+             output_format = upscale_settings["output_format"].upper()
+         output_path = FastStableDiffusionPaths.get_upscale_filepath(
+             filepath,
+             2,
+             output_format,
+         )
+
+         generate_upscaled_image(
+             config,
+             filepath,
+             config.lcm_diffusion_setting.strength,
+             upscale_settings=upscale_settings,
+             context=context,
+             tile_overlap=32 if config.lcm_diffusion_setting.use_openvino else 16,
+             output_path=output_path,
+             image_format=output_format,
+         )
+         exit()
+     # If img2img argument is set and prompt is empty, use image variations mode
+     elif args.img2img and args.prompt == "":
+         for _ in range(args.batch_count):
+             generate_image_variations(
+                 config.lcm_diffusion_setting.init_image, args.strength
+             )
+     else:
+         if args.benchmark:
+             print("Initializing benchmark...")
+             bench_lcm_setting = config.lcm_diffusion_setting
+             bench_lcm_setting.prompt = "a cat"
+             bench_lcm_setting.use_tiny_auto_encoder = False
+             # Warm-up run before timing
+             context.generate_text_to_image(
+                 settings=config,
+                 device=DEVICE,
+             )
+             latencies = []
+
+             print("Starting benchmark, please wait...")
+             for _ in range(3):
+                 context.generate_text_to_image(
+                     settings=config,
+                     device=DEVICE,
+                 )
+                 latencies.append(context.latency)
+
+             avg_latency = sum(latencies) / 3
+
+             bench_lcm_setting.use_tiny_auto_encoder = True
+
+             # Warm-up run with TAESD enabled before timing
+             context.generate_text_to_image(
+                 settings=config,
+                 device=DEVICE,
+             )
+             latencies = []
+             for _ in range(3):
+                 context.generate_text_to_image(
+                     settings=config,
+                     device=DEVICE,
+                 )
+                 latencies.append(context.latency)
+
+             avg_latency_taesd = sum(latencies) / 3
+
+             benchmark_name = ""
+
+             if config.lcm_diffusion_setting.use_openvino:
+                 benchmark_name = "OpenVINO"
+             else:
+                 benchmark_name = "PyTorch"
+
+             bench_model_id = ""
+             if bench_lcm_setting.use_openvino:
+                 bench_model_id = bench_lcm_setting.openvino_lcm_model_id
+             elif bench_lcm_setting.use_lcm_lora:
+                 bench_model_id = bench_lcm_setting.lcm_lora.base_model_id
+             else:
+                 bench_model_id = bench_lcm_setting.lcm_model_id
+
+             benchmark_result = [
+                 ["Device", f"{DEVICE.upper()},{get_device_name()}"],
+                 ["Stable Diffusion Model", bench_model_id],
+                 [
+                     "Image Size",
+                     f"{bench_lcm_setting.image_width}x{bench_lcm_setting.image_height}",
+                 ],
+                 [
+                     "Inference Steps",
+                     f"{bench_lcm_setting.inference_steps}",
+                 ],
+                 [
+                     "Benchmark Passes",
+                     3,
+                 ],
+                 [
+                     "Average Latency",
+                     f"{round(avg_latency, 3)} sec",
+                 ],
+                 [
+                     "Average Latency (TAESD* enabled)",
+                     f"{round(avg_latency_taesd, 3)} sec",
+                 ],
+             ]
+             print()
+             print(f"FastSD Benchmark - {benchmark_name}")
+             print("-" * 80)
+             for benchmark in benchmark_result:
+                 print(f"{benchmark[0]:35} - {benchmark[1]}")
+             print("-" * 80)
+             print("*TAESD - Tiny AutoEncoder for Stable Diffusion")
+
+         else:
+             for _ in range(args.batch_count):
+                 context.generate_text_to_image(
+                     settings=config,
+                     device=DEVICE,
+                 )
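
For --sdupscale runs, --custom_settings points to a JSON file; the script reads the source_file and output_format keys shown above (ControlNet keys are handled separately by controlnet_settings_from_dict and are not covered here). A minimal sketch of writing such a file, with an illustrative image path and file name:

import json

# Hypothetical settings file for: python src/app.py --sdupscale --custom_settings upscale_settings.json
custom = {
    "source_file": "results/sample-512x512.png",  # illustrative source image path
    "output_format": "png",  # upper-cased by app.py before use
}
with open("upscale_settings.json", "w") as f:
    json.dump(custom, f, indent=2)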
src/app_settings.py ADDED
@@ -0,0 +1,124 @@
+ from copy import deepcopy
+ from os import makedirs, path
+
+ import yaml
+ from constants import (
+     LCM_LORA_MODELS_FILE,
+     LCM_MODELS_FILE,
+     OPENVINO_LCM_MODELS_FILE,
+     SD_MODELS_FILE,
+ )
+ from paths import FastStableDiffusionPaths, join_paths
+ from utils import get_files_in_dir, get_models_from_text_file
+
+ from models.settings import Settings
+
+
+ class AppSettings:
+     def __init__(self):
+         self.config_path = FastStableDiffusionPaths().get_app_settings_path()
+         self._stable_diffsuion_models = get_models_from_text_file(
+             FastStableDiffusionPaths().get_models_config_path(SD_MODELS_FILE)
+         )
+         self._lcm_lora_models = get_models_from_text_file(
+             FastStableDiffusionPaths().get_models_config_path(LCM_LORA_MODELS_FILE)
+         )
+         self._openvino_lcm_models = get_models_from_text_file(
+             FastStableDiffusionPaths().get_models_config_path(OPENVINO_LCM_MODELS_FILE)
+         )
+         self._lcm_models = get_models_from_text_file(
+             FastStableDiffusionPaths().get_models_config_path(LCM_MODELS_FILE)
+         )
+         self._gguf_diffusion_models = get_files_in_dir(
+             join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "diffusion")
+         )
+         self._gguf_clip_models = get_files_in_dir(
+             join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "clip")
+         )
+         self._gguf_vae_models = get_files_in_dir(
+             join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "vae")
+         )
+         self._gguf_t5xxl_models = get_files_in_dir(
+             join_paths(FastStableDiffusionPaths().get_gguf_models_path(), "t5xxl")
+         )
+         self._config = None
+
+     @property
+     def settings(self):
+         return self._config
+
+     @property
+     def stable_diffsuion_models(self):
+         return self._stable_diffsuion_models
+
+     @property
+     def openvino_lcm_models(self):
+         return self._openvino_lcm_models
+
+     @property
+     def lcm_models(self):
+         return self._lcm_models
+
+     @property
+     def lcm_lora_models(self):
+         return self._lcm_lora_models
+
+     @property
+     def gguf_diffusion_models(self):
+         return self._gguf_diffusion_models
+
+     @property
+     def gguf_clip_models(self):
+         return self._gguf_clip_models
+
+     @property
+     def gguf_vae_models(self):
+         return self._gguf_vae_models
+
+     @property
+     def gguf_t5xxl_models(self):
+         return self._gguf_t5xxl_models
+
+     def load(self, skip_file=False):
+         if skip_file:
+             print("Skipping config file")
+             settings_dict = self._load_default()
+             self._config = Settings.model_validate(settings_dict)
+         else:
+             if not path.exists(self.config_path):
+                 base_dir = path.dirname(self.config_path)
+                 if not path.exists(base_dir):
+                     makedirs(base_dir)
+                 try:
+                     print("Settings not found, creating default settings")
+                     with open(self.config_path, "w") as file:
+                         yaml.dump(
+                             self._load_default(),
+                             file,
+                         )
+                 except Exception as ex:
+                     print(f"Error in creating settings: {ex}")
+                     exit()
+             try:
+                 with open(self.config_path) as file:
+                     settings_dict = yaml.safe_load(file)
+                     self._config = Settings.model_validate(settings_dict)
+             except Exception as ex:
+                 print(f"Error in loading settings: {ex}")
+
+     def save(self):
+         try:
+             with open(self.config_path, "w") as file:
+                 tmp_cfg = deepcopy(self._config)
+                 tmp_cfg.lcm_diffusion_setting.init_image = None
+                 configurations = tmp_cfg.model_dump(
+                     exclude=["init_image"],
+                 )
+                 if configurations:
+                     yaml.dump(configurations, file)
+         except Exception as ex:
+             print(f"Error in saving settings: {ex}")
+
+     def _load_default(self) -> dict:
+         default_config = Settings()
+         return default_config.model_dump()
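
A short sketch of the load/save round trip, assuming it is run from src/ with the repository layout above; on first use load() writes a default settings file (configs/settings.yaml by default) and validates it into a Settings model:

from app_settings import AppSettings

settings = AppSettings()
settings.load()  # creates the default settings file if missing, then validates it
settings.settings.lcm_diffusion_setting.prompt = "a cat"  # field names as used in src/app.py
settings.save()  # persists everything except the in-memory init_image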
src/backend/__init__.py ADDED
File without changes
src/backend/annotators/canny_control.py ADDED
@@ -0,0 +1,15 @@
+ import numpy as np
+ from backend.annotators.control_interface import ControlInterface
+ from cv2 import Canny
+ from PIL import Image
+
+
+ class CannyControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         low_threshold = 100
+         high_threshold = 200
+         image = np.array(image)
+         image = Canny(image, low_threshold, high_threshold)
+         image = image[:, :, None]
+         image = np.concatenate([image, image, image], axis=2)
+         return Image.fromarray(image)
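
A minimal usage sketch (the file names are illustrative): the annotator takes any RGB PIL image and returns a three-channel Canny edge map suitable as a ControlNet conditioning image:

from PIL import Image

from backend.annotators.canny_control import CannyControl

source = Image.open("input.png")  # hypothetical input file
control_image = CannyControl().get_control_image(source)
control_image.save("canny-control.png")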
src/backend/annotators/control_interface.py ADDED
@@ -0,0 +1,12 @@
+ from abc import ABC, abstractmethod
+
+ from PIL import Image
+
+
+ class ControlInterface(ABC):
+     @abstractmethod
+     def get_control_image(
+         self,
+         image: Image,
+     ) -> Image:
+         pass
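
Each annotator implements this one method. A hypothetical grayscale annotator, shown only to illustrate the contract (it is not part of the repository):

from PIL import Image

from backend.annotators.control_interface import ControlInterface


class GrayscaleControl(ControlInterface):  # hypothetical example
    def get_control_image(self, image: Image) -> Image:
        # Grayscale, then back to three channels as ControlNet preprocessors return
        return image.convert("L").convert("RGB")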
src/backend/annotators/depth_control.py ADDED
@@ -0,0 +1,15 @@
+ import numpy as np
+ from backend.annotators.control_interface import ControlInterface
+ from PIL import Image
+ from transformers import pipeline
+
+
+ class DepthControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         depth_estimator = pipeline("depth-estimation")
+         image = depth_estimator(image)["depth"]
+         image = np.array(image)
+         image = image[:, :, None]
+         image = np.concatenate([image, image, image], axis=2)
+         image = Image.fromarray(image)
+         return image
src/backend/annotators/image_control_factory.py ADDED
@@ -0,0 +1,31 @@
+ from backend.annotators.canny_control import CannyControl
+ from backend.annotators.depth_control import DepthControl
+ from backend.annotators.lineart_control import LineArtControl
+ from backend.annotators.mlsd_control import MlsdControl
+ from backend.annotators.normal_control import NormalControl
+ from backend.annotators.pose_control import PoseControl
+ from backend.annotators.shuffle_control import ShuffleControl
+ from backend.annotators.softedge_control import SoftEdgeControl
+
+
+ class ImageControlFactory:
+     def create_control(self, controlnet_type: str):
+         if controlnet_type == "Canny":
+             return CannyControl()
+         elif controlnet_type == "Pose":
+             return PoseControl()
+         elif controlnet_type == "MLSD":
+             return MlsdControl()
+         elif controlnet_type == "Depth":
+             return DepthControl()
+         elif controlnet_type == "LineArt":
+             return LineArtControl()
+         elif controlnet_type == "Shuffle":
+             return ShuffleControl()
+         elif controlnet_type == "NormalBAE":
+             return NormalControl()
+         elif controlnet_type == "SoftEdge":
+             return SoftEdgeControl()
+         else:
+             print("Error: Control type not implemented!")
+             raise Exception("Error: Control type not implemented!")
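
A short dispatch sketch; the type names are exactly the strings the factory checks, and the input file is illustrative:

from PIL import Image

from backend.annotators.image_control_factory import ImageControlFactory

# Supported: Canny, Pose, MLSD, Depth, LineArt, Shuffle, NormalBAE, SoftEdge
control = ImageControlFactory().create_control("Canny")
control_image = control.get_control_image(Image.open("input.png"))  # hypothetical file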
src/backend/annotators/lineart_control.py ADDED
@@ -0,0 +1,10 @@
+ from backend.annotators.control_interface import ControlInterface
+ from controlnet_aux import LineartDetector
+ from PIL import Image
+
+
+ class LineArtControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+         control_image = processor(image)
+         return control_image
src/backend/annotators/mlsd_control.py ADDED
@@ -0,0 +1,10 @@
+ from backend.annotators.control_interface import ControlInterface
+ from controlnet_aux import MLSDdetector
+ from PIL import Image
+
+
+ class MlsdControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
+         image = mlsd(image)
+         return image
src/backend/annotators/normal_control.py ADDED
@@ -0,0 +1,10 @@
+ from backend.annotators.control_interface import ControlInterface
+ from controlnet_aux import NormalBaeDetector
+ from PIL import Image
+
+
+ class NormalControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
+         control_image = processor(image)
+         return control_image
src/backend/annotators/pose_control.py ADDED
@@ -0,0 +1,10 @@
+ from backend.annotators.control_interface import ControlInterface
+ from controlnet_aux import OpenposeDetector
+ from PIL import Image
+
+
+ class PoseControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+         image = openpose(image)
+         return image
src/backend/annotators/shuffle_control.py ADDED
@@ -0,0 +1,10 @@
+ from backend.annotators.control_interface import ControlInterface
+ from controlnet_aux import ContentShuffleDetector
+ from PIL import Image
+
+
+ class ShuffleControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         shuffle_processor = ContentShuffleDetector()
+         image = shuffle_processor(image)
+         return image
src/backend/annotators/softedge_control.py ADDED
@@ -0,0 +1,10 @@
+ from backend.annotators.control_interface import ControlInterface
+ from controlnet_aux import PidiNetDetector
+ from PIL import Image
+
+
+ class SoftEdgeControl(ControlInterface):
+     def get_control_image(self, image: Image) -> Image:
+         processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
+         control_image = processor(image)
+         return control_image
src/backend/api/models/response.py ADDED
@@ -0,0 +1,16 @@
+ from typing import List
+
+ from pydantic import BaseModel
+
+
+ class StableDiffusionResponse(BaseModel):
+     """
+     Stable diffusion response model
+
+     Attributes:
+         images (List[str]): Base64-encoded generated images (JPEG)
+         latency (float): Latency in seconds
+     """
+
+     images: List[str]
+     latency: float
src/backend/api/web.py ADDED
@@ -0,0 +1,103 @@
+ import platform
+
+ import uvicorn
+ from backend.api.models.response import StableDiffusionResponse
+ from backend.models.device import DeviceInfo
+ from backend.base64_image import base64_image_to_pil, pil_image_to_base64_str
+ from backend.device import get_device_name
+ from backend.models.lcmdiffusion_setting import DiffusionTask, LCMDiffusionSetting
+ from constants import APP_VERSION, DEVICE
+ from context import Context
+ from fastapi import FastAPI
+ from models.interface_types import InterfaceType
+ from state import get_settings
+
+ app_settings = get_settings()
+ app = FastAPI(
+     title="FastSD CPU",
+     description="Fast stable diffusion on CPU",
+     version=APP_VERSION,
+     license_info={
+         "name": "MIT",
+         "identifier": "MIT",
+     },
+     docs_url="/api/docs",
+     redoc_url="/api/redoc",
+     openapi_url="/api/openapi.json",
+ )
+ print(app_settings.settings.lcm_diffusion_setting)
+
+ context = Context(InterfaceType.API_SERVER)
+
+
+ @app.get("/api/")
+ async def root():
+     return {"message": "Welcome to FastSD CPU API"}
+
+
+ @app.get(
+     "/api/info",
+     description="Get system information",
+     summary="Get system information",
+ )
+ async def info():
+     device_info = DeviceInfo(
+         device_type=DEVICE,
+         device_name=get_device_name(),
+         os=platform.system(),
+         platform=platform.platform(),
+         processor=platform.processor(),
+     )
+     return device_info.model_dump()
+
+
+ @app.get(
+     "/api/config",
+     description="Get current configuration",
+     summary="Get configurations",
+ )
+ async def config():
+     return app_settings.settings
+
+
+ @app.get(
+     "/api/models",
+     description="Get available models",
+     summary="Get available models",
+ )
+ async def models():
+     return {
+         "lcm_lora_models": app_settings.lcm_lora_models,
+         "stable_diffusion": app_settings.stable_diffsuion_models,
+         "openvino_models": app_settings.openvino_lcm_models,
+         "lcm_models": app_settings.lcm_models,
+     }
+
+
+ @app.post(
+     "/api/generate",
+     description="Generate image (text to image, image to image)",
+     summary="Generate image (text to image, image to image)",
+ )
+ async def generate(diffusion_config: LCMDiffusionSetting) -> StableDiffusionResponse:
+     app_settings.settings.lcm_diffusion_setting = diffusion_config
+     if diffusion_config.diffusion_task == DiffusionTask.image_to_image:
+         app_settings.settings.lcm_diffusion_setting.init_image = base64_image_to_pil(
+             diffusion_config.init_image
+         )
+
+     images = context.generate_text_to_image(app_settings.settings)
+
+     images_base64 = [pil_image_to_base64_str(img) for img in images]
+     return StableDiffusionResponse(
+         latency=round(context.latency, 2),
+         images=images_base64,
+     )
+
+
+ def start_web_server(port: int = 8000):
+     uvicorn.run(
+         app,
+         host="0.0.0.0",
+         port=port,
+     )
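
A minimal client sketch against a locally running server (python src/app.py -a). The payload fields mirror the LCMDiffusionSetting attributes set in src/app.py; the literal diffusion_task value is an assumption here, and the requests package is not in requirements.txt:

import base64

import requests  # assumed to be installed separately

payload = {
    "prompt": "a cute cat",
    "image_width": 512,
    "image_height": 512,
    "inference_steps": 1,
    "diffusion_task": "text_to_image",  # assumed value of DiffusionTask.text_to_image
}
resp = requests.post("http://localhost:8000/api/generate", json=payload)
resp.raise_for_status()
data = resp.json()
print(f"Latency: {data['latency']} sec")
with open("out-0.jpg", "wb") as f:
    f.write(base64.b64decode(data["images"][0]))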
src/backend/base64_image.py ADDED
@@ -0,0 +1,21 @@
+ from io import BytesIO
+ from base64 import b64encode, b64decode
+ from PIL import Image
+
+
+ def pil_image_to_base64_str(
+     image: Image,
+     format: str = "JPEG",
+ ) -> str:
+     buffer = BytesIO()
+     image.save(buffer, format=format)
+     buffer.seek(0)
+     img_base64 = b64encode(buffer.getvalue()).decode("utf-8")
+     return img_base64
+
+
+ def base64_image_to_pil(base64_str) -> Image:
+     image_data = b64decode(base64_str)
+     image_buffer = BytesIO(image_data)
+     image = Image.open(image_buffer)
+     return image
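
A quick round-trip check of these helpers:

from PIL import Image

from backend.base64_image import base64_image_to_pil, pil_image_to_base64_str

img = Image.new("RGB", (64, 64), "red")
encoded = pil_image_to_base64_str(img)  # JPEG by default
decoded = base64_image_to_pil(encoded)
assert decoded.size == (64, 64)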