diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,18 +1,121 @@
import gradio as gr
-
-def greet(name):
- return "Hello " + name + "!"
+import os
+from pathlib import Path
+import argparse
+import shutil
+from train_dreambooth import run_training
css = '''
.instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
.arrow{position: absolute;top: 0;right: -8px;margin-top: -8px !important}
#component-4, #component-3, #component-10{min-height: 0}
'''
-def swap_values_files(files):
- if files:
- return(len(files)*100)
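+# "mix.zip" bundles class/regularization images; they are unpacked here and later used
+# as class_data_dir for prior preservation when a `person` concept is trained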
+shutil.unpack_archive("mix.zip", "mix")
+maximum_concepts = 3
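+# Counts the uploaded images across all concept upload fields, prints the instance
+# prompt recovered from each filename (digits, underscores and parentheses stripped)
+# and returns a suggested number of training steps (200 per image).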
+def swap_values_files(*total_files):
+    file_counter = 0
+    for files in total_files:
+        if(files):
+            for file in files:
+                filename = Path(file.orig_name).stem
+                pt = ''.join([i for i in filename if not i.isdigit()])
+                pt = pt.replace("_", " ")
+                pt = pt.replace("(", "")
+                pt = pt.replace(")", "")
+                instance_prompt = pt
+                print(instance_prompt)
+                file_counter += 1
+    training_steps = (file_counter*200)
+    return training_steps
+
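+# Swaps the instruction text shown in the UI for the chosen concept type and also
+# returns a naming tip and a default `freeze_for` value.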
+def swap_text(option):
+ mandatory_liability = "You must have the right to do so and you are liable for the images you use"
+ if(option == "object"):
+ instance_prompt_example = "cttoy"
+ freeze_for = 50
+ return [f"You are going to train `object`(s), upload 5-10 images of each object you are planning on training on from different angles/perspectives. {mandatory_liability}:", '''''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here)", freeze_for]
+ elif(option == "person"):
+ instance_prompt_example = "julcto"
+ freeze_for = 100
+ return [f"You are going to train a `person`(s), upload 10-20 images of each person you are planning on training on from different angles/perspectives. {mandatory_liability}:", '''
''', f"You should name the files with a unique word that represent your concept (like `{instance_prompt_example}` in this example). You can train multiple concepts as well.", freeze_for]
+ elif(option == "style"):
+ instance_prompt_example = "mspolstyll"
+ freeze_for = 10
+ return [f"You are going to train a `style`, upload 10-20 images of the style you are planning on training on. Name the files with the words you would like {mandatory_liability}:", '''
''', f"You should name your files with a unique word that represent your concept (as `{instance_prompt_example}` for example). You can train multiple concepts as well.", freeze_for]
+
+def train(*inputs):
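+    # `inputs` arrives flattened from the UI: per-concept values first (prompts at
+    # offset maximum_concepts, file uploads at offset maximum_concepts*2), then the
+    # concept type (inputs[-4]), step count (inputs[-3]), text-encoder percentage
+    # (inputs[-2]) and the custom-settings flag (inputs[-1]) at the end.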
+ file_counter = 0
+ for i, input in enumerate(inputs):
+ if(i < maximum_concepts-1):
+ if(input):
+ os.makedirs('instance_images',exist_ok=True)
+ files = inputs[i+(maximum_concepts*2)]
+ prompt = inputs[i+maximum_concepts]
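+                # copy every upload into instance_images/ as "<prompt> (N).jpg" so the
+                # prompt can later be recovered from the filename (image_captions_filename=True)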
+ for j, file in enumerate(files):
+ shutil.copy(file.name, f'instance_images/{prompt} ({j+1}).jpg')
+ file_counter += 1
+
+ uses_custom = inputs[-1]
+ if(uses_custom):
+ Training_Steps = int(inputs[-3])
+ Train_text_encoder_for = int(inputs[-2])
+ stptxt = int((Training_Steps*Train_text_encoder_for)/100)
else:
- return 400
+        Training_Steps = file_counter*200
+        # assumed default: reuse the text-encoder percentage field so stptxt is defined here too
+        stptxt = int((Training_Steps*int(inputs[-2]))/100)
+ if(inputs[-4] == "person"):
+ class_data_dir = "mix"
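+        # hyperparameters for the `person` case as a Namespace mirroring train_dreambooth's
+        # CLI flags; prior preservation runs against the class images in "mix"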
+ args_txt_encoder = argparse.Namespace(
+ image_captions_filename = True,
+ train_text_encoder = True,
+ pretrained_model_name_or_path="./stable-diffusion-v1-5",
+ instance_data_dir="instance_images",
+ class_data_dir=class_data_dir,
+ output_dir="output_model",
+ with_prior_preservation=True,
+ prior_loss_weight=1.0,
+ instance_prompt="",
+ seed=42,
+ resolution=512,
+ mixed_precision="fp16",
+ train_batch_size=1,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=True,
+ use_8bit_adam=True,
+ learning_rate=2e-6,
+ lr_scheduler="polynomial",
+ lr_warmup_steps=0,
+ max_train_steps=Training_Steps,
+ num_class_images=200
+ )
+ elif(inputs[-4] == "object"):
+ class_data_dir = None
+ elif(inputs[-4] == "style"):
+ class_data_dir = None
+
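+    # arguments handed to run_training; stop_text_encoder_training limits how many of the
+    # Training_Steps also update the text encoder (stptxt computed above)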
+ args = argparse.Namespace(
+ image_captions_filename = True,
+ train_text_encoder = True,
+ stop_text_encoder_training = stptxt,
+        save_n_steps = 0,
+ dump_only_text_encoder = True,
+ pretrained_model_name_or_path = "./stable-diffusion-v1-5",
+ instance_data_dir="instance_images",
+ class_data_dir=class_data_dir,
+ output_dir="output_model",
+ instance_prompt="",
+ seed=42,
+ resolution=512,
+ mixed_precision="fp16",
+ train_batch_size=1,
+ gradient_accumulation_steps=1,
+ use_8bit_adam=True,
+ learning_rate=2e-6,
+ lr_scheduler="polynomial",
+ lr_warmup_steps = 0,
+ max_train_steps=Training_Steps,
+ )
+ run_training(args)
+    shutil.rmtree('instance_images')  # remove the upload folder and its contents
with gr.Blocks(css=css) as demo:
with gr.Box():
# You can remove this part here for your local clone
@@ -20,29 +123,78 @@ with gr.Blocks(css=css) as demo:
For it to work, you have to duplicate the Space and run it on your own profile where a (paid) private GPU will be attributed to it during runtime. It will cost you < US$1 to train a model on default settings! 🤑
-