Upload folder using huggingface_hub
- Photo of DTTaBA 13.jpg +3 -0
- Photo of DTTaBA 13.txt +1 -0
- Photo of DTTaBA 17.png +3 -0
- Photo of DTTaBA 17.txt +1 -0
- config.yaml +66 -0
- metadata.jsonl +4 -0
- requirements.txt +21 -0
- script.py +96 -0
Photo of DTTaBA 13.jpg
ADDED
(binary image file, stored with Git LFS)
Photo of DTTaBA 13.txt
ADDED
@@ -0,0 +1 @@
DTTaBA dog console seat and bag, viewed from behind, closed. The black mesh cover is zipped, providing ventilation. The gold straps are hanging, and the geometric pattern is clearly visible on the back panel. The structure of the bag is firm and well-defined.
Photo of DTTaBA 17.png
ADDED
(binary image file, stored with Git LFS)
Photo of DTTaBA 17.txt
ADDED
@@ -0,0 +1 @@
DTTaBA dog console seat and bag with a small brown dog sitting inside, facing slightly to the left. The interior is golden-brown, and the gold straps hang naturally at the sides. The plush bone-shaped cushion is attached to the upper side button, with both buttons on the side clearly visible.
config.yaml
ADDED
@@ -0,0 +1,66 @@
config:
  name: dalietta-dttaba-dog-console-seat-4
  process:
  - datasets:
    - cache_latents_to_disk: true
      caption_dropout_rate: 0.05
      caption_ext: txt
      folder_path: datasets/2dc1e075-43d3-4fe2-abff-5799eb5f4c51
      resolution:
      - 512
      - 768
      - 1024
      shuffle_tokens: false
    device: cuda:0
    model:
      assistant_lora_path: ostris/FLUX.1-schnell-training-adapter
      is_flux: true
      low_vram: true
      name_or_path: black-forest-labs/FLUX.1-schnell
      quantize: true
    network:
      linear: 16
      linear_alpha: 16
      type: lora
    sample:
      guidance_scale: 4
      height: 1024
      neg: ''
      prompts:
      - 'A woman carrying DTTaBA on her shoulder. '
      sample_every: 1000
      sample_steps: 4
      sampler: flowmatch
      seed: 42
      walk_seed: true
      width: 1024
    save:
      dtype: float16
      hf_private: true
      hf_repo_id: StoyanG/dalietta-dttaba-dog-console-seat-4
      max_step_saves_to_keep: 4
      push_to_hub: true
      save_every: 10000
    train:
      batch_size: 1
      disable_sampling: false
      dtype: bf16
      ema_config:
        ema_decay: 0.99
        use_ema: true
      gradient_accumulation_steps: 1
      gradient_checkpointing: true
      lr: 0.0004
      noise_scheduler: flowmatch
      optimizer: adamw8bit
      skip_first_sample: true
      steps: 1000
      train_text_encoder: false
      train_unet: true
    training_folder: output
    trigger_word: DTTaBA
    type: sd_trainer
job: extension
meta:
  name: '[name]'
  version: '1.0'
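
For orientation, here is a minimal sketch of reading this config back with PyYAML and pulling out the nested fields that script.py (further down in this commit) indexes into. It assumes config.yaml sits in the current working directory; the printed values are taken from the config above.

# Minimal sketch: load the training config and read the nested fields that
# script.py relies on. Assumes config.yaml is in the current directory.
import yaml

with open("config.yaml", "r") as f:
    config = yaml.safe_load(f)

process_cfg = config["config"]["process"][0]
print(process_cfg["datasets"][0]["folder_path"])  # datasets/2dc1e075-43d3-4fe2-abff-5799eb5f4c51
print(process_cfg["save"]["hf_repo_id"])          # StoyanG/dalietta-dttaba-dog-console-seat-4
print(process_cfg["trigger_word"])                # DTTaBA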
metadata.jsonl
ADDED
@@ -0,0 +1,4 @@
{"file_name": "Photo of DTTaBA 13.jpg", "prompt": "DTTaBA dog console seat and bag, open, placed on a table. The golden-brown plush interior is fully visible, showing the comfortable padded surface. The black, white, and gold geometric exterior contrasts with the soft interior. The bone-shaped front cushion is prominent, and the gold straps rest naturally on the sides."}
{"file_name": "Photo of DTTaBA 13.txt", "prompt": "DTTaBA dog console seat and bag with a small brown dog sitting inside, facing slightly to the left. The interior is golden-brown, and the gold straps hang naturally at the sides. The plush bone-shaped cushion is attached to the upper side button, with both buttons on the side clearly visible."}
{"file_name": "Photo of DTTaBA 17.png", "prompt": ""}
{"file_name": "Photo of DTTaBA 17.txt", "prompt": ""}
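
Each line above is a standalone JSON object pairing a file_name with a prompt; process_dataset in script.py later turns these pairs into per-image .txt caption files. A minimal parsing sketch, assuming the file is read from the current working directory:

# Minimal sketch: read metadata.jsonl into a file_name -> prompt mapping.
import json

with open("metadata.jsonl", "r") as f:
    captions = {
        item["file_name"]: item["prompt"]
        for item in (json.loads(line) for line in f if line.strip())
    }

print(captions["Photo of DTTaBA 13.jpg"][:60])  # start of the first caption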
requirements.txt
ADDED
@@ -0,0 +1,21 @@
git+https://github.com/huggingface/diffusers.git
lycoris-lora==1.8.3
flatten_json
pyyaml
oyaml
tensorboard
kornia
invisible-watermark
einops
toml
albumentations
pydantic
omegaconf
k-diffusion
open_clip_torch
prodigyopt
controlnet_aux==0.0.7
python-dotenv
lpips
pytorch_fid
optimum-quanto
script.py
ADDED
@@ -0,0 +1,96 @@
import os
from huggingface_hub import snapshot_download, delete_repo, metadata_update
import uuid
import json
import yaml
import subprocess

HF_TOKEN = os.environ.get("HF_TOKEN")
HF_DATASET = os.environ.get("DATA_PATH")


def download_dataset(hf_dataset_path: str):
    random_id = str(uuid.uuid4())
    snapshot_download(
        repo_id=hf_dataset_path,
        token=HF_TOKEN,
        local_dir=f"/tmp/{random_id}",
        repo_type="dataset",
    )
    return f"/tmp/{random_id}"


def process_dataset(dataset_dir: str):
    # dataset dir consists of images, config.yaml and a metadata.jsonl (optional) with fields: file_name, prompt
    # generate .txt files with the same name as the images with the prompt as the content
    # remove metadata.jsonl
    # return the path to the processed dataset

    # check if config.yaml exists
    if not os.path.exists(os.path.join(dataset_dir, "config.yaml")):
        raise ValueError("config.yaml does not exist")

    # check if metadata.jsonl exists
    if os.path.exists(os.path.join(dataset_dir, "metadata.jsonl")):
        metadata = []
        with open(os.path.join(dataset_dir, "metadata.jsonl"), "r") as f:
            for line in f:
                if len(line.strip()) > 0:
                    metadata.append(json.loads(line))
        for item in metadata:
            txt_path = os.path.join(dataset_dir, item["file_name"])
            txt_path = txt_path.rsplit(".", 1)[0] + ".txt"
            with open(txt_path, "w") as f:
                f.write(item["prompt"])

        # remove metadata.jsonl
        os.remove(os.path.join(dataset_dir, "metadata.jsonl"))

    with open(os.path.join(dataset_dir, "config.yaml"), "r") as f:
        config = yaml.safe_load(f)

    # update config with new dataset
    config["config"]["process"][0]["datasets"][0]["folder_path"] = dataset_dir

    with open(os.path.join(dataset_dir, "config.yaml"), "w") as f:
        yaml.dump(config, f)

    return dataset_dir


def run_training(hf_dataset_path: str):

    dataset_dir = download_dataset(hf_dataset_path)
    dataset_dir = process_dataset(dataset_dir)

    # run training
    commands = "git clone https://github.com/ostris/ai-toolkit.git ai-toolkit && cd ai-toolkit && git submodule update --init --recursive"
    subprocess.run(commands, shell=True)

    commands = f"python run.py {os.path.join(dataset_dir, 'config.yaml')}"
    process = subprocess.Popen(commands, shell=True, cwd="ai-toolkit", env=os.environ)

    return process, dataset_dir


if __name__ == "__main__":
    process, dataset_dir = run_training(HF_DATASET)
    process.wait()  # Wait for the training process to finish

    with open(os.path.join(dataset_dir, "config.yaml"), "r") as f:
        config = yaml.safe_load(f)
    repo_id = config["config"]["process"][0]["save"]["hf_repo_id"]

    metadata = {
        "tags": [
            "autotrain",
            "spacerunner",
            "text-to-image",
            "flux",
            "lora",
            "diffusers",
            "template:sd-lora",
        ]
    }
    metadata_update(repo_id, metadata, token=HF_TOKEN, repo_type="model", overwrite=True)
    delete_repo(HF_DATASET, token=HF_TOKEN, repo_type="dataset", missing_ok=True)
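
As a usage note, a minimal sketch of driving this script from Python rather than through its __main__ block, assuming HF_TOKEN is already exported with write access; the dataset repo id shown here is hypothetical:

# Minimal usage sketch: set the environment variable the script expects, then
# reuse run_training() directly. The dataset repo id below is hypothetical.
import os

os.environ["DATA_PATH"] = "StoyanG/example-training-dataset"  # hypothetical
# os.environ["HF_TOKEN"] must already hold a token with write access

from script import run_training

process, dataset_dir = run_training(os.environ["DATA_PATH"])
process.wait()  # block until the ai-toolkit training run finishes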