naumnaum committed
Commit 038cee4 · verified · 1 Parent(s): e4da9da

Upload folder using huggingface_hub

wan_t2v_tgst1k_epoch110/adapter_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": null,
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": false,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k",
+     "o",
+     "ffn.2",
+     "ffn.0",
+     "v",
+     "q"
+   ],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
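
Note (not part of the uploaded file): the config above is a standard PEFT LoRA adapter config, rank 32 with alpha 32 (an effective scale of alpha/r = 1.0), targeting the attention projections (q, k, v, o) and, presumably, the two FFN linear layers (ffn.0, ffn.2) of the Wan transformer blocks. A minimal sketch of reading it back and summarizing those settings, assuming a local copy of the folder added in this commit:

```python
import json

# Path follows the folder layout added in this commit.
with open("wan_t2v_tgst1k_epoch110/adapter_config.json") as f:
    cfg = json.load(f)

# LoRA's effective scale is lora_alpha / r (here 32 / 32 = 1.0).
print(f"peft_type={cfg['peft_type']}  r={cfg['r']}  alpha={cfg['lora_alpha']}  "
      f"scale={cfg['lora_alpha'] / cfg['r']}")
print("target modules:", ", ".join(sorted(cfg["target_modules"])))
```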
wan_t2v_tgst1k_epoch110/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72622a518f8c0a2ff70fc15048bc15fb3d4353344343d0833b844d7d4dbb9954
+ size 306807976
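
Note (not part of the uploaded file): the three lines above are a git-lfs pointer, not the weights themselves; the actual ~307 MB safetensors file lives in LFS storage. A small sketch of verifying a downloaded copy against the sha256 oid recorded in the pointer:

```python
import hashlib

# In a plain (non-LFS) checkout this file is just the 3-line pointer above;
# pull it with git-lfs or download it from the Hub before running this check.
path = "wan_t2v_tgst1k_epoch110/adapter_model.safetensors"
expected = "72622a518f8c0a2ff70fc15048bc15fb3d4353344343d0833b844d7d4dbb9954"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("sha256 matches pointer oid:", h.hexdigest() == expected)
```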
wan_t2v_tgst1k_epoch110/wan_t2v_config.toml ADDED
@@ -0,0 +1,81 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/workspace/output_models/run_2_t2v'
+
+ # Dataset config file.
+ dataset = 'examples/wan_t2v_dataset.toml'
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 1000
+ # Batch size of a single forward/backward pass for one GPU.
+ micro_batch_size_per_gpu = 1
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 4
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 2
+ checkpoint_every_n_minutes = 20
+ # Always set to true unless you have a huge amount of VRAM.
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 1
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+ # default is single_beginning
+ video_clip_mode = 'single_beginning'
+
+ [model]
+ type = 'wan'
+ ckpt_path = '/workspace/models/wan21t2v'
+ dtype = 'bfloat16'
+ # You can use fp8 for the transformer when training LoRA.
+ #transformer_dtype = 'float8'
+ timestep_sample_method = 'logit_normal'
+
+ # For models that support full fine tuning, simply delete or comment out the [adapter] table to FFT.
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 5e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
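
Note (not part of the uploaded file): as a worked example of how the batch-size settings above combine, with pipeline_stages = 1 every GPU acts as a data-parallel replica, so the number of samples consumed per optimizer step is micro_batch_size_per_gpu × gradient_accumulation_steps × number of GPUs. A small sketch that parses the config and computes this, assuming a single-GPU run (the GPU count is set at launch time, not recorded in the file):

```python
import tomllib  # stdlib in Python 3.11+; use the tomli package on older versions

with open("wan_t2v_tgst1k_epoch110/wan_t2v_config.toml", "rb") as f:
    cfg = tomllib.load(f)

num_gpus = 1  # assumption: adjust to however many GPUs the run was launched with
# With pipeline_stages = 1, every GPU is a data-parallel replica.
replicas = num_gpus // cfg["pipeline_stages"]
samples_per_step = (cfg["micro_batch_size_per_gpu"]
                    * cfg["gradient_accumulation_steps"]
                    * replicas)
print("samples per optimizer step:", samples_per_step)  # 1 * 4 * 1 = 4 on one GPU
```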