naumnaum committed
Commit 8304694 · verified · Parent: c0ce987

Upload folder using huggingface_hub

16_03_i2v_140_tgst1k_loras/epoch20/16_03_i2v_140_tgst1k_config.toml ADDED
@@ -0,0 +1,60 @@
+ output_dir = '/workspace/output_models/16_03_i2v_140_tgst1k_model'
+
+ # Dataset config file.
+ dataset = 'examples/16_03_i2v_140_tgst1k_dataset.toml'
+
+ # training settings
+
+ epochs = 30
+ micro_batch_size_per_gpu = 1
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ save_every_n_epochs = 5
+ checkpoint_every_n_minutes = 20
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 1
+ steps_per_print = 1
+ video_clip_mode = 'single_beginning'
+
+ [model]
+ type = 'wan'
+ ckpt_path = '/workspace/models/wan21i2v'
+ dtype = 'bfloat16'
+ timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+
+ [optimizer]
+ type = 'adamw_optimi'
+ lr = 1e-4
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
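
For a quick sanity check of a config like the one above, the hyperparameters can be read programmatically. The following is a minimal sketch, assuming Python 3.11+ (for the standard-library tomllib) and that the config has been downloaded to the hypothetical local path shown; it parses the TOML and derives the effective per-GPU batch size from the values it contains.

# Minimal sketch: parse the training config and report key hyperparameters.
# Assumes Python 3.11+ (tomllib) and a local copy at the hypothetical path below.
import tomllib

CONFIG_PATH = "16_03_i2v_140_tgst1k_loras/epoch20/16_03_i2v_140_tgst1k_config.toml"

with open(CONFIG_PATH, "rb") as f:
    cfg = tomllib.load(f)

# Effective batch size per GPU = micro-batch size x gradient accumulation steps.
effective_batch = cfg["micro_batch_size_per_gpu"] * cfg["gradient_accumulation_steps"]

print("epochs:", cfg["epochs"])
print("effective batch size per GPU:", effective_batch)
print("LoRA rank:", cfg["adapter"]["rank"], "| dtype:", cfg["adapter"]["dtype"])
print("optimizer:", cfg["optimizer"]["type"], "| lr:", cfg["optimizer"]["lr"])
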
16_03_i2v_140_tgst1k_loras/epoch20/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v",
+ "ffn.0",
+ "k_img",
+ "ffn.2",
+ "o",
+ "q",
+ "v_img",
+ "k"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
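
The adapter config above is in the PEFT LoRA format. As a rough illustration of how its fields combine, the sketch below (plain json, no PEFT dependency assumed; the path is hypothetical) reads the file and reports the scaling factor lora_alpha / r that is applied to the LoRA update on each targeted projection. Since use_rslora is false, plain alpha-over-rank scaling applies, which works out to 1.0 here.

# Minimal sketch: inspect the PEFT-style adapter config.
# The path below is hypothetical; point it at the downloaded folder.
import json

ADAPTER_DIR = "16_03_i2v_140_tgst1k_loras/epoch20"

with open(f"{ADAPTER_DIR}/adapter_config.json") as f:
    adapter_cfg = json.load(f)

# LoRA adds (lora_alpha / r) * B @ A on top of each targeted weight matrix.
scaling = adapter_cfg["lora_alpha"] / adapter_cfg["r"]

print("rank r:", adapter_cfg["r"])
print("lora_alpha:", adapter_cfg["lora_alpha"])
print("scaling (lora_alpha / r):", scaling)  # 32 / 32 = 1.0 for this adapter
print("target modules:", ", ".join(adapter_cfg["target_modules"]))
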
16_03_i2v_140_tgst1k_loras/epoch20/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50f314040fb9610c7bf33696fb205bdb7006c438f20fe34afd9bca75a3d421a5
+ size 359257680
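
The entry above is a Git LFS pointer rather than the weights themselves; the actual adapter_model.safetensors is fetched from LFS storage. After downloading, the file can be checked against the oid and size recorded in the pointer. A minimal sketch, with a hypothetical local path:

# Minimal sketch: verify a downloaded LFS object against its pointer metadata.
import hashlib
import os

WEIGHTS_PATH = "16_03_i2v_140_tgst1k_loras/epoch20/adapter_model.safetensors"
EXPECTED_SHA256 = "50f314040fb9610c7bf33696fb205bdb7006c438f20fe34afd9bca75a3d421a5"
EXPECTED_SIZE = 359257680  # bytes, from the pointer's "size" field

sha = hashlib.sha256()
with open(WEIGHTS_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(WEIGHTS_PATH) == EXPECTED_SIZE, "size mismatch"
assert sha.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("LFS object matches the pointer metadata")
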
16_03_i2v_140_tgst1k_loras/epoch25/16_03_i2v_140_tgst1k_config.toml ADDED
@@ -0,0 +1,60 @@
+ output_dir = '/workspace/output_models/16_03_i2v_140_tgst1k_model'
+
+ # Dataset config file.
+ dataset = 'examples/16_03_i2v_140_tgst1k_dataset.toml'
+
+ # training settings
+
+ epochs = 30
+ micro_batch_size_per_gpu = 1
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ save_every_n_epochs = 5
+ checkpoint_every_n_minutes = 20
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 1
+ steps_per_print = 1
+ video_clip_mode = 'single_beginning'
+
+ [model]
+ type = 'wan'
+ ckpt_path = '/workspace/models/wan21i2v'
+ dtype = 'bfloat16'
+ timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+
+ [optimizer]
+ type = 'adamw_optimi'
+ lr = 1e-4
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
16_03_i2v_140_tgst1k_loras/epoch25/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v",
+ "ffn.0",
+ "k_img",
+ "ffn.2",
+ "o",
+ "q",
+ "v_img",
+ "k"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
16_03_i2v_140_tgst1k_loras/epoch25/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d64467f293333c7540ebc50ba88d3cf6e69de96732deb645f68beda0a5a0d1b
+ size 359257680
16_03_i2v_140_tgst1k_loras/epoch30/16_03_i2v_140_tgst1k_config.toml ADDED
@@ -0,0 +1,60 @@
+ output_dir = '/workspace/output_models/16_03_i2v_140_tgst1k_model'
+
+ # Dataset config file.
+ dataset = 'examples/16_03_i2v_140_tgst1k_dataset.toml'
+
+ # training settings
+
+ epochs = 30
+ micro_batch_size_per_gpu = 1
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ eval_gradient_accumulation_steps = 1
+
+ # misc settings
+
+ save_every_n_epochs = 5
+ checkpoint_every_n_minutes = 20
+ activation_checkpointing = true
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 1
+ steps_per_print = 1
+ video_clip_mode = 'single_beginning'
+
+ [model]
+ type = 'wan'
+ ckpt_path = '/workspace/models/wan21i2v'
+ dtype = 'bfloat16'
+ timestep_sample_method = 'logit_normal'
+
+ [adapter]
+ type = 'lora'
+ rank = 32
+ # Dtype for the LoRA weights you are training.
+ dtype = 'bfloat16'
+
+ [optimizer]
+ type = 'adamw_optimi'
+ lr = 1e-4
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
16_03_i2v_140_tgst1k_loras/epoch30/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v",
+ "ffn.0",
+ "k_img",
+ "ffn.2",
+ "o",
+ "q",
+ "v_img",
+ "k"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
16_03_i2v_140_tgst1k_loras/epoch30/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e772dfc14bd645efc151aac397968755bbe320e201b829752aacf3f3cbd4d406
+ size 359257680
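
Once the actual weights are fetched (each epoch's adapter_model.safetensors is roughly 359 MB per the pointers above), the tensors can be inspected without loading the base model. A minimal sketch, assuming the safetensors and torch packages are installed and the epoch30 folder is available at the hypothetical path below:

# Minimal sketch: list the LoRA tensors in one checkpoint and count parameters.
# Assumes `pip install safetensors torch`; the path is hypothetical.
from safetensors import safe_open

WEIGHTS_PATH = "16_03_i2v_140_tgst1k_loras/epoch30/adapter_model.safetensors"

total_params = 0
with safe_open(WEIGHTS_PATH, framework="pt") as f:
    names = list(f.keys())
    for name in names:
        total_params += f.get_tensor(name).numel()

print(f"{len(names)} tensors, {total_params:,} LoRA parameters")
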
16_03_i2v_140_tgst1k_loras/trigger.txt CHANGED
@@ -1 +0,0 @@
- tgst1k style