Upload folder using huggingface_hub
- epoch10/16_03_i2v_140_tgst1k_config.toml +60 -0
- epoch10/adapter_config.json +38 -0
- epoch10/adapter_model.safetensors +3 -0
- epoch15/16_03_i2v_140_tgst1k_config.toml +60 -0
- epoch15/adapter_config.json +38 -0
- epoch15/adapter_model.safetensors +3 -0
- epoch5/16_03_i2v_140_tgst1k_config.toml +60 -0
- epoch5/adapter_config.json +38 -0
- epoch5/e5_16_03_i2v_140_tgst1k_model.safetensors +3 -0
- trigger.txt +0 -0
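
The commit title indicates the folder was pushed with the huggingface_hub library. A minimal sketch of such an upload is shown below; the repo ID and local folder path are illustrative placeholders, not values taken from this commit.

# Sketch: upload a local checkpoint folder with huggingface_hub.
# repo_id and folder_path are placeholders; authentication comes from a saved token.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/workspace/output_models/16_03_i2v_140_tgst1k_model",  # placeholder path
    repo_id="your-username/16_03_i2v_140_tgst1k_model",                 # placeholder repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)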
epoch10/16_03_i2v_140_tgst1k_config.toml
ADDED
@@ -0,0 +1,60 @@
+output_dir = '/workspace/output_models/16_03_i2v_140_tgst1k_model'
+
+# Dataset config file.
+dataset = 'examples/16_03_i2v_140_tgst1k_dataset.toml'
+
+# training settings
+
+epochs = 30
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 1
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+# more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+save_every_n_epochs = 5
+checkpoint_every_n_minutes = 20
+activation_checkpointing = true
+# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = 'single_beginning'
+
+[model]
+type = 'wan'
+ckpt_path = '/workspace/models/wan21i2v'
+dtype = 'bfloat16'
+timestep_sample_method = 'logit_normal'
+
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+
+[optimizer]
+type = 'adamw_optimi'
+lr = 1e-4
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
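
This TOML is the training configuration saved alongside the checkpoint (the key names appear to match the diffusion-pipe trainer's config format). With epochs = 30 and save_every_n_epochs = 5, intermediate adapters are written every 5 epochs, which is why epoch5/epoch10/epoch15 folders appear in this upload. A small sketch for programmatically inspecting the key hyperparameters (Python 3.11+ for tomllib; the local path is illustrative):

# Sketch: read the saved training config and print a few key settings.
import tomllib

with open("epoch10/16_03_i2v_140_tgst1k_config.toml", "rb") as f:  # illustrative path
    cfg = tomllib.load(f)

print("epochs:", cfg["epochs"])                    # 30
print("save every:", cfg["save_every_n_epochs"])   # 5 -> epoch5/epoch10/epoch15 snapshots
print("model type:", cfg["model"]["type"])         # 'wan'
print("LoRA rank:", cfg["adapter"]["rank"])        # 32
print("learning rate:", cfg["optimizer"]["lr"])    # 1e-4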
epoch10/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v",
+    "ffn.0",
+    "k_img",
+    "ffn.2",
+    "o",
+    "q",
+    "v_img",
+    "k"
+  ],
+  "task_type": null,
+  "use_dora": false,
+  "use_rslora": false
+}
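
adapter_config.json follows the PEFT LoraConfig schema: rank r = 32 with lora_alpha = 32 gives an effective LoRA scale of alpha/r = 1.0, applied to the attention projections (q, k, v, o, k_img, v_img) and the feed-forward layers (ffn.0, ffn.2). A minimal sketch for inspecting it (the local path is illustrative):

# Sketch: inspect the PEFT-style adapter config shipped with each epoch folder.
import json

with open("epoch10/adapter_config.json") as f:  # illustrative path
    adapter_cfg = json.load(f)

scale = adapter_cfg["lora_alpha"] / adapter_cfg["r"]  # 32 / 32 = 1.0
print("rank:", adapter_cfg["r"])
print("alpha:", adapter_cfg["lora_alpha"])
print("effective scale:", scale)
print("target modules:", adapter_cfg["target_modules"])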
epoch10/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0726f743a7ecff916e4d62b0ecd055e01afb146b9f7319842d980ed9b95c838a
+size 359257680
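
The .safetensors entries shown in the diff are Git LFS pointer files: the commit records only the object hash and size (~359 MB), while the actual weights live in LFS storage. A hedged sketch of fetching and loading one adapter from the Hub (the repo_id is a placeholder for this repository):

# Sketch: download one adapter checkpoint and load its tensors.
# hf_hub_download resolves the LFS pointer to the real file in the local cache.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

path = hf_hub_download(
    repo_id="your-username/16_03_i2v_140_tgst1k_model",  # placeholder repo
    filename="epoch10/adapter_model.safetensors",
)
state_dict = load_file(path)  # dict of LoRA weight tensors
print(len(state_dict), "tensors loaded")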
epoch15/16_03_i2v_140_tgst1k_config.toml
ADDED
@@ -0,0 +1,60 @@
+output_dir = '/workspace/output_models/16_03_i2v_140_tgst1k_model'
+
+# Dataset config file.
+dataset = 'examples/16_03_i2v_140_tgst1k_dataset.toml'
+
+# training settings
+
+epochs = 30
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 1
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+# more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+save_every_n_epochs = 5
+checkpoint_every_n_minutes = 20
+activation_checkpointing = true
+# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = 'single_beginning'
+
+[model]
+type = 'wan'
+ckpt_path = '/workspace/models/wan21i2v'
+dtype = 'bfloat16'
+timestep_sample_method = 'logit_normal'
+
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+
+[optimizer]
+type = 'adamw_optimi'
+lr = 1e-4
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
epoch15/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v",
+    "ffn.0",
+    "k_img",
+    "ffn.2",
+    "o",
+    "q",
+    "v_img",
+    "k"
+  ],
+  "task_type": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch15/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b527c0d699a882950cccf761b248a16ad20f204222c1f13ef559ddbf1806194c
+size 359257680
epoch5/16_03_i2v_140_tgst1k_config.toml
ADDED
@@ -0,0 +1,60 @@
+output_dir = '/workspace/output_models/16_03_i2v_140_tgst1k_model'
+
+# Dataset config file.
+dataset = 'examples/16_03_i2v_140_tgst1k_dataset.toml'
+
+# training settings
+
+epochs = 30
+micro_batch_size_per_gpu = 1
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 1
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+# more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+save_every_n_epochs = 5
+checkpoint_every_n_minutes = 20
+activation_checkpointing = true
+# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+caching_batch_size = 1
+steps_per_print = 1
+video_clip_mode = 'single_beginning'
+
+[model]
+type = 'wan'
+ckpt_path = '/workspace/models/wan21i2v'
+dtype = 'bfloat16'
+timestep_sample_method = 'logit_normal'
+
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+
+[optimizer]
+type = 'adamw_optimi'
+lr = 1e-4
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
epoch5/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v",
+    "ffn.0",
+    "k_img",
+    "ffn.2",
+    "o",
+    "q",
+    "v_img",
+    "k"
+  ],
+  "task_type": null,
+  "use_dora": false,
+  "use_rslora": false
+}
epoch5/e5_16_03_i2v_140_tgst1k_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07e4bc4b31757793d12661177bd85ecd02817c15c63dd2fad410a70ebc17da2c
+size 359257680
trigger.txt
ADDED
File without changes