# Output path for training runs. Each training run makes a new directory in here.
output_dir = '/root/diffusion-pipe/output/tmnt_v1_low_noise'
# Dataset config file.
dataset = '/root/diffusion-pipe/my_configs/dataset_tmnt.toml'
# You can have separate eval datasets. Give them a name for Tensorboard metrics.
# eval_datasets = [
#     {name = 'something', config = 'path/to/eval_dataset.toml'},
# ]

# training settings

# I usually set this to a really high value because I don't know how long I want to train.
epochs = 1000
# Batch size of a single forward/backward pass for one GPU.
micro_batch_size_per_gpu = 2
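# Separate micro batch size for image-only batches. My understanding is that images take far less memory
# than video clips, so they can use a larger batch size when the dataset mixes images and videos.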
image_micro_batch_size_per_gpu = 4
# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
pipeline_stages = 1
# Number of micro-batches sent through the pipeline for each training step.
# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs sit idle instead of overlapping computation).
gradient_accumulation_steps = 1
# Grad norm clipping.
gradient_clipping = 1.0
# Learning rate warmup.
warmup_steps = 100

# eval settings

eval_every_n_epochs = 1
eval_before_first_step = true
# Might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
# more dropped images. This usually doesn't matter for training, but the eval set is much smaller, so there it can matter.
eval_micro_batch_size_per_gpu = 1
eval_gradient_accumulation_steps = 1

# misc settings

# Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
save_every_n_epochs = 100
save_every_n_steps = 250
# Can checkpoint the training state every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
#checkpoint_every_n_epochs = 1
checkpoint_every_n_minutes = 30
# Always enable this unless you have a huge amount of VRAM. 'unsloth' is a checkpointing variant that uses less VRAM than plain true.
activation_checkpointing = 'unsloth'
# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
partition_method = 'parameters'
# dtype for saving the LoRA or model, if different from training dtype.
save_dtype = 'bfloat16'
# Batch size for caching latents and text embeddings. Increasing it can lead to higher GPU utilization during the caching phase but uses more memory.
caching_batch_size = 16
# How often Deepspeed logs to console.
steps_per_print = 1
# How to extract video clips for training from a single input video file.
# The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
# number of frames for that bucket.
# single_beginning: one clip starting at the beginning of the video
# single_middle: one clip from the middle of the video (cutting off the start and end equally)
# multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
# Default is single_middle.
video_clip_mode = 'single_middle'
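# As I understand it, blocks_to_swap offloads this many transformer blocks to CPU memory to cut VRAM usage,
# at some cost in training speed. Left commented out here.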
# blocks_to_swap = 10

[model]
type = 'wan'
# Hunyuan Video can be loaded entirely from the ckpt path set up for the official inference scripts; that example is left commented out below.
# This run loads Wan2.2-T2V-A14B and points transformer_path at the low noise expert.
#ckpt_path = '/home/anon/HunyuanVideo/ckpts'
ckpt_path = '/root/diffusion-pipe/imagegen_models/wan/Wan2.2-T2V-A14B'
transformer_path = '/root/diffusion-pipe/imagegen_models/wan/Wan2.2-T2V-A14B/low_noise_model'
# Or you can load the model by pointing to all the ComfyUI files.
# transformer_path = '/notebooks/diffusion-pipe/imagegen_models/hunyuan_video_720_cfgdistill_fp8_e4m3fn.safetensors'
# vae_path = '/notebooks/diffusion-pipe/imagegen_models/hunyuan_video_vae_bf16.safetensors'
# llm_path = '/notebooks/diffusion-pipe/imagegen_models/llava-llama-3-8b-text-encoder-tokenizer'
# clip_path = '/notebooks/diffusion-pipe/imagegen_models/clip-vit-large-patch14'
# Base dtype used for all models.
dtype = 'bfloat16'
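# Keep the frozen transformer weights in float8 to save VRAM; as far as I understand this option,
# compute and the LoRA weights still use bfloat16. Remove this line to keep the transformer in the base dtype.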
transformer_dtype = 'float8'
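# Restrict sampled training timesteps to the range this expert handles. Wan2.2 A14B splits denoising between
# a high noise and a low noise model; since this run trains the low noise expert, timesteps are capped at 0.875
# (which I believe matches the official Wan2.2 boundary between the two experts).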
min_t = 0
max_t = 0.875
# How to sample timesteps to train on. Can be logit_normal or uniform.
timestep_sample_method = 'logit_normal'

[adapter]
type = 'lora'
rank = 64
# Dtype for the LoRA weights you are training.
dtype = 'bfloat16'
# You can initialize the LoRA weights from a previously trained LoRA.
#init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
# init_from_existing = '/root/diffusion-pipe/output/akira_v1_low_noise/20250930_15-55-20/step13000'

[optimizer]
# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
# Look at train.py for other options. You could also easily edit the file and add your own.
type = 'adamw_optimi'
# type = 'adamw8bitkahan'
lr = 2e-5
# betas = [0.9, 0.99]
# weight_decay = 0.01
# eps = 1e-8

# type = 'automagic'
# lr = 1e-6            # Starting learning rate
# weight_decay = 0.001  # Weight decay
# lr_bump = 2e-6        # Amount to bump LR when adjusting