data_cfgs:
  eval_data_files: {}
  eval_datasets: {}
  eval_optional_args: []
  eval_size: {}
  eval_split: {}
  eval_subset: {}
  eval_template: {}
  train_data_files: pre_tokenized/train_no_pixel_values.pt
  train_datasets: /aifs4su/yaodong/hantao/datasets/MMInstruct-GPT4V_mistral-7b_cosi_cut/merged//top1-100_valid
  train_name: {}
  train_optional_args: []
  train_size: {}
  train_split: {}
  train_template: {}
logger_cfgs:
  cache_dir: {}
  log_project: align-anything
  log_run_name: sft
  log_type: wandb
  output_dir: ../outputs/chameleon_sft/top1-100_valid
  save_total_limit: 12
model_cfgs:
  model_max_length: 4096
  model_name_or_path: /aifs4su/yaodong/hantao/models/chameleon-7b-hf
  trust_remote_code: true
special_tokens: {}
train_cfgs:
  adam_betas:
  - 0.9
  - 0.95
  adam_epsilon: 1.0e-08
  bf16: true
  ds_cfgs: ds_z3_config.json
  epochs: 3
  eval_interval: 1000
  eval_strategy: steps
  fp16: false
  freeze_language_model: false
  gradient_accumulation_steps: 16
  gradient_checkpointing: true
  learning_rate: 2.0e-05
  load_checkpoint: false
  lr_scheduler_type: cosine
  lr_warmup_ratio: 0.03
  max_grad_norm: 1.0
  per_device_eval_batch_size: 1
  per_device_train_batch_size: 1
  save_checkpoint: true
  seed: 42
  weight_decay: 0.0
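Below is a minimal sketch (not part of align-anything itself) of how one might load this config with PyYAML and compute the effective global batch size from per_device_train_batch_size and gradient_accumulation_steps. The file name "chameleon_sft_config.yaml" and the GPU count are assumptions for illustration only.

```python
# Minimal sketch, assuming the YAML above is saved as "chameleon_sft_config.yaml"
# and the job runs on NUM_GPUS devices (both are illustrative assumptions).
import yaml

NUM_GPUS = 8  # assumed world size; adjust to the actual launch setup

with open("chameleon_sft_config.yaml") as f:
    cfg = yaml.safe_load(f)

train = cfg["train_cfgs"]
# Effective global batch size = per-device batch * grad accumulation * world size.
effective_batch = (
    train["per_device_train_batch_size"]
    * train["gradient_accumulation_steps"]
    * NUM_GPUS
)
print(f"learning rate: {train['learning_rate']}")
print(f"effective global batch size: {effective_batch}")
```

With the values above (batch size 1, 16 accumulation steps) and an assumed 8 GPUs, this would report an effective global batch size of 128.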