Training in progress, step 1600
Commit 7f4b6c3 (verified)
mode: train
experiment:
  dataset_size: 6000
  dataset_seed: 1234
  test_size: 0.1
hf_token: ${oc.env:HF_TOKEN,null}
output:
  root_path: ${oc.env:ROOT_PATH}
  run_name: ${model.trim}_${task.name}_${algorithm.name}_${algorithm.training.curriculum_schedule}_${algorithm.training.scheduler_params.mu_exp}_${algorithm.training.scheduler_params.sigma}_${algorithm.training.scheduler_params.min_prob}_${algorithm.training.max_steps}
lora:
  r: 32
  alpha: 64
  dropout: 0.1
  target_modules:
    - q_proj
    - v_proj
  task_type: CAUSAL_LM
occupy_gpu_memory: false
occupy_gpu_memory_gb: 50
gpu_device: cuda:0
model:
  family: meta-llama
  trim: Llama-3.2-3B-Instruct
  name: ${model.family}/${model.trim}
  trust_remote_code: true
  torch_dtype: bfloat16
  attn_implementation: flash_attention_2
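
A minimal sketch (not the author's training script) of how the model and lora sections above could be consumed with transformers and peft. The model identifier and hyperparameters are taken from the config; the variable names are illustrative, and the sketch assumes flash-attn and a bfloat16-capable GPU are available.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model

model_name = "meta-llama/Llama-3.2-3B-Instruct"  # ${model.family}/${model.trim}

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",  # requires flash-attn to be installed
)

# lora section: r=32, alpha=64, dropout=0.1 on the attention q/v projections
lora_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA adapter weights are trainable
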
task:
  name: countdown2345
  data_files:
    - citrinegui/countdown_n2t100_1-100
    - citrinegui/countdown_n3t100_1-100
    - citrinegui/countdown_n4t100_1-100
    - citrinegui/countdown_n5t100_1-100
  test_file: citrinegui/countdown_n5t100_1-100
  force_redownload: false
  train_size: 327680
  test_size: 1024
training:
  max_prompt_length: 1000
  max_completion_length: 512
inference:
  checkpoint: ${algorithm.training.max_steps}
  temperature: 0.0
  sc_num: 1
  pass_at_k: 1
  resume: 0
  max_new_tokens: 512
  batch_size: 32
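
A minimal sketch of loading the task data listed above, assuming each entry in task.data_files is a Hugging Face dataset repo with a train split and a prompt-style column. The split names, shuffling with dataset_seed, and how train_size=327680 is enforced are part of the original codebase and are not reproduced here.

from datasets import load_dataset, concatenate_datasets

data_files = [
    "citrinegui/countdown_n2t100_1-100",
    "citrinegui/countdown_n3t100_1-100",
    "citrinegui/countdown_n4t100_1-100",
    "citrinegui/countdown_n5t100_1-100",
]

# Training mixes all four difficulty levels (2-5 numbers per puzzle).
train_parts = [load_dataset(name, split="train") for name in data_files]
train_dataset = concatenate_datasets(train_parts)

# Evaluation uses only the hardest variant (task.test_file), capped at test_size=1024.
eval_dataset = load_dataset("citrinegui/countdown_n5t100_1-100", split="train").select(range(1024))
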
algorithm:
  name: grpo
  training:
    learning_rate: 1.0e-06
    lr_scheduler_type: cosine
    logging_steps: 10
    max_steps: 1600
    per_device_train_batch_size: 2
    gradient_accumulation_steps: 4
    gradient_checkpointing: true
    bf16: true
    report_to:
      - wandb
    push_to_hub: true
    save_strategy: steps
    save_steps: ${algorithm.training.max_steps}
    tf32: true
    num_generations: 8
    beta: 0.001
    use_vllm: true
    vllm_gpu_memory_utilization: 0.2
    curriculum: false
    curriculum_schedule: classic
    scheduler_params:
      mu_exp: 0.5
      sigma: 0.5
      min_prob: true
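
A minimal sketch of how the algorithm.training section could map onto trl's GRPOConfig and GRPOTrainer. The Countdown reward function, the curriculum / scheduler_params handling, and the output path derived from run_name are specific to the original codebase and only stubbed here; vllm_gpu_memory_utilization assumes a trl version that runs vLLM in-process on the training GPU.

from peft import LoraConfig
from trl import GRPOConfig, GRPOTrainer

def countdown_reward(completions, **kwargs):
    # Placeholder: the actual reward (checking that the generated equation
    # reaches the Countdown target) lives in the original codebase.
    return [0.0 for _ in completions]

training_args = GRPOConfig(
    output_dir="outputs/countdown2345_grpo",   # illustrative; the real path comes from output.run_name
    learning_rate=1.0e-06,
    lr_scheduler_type="cosine",
    logging_steps=10,
    max_steps=1600,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    gradient_checkpointing=True,
    bf16=True,
    tf32=True,
    report_to=["wandb"],
    push_to_hub=True,
    save_strategy="steps",
    save_steps=1600,                           # ${algorithm.training.max_steps}
    num_generations=8,                         # completions sampled per prompt
    beta=0.001,                                # KL penalty coefficient
    max_prompt_length=1000,
    max_completion_length=512,
    use_vllm=True,
    vllm_gpu_memory_utilization=0.2,           # leave most GPU memory to the policy model
)

trainer = GRPOTrainer(
    model="meta-llama/Llama-3.2-3B-Instruct",
    reward_funcs=countdown_reward,
    args=training_args,
    train_dataset=train_dataset,               # e.g. the concatenated Countdown set from the sketch above
    peft_config=LoraConfig(
        r=32, lora_alpha=64, lora_dropout=0.1,
        target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM",
    ),
)
trainer.train()

On a single training GPU, per_device_train_batch_size=2 with gradient_accumulation_steps=4 yields 8 completions per optimizer step, which matches num_generations=8, so each step consumes exactly one full generation group per prompt; keeping vllm_gpu_memory_utilization at 0.2 reserves the remaining device memory for the policy model, optimizer state, and activations.
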