add finetune cfg

ThuanNaN 2025-01-20 07:24:05 +07:00
parent bb2a833fc1
commit 023fdf11f7


@@ -0,0 +1,56 @@
model:
  arch: minigpt_v2
  model_type: pretrain
  max_txt_len: 1024
  image_size: 448
  end_sym: "</s>"
  llama_model: "meta-llama/Llama-2-7b-chat-hf"
  ckpt: "./ckpt/checkpoint_stage3.pth"
  use_grad_checkpoint: True
  low_resource: True
  chat_template: True
  lora_r: 64
  lora_alpha: 16

datasets:
  textvqa:
    batch_size: 2
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 448
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 100

run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-5
  min_lr: 1e-6
  warmup_lr: 1e-6
  weight_decay: 0.05
  max_epoch: 5
  num_workers: 6
  warmup_steps: 1000
  iters_per_epoch: 2000
  seed: 42
  output_dir: "textvqa_outputs"
  amp: True
  resume_ckpt_path: null
  evaluate: False
  train_splits: ["train"]
  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
  wandb_log: False
  job_name: minigptv2_finetune
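
With max_epoch: 5 and iters_per_epoch: 2000, this run covers 10,000 optimizer steps, the first warmup_steps: 1000 of which ramp the learning rate linearly from warmup_lr up to init_lr before cosine decay toward min_lr. A config like this is normally passed to the repo's train.py via --cfg-path (e.g. torchrun --nproc_per_node 1 train.py --cfg-path <this file>, per the MiniGPT-4 README). The sketch below is a hypothetical standalone helper, not part of this commit, that reproduces the schedule the run section describes, assuming the usual linear-warmup-plus-cosine behaviour of linear_warmup_cosine_lr:

    import math

    # Values copied from the run: section of the config above.
    init_lr, min_lr, warmup_lr = 1e-5, 1e-6, 1e-6
    warmup_steps = 1000
    total_steps = 5 * 2000  # max_epoch * iters_per_epoch

    def lr_at(step: int) -> float:
        # Linear warmup from warmup_lr to init_lr, then cosine decay to
        # min_lr (a sketch of the assumed linear_warmup_cosine_lr curve).
        if step < warmup_steps:
            return warmup_lr + (init_lr - warmup_lr) * step / warmup_steps
        progress = (step - warmup_steps) / (total_steps - warmup_steps)
        return min_lr + 0.5 * (init_lr - min_lr) * (1 + math.cos(math.pi * progress))

    for s in (0, 500, 1000, 5000, 10000):
        print(f"step {s:>5}: lr = {lr_at(s):.2e}")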