# MiniGPT-v2 TextVQA finetuning config (mirror of https://github.com/Vision-CAIR/MiniGPT-4.git)
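# Model section: MiniGPT-v2 architecture on a LLaMA-2-7B-chat backbone, initialized from the
# stage-3 checkpoint. lora_r / lora_alpha are the LoRA adapter rank and scaling factor;
# use_grad_checkpoint and low_resource trade compute for memory (that low_resource loads the
# LLM in reduced precision is an assumption based on the MiniGPT-4 codebase).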
model:
  arch: minigpt_v2
  model_type: pretrain
  max_txt_len: 1024
  image_size: 448
  end_sym: "</s>"
  llama_model: "meta-llama/Llama-2-7b-chat-hf"
  ckpt: "./ckpt/checkpoint_stage3.pth"
  use_grad_checkpoint: True
  low_resource: True
  chat_template: True
  lora_r: 64
  lora_alpha: 16

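# Dataset section: fine-tunes on TextVQA only, using the BLIP-2 image processor at 448px and the
# BLIP caption text processor. sample_ratio is understood to weight this dataset when several
# datasets are mixed; with a single dataset it has no practical effect.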
datasets:
  textvqa:
    batch_size: 2
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 448
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 100

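# Run section: linear-warmup + cosine-decay LR schedule, 5 epochs of 2000 iterations each,
# mixed-precision (amp) training on a single process (world_size: 1) with torch.distributed
# initialized from environment variables (dist_url: "env://").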
run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-5
  min_lr: 1e-6
  warmup_lr: 1e-6

  weight_decay: 0.05
  max_epoch: 5
  num_workers: 6
  warmup_steps: 1000
  iters_per_epoch: 2000

  seed: 42
  output_dir: "textvqa_outputs"

  amp: True
  resume_ckpt_path: null

  evaluate: False
  train_splits: ["train"]

  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True

  wandb_log: False
  job_name: minigptv2_finetune
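
# Example launch (assumption: this file is saved as train_configs/minigptv2_finetune_textvqa.yaml
# inside the MiniGPT-4 repo and run through its train.py entry point):
#   torchrun --nproc-per-node 1 train.py --cfg-path train_configs/minigptv2_finetune_textvqa.yaml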