# MiniGPT-4/eval_configs/minigptv2_benchmark_evaluation.yaml
# 2025-01-20 06:29:45 +07:00  (42 lines, 802 B, YAML)
# Model definition: MiniGPT-v2 built on LLaMA-2-7B-chat, loaded from a
# stage-3 fine-tuned checkpoint with LoRA adapters (r=64, alpha=16).
model:
  arch: minigpt_v2
  model_type: pretrain
  max_txt_len: 500
  end_sym: "</s>"
  low_resource: false
  prompt_template: '[INST] {} [/INST]'
  llama_model: "meta-llama/Llama-2-7b-chat-hf"
  ckpt: "./ckpt/checkpoint_stage3.pth"
  lora_r: 64
  lora_alpha: 16
# Dataset/processor registration. The flattened original produced duplicate
# `train:`/`name:` keys at one level (silent last-wins in most parsers);
# the nesting below restores the intended processor hierarchy.
datasets:
  cc_sbu_align:
    vis_processor:
      train:
        name: "blip2_image_eval"
        image_size: 448
    text_processor:
      train:
        name: "blip_caption"
# Benchmarks to evaluate. Each entry carries its own batch size, annotation
# file, and generation budget; flattening had collapsed the two entries'
# duplicate keys into one (last-wins), dropping the mvtec_ad settings.
evaluation_datasets:
  mvtec_ad:
    batch_size: 4
    eval_file_path: ./data/MVTEC_det/val_data.json
    max_new_tokens: 40
  textvqa:
    batch_size: 4
    eval_file_path: ./data/TextVQA_tiny/val.json
    img_path: ./data/TextVQA_tiny/images
    max_new_tokens: 20
# Runner settings: task name, run label, and output directory for results.
run:
  task: image_text_pretrain
  name: minigptv2_evaluation
  save_path: eval_outputs