
## Evaluation Instruction for MiniGPT-v2

### Data preparation

**Images download**

| Image source | Download path |
| :--- | :---: |
| OKVQA | annotations &nbsp;&nbsp; images |
| GQA | annotations &nbsp;&nbsp; images |
| Hateful Memes | images and annotations |
| IconQA | images and annotations |
| VizWiz | images and annotations |
| RefCOCO | annotations |
| RefCOCO+ | annotations |
| RefCOCOg | annotations |

### Evaluation dataset structure
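The exact layout depends on where each download is unpacked; as an illustrative sketch only (all directory names below are assumptions, not requirements), one workable arrangement keeps every benchmark under a single evaluation root:

```
/path/to/eval
├── okvqa/          # OKVQA annotations and images
├── gqa/            # GQA annotations and images
├── hateful_meme/   # Hateful Memes images and annotations
├── iconqa/         # IconQA images and annotations
├── vizwiz/         # VizWiz images and annotations
├── refcoco/        # RefCOCO / RefCOCO+ / RefCOCOg annotations
└── coco/           # COCO images (used by the RefCOCO variants)
```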

### Environment setup

```bash
export PYTHONPATH=$PYTHONPATH:/path/to/directory/of/MiniGPT-4
```
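A quick way to confirm the path is picked up (a minimal check, assuming the repo root contains the `minigpt4` package that the evaluation scripts import):

```bash
# Should print the package location rather than raise an ImportError.
python -c "import minigpt4; print(minigpt4.__file__)"
```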

### Start evaluating RefCOCO, RefCOCO+, RefCOCOg

```bash
port=port_number
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
eval_file_path=/path/to/eval/annotation/path
img_path=/path/to/eval/image/path
save_path=/path/to/save/path
ckpt=/path/to/evaluation/checkpoint
split=split_type      # e.g. val, testA, testB, test
dataset=dataset_type  # refcoco, refcoco+, refcocog
```

```bash
torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
 --cfg-path ${cfg_path} --img_path ${img_path} --eval_file_path ${eval_file_path} --save_path ${save_path} \
 --ckpt ${ckpt} --split ${split} --dataset ${dataset} --lora_r 64 --lora_alpha 16 \
 --batch_size 10 --max_new_tokens 20 --resample
```
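To sweep all three RefCOCO splits in one shot, the command extends naturally to a loop (a sketch, assuming the variables above are already set and that `save_path` can take a per-split subdirectory):

```bash
# Evaluate each RefCOCO split in sequence, writing results per split.
for split in val testA testB; do
  torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
   --cfg-path ${cfg_path} --img_path ${img_path} --eval_file_path ${eval_file_path} \
   --save_path ${save_path}/${split} --ckpt ${ckpt} --split ${split} --dataset ${dataset} \
   --lora_r 64 --lora_alpha 16 --batch_size 10 --max_new_tokens 20 --resample
done
```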

### Start evaluating visual question answering

```bash
port=port_number
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
eval_file_path=/path/to/eval/annotation/path
img_path=/path/to/eval/image/path
save_path=/path/to/save/path
ckpt=/path/to/evaluation/checkpoint
split=split_type      # e.g. val, test
dataset=dataset_type  # vqa data types: okvqa, vizwiz, iconvqa, gqa, vsr, hm
```

```bash
torchrun --master-port ${port} --nproc_per_node 1 eval_vqa.py \
 --cfg-path ${cfg_path} --img_path ${img_path} --eval_file_path ${eval_file_path} --save_path ${save_path} \
 --ckpt ${ckpt} --split ${split} --dataset ${dataset} --lora_r 64 --lora_alpha 16 \
 --batch_size 10 --max_new_tokens 20 --resample
```
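The same pattern covers the VQA benchmarks back to back; a sketch, assuming `eval_file_path` and `img_path` are redefined to match each dataset (in practice they differ per benchmark, so set them inside the loop):

```bash
# Evaluate each VQA-style benchmark in sequence; paths must match each dataset.
for dataset in okvqa vizwiz iconvqa gqa vsr hm; do
  torchrun --master-port ${port} --nproc_per_node 1 eval_vqa.py \
   --cfg-path ${cfg_path} --img_path ${img_path} --eval_file_path ${eval_file_path} \
   --save_path ${save_path}/${dataset} --ckpt ${ckpt} --split ${split} --dataset ${dataset} \
   --lora_r 64 --lora_alpha 16 --batch_size 10 --max_new_tokens 20 --resample
done
```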