diff --git a/eval_scripts/EVAL_README.md b/eval_scripts/EVAL_README.md index 7d617e0..eae1dfb 100644 --- a/eval_scripts/EVAL_README.md +++ b/eval_scripts/EVAL_README.md @@ -60,13 +60,16 @@ export PYTHONPATH=$PYTHONPATH:/path/to/directory/of/MiniGPT-4 ### start evalauting RefCOCO, RefCOCO+, RefCOCOg port=port_number cfg_path=/path/to/eval_configs/minigptv2_eval.yaml -eval_file_path=/path/to/eval/image/path save_path=/path/to/save/path ckpt=/path/to/evaluation/checkpoint +split=/evaluation/data/split/type +dataset=/data/type - -split=/evaluation/data/split/type # e.g. val, testA, testB, test -dataset=/data/type #refcoco, refcoco+, refcocog +dataset | split +--- | :---: +refcoco | val, testA, testB +refcoco+ | val, testA, testB +refcocog | val, test ``` torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \ @@ -80,17 +83,26 @@ torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \ port=port_number cfg_path=/path/to/eval_configs/minigptv2_eval.yaml -eval_file_path=/path/to/eval/image/path +eval_file_path=/path/to/eval/annotation/path +image_path=/path/to/eval/image/path save_path=/path/to/save/path ckpt=/path/to/evaluation/checkpoint +split=/evaluation/data/split/type +dataset=/data/type +dataset | image_path | eval_file_path +--- | :---: | :---: +okvqa | coco_2017 | /path/to/okvqa/folder +vizwiz | vizwiz_images | /path/to/vizwiz/folder +iconvqa | iconvqa_images | /path/to/iconvqa/folder +gqa | gqa_images | /path/to/gqa/folder +vsr | vsr_images | None +hateful meme | hm_images | /path/to/hateful_mem/folder -split=/evaluation/data/split/type # e.g. 
val,test -dataset=/data/type # vqa data types: okvqa, vizwiz, iconvqa, gqa, vsr, hm ``` -torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \ - --cfg-path ${cfg_path} --img_path ${IMG_PATH} --eval_file_path ${eval_file_path} --save_path ${save_path} \ +torchrun --master-port ${port} --nproc_per_node 1 eval_vqa.py \ + --cfg-path ${cfg_path} --img_path ${image_path} --eval_file_path ${eval_file_path} --save_path ${save_path} \ --ckpt ${ckpt} --split ${split} --dataset ${dataset} --lora_r 64 --lora_alpha 16 \ --batch_size 10 --max_new_tokens 20 --resample ``` diff --git a/eval_scripts/eval_vqa.py b/eval_scripts/eval_vqa.py index cd25b3d..0ec4cf6 100644 --- a/eval_scripts/eval_vqa.py +++ b/eval_scripts/eval_vqa.py @@ -162,7 +162,7 @@ if 'gqa' in args.dataset: json.dump(minigpt4_predict, f) if 'vsr' in args.dataset: - annotation = load_dataset(args.eval_file_path, split='test') + annotation = load_dataset("cambridgeltl/vsr_zeroshot", split='test') img_path = args.img_path data = VSREvalData(annotation, vis_processor, img_path) eval_dataloader = DataLoader(data, batch_size=args.batch_size, shuffle=False)