evaluation readme

This commit is contained in:
junchen14 2023-10-31 09:04:43 +03:00
parent 936f0a326e
commit 89e29937da
2 changed files with 22 additions and 10 deletions

View File

@ -60,13 +60,16 @@ export PYTHONPATH=$PYTHONPATH:/path/to/directory/of/MiniGPT-4
### start evaluating RefCOCO, RefCOCO+, RefCOCOg
port=port_number
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
eval_file_path=/path/to/eval/image/path
save_path=/path/to/save/path
ckpt=/path/to/evaluation/checkpoint
split=/evaluation/data/split/type
dataset=/data/type
split=/evaluation/data/split/type # e.g. val, testA, testB, test
dataset=/data/type #refcoco, refcoco+, refcocog
dataset | split
--- | :---:
refcoco | val, testA, testB
refcoco+ | val, testA, testB
refcocog | val, test
```
torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
@ -80,17 +83,26 @@ torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
port=port_number
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
eval_file_path=/path/to/eval/image/path
eval_file_path=/path/to/eval/annotation/path
image_path=/path/to/eval/image/path
save_path=/path/to/save/path
ckpt=/path/to/evaluation/checkpoint
split=/evaluation/data/split/type
dataset=/data/type
dataset | image_path | eval_file_path
--- | :---: | :---:
okvqa | coco_2017 | /path/to/okvqa/folder
vizwiz | vizwiz_images | /path/to/vizwiz/folder
iconvqa | iconvqa_images | /path/to/iconvqa/folder
gqa | gqa_images | /path/to/gqa/folder
vsr | vsr_images | None
hateful meme | hm_images | /path/to/hateful_mem/folder
split=/evaluation/data/split/type # e.g. val, test
dataset=/data/type # vqa data types: okvqa, vizwiz, iconvqa, gqa, vsr, hm
```
torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
--cfg-path ${cfg_path} --img_path ${IMG_PATH} --eval_file_path ${eval_file_path} --save_path ${save_path} \
torchrun --master-port ${port} --nproc_per_node 1 eval_vqa.py \
--cfg-path ${cfg_path} --img_path ${image_path} --eval_file_path ${eval_file_path} --save_path ${save_path} \
--ckpt ${ckpt} --split ${split} --dataset ${dataset} --lora_r 64 --lora_alpha 16 \
--batch_size 10 --max_new_tokens 20 --resample
```

View File

@ -162,7 +162,7 @@ if 'gqa' in args.dataset:
json.dump(minigpt4_predict, f)
if 'vsr' in args.dataset:
annotation = load_dataset(args.eval_file_path, split='test')
annotation = load_dataset("cambridgeltl/vsr_zeroshot", split='test')
img_path = args.img_path
data = VSREvalData(annotation, vis_processor, img_path)
eval_dataloader = DataLoader(data, batch_size=args.batch_size, shuffle=False)