mirror of
https://github.com/Vision-CAIR/MiniGPT-4.git
synced 2025-04-06 19:10:45 +00:00
evaluation readme
This commit is contained in:
parent
936f0a326e
commit
89e29937da
@ -60,13 +60,16 @@ export PYTHONPATH=$PYTHONPATH:/path/to/directory/of/MiniGPT-4
|
|||||||
### start evaluating RefCOCO, RefCOCO+, RefCOCOg
|
### start evaluating RefCOCO, RefCOCO+, RefCOCOg
|
||||||
port=port_number
|
port=port_number
|
||||||
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
|
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
|
||||||
eval_file_path=/path/to/eval/image/path
|
|
||||||
save_path=/path/to/save/path
|
save_path=/path/to/save/path
|
||||||
ckpt=/path/to/evaluation/checkpoint
|
ckpt=/path/to/evaluation/checkpoint
|
||||||
|
split=/evaluation/data/split/type
|
||||||
|
dataset=/data/type
|
||||||
|
|
||||||
|
dataset | split
|
||||||
split=/evaluation/data/split/type # e.g. val, testA, testB, test
|
--- | :---:
|
||||||
dataset=/data/type #refcoco, refcoco+, refcocog
|
refcoco | val, testA, testB
|
||||||
|
refcoco+ | val, testA, testB
|
||||||
|
refcocog | val, test
|
||||||
|
|
||||||
```
|
```
|
||||||
torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
|
torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
|
||||||
@ -80,17 +83,26 @@ torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
|
|||||||
|
|
||||||
port=port_number
|
port=port_number
|
||||||
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
|
cfg_path=/path/to/eval_configs/minigptv2_eval.yaml
|
||||||
eval_file_path=/path/to/eval/image/path
|
eval_file_path=/path/to/eval/annotation/path
|
||||||
|
image_path=/path/to/eval/image/path
|
||||||
save_path=/path/to/save/path
|
save_path=/path/to/save/path
|
||||||
ckpt=/path/to/evaluation/checkpoint
|
ckpt=/path/to/evaluation/checkpoint
|
||||||
|
split=/evaluation/data/split/type
|
||||||
|
dataset=/data/type
|
||||||
|
|
||||||
|
dataset | image_path | eval_file_path
|
||||||
|
--- | :---: | :---:
|
||||||
|
okvqa | coco_2017 | /path/to/okvqa/folder
|
||||||
|
vizwiz | vizwiz_images | /path/to/vizwiz/folder
|
||||||
|
iconvqa | iconvqa_images | /path/to/iconvqa/folder
|
||||||
|
gqa | gqa_images | /path/to/gqa/folder
|
||||||
|
vsr | vsr_images | None
|
||||||
|
hateful meme | hm_images | /path/to/hateful_mem/folder
|
||||||
|
|
||||||
split=/evaluation/data/split/type # e.g. val,test
|
|
||||||
dataset=/data/type # vqa data types: okvqa, vizwiz, iconvqa, gqa, vsr, hm
|
|
||||||
|
|
||||||
```
|
```
|
||||||
torchrun --master-port ${port} --nproc_per_node 1 eval_ref.py \
|
torchrun --master-port ${port} --nproc_per_node 1 eval_vqa.py \
|
||||||
--cfg-path ${cfg_path} --img_path ${IMG_PATH} --eval_file_path ${eval_file_path} --save_path ${save_path} \
|
--cfg-path ${cfg_path} --img_path ${image_path} --eval_file_path ${eval_file_path} --save_path ${save_path} \
|
||||||
--ckpt ${ckpt} --split ${split} --dataset ${dataset} --lora_r 64 --lora_alpha 16 \
|
--ckpt ${ckpt} --split ${split} --dataset ${dataset} --lora_r 64 --lora_alpha 16 \
|
||||||
--batch_size 10 --max_new_tokens 20 --resample
|
--batch_size 10 --max_new_tokens 20 --resample
|
||||||
```
|
```
|
||||||
|
@ -162,7 +162,7 @@ if 'gqa' in args.dataset:
|
|||||||
json.dump(minigpt4_predict, f)
|
json.dump(minigpt4_predict, f)
|
||||||
|
|
||||||
if 'vsr' in args.dataset:
|
if 'vsr' in args.dataset:
|
||||||
annotation = load_dataset(args.eval_file_path, split='test')
|
annotation = load_dataset("cambridgeltl/vsr_zeroshot", split='test')
|
||||||
img_path = args.img_path
|
img_path = args.img_path
|
||||||
data = VSREvalData(annotation, vis_processor, img_path)
|
data = VSREvalData(annotation, vis_processor, img_path)
|
||||||
eval_dataloader = DataLoader(data, batch_size=args.batch_size, shuffle=False)
|
eval_dataloader = DataLoader(data, batch_size=args.batch_size, shuffle=False)
|
||||||
|
Loading…
Reference in New Issue
Block a user