mirror of https://github.com/Vision-CAIR/MiniGPT-4.git

commit c13fa168fd (parent 83b7238a55): update dataset
@@ -15,7 +15,7 @@ RefCOCOg | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcocog.zip"> annotations </a>
 OKVQA | <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json"> annotations </a>
 AOK-VQA | <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/aokvqa/aokvqa_v1p0_train.json"> annotations </a>
 OCR-VQA | <a href="https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing"> annotations </a>
-GQA | <a href="https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip">images</a> <a href="/ibex/project/c2133/minigpt4_v2_dataset/gqa/annotations/train_balanced_questions.json"> annotations </a>
+GQA | <a href="https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip">images</a> <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/train_balanced_questions.json"> annotations </a>
 Filtered flickr-30k | <a href="https://drive.google.com/drive/folders/19c_ggBI77AvdtYlPbuI0ZpnPz73T5teX?usp=sharing"> annotations </a>
 Multi-task conversation | <a href="https://drive.google.com/file/d/11HHqB2c29hbSk-WLxdta-nG8UCUrcCN1/view?usp=sharing"> annotations </a>
 Filtered unnatural instruction | <a href="https://drive.google.com/file/d/1lXNnBcb5WU-sc8Fe2T2N8J0NRw4sBLev/view?usp=sharing"> annotations </a>
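The substantive change in this hunk is replacing a cluster-local `/ibex/...` GQA annotation path with the public LAVIS mirror. For readers reproducing the setup, the directly linked annotation JSONs can be fetched and sanity-checked with a small script. This is a minimal sketch, not part of the repo; the `dataset/<name>/` target layout is an assumption, so adjust it to your own setup.

```python
import json
import os
import urllib.request

# Public annotation JSONs linked in the fine-tuning table above.
ANNOTATION_URLS = {
    "okvqa": "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json",
    "aokvqa": "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/aokvqa/aokvqa_v1p0_train.json",
    "gqa": "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/train_balanced_questions.json",
}

for name, url in ANNOTATION_URLS.items():
    # Assumed local layout: dataset/<name>/<file>; not prescribed by the repo.
    target_dir = os.path.join("dataset", name)
    os.makedirs(target_dir, exist_ok=True)
    target = os.path.join(target_dir, os.path.basename(url))
    if not os.path.exists(target):
        urllib.request.urlretrieve(url, target)
    # Each linked file should parse as JSON; fail loudly if truncated.
    with open(target) as f:
        json.load(f)
    print(f"{name}: ok -> {target}")
```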
@@ -1,7 +1,19 @@
## Evaluation Instruction for MiniGPT-v2

### Data preparation

Download the images and annotations:

Image source | Download path
--- | :---:
OKVQA | <a href="https://drive.google.com/drive/folders/1jxIgAhtaLu_YqnZEl8Ym11f7LhX3nptN?usp=sharing">annotations</a> <a href="http://images.cocodataset.org/zips/train2017.zip">images</a>
gqa | <a href="https://drive.google.com/drive/folders/1-dF-cgFwstutS4qq2D9CFQTDS0UTmIft?usp=drive_link">annotations</a> <a href="https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip">images</a>
hateful meme | <a href="https://github.com/faizanahemad/facebook-hateful-memes">images and annotations</a>
iconqa | <a href="https://iconqa.github.io/#download">images and annotations</a>
vizwiz | <a href="https://vizwiz.org/tasks-and-datasets/vqa/">images and annotations</a>
RefCOCO | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco.zip"> annotations </a>
RefCOCO+ | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco+.zip"> annotations </a>
RefCOCOg | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcocog.zip"> annotations </a>
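Once the downloads finish, a quick existence check can catch misplaced files before running evaluation. The sketch below is purely illustrative: `EVAL_ROOT` and every filename except `okvqa_test_split.json` (which appears in the eval script diff further down) are assumptions, so replace them with the structure described in the next section.

```python
import os

# Illustrative layout only; EVAL_ROOT and all names marked "assumed"
# are not repo-defined paths.
EVAL_ROOT = "eval_data"
EXPECTED = {
    "okvqa": "okvqa_test_split.json",
    "gqa": "gqa_test_split.json",        # assumed name
    "vizwiz": "vizwiz_test_split.json",  # assumed name
    "iconqa": "iconqa_test_split.json",  # assumed name
}

missing = []
for dataset, fname in EXPECTED.items():
    path = os.path.join(EVAL_ROOT, dataset, fname)
    if not os.path.exists(path):
        missing.append(path)
print("all files present" if not missing else f"missing: {missing}")
```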
### Evaluation dataset structure

### Environment setup
@@ -47,7 +47,7 @@ model.eval()

 os.makedirs('results', exist_ok=True)

 if 'okvqa' in args.dataset:
-    evaluation_annotation_path = os.path.join(args.eval_file_path, "test_split.json")
+    evaluation_annotation_path = os.path.join(args.eval_file_path, "okvqa_test_split.json")
     with open(evaluation_annotation_path) as f:
         ok_vqa_test_split = json.load(f)
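The rename from `test_split.json` to `okvqa_test_split.json` suggests that several evaluation splits share `args.eval_file_path`, with the dataset name as a filename prefix. If that pattern holds, the per-dataset `if` branches could collapse into a lookup table. The sketch below assumes that naming convention; every entry except `"okvqa"` is hypothetical.

```python
import json
import os

# Hypothetical extension of the "<dataset>_test_split.json" naming used
# for OKVQA above; only "okvqa" is confirmed by the diff.
SPLIT_FILES = {
    "okvqa": "okvqa_test_split.json",
    "vizwiz": "vizwiz_test_split.json",  # assumed name
    "iconqa": "iconqa_test_split.json",  # assumed name
}

def load_test_split(eval_file_path, dataset):
    """Load one dataset's evaluation split from the shared eval directory."""
    path = os.path.join(eval_file_path, SPLIT_FILES[dataset])
    with open(path) as f:
        return json.load(f)

# Usage mirroring the script above:
# ok_vqa_test_split = load_test_split(args.eval_file_path, "okvqa")
```

Keeping the filenames in one mapping also means adding a new benchmark touches a single line instead of another `if` block.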