mirror of https://github.com/Vision-CAIR/MiniGPT-4.git
synced 2025-04-05 02:20:47 +00:00

update visual genome

This commit is contained in:
parent 83c3cb06ed
commit f976c7800f
@@ -7,16 +7,16 @@ Image source | Download path
 --- | :---:
 COCO 2014 images | <a href="http://images.cocodataset.org/zips/train2014.zip">images</a> <a href="https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json"> captions</a>
 COCO VQA | <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/vqav2/vqa_train.json">vqa train</a> <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/vqav2/vqa_val.json"> vqa val</a>
-Visual Genome | <a href="https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip">images part1</a> <a href="https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip">images part2</a>
+Visual Genome | <a href="https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip">images part1</a> <a href="https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip">images part2</a> <a href="https://homes.cs.washington.edu/~ranjay/visualgenome/data/dataset/image_data.json.zip"> image meta data </a>
 TextCaps | <a href="https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip">images</a> <a href="https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_train.json"> annotations</a>
 RefCOCO | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco.zip"> annotations </a>
 RefCOCO+ | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco+.zip"> annotations </a>
 RefCOCOg | <a href="https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcocog.zip"> annotations </a>
 OKVQA | <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json"> annotations </a>
-AOK-VQA | <a href="https://prior-datasets.s3.us-east-2.amazonaws.com/aokvqa/aokvqa_v1p0.tar.gz"> annotations </a>
+AOK-VQA | <a href="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/aokvqa/aokvqa_v1p0_train.json"> annotations </a>
 OCR-VQA | <a href="https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing"> annotations </a>
 GQA | <a href="https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip">images</a> <a href="/ibex/project/c2133/minigpt4_v2_dataset/gqa/annotations/train_balanced_questions.json"> annotations </a>
-Filtered Flickr-30k | <a href="https://drive.google.com/drive/folders/19c_ggBI77AvdtYlPbuI0ZpnPz73T5teX?usp=sharing"> annotations </a>
+Filtered flickr-30k | <a href="https://drive.google.com/drive/folders/19c_ggBI77AvdtYlPbuI0ZpnPz73T5teX?usp=sharing"> annotations </a>
 Multi-task conversation | <a href="https://drive.google.com/file/d/11HHqB2c29hbSk-WLxdta-nG8UCUrcCN1/view?usp=sharing"> annotations </a>
 Filtered unnatural instruction | <a href="https://drive.google.com/file/d/1lXNnBcb5WU-sc8Fe2T2N8J0NRw4sBLev/view?usp=sharing"> annotations </a>
 LLaVA | <a href="https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/complex_reasoning_77k.json"> Complex reasoning </a> <a href="https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/detail_23k.json"> Detailed description </a> <a href="https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/conversation_58k.json"> Conversation </a>
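The table above only lists raw download links, so a short helper saves some clicking. Below is a minimal sketch, in Python, of fetching the three Visual Genome archives from the updated row, including the image metadata link this commit adds; the `visual_genome/` target folder and the skip-if-present check are illustrative assumptions, not part of the repo.

```python
import urllib.request
import zipfile
from pathlib import Path

VG_URLS = [
    "https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip",   # images part1
    "https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip",  # images part2
    # image metadata, the link added in this commit:
    "https://homes.cs.washington.edu/~ranjay/visualgenome/data/dataset/image_data.json.zip",
]

dest = Path("visual_genome")  # assumed local target folder
dest.mkdir(parents=True, exist_ok=True)

for url in VG_URLS:
    archive = dest / url.rsplit("/", 1)[-1]
    if not archive.exists():  # skip archives already on disk
        urllib.request.urlretrieve(url, archive)
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(dest)   # unpack next to the archive
```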
@@ -65,6 +65,8 @@ ${MINIGPTv2_DATASET}
 │   ├── VG_100K
 │   ├── VG_100K_2
 │   └── region_descriptions.json
+│   └── image_data.json
+
 ...
 ```
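With the metadata archive unpacked, `image_data.json` should sit next to `region_descriptions.json` as in the tree above. A minimal sanity check, assuming the README's `${MINIGPTv2_DATASET}` root is exported as an environment variable of the same name (the script itself is illustrative):

```python
import os
from pathlib import Path

# assumed: the README's ${MINIGPTv2_DATASET} root is exported under that name
root = Path(os.environ.get("MINIGPTv2_DATASET", ".")) / "visual_genome"
expected = ["VG_100K", "VG_100K_2", "region_descriptions.json", "image_data.json"]

for name in expected:
    status = "ok" if (root / name).exists() else "MISSING"
    print(f"{status:7} {root / name}")
```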
@@ -28,6 +28,6 @@ datasets:
           url:
             - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/train_balanced_questions.json
           storage:
-            - /ibex/project/c2133/minigpt4_v2_dataset/gqa/annotations/train_balanced_questions.json
+            - /ibex/project/c2090/minigptv2_dataset/gqa/train_balanced_questions.json
       images:
-        storage: /ibex/project/c2133/minigpt4_v2_dataset/gqa/images_copy/
+        storage: /ibex/project/c2090/minigptv2_dataset/gqa/images
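This hunk and the ones that follow retarget storage paths from the retired `/ibex/project/c2133/minigpt4_v2_dataset` tree to `/ibex/project/c2090/minigptv2_dataset`. To confirm a config picked up the new location, here is a sketch that loads one of these YAML files and prints every leaf value so a stale `c2133` path stands out; it assumes the `omegaconf` package, and the file path is hypothetical.

```python
from omegaconf import OmegaConf

# hypothetical config path; substitute the YAML file you are checking
cfg = OmegaConf.to_container(OmegaConf.load("configs/datasets/gqa/balanced_val.yaml"))

def walk(node, prefix=""):
    """Print every scalar leaf with its dotted key path."""
    if isinstance(node, dict):
        for key, value in node.items():
            walk(value, f"{prefix}{key}.")
    elif isinstance(node, list):
        for i, item in enumerate(node):
            walk(item, f"{prefix}{i}.")
    else:
        print(f"{prefix.rstrip('.')} = {node}")

walk(cfg)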
@@ -9,4 +9,4 @@ datasets:
     data_type: images
     build_info:
       image_path: /ibex/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/llava/conversation_58k.json
+      ann_path: /ibex/project/c2090/minigptv2_dataset/llava/conversation_58k.json
@@ -9,4 +9,4 @@ datasets:
     data_type: images
     build_info:
       image_path: /ibex/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/llava/detail_23k.json
+      ann_path: /ibex/project/c2090/minigptv2_dataset/llava/detail_23k.json
@@ -9,4 +9,4 @@ datasets:
     data_type: images
     build_info:
       image_path: /ibex/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/llava/complex_reasoning_77k.json
+      ann_path: /ibex/project/c2090/minigptv2_dataset/llava/complex_reasoning_77k.json
@@ -11,4 +11,4 @@ datasets:
     build_info:

       image_path: /ibex/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/multitask_conversation/multi_task_conversation.json
+      ann_path: /ibex/project/c2090/minigptv2_dataset/multitask_conversation/multi_task_conversation.json
@@ -7,4 +7,4 @@ datasets:
   unnatural_instruction:
     data_type: text
     build_info:
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/unnatural-instructions/data/unnatural_instruction_filer.json
+      ann_path: /ibex/project/c2090/minigptv2_dataset/unnatural_instructions/unnatural_instruction_filer.json
@@ -8,5 +8,5 @@ datasets:
   ocrvqa:
     data_type: images
     build_info:
-      image_path: /ibex/project/c2133/minigpt4_v2_dataset/ocrvqa/images
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/ocrvqa/dataset.json
+      image_path: /ibex/project/c2090/minigptv2_dataset/ocrvqa/images
+      ann_path: /ibex/project/c2090/minigptv2_dataset/ocrvqa/dataset.json
@@ -31,6 +31,6 @@ datasets:
           # - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/OpenEnded_mscoco_train2014_questions.json
           # - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/mscoco_train2014_annotations.json
           storage:
-            - /ibex/project/c2133/minigpt4_v2_dataset/okvqa_v2/annotations/okvqa_train.json
+            - /ibex/project/c2090/minigptv2_dataset/okvqa/okvqa_train.json
       images:
         storage: /ibex/reference/CV/COCO/cocoapi/data/2014/images/jpeg
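The `url:`/`storage:` pairing above suggests annotations are fetched from a public URL and cached at the local storage path. Below is a hedged sketch of that idea for the OKVQA entry; whether the repo's loader does exactly this is an assumption, and the URL/path pair is copied from the hunk.

```python
import urllib.request
from pathlib import Path

# url -> storage pairing lifted from the hunk above; extend as needed
pairs = {
    "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json":
        "/ibex/project/c2090/minigptv2_dataset/okvqa/okvqa_train.json",
}

for url, storage in pairs.items():
    target = Path(storage)
    target.parent.mkdir(parents=True, exist_ok=True)
    if not target.exists():  # download only when the cache is cold
        urllib.request.urlretrieve(url, target)
```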
@@ -10,7 +10,7 @@ datasets:
     data_type: images

     build_info:
-      image_path: /ibex/project/c2133/minigpt4_v2_dataset/TextCaps/train_images
-      ann_path: /ibex/project/c2133/minigpt4_v2_dataset/TextCaps/TextCaps_0.1_train.json
+      image_path: /ibex/project/c2090/minigptv2_dataset/textcaps/train_images
+      ann_path: /ibex/project/c2090/minigptv2_dataset/textcaps/TextCaps_0.1_train.json
@@ -7,4 +7,4 @@ datasets:
   refvg:
     data_type: images
     build_info:
-      data_dir: /ibex/project/c2133/minigpt4_v2_dataset/visual_genome
+      data_dir: /ibex/project/c2090/minigptv2_dataset/visual_genome
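Every hunk in this commit repeats the same prefix swap by hand, so a one-off script could do most of it. A minimal sketch follows, dry-run by default; the `configs/datasets` directory is a guess. Note that a plain prefix swap does not cover the extra renames visible above (`gqa/annotations/` flattened, `TextCaps` lowercased to `textcaps`, `okvqa_v2/annotations/` collapsed to `okvqa/`, `unnatural-instructions/data/` flattened), so those files would still need individual edits.

```python
from pathlib import Path

OLD = "/ibex/project/c2133/minigpt4_v2_dataset"
NEW = "/ibex/project/c2090/minigptv2_dataset"

def rewrite_prefix(config_dir: str, dry_run: bool = True) -> None:
    """Replace the old dataset prefix in every YAML under config_dir."""
    for path in Path(config_dir).rglob("*.yaml"):
        text = path.read_text()
        if OLD not in text:
            continue
        print(("would update " if dry_run else "updating ") + str(path))
        if not dry_run:
            path.write_text(text.replace(OLD, NEW))

rewrite_prefix("configs/datasets")  # hypothetical directory; dry run by default
```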