From 8c2297750ffe6c4d9081cabc8acb54bdd712ba03 Mon Sep 17 00:00:00 2001 From: Deyao Zhu Date: Mon, 23 Oct 2023 20:42:36 +0300 Subject: [PATCH 01/18] update env name to minigptv, add bug report and feature request back --- minigpt4/models/minigpt_base.py | 3 --- train.py | 14 ++++---------- train_configs/minigpt4_llama2_stage1_pretrain.yaml | 5 ++++- train_configs/minigpt4_llama2_stage2_finetune.yaml | 5 ++++- train_configs/minigpt4_stage1_pretrain.yaml | 5 ++++- train_configs/minigpt4_stage2_finetune.yaml | 5 ++++- train_configs/minigpt_v2_finetune.yaml | 6 ++++-- 7 files changed, 24 insertions(+), 19 deletions(-) diff --git a/minigpt4/models/minigpt_base.py b/minigpt4/models/minigpt_base.py index b5dd9d4..cd051ec 100644 --- a/minigpt4/models/minigpt_base.py +++ b/minigpt4/models/minigpt_base.py @@ -178,7 +178,6 @@ class MiniGPTBase(BaseModel): answers = [self.llama_tokenizer(a + self.end_sym, return_tensors="pt", add_special_tokens=False).to(self.device) for a in answers] - cur_id = [] cur_target = [] for i in range(len(questions)): @@ -226,8 +225,6 @@ class MiniGPTBase(BaseModel): conv_q = [[self.prompt_template.format(item) for item in items] for items in conv_q] - - cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, [q[0] for q in conv_q]) regress_token_ids, regress_atts, part_targets = self.tokenize_conversation(conv_q, conv_a) diff --git a/train.py b/train.py index 072a078..0013079 100644 --- a/train.py +++ b/train.py @@ -12,6 +12,7 @@ import random import numpy as np import torch import torch.backends.cudnn as cudnn +import wandb import minigpt4.tasks as tasks from minigpt4.common.config import Config @@ -30,7 +31,6 @@ from minigpt4.models import * from minigpt4.processors import * from minigpt4.runners import * from minigpt4.tasks import * -import wandb def parse_args(): @@ -44,12 +44,10 @@ def parse_args(): "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) - parser.add_argument("--wandb_log", default=False) - parser.add_argument("--job_name",default="minigpt_v2",type=str) + parser.add_argument("--job_name", default="minigpt_v2",type=str) args = parser.parse_args() - return args @@ -80,16 +78,13 @@ def main(): # set before init_distributed_mode() to ensure the same job_id shared across all ranks. job_id = now() args = parse_args() - - cfg = Config(parse_args()) + cfg = Config(args) init_distributed_mode(cfg.run_cfg) - setup_seeds(cfg) # set after init_distributed_mode() to only log on master. 
setup_logger() - cfg.pretty_print() task = tasks.setup_task(cfg) @@ -98,10 +93,9 @@ def main(): if cfg.run_cfg.wandb_log: wandb.login() - wandb.init(project="minigptv2",name=args.job_name) + wandb.init(project="minigptv", name=cfg.run_cfg.job_name) wandb.watch(model) - runner = get_runner_class(cfg)( cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets ) diff --git a/train_configs/minigpt4_llama2_stage1_pretrain.yaml b/train_configs/minigpt4_llama2_stage1_pretrain.yaml index c13d31f..bcc458e 100644 --- a/train_configs/minigpt4_llama2_stage1_pretrain.yaml +++ b/train_configs/minigpt4_llama2_stage1_pretrain.yaml @@ -52,4 +52,7 @@ run: device: "cuda" world_size: 1 dist_url: "env://" - distributed: True \ No newline at end of file + distributed: True + + wandb_log: True + job_name: minigpt4_llama2_pretrain \ No newline at end of file diff --git a/train_configs/minigpt4_llama2_stage2_finetune.yaml b/train_configs/minigpt4_llama2_stage2_finetune.yaml index 8c138ae..29b5358 100644 --- a/train_configs/minigpt4_llama2_stage2_finetune.yaml +++ b/train_configs/minigpt4_llama2_stage2_finetune.yaml @@ -46,4 +46,7 @@ run: device: "cuda" world_size: 1 dist_url: "env://" - distributed: True \ No newline at end of file + distributed: True + + wandb_log: True + job_name: minigpt4_llama2_finetune \ No newline at end of file diff --git a/train_configs/minigpt4_stage1_pretrain.yaml b/train_configs/minigpt4_stage1_pretrain.yaml index ce8bc87..bd9a451 100644 --- a/train_configs/minigpt4_stage1_pretrain.yaml +++ b/train_configs/minigpt4_stage1_pretrain.yaml @@ -52,4 +52,7 @@ run: device: "cuda" world_size: 1 dist_url: "env://" - distributed: True \ No newline at end of file + distributed: True + + wandb_log: True + job_name: minigpt4_pretrain \ No newline at end of file diff --git a/train_configs/minigpt4_stage2_finetune.yaml b/train_configs/minigpt4_stage2_finetune.yaml index 531a3a0..89d1100 100644 --- a/train_configs/minigpt4_stage2_finetune.yaml +++ b/train_configs/minigpt4_stage2_finetune.yaml @@ -46,4 +46,7 @@ run: device: "cuda" world_size: 1 dist_url: "env://" - distributed: True \ No newline at end of file + distributed: True + + wandb_log: True + job_name: minigpt4_finetune \ No newline at end of file diff --git a/train_configs/minigpt_v2_finetune.yaml b/train_configs/minigpt_v2_finetune.yaml index 7bf6bbf..4039ea6 100644 --- a/train_configs/minigpt_v2_finetune.yaml +++ b/train_configs/minigpt_v2_finetune.yaml @@ -276,7 +276,6 @@ run: init_lr: 1e-5 min_lr: 8e-5 warmup_lr: 1e-6 - wandb_log: True weight_decay: 0.05 max_epoch: 50 @@ -296,4 +295,7 @@ run: device: "cuda" world_size: 1 dist_url: "env://" - distributed: True \ No newline at end of file + distributed: True + + wandb_log: True + job_name: minigptv2_finetune \ No newline at end of file From f0b6a9e7d77747ad1d73c02c35f04b41a6c23be4 Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:11:35 +0300 Subject: [PATCH 02/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 29 ++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 2d5c825..2bae2d7 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -1,23 +1,44 @@ ## Download the COCO captions, RefCOCO, RefCOCO+. 
RefCOCOg, visual genome, textcaps, LLaVA, gqa, AOK-VQA, OK-VQA, OCR-VQA, filtered Flickr-30k, multi-task conversation, and Unnatural instruction datasets -### COCO captions +After downloading all of them, organize the data as follows in `./playground/data`, +``` +├── coco +│ └── train2017 +├── gqa +│ └── images +├── ocr_vqa +│ └── images +├── textvqa +│ └── train_images +└── vg + ├── VG_100K + └── VG_100K_2 + + +### COCO captions +- [train2017](http://images.cocodataset.org/zips/train2017.zip) ### RefCOCO, RefCOCO+, RefCOCOg ### Visual genome - -### textcaps +- [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip) +### TextCaps ### LLaVA -### gqa +### TextVQA +- [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) +### GQA +- [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip) +- [Annotations](https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/testdev_balanced_questions.json) ### OKVQA ### AOK-VQA ### OCR-VQA +- [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing), **we save all files as `.jpg`** ### filtered Flickr-30k From 68df270f14dfd30213f95349c458bd1eced6c602 Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:15:14 +0300 Subject: [PATCH 03/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 2bae2d7..438240f 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -14,7 +14,7 @@ After downloading all of them, organize the data as follows in `./playground/dat └── vg ├── VG_100K └── VG_100K_2 - +``` ### COCO captions - [train2017](http://images.cocodataset.org/zips/train2017.zip) From c2de397a8674b1546fad0f6f01088a42efde98cf Mon Sep 17 00:00:00 2001 From: Deyao Zhu Date: Mon, 23 Oct 2023 21:31:13 +0300 Subject: [PATCH 04/18] update refcoco preparation --- dataset/README_MINIGPTv2_FINETUNE.md | 46 ++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 2d5c825..be1e09b 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -2,16 +2,56 @@ ### COCO captions - ### RefCOCO, RefCOCO+, RefCOCOg -### Visual genome +Makesure you have the COCO 2014 images first. + +Then, +download RefCOCO, RefCOCO+, and RefCOCOg annotation files in the following links. + +- https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco.zip +- https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco+.zip +- https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcocog.zip + +Unzip these files to the location you like. It should have the structure like the following + +``` +Location_you_like +├── refcoco +│ ├── instances.json +│ ├── refs(google).p +│ └── refs(unc).p +├── refcoco+ +│ ├── instances.json +│ └── refs(unc).p +└── refcocog + ├── instances.json + ├── refs(google).p + └── refs(umd).p +``` + +Set **image_path** in all the following dataset configuration files to the COCO 2014 image folder. +Similarly, set **ann_path** in all the following configs to the above folder (Location_you_like) that contains refcoco, refcoco+, and refcocog. 
+ +- [minigpt4/configs/refcoco.yaml](../minigpt4/configs/refcoco.yaml) +- [minigpt4/configs/refcocog.yaml](../minigpt4/configs/refcocog.yaml) +- [minigpt4/configs/refcocop.yaml](../minigpt4/configs/refcocop.yaml) +- [minigpt4/configs/invrefcoco.yaml](../minigpt4/configs/invrefcoco.yaml) +- [minigpt4/configs/invrefcocog.yaml](../minigpt4/configs/invrefcocog.yaml) +- [minigpt4/configs/invrefcocop.yaml](../minigpt4/configs/invrefcocop.yaml) + + + +### Visual Genome ### textcaps ### LLaVA -### gqa + + + +### GQA ### OKVQA From 114852b529ffb2b8fd5dc68e5653fb7f6b9bdacd Mon Sep 17 00:00:00 2001 From: Deyao Zhu Date: Mon, 23 Oct 2023 21:34:47 +0300 Subject: [PATCH 05/18] update refococo --- dataset/README_MINIGPTv2_FINETUNE.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index c582acc..3d09ffe 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -19,11 +19,13 @@ After downloading all of them, organize the data as follows in `./playground/dat ### COCO captions - [train2017](http://images.cocodataset.org/zips/train2017.zip) -### RefCOCO, RefCOCO+, RefCOCOg + ### Visual genome - [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip) ### TextCaps + +### RefCOCO, RefCOCO+, RefCOCOg Makesure you have the COCO 2014 images first. Then, From ab520c89fc76d14b623aeea0a8e3bd5134ed36fd Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:41:39 +0300 Subject: [PATCH 06/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 3d09ffe..f658adc 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -26,7 +26,7 @@ After downloading all of them, organize the data as follows in `./playground/dat ### TextCaps ### RefCOCO, RefCOCO+, RefCOCOg -Makesure you have the COCO 2014 images first. +Make sure you have the COCO 2014 images first. Then, download RefCOCO, RefCOCO+, and RefCOCOg annotation files in the following links. @@ -71,14 +71,10 @@ Similarly, set **ann_path** in all the following configs to the above folder (Lo ### LLaVA ### TextVQA -- [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) -### GQA -- [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip) -- [Annotations](https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/testdev_balanced_questions.json) - - +Images, and question-answer pairs will be loaded during evaluation. ### GQA +Images, and question-answer pairs will be loaded during evaluation. 
### OKVQA From 50df66e81e437091b7a55e68c793dee2b3b4f5f0 Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:42:58 +0300 Subject: [PATCH 07/18] Create Evaluation.md --- dataset/Evaluation.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 dataset/Evaluation.md diff --git a/dataset/Evaluation.md b/dataset/Evaluation.md new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/dataset/Evaluation.md @@ -0,0 +1 @@ + From fa19bc09f21c220c564148c9d7ed86d3c1ced523 Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:45:22 +0300 Subject: [PATCH 08/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index f658adc..280622e 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -1,26 +1,10 @@ ## Download the COCO captions, RefCOCO, RefCOCO+. RefCOCOg, visual genome, textcaps, LLaVA, gqa, AOK-VQA, OK-VQA, OCR-VQA, filtered Flickr-30k, multi-task conversation, and Unnatural instruction datasets -After downloading all of them, organize the data as follows in `./playground/data`, - -``` -├── coco -│ └── train2017 -├── gqa -│ └── images -├── ocr_vqa -│ └── images -├── textvqa -│ └── train_images -└── vg - ├── VG_100K - └── VG_100K_2 -``` ### COCO captions - [train2017](http://images.cocodataset.org/zips/train2017.zip) - ### Visual genome - [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip) ### TextCaps @@ -70,12 +54,6 @@ Similarly, set **ann_path** in all the following configs to the above folder (Lo ### LLaVA -### TextVQA -Images, and question-answer pairs will be loaded during evaluation. - -### GQA -Images, and question-answer pairs will be loaded during evaluation. - ### OKVQA ### AOK-VQA From b15fec91a5f6c164aa8148b577f8b19a08217057 Mon Sep 17 00:00:00 2001 From: Deyao Zhu Date: Mon, 23 Oct 2023 21:47:29 +0300 Subject: [PATCH 09/18] update llava --- dataset/README_MINIGPTv2_FINETUNE.md | 30 ++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 3d09ffe..512b1ff 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -23,6 +23,7 @@ After downloading all of them, organize the data as follows in `./playground/dat ### Visual genome - [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip) + ### TextCaps ### RefCOCO, RefCOCO+, RefCOCOg @@ -55,12 +56,12 @@ Location_you_like Set **image_path** in all the following dataset configuration files to the COCO 2014 image folder. Similarly, set **ann_path** in all the following configs to the above folder (Location_you_like) that contains refcoco, refcoco+, and refcocog. 
-- [minigpt4/configs/refcoco.yaml](../minigpt4/configs/refcoco.yaml) -- [minigpt4/configs/refcocog.yaml](../minigpt4/configs/refcocog.yaml) -- [minigpt4/configs/refcocop.yaml](../minigpt4/configs/refcocop.yaml) -- [minigpt4/configs/invrefcoco.yaml](../minigpt4/configs/invrefcoco.yaml) -- [minigpt4/configs/invrefcocog.yaml](../minigpt4/configs/invrefcocog.yaml) -- [minigpt4/configs/invrefcocop.yaml](../minigpt4/configs/invrefcocop.yaml) +- [minigpt4/configs/datasets/coco_bbox/refcoco.yaml](../minigpt4/configs/datasets/coco_bbox/refcoco.yaml) +- [minigpt4/configs/datasets/coco_bbox/refcocog.yaml](../minigpt4/configs/datasets/coco_bbox/refcocog.yaml) +- [minigpt4/configs/datasets/coco_bbox/refcocop.yaml](../minigpt4/configs/datasets/coco_bbox/refcocop.yaml) +- [minigpt4/configs/datasets/coco_bbox/invrefcoco.yaml](../minigpt4/configs/datasets/coco_bbox/invrefcoco.yaml) +- [minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml](../minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml) +- [minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml](../minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml) @@ -69,6 +70,23 @@ Similarly, set **ann_path** in all the following configs to the above folder (Lo ### textcaps ### LLaVA +Makesure you have the COCO 2014 images first. + +Download Llava annotation files in the following link to the place you like. + +- https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/conversation_58k.json +- https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/detail_23k.json +- https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/complex_reasoning_77k.json + +Set **image_path** in all the following dataset configuration files to the COCO 2014 image folder. +Similarly, set **ann_path** to the location of the previous downloaded conversation_58k.json, +detail_23k.json, and complex_reasoning_77k.json in conversation.yaml, detail.yaml, and reason.yaml, respectively. + + +- [minigpt4/configs/datasets/llava/conversation.yaml](../minigpt4/configs/datasets/llava/conversation.yaml) +- [minigpt4/configs/datasets/llava/detail.yaml](../minigpt4/configs/datasets/llava/detail.yaml) +- [minigpt4/configs/datasets/llava/reason.yaml](../minigpt4/configs/datasets/llava/reason.yaml) + ### TextVQA - [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) From f13ad74b267b53f9abab23a4c48fb46796cf61d2 Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:48:58 +0300 Subject: [PATCH 10/18] Update Evaluation.md --- dataset/Evaluation.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/dataset/Evaluation.md b/dataset/Evaluation.md index 8b13789..9e3ff86 100644 --- a/dataset/Evaluation.md +++ b/dataset/Evaluation.md @@ -1 +1,21 @@ +### OKVQA + +### GQA +Images and question-answer pairs will be loaded during the evaluation. +''' +python run_eval.py xxxx +''' +### VSR +Images and question-answer pairs will be loaded during the evaluation. 
+ +### IconVQA + +### VizWiz + +### HM + + + + + From 75f87692971a1a4b5107c1f2b8e1926b275bde2d Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:49:30 +0300 Subject: [PATCH 11/18] Update Evaluation.md --- dataset/Evaluation.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dataset/Evaluation.md b/dataset/Evaluation.md index 9e3ff86..e2b366c 100644 --- a/dataset/Evaluation.md +++ b/dataset/Evaluation.md @@ -3,11 +3,15 @@ ### GQA Images and question-answer pairs will be loaded during the evaluation. -''' +``` python run_eval.py xxxx -''' +``` + ### VSR Images and question-answer pairs will be loaded during the evaluation. +``` +python run_eval.py xxxx +``` ### IconVQA From 45a97de8cc02e1b85bb92bf5a194a2569a0a5ada Mon Sep 17 00:00:00 2001 From: Deyao Zhu Date: Mon, 23 Oct 2023 21:49:33 +0300 Subject: [PATCH 12/18] remove unused pretrain set in the finetune readme --- dataset/README_MINIGPTv2_FINETUNE.md | 107 --------------------------- 1 file changed, 107 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 070fbf5..924ecca 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -84,110 +84,3 @@ detail_23k.json, and complex_reasoning_77k.json in conversation.yaml, detail.yam ### Multi-task conversation ### Unnatural instruction - - - - - - - - - - - - - -### Pre-training datasets download: -We use the filtered synthetic captions prepared by BLIP. For more details about the dataset, please refer to [BLIP](https://github.com/salesforce/BLIP). - -It requires ~2.3T to store LAION and CC3M+CC12M+SBU datasets - -Image source | Filtered synthetic caption by ViT-L ---- | :---: -CC3M+CC12M+SBU | Download -LAION115M | Download - -This will download two json files -``` -ccs_synthetic_filtered_large.json -laion_synthetic_filtered_large.json -``` - -## prepare the data step-by-step - - -### setup the dataset folder and move the annotation file to the data storage folder -``` -export MINIGPT4_DATASET=/YOUR/PATH/FOR/LARGE/DATASET/ -mkdir ${MINIGPT4_DATASET}/cc_sbu -mkdir ${MINIGPT4_DATASET}/laion -mv ccs_synthetic_filtered_large.json ${MINIGPT4_DATASET}/cc_sbu -mv laion_synthetic_filtered_large.json ${MINIGPT4_DATASET}/laion -``` - -### Convert the scripts to data storate folder -``` -cp convert_cc_sbu.py ${MINIGPT4_DATASET}/cc_sbu -cp download_cc_sbu.sh ${MINIGPT4_DATASET}/cc_sbu -cp convert_laion.py ${MINIGPT4_DATASET}/laion -cp download_laion.sh ${MINIGPT4_DATASET}/laion -``` - - -### Convert the laion and cc_sbu annotation file format to be img2dataset format -``` -cd ${MINIGPT4_DATASET}/cc_sbu -python convert_cc_sbu.py - -cd ${MINIGPT4_DATASET}/laion -python convert_laion.py -``` - -### Download the datasets with img2dataset -``` -cd ${MINIGPT4_DATASET}/cc_sbu -sh download_cc_sbu.sh -cd ${MINIGPT4_DATASET}/laion -sh download_laion.sh -``` - - -The final dataset structure - -``` -. -├── ${MINIGPT4_DATASET} -│ ├── cc_sbu -│ ├── convert_cc_sbu.py -│ ├── download_cc_sbu.sh -│ ├── ccs_synthetic_filtered_large.json -│ ├── ccs_synthetic_filtered_large.tsv -│ └── cc_sbu_dataset -│ ├── 00000.tar -│ ├── 00000.parquet -│ ... -│ ├── laion -│ ├── convert_laion.py -│ ├── download_laion.sh -│ ├── laion_synthetic_filtered_large.json -│ ├── laion_synthetic_filtered_large.tsv -│ └── laion_dataset -│ ├── 00000.tar -│ ├── 00000.parquet -│ ... -... 
-``` - - -## Set up the dataset configuration files - -Then, set up the LAION dataset loading path in -[here](../minigpt4/configs/datasets/laion/defaults.yaml#L5) at Line 5 as -${MINIGPT4_DATASET}/laion/laion_dataset/{00000..10488}.tar - -and the Conceptual Captoin and SBU datasets loading path in -[here](../minigpt4/configs/datasets/cc_sbu/defaults.yaml#L5) at Line 5 as -${MINIGPT4_DATASET}/cc_sbu/cc_sbu_dataset/{00000..01255}.tar - - - From 0c27a75fd693df2eef6af871fcfe61ff6d637e87 Mon Sep 17 00:00:00 2001 From: Xiang Li <44761952+lx709@users.noreply.github.com> Date: Mon, 23 Oct 2023 21:51:36 +0300 Subject: [PATCH 13/18] Update Evaluation.md --- dataset/Evaluation.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/dataset/Evaluation.md b/dataset/Evaluation.md index e2b366c..34118f3 100644 --- a/dataset/Evaluation.md +++ b/dataset/Evaluation.md @@ -3,19 +3,20 @@ ### GQA Images and question-answer pairs will be loaded during the evaluation. -``` -python run_eval.py xxxx -``` + +``` python run_eval.py xxxx ``` ### VSR Images and question-answer pairs will be loaded during the evaluation. -``` -python run_eval.py xxxx -``` + +``` python run_eval.py xxxx ``` ### IconVQA ### VizWiz +1. Download [`test.json`](https://vizwiz.cs.colorado.edu/VizWiz_final/vqa_data/Annotations.zip) and extract [`test.zip`](https://vizwiz.cs.colorado.edu/VizWiz_final/images/test.zip) to `test`. Put them under `your_path/vizwiz`. +2. Single-GPU inference. +``` python run_eval.py xxxx ``` ### HM From 41c050de7661763df95aca8d7e6601481e33d058 Mon Sep 17 00:00:00 2001 From: ZhuDeyao Date: Mon, 23 Oct 2023 21:57:25 +0300 Subject: [PATCH 14/18] Update modeling_llama.py for transformers package compatibility --- minigpt4/models/modeling_llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/minigpt4/models/modeling_llama.py b/minigpt4/models/modeling_llama.py index 6d28020..5d59a53 100644 --- a/minigpt4/models/modeling_llama.py +++ b/minigpt4/models/modeling_llama.py @@ -75,7 +75,7 @@ class LlamaForCausalLM(LlamaForCausalLMOrig): ) hidden_states = outputs[0] - if self.config.pretraining_tp > 1: + if hasattr(self.config, 'pretraining_tp') and self.config.pretraining_tp > 1: lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] logits = torch.cat(logits, dim=-1) From f955e62227b140135f2daa4c89acec3517c4931a Mon Sep 17 00:00:00 2001 From: XiaoqianShen <64844805+xiaoqian-shen@users.noreply.github.com> Date: Mon, 23 Oct 2023 19:22:16 +0000 Subject: [PATCH 15/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 924ecca..9eb3dcf 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -74,8 +74,18 @@ detail_23k.json, and complex_reasoning_77k.json in conversation.yaml, detail.yam ### OKVQA +- [OK-VQA Input Questions](https://okvqa.allenai.org/static/data/OpenEnded_mscoco_train2014_questions.json.zip) +- [OK-VQA Annotations](https://okvqa.allenai.org/static/data/mscoco_train2014_annotations.json.zip) +- Images are from COCO + ### AOK-VQA +``` +export AOKVQA_DIR=YOUR_DATASET_PATH +mkdir -p ${AOKVQA_DIR} +curl -fsSL https://prior-datasets.s3.us-east-2.amazonaws.com/aokvqa/aokvqa_v1p0.tar.gz | tar xvz -C ${AOKVQA_DIR} +``` + ### OCR-VQA - 
[download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing), **we save all files as `.jpg`** From ac626b3d9a4eaf883c00919d4fe654083a844af7 Mon Sep 17 00:00:00 2001 From: XiaoqianShen <64844805+xiaoqian-shen@users.noreply.github.com> Date: Mon, 23 Oct 2023 19:27:01 +0000 Subject: [PATCH 16/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 1 + 1 file changed, 1 insertion(+) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 9eb3dcf..edbdf2f 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -76,6 +76,7 @@ detail_23k.json, and complex_reasoning_77k.json in conversation.yaml, detail.yam - [OK-VQA Input Questions](https://okvqa.allenai.org/static/data/OpenEnded_mscoco_train2014_questions.json.zip) - [OK-VQA Annotations](https://okvqa.allenai.org/static/data/mscoco_train2014_annotations.json.zip) +- [okvqa_train](https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json) - Images are from COCO ### AOK-VQA From dc84c5e5c7c95f9e084f6fed12c3b2db56ee1fcd Mon Sep 17 00:00:00 2001 From: XiaoqianShen <64844805+xiaoqian-shen@users.noreply.github.com> Date: Mon, 23 Oct 2023 19:29:37 +0000 Subject: [PATCH 17/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index edbdf2f..0181f7d 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -10,6 +10,9 @@ ### TextCaps +-[TextCaps_0.1_train](https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_train.json) +-[Images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) + ### RefCOCO, RefCOCO+, RefCOCOg Make sure you have the COCO 2014 images first. @@ -47,12 +50,6 @@ Similarly, set **ann_path** in all the following configs to the above folder (Lo - [minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml](../minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml) - [minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml](../minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml) - - -### Visual Genome - -### textcaps - ### LLaVA Makesure you have the COCO 2014 images first. From b93d40f23cfc69ba1c68d8fc78014099869e47f2 Mon Sep 17 00:00:00 2001 From: XiaoqianShen <64844805+xiaoqian-shen@users.noreply.github.com> Date: Mon, 23 Oct 2023 19:31:05 +0000 Subject: [PATCH 18/18] Update README_MINIGPTv2_FINETUNE.md --- dataset/README_MINIGPTv2_FINETUNE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dataset/README_MINIGPTv2_FINETUNE.md b/dataset/README_MINIGPTv2_FINETUNE.md index 0181f7d..f7af60d 100644 --- a/dataset/README_MINIGPTv2_FINETUNE.md +++ b/dataset/README_MINIGPTv2_FINETUNE.md @@ -10,8 +10,8 @@ ### TextCaps --[TextCaps_0.1_train](https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_train.json) --[Images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) +- [TextCaps_0.1_train](https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_train.json) +- [Images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) ### RefCOCO, RefCOCO+, RefCOCOg Make sure you have the COCO 2014 images first.
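A note on the OCR-VQA step documented in the finetune README patched above: the series only links a download script and states that all files are saved as `.jpg`, without showing that conversion. The sketch below is one possible way to normalize the downloaded images afterwards; it is an illustration, not the authors' script. The `images_dir` path is a placeholder, it assumes the download left a folder of mixed extensions (`.png`, `.gif`, ...), and it requires Pillow.

```python
import os
from PIL import Image

# Placeholder path: point this at the folder the OCR-VQA download script filled.
images_dir = "/path/to/ocr_vqa/images"

for name in os.listdir(images_dir):
    base, ext = os.path.splitext(name)
    if ext.lower() == ".jpg":
        continue  # already in the expected format
    src = os.path.join(images_dir, name)
    dst = os.path.join(images_dir, base + ".jpg")
    try:
        # Re-encode as RGB JPEG so every image ends up as <id>.jpg,
        # matching the "we save all files as .jpg" note above.
        Image.open(src).convert("RGB").save(dst, "JPEG")
        os.remove(src)  # drop the original so only the .jpg copy remains
    except OSError as err:
        print(f"skipping {name}: {err}")
```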