update evaluation

junchen14 2023-10-29 10:41:16 +03:00
parent 9d8335c1f5
commit 68f42c0570
43 changed files with 35 additions and 2102 deletions


@@ -1,11 +1,12 @@
model:
arch: minigpt_v2
model_type: pretrain
max_txt_len: 160
max_txt_len: 500
end_sym: "</s>"
low_resource: True
prompt_template: '[INST] {} [/INST]'
ckpt: 'please set this value to the path of pretrained checkpoint'
llama_model: "/ibex/project/c2133/llama_v2/llama-2-7b-chat-pytorch_update"
ckpt: "/ibex/ai/project/c2090/minigpt4_ckpt/448_conversation_correct_best_v7_ablation1_v5_v6/20231007035/checkpoint_35.pth"
lora_r: 64
lora_alpha: 16


@@ -8,15 +8,18 @@ NAME=EXP_NAME
IMG_PATH=YOUR_IMG_PATH
EVAL_FILE_PATH=YOUR_EVAL_FILE_PATH
torchrun --nproc_per_node 1 eval_scripts/eval_ref.py --name ${NAME} \
--cfg-path ${cfg_path} \
--ckpt ${CKPT} --dataset refcoco,refcoco+,refcocog --lora_r 64 --lora_alpha 16 \
--batch_size 64 --max_new_tokens 20 --resample --img_path ${IMG_PATH} --eval_file_path ${EVAL_FILE_PATH}
# torchrun --nproc_per_node 1 eval_scripts/eval_ref.py --name ${NAME} \
# --cfg-path ${cfg_path} \
# --ckpt ${CKPT} --dataset refcoco,refcoco+,refcocog --lora_r 64 --lora_alpha 16 \
# --batch_size 64 --max_new_tokens 20 --resample --img_path ${IMG_PATH} --eval_file_path ${EVAL_FILE_PATH}
# torchrun --nproc_per_node 1 eval_scripts/eval_vqa.py --name ${NAME} \
# --cfg-path ${cfg_path} \
# --ckpt ${CKPT} --split val,test --dataset okvqa,vizwiz,iconqa,gqa,vsr,hm --lora_r 64 --lora_alpha 16 \
# --batch_size 32 --max_new_tokens 20 --resample
torchrun --nproc_per_node 1 eval_scripts/eval_vqa.py --name ${NAME} \
--cfg-path ${cfg_path} \
--ckpt ${CKPT} --split val,test --dataset okvqa,vizwiz,aokvqa,iconqa,gqa,vsr,hm --lora_r 64 --lora_alpha 16 \
--batch_size 32 --max_new_tokens 20 --resample
torchrun --master-port ${PORT} --nproc_per_node 1 run_textvqa_eval.py --name ${NAME} --ckpt_path ${CKPT} --lora_r 64 --lora_alpha 16 --eval_file_path ${EVAL_FILE_PATH}
torchrun --master-port ${PORT} --nproc_per_node 1 run_sciencevqa_eval.py --name ${NAME} --ckpt_path ${CKPT} --lora_r 64 --lora_alpha 16
--ckpt ${CKPT} --split val,test --dataset okvqa --lora_r 64 --lora_alpha 16 \
--batch_size 32 --max_new_tokens 20


@@ -1,177 +0,0 @@
import argparse
import os
import random
import requests
from io import BytesIO
import numpy as np
from PIL import Image
import torch
import torch.backends.cudnn as cudnn
import gradio as gr
import minigpt4.tasks as tasks
from minigpt4.common.config import Config
from minigpt4.common.dist_utils import get_rank, init_distributed_mode
from minigpt4.common.logger import setup_logger
from minigpt4.common.optims import (
LinearWarmupCosineLRScheduler,
LinearWarmupStepLRScheduler,
)
from minigpt4.common.registry import registry
from minigpt4.common.utils import now
# imports modules for registration
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import *
parser = argparse.ArgumentParser(description="Demo")
parser.add_argument("--cfg-path", required=False, default='scienceqa_eval.yaml', help="path to configuration file.")
parser.add_argument("--ckpt_path", required=False, help="path to configuration file.")
parser.add_argument("--lora_r", type=int, default=64, help="path to configuration file.")
parser.add_argument("--lora_alpha", type=int, default=16, help="path to configuration file.")
parser.add_argument("--name", type=str)
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
print('Initializing Chat')
args = parser.parse_args()
cfg = Config(args)
ckpt_list = [args.ckpt_path]
print('evaluating config:', args.cfg_path)
print('evaluating checkpoint:', args.ckpt_path)
for ckpt in ckpt_list:
cfg.model_cfg.ckpt = ckpt
cfg.model_cfg.lora_r=args.lora_r
cfg.model_cfg.lora_alpha=args.lora_alpha
model_config = cfg.model_cfg
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to('cuda:0')
model.eval()
print('Initialization Finished')
vis_processor_cfg = cfg.datasets_cfg.coco_vqa.vis_processor.eval
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
text_processor_cfg = cfg.datasets_cfg.coco_vqa.text_processor.eval
text_processor = registry.get_processor_class(text_processor_cfg.name).from_config(text_processor_cfg)
task = tasks.setup_task(cfg)
from minigpt4.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
class ScienceQAEvalDataset(VQADataset):
def __init__(self, vis_processor, text_processor, vis_root=None, ann_paths=None):
# super().__init__(vis_processor, text_processor, vis_root, ann_paths)
from datasets import load_dataset
self.annotation = load_dataset("derek-thomas/ScienceQA", split='test')
## keep only annotations that include an image
self.annotation = [ann for ann in self.annotation if ann['image']]
self.vis_processor = vis_processor
self.text_processor = text_processor
self.instruction_pool = [
'[vqa] Question: {} Answer:',
]
self.alphabet_options = ["A", "B", "C", "D", "E"]
self.max_num_choices = len(self.alphabet_options)
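# ScienceQA items have up to 5 choices (A-E); __getitem__ pads shorter lists with "" to this fixed length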
def __getitem__(self, index):
ann = self.annotation[index]
image = ann['image'].convert("RGB")
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
lecture = self.text_processor(ann['lecture'])
hint = self.text_processor(ann['hint'])
options = ann['choices']
num_choices = len(ann['choices'])
if num_choices < self.max_num_choices:
options = options + (self.max_num_choices - num_choices) * [""]
options = [self.text_processor(opt) for opt in options]
instruction = random.choice(self.instruction_pool).format(question, lecture)
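# NB: the template has a single {} slot, so str.format silently ignores the extra 'lecture' argument ('hint' is unused too)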
instruction = "<s>[INST]<Img><ImageHere></Img> {} [/INST]".format(instruction)
answer = options[ann['answer']]
return {
"image": image,
"question": question,
"choices": options,
# "choices": self.alphabet_options,
"num_choices": num_choices,
"answer": answer,
"instruction_input": instruction,
"question_id": index,
}
dataset = ScienceQAEvalDataset(vis_processor, text_processor)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.run_cfg.batch_size_eval, \
num_workers=cfg.run_cfg.num_workers)
print(len(dataset), len(dataloader))
result_dir = cfg.run_cfg.output_dir
if not os.path.exists(result_dir):
os.makedirs(result_dir)
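# derive the epoch and experiment id from the checkpoint path,
# e.g. .../20231007035/checkpoint_35.pth -> epoch '35', exp_id '20231007035_35'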
epoch = cfg.model_cfg.ckpt.split('/')[-1].split('.')[0].split('_')[1]
exp_id = cfg.model_cfg.ckpt.split('/')[-2] + "_" + epoch
val_result = task.evaluation(model, dataloader, cuda_enabled=True)
task.after_evaluation(val_result, split_name=f'{args.name}_scienceqa_val', result_dir=result_dir)
from minigpt4.common.vqa_tools.vqa import VQA
from minigpt4.common.vqa_tools.vqa_eval import VQAEval
import json, logging, os
result_file = '{}/{}_scienceqa_val.json'.format(result_dir, args.name)
results = json.load(open(result_file, "r"))
acc = []
vqa_tool = VQAEval()
for res in results:
gt_ans = res["gt_ans"]
pred = res["pred_ans"]
pred = vqa_tool.processPunctuation(pred)
pred = vqa_tool.processDigitArticle(pred)
vqa_acc = 1 if pred == gt_ans else 0
acc.append(vqa_acc)
accuracy = sum(acc) / len(acc) * 100
print('scienceqa val acc: ', accuracy, flush=True)


@@ -1,184 +0,0 @@
import argparse
import os
import random
import requests
from io import BytesIO
import numpy as np
from PIL import Image
import torch
import torch.backends.cudnn as cudnn
import gradio as gr
import minigpt4.tasks as tasks
from minigpt4.common.config import Config
from minigpt4.common.dist_utils import get_rank, init_distributed_mode
from minigpt4.common.logger import setup_logger
from minigpt4.common.optims import (
LinearWarmupCosineLRScheduler,
LinearWarmupStepLRScheduler,
)
from minigpt4.common.registry import registry
from minigpt4.common.utils import now
# imports modules for registration
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import *
parser = argparse.ArgumentParser(description="Demo")
parser.add_argument("--cfg-path", required=False, default='textvqa_eval.yaml', help="path to configuration file.")
parser.add_argument("--ckpt_path", required=False, help="path to configuration file.")
parser.add_argument("--lora_r", type=int, default=64, help="path to configuration file.")
parser.add_argument("--lora_alpha", type=int, default=16, help="path to configuration file.")
parser.add_argument("--name", type=str)
parser.add_argument("--img_path", type=str)
parser.add_argument("--eval_file_path", type=str)
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
print('Initializing Chat')
args = parser.parse_args()
cfg = Config(args)
ckpt_list = [args.ckpt_path]
print('evaluating config:', args.cfg_path)
print('evaluating checkpoint:', args.ckpt_path)
import json
import numpy as np
f = open(f'{args.eval_file_path}/textocr/TextOCR_0.1_val.json', 'r')
textcap_dataset_val = json.load(f)
all_img_ids = list(textcap_dataset_val['imgs'].keys())
ann_ids = list(textcap_dataset_val['anns'].keys())
for ckpt in ckpt_list:
cfg.model_cfg.ckpt = ckpt
cfg.model_cfg.lora_r=args.lora_r
cfg.model_cfg.lora_alpha=args.lora_alpha
model_config = cfg.model_cfg
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to('cuda:0')
model.eval()
print('Initialization Finished')
vis_processor_cfg = cfg.datasets_cfg.coco_vqa.vis_processor.eval
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
text_processor_cfg = cfg.datasets_cfg.coco_vqa.text_processor.eval
text_processor = registry.get_processor_class(text_processor_cfg.name).from_config(text_processor_cfg)
task = tasks.setup_task(cfg)
from collections import Counter
def find_most_frequent_item(items):
counter = Counter(items)
most_common = counter.most_common(1)
if most_common:
return most_common[0][0]
else:
return None
from minigpt4.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
class textVQAEvalDataset(VQADataset):
def __init__(self, vis_processor, text_processor, vis_root=None, ann_paths=None, use_ocr=False):
# super().__init__(vis_processor, text_processor, vis_root, ann_paths)
from datasets import load_dataset
self.annotation = load_dataset("textvqa", split="validation",cache_dir=f"{args.eval_file_path}/textvqa_cache")
self.vis_processor = vis_processor
self.text_processor = text_processor
self.instruction_pool = [
'[vqa] Question: {} Short answer:',
]
self.use_ocr = use_ocr
def __getitem__(self, index):
ann = self.annotation[index]
image_path = ann['flickr_original_url']
image = ann["image"].convert("RGB")
img_id = ann['image_id']
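# gather this image's ground-truth OCR tokens from TextOCR (annotation ids are prefixed with the image id)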
sel_anns = [textcap_dataset_val['anns'][ann] for ann in ann_ids if ann.startswith(img_id)]
sel_caps = [ann['utf8_string'] for ann in sel_anns]
sel_caps = [cap for cap in sel_caps if cap not in ['\n', '.']]
captions = " ".join(sel_caps)
image = self.vis_processor(image)
question = self.text_processor(ann["question"])
captions = self.text_processor(captions)
instruction = random.choice(self.instruction_pool).format(question)
if self.use_ocr and img_id in all_img_ids:
instruction = "<s>[INST] <Img><ImageHere></Img> OCR tokens: {}. {} [/INST]".format(captions, instruction)
else:
instruction = "<s>[INST] <Img><ImageHere></Img> {} [/INST]".format(instruction)
answers = find_most_frequent_item(ann["answers"])
return {
"image": image,
"text_input": question,
"answer": answers,
'image_path': image_path,
"instruction_input": instruction,
"question_id": ann["question_id"],
# "instance_id": ann["instance_id"],
}
dataset = textVQAEvalDataset(vis_processor, text_processor, use_ocr=cfg.run_cfg.use_ocr)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.run_cfg.batch_size_eval, num_workers=cfg.run_cfg.num_workers)
result_dir = cfg.run_cfg.output_dir
if not os.path.exists(result_dir):
os.makedirs(result_dir)
epoch = cfg.model_cfg.ckpt.split('/')[-1].split('.')[0].split('_')[1]
exp_id = cfg.model_cfg.ckpt.split('/')[-2] + "_" + epoch
val_result = task.evaluation(model, dataloader, cuda_enabled=True)
task.after_evaluation(val_result, split_name=f'{args.name}_textvqa_val', result_dir=result_dir)
anno_files = f'{args.eval_file_path}/TextVQA/gt_ann.json'
ques_files = f'{args.eval_file_path}/TextVQA/gt_ques.json'
from minigpt4.common.vqa_tools.vqa import VQA
from minigpt4.common.vqa_tools.vqa_eval import VQAEval
import logging, json, os
result_file = '{}/{}_textvqa_val.json'.format(result_dir, args.name)
vqa = VQA(anno_files, ques_files)
vqa_result = vqa.loadRes(
resFile=result_file, quesFile=ques_files
)
# create vqaEval object by taking vqa and vqaRes
# n is precision of accuracy (number of places after decimal), default is 2
vqa_scorer = VQAEval(vqa, vqa_result, n=2)
logging.info("Start VQA evaluation.")
vqa_scorer.evaluate()
# print accuracies
overall_acc = vqa_scorer.accuracy["overall"]
print('textvqa val acc: ', overall_acc, flush=True)


@@ -1,48 +0,0 @@
model:
arch: minigpt4
model_type: pretrain_llama2
max_txt_len: 160
end_sym: "</s>"
low_resource: True
prompt_template: '[INST] {} [/INST] '
ckpt: 'please set this value to the path of pretrained checkpoint'
datasets:
coco_vqa: # not used
type: eval
vis_processor:
eval:
name: "blip2_image_eval"
image_size: 336
text_processor:
eval:
name: "blip_caption"
run:
task: scienceqa
# optimization-specific
batch_size_train: 16
batch_size_eval: 32
num_workers: 8
# inference-specific
num_ans_candidates: 5
max_len: 10
min_len: 1
num_beams: 5
inference_method: "generate"
prompt: "Question: {} Short answer:"
seed: 42
output_dir: "results"
evaluate: True
test_splits: ["val"]
# distribution-specific
device: "cuda"
world_size: 1
dist_url: "env://"
distributed: True


@@ -1,48 +0,0 @@
model:
arch: minigpt4
model_type: pretrain_llama2
max_txt_len: 160
end_sym: "</s>"
low_resource: True
prompt_template: '[INST] {} [/INST] '
ckpt: 'please set this value to the path of pretrained checkpoint'
datasets:
coco_vqa: # not used
type: eval
vis_processor:
eval:
name: "blip2_image_eval"
image_size: 336
text_processor:
eval:
name: "blip_caption"
run:
task: vqa
# optimization-specific
batch_size_train: 32
batch_size_eval: 128
# without OCR tokens: batch_size=32, evaluation total time 0:23:47 (9.0954 s/it)
# with OCR tokens: batch_size=16, evaluation total time 0:36:34 (7.0116 s/it)
use_ocr: True
num_workers: 8
# inference-specific
max_len: 10
min_len: 1
num_beams: 1
inference_method: "generate"
prompt: "Question: {} Short answer:"
seed: 42
output_dir: "results"
evaluate: True
test_splits: ["val"]
# distribution-specific
device: "cuda"
world_size: 1
dist_url: "env://"
distributed: True


@@ -11,13 +11,14 @@ import torch
from torch.utils.data import DataLoader
from datasets import load_dataset
from minigpt4.datasets.datasets.vqa_datasets import OKVQAEvalData,VizWizEvalData,AOKVQADAEvalData,AOKVQAMCEvalData,IconQAEvalData,GQAEvalData,VSREvalData,HMEvalData
from minigpt4.datasets.datasets.vqa_datasets import OKVQAEvalData,VizWizEvalData,IconQAEvalData,GQAEvalData,VSREvalData,HMEvalData
from minigpt4.common.vqa_tools.VQA.PythonHelperTools.vqaTools.vqa import VQA
from minigpt4.common.vqa_tools.VQA.PythonEvaluationTools.vqaEvaluation.vqaEval import VQAEval
from minigpt4.common.eval_utils import prepare_texts, init_model, eval_parser
from minigpt4.conversation.conversation import CONV_VISION_LLama2
from minigpt4.conversation.conversation import CONV_VISION_minigptv2
import random
@@ -37,7 +38,7 @@ print(args.ckpt)
print(args.name)
model, vis_processor = init_model(args)
conv_temp = CONV_VISION.copy()
conv_temp = CONV_VISION_LLama2.copy()
conv_temp.system = ""
model.eval()
@@ -45,8 +46,8 @@ model.eval()
os.makedirs('results', exist_ok=True)
if 'okvqa' in args.dataset:
img_path=f'{args.img_path}/COCO/cocoapi/data/2017/images/jpeg/train'
with open(f'{args.eval_file_path}/okvqa/test_split.json', 'r') as f:
img_path=os.path.join(args.img_path,"train")
with open(os.path.join(args.eval_file_path,"ok_vqa/test_split.json")) as f:
ok_vqa_test_split = json.load(f)
data = OKVQAEvalData(ok_vqa_test_split, vis_processor, img_path)
@@ -61,6 +62,8 @@ if 'okvqa' in args.dataset:
for answer, question_id, question, img_id in zip(answers, question_ids, questions, img_ids):
result = dict()
if "<unk>" in answer.lower():
print("answer: ", answer)
answer = answer.lower().replace('<unk>','').strip()
result['answer'] = answer
result['question_id'] = int(question_id)
@@ -90,7 +93,7 @@ if 'okvqa' in args.dataset:
if len(resamples) == 0:
break
save_path=f'results_correct/{args.name}_okvqa.json'
save_path=f'results/{args.name}_okvqa.json'
with open(save_path,'w') as f:
json.dump(minigpt4_predict, f)


@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Allen Institute for Artificial Intelligence
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,207 +0,0 @@
# A-OKVQA
Official repository for **A-OKVQA: A Benchmark for Visual Question Answering using World Knowledge**.
Links: [[Paper]](https://arxiv.org/abs/2206.01718) [[Website]](http://a-okvqa.allenai.org) [[Leaderboard]](https://leaderboard.allenai.org/a-okvqa/submissions/public)
### Abstract
The Visual Question Answering (VQA) task aspires to provide a meaningful testbed for the development of AI models that can jointly reason over visual and natural language inputs. Despite a proliferation of VQA datasets, this goal is hindered by a set of common limitations. These include a reliance on relatively simplistic questions that are repetitive in both concepts and linguistic structure, little world knowledge needed outside of the paired image, and limited reasoning required to arrive at the correct answer. We introduce A-OKVQA, a crowdsourced dataset composed of a diverse set of about 25K questions requiring a broad base of commonsense and world knowledge to answer. In contrast to the existing knowledge-based VQA datasets, the questions generally cannot be answered by simply querying a knowledge base, and instead require some form of commonsense reasoning about the scene depicted in the image. We demonstrate the potential of this new dataset through a detailed analysis of its contents and baseline performance measurements over a variety of state-of-the-art vision-language models.
![dataset_web](https://user-images.githubusercontent.com/28768645/170799740-f0d9ea60-6aff-4322-98d5-cae8e05983f4.svg)
<hr>
#### Table of Contents
- [Getting started](#getting-started)
* [Downloading the dataset](#downloading-the-dataset)
- [Evaluation & Leaderboard](#evaluation)
- [Codebase](#codebase)
* [Preparing data](#preparing-data)
* [Models and Predictions](#models-and-predictions)
<hr>
## Getting started
```bash
git clone --single-branch --recurse-submodules https://github.com/allenai/aokvqa.git
cd aokvqa
export PYTHONPATH=.
conda env create --name aokvqa
conda activate aokvqa
```
### Downloading the dataset
```bash
export AOKVQA_DIR=./datasets/aokvqa/
mkdir -p ${AOKVQA_DIR}
curl -fsSL https://prior-datasets.s3.us-east-2.amazonaws.com/aokvqa/aokvqa_v1p0.tar.gz | tar xvz -C ${AOKVQA_DIR}
```
<details> <summary><b>Downloading COCO 2017</b></summary>
```bash
export COCO_DIR=./datasets/coco/
mkdir -p ${COCO_DIR}
for split in train val test; do
wget "http://images.cocodataset.org/zips/${split}2017.zip"
unzip "${split}2017.zip" -d ${COCO_DIR}; rm "${split}2017.zip"
done
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
unzip annotations_trainval2017.zip -d ${COCO_DIR}; rm annotations_trainval2017.zip
```
</details>
Loading our dataset is easy! Just grab our [load_aokvqa.py](https://github.com/allenai/aokvqa/blob/main/load_aokvqa.py) file and refer to the following code.
```python
import os
aokvqa_dir = os.getenv('AOKVQA_DIR')
from load_aokvqa import load_aokvqa, get_coco_path
train_dataset = load_aokvqa(aokvqa_dir, 'train') # also 'val' or 'test'
```
<details> <summary><b>Example dataset entry</b></summary>
```python
dataset_example = train_dataset[0]
print(dataset_example['question_id'])
# 22MexNkBPpdZGX6sxbxVBH
coco_dir = os.getenv('COCO_DIR')
image_path = get_coco_path('train', dataset_example['image_id'], coco_dir)
print(image_path)
# ./datasets/coco/train2017/000000299207.jpg
print(dataset_example['question'])
print(dataset_example['choices'])
# What is the man by the bags awaiting?
# ['skateboarder', 'train', 'delivery', 'cab']
correct_choice = dataset_example['choices'][ dataset_example['correct_choice_idx'] ]
# Correct: cab
print(dataset_example['rationales'][0])
# A train would not be on the street, he would not have luggage waiting for a delivery, and the skateboarder is there and not paying attention to him so a cab is the only possible answer.
```
</details>
## Evaluation
Please prepare `predictions_{split}.json` files (for `split: {val,test}`) in the format below. You may omit either the `multiple_choice` or the `direct_answer` field if you only want to evaluate one setting.
```python
{
'<question_id>' : {
'multiple_choice' : '<prediction>',
'direct_answer' : '<prediction>'
}
}
```
You can run evaluation on the validation set as follows.
```bash
python evaluation/eval_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --preds ./predictions_val.json
```
### Leaderboard
You may submit `predictions_test.json` to the [leaderboard](https://leaderboard.allenai.org/a-okvqa/submissions/get-started).
## Codebase
We provide all code and pretrained models necessary to replicate our experiments for Large-Scale Pretrained Models (sec. 5.2) and Rationale Generation (sec. 5.3).
### Preparing data
```bash
export FEATURES_DIR=./features/
mkdir -p ${FEATURES_DIR}
```
You can compute CLIP features for our vocabulary and dataset. These are most commonly used by our other experiments.
```bash
python data_scripts/encode_vocab_clip.py --vocab ${AOKVQA_DIR}/large_vocab_train.csv --model-type ViT-B/32 --out ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt
for split in train val test; do
python data_scripts/extract_clip_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --model-type ViT-B/32 --out ${FEATURES_DIR}/clip-ViT-B-32_${split}.pt
done
```
<details> <summary><b>For training ClipCap with a transformer mapping network</b></summary>
If you want to train our ClipCap models with the transformer mapping network (instead of an MLP, like we do), you'll also need to run `extract_clip_features.py` with `--model-type RN50x4`.
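For reference, here is a minimal sketch of that invocation, mirroring the ViT-B/32 loop above (the `clip-RN50x4_${split}.pt` output name is only an assumed naming convention):
```bash
for split in train val test; do
    python data_scripts/extract_clip_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --model-type RN50x4 --out ${FEATURES_DIR}/clip-RN50x4_${split}.pt
done
```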
</details>
<details> <summary><b>For ResNet and BERT input features</b></summary>
Our ResNet and BERT classification experiments require these respective features instead of CLIP. To generate these, please run the following commands:
```bash
# ResNet
for split in train val test; do
python data_scripts/extract_resnet_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --out ${FEATURES_DIR}/resnet_${split}.pt
done
# BERT
for split in train val test; do
python data_scripts/extract_bert_features.py --aokvqa-dir ${AOKVQA_DIR} --split ${split} --out ${FEATURES_DIR}/bert_${split}.pt
done
```
</details>
### Models and Predictions
```bash
export LOG_DIR=./logs/
export PREDS_DIR=./predictions/
export PT_MODEL_DIR=./pretrained_models/
mkdir -p ${LOG_DIR} ${PREDS_DIR} ${PT_MODEL_DIR}
```
<details> <summary><b>Download our pretrained model weights</b></summary>
```bash
# Checkpoints for transfer learning experiments
curl -fsSL https://prior-model-weights.s3.us-east-2.amazonaws.com/aokvqa/transfer_exp_checkpoints.tar.gz | tar xvz -C ${PT_MODEL_DIR}/aokvqa_models
# Checkpoints for ClipCap models (generating answers and rationales)
curl -fsSL https://prior-model-weights.s3.us-east-2.amazonaws.com/aokvqa/clipcap_checkpoints.tar.gz | tar xvz -C ${PT_MODEL_DIR}/aokvqa_models
```
</details>
We have included instructions for replicating each of our experiments (see README.md files below).
All Python scripts should be run from the root of this repository. Please be sure to first run the installation and data preparation as directed above.
- [Heuristics](./heuristics/README.md)
- [Transfer Learning Experiments](./transfer_experiments/README.md)
- [Querying GPT-3](./gpt3/README.md)
- [ClipCap](https://github.com/allenai/aokvqa/blob/ClipCap/README.md)
- [Generating Captions & Rationales](https://github.com/allenai/aokvqa/blob/ClipCap/README.md)
For each experiment, we follow this prediction file naming scheme: `{model-name}_{split}-{setting}.json` (e.g. `random-weighted_val-mc.json` or `random-weighted_test-da.json`). The examples in these README files produce predictions on the validation set.
We unify predictions for each split before evaluation. (You can omit the `--mc` or `--da` prediction file if you only want to evaluate one setting.)
```bash
python evaluation/prepare_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --mc ./predictions_val-mc.json --da ./predictions_val-da.json --out ./predictions_val.json
# repeat for test split ...
```


@@ -1,45 +0,0 @@
import os
import argparse
from collections import Counter
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
# Build the vocab from the train set: correct choices + choices/direct answers appearing >= 3 times
train_set = load_aokvqa(args.aokvqa_dir, 'train')
vocab = []
all_choices = Counter()
direct_answers = Counter()
for i in train_set:
vocab.append( i['choices'][i['correct_choice_idx']] )
all_choices.update(i['choices'])
direct_answers.update(set(i['direct_answers']))
vocab += [k for k,v in all_choices.items() if v >= 3]
vocab += [k for k,v in direct_answers.items() if v >= 3]
vocab = sorted(set(vocab))
print(f"Vocab size: {len(vocab)}")
# Save vocabulary to the output file
with open(args.output_file, 'w') as f:
for v in vocab:
print(v, file=f)
## Check validation set coverage
val_set = load_aokvqa(args.aokvqa_dir, 'val')
val_acc = [v['choices'][v['correct_choice_idx']] in vocab for v in val_set]
val_acc = sum(val_acc) / len(val_acc) * 100
print(f"Val set coverage: {val_acc:.2f}" )


@@ -1,26 +0,0 @@
import json
from tqdm import tqdm
import argparse
import pathlib
import torch
import clip
parser = argparse.ArgumentParser()
parser.add_argument('--vocab', type=pathlib.Path, required=True, dest='vocab_file')
parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load(args.model_type, device=device)
with torch.no_grad():
a = open(args.vocab_file).read().splitlines()
mc_text = clip.tokenize(a).to(device)
mc_text_features = torch.stack([model.encode_text(mct.unsqueeze(0)).cpu() for mct in tqdm(mc_text)], dim=1)[0]
mc_text_features = mc_text_features.float()
model_name = args.model_type.replace('/', '-').replace('@', '-')
torch.save(mc_text_features, args.output_file)


@@ -1,50 +0,0 @@
import os
import argparse
import pathlib
from tqdm import tqdm
import torch
from transformers import AutoTokenizer, AutoModel
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
## Load dataset
dataset = load_aokvqa(args.aokvqa_dir, args.split)
## Load model
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
## Encoding loop
with torch.no_grad():
embeddings = {}
for d in tqdm(dataset):
encoded_input = tokenizer([d['question']], padding=True, return_tensors='pt')
encoded_input = {k:v.to(device) for k,v in encoded_input.items()}
e = mean_pooling(model(**encoded_input), encoded_input['attention_mask'])
embeddings[d['question_id']] = {
'question' : e[0].cpu()
}
torch.save(embeddings, args.output_file)


@@ -1,51 +0,0 @@
import os
from PIL import Image
from tqdm import tqdm
import argparse
import pathlib
import torch
import clip
from load_aokvqa import load_aokvqa, get_coco_path
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type')
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
## Load dataset
dataset = load_aokvqa(args.aokvqa_dir, args.split)
## Load model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load(args.model_type, device=device)
## Encoding loop
with torch.no_grad():
embeddings = {}
for d in tqdm(dataset):
q = d["question"]
q_text = clip.tokenize(q).to(device)
q_text_features = model.encode_text(q_text)
img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir))
img = preprocess(img).unsqueeze(0).to(device)
image_features = model.encode_image(img)
embeddings[d['question_id']] = {
'question' : q_text_features[0].float().cpu(),
'image' : image_features[0].float().cpu(),
}
torch.save(embeddings, args.output_file)


@@ -1,62 +0,0 @@
import os
import argparse
import pathlib
from tqdm import tqdm
from PIL import Image
import torch
import torch.nn as nn
from torchvision import models
from torchvision import transforms as T
from load_aokvqa import load_aokvqa, get_coco_path
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
args = parser.parse_args()
assert args.output_file.suffix == '.pt'
## Load dataset
dataset = load_aokvqa(args.aokvqa_dir, args.split)
## Load model
resnet_preprocess = T.Compose([
T.Resize(size=224, interpolation=T.InterpolationMode.BICUBIC),
T.CenterCrop(size=(224, 224)),
T.ToTensor(),
T.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
device = "cuda" if torch.cuda.is_available() else "cpu"
resnet_model = models.resnet50(pretrained=True)
resnet_model = torch.nn.Sequential(
*list(resnet_model.children())[:-1],
nn.Flatten()
) # strip classification layer
resnet_model = resnet_model.to(device)
## Encoding loop
with torch.no_grad():
embeddings = {}
for d in tqdm(dataset):
img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir)).convert('RGB')
resnet_input = resnet_preprocess(img).unsqueeze(0).to(device)
resnet_features = resnet_model(resnet_input)
embeddings[d['question_id']] = {
'image' : resnet_features[0].cpu()
}
torch.save(embeddings, args.output_file)


@@ -1,36 +0,0 @@
name: aokvqa
channels:
- pytorch
- nvidia
- huggingface
- conda-forge
- defaults
dependencies:
- python=3.7
- cudatoolkit=11.3
- numpy=1.21.6
- pytorch=1.11.0
- torchvision=0.12.0
- pytorch-lightning=1.6.3
- torchmetrics=0.8.1
- gdown=4.4.0
- pip=22.0.4
- pip:
- argparse==1.4.0
- Pillow==9.0.1
- tensorboard==2.9.0
- ftfy==6.1.1
- regex==2022.3.15
- tqdm==4.64.0
- clip @ git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620
- openai==0.18.1
- nltk==3.7
- sacrebleu==2.0.0
- sacremoses==0.0.53
- sentence-transformers==2.2.0
- datasets==2.1.0
- tokenizers==0.10.3
- transformers==4.10.3
# Next: resolve conflict between sentence-transformers and pytorch-lightning
# pip uninstall sentencepiece


@@ -1,97 +0,0 @@
import argparse
import pathlib
import json
import glob
from load_aokvqa import load_aokvqa
def eval_aokvqa(dataset, preds, multiple_choice=False, strict=True):
if isinstance(dataset, list):
dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
if multiple_choice is False:
dataset = {k:v for k,v in dataset.items() if v['difficult_direct_answer'] is False}
if strict:
dataset_qids = set(dataset.keys())
preds_qids = set(preds.keys())
assert dataset_qids.issubset(preds_qids)
# dataset = q_id (str) : dataset element (dict)
# preds = q_id (str) : prediction (str)
acc = []
for q in dataset.keys():
if q not in preds.keys():
acc.append(0.0)
continue
pred = preds[q]
choices = dataset[q]['choices']
direct_answers = dataset[q]['direct_answers']
## Multiple Choice setting
if multiple_choice:
if strict:
assert pred in choices, 'Prediction must be a valid choice'
correct_choice_idx = dataset[q]['correct_choice_idx']
acc.append( float(pred == choices[correct_choice_idx]) )
## Direct Answer setting
else:
num_match = sum([pred.lower() == da.lower() for da in direct_answers])
vqa_acc = min(1.0, num_match / 3.0)
acc.append(vqa_acc)
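# soft VQA accuracy: a prediction matching 3+ of the direct answers earns full credit,
# e.g. 2 of 10 matches -> min(1.0, 2/3) ~= 0.67, 5 matches -> 1.0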
acc = sum(acc) / len(acc) * 100
return acc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--preds', type=str, required=True, dest='prediction_files')
args = parser.parse_args()
dataset = load_aokvqa(args.aokvqa_dir, args.split)
for prediction_file in glob.glob(args.prediction_files):
predictions = json.load(open(prediction_file, 'r'))
# Multiple choice
mc_predictions = {}
for q in predictions.keys():
if 'multiple_choice' in predictions[q].keys():
mc_predictions[q] = predictions[q]['multiple_choice']
if mc_predictions != {}:
mc_acc = eval_aokvqa(
dataset,
mc_predictions,
multiple_choice=True,
strict=False
)
print(prediction_file, 'MC', mc_acc)
# Direct Answer
da_predictions = {}
for q in predictions.keys():
if 'direct_answer' in predictions[q].keys():
da_predictions[q] = predictions[q]['direct_answer']
if da_predictions != {}:
da_acc = eval_aokvqa(
dataset,
da_predictions,
multiple_choice=False,
strict=False
)
print(prediction_file, 'DA', da_acc)


@@ -1,13 +0,0 @@
import os
import json
def load_aokvqa(aokvqa_dir, split, version='v1p0'):
assert split in ['train', 'val', 'test', 'test_w_ans']
dataset = json.load(open(
os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
))
return dataset
def get_coco_path(split, image_id, coco_dir):
return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg")
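# e.g. get_coco_path('train', 299207, './datasets/coco/') -> ./datasets/coco/train2017/000000299207.jpg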


@@ -1,31 +0,0 @@
import argparse
import pathlib
import json
from load_aokvqa import load_aokvqa
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', type=argparse.FileType('r'), dest='mc_pred_file')
parser.add_argument('--da', type=argparse.FileType('r'), dest='da_pred_file')
parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file')
args = parser.parse_args()
assert args.mc_pred_file or args.da_pred_file
dataset = load_aokvqa(args.aokvqa_dir, args.split)
mc_preds = json.load(args.mc_pred_file) if args.mc_pred_file else None
da_preds = json.load(args.da_pred_file) if args.da_pred_file else None
predictions = {}
for d in dataset:
q = d['question_id']
predictions[q] = {}
if mc_preds and q in mc_preds.keys():
predictions[q]['multiple_choice'] = mc_preds[q]
if da_preds and q in da_preds.keys():
predictions[q]['direct_answer'] = da_preds[q]
json.dump(predictions, args.output_file)


@@ -1,44 +0,0 @@
import argparse
import pathlib
import json
from tqdm import tqdm
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim
from load_aokvqa import load_aokvqa
def map_to_choices(dataset, predictions, device='cpu'):
if isinstance(dataset, list):
dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
if all([p in dataset[q]['choices'] for q, p in predictions.items()]):
return predictions
model = SentenceTransformer('sentence-transformers/average_word_embeddings_glove.6B.300d')
model.to(device)
for q in tqdm(predictions.keys()):
choices = dataset[q]['choices']
if predictions[q] not in choices:
choice_embeddings = model.encode([predictions[q]] + choices, convert_to_tensor=True)
a_idx = cos_sim(choice_embeddings[0], choice_embeddings[1:]).argmax().item()
predictions[q] = choices[a_idx]
return predictions
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--pred', type=argparse.FileType('r'), required=True, dest='prediction_file')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
dataset = load_aokvqa(args.aokvqa_dir, args.split)
predictions = json.load(args.prediction_file)
predictions = map_to_choices(dataset, predictions)
json.dump(predictions, args.output_file)


@@ -1,14 +0,0 @@
## Querying GPT-3
To follow our experiments which use GPT-3, you must have access to the [OpenAI API](https://openai.com/api/) (at cost). Please retrieve your [organization](https://beta.openai.com/account/org-settings) and [API](https://beta.openai.com/account/api-keys) keys and set them in your environment variables.
```bash
export OPENAI_ORG=....
export OPENAI_API_KEY=...
```
For producing predictions for both DA and MC settings, run:
```bash
python gpt3/query_gpt3.py --aokvqa-dir ${AOKVQA_DIR} --split val --out ${PREDS_DIR}/gpt3_val-da.json
python remap_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --pred ${PREDS_DIR}/gpt3_val-da.json --out ${PREDS_DIR}/gpt3_val-mc.json
```


@@ -1,23 +0,0 @@
import os
import json
import argparse
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
parser.add_argument('--split', type=str, choices=['train', 'val'], required=True)
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
coco_captions = json.load(open(os.path.join(args.coco_dir, 'annotations', f'captions_{args.split}2017.json')))['annotations']
coco_captions = {c['image_id'] : c['caption'] for c in coco_captions}
captions = { d['question_id'] : coco_captions[d['image_id']] for d in aokvqa_set }
json.dump(captions, args.output_file)


@@ -1,79 +0,0 @@
import os
import random
import json
from tqdm import tqdm
import argparse
import pathlib
import openai
openai.organization = os.getenv('OPENAI_ORG')
openai.api_key = os.getenv('OPENAI_API_KEY')
from load_aokvqa import load_aokvqa
random.seed(0)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--n', type=int, default=10, dest='num_examples')
parser.add_argument('--train-context', type=argparse.FileType('r'), dest='train_context_file')
parser.add_argument('--prefix', type=str, default='', dest='prompt_prefix')
parser.add_argument('--include-choices', action='store_true', dest='include_choices')
parser.add_argument('--context', type=argparse.FileType('r'), dest='context_file')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
train_set = load_aokvqa(args.aokvqa_dir, 'train')
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
train_context = {}
context = {}
if args.context_file is not None:
train_context = json.load(args.train_context_file)
context = json.load(args.context_file)
predictions = {}
for d in tqdm(eval_set):
q = d['question_id']
prompt = args.prompt_prefix
for e in random.sample(train_set, args.num_examples):
prompt += prompt_element(e,
context=train_context.get(q, None),
include_choices=args.include_choices,
answer=True
)
prompt += '\n\n'
prompt += prompt_element(d,
context=context.get(q, None),
include_choices=args.include_choices,
answer=False
)
response = openai.Completion.create(
engine="text-curie-001",
prompt=prompt,
temperature=0.0,
max_tokens=10,
)
predictions[q] = response.choices[0].text.strip()
json.dump(predictions, args.output_file)
def prompt_element(d, context=None, include_choices=False, answer=False):
return (f"Context: {context}\n" if context is not None else '') + \
f"Q: {d['question']}\n" + \
(f"Choices: {', '.join(d['choices'])}.\n" if include_choices else '') + \
f"A:" + (f" {d['choices'][d['correct_choice_idx']]}" if answer else '')
if __name__ == '__main__':
main()


@@ -1,16 +0,0 @@
import json
import argparse
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test_w_ans'], required=True)
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
rationales = {d['question_id'] : d['rationales'][0] for d in aokvqa_set}
json.dump(rationales, args.output_file)


@@ -1,11 +0,0 @@
## Heuristics
```bash
# These scripts accept the same arguments.
# heuristics/random_unweighted.py
# heuristics/random_weighted.py
# heuristics/most_common_answer.py
python heuristics/random_unweighted.py --aokvqa-dir ${AOKVQA_DIR} --split val --mc --out ${PREDS_DIR}/random-unweighted_val-mc.json
# Exclude --mc for the direct answer setting
```


@@ -1,39 +0,0 @@
import os
import json
import argparse
import pathlib
from collections import Counter
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
train_set = load_aokvqa(args.aokvqa_dir, 'train')
train_freq = dict(Counter(
[d['choices'][d['correct_choice_idx']] for d in train_set]
))
most_common_answer = max(train_freq.keys(), key=train_freq.get)
##
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
predictions = {}
for d in eval_set:
q = d['question_id']
predictions[q] = most_common_answer
if args.multiple_choice:
choices = [c for c in d['choices'] if c in train_freq.keys()]
if len(choices) > 0:
predictions[q] = max(choices, key=train_freq.get)
json.dump(predictions, args.output_file)


@@ -1,38 +0,0 @@
import os
import json
from random import seed, sample
import argparse
import pathlib
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
seed(0)
train_set = load_aokvqa(args.aokvqa_dir, 'train')
if args.multiple_choice is False:
choices = list(set(
[d['choices'][d['correct_choice_idx']] for d in train_set]
))
##
predictions = {}
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
for d in eval_set:
q = d['question_id']
if args.multiple_choice:
choices = d['choices']
predictions[q] = sample(choices, 1)[0]
json.dump(predictions, args.output_file)


@@ -1,46 +0,0 @@
import os
import json
import numpy as np
import argparse
import pathlib
from collections import Counter
from load_aokvqa import load_aokvqa
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
args = parser.parse_args()
np.random.seed(0)
train_set = load_aokvqa(args.aokvqa_dir, 'train')
train_freq = dict(Counter(
[d['choices'][d['correct_choice_idx']] for d in train_set]
))
if args.multiple_choice is False:
choices = list(train_freq.keys())
probs = [f / len(train_set) for f in train_freq.values()]
##
predictions = {}
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
for d in eval_set:
if args.multiple_choice:
choices = d['choices']
probs = [train_freq.get(c, 0) for c in choices]
if probs == [0, 0, 0, 0]:
probs = [1, 1, 1, 1]
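# none of this question's choices ever appears as a training answer: fall back to a
# uniform distribution (A-OKVQA questions have exactly 4 choices, hence the hard-coded lists)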
probs = [p / sum(probs) for p in probs]
q = d['question_id']
predictions[q] = np.random.choice(choices, size=1, p=probs)[0]
json.dump(predictions, args.output_file)

View File

@ -1,13 +0,0 @@
import os
import json
def load_aokvqa(aokvqa_dir, split, version='v1p0'):
"""Load one split of the A-OKVQA annotations as a list of question dicts."""
assert split in ['train', 'val', 'test', 'test_w_ans']
dataset = json.load(open(
os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
))
return dataset
def get_coco_path(split, image_id, coco_dir):
return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg")
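A quick usage sketch (the directory paths are hypothetical):

```python
from load_aokvqa import load_aokvqa, get_coco_path

dataset = load_aokvqa('datasets/aokvqa', 'val')  # list of annotation dicts
d = dataset[0]
print(d['question'], d['choices'], d['correct_choice_idx'])
# Images live under {coco_dir}/{split}2017/{image_id:012}.jpg
print(get_coco_path('val', d['image_id'], 'datasets/coco'))
```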

View File

@ -1,41 +0,0 @@
## Transfer Learning Experiments
We use the following training/prediction scripts for the classifier, zero-shot, and contrastive experiments in Table 3.
```bash
## Training
python transfer_experiments/train.py --aokvqa-dir ${AOKVQA_DIR} --vocab ${AOKVQA_DIR}/large_vocab_train.csv --log-dir ${LOG_DIR}
--backbone clip --clip-model-type ViT-B/32 --train-features ${FEATURES_DIR}/clip-ViT-B-32_train.pt --val-features ${FEATURES_DIR}/clip-ViT-B-32_val.pt
--inputs question # OR --inputs image # OR --inputs question image
# OR
--backbone resnet --train-features ${FEATURES_DIR}/resnet_train.pt --val-features ${FEATURES_DIR}/resnet_val.pt --inputs image
# OR
--backbone bert --train-features ${FEATURES_DIR}/bert_train.pt --val-features ${FEATURES_DIR}/bert_val.pt --inputs question
--objective classifier
# OR
--objective contrastive --vocab-features ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt
```
You can generate predictions either zero-shot with CLIP or from a classifier/contrastive checkpoint trained above.
```bash
## Predicting
python transfer_experiments/predict.py --aokvqa-dir ${AOKVQA_DIR} --out ${PREDS_DIR}/clip-classifier_val-mc.json
--split val # or test
--features ${FEATURES_DIR}/clip-ViT-B-32_val.pt # adjust for backbone and eval split
--ckpt path/to/model.ckpt
# OR
--zero-shot --clip-model-type ViT-B/32
--inputs question # OR --inputs image # OR --inputs question image
--mc # Multiple-choice. Exclude for direct-answer.
# IF classifier OR direct-answer
--vocab ${AOKVQA_DIR}/large_vocab_train.csv
# IF contrastive/zero-shot AND direct-answer
--vocab-features ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt
```
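Concretely, a classifier over question+image CLIP features would be trained and evaluated along these lines (the checkpoint filename is written by ModelCheckpoint under `${LOG_DIR}/clip-classifier/inputs:question+image/`, abbreviated here):

```bash
python transfer_experiments/train.py --aokvqa-dir ${AOKVQA_DIR} --vocab ${AOKVQA_DIR}/large_vocab_train.csv --log-dir ${LOG_DIR} \
    --backbone clip --clip-model-type ViT-B/32 \
    --train-features ${FEATURES_DIR}/clip-ViT-B-32_train.pt --val-features ${FEATURES_DIR}/clip-ViT-B-32_val.pt \
    --inputs question image --objective classifier

python transfer_experiments/predict.py --aokvqa-dir ${AOKVQA_DIR} --out ${PREDS_DIR}/clip-classifier_val-mc.json \
    --split val --features ${FEATURES_DIR}/clip-ViT-B-32_val.pt \
    --ckpt path/to/model.ckpt \
    --mc --vocab ${AOKVQA_DIR}/large_vocab_train.csv
```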

View File

@ -1,126 +0,0 @@
import sys
import os
import argparse
import pathlib
from tqdm import tqdm
import json
import torch
import torch.nn as nn
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663
import sentencepiece; import pytorch_lightning as pl; import clip
from transfer_experiments.train import LinearClassifier
from load_aokvqa import load_aokvqa
from evaluation.remap_predictions import map_to_choices
parser = argparse.ArgumentParser()
parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--features', type=pathlib.Path, required=True)
parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file')
#
parser_weights = parser.add_mutually_exclusive_group(required=True)
parser_weights.add_argument('--ckpt', type=pathlib.Path, dest='checkpoint_path')
parser_weights.add_argument('--zero-shot', action='store_true', dest='clip_zero_shot')
parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=('--zero-shot' in sys.argv))
#
parser.add_argument('--vocab', type=argparse.FileType('r'))
parser.add_argument('--vocab-features', type=pathlib.Path, dest='vocab_features')
parser.add_argument('--mc', action='store_true', dest='multiple_choice')
parser.add_argument('--clip-model-type', type=str,
choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'],
dest='clip_model_type', required=('--zero-shot' in sys.argv and '--mc' in sys.argv))
#
args = parser.parse_args()
## Load dataset
aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
## Load models
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.checkpoint_path is not None:
classifier = LinearClassifier.load_from_checkpoint(args.checkpoint_path)
classifier.to(device)
hp = classifier.hparams
elif args.clip_zero_shot:
classifier = nn.Identity().to(device)
hp = pl.utilities.AttributeDict(backbone='clip', clip_model_type=args.clip_model_type, objective='zero-shot', inputs=args.inputs)
# Load input features
embeddings = torch.load(args.features)
if hp.backbone == 'clip':
for q in embeddings.keys():
embeddings[q]['question'] = embeddings[q]['question'] / embeddings[q]['question'].norm(dim=-1, keepdim=True)
embeddings[q]['image'] = embeddings[q]['image'] / embeddings[q]['image'].norm(dim=-1, keepdim=True)
# Load vocab, vocab features, clip
if (hp.objective == 'classifier') or \
(hp.objective in ['contrastive', 'zero-shot'] and args.multiple_choice is False):
vocab = args.vocab.read().splitlines()
if hp.objective in ['contrastive', 'zero-shot']:
logit_scale = 1.0 # default for the direct-answer path; a positive scale never changes the argmax
if args.multiple_choice is False:
vocab_features = torch.load(args.vocab_features).cpu()
vocab_features /= vocab_features.norm(dim=-1, keepdim=True)
else:
clip_model = clip.load(hp.clip_model_type, device=device)[0]
logit_scale = clip_model.logit_scale.exp().cpu()
## Prediction loop
predictions = {}
with torch.no_grad():
for o in tqdm(aokvqa_set):
q = o['question_id']
# Load input embedding (from question / image)
if hp.objective == 'zero-shot' and ('question' in hp.inputs and 'image' in hp.inputs):
e = embeddings[q]['question'] + embeddings[q]['image']
elif 'question' in hp.inputs and 'image' in hp.inputs:
e = torch.cat((embeddings[q]['question'], embeddings[q]['image']))
elif 'question' in hp.inputs:
e = embeddings[q]['question']
elif 'image' in hp.inputs:
e = embeddings[q]['image']
# Pass inputs through model
e = e.unsqueeze(0).to(device)
x = classifier(e)[0].cpu()
# Predict
if hp.objective in ['contrastive', 'zero-shot']:
if args.multiple_choice:
vocab = o['choices']
# Encode choices
vocab_features = clip.tokenize(vocab).to(device)
vocab_features = torch.stack([
clip_model.encode_text(v.unsqueeze(0)) for v in vocab_features
], dim=1)[0]
vocab_features /= vocab_features.norm(dim=-1, keepdim=True)
vocab_features = vocab_features.float().cpu()
x = logit_scale * x @ vocab_features.t()
x = x.softmax(dim=-1)
predictions[q] = vocab[x.argmax().item()]
## Save and evaluate predictions
# Map prediction to nearest neighbor choice (by word embeddings)
if args.multiple_choice and hp.objective == 'classifier':
predictions = map_to_choices(aokvqa_set, predictions)
json.dump(predictions, args.output_file)
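The core of the contrastive/zero-shot branch is normalized dot-product scoring; a self-contained sketch with stand-in features (in predict.py these come from the CLIP encoders, normalized the same way):

```python
import torch

torch.manual_seed(0)
x = torch.randn(512)                   # model output for one question
vocab_features = torch.randn(4, 512)   # one row per answer choice
x = x / x.norm(dim=-1, keepdim=True)
vocab_features = vocab_features / vocab_features.norm(dim=-1, keepdim=True)
logit_scale = 100.0                    # roughly CLIP's exp(logit_scale)
probs = (logit_scale * x @ vocab_features.t()).softmax(dim=-1)
pred_idx = probs.argmax().item()       # any positive scale yields the same argmax
```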

View File

@ -1,263 +0,0 @@
import os
import sys
import json
import argparse
import pathlib
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663
import sentencepiece; import pytorch_lightning as pl
import torchmetrics.functional as MF
from load_aokvqa import load_aokvqa
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
parser.add_argument('--vocab', type=argparse.FileType('r'), required=True)
parser.add_argument('--log-dir', type=pathlib.Path, dest='log_dir', required=True)
#
parser.add_argument('--backbone', type=str, choices=['clip', 'resnet', 'bert'], required=True)
parser.add_argument('--clip-model-type', type=str,
choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'],
dest='clip_model_type', required=('clip' in sys.argv))
parser.add_argument('--train-features', type=pathlib.Path, required=True, dest='train_features')
parser.add_argument('--val-features', type=pathlib.Path, required=True, dest='val_features')
parser.add_argument('--vocab-features', type=pathlib.Path, required=('contrastive' in sys.argv), dest='vocab_features')
#
parser.add_argument('--objective', type=str, choices=['classifier', 'contrastive'], required=True)
parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=True)
# Defaults
parser.add_argument('--bs', type=int, default=128, dest='batch_size')
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--gpus', type=int, default=1)
args = parser.parse_args()
pl.seed_everything(1)
vocab = args.vocab.read().splitlines()
## Data loading
dm = AokvqaEmbeddingsDataModule(
args.aokvqa_dir,
args.train_features,
args.val_features,
args.objective,
args.backbone,
args.inputs,
vocab,
args.vocab_features,
batch_size=args.batch_size,
num_workers=16
)
## Model definition
model = LinearClassifier(
args.objective,
args.backbone,
args.clip_model_type,
args.inputs,
len(vocab),
args.lr
)
## Training and testing loops
logger = pl.loggers.TensorBoardLogger(
args.log_dir,
name=f'{args.backbone}-{args.objective}',
version=f"inputs:{'+'.join(args.inputs)}"
)
trainer = pl.Trainer(
logger=logger,
gpus=args.gpus,
max_epochs=args.epochs,
callbacks=[
pl.callbacks.ModelCheckpoint(
monitor="val_acc",
filename="{epoch:02d}-{val_acc:.2f}",
mode="max"
)
],
)
trainer.fit(model, dm)
class AokvqaEmbeddingsDataset(Dataset):
def __init__(self, aokvqa_dir, split, input_features, objective, backbone, inputs, vocab, vocab_features):
aokvqa_set = load_aokvqa(aokvqa_dir, split)
assert ( backbone == 'resnet' and inputs == ['image'] and objective == 'classifier' ) \
or ( backbone == 'bert' and inputs == ['question'] and objective == 'classifier' ) \
or ( backbone == 'clip' )
embeddings = torch.load(input_features)
if backbone == 'clip':
for q in embeddings.keys():
embeddings[q]['question'] /= embeddings[q]['question'].norm(dim=-1, keepdim=True)
embeddings[q]['image'] /= embeddings[q]['image'].norm(dim=-1, keepdim=True)
if objective == 'contrastive':
vocab_embeddings = torch.load(vocab_features)
vocab_embeddings /= vocab_embeddings.norm(dim=-1, keepdim=True)
self.objective = objective
self.vocab_len = len(vocab)
self.embeddings = []
self.answers = []
for o in aokvqa_set:
correct_answers = set([o['choices'][o['correct_choice_idx']]] + o['direct_answers'])
correct_answers = [vocab.index(a) for a in correct_answers if a in vocab]
if self.objective == 'contrastive':
correct_answers = [vocab_embeddings[a] for a in correct_answers]
if len(correct_answers) == 0: continue
self.answers.append(correct_answers)
q = o['question_id']
if 'question' in inputs and 'image' in inputs:
e = torch.cat((embeddings[q]['question'], embeddings[q]['image']))
elif 'question' in inputs and 'image' not in inputs:
e = embeddings[q]['question']
elif 'question' not in inputs and 'image' in inputs:
e = embeddings[q]['image']
self.embeddings.append(e)
def __getitem__(self, index):
e = self.embeddings[index]
a = self.answers[index]
if self.objective == 'classifier':
a = torch.sum(F.one_hot(torch.tensor(a), num_classes=self.vocab_len), dim=0)
elif self.objective == 'contrastive':
a = random.sample(a, 1)[0]
return e, a
def __len__(self):
return len(self.embeddings)
class AokvqaEmbeddingsDataModule(pl.LightningDataModule):
def __init__(self, aokvqa_dir, train_features, val_features, objective, backbone, inputs, vocab, vocab_features, batch_size=1, num_workers=0):
super().__init__()
self.aokvqa_dir = aokvqa_dir
self.train_features = train_features
self.val_features = val_features
self.objective = objective
self.backbone = backbone
self.inputs = inputs
self.vocab = vocab
self.vocab_features = vocab_features
self.batch_size = batch_size
self.num_workers = num_workers
def setup(self, stage=None):
self.train_dataset = AokvqaEmbeddingsDataset(
self.aokvqa_dir, 'train', self.train_features, self.objective,
self.backbone, self.inputs, self.vocab, self.vocab_features
)
self.val_dataset = AokvqaEmbeddingsDataset(
self.aokvqa_dir, 'val', self.val_features, self.objective,
self.backbone, self.inputs, self.vocab, self.vocab_features
)
def train_dataloader(self):
return DataLoader(
self.train_dataset, batch_size=self.batch_size, shuffle=True,
num_workers=int(0.8 * self.num_workers)
)
def val_dataloader(self):
return DataLoader(
self.val_dataset, batch_size=self.batch_size, shuffle=False,
num_workers=int(0.2 * self.num_workers)
)
class LinearClassifier(pl.LightningModule):
def __init__(self, objective, backbone, clip_model_type, inputs, vocab_len, lr=0.001):
super().__init__()
self.save_hyperparameters(ignore=['lr'])
self.lr = lr
if self.hparams.backbone == 'clip':
clip_dim = {
'RN50' : 1024,
'RN50x4' : 640,
'RN50x16' : 768,
'RN50x64' : 1024,
'RN101' : 512,
'ViT-B/32' : 512,
'ViT-B/16' : 512,
'ViT-L/14' : 768,
'ViT-L/14@336px' : 768,
}[clip_model_type]
emb_dim = clip_dim * len(inputs)
elif self.hparams.backbone == 'resnet':
emb_dim = 2048
elif self.hparams.backbone == 'bert':
emb_dim = 768
if self.hparams.objective == 'classifier':
out_dim = vocab_len
elif self.hparams.objective == 'contrastive':
out_dim = clip_dim
self.linear = nn.Linear(emb_dim, out_dim)
def forward(self, x):
x = self.linear(x)
if self.hparams.objective == 'classifier':
x = torch.sigmoid(x)
return x
def compute_loss(self, batch):
x, y = batch
y_pred = self.forward(x)
if self.hparams.objective == 'classifier':
loss = F.binary_cross_entropy(y_pred, y.float())
elif self.hparams.objective == 'contrastive':
indices = torch.arange(0, x.shape[0], dtype=torch.int64, device=self.device)
sim = (y_pred @ y.T).softmax(dim=-1)
loss = F.cross_entropy(sim, indices)
if self.hparams.objective == 'classifier':
acc = MF.f1_score(y_pred, y)
elif self.hparams.objective == 'contrastive':
acc = torch.mean(sim[indices, indices])
return loss, acc
def training_step(self, batch, batch_idx):
loss, acc = self.compute_loss(batch)
self.log("train_loss", loss)
self.log("train_acc", acc)
return loss
def validation_step(self, batch, batch_idx):
loss, acc = self.compute_loss(batch)
self.log("val_loss", loss)
self.log("val_acc", acc)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
if __name__ == '__main__':
main()
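The contrastive branch of compute_loss is in-batch InfoNCE over the batch diagonal; a minimal sketch that mirrors the script (including its choice of applying cross-entropy on top of already-softmaxed similarities):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
y_pred = F.normalize(torch.randn(8, 512), dim=-1)  # projected inputs for a batch of 8
y = F.normalize(torch.randn(8, 512), dim=-1)       # matching answer embeddings
sim = (y_pred @ y.T).softmax(dim=-1)               # row i should peak at column i
indices = torch.arange(8)
loss = F.cross_entropy(sim, indices)               # pull each row toward its own column
acc = sim[indices, indices].mean()                 # mean diagonal mass, as logged in train.py
```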

View File

@ -11,7 +11,7 @@ model:
# generation configs
prompt: ""
llama_model: "please set this value to the path of llama2-chat-7b"
llama_model: "/ibex/project/c2133/llama_v2/llama-2-7b-chat-pytorch"
lora_r: 64
lora_alpha: 16

View File

@ -127,7 +127,14 @@ CONV_VISION_LLama2 = Conversation(
sep="",
)
CONV_VISION_minigptv2 = Conversation(
system="",
roles=("<s>[INST] ", " [/INST]"),
messages=[],
offset=2,
sep_style=SeparatorStyle.SINGLE,
sep="",
)
class Chat:
def __init__(self, model, vis_processor, device='cuda:0', stopping_criteria=None):
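Under this template, a single user turn presumably renders as below (the `<Img><ImageHere></Img>` image placeholder is an assumption based on MiniGPT conventions, not shown in this diff):

```python
roles = ("<s>[INST] ", " [/INST]")
user_msg = "<Img><ImageHere></Img> [vqa] What color is the car?"
prompt = roles[0] + user_msg + roles[1]
# -> "<s>[INST] <Img><ImageHere></Img> [vqa] What color is the car? [/INST]"
```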

View File

@ -3,7 +3,6 @@ import json
import pickle
import random
import time
# import iterto
import numpy as np
from PIL import Image
import skimage.io as io

View File

@ -65,7 +65,6 @@ class OKVQAEvalData(torch.utils.data.Dataset):
image = Image.open(image_path).convert('RGB')
image = self.vis_processor(image)
question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
# question = f"[vqa] {question} "
return image, question, question_id, img_id
class VizWizEvalData(torch.utils.data.Dataset):
@ -86,54 +85,9 @@ class VizWizEvalData(torch.utils.data.Dataset):
image_path = os.path.join(self.root_path, img_id)
image = Image.open(image_path).convert('RGB')
image = self.vis_processor(image)
# question = f"[vqa] Based on the image, respond to this question with a short answer: {question} "
question = f"[vqa] Based on the image, respond to this question with a short answer: {question} and reply 'unanswerable' if you could not answer it"
question = f"[vqa] The question is'{question}' Based on the image, answer the question with a single word or phrase. and reply 'unanswerable' when the provided information is insufficient" # 52.0
return image, question, answers
class AOKVQADAEvalData(torch.utils.data.Dataset):
def __init__(self, loaded_data, vis_processor, root_path):
self.loaded_data = loaded_data
self.root_path = root_path
self.vis_processor = vis_processor
def __len__(self):
return len(self.loaded_data)
def __getitem__(self, idx):
data = self.loaded_data[idx]
img_file = data['image']
question = data['question']
question_id = data['question_id']
image_path = os.path.join(self.root_path, img_file)
image = Image.open(image_path).convert('RGB')
image = self.vis_processor(image)
question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
# question = f"[vqa] {question} "
return image, question, question_id
class AOKVQAMCEvalData(torch.utils.data.Dataset):
def __init__(self, loaded_data, vis_processor, root_path):
self.loaded_data = loaded_data
self.root_path = root_path
self.vis_processor = vis_processor
def __len__(self):
return len(self.loaded_data)
def __getitem__(self, idx):
data = self.loaded_data[idx]
img_file = data['image']
question = data['question']
question_id = data['question_id']
image_path = os.path.join(self.root_path, img_file)
image = Image.open(image_path).convert('RGB')
image = self.vis_processor(image).half().cuda()
candidates=data['choices']
# question = f"Given this image, choose one correct answer from {candidates} for this question: {question} "
question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
# question = f"[vqa] {question} "
return image, question, question_id, candidates
class IconQAEvalData(torch.utils.data.Dataset):
def __init__(self, loaded_data, vis_processor, root_path):
self.loaded_data = loaded_data
@ -152,9 +106,7 @@ class IconQAEvalData(torch.utils.data.Dataset):
image = self.vis_processor(image).half().cuda()
candidates = '_'.join(data['choices'])
answer = data['answer']
# question = f"Given this image, choose one correct answer from {candidates} for this question: {question} "
question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
# question = f"[vqa] {question} "
return image, question, candidates, answer
class GQAEvalData(torch.utils.data.Dataset):
@ -173,9 +125,7 @@ class GQAEvalData(torch.utils.data.Dataset):
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = ann["question"]
# question = f'Question: {question} Short answer: '
question = f"[vqa] Based on the image, respond to this question with a short answer: {question}"
# question = f"[vqa] {question} "
labels = ann["answer"]
return image, question, labels
@ -196,7 +146,7 @@ class HMEvalData(torch.utils.data.Dataset):
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
question = ann["text"]
question = f"[vqa] This is an image with: \"{question}\" written on it. Is it hateful? Answer:"
question = f"This is an image writting '{question}'. Is this image hateful? Answer yes or no. Answer:"
labels = ann["label"]
return image, question, labels
@ -217,7 +167,6 @@ class VSREvalData(torch.utils.data.Dataset):
image = self.vis_processor(image)
question = ann["caption"]
question = f'[vqa] Based on the image, is this statement true or false? {question}'
question_id = ann["image"].split('.')[0]
labels = 'true' if ann["label"] == 1 else 'false'
return image, question, labels
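These eval classes are ordinary torch Datasets, so evaluation presumably iterates a DataLoader and compares generated strings to the labels; a schematic sketch (`model.generate` and its arguments are assumptions, not this repo's exact API):

```python
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=32, shuffle=False)  # e.g. a GQAEvalData instance
correct = total = 0
for images, questions, labels in loader:
    answers = model.generate(images, questions, max_new_tokens=20)  # assumed signature
    for answer, label in zip(answers, labels):
        correct += int(answer.strip().lower() == label.strip().lower())
        total += 1
print(f"accuracy: {correct / total:.4f}")
```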