From fc20acdff5be187413803bbfbabdcfc062d4d0cb Mon Sep 17 00:00:00 2001 From: XiaoqianShen <64844805+xiaoqian-shen@users.noreply.github.com> Date: Wed, 25 Oct 2023 20:56:14 +0200 Subject: [PATCH] Add files via upload --- .../VQA/PythonEvaluationTools/vqaEvalDemo.py | 89 +++++ .../vqaEvaluation/__init__.py | 1 + .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 231 bytes .../__pycache__/vqaEval.cpython-39.pyc | Bin 0 -> 7564 bytes .../vqaEvaluation/vqaEval.py | 192 +++++++++++ .../VQA/PythonHelperTools/vqaDemo.py | 73 ++++ .../PythonHelperTools/vqaTools/__init__.py | 1 + .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 226 bytes .../vqaTools/__pycache__/vqa.cpython-39.pyc | Bin 0 -> 7028 bytes .../VQA/PythonHelperTools/vqaTools/vqa.py | 179 ++++++++++ .../abstract_v002_question_types.txt | 81 +++++ .../QuestionTypes/mscoco_question_types.txt | 65 ++++ minigpt4/common/vqa_tools/VQA/README.md | 80 +++++ minigpt4/common/vqa_tools/VQA/license.txt | 30 ++ minigpt4/common/vqa_tools/__init__.py | 8 + .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 426 bytes .../vqa_tools/__pycache__/vqa.cpython-39.pyc | Bin 0 -> 7599 bytes .../__pycache__/vqa_eval.cpython-39.pyc | Bin 0 -> 7955 bytes minigpt4/common/vqa_tools/aokvqa/LICENSE | 201 +++++++++++ minigpt4/common/vqa_tools/aokvqa/README.md | 207 +++++++++++ .../aokvqa/data_scripts/build_vocab.py | 45 +++ .../aokvqa/data_scripts/encode_vocab_clip.py | 26 ++ .../data_scripts/extract_bert_features.py | 50 +++ .../data_scripts/extract_clip_features.py | 51 +++ .../data_scripts/extract_resnet_features.py | 62 ++++ .../common/vqa_tools/aokvqa/environment.yml | 36 ++ .../__pycache__/load_aokvqa.cpython-39.pyc | Bin 0 -> 703 bytes .../aokvqa/evaluation/eval_predictions.py | 97 ++++++ .../aokvqa/evaluation/load_aokvqa.py | 13 + .../aokvqa/evaluation/prepare_predictions.py | 31 ++ .../aokvqa/evaluation/remap_predictions.py | 44 +++ .../common/vqa_tools/aokvqa/gpt3/README.md | 14 + .../vqa_tools/aokvqa/gpt3/caption_inputs.py | 23 ++ .../vqa_tools/aokvqa/gpt3/query_gpt3.py | 79 +++++ .../vqa_tools/aokvqa/gpt3/rationale_inputs.py | 16 + .../vqa_tools/aokvqa/heuristics/README.md | 11 + .../aokvqa/heuristics/most_common_answer.py | 39 +++ .../aokvqa/heuristics/random_unweighted.py | 38 ++ .../aokvqa/heuristics/random_weighted.py | 46 +++ .../common/vqa_tools/aokvqa/load_aokvqa.py | 13 + .../aokvqa/transfer_experiments/README.md | 41 +++ .../aokvqa/transfer_experiments/predict.py | 126 +++++++ .../aokvqa/transfer_experiments/train.py | 263 ++++++++++++++ minigpt4/common/vqa_tools/vqa.py | 211 ++++++++++++ minigpt4/common/vqa_tools/vqa_eval.py | 324 ++++++++++++++++++ 45 files changed, 2906 insertions(+) create mode 100644 minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py create mode 100644 minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py create mode 100644 minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__pycache__/__init__.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__pycache__/vqaEval.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py create mode 100644 minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py create mode 100644 minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py create mode 100644 minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__pycache__/__init__.cpython-39.pyc create mode 100644 
minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__pycache__/vqa.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py create mode 100644 minigpt4/common/vqa_tools/VQA/QuestionTypes/abstract_v002_question_types.txt create mode 100644 minigpt4/common/vqa_tools/VQA/QuestionTypes/mscoco_question_types.txt create mode 100644 minigpt4/common/vqa_tools/VQA/README.md create mode 100644 minigpt4/common/vqa_tools/VQA/license.txt create mode 100644 minigpt4/common/vqa_tools/__init__.py create mode 100644 minigpt4/common/vqa_tools/__pycache__/__init__.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/__pycache__/vqa.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/__pycache__/vqa_eval.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/aokvqa/LICENSE create mode 100644 minigpt4/common/vqa_tools/aokvqa/README.md create mode 100644 minigpt4/common/vqa_tools/aokvqa/data_scripts/build_vocab.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/environment.yml create mode 100644 minigpt4/common/vqa_tools/aokvqa/evaluation/__pycache__/load_aokvqa.cpython-39.pyc create mode 100644 minigpt4/common/vqa_tools/aokvqa/evaluation/eval_predictions.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/evaluation/load_aokvqa.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/evaluation/prepare_predictions.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/evaluation/remap_predictions.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/gpt3/README.md create mode 100644 minigpt4/common/vqa_tools/aokvqa/gpt3/caption_inputs.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/gpt3/query_gpt3.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/gpt3/rationale_inputs.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/heuristics/README.md create mode 100644 minigpt4/common/vqa_tools/aokvqa/heuristics/most_common_answer.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/heuristics/random_unweighted.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/heuristics/random_weighted.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/load_aokvqa.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/transfer_experiments/README.md create mode 100644 minigpt4/common/vqa_tools/aokvqa/transfer_experiments/predict.py create mode 100644 minigpt4/common/vqa_tools/aokvqa/transfer_experiments/train.py create mode 100644 minigpt4/common/vqa_tools/vqa.py create mode 100644 minigpt4/common/vqa_tools/vqa_eval.py diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py new file mode 100644 index 0000000..07ca21d --- /dev/null +++ b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py @@ -0,0 +1,89 @@ +# coding: utf-8 + +import sys +dataDir = '../../VQA' +sys.path.insert(0, '%s/PythonHelperTools/vqaTools' %(dataDir)) +from vqa import VQA +from vqaEvaluation.vqaEval import VQAEval +import matplotlib.pyplot as plt +import skimage.io as io +import json +import random +import os + +# set up file names and paths +versionType ='v2_' # this should be '' when using VQA v2.0 dataset 
+taskType ='OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
+dataType ='mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
+dataSubType ='train2014'
+annFile ='%s/Annotations/%s%s_%s_annotations.json'%(dataDir, versionType, dataType, dataSubType)
+quesFile ='%s/Questions/%s%s_%s_%s_questions.json'%(dataDir, versionType, taskType, dataType, dataSubType)
+imgDir ='%s/Images/%s/%s/' %(dataDir, dataType, dataSubType)
+resultType ='fake'
+fileTypes = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']
+
+# An example result json file has been provided in './Results' folder.
+
+[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s/Results/%s%s_%s_%s_%s_%s.json'%(dataDir, versionType, taskType, dataType, dataSubType, \
+resultType, fileType) for fileType in fileTypes]
+
+# create vqa object and vqaRes object
+vqa = VQA(annFile, quesFile)
+vqaRes = vqa.loadRes(resFile, quesFile)
+
+# create vqaEval object by taking vqa and vqaRes
+vqaEval = VQAEval(vqa, vqaRes, n=2) # n is precision of accuracy (number of places after decimal), default is 2
+
+# evaluate results
+"""
+If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
+By default it uses all the question ids in annotation file
+"""
+vqaEval.evaluate()
+
+# print accuracies (Python 3 print function)
+print("\n")
+print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
+print("Per Question Type Accuracy is the following:")
+for quesType in vqaEval.accuracy['perQuestionType']:
+    print("%s : %.02f" % (quesType, vqaEval.accuracy['perQuestionType'][quesType]))
+print("\n")
+print("Per Answer Type Accuracy is the following:")
+for ansType in vqaEval.accuracy['perAnswerType']:
+    print("%s : %.02f" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
+print("\n")
+# demo how to use evalQA to retrieve low score result
+evals = [quesId for quesId in vqaEval.evalQA if vqaEval.evalQA[quesId] < 35] # 35 is per question percentage accuracy
+if len(evals) > 0:
+    print('ground truth answers')
+    randomEval = random.choice(evals)
+    randomAnn = vqa.loadQA(randomEval)
+    vqa.showQA(randomAnn)
+
+    print('\n')
+    print('generated answer (accuracy %.02f)' % (vqaEval.evalQA[randomEval]))
+    ann = vqaRes.loadQA(randomEval)[0]
+    print("Answer: %s\n" % (ann['answer']))
+
+    imgId = randomAnn[0]['image_id']
+    imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
+    if os.path.isfile(imgDir + imgFilename):
+        I = io.imread(imgDir + imgFilename)
+        plt.imshow(I)
+        plt.axis('off')
+        plt.show()
+
+# plot accuracy for various question types
+plt.bar(range(len(vqaEval.accuracy['perQuestionType'])), list(vqaEval.accuracy['perQuestionType'].values()), align='center')
+plt.xticks(range(len(vqaEval.accuracy['perQuestionType'])), list(vqaEval.accuracy['perQuestionType'].keys()), rotation=0, fontsize=10)
+plt.title('Per Question Type Accuracy', fontsize=10)
+plt.xlabel('Question Types', fontsize=10)
+plt.ylabel('Accuracy', fontsize=10)
+plt.show()
+
+# save evaluation results to ./Results folder
+json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
+json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
+json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
+json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
+
diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
new file mode 100644
index 0000000..148424d
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
@@ -0,0 +1 @@
+author='aagrawal'
diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__pycache__/__init__.cpython-39.pyc b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f09b0c20d0cbffe1368999db90828c64561e009f
GIT binary patch
(literal data omitted)
(GIT binary patch for vqaEvaluation/__pycache__/vqaEval.cpython-39.pyc omitted)
diff --git a/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
new file mode 100644
index 0000000..8a65604
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
@@ -0,0 +1,192 @@
+# coding=utf-8
+
+__author__='aagrawal'
+
+import re
+# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
+# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py).
+import sys + + +class VQAEval: + def __init__(self, vqa, vqaRes, n=2): + self.n = n + self.accuracy = {} + self.evalQA = {} + self.evalQuesType = {} + self.evalAnsType = {} + self.vqa = vqa + self.vqaRes = vqaRes + self.params = {'question_id': vqa.getQuesIds()} + self.contractions = {"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", \ + "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", \ + "hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", \ + "he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", \ + "Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", \ + "maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", \ + "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", \ + "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", \ + "she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", \ + "somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", \ + "somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", \ + "someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", \ + "something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", \ + "there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", \ + "they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", \ + "wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", \ + "whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", \ + "whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", \ + "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", \ + "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", \ + "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", \ + "youll": "you'll", "youre": "you're", "youve": "you've"} + self.manualMap = { 'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10' + } + self.articles = ['a', + 'an', + 'the' + ] + + + self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") + self.commaStrip = re.compile("(\d)(\,)(\d)") + self.punct = [';', r"/", '[', ']', '"', '{', '}', + '(', ')', '=', '+', '\\', '_', '-', + '>', '<', '@', '`', ',', '?', '!'] + + + def evaluate(self, quesIds=None): + 
if quesIds == None: + quesIds = [quesId for quesId in self.params['question_id']] + gts = {} + res = {} + for quesId in quesIds: + gts[quesId] = self.vqa.qa[quesId] + res[quesId] = self.vqaRes.qa[quesId] + + # ================================================= + # Compute accuracy + # ================================================= + accQA = [] + accQuesType = {} + accAnsType = {} + # print "computing accuracy" + step = 0 + for quesId in quesIds: + for ansDic in gts[quesId]['answers']: + ansDic['answer'] = ansDic['answer'].replace('\n', ' ') + ansDic['answer'] = ansDic['answer'].replace('\t', ' ') + ansDic['answer'] = ansDic['answer'].strip() + resAns = res[quesId]['answer'] + resAns = resAns.replace('\n', ' ') + resAns = resAns.replace('\t', ' ') + resAns = resAns.strip() + gtAcc = [] + gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']] + + if len(set(gtAnswers)) > 1: + for ansDic in gts[quesId]['answers']: + ansDic['answer'] = self.processPunctuation(ansDic['answer']) + ansDic['answer'] = self.processDigitArticle(ansDic['answer']) + resAns = self.processPunctuation(resAns) + resAns = self.processDigitArticle(resAns) + + for gtAnsDatum in gts[quesId]['answers']: + otherGTAns = [item for item in gts[quesId]['answers'] if item!=gtAnsDatum] + matchingAns = [item for item in otherGTAns if item['answer'].lower()==resAns.lower()] + acc = min(1, float(len(matchingAns))/3) + gtAcc.append(acc) + quesType = gts[quesId]['question_type'] + ansType = gts[quesId]['answer_type'] + avgGTAcc = float(sum(gtAcc))/len(gtAcc) + accQA.append(avgGTAcc) + if quesType not in accQuesType: + accQuesType[quesType] = [] + accQuesType[quesType].append(avgGTAcc) + if ansType not in accAnsType: + accAnsType[ansType] = [] + accAnsType[ansType].append(avgGTAcc) + self.setEvalQA(quesId, avgGTAcc) + self.setEvalQuesType(quesId, quesType, avgGTAcc) + self.setEvalAnsType(quesId, ansType, avgGTAcc) + if step%100 == 0: + self.updateProgress(step/float(len(quesIds))) + step = step + 1 + + self.setAccuracy(accQA, accQuesType, accAnsType) + # print "Done computing accuracy" + + def processPunctuation(self, inText): + outText = inText + for p in self.punct: + if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None): + outText = outText.replace(p, '') + else: + outText = outText.replace(p, ' ') + outText = self.periodStrip.sub("", + outText, + re.UNICODE) + return outText + + def processDigitArticle(self, inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = self.manualMap.setdefault(word, word) + if word not in self.articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in self.contractions: + outText[wordId] = self.contractions[word] + outText = ' '.join(outText) + return outText + + def setAccuracy(self, accQA, accQuesType, accAnsType): + self.accuracy['overall'] = round(100*float(sum(accQA))/len(accQA), self.n) + self.accuracy['perQuestionType'] = {quesType: round(100*float(sum(accQuesType[quesType]))/len(accQuesType[quesType]), self.n) for quesType in accQuesType} + self.accuracy['perAnswerType'] = {ansType: round(100*float(sum(accAnsType[ansType]))/len(accAnsType[ansType]), self.n) for ansType in accAnsType} + + def setEvalQA(self, quesId, acc): + self.evalQA[quesId] = round(100*acc, self.n) + + def setEvalQuesType(self, quesId, quesType, acc): + if quesType not in self.evalQuesType: + self.evalQuesType[quesType] = {} + self.evalQuesType[quesType][quesId] = round(100*acc, self.n) + + def 
setEvalAnsType(self, quesId, ansType, acc):
+        if ansType not in self.evalAnsType:
+            self.evalAnsType[ansType] = {}
+        self.evalAnsType[ansType][quesId] = round(100*acc, self.n)
+
+    def updateProgress(self, progress):
+        barLength = 20
+        status = ""
+        if isinstance(progress, int):
+            progress = float(progress)
+        if not isinstance(progress, float):
+            progress = 0
+            status = "error: progress var must be float\r\n"
+        if progress < 0:
+            progress = 0
+            status = "Halt...\r\n"
+        if progress >= 1:
+            progress = 1
+            status = "Done...\r\n"
+        block = int(round(barLength*progress))
+        text = "\rFinished Percent: [{0}] {1}% {2}".format("#"*block + "-"*(barLength-block), int(progress*100), status)
+        sys.stdout.write(text)
+        sys.stdout.flush()
diff --git a/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py
new file mode 100644
index 0000000..406b596
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+
+from vqaTools.vqa import VQA
+import random
+import skimage.io as io
+import matplotlib.pyplot as plt
+import os
+
+dataDir ='../../VQA'
+versionType ='v2_' # this should be '' when using VQA v2.0 dataset
+taskType ='OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
+dataType ='mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
+dataSubType ='train2014'
+annFile ='%s/Annotations/%s%s_%s_annotations.json'%(dataDir, versionType, dataType, dataSubType)
+quesFile ='%s/Questions/%s%s_%s_%s_questions.json'%(dataDir, versionType, taskType, dataType, dataSubType)
+imgDir = '%s/Images/%s/%s/' %(dataDir, dataType, dataSubType)
+
+# initialize VQA api for QA annotations
+vqa=VQA(annFile, quesFile)
+
+# load and display QA annotations for given question types
+"""
+All possible quesTypes for abstract and mscoco have been provided in respective text files in ../QuestionTypes/ folder.
+"""
+annIds = vqa.getQuesIds(quesTypes='how many')
+anns = vqa.loadQA(annIds)
+randomAnn = random.choice(anns)
+vqa.showQA([randomAnn])
+imgId = randomAnn['image_id']
+imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
+if os.path.isfile(imgDir + imgFilename):
+    I = io.imread(imgDir + imgFilename)
+    plt.imshow(I)
+    plt.axis('off')
+    plt.show()
+
+# load and display QA annotations for given answer types
+"""
+ansTypes can be one of the following
+yes/no
+number
+other
+"""
+annIds = vqa.getQuesIds(ansTypes='yes/no')
+anns = vqa.loadQA(annIds)
+randomAnn = random.choice(anns)
+vqa.showQA([randomAnn])
+imgId = randomAnn['image_id']
+imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
+if os.path.isfile(imgDir + imgFilename):
+    I = io.imread(imgDir + imgFilename)
+    plt.imshow(I)
+    plt.axis('off')
+    plt.show()
+
+# load and display QA annotations for given images
+"""
+Usage: vqa.getImgIds(quesIds=[], quesTypes=[], ansTypes=[])
+The above method can be used to retrieve imageIds for given question Ids or given question types or given answer types.
+"""
+ids = vqa.getImgIds()
+annIds = vqa.getQuesIds(imgIds=random.sample(ids,5))
+anns = vqa.loadQA(annIds)
+randomAnn = random.choice(anns)
+vqa.showQA([randomAnn])
+imgId = randomAnn['image_id']
+imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
+if os.path.isfile(imgDir + imgFilename):
+    I = io.imread(imgDir + imgFilename)
+    plt.imshow(I)
+    plt.axis('off')
+    plt.show()
+
diff --git a/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py
new file mode 100644
index 0000000..072d8d9
--- /dev/null
+++ b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py
@@ -0,0 +1 @@
+__author__ = 'aagrawal'
diff --git a/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__pycache__/__init__.cpython-39.pyc b/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b6021287783e6c9f5626fd9c270635e4ab59e0a
GIT binary patch
(literal data omitted)
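For reference, the `VQAEval.evaluate` method in `vqaEval.py` above implements the standard VQA soft-accuracy metric: the predicted answer is compared against every leave-one-out subset of the (typically ten) human answers, and each comparison scores `min(#matching humans / 3, 1)`. A minimal standalone sketch of that per-question score follows; the function and its names are illustrative, not part of the patch, and it assumes answers are already normalized the way `processPunctuation` / `processDigitArticle` normalize them.

```python
def vqa_soft_accuracy(pred_answer, human_answers):
    """Average over leave-one-out subsets of the human answers of
    min(#humans in the subset agreeing with the prediction / 3, 1)."""
    scores = []
    for i in range(len(human_answers)):
        others = human_answers[:i] + human_answers[i + 1:]
        matches = sum(ans.lower() == pred_answer.lower() for ans in others)
        scores.append(min(1.0, matches / 3.0))
    return sum(scores) / len(scores)


# e.g. 10 annotators, 4 of whom said "cab": every leave-one-out subset still
# contains at least 3 matches, so the prediction "cab" scores 1.0
print(vqa_soft_accuracy("cab", ["cab"] * 4 + ["taxi"] * 6))  # 1.0
```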
diff --git a/minigpt4/common/vqa_tools/__pycache__/vqa.cpython-39.pyc b/minigpt4/common/vqa_tools/__pycache__/vqa.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac761c593ece91d16ed089185ea84829c90c0171
GIT binary patch
(literal data omitted)
diff --git a/minigpt4/common/vqa_tools/__pycache__/vqa_eval.cpython-39.pyc b/minigpt4/common/vqa_tools/__pycache__/vqa_eval.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d12fb206d53798d172c785eea5156091da14fcb7
GIT binary patch
(literal data omitted)
z;EQ<|!S=6=g~GdD$(!<~J0jlQMp=;qI$}V-%T2ktkvY-0H=>rn(BEIC~q-k R{S0K3dSW|o+n(^O{{yJ;FrEMa literal 0 HcmV?d00001 diff --git a/minigpt4/common/vqa_tools/aokvqa/LICENSE b/minigpt4/common/vqa_tools/aokvqa/LICENSE new file mode 100644 index 0000000..663d675 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Allen Institute for Artificial Intelligence + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/minigpt4/common/vqa_tools/aokvqa/README.md b/minigpt4/common/vqa_tools/aokvqa/README.md new file mode 100644 index 0000000..21caefa --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/README.md @@ -0,0 +1,207 @@ +# A-OKVQA + +Official repository for **A-OKVQA: A Benchmark for Visual Question Answering using World Knowledge**. + +Links: [[Paper]](https://arxiv.org/abs/2206.01718) [[Website]](http://a-okvqa.allenai.org) [[Leaderboard]](https://leaderboard.allenai.org/a-okvqa/submissions/public) + +### Abstract + +The Visual Question Answering (VQA) task aspires to provide a meaningful testbed for the development of AI models that can jointly reason over visual and natural language inputs. Despite a proliferation of VQA datasets, this goal is hindered by a set of common limitations. These include a reliance on relatively simplistic questions that are repetitive in both concepts and linguistic structure, little world knowledge needed outside of the paired image, and limited reasoning required to arrive at the correct answer. We introduce A-OKVQA, a crowdsourced dataset composed of a diverse set of about 25K questions requiring a broad base of commonsense and world knowledge to answer. In contrast to the existing knowledge-based VQA datasets, the questions generally cannot be answered by simply querying a knowledge base, and instead require some form of commonsense reasoning about the scene depicted in the image. We demonstrate the potential of this new dataset through a detailed analysis of its contents and baseline performance measurements over a variety of state-of-the-art vision–language models. + +![dataset_web](https://user-images.githubusercontent.com/28768645/170799740-f0d9ea60-6aff-4322-98d5-cae8e05983f4.svg) + +
+ +#### Table of Contents + +- [Getting started](#getting-started) + * [Downloading the dataset](#downloading-the-dataset) +- [Evaluation & Leaderboard](#evaluation) +- [Codebase](#codebase) + * [Preparing data](#preparing-data) + * [Models and Predictions](#models-and-predictions) + +
+ +## Getting started + +```bash +git clone --single-branch --recurse-submodules https://github.com/allenai/aokvqa.git + +cd aokvqa +export PYTHONPATH=. + +conda env create --name aokvqa +conda activate aokvqa +``` + +### Downloading the dataset + +```bash +export AOKVQA_DIR=./datasets/aokvqa/ +mkdir -p ${AOKVQA_DIR} + +curl -fsSL https://prior-datasets.s3.us-east-2.amazonaws.com/aokvqa/aokvqa_v1p0.tar.gz | tar xvz -C ${AOKVQA_DIR} +``` + +
+#### Downloading COCO 2017
+
+```bash
+export COCO_DIR=./datasets/coco/
+mkdir -p ${COCO_DIR}
+
+for split in train val test; do
+    wget "http://images.cocodataset.org/zips/${split}2017.zip"
+    unzip "${split}2017.zip" -d ${COCO_DIR}; rm "${split}2017.zip"
+done
+
+wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+unzip annotations_trainval2017.zip -d ${COCO_DIR}; rm annotations_trainval2017.zip
+```
+
+ +Loading our dataset is easy! Just grab our [load_aokvqa.py](https://github.com/allenai/aokvqa/blob/main/load_aokvqa.py) file and refer to the following code. + +```python +import os +aokvqa_dir = os.getenv('AOKVQA_DIR') + +from load_aokvqa import load_aokvqa, get_coco_path +train_dataset = load_aokvqa(aokvqa_dir, 'train') # also 'val' or 'test' +``` + +
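For readers without the repository checked out, `load_aokvqa` and `get_coco_path` are thin helpers over the released JSON files. A rough sketch of what they do is below; the exact implementation ships as `load_aokvqa.py` in this patch, and the file-naming details here are assumptions inferred from the download commands above.

```python
import json
import os

def load_aokvqa(aokvqa_dir, split, version='v1p0'):
    # Each split ships as a single JSON list, e.g. aokvqa_v1p0_train.json (assumed name).
    assert split in ['train', 'val', 'test']
    with open(os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")) as f:
        return json.load(f)

def get_coco_path(split, image_id, coco_dir):
    # COCO 2017 images are named by their zero-padded 12-digit id.
    return os.path.join(coco_dir, f"{split}2017", f"{image_id:012d}.jpg")
```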
+#### Example dataset entry
+
+```python
+dataset_example = train_dataset[0]
+
+print(dataset_example['question_id'])
+# 22MexNkBPpdZGX6sxbxVBH
+
+coco_dir = os.getenv('COCO_DIR')
+image_path = get_coco_path('train', dataset_example['image_id'], coco_dir)
+print(image_path)
+# ./datasets/coco/train2017/000000299207.jpg
+
+print(dataset_example['question'])
+print(dataset_example['choices'])
+# What is the man by the bags awaiting?
+# ['skateboarder', 'train', 'delivery', 'cab']
+
+correct_choice = dataset_example['choices'][ dataset_example['correct_choice_idx'] ]
+# Correct: cab
+
+print(dataset_example['rationales'][0])
+# A train would not be on the street, he would not have luggage waiting for a delivery, and the skateboarder is there and not paying attention to him so a cab is the only possible answer.
+```
+
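An entry like the one above is all a model needs in order to produce benchmark predictions. As a toy illustration, here is a sketch of a random multiple-choice baseline that writes its guesses keyed by `question_id`, in the flat format consumed by `evaluation/prepare_predictions.py --mc` (see the Evaluation section below and the heuristics scripts later in this patch); the output filename simply follows the naming scheme described later in this README.

```python
import json
import random

random.seed(0)

val_set = load_aokvqa(aokvqa_dir, 'val')

# One guess per question, keyed by question_id; pass this file to
# evaluation/prepare_predictions.py via --mc to build predictions_val.json.
predictions = {d['question_id']: random.choice(d['choices']) for d in val_set}

with open('./random-unweighted_val-mc.json', 'w') as f:
    json.dump(predictions, f)
```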
+
+## Evaluation
+
+Please prepare `predictions_{split}.json` files (for `split: {val,test}`) in the format below. You may omit either the `multiple_choice` or the `direct_answer` field if you only want to evaluate one setting.
+
+```python
+{
+    '<question_id>' : {
+        'multiple_choice' : '<prediction>',
+        'direct_answer' : '<prediction>'
+    }
+}
+```
+
+You can run evaluation on the validation set as follows.
+
+```bash
+python evaluation/eval_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --preds ./predictions_val.json
+```
+
+### Leaderboard
+
+You may submit `predictions_test.json` to the [leaderboard](https://leaderboard.allenai.org/a-okvqa/submissions/get-started).
+
+## Codebase
+
+We provide all code and pretrained models necessary to replicate our experiments for Large-Scale Pretrained Models (sec. 5.2) and Rationale Generation (sec. 5.3).
+
+### Preparing data
+
+```bash
+export FEATURES_DIR=./features/
+mkdir -p ${FEATURES_DIR}
+```
+
+You can compute CLIP features for our vocabulary and dataset. These are most commonly used by our other experiments.
+
+```bash
+python data_scripts/encode_vocab_clip.py --vocab ${AOKVQA_DIR}/large_vocab_train.csv --model-type ViT-B/32 --out ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt
+
+for split in train val test; do
+    python data_scripts/extract_clip_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --model-type ViT-B/32 --out ${FEATURES_DIR}/clip-ViT-B-32_${split}.pt
+done
+```
+
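The extraction commands above save, per split, a `.pt` file holding a dict keyed by `question_id`, with `question` and `image` feature tensors (see `data_scripts/extract_clip_features.py` later in this patch). A minimal sketch of reading those features back, with illustrative variable names:

```python
import os
import torch

features_dir = os.getenv('FEATURES_DIR', './features/')
clip_feats = torch.load(os.path.join(features_dir, 'clip-ViT-B-32_val.pt'))

qid = next(iter(clip_feats))          # any question_id from the val split
q_feat = clip_feats[qid]['question']  # CLIP text features for the question
img_feat = clip_feats[qid]['image']   # CLIP image features for the COCO image
print(q_feat.shape, img_feat.shape)   # e.g. torch.Size([512]) torch.Size([512]) for ViT-B/32
```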
+#### For training ClipCap with a transformer mapping network
+
+If you want to train our ClipCap models with the transformer mapping network (instead of an MLP, like we do), you'll also need to run `extract_clip_features.py` with `--model-type RN50x4`.
+
+ +
+#### For ResNet and BERT input features
+
+Our ResNet and BERT classification experiments require these respective features instead of CLIP. To generate these, please run the following commands:
+
+```bash
+# ResNet
+for split in train val test; do
+    python data_scripts/extract_resnet_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --out ${FEATURES_DIR}/resnet_${split}.pt
+done
+
+# BERT
+for split in train val test; do
+    python data_scripts/extract_bert_features.py --aokvqa-dir ${AOKVQA_DIR} --split ${split} --out ${FEATURES_DIR}/bert_${split}.pt
+done
+```
+
+ +### Models and Predictions + +```bash +export LOG_DIR=./logs/ +export PREDS_DIR=./predictions/ +export PT_MODEL_DIR=./pretrained_models/ +mkdir -p ${LOG_DIR} ${PREDS_DIR} ${PT_MODEL_DIR} +``` + +
+#### Download our pretrained model weights
+
+```bash
+# Checkpoints for transfer learning experiments
+curl -fsSL https://prior-model-weights.s3.us-east-2.amazonaws.com/aokvqa/transfer_exp_checkpoints.tar.gz | tar xvz -C ${PT_MODEL_DIR}/aokvqa_models
+
+# Checkpoints for ClipCap models (generating answers and rationales)
+curl -fsSL https://prior-model-weights.s3.us-east-2.amazonaws.com/aokvqa/clipcap_checkpoints.tar.gz | tar xvz -C ${PT_MODEL_DIR}/aokvqa_models
+```
+
+ +We have included instructions for replicating each of our experiments (see README.md files below). + +All Python scripts should be run from the root of this repository. Please be sure to first run the installation and data preparation as directed above. + +- [Heuristics](./heuristics/README.md) +- [Transfer Learning Experiments](./transfer_experiments/README.md) +- [Querying GPT-3](./gpt3/README.md) +- [ClipCap](https://github.com/allenai/aokvqa/blob/ClipCap/README.md) +- [Generating Captions & Rationales](https://github.com/allenai/aokvqa/blob/ClipCap/README.md) + +For each experiment, we follow this prediction file naming scheme: `{model-name}_{split}-{setting}.json` (e.g. `random-weighted_val-mc.json` or `random-weighted_test-da.json`). As examples in these Readme files, we produce predictions on the validation set. + +We unify predictions for each split before evaluation. (You can omit one of `--mc` or `--da` prediction file if you only want to evaluate one setting.) + +```bash +python evaluation/prepare_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --mc ./predictions_val-mc.json --da ./predictions_val-da.json --out ./predictions_val.json +# repeat for test split ... +``` diff --git a/minigpt4/common/vqa_tools/aokvqa/data_scripts/build_vocab.py b/minigpt4/common/vqa_tools/aokvqa/data_scripts/build_vocab.py new file mode 100644 index 0000000..2c44686 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/data_scripts/build_vocab.py @@ -0,0 +1,45 @@ +import os +import argparse +from collections import Counter +import pathlib + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file') +args = parser.parse_args() + + +# Build vocab from train set: correct choices + (direct answers appearing in >= 3 ) + +train_set = load_aokvqa(args.aokvqa_dir, 'train') + +vocab = [] +all_choices = Counter() +direct_answers = Counter() + +for i in train_set: + vocab.append( i['choices'][i['correct_choice_idx']] ) + all_choices.update(i['choices']) + direct_answers.update(set(i['direct_answers'])) +vocab += [k for k,v in all_choices.items() if v >= 3] +vocab += [k for k,v in direct_answers.items() if v >= 3] + +vocab = sorted(set(vocab)) +print(f"Vocab size: {len(vocab)}") + +# Save vocabulary Output + +with open(args.output_file, 'w') as f: + for v in vocab: + print(v, file=f) + +## Check validation set coverage + +val_set = load_aokvqa(args.aokvqa_dir, 'val') + +val_acc = [v['choices'][v['correct_choice_idx']] in vocab for v in val_set] +val_acc = sum(val_acc) / len(val_acc) * 100 +print(f"Val set coverage: {val_acc:.2f}" ) diff --git a/minigpt4/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py b/minigpt4/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py new file mode 100644 index 0000000..1dce760 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py @@ -0,0 +1,26 @@ +import json +from tqdm import tqdm +import argparse +import pathlib + +import torch +import clip + +parser = argparse.ArgumentParser() +parser.add_argument('--vocab', type=pathlib.Path, required=True, dest='vocab_file') +parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type') +parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file') 
+args = parser.parse_args() + +assert args.output_file.suffix == '.pt' + +device = "cuda" if torch.cuda.is_available() else "cpu" +model, preprocess = clip.load(args.model_type, device=device) + +with torch.no_grad(): + a = open(args.vocab_file).read().splitlines() + mc_text = clip.tokenize(a).to(device) + mc_text_features = torch.stack([model.encode_text(mct.unsqueeze(0)).cpu() for mct in tqdm(mc_text)], dim=1)[0] + mc_text_features = mc_text_features.float() + model_name = args.model_type.replace('/', '-').replace('@', '-') + torch.save(mc_text_features, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py b/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py new file mode 100644 index 0000000..60cd40f --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py @@ -0,0 +1,50 @@ +import os +import argparse +import pathlib +from tqdm import tqdm + +import torch +from transformers import AutoTokenizer, AutoModel + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file') +args = parser.parse_args() + +assert args.output_file.suffix == '.pt' + +## Load dataset + +dataset = load_aokvqa(args.aokvqa_dir, args.split) + +## Load model + +tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens') +model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens') +device = "cuda" if torch.cuda.is_available() else "cpu" +model = model.to(device) +model.eval() + +def mean_pooling(model_output, attention_mask): + token_embeddings = model_output[0] # First element of model_output contains all token embeddings + input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + +## Encoding loop + +with torch.no_grad(): + embeddings = {} + + for d in tqdm(dataset): + encoded_input = tokenizer([d['question']], padding=True, return_tensors='pt') + encoded_input = {k:v.to(device) for k,v in encoded_input.items()} + e = mean_pooling(model(**encoded_input), encoded_input['attention_mask']) + embeddings[d['question_id']] = { + 'question' : e[0].cpu() + } + + torch.save(embeddings, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py b/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py new file mode 100644 index 0000000..20d0455 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py @@ -0,0 +1,51 @@ +import os +from PIL import Image +from tqdm import tqdm +import argparse +import pathlib + +import torch +import clip + +from load_aokvqa import load_aokvqa, get_coco_path + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, 
dest='model_type') +parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file') +args = parser.parse_args() + +assert args.output_file.suffix == '.pt' + +## Load dataset + +dataset = load_aokvqa(args.aokvqa_dir, args.split) + +## Load model + +device = "cuda" if torch.cuda.is_available() else "cpu" +model, preprocess = clip.load(args.model_type, device=device) + +## Encoding loop + +with torch.no_grad(): + embeddings = {} + + for d in tqdm(dataset): + q = d["question"] + q_text = clip.tokenize(q).to(device) + q_text_features = model.encode_text(q_text) + + img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir)) + img = preprocess(img).unsqueeze(0).to(device) + image_features = model.encode_image(img) + + embeddings[d['question_id']] = { + 'question' : q_text_features[0].float().cpu(), + 'image' : image_features[0].float().cpu(), + } + + torch.save(embeddings, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py b/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py new file mode 100644 index 0000000..0d7277b --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py @@ -0,0 +1,62 @@ +import os +import argparse +import pathlib +from tqdm import tqdm +from PIL import Image + +import torch +import torch.nn as nn +from torchvision import models +from torchvision import transforms as T + +from load_aokvqa import load_aokvqa, get_coco_path + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file') +args = parser.parse_args() + +assert args.output_file.suffix == '.pt' + +## Load dataset + +dataset = load_aokvqa(args.aokvqa_dir, args.split) + +## Load model + +resnet_preprocess = T.Compose([ + T.Resize(size=224, interpolation=T.InterpolationMode.BICUBIC), + T.CenterCrop(size=(224, 224)), + T.ToTensor(), + T.Normalize( + mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225] + ) +]) + +device = "cuda" if torch.cuda.is_available() else "cpu" + +resnet_model = models.resnet50(pretrained=True) +resnet_model = torch.nn.Sequential( + *list(resnet_model.children())[:-1], + nn.Flatten() +) # strip classification layer +resnet_model = resnet_model.to(device) + +## Encoding loop + +with torch.no_grad(): + embeddings = {} + + for d in tqdm(dataset): + img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir)).convert('RGB') + resnet_input = resnet_preprocess(img).unsqueeze(0).to(device) + resnet_features = resnet_model(resnet_input) + embeddings[d['question_id']] = { + 'image' : resnet_features[0].cpu() + } + + torch.save(embeddings, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/environment.yml b/minigpt4/common/vqa_tools/aokvqa/environment.yml new file mode 100644 index 0000000..58284ec --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/environment.yml @@ -0,0 +1,36 @@ +name: aokvqa +channels: + - pytorch + - nvidia + - huggingface + - conda-forge + - defaults +dependencies: + - python=3.7 + - cudatoolkit=11.3 + - numpy=1.21.6 + - pytorch=1.11.0 + - torchvision=0.12.0 + - pytorch-lightning=1.6.3 + - torchmetrics=0.8.1 + - gdown=4.4.0 + - pip=22.0.4 + - pip: + - argparse==1.4.0 + - Pillow==9.0.1 + - 
tensorboard==2.9.0 + - ftfy==6.1.1 + - regex==2022.3.15 + - tqdm==4.64.0 + - clip @ git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620 + - openai==0.18.1 + - nltk==3.7 + - sacrebleu==2.0.0 + - sacremoses==0.0.53 + - sentence-transformers==2.2.0 + - datasets==2.1.0 + - tokenizers==0.10.3 + - transformers==4.10.3 + +# Next: resolve conflict between sentence-transfomers and pytorch-lightning +# pip uninstall sentencepiece diff --git a/minigpt4/common/vqa_tools/aokvqa/evaluation/__pycache__/load_aokvqa.cpython-39.pyc b/minigpt4/common/vqa_tools/aokvqa/evaluation/__pycache__/load_aokvqa.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e2afd5c0cb4bd8bfd78719427ff6edb3cd0c29d GIT binary patch literal 703 zcmY*Xy^a$x5caIS*;Ot<;-{jZMA{~sX1&S|xFH1<@P=gR4?Ig2o^59Bv*2?*0BaKyfGZEZz zE8SOO?qc^pFLL!(UgxdRD_Oa`n$OP9bIwb9DVytjts1p#-IMY{HN5`k=9ONrb(4qg zoYu9?j}>!?lU#9!h4Rp0c??h5kN)h$j{|r8qrnIU@bK4wn)?|0kLW=90JSIj5gLxH zV2_k`1nGobgv3X;Owe6)2!ygl!RFK1vw}`%^DUdK+GR#fsA9re)ELJv`#aOX 0: + predictions[q] = max(choices, key=train_freq.get) + +json.dump(predictions, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/heuristics/random_unweighted.py b/minigpt4/common/vqa_tools/aokvqa/heuristics/random_unweighted.py new file mode 100644 index 0000000..cfcf900 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/heuristics/random_unweighted.py @@ -0,0 +1,38 @@ +import os +import json +from random import seed, sample +import argparse +import pathlib + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--mc', action='store_true', dest='multiple_choice') +parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') +args = parser.parse_args() + +seed(0) + +train_set = load_aokvqa(args.aokvqa_dir, 'train') + +if args.multiple_choice is False: + choices = list(set( + [d['choices'][d['correct_choice_idx']] for d in train_set] + )) + +## + +predictions = {} + +eval_set = load_aokvqa(args.aokvqa_dir, args.split) + +for d in eval_set: + q = d['question_id'] + if args.multiple_choice: + choices = d['choices'] + predictions[q] = sample(choices, 1)[0] + +json.dump(predictions, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/heuristics/random_weighted.py b/minigpt4/common/vqa_tools/aokvqa/heuristics/random_weighted.py new file mode 100644 index 0000000..2ccfa61 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/heuristics/random_weighted.py @@ -0,0 +1,46 @@ +import os +import json +import numpy as np +import argparse +import pathlib +from collections import Counter + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--mc', action='store_true', dest='multiple_choice') +parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') +args = parser.parse_args() + +np.random.seed(0) + +train_set = load_aokvqa(args.aokvqa_dir, 'train') +train_freq = dict(Counter( + [d['choices'][d['correct_choice_idx']] for d in train_set] +)) + +if args.multiple_choice is False: + choices = list(train_freq.keys()) + probs = [f / len(train_set) for 
f in train_freq.values()] + +## + +predictions = {} + +eval_set = load_aokvqa(args.aokvqa_dir, args.split) + +for d in eval_set: + if args.multiple_choice: + choices = d['choices'] + probs = [train_freq.get(c, 0) for c in choices] + if probs == [0, 0, 0, 0]: + probs = [1, 1, 1, 1] + probs = [p / sum(probs) for p in probs] + + q = d['question_id'] + predictions[q] = np.random.choice(choices, size=1, p=probs)[0] + +json.dump(predictions, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/load_aokvqa.py b/minigpt4/common/vqa_tools/aokvqa/load_aokvqa.py new file mode 100644 index 0000000..3e3dd49 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/load_aokvqa.py @@ -0,0 +1,13 @@ +import os +import json + + +def load_aokvqa(aokvqa_dir, split, version='v1p0'): + assert split in ['train', 'val', 'test', 'test_w_ans'] + dataset = json.load(open( + os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json") + )) + return dataset + +def get_coco_path(split, image_id, coco_dir): + return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg") diff --git a/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/README.md b/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/README.md new file mode 100644 index 0000000..dc5138d --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/README.md @@ -0,0 +1,41 @@ +## Transfer Learning Experiments + +We use the following training/prediction scripts for the classifier, zero-shot, and contrastive experiments in Table 3. + +```bash +## Training +python transfer_experiments/train.py --aokvqa-dir ${AOKVQA_DIR} --vocab ${AOKVQA_DIR}/large_vocab_train.csv --log-dir ${LOG_DIR} + +--backbone clip --clip-model-type ViT-B/32 --train-features ${FEATURES_DIR}/clip-ViT-B-32_train.pt --val-features ${FEATURES_DIR}/clip-ViT-B-32_val.pt +--inputs question # OR --inputs image # OR --inputs question image +# OR +--backbone resnet --train-features ${FEATURES_DIR}/resnet_train.pt --val-features ${FEATURES_DIR}/resnet_val.pt --inputs image +# OR +--backbone bert --train-features ${FEATURES_DIR}/bert_train.pt --val-features ${FEATURES_DIR}/bert_val.pt --inputs question + +--objective classifier +# OR +--objective contrastive --vocab-features ${FEATURE_DIR}/clip-ViT-B-32_large_vocab.pt +``` + +You can make predictions for CLIP zero-shot or from a classifier/contrastive checkpoint trained above. + +```bash +## Predicting +python transfer_experiments/predict.py --aokvqa-dir ${AOKVQA_DIR} --out ${PREDS_DIR}/clip-classifier_val-mc.json + +--split val # or test +--features ${FEATURE_DIR}/clip-ViT-B-32_val.pt # adjust for backbone and eval split + +--ckpt path/to/model.ckpt +# OR +--zero-shot --clip-model-type ViT-B/32 +--inputs question # OR --inputs image # OR --inputs question image + +--mc # Multiple-choice. Exclude for direct-answer. 
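+
+# A concrete combined invocation for one setting (CLIP zero-shot, multiple-choice,
+# question+image inputs); the output path below is a hypothetical placeholder:
+# python transfer_experiments/predict.py --aokvqa-dir ${AOKVQA_DIR} --split val \
+#     --features ${FEATURES_DIR}/clip-ViT-B-32_val.pt \
+#     --zero-shot --clip-model-type ViT-B/32 --inputs question image \
+#     --mc --out ${PREDS_DIR}/clip-zeroshot_val-mc.json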
+ +# IF classifier OR direct-answer +--vocab ${AOKVQA_DIR}/large_vocab_train.csv +# IF contrastive/zero-shot AND direct-answer +--vocab-features ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt +``` diff --git a/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/predict.py b/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/predict.py new file mode 100644 index 0000000..d2fbb42 --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/predict.py @@ -0,0 +1,126 @@ +import sys +import os +import argparse +import pathlib +from tqdm import tqdm +import json + +import torch +import torch.nn as nn + +# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663 +import sentencepiece; import pytorch_lightning as pl; import clip + +from transfer_experiments.train import LinearClassifier +from load_aokvqa import load_aokvqa +from evaluation.remap_predictions import map_to_choices + + +parser = argparse.ArgumentParser() +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--features', type=pathlib.Path, required=True) +parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file') +# +parser_weights = parser.add_mutually_exclusive_group(required=True) + +parser_weights.add_argument('--ckpt', type=pathlib.Path, dest='checkpoint_path') + +parser_weights.add_argument('--zero-shot', action='store_true', dest='clip_zero_shot') +parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=('--zero-shot' in sys.argv)) +# +parser.add_argument('--vocab', type=argparse.FileType('r')) +parser.add_argument('--vocab-features', type=pathlib.Path, dest='vocab_features') +parser.add_argument('--mc', action='store_true', dest='multiple_choice') + +parser.add_argument('--clip-model-type', type=str, + choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], + dest='clip_model_type', required=('--zero-shot' in sys.argv and '--mc' in sys.argv)) +# +args = parser.parse_args() + + +## Load dataset + +aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split) + +## Load models + +device = "cuda" if torch.cuda.is_available() else "cpu" + +if args.checkpoint_path is not None: + classifier = LinearClassifier.load_from_checkpoint(args.checkpoint_path) + classifier.to(device) + hp = classifier.hparams +elif args.clip_zero_shot: + classifier = nn.Identity().to(device) + hp = pl.utilities.AttributeDict(backbone='clip', clip_model_type=args.clip_model_type, objective='zero-shot', inputs=args.inputs) + +# Load input features + +embeddings = torch.load(args.features) +if hp.backbone == 'clip': + for q in embeddings.keys(): + embeddings[q]['question'] = embeddings[q]['question'] / embeddings[q]['question'].norm(dim=-1, keepdim=True) + embeddings[q]['image'] = embeddings[q]['image'] / embeddings[q]['image'].norm(dim=-1, keepdim=True) + +# Load vocab, vocab features, clip + +if (hp.objective == 'classifier') or \ + (hp.objective in ['contrastive', 'zero-shot'] and args.multiple_choice is False): + vocab = args.vocab.read().splitlines() + +if hp.objective in ['contrastive', 'zero-shot']: + if args.multiple_choice is False: + vocab_features = torch.load(args.vocab_features).cpu() + vocab_features /= vocab_features.norm(dim=-1, keepdim=True) + else: + clip_model = clip.load(hp.clip_model_type, device=device)[0] + logit_scale = clip_model.logit_scale.exp().cpu() + +## 
Prediction loop + +predictions = {} + +with torch.no_grad(): + for o in tqdm(aokvqa_set): + q = o['question_id'] + + # Load input embedding (from question / image) + if hp.objective == 'zero-shot' and ('question' in hp.inputs and 'image' in hp.inputs): + e = embeddings[q]['question'] + embeddings[q]['image'] + elif 'question' in hp.inputs and 'image' in hp.inputs: + e = torch.cat((embeddings[q]['question'], embeddings[q]['image'])) + elif 'question' in hp.inputs: + e = embeddings[q]['question'] + elif 'image' in hp.inputs: + e = embeddings[q]['image'] + + # Pass inputs through model + e = e.unsqueeze(0).to(device) + x = classifier(e)[0].cpu() + + # Predict + if hp.objective in ['contrastive', 'zero-shot']: + if args.multiple_choice: + vocab = o['choices'] + # Encode choices + vocab_features = clip.tokenize(vocab).to(device) + vocab_features = torch.stack([ + clip_model.encode_text(v.unsqueeze(0)) for v in vocab_features + ], dim=1)[0] + vocab_features /= vocab_features.norm(dim=-1, keepdim=True) + vocab_features = vocab_features.float().cpu() + + x = logit_scale * x @ vocab_features.t() + x = x.softmax(dim=-1) + + predictions[q] = vocab[x.argmax().item()] + +## Save and evaluate predictions + +# Map prediction to nearest neighbor choice (by word embeddings) +if args.multiple_choice and hp.objective == 'classifier': + predictions = map_to_choices(aokvqa_set, predictions) + +json.dump(predictions, args.output_file) diff --git a/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/train.py b/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/train.py new file mode 100644 index 0000000..ac48b5a --- /dev/null +++ b/minigpt4/common/vqa_tools/aokvqa/transfer_experiments/train.py @@ -0,0 +1,263 @@ +import os +import sys +import json +import argparse +import pathlib +import random + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import Dataset, DataLoader + +# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663 +import sentencepiece; import pytorch_lightning as pl + +import torchmetrics.functional as MF + +from load_aokvqa import load_aokvqa + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') + parser.add_argument('--vocab', type=argparse.FileType('r'), required=True) + parser.add_argument('--log-dir', type=pathlib.Path, dest='log_dir', required=True) + # + parser.add_argument('--backbone', type=str, choices=['clip', 'resnet', 'bert'], required=True) + parser.add_argument('--clip-model-type', type=str, + choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], + dest='clip_model_type', required=('clip' in sys.argv)) + parser.add_argument('--train-features', type=pathlib.Path, required=True, dest='train_features') + parser.add_argument('--val-features', type=pathlib.Path, required=True, dest='val_features') + parser.add_argument('--vocab-features', type=pathlib.Path, required=('contrastive' in sys.argv), dest='vocab_features') + # + parser.add_argument('--objective', type=str, choices=['classifier', 'contrastive'], required=True) + parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=True) + # Defaults + parser.add_argument('--bs', type=int, default=128, dest='batch_size') + parser.add_argument('--lr', type=float, default=0.01) + parser.add_argument('--epochs', type=int, default=500) + parser.add_argument('--gpus', type=int, default=1) + args = 
parser.parse_args() + + pl.seed_everything(1) + vocab = args.vocab.read().splitlines() + + ## Data loading + + dm = AokvqaEmbeddingsDataModule( + args.aokvqa_dir, + args.train_features, + args.val_features, + args.objective, + args.backbone, + args.inputs, + vocab, + args.vocab_features, + batch_size=args.batch_size, + num_workers=16 + ) + + ## Model definition + + model = LinearClassifier( + args.objective, + args.backbone, + args.clip_model_type, + args.inputs, + len(vocab), + args.lr + ) + + ## Training and testing loops + + logger = pl.loggers.TensorBoardLogger( + args.log_dir, + name=f'{args.backbone}-{args.objective}', + version=f"inputs:{'+'.join(args.inputs)}" + ) + + trainer = pl.Trainer( + logger=logger, + gpus=args.gpus, + max_epochs=args.epochs, + callbacks=[ + pl.callbacks.ModelCheckpoint( + monitor="val_acc", + filename="{epoch:02d}-{val_acc:.2f}", + mode="max" + ) + ], + ) + + trainer.fit(model, dm) + + +class AokvqaEmbeddingsDataset(Dataset): + def __init__(self, aokvqa_dir, split, input_features, objective, backbone, inputs, vocab, vocab_features): + + aokvqa_set = load_aokvqa(aokvqa_dir, split) + + assert ( backbone == 'resnet' and inputs == ['image'] and objective == 'classifier' ) \ + or ( backbone == 'bert' and inputs == ['question'] and objective == 'classifier' ) \ + or ( backbone == 'clip' ) + + embeddings = torch.load(input_features) + if backbone == 'clip': + for q in embeddings.keys(): + embeddings[q]['question'] /= embeddings[q]['question'].norm(dim=-1, keepdim=True) + embeddings[q]['image'] /= embeddings[q]['image'].norm(dim=-1, keepdim=True) + if objective == 'contrastive': + vocab_embeddings = torch.load(vocab_features) + vocab_embeddings /= vocab_embeddings.norm(dim=-1, keepdim=True) + + self.objective = objective + self.vocab_len = len(vocab) + + self.embeddings = [] + self.answers = [] + + for o in aokvqa_set: + correct_answers = set([o['choices'][o['correct_choice_idx']]] + o['direct_answers']) + correct_answers = [vocab.index(a) for a in correct_answers if a in vocab] + if self.objective == 'contrastive': + correct_answers = [vocab_embeddings[a] for a in correct_answers] + if len(correct_answers) == 0: continue + self.answers.append(correct_answers) + + q = o['question_id'] + if 'question' in inputs and 'image' in inputs: + e = torch.cat((embeddings[q]['question'], embeddings[q]['image'])) + elif 'question' in inputs and 'image' not in inputs: + e = embeddings[q]['question'] + elif 'question' not in inputs and 'image' in inputs: + e = embeddings[q]['image'] + self.embeddings.append(e) + + def __getitem__(self, index): + e = self.embeddings[index] + a = self.answers[index] + if self.objective == 'classifier': + a = torch.sum(F.one_hot(torch.tensor(a), num_classes=self.vocab_len), dim=0) + elif self.objective == 'contrastive': + a = random.sample(a, 1)[0] + return e, a + + def __len__(self): + return len(self.embeddings) + + +class AokvqaEmbeddingsDataModule(pl.LightningDataModule): + + def __init__(self, aokvqa_dir, train_features, val_features, objective, backbone, inputs, vocab, vocab_features, batch_size=1, num_workers=0): + super().__init__() + self.aokvqa_dir = aokvqa_dir + self.train_features = train_features + self.val_features = val_features + self.objective = objective + self.backbone = backbone + self.inputs = inputs + self.vocab = vocab + self.vocab_features = vocab_features + self.batch_size = batch_size + self.num_workers = num_workers + + def setup(self, stage=None): + self.train_dataset = AokvqaEmbeddingsDataset( + self.aokvqa_dir, 'train', 
self.train_features, self.objective, + self.backbone, self.inputs, self.vocab, self.vocab_features + ) + self.val_dataset = AokvqaEmbeddingsDataset( + self.aokvqa_dir, 'val', self.val_features, self.objective, + self.backbone, self.inputs, self.vocab, self.vocab_features + ) + + def train_dataloader(self): + return DataLoader( + self.train_dataset, batch_size=self.batch_size, shuffle=True, + num_workers=int(0.8 * self.num_workers) + ) + + def val_dataloader(self): + return DataLoader( + self.val_dataset, batch_size=self.batch_size, shuffle=False, + num_workers=int(0.2 * self.num_workers) + ) + + +class LinearClassifier(pl.LightningModule): + def __init__(self, objective, backbone, clip_model_type, inputs, vocab_len, lr=0.001): + super().__init__() + self.save_hyperparameters(ignore=['lr']) + self.lr = lr + + if self.hparams.backbone == 'clip': + clip_dim = { + 'RN50' : 1024, + 'RN50x4' : 640, + 'RN50x16' : 768, + 'RN50x64' : 1024, + 'RN101' : 512, + 'ViT-B/32' : 512, + 'ViT-B/16' : 512, + 'ViT-L/14' : 768, + 'ViT-L/14@336px' : 768, + }[clip_model_type] + emb_dim = clip_dim * len(inputs) + elif self.hparams.backbone == 'resnet': + emb_dim = 2048 + elif self.hparams.backbone == 'bert': + emb_dim = 768 + + if self.hparams.objective == 'classifier': + out_dim = vocab_len + elif self.hparams.objective == 'contrastive': + out_dim = clip_dim + + self.linear = nn.Linear(emb_dim, out_dim) + + def forward(self, x): + x = self.linear(x) + if self.hparams.objective == 'classifier': + x = torch.sigmoid(x) + return x + + def compute_loss(self, batch): + x, y = batch + + y_pred = self.forward(x) + + if self.hparams.objective == 'classifier': + loss = F.binary_cross_entropy(y_pred, y.float()) + elif self.hparams.objective == 'contrastive': + indices = torch.arange(0, x.shape[0], dtype=torch.int64, device=self.device) + sim = (y_pred @ y.T).softmax(dim=-1) + loss = F.cross_entropy(sim, indices) + + if self.hparams.objective == 'classifier': + acc = MF.f1_score(y_pred, y) + elif self.hparams.objective == 'contrastive': + acc = torch.mean(sim[indices, indices]) + + return loss, acc + + def training_step(self, batch, batch_idx): + loss, acc = self.compute_loss(batch) + self.log("train_loss", loss) + self.log("train_acc", acc) + return loss + + def validation_step(self, batch, batch_idx): + loss, acc = self.compute_loss(batch) + self.log("val_loss", loss) + self.log("val_acc", acc) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + +if __name__ == '__main__': + main() diff --git a/minigpt4/common/vqa_tools/vqa.py b/minigpt4/common/vqa_tools/vqa.py new file mode 100644 index 0000000..a386b90 --- /dev/null +++ b/minigpt4/common/vqa_tools/vqa.py @@ -0,0 +1,211 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +__author__ = "aagrawal" +__version__ = "0.9" + +# Interface for accessing the VQA dataset. + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py). + +# The following functions are defined: +# VQA - VQA class that loads VQA annotation file and prepares data structures. +# getQuesIds - Get question ids that satisfy given filter conditions. +# getImgIds - Get image ids that satisfy given filter conditions. 
+# loadQA - Load questions and answers with the specified question ids. +# showQA - Display the specified questions and answers. +# loadRes - Load result file and create result object. + +# Help on each function can be accessed by: "help(COCO.function)" + +import json +import datetime +import copy + + +class VQA: + def __init__(self, annotation_file=None, question_file=None): + """ + Constructor of VQA helper class for reading and visualizing questions and answers. + :param annotation_file (str): location of VQA annotation file + :return: + """ + # load dataset + self.dataset = {} + self.questions = {} + self.qa = {} + self.qqa = {} + self.imgToQA = {} + if not annotation_file == None and not question_file == None: + print("loading VQA annotations and questions into memory...") + time_t = datetime.datetime.utcnow() + dataset = json.load(open(annotation_file, "r")) + questions = json.load(open(question_file, "r")) + self.dataset = dataset + self.questions = questions + self.createIndex() + + def createIndex(self): + # create index + print("creating index...") + imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]} + qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]} + qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]} + for ann in self.dataset["annotations"]: + imgToQA[ann["image_id"]] += [ann] + qa[ann["question_id"]] = ann + for ques in self.questions["questions"]: + qqa[ques["question_id"]] = ques + print("index created!") + + # create class members + self.qa = qa + self.qqa = qqa + self.imgToQA = imgToQA + + def info(self): + """ + Print information about the VQA annotation file. + :return: + """ + for key, value in self.datset["info"].items(): + print("%s: %s" % (key, value)) + + def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]): + """ + Get question ids that satisfy given filter conditions. default skips that filter + :param imgIds (int array) : get question ids for given imgs + quesTypes (str array) : get question ids for given question types + ansTypes (str array) : get question ids for given answer types + :return: ids (int array) : integer array of question ids + """ + imgIds = imgIds if type(imgIds) == list else [imgIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(imgIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset["annotations"] + else: + if not len(imgIds) == 0: + anns = sum( + [self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], + [], + ) + else: + anns = self.dataset["annotations"] + anns = ( + anns + if len(quesTypes) == 0 + else [ann for ann in anns if ann["question_type"] in quesTypes] + ) + anns = ( + anns + if len(ansTypes) == 0 + else [ann for ann in anns if ann["answer_type"] in ansTypes] + ) + ids = [ann["question_id"] for ann in anns] + return ids + + def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]): + """ + Get image ids that satisfy given filter conditions. 
default skips that filter + :param quesIds (int array) : get image ids for given question ids + quesTypes (str array) : get image ids for given question types + ansTypes (str array) : get image ids for given answer types + :return: ids (int array) : integer array of image ids + """ + quesIds = quesIds if type(quesIds) == list else [quesIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(quesIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset["annotations"] + else: + if not len(quesIds) == 0: + anns = sum( + [self.qa[quesId] for quesId in quesIds if quesId in self.qa], [] + ) + else: + anns = self.dataset["annotations"] + anns = ( + anns + if len(quesTypes) == 0 + else [ann for ann in anns if ann["question_type"] in quesTypes] + ) + anns = ( + anns + if len(ansTypes) == 0 + else [ann for ann in anns if ann["answer_type"] in ansTypes] + ) + ids = [ann["image_id"] for ann in anns] + return ids + + def loadQA(self, ids=[]): + """ + Load questions and answers with the specified question ids. + :param ids (int array) : integer ids specifying question ids + :return: qa (object array) : loaded qa objects + """ + if type(ids) == list: + return [self.qa[id] for id in ids] + elif type(ids) == int: + return [self.qa[ids]] + + def showQA(self, anns): + """ + Display the specified annotations. + :param anns (array of object): annotations to display + :return: None + """ + if len(anns) == 0: + return 0 + for ann in anns: + quesId = ann["question_id"] + print("Question: %s" % (self.qqa[quesId]["question"])) + for ans in ann["answers"]: + print("Answer %d: %s" % (ans["answer_id"], ans["answer"])) + + def loadRes(self, resFile, quesFile): + """ + Load result file and return a result object. + :param resFile (str) : file name of result file + :return: res (obj) : result api object + """ + res = VQA() + res.questions = json.load(open(quesFile)) + res.dataset["info"] = copy.deepcopy(self.questions["info"]) + res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"]) + res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"]) + res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"]) + res.dataset["license"] = copy.deepcopy(self.questions["license"]) + + print("Loading and preparing results... ") + time_t = datetime.datetime.utcnow() + anns = json.load(open(resFile)) + assert type(anns) == list, "results is not an array of objects" + annsQuesIds = [ann["question_id"] for ann in anns] + assert set(annsQuesIds) == set( + self.getQuesIds() + ), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file." 
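+        # The loop below copies metadata from the ground-truth annotations onto each
+        # predicted answer (image id, question type, answer type); for the
+        # "Multiple Choice" task it additionally checks that the prediction is one of
+        # the question's candidate choices.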
+ for ann in anns: + quesId = ann["question_id"] + if res.dataset["task_type"] == "Multiple Choice": + assert ( + ann["answer"] in self.qqa[quesId]["multiple_choices"] + ), "predicted answer is not one of the multiple choices" + qaAnn = self.qa[quesId] + ann["image_id"] = qaAnn["image_id"] + ann["question_type"] = qaAnn["question_type"] + ann["answer_type"] = qaAnn["answer_type"] + print( + "DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds()) + ) + + res.dataset["annotations"] = anns + res.createIndex() + return res diff --git a/minigpt4/common/vqa_tools/vqa_eval.py b/minigpt4/common/vqa_tools/vqa_eval.py new file mode 100644 index 0000000..ee808b3 --- /dev/null +++ b/minigpt4/common/vqa_tools/vqa_eval.py @@ -0,0 +1,324 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +# coding=utf-8 + +__author__ = "aagrawal" + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py). +import sys +import re + + +class VQAEval: + def __init__(self, vqa=None, vqaRes=None, n=2): + self.n = n + self.accuracy = {} + self.evalQA = {} + self.evalQuesType = {} + self.evalAnsType = {} + self.vqa = vqa + self.vqaRes = vqaRes + if vqa is not None: + self.params = {"question_id": vqa.getQuesIds()} + self.contractions = { + "aint": "ain't", + "arent": "aren't", + "cant": "can't", + "couldve": "could've", + "couldnt": "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + "didnt": "didn't", + "doesnt": "doesn't", + "dont": "don't", + "hadnt": "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + "hasnt": "hasn't", + "havent": "haven't", + "hed": "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + "hes": "he's", + "howd": "how'd", + "howll": "how'll", + "hows": "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + "Im": "I'm", + "Ive": "I've", + "isnt": "isn't", + "itd": "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + "itll": "it'll", + "let's": "let's", + "maam": "ma'am", + "mightnt": "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + "mightve": "might've", + "mustnt": "mustn't", + "mustve": "must've", + "neednt": "needn't", + "notve": "not've", + "oclock": "o'clock", + "oughtnt": "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + "shant": "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + "shouldve": "should've", + "shouldnt": "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": "somebodyd", + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + "somebodyll": "somebody'll", + "somebodys": "somebody's", + "someoned": "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + "someonell": "someone'll", + "someones": "someone's", + "somethingd": "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + "somethingll": "something'll", + "thats": "that's", + "thered": "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + "therere": "there're", + "theres": "there's", + "theyd": "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + "theyll": "they'll", + "theyre": "they're", + "theyve": "they've", + "twas": "'twas", + 
"wasnt": "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + "weve": "we've", + "werent": "weren't", + "whatll": "what'll", + "whatre": "what're", + "whats": "what's", + "whatve": "what've", + "whens": "when's", + "whered": "where'd", + "wheres": "where's", + "whereve": "where've", + "whod": "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + "wholl": "who'll", + "whos": "who's", + "whove": "who've", + "whyll": "why'll", + "whyre": "why're", + "whys": "why's", + "wont": "won't", + "wouldve": "would've", + "wouldnt": "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + "yall": "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + "youd": "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + "youll": "you'll", + "youre": "you're", + "youve": "you've", + } + self.manualMap = { + "none": "0", + "zero": "0", + "one": "1", + "two": "2", + "three": "3", + "four": "4", + "five": "5", + "six": "6", + "seven": "7", + "eight": "8", + "nine": "9", + "ten": "10", + } + self.articles = ["a", "an", "the"] + + self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") + self.commaStrip = re.compile("(\d)(,)(\d)") + self.punct = [ + ";", + r"/", + "[", + "]", + '"', + "{", + "}", + "(", + ")", + "=", + "+", + "\\", + "_", + "-", + ">", + "<", + "@", + "`", + ",", + "?", + "!", + ] + + def evaluate(self, quesIds=None): + if quesIds == None: + quesIds = [quesId for quesId in self.params["question_id"]] + gts = {} + res = {} + for quesId in quesIds: + gts[quesId] = self.vqa.qa[quesId] + res[quesId] = self.vqaRes.qa[quesId] + + # ================================================= + # Compute accuracy + # ================================================= + accQA = [] + accQuesType = {} + accAnsType = {} + print("computing accuracy") + step = 0 + for quesId in quesIds: + resAns = res[quesId]["answer"] + resAns = resAns.replace("\n", " ") + resAns = resAns.replace("\t", " ") + resAns = resAns.strip() + resAns = self.processPunctuation(resAns) + resAns = self.processDigitArticle(resAns) + gtAcc = [] + gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]] + if len(set(gtAnswers)) > 1: + for ansDic in gts[quesId]["answers"]: + ansDic["answer"] = self.processPunctuation(ansDic["answer"]) + for gtAnsDatum in gts[quesId]["answers"]: + otherGTAns = [ + item for item in gts[quesId]["answers"] if item != gtAnsDatum + ] + matchingAns = [item for item in otherGTAns if item["answer"] == resAns] + acc = min(1, float(len(matchingAns)) / 3) + gtAcc.append(acc) + quesType = gts[quesId]["question_type"] + ansType = gts[quesId]["answer_type"] + avgGTAcc = float(sum(gtAcc)) / len(gtAcc) + accQA.append(avgGTAcc) + if quesType not in accQuesType: + accQuesType[quesType] = [] + accQuesType[quesType].append(avgGTAcc) + if ansType not in accAnsType: + accAnsType[ansType] = [] + accAnsType[ansType].append(avgGTAcc) + self.setEvalQA(quesId, avgGTAcc) + self.setEvalQuesType(quesId, quesType, avgGTAcc) + self.setEvalAnsType(quesId, ansType, avgGTAcc) + if step % 100 == 0: + self.updateProgress(step / float(len(quesIds))) + step = step + 1 + + self.setAccuracy(accQA, accQuesType, accAnsType) + print("Done computing accuracy") + + def processPunctuation(self, inText): + outText = inText + for p in self.punct: + if (p + " " in inText or " " + p in inText) or ( + re.search(self.commaStrip, inText) != None + ): + outText = outText.replace(p, "") + else: + outText = outText.replace(p, " ") + outText = 
self.periodStrip.sub("", outText, re.UNICODE) + return outText + + def processDigitArticle(self, inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = self.manualMap.setdefault(word, word) + if word not in self.articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in self.contractions: + outText[wordId] = self.contractions[word] + outText = " ".join(outText) + return outText + + def setAccuracy(self, accQA, accQuesType, accAnsType): + self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n) + self.accuracy["perQuestionType"] = { + quesType: round( + 100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]), + self.n, + ) + for quesType in accQuesType + } + self.accuracy["perAnswerType"] = { + ansType: round( + 100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n + ) + for ansType in accAnsType + } + + def setEvalQA(self, quesId, acc): + self.evalQA[quesId] = round(100 * acc, self.n) + + def setEvalQuesType(self, quesId, quesType, acc): + if quesType not in self.evalQuesType: + self.evalQuesType[quesType] = {} + self.evalQuesType[quesType][quesId] = round(100 * acc, self.n) + + def setEvalAnsType(self, quesId, ansType, acc): + if ansType not in self.evalAnsType: + self.evalAnsType[ansType] = {} + self.evalAnsType[ansType][quesId] = round(100 * acc, self.n) + + def updateProgress(self, progress): + barLength = 20 + status = "" + if isinstance(progress, int): + progress = float(progress) + if not isinstance(progress, float): + progress = 0 + status = "error: progress var must be float\r\n" + if progress < 0: + progress = 0 + status = "Halt...\r\n" + if progress >= 1: + progress = 1 + status = "Done...\r\n" + block = int(round(barLength * progress)) + text = "\rFinshed Percent: [{0}] {1}% {2}".format( + "#" * block + "-" * (barLength - block), int(progress * 100), status + ) + sys.stdout.write(text) + sys.stdout.flush()
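A minimal sketch of how the vendored `vqa.py` and `vqa_eval.py` modules above are typically driven together, assuming the repository root is on `PYTHONPATH`; the annotation, question, and result paths are hypothetical placeholders.

```python
from minigpt4.common.vqa_tools.vqa import VQA
from minigpt4.common.vqa_tools.vqa_eval import VQAEval

# Hypothetical paths: ground-truth annotation/question files plus a JSON list of
# {"question_id": ..., "answer": ...} predictions.
ann_file = "annotations.json"
ques_file = "questions.json"
res_file = "predictions.json"

vqa = VQA(ann_file, ques_file)              # load ground truth and build indices
vqa_res = vqa.loadRes(res_file, ques_file)  # wrap predictions as a VQA-style object

evaluator = VQAEval(vqa, vqa_res, n=2)      # n = decimal places for reported accuracies
evaluator.evaluate()

print("Overall accuracy:", evaluator.accuracy["overall"])
for ans_type, acc in evaluator.accuracy["perAnswerType"].items():
    print(ans_type, acc)
```

Per-question-type breakdowns are likewise available afterwards via `evaluator.accuracy["perQuestionType"]`.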