update evaluation files

junchen14 2023-11-01 11:51:39 +03:00
parent 45c07e5603
commit c4d6737266
3 changed files with 15 additions and 15 deletions

View File

@@ -97,7 +97,7 @@ Download the pretrained model checkpoints
 | MiniGPT-v2 (after stage-2) | MiniGPT-v2 (after stage-3) | MiniGPT-v2 (online developing demo)|
 |------------------------------|------------------------------|------------------------------|
-| [Download](https://drive.google.com/file/d/1Vi_E7ZtZXRAQcyz4f8E6LtLh2UXABCmu/view?usp=sharing) |[Download](https://drive.google.com/file/d/1jAbxUiyl04SFJMN4sF1vvUU69Etuz4qa/view?usp=sharing) | [Download](https://drive.google.com/file/d/1aVbfW7nkCSYx99_vCRyP1sOlQiWVSnAl/view?usp=sharing) |
+| [Download](https://drive.google.com/file/d/1Vi_E7ZtZXRAQcyz4f8E6LtLh2UXABCmu/view?usp=sharing) |[Download](https://drive.google.com/file/d/1HkoUUrjzFGn33cSiUkI-KcT-zysCynAz/view?usp=sharing) | [Download](https://drive.google.com/file/d/1aVbfW7nkCSYx99_vCRyP1sOlQiWVSnAl/view?usp=sharing) |
 For **MiniGPT-v2**, set the path to the pretrained checkpoint in the evaluation config file
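As a hedged illustration of that step, the two model paths can be filled in by editing the YAML directly or with a small script. The key names `llama_model` and `ckpt` follow the evaluation config shown in the next file; the config file name below is an assumption, so substitute the actual path in your checkout.

```python
import yaml  # PyYAML

# Assumed config file name; point this at the repo's evaluation config.
cfg_path = "eval_configs/minigptv2_benchmark_evaluation.yaml"

with open(cfg_path) as f:
    cfg = yaml.safe_load(f)

# Fill in the LLaMA-2 weights and the downloaded MiniGPT-v2 checkpoint (paths are placeholders).
cfg["model"]["llama_model"] = "/path/to/llama-2-7b-chat"
cfg["model"]["ckpt"] = "/path/to/minigptv2_checkpoint.pth"

with open(cfg_path, "w") as f:
    yaml.safe_dump(cfg, f, sort_keys=False)
```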

View File

@@ -5,8 +5,8 @@ model:
   end_sym: "</s>"
   low_resource: False
   prompt_template: '[INST] {} [/INST]'
-  llama_model: "/ibex/project/c2133/llama_v2/llama-2-7b-chat-pytorch_update"
-  ckpt: "/ibex/project/c2133/minigpt_checkpoints/checkpoint_stage3_correct/checkpoint_10.pth"
+  llama_model: ""
+  ckpt: ""
   lora_r: 64
   lora_alpha: 16
@@ -23,8 +23,8 @@ datasets:
 evaluation_datasets:
   refcoco:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset
-    img_path: /ibex/ai/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
     batch_size: 10
   refcocog:
@@ -43,15 +43,15 @@ evaluation_datasets:
     max_new_tokens: 20
     batch_size: 10
   okvqa:
-    eval_file_path: /ibex/project/c2133/blip_dataset/ok_vqa_copy
-    img_path: /ibex/ai/reference/CV/COCO/cocoapi/data/2017/images/jpeg/train
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
     batch_size: 10
   vizwiz:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset/vizwiz/val.json
-    img_path: /ibex/project/c2133/minigpt4_v2_dataset/vizwiz/val
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
-    batch_size: 10
+    batch_size: 100
   iconvqa:
     eval_file_path: /path/to/eval/annotation/path
     img_path: /path/to/eval/image/path
@@ -63,10 +63,10 @@ evaluation_datasets:
     max_new_tokens: 20
     batch_size: 10
   hm:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset/hateful_meme/dev.jsonl
-    img_path: /ibex/project/c2133/minigpt4_v2_dataset/hateful_meme
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
-    batch_size: 10
+    batch_size: 100
 run:
   task: image_text_pretrain
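Since this commit replaces cluster-specific paths with `/path/to/...` placeholders, every `eval_file_path` and `img_path` under `evaluation_datasets` has to be filled in before evaluation will run. A minimal sketch (config file name assumed, as above) that flags any placeholder or missing path:

```python
import os
import yaml  # PyYAML

cfg_path = "eval_configs/minigptv2_benchmark_evaluation.yaml"  # assumed name

with open(cfg_path) as f:
    cfg = yaml.safe_load(f)

# Warn about any dataset entry that still points at a placeholder or a non-existent path.
for name, ds in cfg.get("evaluation_datasets", {}).items():
    for key in ("eval_file_path", "img_path"):
        path = str(ds.get(key, ""))
        if path.startswith("/path/to") or not os.path.exists(path):
            print(f"{name}.{key} still needs a real path: {path!r}")
```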

View File

@@ -65,7 +65,7 @@ if 'okvqa' in args.dataset:
         result['question_id'] = int(question_id)
         minigpt4_predict.append(result)
-    file_save_path= os.join(save_path,"okvqa.json")
+    file_save_path= os.path.join(save_path,"okvqa.json")
     with open(file_save_path,'w') as f:
         json.dump(minigpt4_predict, f)
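The change above fixes a crash: the `os` module has no `join` attribute, so the old line raised `AttributeError`; path joining goes through `os.path.join`. A minimal, self-contained sketch of the save pattern (directory and prediction contents are illustrative):

```python
import json
import os

save_path = "results"  # illustrative output directory
os.makedirs(save_path, exist_ok=True)

minigpt4_predict = [{"question_id": 1, "answer": "example"}]  # illustrative predictions

file_save_path = os.path.join(save_path, "okvqa.json")
with open(file_save_path, "w") as f:
    json.dump(minigpt4_predict, f)
```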
@@ -245,7 +245,7 @@ if 'hm' in args.dataset:
         if answer == label:
             count+=1
         total+=1
     print('hm val:', count / total * 100, flush=True)
     file_save_path = os.path.join(save_path, "hm.json")
     with open(file_save_path,'w') as f: