update evaluation files

junchen14 2023-11-01 11:51:39 +03:00
parent 45c07e5603
commit c4d6737266
3 changed files with 15 additions and 15 deletions

View File

@@ -97,7 +97,7 @@ Download the pretrained model checkpoints
 | MiniGPT-v2 (after stage-2) | MiniGPT-v2 (after stage-3) | MiniGPT-v2 (online developing demo)|
 |------------------------------|------------------------------|------------------------------|
-| [Download](https://drive.google.com/file/d/1Vi_E7ZtZXRAQcyz4f8E6LtLh2UXABCmu/view?usp=sharing) |[Download](https://drive.google.com/file/d/1jAbxUiyl04SFJMN4sF1vvUU69Etuz4qa/view?usp=sharing) | [Download](https://drive.google.com/file/d/1aVbfW7nkCSYx99_vCRyP1sOlQiWVSnAl/view?usp=sharing) |
+| [Download](https://drive.google.com/file/d/1Vi_E7ZtZXRAQcyz4f8E6LtLh2UXABCmu/view?usp=sharing) |[Download](https://drive.google.com/file/d/1HkoUUrjzFGn33cSiUkI-KcT-zysCynAz/view?usp=sharing) | [Download](https://drive.google.com/file/d/1aVbfW7nkCSYx99_vCRyP1sOlQiWVSnAl/view?usp=sharing) |
 For **MiniGPT-v2**, set the path to the pretrained checkpoint in the evaluation config file
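
The README line above tells users to point the evaluation config at the downloaded checkpoint. As a quick sanity check after editing that config, here is a minimal sketch (assuming PyYAML is installed; the config filename is a placeholder, and the `model` keys follow the layout shown in the config diff below):

```python
import os
import yaml

# Placeholder path; substitute the evaluation config you actually edited.
CONFIG_PATH = "eval_configs/minigptv2_benchmark_evaluation.yaml"

with open(CONFIG_PATH) as f:
    cfg = yaml.safe_load(f)

# The eval config keeps the checkpoint paths under the top-level `model` section.
for key in ("llama_model", "ckpt"):
    path = cfg["model"].get(key, "")
    status = "ok" if path and os.path.exists(path) else "missing or unset"
    print(f"{key}: {path!r} -> {status}")
```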

View File

@@ -5,8 +5,8 @@ model:
   end_sym: "</s>"
   low_resource: False
   prompt_template: '[INST] {} [/INST]'
-  llama_model: "/ibex/project/c2133/llama_v2/llama-2-7b-chat-pytorch_update"
-  ckpt: "/ibex/project/c2133/minigpt_checkpoints/checkpoint_stage3_correct/checkpoint_10.pth"
+  llama_model: ""
+  ckpt: ""
   lora_r: 64
   lora_alpha: 16
@@ -23,8 +23,8 @@ datasets:
 evaluation_datasets:
   refcoco:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset
-    img_path: /ibex/ai/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
     batch_size: 10
   refcocog:
@@ -43,15 +43,15 @@ evaluation_datasets:
     max_new_tokens: 20
     batch_size: 10
   okvqa:
-    eval_file_path: /ibex/project/c2133/blip_dataset/ok_vqa_copy
-    img_path: /ibex/ai/reference/CV/COCO/cocoapi/data/2017/images/jpeg/train
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
     batch_size: 10
   vizwiz:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset/vizwiz/val.json
-    img_path: /ibex/project/c2133/minigpt4_v2_dataset/vizwiz/val
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
-    batch_size: 10
+    batch_size: 100
   iconvqa:
     eval_file_path: /path/to/eval/annotation/path
     img_path: /path/to/eval/image/path
@@ -63,10 +63,10 @@ evaluation_datasets:
     max_new_tokens: 20
     batch_size: 10
   hm:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset/hateful_meme/dev.jsonl
-    img_path: /ibex/project/c2133/minigpt4_v2_dataset/hateful_meme
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
-    batch_size: 10
+    batch_size: 100
 run:
   task: image_text_pretrain
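
Since every dataset entry above now ships with `/path/to/...` placeholders, a small pre-flight check can catch entries that were never filled in. A minimal sketch, assuming PyYAML is available and that `evaluation_datasets` is a top-level key as it appears in this diff (the config filename is again a placeholder):

```python
import os
import yaml

CONFIG_PATH = "eval_configs/minigptv2_benchmark_evaluation.yaml"  # placeholder filename

with open(CONFIG_PATH) as f:
    cfg = yaml.safe_load(f)

# Flag any eval_file_path / img_path that is still a template value or does not exist.
for name, ds in cfg.get("evaluation_datasets", {}).items():
    for key in ("eval_file_path", "img_path"):
        value = str(ds.get(key, ""))
        if value.startswith("/path/to") or not os.path.exists(value):
            print(f"[{name}] {key} needs a real path (currently {value!r})")
```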

View File

@@ -65,7 +65,7 @@ if 'okvqa' in args.dataset:
         result['question_id'] = int(question_id)
         minigpt4_predict.append(result)
-    file_save_path= os.join(save_path,"okvqa.json")
+    file_save_path= os.path.join(save_path,"okvqa.json")
     with open(file_save_path,'w') as f:
         json.dump(minigpt4_predict, f)
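
The fix above replaces `os.join`, which does not exist in the standard library, with `os.path.join`. A standalone sketch of the corrected save pattern (the output directory and predictions list are illustrative):

```python
import json
import os

save_path = "results"        # illustrative output directory
minigpt4_predict = []        # answers accumulated during evaluation

os.makedirs(save_path, exist_ok=True)
# os.path.join builds the output path portably; os.join would raise AttributeError.
file_save_path = os.path.join(save_path, "okvqa.json")
with open(file_save_path, 'w') as f:
    json.dump(minigpt4_predict, f)
```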
@@ -245,7 +245,7 @@ if 'hm' in args.dataset:
         if answer == label:
             count+=1
         total+=1
     print('hm val:', count / total * 100, flush=True)
-    file_save_path = os.join(save_path, "hm.json")
+    file_save_path = os.path.join(save_path, "hm.json")
     with open(file_save_path,'w') as f: