diff --git a/README.md b/README.md
index d24923d..c8a80ab 100644
--- a/README.md
+++ b/README.md
@@ -97,7 +97,7 @@ Download the pretrained model checkpoints
 
 | MiniGPT-v2 (after stage-2) | MiniGPT-v2 (after stage-3) | MiniGPT-v2 (online developing demo)|
 |------------------------------|------------------------------|------------------------------|
-| [Download](https://drive.google.com/file/d/1Vi_E7ZtZXRAQcyz4f8E6LtLh2UXABCmu/view?usp=sharing) |[Download](https://drive.google.com/file/d/1jAbxUiyl04SFJMN4sF1vvUU69Etuz4qa/view?usp=sharing) | [Download](https://drive.google.com/file/d/1aVbfW7nkCSYx99_vCRyP1sOlQiWVSnAl/view?usp=sharing) |
+| [Download](https://drive.google.com/file/d/1Vi_E7ZtZXRAQcyz4f8E6LtLh2UXABCmu/view?usp=sharing) |[Download](https://drive.google.com/file/d/1HkoUUrjzFGn33cSiUkI-KcT-zysCynAz/view?usp=sharing) | [Download](https://drive.google.com/file/d/1aVbfW7nkCSYx99_vCRyP1sOlQiWVSnAl/view?usp=sharing) |
 
 For **MiniGPT-v2**, set the path to the pretrained checkpoint in the evaluation config file
diff --git a/eval_configs/minigptv2_benchmark_evaluation.yaml b/eval_configs/minigptv2_benchmark_evaluation.yaml
index 4fcb91b..8eb5ced 100644
--- a/eval_configs/minigptv2_benchmark_evaluation.yaml
+++ b/eval_configs/minigptv2_benchmark_evaluation.yaml
@@ -5,8 +5,8 @@ model:
   end_sym: "</s>"
   low_resource: False
   prompt_template: '[INST] {} [/INST]'
-  llama_model: "/ibex/project/c2133/llama_v2/llama-2-7b-chat-pytorch_update"
-  ckpt: "/ibex/project/c2133/minigpt_checkpoints/checkpoint_stage3_correct/checkpoint_10.pth"
+  llama_model: ""
+  ckpt: ""
   lora_r: 64
   lora_alpha: 16
@@ -23,8 +23,8 @@ datasets:
 
 evaluation_datasets:
   refcoco:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset
-    img_path: /ibex/ai/reference/CV/COCO/cocoapi/data/2014/images/jpeg/train
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
     batch_size: 10
   refcocog:
@@ -43,15 +43,15 @@ evaluation_datasets:
     max_new_tokens: 20
     batch_size: 10
   okvqa:
-    eval_file_path: /ibex/project/c2133/blip_dataset/ok_vqa_copy
-    img_path: /ibex/ai/reference/CV/COCO/cocoapi/data/2017/images/jpeg/train
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
     batch_size: 10
   vizwiz:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset/vizwiz/val.json
-    img_path: /ibex/project/c2133/minigpt4_v2_dataset/vizwiz/val
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
-    batch_size: 10
+    batch_size: 100
   iconvqa:
     eval_file_path: /path/to/eval/annotation/path
     img_path: /path/to/eval/image/path
@@ -63,10 +63,10 @@ evaluation_datasets:
     max_new_tokens: 20
     batch_size: 10
   hm:
-    eval_file_path: /ibex/project/c2133/minigpt4_v2_dataset/hateful_meme/dev.jsonl
-    img_path: /ibex/project/c2133/minigpt4_v2_dataset/hateful_meme
+    eval_file_path: /path/to/eval/annotation/path
+    img_path: /path/to/eval/image/path
     max_new_tokens: 20
-    batch_size: 10
+    batch_size: 100
 
 run:
   task: image_text_pretrain
diff --git a/eval_scripts/eval_vqa.py b/eval_scripts/eval_vqa.py
index 9e40c43..75f0d91 100644
--- a/eval_scripts/eval_vqa.py
+++ b/eval_scripts/eval_vqa.py
@@ -65,7 +65,7 @@ if 'okvqa' in args.dataset:
             result['question_id'] = int(question_id)
             minigpt4_predict.append(result)
 
-    file_save_path= os.join(save_path,"okvqa.json")
+    file_save_path= os.path.join(save_path,"okvqa.json")
     with open(file_save_path,'w') as f:
         json.dump(minigpt4_predict, f)
 
@@ -245,7 +245,7 @@ if 'hm' in args.dataset:
             if answer == label:
                 count+=1
             total+=1
-
+
     print('hm val:', count / total * 100, flush=True)
     file_save_path = os.path.join(save_path, "hm.json")
     with open(file_save_path,'w') as f: