# MiniGPT-4/eval_scripts/scienceqa_eval.yaml
model:
  arch: minigpt4
  model_type: pretrain_llama2
  max_txt_len: 160
  end_sym: "</s>"
  low_resource: True
  prompt_template: '[INST] {} [/INST] '
  ckpt: 'please set this value to the path of pretrained checkpoint'
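  # Notes on the model block (descriptive, based on how MiniGPT-4 normally
  # consumes these keys): low_resource: True is the reduced-memory path, which
  # in MiniGPT-4 loads the language model in 8-bit precision; prompt_template
  # wraps each query in the LLaMA-2 chat [INST] ... [/INST] format, and
  # end_sym "</s>" is the LLaMA-2 end-of-sequence token. ckpt must be set to
  # the MiniGPT-4 checkpoint path before this config can be run.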
datasets:
  coco_vqa: # not used
    type: eval
    vis_processor:
      eval:
        name: "blip2_image_eval"
        image_size: 336
    text_processor:
      eval:
        name: "blip_caption"
run:
  task: scienceqa

  # optimization-specific
  batch_size_train: 16
  batch_size_eval: 32
  num_workers: 8

  # inference-specific
  num_ans_candidates: 5
  max_len: 10
  min_len: 1
  num_beams: 5
  inference_method: "generate"
  prompt: "Question: {} Short answer:"

  seed: 42
  output_dir: "results"
  evaluate: True
  test_splits: ["val"]

  # distribution-specific
  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
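# Usage sketch: fill in model.ckpt above, then launch evaluation. The exact
# entrypoint for the scienceqa task is an assumption here (modelled on the
# repo's other eval scripts, which take the config via --cfg-path), e.g.:
#
#   torchrun --nproc_per_node 1 eval_scripts/eval_vqa.py \
#       --cfg-path eval_scripts/scienceqa_eval.yaml
#
# With world_size: 1 and dist_url: "env://", torchrun supplies the rendezvous
# variables (MASTER_ADDR, MASTER_PORT, RANK, WORLD_SIZE) that the distributed
# init reads from the environment.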