model:
  arch: mini_gpt4
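
  # The vision side follows BLIP-2: a ViT image encoder plus a Q-Former,
  # both loaded from pretrained weights and kept frozen here.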
  # vit encoder
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: False
  vit_precision: "fp16"
  freeze_vit: True
  freeze_qformer: True
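
  # Q-Former checkpoint: BLIP-2 weights pretrained with FlanT5-XXL; its 32
  # query tokens carry the visual features handed on to the language model.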
  # Q-Former
  q_former_model: "/mnt/bn/bykang/chixma/data/pretrained_models/blip2_pretrained_flant5xxl.pth"
  num_query_token: 32
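
  # Language model: local path to Vicuna-13B (v0) weights; in the standard
  # MiniGPT-4 setup the LLM stays frozen and only a linear projection is trained.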
  # Vicuna
  llama_model: "/mnt/bn/bykang/chixma/data/pretrained_models/vicuna-13b-v0/"

  # generation configs
  prompt: ""
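
# Preprocessing: BLIP-2 image transforms at 224x224 for train/eval and the
# BLIP caption text processor for both splits.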
preprocess:
  vis_processor:
    train:
      name: "blip2_image_train"
      image_size: 224
    eval:
      name: "blip2_image_eval"
      image_size: 224
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"
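
# Usage sketch (assumption, not part of this repo's tooling): the file can be
# inspected with a generic YAML/OmegaConf loader, e.g.
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("minigpt4.yaml")   # hypothetical file name
#   print(cfg.model.arch)                   # -> "mini_gpt4"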