Mirror of https://github.com/Vision-CAIR/MiniGPT-4.git, synced 2025-04-05 18:40:46 +00:00
Changes to be committed:
    modified: eval_configs/minigpt4_eval.yaml
    modified: eval_configs/minigpt4_llama2_eval.yaml
    modified: minigpt4/configs/datasets/cc_sbu/align.yaml
    modified: minigpt4/configs/models/minigpt4_llama2.yaml
    modified: train_configs/minigpt4_llama2_stage2_finetune.yaml
parent 871918f758
commit ff3ea8a312
--- a/eval_configs/minigpt4_eval.yaml
+++ b/eval_configs/minigpt4_eval.yaml
@@ -5,7 +5,7 @@ model:
   end_sym: "###"
   low_resource: True
   prompt_template: '###Human: {} ###Assistant: '
-  ckpt: '/path/to/checkpoint/'
+  ckpt: 'minigpt4/output/minigpt4_stage2_finetune/20230828181/checkpoint_19.pth'


 datasets:
@@ -13,7 +13,7 @@ datasets:
     vis_processor:
       train:
         name: "blip2_image_eval"
-        image_size: 224
+        image_size: 512
     text_processor:
       train:
         name: "blip_caption"
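Note on eval_configs/minigpt4_eval.yaml: both hunks swap placeholders for concrete values. ckpt now points at a locally produced stage-2 checkpoint, and the eval-time image size is raised from 224 to 512 to match the model config changed below. A minimal sanity check before running eval (a sketch assuming the omegaconf package, which this repo's config loading builds on):

    from pathlib import Path
    from omegaconf import OmegaConf

    # Load the edited eval config and confirm the hard-coded checkpoint exists.
    cfg = OmegaConf.load("eval_configs/minigpt4_eval.yaml")
    ckpt = Path(cfg.model.ckpt)
    assert ckpt.is_file(), f"checkpoint not found: {ckpt}"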
--- a/eval_configs/minigpt4_llama2_eval.yaml
+++ b/eval_configs/minigpt4_llama2_eval.yaml
@@ -5,7 +5,7 @@ model:
   end_sym: "</s>"
   low_resource: True
   prompt_template: '[INST] {} [/INST] '
-  ckpt: '/path/to/checkpoint/'
+  ckpt: 'minigpt4/output/minigpt4_stage2_finetune/DATETIME/CHECKPOINT'


 datasets:
@@ -13,7 +13,7 @@ datasets:
     vis_processor:
       train:
         name: "blip2_image_eval"
-        image_size: 224
+        image_size: 512
     text_processor:
       train:
         name: "blip_caption"
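Note on eval_configs/minigpt4_llama2_eval.yaml: unlike the Vicuna config above, the new ckpt value still contains the DATETIME and CHECKPOINT placeholders, so it must be edited before use. A small guard (a hypothetical snippet, not part of the repo) that fails fast while the placeholders are still in place:

    from omegaconf import OmegaConf

    # Refuse to proceed until model.ckpt has been filled in by hand.
    cfg = OmegaConf.load("eval_configs/minigpt4_llama2_eval.yaml")
    if any(tok in cfg.model.ckpt for tok in ("DATETIME", "CHECKPOINT")):
        raise SystemExit(f"edit model.ckpt first: {cfg.model.ckpt}")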
--- a/minigpt4/configs/datasets/cc_sbu/align.yaml
+++ b/minigpt4/configs/datasets/cc_sbu/align.yaml
@@ -2,4 +2,4 @@ datasets:
   cc_sbu_align:
     data_type: images
     build_info:
-      storage: /path/to/cc_sbu_align/
+      storage: caped_images
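Note on minigpt4/configs/datasets/cc_sbu/align.yaml: storage switches from an absolute placeholder to the relative directory caped_images, which will presumably be resolved against the directory the training script is launched from (an assumption; the commit does not show how the path is normalized). A quick check that the directory actually holds images:

    from pathlib import Path

    # Value of build_info.storage, taken relative to the launch directory.
    root = Path("caped_images")
    count = sum(1 for p in root.rglob("*") if p.suffix.lower() in {".jpg", ".jpeg", ".png"})
    print(f"{count} images found under {root}")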
--- a/minigpt4/configs/models/minigpt4_llama2.yaml
+++ b/minigpt4/configs/models/minigpt4_llama2.yaml
@@ -2,26 +2,26 @@ model:
   arch: mini_gpt4

   # vit encoder
-  image_size: 224
+  image_size: 512
   drop_path_rate: 0
   use_grad_checkpoint: False
-  vit_precision: "fp16"
+  vit_precision: "fp32"
   freeze_vit: True
   has_qformer: False

   # generation configs
   prompt: ""

-  llama_model: "/path/to/llama2/weight"
+  llama_model: "Llama-2-7b-chat"

 preprocess:
     vis_processor:
         train:
           name: "blip2_image_train"
-          image_size: 224
+          image_size: 512
         eval:
           name: "blip2_image_eval"
-          image_size: 224
+          image_size: 512
     text_processor:
         train:
           name: "blip_caption"
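Note on minigpt4/configs/models/minigpt4_llama2.yaml: three substantive changes here. The ViT input resolution goes from 224 to 512 everywhere (model plus both preprocess entries, keeping train and eval consistent with the eval configs above); the ViT runs in fp32 instead of fp16, which roughly doubles the vision encoder's weight memory but avoids fp16 numeric issues on some GPUs; and llama_model now names Llama-2-7b-chat instead of a placeholder path. A quick way to confirm the language-model path resolves (a sketch assuming the transformers library and that "Llama-2-7b-chat" is reachable as a local folder or hub id):

    from transformers import AutoTokenizer

    # If this loads, the llama_model value in the config should resolve too.
    tok = AutoTokenizer.from_pretrained("Llama-2-7b-chat")
    print(tok.eos_token)  # expect "</s>", matching end_sym in the LLaMA-2 configs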
--- a/train_configs/minigpt4_llama2_stage2_finetune.yaml
+++ b/train_configs/minigpt4_llama2_stage2_finetune.yaml
@@ -6,7 +6,7 @@ model:
   end_sym: "</s>"
   prompt_path: "prompts/alignment.txt"
   prompt_template: '[INST] {} [/INST] '
-  ckpt: '/path/to/stage1/checkpoint/'
+  ckpt: 'pretrained_minigpt4_llama2_7b.pth'


 datasets:
@@ -14,7 +14,7 @@ datasets:
     vis_processor:
       train:
         name: "blip2_image_train"
-        image_size: 224
+        image_size: 512
     text_processor:
       train:
         name: "blip_caption"
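Note on train_configs/minigpt4_llama2_stage2_finetune.yaml: the stage-2 run now initializes from pretrained_minigpt4_llama2_7b.pth, referenced by a relative path, and trains at 512x512, matching the model config above. To inspect the checkpoint before launching (assuming PyTorch; the key layout mentioned below is typical of MiniGPT-4 checkpoints, not verified from this commit):

    import torch

    # Load on CPU just to look at the structure, not to train.
    state = torch.load("pretrained_minigpt4_llama2_7b.pth", map_location="cpu")
    print(list(state.keys()))  # MiniGPT-4 checkpoints usually wrap weights under a "model" key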