import logging
import random

import torch
from torch.cuda.amp import autocast as autocast
import torch.nn as nn

from minigpt4.common.registry import registry
from minigpt4.models.base_model import BaseModel, disabled_train
from minigpt4.models.minigpt_base import MiniGPTBase

from transformers.models.llama.modeling_llama import LlamaForCausalLM
from transformers import LlamaTokenizer

from peft import (
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,
    set_peft_model_state_dict,
)


@registry.register_model("mini_gpt4")
class MiniGPT4(MiniGPTBase):
    """
    MiniGPT-4 model
    """

    PRETRAINED_MODEL_CONFIG_DICT = {
        "pretrain_vicuna0": "configs/models/minigpt4_vicuna0.yaml",
        "pretrain_llama2": "configs/models/minigpt4_llama2.yaml",
    }

    def encode_img(self, image):
        """Encode a batch of images into LLaMA input embeddings and an attention mask."""
        device = image.device

        if len(image.shape) > 4:
            # Flatten any extra leading dims (e.g. multiple images per sample)
            # into the batch dimension.
            image = image.reshape(-1, *image.shape[-3:])

        with self.maybe_autocast():
            image_embeds = self.ln_vision(self.visual_encoder(image)).to(device)
            if self.has_qformer:
                # Compress the ViT patch features into a fixed number of learned
                # query embeddings, then project them to the LLaMA hidden size.
                image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device)

                query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
                query_output = self.Qformer.bert(
                    query_embeds=query_tokens,
                    encoder_hidden_states=image_embeds,
                    encoder_attention_mask=image_atts,
                    return_dict=True,
                )

                inputs_llama = self.llama_proj(query_output.last_hidden_state)
            else:
                # No Q-Former: drop the CLS token, then merge every 4 adjacent
                # patch tokens by concatenating their features along the hidden
                # dim (assumes the patch count is divisible by 4) before projecting.
                image_embeds = image_embeds[:, 1:, :]
                bs, pn, hs = image_embeds.shape
                image_embeds = image_embeds.view(bs, int(pn / 4), int(hs * 4))

                inputs_llama = self.llama_proj(image_embeds)
            # Attention mask of ones over all visual tokens.
            atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device)
        return inputs_llama, atts_llama

    @classmethod
    def from_config(cls, cfg):
        vit_model = cfg.get("vit_model", "eva_clip_g")
        q_former_model = cfg.get(
            "q_former_model",
            "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth",
        )
        img_size = cfg.get("image_size")
        num_query_token = cfg.get("num_query_token")
        llama_model = cfg.get("llama_model")
        drop_path_rate = cfg.get("drop_path_rate", 0)
        use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
        vit_precision = cfg.get("vit_precision", "fp16")
        freeze_vit = cfg.get("freeze_vit", True)
        has_qformer = cfg.get("has_qformer", True)
        freeze_qformer = cfg.get("freeze_qformer", True)
        low_resource = cfg.get("low_resource", False)
        device_8bit = cfg.get("device_8bit", 0)

        prompt_path = cfg.get("prompt_path", "")
        prompt_template = cfg.get("prompt_template", "")
        max_txt_len = cfg.get("max_txt_len", 32)
        end_sym = cfg.get("end_sym", '\n')

        lora_r = cfg.get("lora_r", 0)
        lora_alpha = cfg.get("lora_alpha", 32)

        model = cls(
            vit_model=vit_model,
            q_former_model=q_former_model,
            img_size=img_size,
            drop_path_rate=drop_path_rate,
            use_grad_checkpoint=use_grad_checkpoint,
            vit_precision=vit_precision,
            freeze_vit=freeze_vit,
            has_qformer=has_qformer,
            freeze_qformer=freeze_qformer,
            num_query_token=num_query_token,
            llama_model=llama_model,
            prompt_path=prompt_path,
            prompt_template=prompt_template,
            max_txt_len=max_txt_len,
            end_sym=end_sym,
            low_resource=low_resource,
            device_8bit=device_8bit,
            lora_r=lora_r,
            lora_alpha=lora_alpha,
        )

        ckpt_path = cfg.get("ckpt", "")  # load weights of MiniGPT-4
        if ckpt_path:
            print("Load MiniGPT-4 Checkpoint: {}".format(ckpt_path))
            ckpt = torch.load(ckpt_path, map_location="cpu")
            msg = model.load_state_dict(ckpt['model'], strict=False)

        return model
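

if __name__ == "__main__":
    # Hypothetical usage sketch, not part of the original file: it shows how
    # this class is typically built via the registry plus an OmegaConf config
    # whose keys mirror the cfg.get(...) calls in from_config above. All values
    # below (paths, image size) are placeholder assumptions, not defaults
    # taken from this repo's configs.
    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "image_size": 224,                            # assumed EVA-CLIP input size
        "num_query_token": 32,                        # Q-Former query count used by BLIP-2
        "llama_model": "/path/to/vicuna-7b-weights",  # placeholder path
        "prompt_path": "",                            # no prompt list
        "ckpt": "",                                   # no fine-tuned MiniGPT-4 checkpoint
    })

    model_cls = registry.get_model_class("mini_gpt4")  # name registered above
    model = model_cls.from_config(cfg)
    model.eval()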