fix the chat template and reduction error in v1 training

Deyao Zhu 2023-10-21 16:32:54 +03:00
parent 111d849b72
commit 99383a919f
2 changed files with 4 additions and 4 deletions

minigpt4/models/base_model.py

@@ -13,17 +13,17 @@ from omegaconf import OmegaConf
 import numpy as np
 import torch
 import torch.nn as nn
-from transformers import BertTokenizer, LlamaTokenizer
-from transformers.models.llama.modeling_llama import LlamaForCausalLM
+from transformers import LlamaTokenizer
 from peft import (
     LoraConfig,
     get_peft_model,
     prepare_model_for_int8_training,
 )
-from minigpt4.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
+from minigpt4.common.dist_utils import download_cached_file
 from minigpt4.common.utils import get_abs_path, is_url
 from minigpt4.models.eva_vit import create_eva_vit_g
+from minigpt4.models.modeling_llama import LlamaForCausalLM
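
The swapped import is the "reduction error" half of the fix: the stock transformers LlamaForCausalLM always mean-reduces its cross-entropy loss, while the repo's minigpt4/models/modeling_llama wrapper lets the training loop choose the reduction mode. Below is a minimal sketch of that pattern, assuming a subclass that simply recomputes the loss itself; it is an illustration, not the repo's exact code.

from torch.nn import CrossEntropyLoss
from transformers.models.llama.modeling_llama import (
    LlamaForCausalLM as LlamaForCausalLMOrig,
)


class LlamaForCausalLM(LlamaForCausalLMOrig):
    def forward(self, input_ids=None, labels=None, reduction="mean", **kwargs):
        # Let the base class produce logits; skip its built-in (mean-only)
        # loss by not passing labels through.
        outputs = super().forward(input_ids=input_ids, **kwargs)
        if labels is None:
            return outputs

        # Standard causal-LM shift: position t predicts token t + 1.
        shift_logits = outputs.logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()

        loss_fct = CrossEntropyLoss(reduction=reduction)
        loss = loss_fct(
            shift_logits.view(-1, shift_logits.size(-1)),
            shift_labels.view(-1),
        )
        if reduction == "none":
            # Fold the flat per-token losses back into one mean loss per
            # sample, so the trainer can weight samples individually.
            loss = loss.view(shift_logits.size(0), -1).mean(dim=1)
        outputs.loss = loss
        return outputs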

minigpt4/models/minigpt_base.py

@@ -236,7 +236,7 @@ class MiniGPTBase(BaseModel):
         else:
             instruction = None
 
-        if self.chat_template:
+        if hasattr(self, 'chat_template') and self.chat_template:
             instruction = [self.prompt_template.format(instruct) for instruct in instruction]
 
         if 'length' in samples:
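
The hasattr guard is the chat-template half of the fix: v1-style configs never set chat_template on the model, so reading self.chat_template directly raised AttributeError during v1 training. The stripped-down sketch below shows the behavior; the class body and the "[INST] {} [/INST]" template are assumptions for illustration, only the guard line comes from the diff.

class MiniGPTBase:
    """Hypothetical minimal stand-in for the real MiniGPTBase."""

    def __init__(self, chat_template=None, prompt_template="[INST] {} [/INST]"):
        self.prompt_template = prompt_template
        if chat_template is not None:
            # v2-style configs set the attribute; v1 configs leave it unset.
            self.chat_template = chat_template

    def wrap_instructions(self, instruction):
        # Before the fix, `if self.chat_template:` raised AttributeError on
        # v1 models that never defined the attribute.
        if hasattr(self, 'chat_template') and self.chat_template:
            instruction = [self.prompt_template.format(i) for i in instruction]
        return instruction


v1 = MiniGPTBase()                    # no chat_template attribute at all
v2 = MiniGPTBase(chat_template=True)  # wraps instructions in the template

assert v1.wrap_instructions(["describe the image"]) == ["describe the image"]
assert v2.wrap_instructions(["describe the image"]) == ["[INST] describe the image [/INST]"]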