Adapt to the new model names

This commit is contained in:
oobabooga 2023-03-29 21:47:36 -03:00
parent 0345e04249
commit 1cb9246160
6 changed files with 18 additions and 25 deletions

View file

@@ -51,11 +51,12 @@ def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exc
def load_quantized(model_name):
if not shared.args.model_type:
# Try to determine model type from model name
if model_name.lower().startswith(('llama', 'alpaca')):
name = model_name.lower()
if any((k in name for k in ['llama', 'alpaca'])):
model_type = 'llama'
elif model_name.lower().startswith(('opt', 'galactica')):
elif any((k in name for k in ['opt-', 'galactica'])):
model_type = 'opt'
elif model_name.lower().startswith(('gpt-j', 'pygmalion-6b')):
elif any((k in name for k in ['gpt-j', 'pygmalion-6b'])):
model_type = 'gptj'
else:
print("Can't determine model type from model name. Please specify it manually using --model_type "

View file

@@ -41,7 +41,7 @@ def load_model(model_name):
print(f"Loading {model_name}...")
t0 = time.time()
shared.is_RWKV = model_name.lower().startswith('rwkv-')
shared.is_RWKV = 'rwkv-' in model_name.lower()
# Default settings
if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.wbits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
@@ -159,7 +159,7 @@ def load_model(model_name):
model = AutoModelForCausalLM.from_pretrained(checkpoint, **params)
# Loading the tokenizer
if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path(f"{shared.args.model_dir}/gpt-j-6B/").exists():
if any((k in shared.model_name.lower() for k in ['gpt4chan', 'gpt-4chan'])) and Path(f"{shared.args.model_dir}/gpt-j-6B/").exists():
tokenizer = AutoTokenizer.from_pretrained(Path(f"{shared.args.model_dir}/gpt-j-6B/"))
else:
tokenizer = AutoTokenizer.from_pretrained(Path(f"{shared.args.model_dir}/{shared.model_name}/"))

View file

@@ -37,10 +37,6 @@ settings = {
'chat_generation_attempts': 1,
'chat_generation_attempts_min': 1,
'chat_generation_attempts_max': 5,
'name1_pygmalion': 'You',
'name2_pygmalion': 'Kawaii',
'context_pygmalion': "Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n<START>",
'stop_at_newline_pygmalion': False,
'default_extensions': [],
'chat_default_extensions': ["gallery"],
'presets': {

View file

@@ -42,7 +42,7 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
def decode(output_ids):
# Open Assistant relies on special tokens like <|endoftext|>
if re.match('(oasst|galactica)-*', shared.model_name.lower()):
if re.match('.*(oasst|galactica)-*', shared.model_name.lower()):
return shared.tokenizer.decode(output_ids, skip_special_tokens=False)
else:
reply = shared.tokenizer.decode(output_ids, skip_special_tokens=True)
@@ -77,10 +77,10 @@ def fix_galactica(s):
def formatted_outputs(reply, model_name):
if not (shared.args.chat or shared.args.cai_chat):
if model_name.lower().startswith('galactica'):
if 'galactica' in model_name.lower():
reply = fix_galactica(reply)
return reply, reply, generate_basic_html(reply)
elif model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')):
elif any((k in shared.model_name.lower() for k in ['gpt4chan', 'gpt-4chan'])):
reply = fix_gpt4chan(reply)
return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
else: