Add LLaMA 4-bit support

oobabooga 2023-03-09 15:50:26 -03:00
parent 33414478bf
commit 828a524f9a
3 changed files with 23 additions and 2 deletions

modules/models.py

@@ -1,5 +1,6 @@
 import json
 import os
+import sys
 import time
 import zipfile
 from pathlib import Path
@@ -41,7 +42,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
+    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.load_in_4bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -86,6 +87,24 @@ def load_model(model_name):
 
         return model, tokenizer
 
+    # 4-bit LLaMA
+    elif shared.args.load_in_4bit:
+        sys.path.append(os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+
+        from llama import load_quant
+
+        path_to_model = Path(f'models/{model_name}')
+        pt_model = ''
+        if path_to_model.name.lower().startswith('llama-7b'):
+            pt_model = 'llama-7b-4bit.pt'
+        if path_to_model.name.lower().startswith('llama-13b'):
+            pt_model = 'llama-13b-4bit.pt'
+        if path_to_model.name.lower().startswith('llama-30b'):
+            pt_model = 'llama-30b-4bit.pt'
+
+        model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
+        model = model.to(torch.device('cuda:0'))
+
     # Custom
     else:
         command = "AutoModelForCausalLM.from_pretrained"
@@ -159,3 +178,4 @@ def load_soft_prompt(name):
         shared.soft_prompt_tensor = tensor
 
     return name
+
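
On the CLI side, shared.args.load_in_4bit implies a new argparse flag in modules/shared.py, one of the three changed files but not shown in this diff. A hedged sketch of what that one-line addition presumably looks like, with the help text assumed:

    # Assumed flag definition in modules/shared.py (help text is a guess).
    parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision (LLaMA only, via GPTQ-for-LLaMa).')

With that flag in place, the new path would be exercised with something like: python server.py --model llama-7b --load-in-4bit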