Remove GGML support

oobabooga 2023-09-11 07:30:56 -07:00
parent cc7b7ba153
commit ed86878f02
15 changed files with 24 additions and 123 deletions

View file

@@ -9,39 +9,23 @@ from transformers.modeling_outputs import CausalLMOutputWithPast
 
 from modules import RoPE, shared
 from modules.logging_colors import logger
-from modules.utils import is_gguf
 
 import llama_cpp
 
-try:
-    import llama_cpp_ggml
-except:
-    llama_cpp_ggml = llama_cpp
-
 if torch.cuda.is_available() and not torch.version.hip:
     try:
         import llama_cpp_cuda
     except:
         llama_cpp_cuda = None
-    try:
-        import llama_cpp_ggml_cuda
-    except:
-        llama_cpp_ggml_cuda = llama_cpp_cuda
 else:
     llama_cpp_cuda = None
-    llama_cpp_ggml_cuda = None
 
 
-def llama_cpp_lib(model_file: Union[str, Path] = None):
-    if model_file is not None:
-        gguf_model = is_gguf(model_file)
-    else:
-        gguf_model = True
-
+def llama_cpp_lib():
     if shared.args.cpu or llama_cpp_cuda is None:
-        return llama_cpp if gguf_model else llama_cpp_ggml
+        return llama_cpp
     else:
-        return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda
+        return llama_cpp_cuda
 
 
 class LlamacppHF(PreTrainedModel):
@@ -64,7 +48,7 @@ class LlamacppHF(PreTrainedModel):
                 'n_tokens': self.model.n_tokens,
                 'input_ids': self.model.input_ids.copy(),
                 'scores': self.model.scores.copy(),
-                'ctx': llama_cpp_lib(path).llama_new_context_with_model(model.model, model.params)
+                'ctx': llama_cpp_lib().llama_new_context_with_model(model.model, model.params)
             }
 
     def _validate_model_class(self):
@@ -181,7 +165,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = (list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin')))[0]
+            model_file = list(path.glob('*.gguf'))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
@@ -207,14 +191,7 @@ class LlamacppHF(PreTrainedModel):
             'logits_all': True,
         }
 
-        if not is_gguf(model_file):
-            ggml_params = {
-                'n_gqa': shared.args.n_gqa or None,
-                'rms_norm_eps': shared.args.rms_norm_eps or None,
-            }
-            params = params | ggml_params
-
-        Llama = llama_cpp_lib(model_file).Llama
+        Llama = llama_cpp_lib().Llama
         model = Llama(**params)
 
         return LlamacppHF(model, model_file)
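
The additions above leave a much smaller backend selector in this file. A minimal consolidated sketch of the post-change logic, assembled from the added lines for readability (module names as in the hunk; shared.args.cpu comes from the webui's command-line flags):

import torch

from modules import shared

import llama_cpp

if torch.cuda.is_available() and not torch.version.hip:
    try:
        import llama_cpp_cuda
    except ImportError:
        llama_cpp_cuda = None
else:
    llama_cpp_cuda = None


def llama_cpp_lib():
    # Plain llama_cpp in --cpu mode or when no CUDA wheel is installed,
    # the CUDA build otherwise. The model file no longer matters here.
    if shared.args.cpu or llama_cpp_cuda is None:
        return llama_cpp
    else:
        return llama_cpp_cuda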

View file

@@ -1,7 +1,5 @@
 import re
 from functools import partial
-from pathlib import Path
-from typing import Union
 
 import torch
@@ -9,39 +7,23 @@ from modules import RoPE, shared
 from modules.callbacks import Iteratorize
 from modules.logging_colors import logger
 from modules.text_generation import get_max_prompt_length
-from modules.utils import is_gguf
 
 import llama_cpp
 
-try:
-    import llama_cpp_ggml
-except:
-    llama_cpp_ggml = llama_cpp
-
 if torch.cuda.is_available() and not torch.version.hip:
     try:
         import llama_cpp_cuda
     except:
         llama_cpp_cuda = None
-    try:
-        import llama_cpp_ggml_cuda
-    except:
-        llama_cpp_ggml_cuda = llama_cpp_cuda
 else:
     llama_cpp_cuda = None
-    llama_cpp_ggml_cuda = None
 
 
-def llama_cpp_lib(model_file: Union[str, Path] = None):
-    if model_file is not None:
-        gguf_model = is_gguf(model_file)
-    else:
-        gguf_model = True
-
+def llama_cpp_lib():
     if shared.args.cpu or llama_cpp_cuda is None:
-        return llama_cpp if gguf_model else llama_cpp_ggml
+        return llama_cpp
     else:
-        return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda
+        return llama_cpp_cuda
 
 
 def ban_eos_logits_processor(eos_token, input_ids, logits):
@@ -59,8 +41,8 @@ class LlamaCppModel:
     @classmethod
     def from_pretrained(self, path):
 
-        Llama = llama_cpp_lib(path).Llama
-        LlamaCache = llama_cpp_lib(path).LlamaCache
+        Llama = llama_cpp_lib().Llama
+        LlamaCache = llama_cpp_lib().LlamaCache
 
         result = self()
         cache_capacity = 0
@@ -95,13 +77,6 @@ class LlamaCppModel:
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
         }
 
-        if not is_gguf(path):
-            ggml_params = {
-                'n_gqa': shared.args.n_gqa or None,
-                'rms_norm_eps': shared.args.rms_norm_eps or None,
-            }
-            params = params | ggml_params
-
         result.model = Llama(**params)
         if cache_capacity > 0:
             result.model.set_cache(LlamaCache(capacity_bytes=cache_capacity))
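
With GGML gone, from_pretrained() builds a single params dict and passes it straight to Llama. A hedged, self-contained sketch of what that amounts to (placeholder file name; only a few of the webui's parameters are shown):

from llama_cpp import Llama, LlamaCache

# Placeholder path: any GGUF quantization works here; GGML .bin files are no longer picked up.
model = Llama(
    model_path='models/example.Q4_K_M.gguf',
    n_ctx=2048,        # --n_ctx
    n_gpu_layers=0,    # --n-gpu-layers
    seed=0,            # --llama_cpp_seed
)
model.set_cache(LlamaCache(capacity_bytes=2 * 1024**3))  # optional prompt cache, as in the hunk above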

View file

@@ -68,8 +68,6 @@ loaders_and_params = OrderedDict({
     ],
     'llama.cpp': [
         'n_ctx',
-        'n_gqa',
-        'rms_norm_eps',
         'n_gpu_layers',
         'tensor_split',
         'n_batch',
@@ -86,8 +84,6 @@ loaders_and_params = OrderedDict({
     ],
     'llamacpp_HF': [
         'n_ctx',
-        'n_gqa',
-        'rms_norm_eps',
         'n_gpu_layers',
         'tensor_split',
         'n_batch',

View file

@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0]
+        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf'))[0]
 
     logger.info(f"llama.cpp weights detected: {model_file}")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
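
Model discovery now looks only for *.gguf files; the *ggml*.bin glob is gone. An illustrative sketch (hypothetical folder; the explicit error is not in the webui code, which simply indexes the first match):

from pathlib import Path

model_dir = Path('models/my-llama-2-7b')  # hypothetical model folder
gguf_files = list(model_dir.glob('*.gguf'))
if not gguf_files:
    raise FileNotFoundError(f'No .gguf weights found in {model_dir}')

model_file = gguf_files[0]  # same choice as the loader above: first match
print(f'llama.cpp weights detected: {model_file}')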

View file

@@ -24,9 +24,9 @@ def infer_loader(model_name):
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
         loader = 'AutoGPTQ'
-    elif len(list(path_to_model.glob('*.gguf*')) + list(path_to_model.glob('*ggml*.bin'))) > 0:
+    elif len(list(path_to_model.glob('*.gguf'))) > 0:
         loader = 'llama.cpp'
-    elif re.match(r'.*\.gguf|.*ggml.*\.bin', model_name.lower()):
+    elif re.match(r'.*\.gguf', model_name.lower()):
         loader = 'llama.cpp'
     elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
         loader = 'RWKV'
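
With the .*ggml.*\.bin alternative dropped from the pattern, only names containing ".gguf" are routed to the llama.cpp loader by the regex branch above. A quick check of that behaviour (hypothetical file names):

import re

pattern = r'.*\.gguf'  # pattern used by infer_loader() after this change

assert re.match(pattern, 'llama-2-7b.Q4_K_M.gguf'.lower())
assert re.match(pattern, 'llama-2-7b.ggmlv3.q4_0.bin'.lower()) is None  # GGML names no longer match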

View file

@@ -126,8 +126,6 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--tensor_split', type=str, default=None, help="Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17")
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
-parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama-2 70b.')
-parser.add_argument('--rms_norm_eps', type=float, default=0, help='5e-6 is a good value for llama-2 models.')
 
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')

View file

@@ -73,8 +73,6 @@ def list_model_elements():
         'n_gpu_layers',
         'tensor_split',
         'n_ctx',
-        'n_gqa',
-        'rms_norm_eps',
         'llama_cpp_seed',
         'gpu_split',
         'max_seq_len',

View file

@@ -82,8 +82,6 @@ def create_ui():
                         shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=16384, step=256, label="n_ctx", value=shared.args.n_ctx)
                         shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
                         shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
-                        shared.gradio['n_gqa'] = gr.Slider(minimum=0, maximum=16, step=1, label="n_gqa", value=shared.args.n_gqa, info='GGML only (not used by GGUF): Grouped-Query Attention. Must be 8 for llama-2 70b.')
-                        shared.gradio['rms_norm_eps'] = gr.Slider(minimum=0, maximum=1e-5, step=1e-6, label="rms_norm_eps", value=shared.args.rms_norm_eps, info='GGML only (not used by GGUF): 5e-6 is a good value for llama-2 models.')
 
                         shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=str(shared.args.wbits) if shared.args.wbits > 0 else "None")
                         shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=str(shared.args.groupsize) if shared.args.groupsize > 0 else "None")
@@ -128,7 +126,7 @@ def create_ui():
             shared.gradio['autoload_model'] = gr.Checkbox(value=shared.settings['autoload_model'], label='Autoload the model', info='Whether to load the model as soon as it is selected in the Model dropdown.')
 
             shared.gradio['custom_model_menu'] = gr.Textbox(label="Download model or LoRA", info="Enter the Hugging Face username/model path, for instance: facebook/galactica-125m. To specify a branch, add it at the end after a \":\" character like this: facebook/galactica-125m:main. To download a single file, enter its name in the second box.")
-            shared.gradio['download_specific_file'] = gr.Textbox(placeholder="File name (for GGUF/GGML)", show_label=False, max_lines=1)
+            shared.gradio['download_specific_file'] = gr.Textbox(placeholder="File name (for GGUF models)", show_label=False, max_lines=1)
             with gr.Row():
                 shared.gradio['download_model_button'] = gr.Button("Download", variant='primary')
                 shared.gradio['get_file_list'] = gr.Button("Get file list")

View file

@@ -2,7 +2,6 @@ import os
 import re
 from datetime import datetime
 from pathlib import Path
-from typing import Union
 
 from modules import shared
 from modules.logging_colors import logger
@@ -125,15 +124,3 @@ def get_datasets(path: str, ext: str):
 
 def get_available_chat_styles():
     return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
-
-
-def is_gguf(path: Union[str, Path]) -> bool:
-    '''
-    Determines if a llama.cpp model is in GGUF format
-    Copied from ctransformers utils.py
-    '''
-    path = str(Path(path).resolve())
-    with open(path, "rb") as f:
-        magic = f.read(4)
-
-    return magic == "GGUF".encode()