Use separate llama-cpp-python packages for GGML support
parent 6e6431e73f
commit 4a999e3bcd

7 changed files with 74 additions and 17 deletions
@@ -9,23 +9,38 @@ from transformers.modeling_outputs import CausalLMOutputWithPast
 
 from modules import RoPE, shared
 from modules.logging_colors import logger
+from modules.utils import is_gguf
 
 import llama_cpp
 
+try:
+    import llama_cpp_ggml
+except:
+    llama_cpp_ggml = llama_cpp
+
 if torch.cuda.is_available() and not torch.version.hip:
     try:
         import llama_cpp_cuda
     except:
         llama_cpp_cuda = None
+    try:
+        import llama_cpp_ggml_cuda
+    except:
+        llama_cpp_ggml_cuda = llama_cpp_cuda
 else:
     llama_cpp_cuda = None
+    llama_cpp_ggml_cuda = None
 
 
-def llama_cpp_lib():
-    if shared.args.cpu or llama_cpp_cuda is None:
-        return llama_cpp
-    else:
-        return llama_cpp_cuda
+def llama_cpp_lib(model_file: Union[str, Path] = None):
+    if model_file is not None:
+        gguf_model = is_gguf(model_file)
+    else:
+        gguf_model = True
+    if shared.args.cpu or llama_cpp_cuda is None:
+        return llama_cpp if gguf_model else llama_cpp_ggml
+    else:
+        return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda
 
 
 class LlamacppHF(PreTrainedModel):
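The backend selection above is easy to test in isolation. Below is a hedged, dependency-free sketch of the same decision; `pick_backend` and its flags are illustrative names, not part of this commit, and the optional llama-cpp-python packages are represented by plain strings:

```python
# Sketch only: mirrors the decision made by the new llama_cpp_lib() without
# importing any of the optional llama-cpp-python packages.
from pathlib import Path
from typing import Optional, Union


def pick_backend(model_file: Optional[Union[str, Path]] = None,
                 cpu: bool = False,
                 cuda_build_available: bool = True) -> str:
    if model_file is not None:
        # Same four-byte magic test as the is_gguf() helper added below.
        with open(model_file, 'rb') as f:
            gguf_model = f.read(4) == b'GGUF'
    else:
        # No file given: assume GGUF, matching the new default branch.
        gguf_model = True
    if cpu or not cuda_build_available:
        return 'llama_cpp' if gguf_model else 'llama_cpp_ggml'
    return 'llama_cpp_cuda' if gguf_model else 'llama_cpp_ggml_cuda'
```

In the patch itself the GGML names additionally fall back to the GGUF packages through the try/except imports, so a missing llama_cpp_ggml wheel degrades to llama_cpp rather than raising ImportError.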
@@ -165,7 +180,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = list(path.glob('*.gguf*'))[0]
+            model_file = (list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin')))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
@@ -193,7 +208,7 @@ class LlamacppHF(PreTrainedModel):
             'logits_all': True,
         }
 
-        Llama = llama_cpp_lib().Llama
+        Llama = llama_cpp_lib(model_file).Llama
         model = Llama(**params)
 
         return LlamacppHF(model)
@@ -1,5 +1,7 @@
 import re
 from functools import partial
+from pathlib import Path
+from typing import Union
 
 import torch
@@ -7,23 +9,38 @@ from modules import RoPE, shared
 from modules.callbacks import Iteratorize
 from modules.logging_colors import logger
 from modules.text_generation import get_max_prompt_length
+from modules.utils import is_gguf
 
 import llama_cpp
 
+try:
+    import llama_cpp_ggml
+except:
+    llama_cpp_ggml = llama_cpp
+
 if torch.cuda.is_available() and not torch.version.hip:
     try:
         import llama_cpp_cuda
     except:
         llama_cpp_cuda = None
+    try:
+        import llama_cpp_ggml_cuda
+    except:
+        llama_cpp_ggml_cuda = llama_cpp_cuda
 else:
     llama_cpp_cuda = None
+    llama_cpp_ggml_cuda = None
 
 
-def llama_cpp_lib():
-    if shared.args.cpu or llama_cpp_cuda is None:
-        return llama_cpp
-    else:
-        return llama_cpp_cuda
+def llama_cpp_lib(model_file: Union[str, Path] = None):
+    if model_file is not None:
+        gguf_model = is_gguf(model_file)
+    else:
+        gguf_model = True
+    if shared.args.cpu or llama_cpp_cuda is None:
+        return llama_cpp if gguf_model else llama_cpp_ggml
+    else:
+        return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda
 
 
 def ban_eos_logits_processor(eos_token, input_ids, logits):
@@ -41,8 +58,8 @@ class LlamaCppModel:
     @classmethod
     def from_pretrained(self, path):
 
-        Llama = llama_cpp_lib().Llama
-        LlamaCache = llama_cpp_lib().LlamaCache
+        Llama = llama_cpp_lib(str(path)).Llama
+        LlamaCache = llama_cpp_lib(str(path)).LlamaCache
 
         result = self()
         cache_capacity = 0
@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
+        model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0]
 
     logger.info(f"llama.cpp weights detected: {model_file}")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
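A quick hedged illustration of what the widened glob picks up; the directory and file names below are made up:

```python
# Illustrative only: hypothetical file names in a throwaway directory.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as model_dir:
    path = Path(model_dir)
    for name in ('llama-2-7b.Q4_K_M.gguf', 'llama-7b.ggmlv3.q4_0.bin', 'config.json'):
        (path / name).touch()

    # Same expression as in llamacpp_loader(): GGUF matches first, then GGML .bin files.
    candidates = list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin'))
    print([p.name for p in candidates])  # ['llama-2-7b.Q4_K_M.gguf', 'llama-7b.ggmlv3.q4_0.bin']
```

Because the GGUF list comes first in the concatenation, indexing `[0]` prefers a GGUF file when a folder happens to contain both formats.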
@@ -24,9 +24,9 @@ def infer_loader(model_name):
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
         loader = 'AutoGPTQ'
-    elif len(list(path_to_model.glob('*.gguf*'))) > 0:
+    elif len(list(path_to_model.glob('*.gguf*')) + list(path_to_model.glob('*ggml*.bin'))) > 0:
         loader = 'llama.cpp'
-    elif re.match(r'.*\.gguf', model_name.lower()):
+    elif re.match(r'.*\.gguf|.*ggml.*\.bin', model_name.lower()):
         loader = 'llama.cpp'
     elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
         loader = 'RWKV'
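And a small hedged check of the widened regex against a few hypothetical file names:

```python
# Illustrative only: the file names are hypothetical.
import re

pattern = r'.*\.gguf|.*ggml.*\.bin'
for name in ('llama-2-7b.Q4_K_M.gguf', 'llama-7b.ggmlv3.q4_0.bin', 'model.safetensors'):
    print(name, bool(re.match(pattern, name.lower())))
# llama-2-7b.Q4_K_M.gguf True
# llama-7b.ggmlv3.q4_0.bin True
# model.safetensors False
```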
@@ -2,6 +2,7 @@ import os
 import re
 from datetime import datetime
 from pathlib import Path
+from typing import Union
 
 from modules import shared
 from modules.logging_colors import logger
@@ -124,3 +125,11 @@ def get_datasets(path: str, ext: str):
 
 def get_available_chat_styles():
     return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
+
+
+# Determines if a llama.cpp model is in GGUF format
+# Copied from ctransformers utils.py
+def is_gguf(path: Union[str, Path]) -> bool:
+    path = str(Path(path).resolve())
+    with open(path, "rb") as f:
+        magic = f.read(4)
+
+    return magic == "GGUF".encode()
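Finally, a hedged illustration of the magic-byte test that is_gguf() performs; the files below are throwaway stand-ins, not real model weights:

```python
# Illustrative only: fabricated files showing what the four-byte check reacts to.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    gguf_file = Path(tmp) / 'fake.gguf'
    ggml_file = Path(tmp) / 'fake.ggmlv3.q4_0.bin'
    gguf_file.write_bytes(b'GGUF' + b'\x00' * 16)  # GGUF weights begin with this magic
    ggml_file.write_bytes(b'tjgg' + b'\x00' * 16)  # stand-in bytes; anything but b'GGUF'

    for weights in (gguf_file, ggml_file):
        with open(weights, 'rb') as f:
            print(weights.name, f.read(4) == b'GGUF')  # True for the .gguf file only
```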