Minor fixes/cosmetics

oobabooga 2023-08-26 22:11:07 -07:00
parent d826bc5d1b
commit 7f5370a272
6 changed files with 23 additions and 10 deletions

modules/llamacpp_hf.py

@@ -37,6 +37,7 @@ def llama_cpp_lib(model_file: Union[str, Path] = None):
         gguf_model = is_gguf(model_file)
     else:
         gguf_model = True
+
     if shared.args.cpu or llama_cpp_cuda is None:
         return llama_cpp if gguf_model else llama_cpp_ggml
     else:
@@ -205,7 +206,7 @@ class LlamacppHF(PreTrainedModel):
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'logits_all': True,
         }
-        if not is_gguf(str(model_file)):
+        if not is_gguf(model_file):
             ggml_params = {
                 'n_gqa': shared.args.n_gqa or None,
                 'rms_norm_eps': shared.args.rms_norm_eps or None,
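For context on what both hunks in this file touch: llama_cpp_lib() picks one of four llama-cpp-python builds depending on whether the model file is GGUF or legacy GGML, and on whether the CUDA wheel imported. A minimal self-contained sketch of that dispatch, with placeholder strings standing in for the four module imports and a cpu argument standing in for shared.args.cpu:

    from pathlib import Path
    from typing import Union

    # Placeholder strings standing in for the four optional imports; in the
    # webui each name is bound to a module, or to None if its wheel is missing.
    llama_cpp = "llama_cpp"                      # GGUF, CPU build
    llama_cpp_cuda = "llama_cpp_cuda"            # GGUF, CUDA build (may be None)
    llama_cpp_ggml = "llama_cpp_ggml"            # legacy GGML, CPU build
    llama_cpp_ggml_cuda = "llama_cpp_ggml_cuda"  # legacy GGML, CUDA build (may be None)

    def is_gguf(path: Union[str, Path]) -> bool:
        # Same four-byte magic check as the utils.py hunk below.
        with open(str(Path(path).resolve()), "rb") as f:
            return f.read(4) == b"GGUF"

    def llama_cpp_lib(model_file: Union[str, Path] = None, cpu: bool = False):
        # `cpu` stands in for shared.args.cpu in the real function.
        gguf_model = is_gguf(model_file) if model_file is not None else True
        if cpu or llama_cpp_cuda is None:
            return llama_cpp if gguf_model else llama_cpp_ggml
        else:
            return llama_cpp_cuda if gguf_model else llama_cpp_ggml_cuda

    print(llama_cpp_lib())  # no file given: assumes GGUF, prefers the CUDA build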

modules/llamacpp_model.py

@@ -37,6 +37,7 @@ def llama_cpp_lib(model_file: Union[str, Path] = None):
         gguf_model = is_gguf(model_file)
     else:
         gguf_model = True
+
     if shared.args.cpu or llama_cpp_cuda is None:
         return llama_cpp if gguf_model else llama_cpp_ggml
     else:
@@ -58,8 +59,8 @@ class LlamaCppModel:
 
     @classmethod
     def from_pretrained(self, path):
-        Llama = llama_cpp_lib(str(path)).Llama
-        LlamaCache = llama_cpp_lib(str(path)).LlamaCache
+        Llama = llama_cpp_lib(path).Llama
+        LlamaCache = llama_cpp_lib(path).LlamaCache
 
         result = self()
         cache_capacity = 0
@@ -93,8 +94,8 @@ class LlamaCppModel:
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
         }
-        if not is_gguf(str(path)):
+        if not is_gguf(path):
             ggml_params = {
                 'n_gqa': shared.args.n_gqa or None,
                 'rms_norm_eps': shared.args.rms_norm_eps or None,
             }
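The same str() removal as in llamacpp_hf.py, and the same GGML-only parameter branch. As a sketch of why that branch exists: n_gqa and rms_norm_eps only need to be passed explicitly for legacy GGML files, since GGUF stores the equivalent metadata in the file header. A toy illustration with made-up values in place of shared.args:

    def build_params(model_is_gguf: bool) -> dict:
        # Hypothetical stand-in for the param assembly in from_pretrained().
        params = {
            'n_ctx': 2048,            # placeholder for shared.args.n_ctx
            'rope_freq_scale': 1.0,   # placeholder for 1.0 / compress_pos_emb
        }
        if not model_is_gguf:
            # Legacy GGML files need these passed in; GGUF files carry the
            # equivalent values in their own metadata.
            params.update({
                'n_gqa': 8,           # placeholder for shared.args.n_gqa
                'rms_norm_eps': 5e-6, # placeholder for shared.args.rms_norm_eps
            })
        return params

    print(build_params(model_is_gguf=True))   # no GGML-only keys
    print(build_params(model_is_gguf=False))  # includes n_gqa and rms_norm_eps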

modules/utils.py

@@ -126,10 +126,14 @@ def get_datasets(path: str, ext: str):
 def get_available_chat_styles():
     return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
 
 
-# Determines if a llama.cpp model is in GGUF format
-# Copied from ctransformers utils.py
 def is_gguf(path: Union[str, Path]) -> bool:
+    '''
+    Determines if a llama.cpp model is in GGUF format
+    Copied from ctransformers utils.py
+    '''
+    path = str(Path(path).resolve())
     with open(path, "rb") as f:
         magic = f.read(4)
+
     return magic == "GGUF".encode()
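Since the str() removals in the other two files hinge on this helper now normalizing its own input, here is a quick self-contained check of the magic-byte logic; the temporary file and its padding bytes are made up for the test:

    import tempfile
    from pathlib import Path
    from typing import Union

    def is_gguf(path: Union[str, Path]) -> bool:
        # Mirrors the version above.
        path = str(Path(path).resolve())
        with open(path, "rb") as f:
            magic = f.read(4)

        return magic == "GGUF".encode()

    # Write the GGUF magic into a throwaway file and probe it both ways.
    with tempfile.NamedTemporaryFile(suffix=".gguf", delete=False) as f:
        f.write(b"GGUF" + b"\x00" * 12)  # magic followed by dummy bytes
        name = f.name

    assert is_gguf(name)        # str argument works
    assert is_gguf(Path(name))  # Path works too, hence the str() removals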