Use separate llama-cpp-python packages for GGML support

This commit is contained in:
jllllll 2023-08-26 09:15:11 -05:00
parent 6e6431e73f
commit 4a999e3bcd
No known key found for this signature in database
GPG key ID: 7FCD00C417935797
7 changed files with 74 additions and 17 deletions

View file

@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
if path.is_file():
model_file = path
else:
model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0]
logger.info(f"llama.cpp weights detected: {model_file}")
model, tokenizer = LlamaCppModel.from_pretrained(model_file)