Use separate llama-cpp-python packages for GGML support
parent 6e6431e73f
commit 4a999e3bcd

7 changed files with 74 additions and 17 deletions
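The hunk below extends model-file detection so that legacy GGML checkpoints are picked up alongside GGUF ones. Per the commit title, loading would then dispatch to a separate GGML-capable build of llama-cpp-python for `.bin` weights. A minimal sketch of such a dispatch, assuming a separately installed package named `llama_cpp_ggml` (that module name is an assumption for illustration; only the GGUF/GGML split itself is implied by this diff):

```python
from pathlib import Path


def llama_cpp_lib(model_file: Path):
    """Pick the llama-cpp-python variant for the given weights file.

    Hypothetical helper: the module name 'llama_cpp_ggml' is assumed,
    not confirmed by the diff shown here.
    """
    if model_file.suffix == '.bin':
        # Legacy *ggml*.bin weights need the GGML-compatible package.
        import llama_cpp_ggml
        return llama_cpp_ggml

    # GGUF weights use the regular llama-cpp-python package.
    import llama_cpp
    return llama_cpp
```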
```diff
@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
+        model_file = (list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*')) + list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin')))[0]

     logger.info(f"llama.cpp weights detected: {model_file}")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
```
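Standalone, the new selection logic reads roughly as follows. This is a sketch for illustration: the `find_model_file` name and the explicit error are additions; the original indexes `[0]` directly and would raise `IndexError` when no weights match.

```python
from pathlib import Path


def find_model_file(model_dir: str, model_name: str) -> Path:
    # Illustrative helper mirroring the updated glob in llamacpp_loader.
    path = Path(f'{model_dir}/{model_name}')
    if path.is_file():
        # model_name already points at a single weights file.
        return path

    # Prefer GGUF weights, then fall back to legacy GGML .bin weights.
    candidates = list(path.glob('*.gguf*')) + list(path.glob('*ggml*.bin'))
    if not candidates:
        # The original would raise IndexError here; an explicit error is clearer.
        raise FileNotFoundError(f'No GGUF or GGML weights found under {path}')
    return candidates[0]
```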