Replace ggml occurrences with gguf

This commit is contained in:
oobabooga 2023-08-26 01:06:59 -07:00
parent 1a642c12b5
commit 83640d6f43
5 changed files with 12 additions and 12 deletions

View file

@@ -165,7 +165,7 @@ class LlamacppHF(PreTrainedModel):
if path.is_file():
model_file = path
else:
model_file = list(path.glob('*ggml*.bin'))[0]
model_file = list(path.glob('*.gguf*'))[0]
logger.info(f"llama.cpp weights detected: {model_file}\n")

View file

@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
if path.is_file():
model_file = path
else:
model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin'))[0]
model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
logger.info(f"llama.cpp weights detected: {model_file}")
model, tokenizer = LlamaCppModel.from_pretrained(model_file)

View file

@@ -24,9 +24,9 @@ def infer_loader(model_name):
loader = None
elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
loader = 'AutoGPTQ'
elif len(list(path_to_model.glob('*ggml*.bin'))) > 0:
elif len(list(path_to_model.glob('*.gguf*'))) > 0:
loader = 'llama.cpp'
elif re.match(r'.*ggml.*\.bin', model_name.lower()):
elif re.match(r'.*\.gguf', model_name.lower()):
loader = 'llama.cpp'
elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
loader = 'RWKV'