Make AutoGPTQ the default again

This change is purely for compatibility with more models.
You should still use ExLlama_HF for LLaMA models.
This commit is contained in:
oobabooga 2023-07-15 22:29:23 -07:00
parent 5e3f7e00a9
commit 27a84b4e04
2 changed files with 3 additions and 3 deletions

View file

@ -23,7 +23,7 @@ def infer_loader(model_name):
if not path_to_model.exists():
loader = None
elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
loader = 'ExLlama_HF'
loader = 'AutoGPTQ'
elif len(list(path_to_model.glob('*ggml*.bin'))) > 0:
loader = 'llama.cpp'
elif re.match('.*ggml.*\.bin', model_name.lower()):