From 27a84b4e0445cc10a70c516851682e7dace47087 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sat, 15 Jul 2023 22:29:23 -0700
Subject: [PATCH] Make AutoGPTQ the default again

Purely for compatibility with more models. You should still use
ExLlama_HF for LLaMA models.
---
 modules/models_settings.py | 2 +-
 server.py                  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index c6803d1..3f37e48 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -23,7 +23,7 @@ def infer_loader(model_name):
     if not path_to_model.exists():
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
-        loader = 'ExLlama_HF'
+        loader = 'AutoGPTQ'
     elif len(list(path_to_model.glob('*ggml*.bin'))) > 0:
         loader = 'llama.cpp'
     elif re.match('.*ggml.*\.bin', model_name.lower()):
diff --git a/server.py b/server.py
index 9b375ef..d3105e4 100644
--- a/server.py
+++ b/server.py
@@ -204,7 +204,7 @@ def create_model_menus():
 
     with gr.Row():
         with gr.Column():
-            shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=["Transformers", "ExLlama_HF", "AutoGPTQ", "llama.cpp", "ExLlama", "llama.cpp_HF", "GPTQ-for-LLaMa"], value=None)
+            shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=["Transformers", "ExLlama_HF", "ExLlama", "AutoGPTQ", "GPTQ-for-LLaMa", "llama.cpp", "llamacpp_HF"], value=None)
             with gr.Box():
                 with gr.Row():
                     with gr.Column():
@@ -223,7 +223,7 @@ def create_model_menus():
                         shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=str(shared.args.groupsize) if shared.args.groupsize > 0 else "None")
                         shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None", "llama", "opt", "gptj"], value=shared.args.model_type or "None")
                         shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
-                        shared.gradio['autogptq_info'] = gr.Markdown('On some systems, AutoGPTQ can be 2x slower than GPTQ-for-LLaMa. You can manually select the GPTQ-for-LLaMa loader above.')
+                        shared.gradio['autogptq_info'] = gr.Markdown('* ExLlama_HF is recommended over AutoGPTQ for models derived from LLaMA.')
             shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
             shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=2048, maximum=16384, step=256, info='Maximum sequence length.', value=shared.args.max_seq_len)
             shared.gradio['compress_pos_emb'] = gr.Slider(label='compress_pos_emb', minimum=1, maximum=8, step=1, info='Positional embeddings compression factor. Should typically be set to max_seq_len / 2048.', value=shared.args.compress_pos_emb)
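
For reviewers: the infer_loader() hunk above only changes which loader is
pre-selected when a quantized GPTQ model is detected; everything else keeps
its previous routing. Below is a minimal sketch of the post-patch selection
order, with a flattened signature assumed for illustration (the real
function reads shared.args.model_dir and the per-model settings dict rather
than taking them as parameters, and its fallback branches are elided here):

    import re
    from pathlib import Path

    def infer_loader(model_name, model_dir, model_settings):
        # Sketch of the branch order after this patch (illustrative only).
        path_to_model = Path(f'{model_dir}/{model_name}')
        if not path_to_model.exists():
            return None
        # A quantize_config.json or a positive `wbits` setting marks a GPTQ
        # model; these now default to AutoGPTQ instead of ExLlama_HF.
        wbits = model_settings.get('wbits')
        if (path_to_model / 'quantize_config.json').exists() or \
                (isinstance(wbits, int) and wbits > 0):
            return 'AutoGPTQ'
        # GGML weights are still routed to llama.cpp.
        if list(path_to_model.glob('*ggml*.bin')) or \
                re.match(r'.*ggml.*\.bin', model_name.lower()):
            return 'llama.cpp'
        return 'Transformers'  # assumed fallback for this sketch

Users who prefer ExLlama_HF for a LLaMA-family GPTQ model can still select
it manually in the "Model loader" dropdown; only the inferred default
changes.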