AutoGPTQ: Add UI and command line support for disabling fused attention and fused MLP (#2648)

Author: Tom Jobbins, 2023-06-16 03:59:54 +01:00 (committed via GitHub)
parent 909d8c6ae3
commit 646b0c889f
5 changed files with 11 additions and 3 deletions


@@ -321,6 +321,8 @@ def create_model_menus():
         with gr.Column():
             gr.Markdown('GPTQ')
             shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
+            shared.gradio['no_inject_fused_attention'] = gr.Checkbox(label="no_inject_fused_attention", value=shared.args.no_inject_fused_attention, info='Disable fused attention. Fused attention improves inference performance but uses more VRAM. Disable if running low on VRAM.')
+            shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')
             shared.gradio['desc_act'] = gr.Checkbox(label="desc_act", value=shared.args.desc_act, info='\'desc_act\', \'wbits\', and \'groupsize\' are used for old models without a quantize_config.json.')
             shared.gradio['gptq_for_llama'] = gr.Checkbox(label="gptq-for-llama", value=shared.args.gptq_for_llama, info='Use GPTQ-for-LLaMa loader instead of AutoGPTQ. pre_layer should be used for CPU offloading instead of gpu-memory.')
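
Only the UI half of the change appears in the hunk above; the other four files are not shown. For context, here is a minimal sketch of how the matching command-line flags and loader wiring could look. The flag names are confirmed by the diff, and inject_fused_attention/inject_fused_mlp are existing parameters of AutoGPTQ's from_quantized() (both default to True), but the help strings, file layout, and the load_quantized wrapper are illustrative assumptions, not the commit's exact code.

# Sketch only -- not the commit's verbatim code.

# In the shared-arguments module: new CLI switches mirroring the checkboxes.
parser.add_argument('--no_inject_fused_attention', action='store_true',
                    help='Disable fused attention to save VRAM at some speed cost.')
parser.add_argument('--no_inject_fused_mlp', action='store_true',
                    help='Triton only: disable fused MLP to save VRAM at some speed cost.')

# In the AutoGPTQ loader module: forward the flags to AutoGPTQ.
# from_quantized() enables both fusions by default, so the negative
# "no_*" flags simply invert them.
from pathlib import Path

from auto_gptq import AutoGPTQForCausalLM

import modules.shared as shared


def load_quantized(model_dir):
    return AutoGPTQForCausalLM.from_quantized(
        Path(model_dir),
        use_triton=shared.args.triton,
        inject_fused_attention=not shared.args.no_inject_fused_attention,
        inject_fused_mlp=not shared.args.no_inject_fused_mlp,
    )

Expressing the options as negative flags keeps fusion enabled by default, preserving previous behavior; users only opt out when VRAM is tight, and fused MLP additionally requires the Triton backend, as the UI hint notes.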