Add the --disable_exllama option for AutoGPTQ
parent 0e05818266
commit 0230fa4e9c

6 changed files with 6 additions and 0 deletions
```diff
@@ -145,6 +145,7 @@ parser.add_argument('--no_inject_fused_attention', action='store_true', help='Do
 parser.add_argument('--no_inject_fused_mlp', action='store_true', help='Triton mode only: Do not use fused MLP (lowers VRAM requirements).')
 parser.add_argument('--no_use_cuda_fp16', action='store_true', help='This can make models faster on some systems.')
 parser.add_argument('--desc_act', action='store_true', help='For models that don\'t have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig.')
+parser.add_argument('--disable_exllama', action='store_true', help='Disable ExLlama kernel, which can improve inference speed on some systems.')
 
 # ExLlama
 parser.add_argument('--gpu-split', type=str, help="Comma-separated list of VRAM (in GB) to use per GPU device for model layers, e.g. 20,7,7")
```
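The hunk above only adds the CLI flag; the other five changed files are not shown. For context, here is a minimal sketch of how such a flag is typically forwarded to AutoGPTQ when loading a quantized model. The model path, device, and loader call site are illustrative assumptions, not part of this diff:

```python
# Sketch (assumption): threading the parsed flag through to AutoGPTQ's loader.
import argparse

from auto_gptq import AutoGPTQForCausalLM

parser = argparse.ArgumentParser()
parser.add_argument('--disable_exllama', action='store_true',
                    help='Disable ExLlama kernel, which can improve inference speed on some systems.')
args = parser.parse_args()

# auto-gptq's from_quantized() exposes a disable_exllama keyword in the
# 0.3/0.4-era releases; the path below is a placeholder for illustration.
model = AutoGPTQForCausalLM.from_quantized(
    'models/example-gptq-model',          # hypothetical model directory
    device='cuda:0',
    use_safetensors=True,
    disable_exllama=args.disable_exllama, # falls back to AutoGPTQ's CUDA kernels when True
)
```

In use, the option would simply be appended to the launch command (assuming the usual `server.py` entry point), e.g. `python server.py --disable_exllama`.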