Add --no_use_cuda_fp16 param for AutoGPTQ
parent 5646690769
commit 3ae9af01aa
5 changed files with 6 additions and 2 deletions
modules/AutoGPTQ_loader.py
@@ -48,7 +48,8 @@ def load_quantized(model_name):
         'use_safetensors': use_safetensors,
         'trust_remote_code': shared.args.trust_remote_code,
         'max_memory': get_max_memory_dict(),
-        'quantize_config': quantize_config
+        'quantize_config': quantize_config,
+        'use_cuda_fp16': not shared.args.no_use_cuda_fp16,
     }
 
     logger.info(f"The AutoGPTQ params are: {params}")
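The loader change is the core of the commit: the new key is simply forwarded to AutoGPTQ along with the other keyword arguments when the model is loaded. A minimal sketch of that flow, assuming an auto-gptq version whose from_quantized() accepts a use_cuda_fp16 keyword; the function and defaults below are illustrative, not the repo's exact code.

# Illustrative only: how the negated CLI flag becomes AutoGPTQ's use_cuda_fp16 keyword.
# Assumes auto-gptq's AutoGPTQForCausalLM.from_quantized() accepts use_cuda_fp16.
from auto_gptq import AutoGPTQForCausalLM

def load_quantized_sketch(model_dir, no_use_cuda_fp16=False):
    params = {
        'use_safetensors': True,
        'trust_remote_code': False,
        'use_cuda_fp16': not no_use_cuda_fp16,  # flag set -> fp16 CUDA kernels disabled
    }
    return AutoGPTQForCausalLM.from_quantized(model_dir, **params)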
modules/loaders.py
@@ -9,6 +9,7 @@ loaders_and_params = {
         'triton',
         'no_inject_fused_attention',
         'no_inject_fused_mlp',
+        'no_use_cuda_fp16',
         'wbits',
         'groupsize',
         'desc_act',
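loaders.py only registers the new element name so the web UI exposes the option for this loader. A hedged sketch of the mapping the hunk extends; the 'AutoGPTQ' key and any entries not visible in the diff are assumptions.

# Sketch of loaders_and_params: loader name -> UI elements to show for it.
# Entries other than the ones visible in the hunk are assumed.
loaders_and_params = {
    'AutoGPTQ': [
        'triton',
        'no_inject_fused_attention',
        'no_inject_fused_mlp',
        'no_use_cuda_fp16',  # new element added by this commit
        'wbits',
        'groupsize',
        'desc_act',
    ],
}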
modules/shared.py
@@ -147,6 +147,7 @@ parser.add_argument('--autogptq', action='store_true', help='DEPRECATED')
 parser.add_argument('--triton', action='store_true', help='Use triton.')
 parser.add_argument('--no_inject_fused_attention', action='store_true', help='Do not use fused attention (lowers VRAM requirements).')
 parser.add_argument('--no_inject_fused_mlp', action='store_true', help='Triton mode only: Do not use fused MLP (lowers VRAM requirements).')
+parser.add_argument('--no_use_cuda_fp16', action='store_true', help='This can make models faster on some systems.')
 parser.add_argument('--desc_act', action='store_true', help='For models that don\'t have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig.')
 
 # ExLlama
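Because the option is a store_true flag, omitting it leaves use_cuda_fp16 at True, so existing command lines keep their current behaviour; passing --no_use_cuda_fp16 is the only way to turn the fp16 CUDA kernels off. A self-contained sketch of that inversion:

# Self-contained check of the default: the flag is False unless passed,
# so use_cuda_fp16 (its negation) stays True by default.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--no_use_cuda_fp16', action='store_true',
                    help='This can make models faster on some systems.')

defaults = parser.parse_args([])
assert (not defaults.no_use_cuda_fp16) is True    # use_cuda_fp16=True by default

opted_out = parser.parse_args(['--no_use_cuda_fp16'])
assert (not opted_out.no_use_cuda_fp16) is False  # fp16 CUDA kernels disabled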
modules/ui.py
@@ -30,7 +30,7 @@ theme = gr.themes.Default(
 
 
 def list_model_elements():
-    elements = ['loader', 'cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'triton', 'desc_act', 'no_inject_fused_attention', 'no_inject_fused_mlp', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed', 'gpu_split']
+    elements = ['loader', 'cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'triton', 'desc_act', 'no_inject_fused_attention', 'no_inject_fused_mlp', 'no_use_cuda_fp16', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed', 'gpu_split']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
 
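The ui.py change matters because each name in list_model_elements() doubles as an attribute on shared.args: the UI writes the element's value back onto that namespace under the same name, which is why no further plumbing is needed. A hypothetical, self-contained sketch of that convention (not the repo's actual code):

# Hypothetical sketch: element names map one-to-one onto shared.args attributes,
# so listing 'no_use_cuda_fp16' is enough for the UI value to reach the loader.
from types import SimpleNamespace

args = SimpleNamespace(no_use_cuda_fp16=False)  # parsed CLI defaults
ui_values = {'no_use_cuda_fp16': True}          # e.g. the checkbox was ticked

for name, value in ui_values.items():
    setattr(args, name, value)

print(args.no_use_cuda_fp16)  # True -> use_cuda_fp16 becomes False at load time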