AutoGPTQ: Add UI and command line support for disabling fused attention and fused MLP (#2648)

This commit is contained in:
Tom Jobbins 2023-06-16 03:59:54 +01:00 committed by GitHub
parent 909d8c6ae3
commit 646b0c889f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 11 additions and 3 deletions

View file

@@ -43,6 +43,8 @@ def load_quantized(model_name):
'model_basename': pt_path.stem,
'device': "cuda:0" if not shared.args.cpu else "cpu",
'use_triton': shared.args.triton,
'inject_fused_attention': not shared.args.no_inject_fused_attention,
'inject_fused_mlp': not shared.args.no_inject_fused_mlp,
'use_safetensors': use_safetensors,
'trust_remote_code': shared.args.trust_remote_code,
'max_memory': get_max_memory_dict(),