Streamline GPTQ-for-LLaMa support
parent a3295dd666
commit bee73cedbd
5 changed files with 21 additions and 55 deletions
@@ -138,9 +138,6 @@ parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
 parser.add_argument('--pre_layer', type=int, nargs="+", help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-gpu, write the numbers separated by spaces, eg --pre_layer 30 60.')
 parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
 parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
-parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
-parser.add_argument('--warmup_autotune', action='store_true', help='(triton) Enable warmup autotune.')
-parser.add_argument('--fused_mlp', action='store_true', help='(triton) Enable fused mlp.')
 
 # AutoGPTQ
 parser.add_argument('--triton', action='store_true', help='Use triton.')
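
For reference, the flags kept by this hunk are plain argparse definitions. The sketch below is a minimal standalone example, not the project's actual parser module: it shows how these flags behave when parsed, in particular that --pre_layer uses nargs="+" to collect one or more integers into a list, which is what allows the multi-gpu form --pre_layer 30 60 from the help text.

import argparse

# Standalone sketch: a throwaway parser carrying the flags kept by this commit.
parser = argparse.ArgumentParser()
parser.add_argument('--pre_layer', type=int, nargs="+",
                    help='Layers to allocate to the GPU; several values for multi-gpu.')
parser.add_argument('--checkpoint', type=str,
                    help='Path to the quantized checkpoint file.')
parser.add_argument('--monkey-patch', action='store_true',
                    help='Apply the monkey patch for using LoRAs with quantized models.')
parser.add_argument('--triton', action='store_true', help='Use triton.')

# nargs="+" gathers one or more values into a list of ints.
args = parser.parse_args(['--pre_layer', '30', '60', '--triton'])
print(args.pre_layer)     # [30, 60]
print(args.triton)        # True
# argparse converts the dash: '--monkey-patch' is exposed as args.monkey_patch.
print(args.monkey_patch)  # False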
|
Loading…
Add table
Add a link
Reference in a new issue