Add 4-bit LoRA support (#1200)

This commit is contained in:
oobabooga 2023-04-16 23:26:52 -03:00 committed by GitHub
parent ec3e869c27
commit 39099663a0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 100 additions and 34 deletions

View file

@@ -124,6 +124,7 @@ parser.add_argument('--model_type', type=str, help='GPTQ: Model type of pre-quan
parser.add_argument('--groupsize', type=int, default=-1, help='GPTQ: Group size.')
parser.add_argument('--pre_layer', type=int, default=0, help='GPTQ: The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
parser.add_argument('--no-warmup_autotune', action='store_true', help='GPTQ: Disable warmup autotune for triton.')
parser.add_argument('--monkey-patch', action='store_true', help='GPTQ: Apply the monkey patch for using LoRAs with quantized models.')
# FlexGen
parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')