Rename --llama-bits to --gptq-bits

This commit is contained in:
oobabooga 2023-03-12 11:19:07 -03:00
parent fed3617f07
commit 65dda28c9d
3 changed files with 4 additions and 4 deletions

View file

@@ -16,7 +16,7 @@ def load_quantized_LLaMA(model_name):
if shared.args.load_in_4bit:
bits = 4
else:
-        bits = shared.args.llama_bits
+        bits = shared.args.gptq_bits
path_to_model = Path(f'models/{model_name}')
pt_model = ''