Rename a file, add deprecation warning for --load-in-4bit

This commit is contained in:
oobabooga 2023-03-14 07:56:31 -03:00
parent 3da73e409f
commit 265ba384b7
3 changed files with 7 additions and 1 deletion

View file

@@ -89,7 +89,7 @@ def load_model(model_name):
# Quantized model
elif shared.args.gptq_bits > 0:
from modules.quant_loader import load_quantized
from modules.GPTQ_loader import load_quantized
model = load_quantized(model_name)