Improve error handling while loading GPTQ models

This commit is contained in:
oobabooga 2023-05-19 11:20:08 -03:00
parent 39dab18307
commit 9d5025f531
2 changed files with 5 additions and 2 deletions

View file

@ -140,7 +140,7 @@ def load_quantized(model_name):
if shared.args.model_type is None:
logging.error("The model could not be loaded because its type could not be inferred from its name.")
logging.error("Please specify the type manually using the --model_type argument.")
-        return
+        return None
# Select the appropriate load_quant function
model_type = shared.args.model_type.lower()