Improve error handling while loading GPTQ models

This commit is contained in:
oobabooga 2023-05-19 11:20:08 -03:00
parent 39dab18307
commit 9d5025f531
2 changed files with 5 additions and 2 deletions

View file

@@ -97,7 +97,10 @@ def load_model(model_name):
model, tokenizer = output
else:
model = output
tokenizer = load_tokenizer(model_name, model)
if model is None:
return None, None
else:
tokenizer = load_tokenizer(model_name, model)
# Hijack attention with xformers
if any((shared.args.xformers, shared.args.sdp_attention)):