Implement a demo HF wrapper for exllama to utilize existing HF transformers decoding. (#2777)

This commit is contained in:
LarryVRH 2023-06-22 02:31:42 +08:00 committed by GitHub
parent a06acd6d09
commit 580c1ee748
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 101 additions and 6 deletions

View file

@ -49,7 +49,8 @@ def load_model(model_name, loader=None):
'llama.cpp': llamacpp_loader,
'FlexGen': flexgen_loader,
'RWKV': RWKV_loader,
'ExLlama': ExLlama_loader
'ExLlama': ExLlama_loader,
'ExLlama_HF': ExLlama_HF_loader
}
if loader is None:
@ -278,6 +279,12 @@ def ExLlama_loader(model_name):
return model, tokenizer
def ExLlama_HF_loader(model_name):
    """Load *model_name* via the HF-wrapper around ExLlama.

    Returns whatever ``ExllamaHF.from_pretrained`` produces — per the
    commit message, a wrapper that lets ExLlama reuse the existing HF
    transformers decoding path.
    """
    # Lazy import: only pull in the ExLlama wrapper when this loader
    # is actually selected, so other loaders work without it installed.
    from modules.exllama_hf import ExllamaHF

    wrapped_model = ExllamaHF.from_pretrained(model_name)
    return wrapped_model
def get_max_memory_dict():
max_memory = {}
if shared.args.gpu_memory: