Implement a demo HF wrapper for exllama to utilize existing HF transformers decoding. (#2777)
This commit is contained in:
parent
a06acd6d09
commit
580c1ee748
7 changed files with 101 additions and 6 deletions
|
@ -49,7 +49,8 @@ def load_model(model_name, loader=None):
|
|||
'llama.cpp': llamacpp_loader,
|
||||
'FlexGen': flexgen_loader,
|
||||
'RWKV': RWKV_loader,
|
||||
'ExLlama': ExLlama_loader
|
||||
'ExLlama': ExLlama_loader,
|
||||
'ExLlama_HF': ExLlama_HF_loader
|
||||
}
|
||||
|
||||
if loader is None:
|
||||
|
@ -278,6 +279,12 @@ def ExLlama_loader(model_name):
|
|||
return model, tokenizer
|
||||
|
||||
|
||||
def ExLlama_HF_loader(model_name):
    """Load *model_name* through the ExLlama HF-compatible wrapper.

    Returns the result of ``ExllamaHF.from_pretrained`` so the model can
    be driven by the standard HF transformers decoding loop.
    """
    # Deferred import: exllama is an optional backend, so only pull it in
    # when this loader is actually selected.
    from modules.exllama_hf import ExllamaHF

    wrapped_model = ExllamaHF.from_pretrained(model_name)
    return wrapped_model
|
||||
|
||||
|
||||
def get_max_memory_dict():
|
||||
max_memory = {}
|
||||
if shared.args.gpu_memory:
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue