Remove exllamav1 loaders (#5128)

Commit 0e54a09bcb (parent 8e397915c9)
Author: oobabooga, 2023-12-31 01:57:06 -03:00, committed by GitHub
18 changed files with 28 additions and 635 deletions


@@ -14,11 +14,10 @@ def get_next_logits(prompt, state, use_samplers, previous, top_logits=50, return
         return 'Error: No model is loaded! Select one in the Model tab.', previous
 
     is_non_hf_exllamav2 = shared.model.__class__.__name__ == 'Exllamav2Model'
-    is_non_hf_exllamav1 = shared.model.__class__.__name__ == 'ExllamaModel'
     is_non_hf_llamacpp = shared.model.__class__.__name__ == 'LlamaCppModel'
 
     if use_samplers:
-        if any([is_non_hf_exllamav2, is_non_hf_exllamav1, is_non_hf_llamacpp]):
+        if any([is_non_hf_exllamav2, is_non_hf_llamacpp]):
             logger.error("Sampler hijacking is not supported non-Huggingface loaders.")
             # sampling is all done in c for exllama, so it is really hard to hijack
             # it should be possible to hijack llamacpp sampler by hijacking all their sampling methods,
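
The comments above explain why this branch exists at all: exllama does its sampling in C, so there is nothing on the Python side to intercept, while llama.cpp exposes several sampling entry points that would each need wrapping. As a loose illustration of "hijacking all their sampling methods" (hypothetical names throughout, not llama-cpp-python's actual API), the idea would be to wrap every sampling method so the raw scores can be captured before sampling proceeds:

# A loose, hypothetical sketch of wholesale sampler hijacking: wrap each
# named sampling method on a model object and record its first argument
# (assumed here to be the scores) before delegating. Illustrative only.
import functools

captured_scores = []

def hijack_sampling(model, method_names):
    for name in method_names:
        original = getattr(model, name)

        @functools.wraps(original)
        def wrapper(*args, _original=original, **kwargs):
            if args:
                captured_scores.append(args[0])  # assumption: scores come first
            return _original(*args, **kwargs)

        setattr(model, name, wrapper)
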
@@ -32,7 +31,7 @@ def get_next_logits(prompt, state, use_samplers, previous, top_logits=50, return
         scores = sampler_hijack.global_scores[-1]
     else:
-        if is_non_hf_exllamav2 or is_non_hf_exllamav1:
+        if is_non_hf_exllamav2:
             if is_torch_xpu_available():
                 tokens = shared.tokenizer.encode(prompt).to("xpu:0")
             else:
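
The surviving exllamav2 branch just moves the encoded prompt onto whichever accelerator is present. A standalone sketch of that dispatch, assuming a Huggingface-style tokenizer and transformers' is_torch_xpu_available (the helper name encode_to_device is ours, not the repository's):

# Sketch of the xpu/cuda dispatch shown in the hunk above, assuming a
# Huggingface tokenizer; encode_to_device is a hypothetical helper.
from transformers import is_torch_xpu_available

def encode_to_device(tokenizer, prompt):
    ids = tokenizer.encode(prompt, return_tensors='pt')
    if is_torch_xpu_available():
        return ids.to('xpu:0')
    return ids.cuda()
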
@@ -51,7 +50,7 @@ def get_next_logits(prompt, state, use_samplers, previous, top_logits=50, return
     probs = torch.softmax(scores, dim=-1, dtype=torch.float)
     topk_values, topk_indices = torch.topk(probs, k=top_logits, largest=True, sorted=True)
-    if is_non_hf_exllamav1 or is_non_hf_llamacpp:
+    if is_non_hf_llamacpp:
         topk_indices = [i.expand((1, 1)) for i in topk_indices]
 
     if hasattr(shared.tokenizer, 'convert_ids_to_tokens'):
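
Taken together, the logits path after this commit only special-cases two non-Huggingface loaders, and the top-k step is a plain softmax plus topk. A minimal self-contained sketch of that flow, assuming a (1, vocab_size) logits tensor and a Huggingface-style tokenizer (is_non_hf_loader and top_token_probs are hypothetical helpers, not the repository's code):

# Minimal sketch of the post-commit loader check and top-k extraction.
# Helper names are illustrative; only the class names come from the diff.
import torch

def is_non_hf_loader(model):
    # 'ExllamaModel' (exllamav1) no longer appears after this commit.
    return model.__class__.__name__ in ('Exllamav2Model', 'LlamaCppModel')

def top_token_probs(scores, tokenizer, k=50):
    # Mirror the softmax/topk step from the last hunk above.
    probs = torch.softmax(scores, dim=-1, dtype=torch.float)
    topk_values, topk_indices = torch.topk(probs, k=k, largest=True, sorted=True)
    tokens = [tokenizer.decode(int(i)) for i in topk_indices[0]]
    return dict(zip(tokens, topk_values[0].tolist()))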