Tokenization improvements
commit ad8ac545a5 (parent cd08eb0753)
5 changed files with 19 additions and 15 deletions
@@ -46,17 +46,14 @@ def get_next_logits(prompt, state, use_samplers, previous):
         scores = output['logits'][-1][-1]

     probs = torch.softmax(scores, dim=-1, dtype=torch.float)
-    topk_values, topk_indices = torch.topk(probs, k=25, largest=True, sorted=True)
+    topk_values, topk_indices = torch.topk(probs, k=50, largest=True, sorted=True)
     topk_values = [f"{float(i):.5f}" for i in topk_values]
     if is_non_hf_exllamav1 or is_non_hf_llamacpp:
         topk_indices = [i.expand((1, 1)) for i in topk_indices]

     tokens = [shared.tokenizer.decode(i) for i in topk_indices]
     if is_non_hf_llamacpp:
         tokens = [i.decode('utf-8') for i in tokens]  # llamacpp returns bytes, not str

     output = ''
     for row in list(zip(topk_values, tokens)):
-        output += f"{row[0]} - {repr(row[1])[1:-1]}\n"
+        output += f"{row[0]} - {repr(row[1])}\n"

     return output, previous
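Besides raising the readout from the top 25 to the top 50 tokens, the second change drops the [1:-1] slice, so the quotes that repr() adds around each token now stay in the display, which makes leading and trailing whitespace in tokens visible. A quick standalone illustration of the difference (the token value here is a made-up example, not from the commit):

    token = ' the'                           # a token with a leading space
    print(f"0.12345 - {repr(token)[1:-1]}")  # old style: 0.12345 -  the
    print(f"0.12345 - {repr(token)}")        # new style: 0.12345 - ' the'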
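For context, the pattern this function implements (softmax over the final position's logits, then a sorted top-k readout) can be sketched with a plain Hugging Face model. This is a minimal illustration only: the model name and prompt are placeholders, and the webui's backend branches for exllamav1 and llama.cpp are omitted.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Placeholder model; any causal LM works the same way.
    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    model = AutoModelForCausalLM.from_pretrained('gpt2')

    input_ids = tokenizer('The capital of France is', return_tensors='pt').input_ids
    with torch.no_grad():
        logits = model(input_ids).logits

    scores = logits[-1][-1]  # logits at the last position of the last row
    probs = torch.softmax(scores, dim=-1, dtype=torch.float)
    topk_values, topk_indices = torch.topk(probs, k=50, largest=True, sorted=True)

    output = ''
    for value, index in zip(topk_values, topk_indices):
        # repr() keeps quotes and escapes, so whitespace tokens stay visible
        output += f"{float(value):.5f} - {repr(tokenizer.decode(int(index)))}\n"

    print(output)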