Fix llama.cpp truncation (#3400)
Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
This commit is contained in:
parent
4e6dc6d99d
commit
f4005164f4
2 changed files with 7 additions and 1 deletion
|
@@ -39,7 +39,6 @@ def encode(prompt, add_special_tokens=True, add_bos_token=True, truncation_lengt
|
|||
if shared.model.__class__.__name__ in ['LlamaCppModel', 'RWKVModel']:
|
||||
input_ids = shared.tokenizer.encode(str(prompt))
|
||||
input_ids = np.array(input_ids).reshape(1, len(input_ids))
|
||||
return input_ids
|
||||
else:
|
||||
input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', add_special_tokens=add_special_tokens)
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue