Add a tokenizer placeholder
This commit is contained in:
parent
4cc36dc434
commit
a345a2acd2
1 changed file with 4 additions and 1 deletion
|
@ -21,8 +21,11 @@ def get_max_prompt_length(tokens):
|
||||||
return max_length
|
return max_length
|
||||||
|
|
||||||
def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
|
def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
|
||||||
|
|
||||||
|
# These models do not have explicit tokenizers for now, so
|
||||||
|
# we return an estimate on the number of tokens
|
||||||
if shared.is_RWKV or shared.is_LLaMA:
|
if shared.is_RWKV or shared.is_LLaMA:
|
||||||
return prompt
|
return np.zeros((1, len(prompt)//5))
|
||||||
|
|
||||||
input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
|
input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
|
||||||
if shared.args.cpu:
|
if shared.args.cpu:
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue