Move towards HF LLaMA implementation

This commit is contained in:
oobabooga 2023-03-05 01:20:31 -03:00
parent bd8aac8fa4
commit c33715ad5b
6 changed files with 4 additions and 245 deletions

View file

@@ -24,7 +24,7 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
# These models do not have explicit tokenizers for now, so
# we return an estimate for the number of tokens
-    if shared.is_RWKV or shared.is_LLaMA:
+    if shared.is_RWKV:
return np.zeros((1, len(prompt)//4))
input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
@@ -90,7 +90,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
# These models are not part of Hugging Face, so we handle them
# separately and terminate the function call earlier
-    if shared.is_RWKV or shared.is_LLaMA:
+    if shared.is_RWKV:
if shared.args.no_stream:
reply = shared.model.generate(question, token_count=max_new_tokens, temperature=temperature, top_p=top_p)
t1 = time.time()