From ebed1dea561af56cff15445fb37d152e60b19409 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 25 Jan 2023 10:38:26 -0300
Subject: [PATCH] Generate 8 tokens at a time in streaming mode instead of
 just 1

This is a performance optimization.
---
 server.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/server.py b/server.py
index 0e6983e..5c694fe 100644
--- a/server.py
+++ b/server.py
@@ -204,8 +204,8 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
-    # Generate the reply 1 token at a time
+    # Generate the reply 8 tokens at a time
     else:
         yield formatted_outputs(question, model_name)
-        preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=1')
-        for i in tqdm(range(tokens)):
+        preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=8')
+        for i in tqdm(range(tokens//8+1)):
             output = eval(f"model.generate(input_ids, eos_token_id={n}, stopping_criteria=stopping_criteria_list, {preset}){cuda}")
             reply = decode(output[0])
             if eos_token is not None and reply[-1] == eos_token:
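
For reference, the change boils down to this pattern: call model.generate()
once per 8-token chunk and feed each output back in as the next input,
instead of looping one token at a time. Below is a minimal, self-contained
sketch of that idea against the Hugging Face transformers API; the names
here (stream_reply, CHUNK, the gpt2 checkpoint) are illustrative and are
not part of server.py.

from transformers import AutoModelForCausalLM, AutoTokenizer

CHUNK = 8  # tokens per model.generate() call, as in the patch

def stream_reply(model, tokenizer, prompt, max_new_tokens):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    # max_new_tokens//CHUNK + 1 iterations cover the token budget even
    # when it is not a multiple of CHUNK (the last chunk may overshoot)
    for _ in range(max_new_tokens // CHUNK + 1):
        output = model.generate(input_ids, max_new_tokens=CHUNK)
        yield tokenizer.decode(output[0], skip_special_tokens=True)
        # Mirrors the patch's reply[-1] test: only the last token of the
        # chunk is checked, so an EOS emitted mid-chunk is not caught here
        if output[0][-1].item() == tokenizer.eos_token_id:
            break
        input_ids = output  # re-feed prompt + generated tokens

# Usage:
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# for partial_text in stream_reply(model, tokenizer, "Hello,", 200):
#     print(partial_text)

The trade-off is the same as in the patch: fewer generate() calls means
less per-call overhead, at the cost of coarser streaming updates (text
appears 8 tokens at a time rather than 1).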