diff --git a/modules/text_generation.py b/modules/text_generation.py
index fd017e2..eb8f6ca 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -236,8 +236,6 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
                 break
             yield formatted_outputs(reply, shared.model_name)
 
-            yield formatted_outputs(reply, shared.model_name)
-
     # Stream the output naively for FlexGen since it doesn't support 'stopping_criteria'
     else:
         for i in range(max_new_tokens//8+1):