Refactor chat functions (#2003)

oobabooga 2023-05-11 15:37:04 -03:00 committed by GitHub
parent 4e9da22c58
commit 638c6a65a2
8 changed files with 138 additions and 157 deletions

modules/chat.py

@@ -188,7 +188,7 @@ def chatbot_wrapper(text, state, regenerate=False, _continue=False):
     # Generate
     for i in range(state['chat_generation_attempts']):
         reply = None
-        for j, reply in enumerate(generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings)):
+        for j, reply in enumerate(generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings, is_chat=True)):
             reply = cumulative_reply + reply

             # Extracting the reply
@@ -242,7 +242,7 @@ def impersonate_wrapper(text, state):
     cumulative_reply = text
     for i in range(state['chat_generation_attempts']):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings, is_chat=True):
             reply = cumulative_reply + reply
             reply, next_character_found = extract_message_from_reply(reply, state)
             yield reply
@@ -255,35 +255,31 @@ def impersonate_wrapper(text, state):
     yield reply


-def cai_chatbot_wrapper(text, state):
-    for history in chatbot_wrapper(text, state):
+def generate_chat_reply(text, state, regenerate=False, _continue=False):
+    if regenerate or _continue:
+        text = ''
+        if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
+            yield shared.history['visible']
+            return
+
+    for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue):
+        yield history
+
+
+# Same as above but returns HTML
+def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
+    for history in generate_chat_reply(text, state, regenerate, _continue):
         yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])


-def regenerate_wrapper(text, state):
-    if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
-        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
-    else:
-        for history in chatbot_wrapper('', state, regenerate=True):
-            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
-
-
-def continue_wrapper(text, state):
-    if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
-        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
-    else:
-        for history in chatbot_wrapper('', state, _continue=True):
-            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
-
-
-def remove_last_message(name1, name2, mode, style):
+def remove_last_message():
     if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
         last = shared.history['visible'].pop()
         shared.history['internal'].pop()
     else:
         last = ['', '']

-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style), last[0]
+    return last[0]


 def send_last_reply_to_input():
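
Note on the hunk above: the old cai_chatbot_wrapper/regenerate_wrapper/continue_wrapper trio collapses into a single generate_chat_reply entry point that yields the raw visible history, with generate_chat_reply_wrapper layering HTML rendering on top. A minimal usage sketch (hypothetical caller code, assuming this repo's modules.chat is importable and state is the usual settings dict carrying 'name1', 'name2', 'mode' and 'chat_style'):

    from modules import chat

    # Regenerating the last reply: the keyword flags replace the old dedicated
    # wrappers; generate_chat_reply blanks the text argument itself whenever
    # regenerate or _continue is set.
    for history in chat.generate_chat_reply('', state, regenerate=True):
        pass  # each value is shared.history['visible'], a list of [user, bot] pairs

    # UI callers that need rendered output go through the wrapper instead:
    for html in chat.generate_chat_reply_wrapper('', state, _continue=True):
        pass  # each value is the chat HTML for the current history
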
@@ -293,35 +289,27 @@ def send_last_reply_to_input():
     return ''


-def replace_last_reply(text, name1, name2, mode, style):
+def replace_last_reply(text):
     if len(shared.history['visible']) > 0:
         shared.history['visible'][-1][1] = text
         shared.history['internal'][-1][1] = apply_extensions("input", text)

-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-

-def send_dummy_message(text, name1, name2, mode, style):
+def send_dummy_message(text):
     shared.history['visible'].append([text, ''])
     shared.history['internal'].append([apply_extensions("input", text), ''])
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)


-def send_dummy_reply(text, name1, name2, mode, style):
+def send_dummy_reply(text):
     if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '':
         shared.history['visible'].append(['', ''])
         shared.history['internal'].append(['', ''])

     shared.history['visible'][-1][1] = text
     shared.history['internal'][-1][1] = apply_extensions("input", text)
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
-
-def clear_html():
-    return chat_html_wrapper([], "", "")


-def clear_chat_log(name1, name2, greeting, mode, style):
+def clear_chat_log(greeting, mode):
     shared.history['visible'] = []
     shared.history['internal'] = []
@@ -332,14 +320,12 @@ def clear_chat_log(name1, name2, greeting, mode, style):
     save_history(mode)

-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-

-def redraw_html(name1, name2, mode, style, reset_cache=False):
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style, reset_cache=reset_cache)
+def redraw_html(name1, name2, mode, style):
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)


-def tokenize_dialogue(dialogue, name1, name2, mode, style):
+def tokenize_dialogue(dialogue, name1, name2):
     history = []
     messages = []
     dialogue = re.sub('<START>', '', dialogue)
@@ -447,7 +433,7 @@ def generate_pfp_cache(character):
     return None


-def load_character(character, name1, name2, mode, style):
+def load_character(character, name1, name2, mode):
     shared.character = character
     context = greeting = turn_template = ""
     greeting_field = 'greeting'
@@ -521,7 +507,7 @@ def load_character(character, name1, name2, mode, style):
         # Create .json log files since they don't already exist
         save_history(mode)

-    return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
+    return name1, name2, picture, greeting, context, repr(turn_template)[1:-1]


 def upload_character(json_file, img, tavern=False):
@@ -556,7 +542,7 @@ def upload_tavern_character(img, name1, name2):
     return upload_character(json.dumps(_json), img, tavern=True)


-def upload_your_profile_picture(img, name1, name2, mode, style):
+def upload_your_profile_picture(img):
     cache_folder = Path("cache")
     if not cache_folder.exists():
         cache_folder.mkdir()
@@ -568,5 +554,3 @@ def upload_your_profile_picture(img, name1, name2, mode, style):
         img = make_thumbnail(img)
         img.save(Path('cache/pfp_me.png'))
         logging.info('Profile picture saved to "cache/pfp_me.png"')
-
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style, reset_cache=True)
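
The chat.py changes above all follow one pattern: functions that edit the history (remove_last_message, replace_last_reply, send_dummy_message, send_dummy_reply, clear_chat_log) or load assets (load_character, upload_your_profile_picture) no longer render and return chat HTML themselves; redraw_html becomes the single rendering path the UI chains after them. A sketch of the intended wiring (hypothetical example values, assuming this repo's modules.chat):

    from modules import chat

    name1, name2, mode, style = 'You', 'Assistant', 'chat', 'cai-chat'  # example values

    # Mutate the history first; these now return plain data (or nothing)...
    removed_text = chat.remove_last_message()
    chat.replace_last_reply('an edited bot reply')

    # ...then re-render once through the single HTML entry point.
    html = chat.redraw_html(name1, name2, mode, style)
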

modules/text_generation.py

@@ -101,10 +101,10 @@ def fix_galactica(s):
     return s


-def get_reply_from_output_ids(output_ids, input_ids, original_question, state):
+def get_reply_from_output_ids(output_ids, input_ids, original_question, state, is_chat=False):
     if shared.model_type == 'HF_seq2seq':
         reply = decode(output_ids, state['skip_special_tokens'])
-        if not shared.is_chat():
+        if not is_chat:
             reply = apply_extensions('output', reply)
     else:
         new_tokens = len(output_ids) - len(input_ids[0])
@@ -114,24 +114,21 @@ def get_reply_from_output_ids(output_ids, input_ids, original_question, state):
             if len(original_question) > 0 and original_question[-1] not in [' ', '\n']:
                 reply = ' ' + reply

-        if not shared.is_chat():
+        if not is_chat:
             reply = original_question + apply_extensions('output', reply)

     return reply


 def formatted_outputs(reply, model_name):
-    if not shared.is_chat():
-        if shared.model_type == 'galactica':
-            reply = fix_galactica(reply)
-            return reply, reply, generate_basic_html(reply)
-        elif shared.model_type == 'gpt4chan':
-            reply = fix_gpt4chan(reply)
-            return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
-        else:
-            return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
+    if shared.model_type == 'galactica':
+        reply = fix_galactica(reply)
+        return reply, reply, generate_basic_html(reply)
+    elif shared.model_type == 'gpt4chan':
+        reply = fix_gpt4chan(reply)
+        return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
     else:
-        return reply
+        return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)


 def set_manual_seed(seed):
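
With chat handled upstream, formatted_outputs above drops its shared.is_chat() branch and now always returns the three values the non-chat tabs expect: the raw reply, a GALACTICA-specific field, and an HTML rendering. Roughly (illustrative shapes only; assumes this repo's modules):

    from modules import shared
    from modules.text_generation import formatted_outputs

    # galactica models:  (fixed_reply, fixed_reply, basic_html)
    # gpt4chan models:   (fixed_reply, notice_string, thread_html)
    # everything else:   (reply, notice_string, basic_html)
    text, galactica_field, html = formatted_outputs('some generated text', shared.model_name)
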
@@ -150,13 +147,18 @@ def stop_everything_event():
     shared.stop_everything = True


-def generate_reply(question, state, eos_token=None, stopping_strings=None):
+def generate_reply_wrapper(question, state, eos_token=None, stopping_strings=None):
+    for reply in generate_reply(question, state, eos_token, stopping_strings, is_chat=False):
+        yield formatted_outputs(reply, shared.model_name)
+
+
+def generate_reply(question, state, eos_token=None, stopping_strings=None, is_chat=False):
     state = apply_extensions('state', state)
     generate_func = apply_extensions('custom_generate_reply')
     if generate_func is None:
         if shared.model_name == 'None' or shared.model is None:
             logging.error("No model is loaded! Select one in the Model tab.")
-            yield formatted_outputs(question, shared.model_name)
+            yield question
             return

         if shared.model_type in ['rwkv', 'llamacpp']:
@@ -168,7 +170,7 @@ def generate_reply(question, state, eos_token=None, stopping_strings=None):
     # Preparing the input
     original_question = question
-    if not shared.is_chat():
+    if not is_chat:
         question = apply_extensions('input', question)

     if shared.args.verbose:
@@ -177,11 +179,11 @@ def generate_reply(question, state, eos_token=None, stopping_strings=None):
     shared.stop_everything = False
     clear_torch_cache()
     seed = set_manual_seed(state['seed'])
-    for reply in generate_func(question, original_question, seed, state, eos_token, stopping_strings):
-        yield formatted_outputs(reply, shared.model_name)
+    for reply in generate_func(question, original_question, seed, state, eos_token, stopping_strings, is_chat=is_chat):
+        yield reply


-def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None):
+def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None, is_chat=False):
     generate_params = {}
     for k in ['max_new_tokens', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']:
         generate_params[k] = state[k]
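
The hunks above are the heart of the text_generation.py refactor: generate_reply no longer consults shared.is_chat() or formats its own output. Callers pass is_chat explicitly, and only the new generate_reply_wrapper (the non-chat code path) applies formatted_outputs. A sketch of the two call paths (hypothetical caller code; prompt and state are assumed to come from the UI):

    from modules.text_generation import generate_reply, generate_reply_wrapper

    # Chat path (chatbot_wrapper/impersonate_wrapper): raw text replies; the
    # 'input'/'output' extension hooks are skipped here and applied at the
    # chat layer instead.
    for reply in generate_reply(prompt, state, is_chat=True):
        pass

    # Notebook/default path: the wrapper forces is_chat=False and formats each
    # reply into the (text, galactica_field, html) output triple.
    for outputs in generate_reply_wrapper(prompt, state):
        pass
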
@@ -233,7 +235,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None):
     t0 = time.time()
     try:
-        if not shared.is_chat() and shared.model_type != 'HF_seq2seq':
+        if not is_chat and shared.model_type != 'HF_seq2seq':
             yield original_question

         # Generate the entire reply at once.
@@ -246,7 +248,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None):
             if shared.soft_prompt:
                 output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

-            yield get_reply_from_output_ids(output, input_ids, original_question, state)
+            yield get_reply_from_output_ids(output, input_ids, original_question, state, is_chat=is_chat)

         # Stream the reply 1 token at a time.
         # This is based on the trick of using 'stopping_criteria' to create an iterator.
@@ -266,7 +268,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None):
                 if shared.soft_prompt:
                     output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

-                yield get_reply_from_output_ids(output, input_ids, original_question, state)
+                yield get_reply_from_output_ids(output, input_ids, original_question, state, is_chat=is_chat)

                 if output[-1] in eos_token_ids:
                     break
@@ -280,7 +282,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None):
     return


-def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None):
+def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None, is_chat=False):
     seed = set_manual_seed(state['seed'])
     generate_params = {'token_count': state['max_new_tokens']}
     for k in ['temperature', 'top_p', 'top_k', 'repetition_penalty']:
@@ -288,13 +290,13 @@ def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None):
     t0 = time.time()
     try:
-        if not shared.is_chat():
+        if not is_chat:
             yield question

         if not state['stream']:
             reply = shared.model.generate(context=question, **generate_params)
             output = original_question + reply
-            if not shared.is_chat():
+            if not is_chat:
                 reply = original_question + apply_extensions('output', reply)

             yield reply
@@ -302,7 +304,7 @@ def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None):
             for reply in shared.model.generate_with_streaming(context=question, **generate_params):
                 output = original_question + reply
-                if not shared.is_chat():
+                if not is_chat:
                     reply = original_question + apply_extensions('output', reply)

                 yield reply
@@ -317,7 +319,7 @@ def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None):
     return


-def generate_reply_flexgen(question, original_question, seed, state, eos_token=None, stopping_strings=None):
+def generate_reply_flexgen(question, original_question, seed, state, eos_token=None, stopping_strings=None, is_chat=False):
     generate_params = {}
     for k in ['max_new_tokens', 'do_sample', 'temperature']:
         generate_params[k] = state[k]
@@ -346,7 +348,7 @@ def generate_reply_flexgen(question, original_question, seed, state, eos_token=None, stopping_strings=None):
     t0 = time.time()
     try:
-        if not shared.is_chat():
+        if not is_chat:
             yield question

         # Generate the entire reply at once.
@@ -354,7 +356,7 @@ def generate_reply_flexgen(question, original_question, seed, state, eos_token=None, stopping_strings=None):
         if not state['stream']:
             with torch.no_grad():
                 output = shared.model.generate(**generate_params)[0]

-            yield get_reply_from_output_ids(output, input_ids, original_question, state)
+            yield get_reply_from_output_ids(output, input_ids, original_question, state, is_chat=is_chat)

         # Stream the output naively for FlexGen since it doesn't support 'stopping_criteria'
         else: