Add Instruct mode

parent 3d6cb5ed63
commit e722c240af

9 changed files with 217 additions and 105 deletions

modules/chat.py (139 changes)
@@ -12,46 +12,51 @@ from PIL import Image
 import modules.extensions as extensions_module
 import modules.shared as shared
 from modules.extensions import apply_extensions
-from modules.html_generator import (fix_newlines, generate_chat_html,
+from modules.html_generator import (fix_newlines, chat_html_wrapper,
                                     make_thumbnail)
 from modules.text_generation import (encode, generate_reply,
                                      get_max_prompt_length)

-def generate_chat_output(history, name1, name2):
-    if shared.args.cai_chat:
-        return generate_chat_html(history, name1, name2)
-    else:
-        return history
-
-def generate_chat_prompt(user_input, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=False, also_return_rows=False):
+def generate_chat_prompt(user_input, max_new_tokens, name1, name2, context, chat_prompt_size, is_instruct, end_of_turn="", impersonate=False, also_return_rows=False):
     user_input = fix_newlines(user_input)
     rows = [f"{context.strip()}\n"]

     # Finding the maximum prompt size
     if shared.soft_prompt:
         chat_prompt_size -= shared.soft_prompt_tensor.shape[1]
     max_length = min(get_max_prompt_length(max_new_tokens), chat_prompt_size)

+    if is_instruct:
+        prefix1 = f"{name1}\n"
+        prefix2 = f"{name2}\n"
+    else:
+        prefix1 = f"{name1}: "
+        prefix2 = f"{name2}: "
+
     i = len(shared.history['internal'])-1
     while i >= 0 and len(encode(''.join(rows), max_new_tokens)[0]) < max_length:
-        rows.insert(1, f"{name2}: {shared.history['internal'][i][1].strip()}\n")
-        prev_user_input = shared.history['internal'][i][0]
-        if prev_user_input not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
-            rows.insert(1, f"{name1}: {prev_user_input.strip()}\n")
+        rows.insert(1, f"{prefix2}{shared.history['internal'][i][1].strip()}{end_of_turn}\n")
+        string = shared.history['internal'][i][0]
+        if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
+            rows.insert(1, f"{prefix1}{string.strip()}{end_of_turn}\n")
         i -= 1

-    if not impersonate:
-        if len(user_input) > 0:
-            rows.append(f"{name1}: {user_input}\n")
-        rows.append(apply_extensions(f"{name2}:", "bot_prefix"))
-        limit = 3
-    else:
-        rows.append(f"{name1}:")
+    if impersonate:
+        rows.append(f"{prefix1.strip() if not is_instruct else prefix1}")
         limit = 2
+    else:
+
+        # Adding the user message
+        if len(user_input) > 0:
+            rows.append(f"{prefix1}{user_input}{end_of_turn}\n")
+
+        # Adding the Character prefix
+        rows.append(apply_extensions(f"{prefix2.strip() if not is_instruct else prefix2}", "bot_prefix"))
+        limit = 3

     while len(rows) > limit and len(encode(''.join(rows), max_new_tokens)[0]) >= max_length:
         rows.pop(1)

     prompt = ''.join(rows)

     if also_return_rows:
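The heart of the new mode is the prefix logic above: chat mode keeps the classic "Name: message" turn format, while instruct mode puts each speaker label on its own line and appends an optional end-of-turn marker. A minimal standalone sketch of the two formats (the names, input, and end_of_turn value are illustrative, not taken from the commit):

    # Illustrative only: mimics the prefix logic in generate_chat_prompt.
    user_input = "Write a haiku."

    # instruct mode: bare labels on their own lines, plus end_of_turn
    name1, name2, end_of_turn = "### Instruction:", "### Response:", ""
    prefix1, prefix2 = f"{name1}\n", f"{name2}\n"
    print(f"{prefix1}{user_input}{end_of_turn}\n{prefix2}", end="")
    # ### Instruction:
    # Write a haiku.
    # ### Response:

    # chat mode: "Name: message" turns
    name1, name2 = "You", "Assistant"
    prefix1, prefix2 = f"{name1}: ", f"{name2}: "
    print(f"{prefix1}{user_input}\n{prefix2}", end="")
    # You: Write a haiku.
    # Assistant: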
@@ -86,7 +91,7 @@ def extract_message_from_reply(reply, name1, name2, stop_at_newline):
     reply = fix_newlines(reply)
     return reply, next_character_found

-def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, regenerate=False):
+def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, regenerate=False, mode="cai-chat", end_of_turn=""):
     just_started = True
     eos_token = '\n' if stop_at_newline else None
     name1_original = name1

@@ -105,14 +110,13 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
     if visible_text is None:
         visible_text = text
     if shared.args.chat:
         visible_text = visible_text.replace('\n', '<br>')
     text = apply_extensions(text, "input")

+    is_instruct = mode == 'instruct'
     if custom_generate_chat_prompt is None:
-        prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
+        prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, is_instruct, end_of_turn=end_of_turn)
     else:
-        prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
+        prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, is_instruct, end_of_turn=end_of_turn)

     # Yield *Is typing...*
     if not regenerate:
@@ -129,8 +133,6 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
         reply, next_character_found = extract_message_from_reply(reply, name1, name2, stop_at_newline)
         visible_reply = re.sub("(<USER>|<user>|{{user}})", name1_original, reply)
         visible_reply = apply_extensions(visible_reply, "output")
-        if shared.args.chat:
-            visible_reply = visible_reply.replace('\n', '<br>')

         # We need this global variable to handle the Stop event,
         # otherwise gradio gets confused
@@ -153,13 +155,13 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
     yield shared.history['visible']

-def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1):
+def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, mode="cai-chat", end_of_turn=""):
     eos_token = '\n' if stop_at_newline else None

     if 'pygmalion' in shared.model_name.lower():
         name1 = "You"

-    prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True)
+    prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True, end_of_turn=end_of_turn)

     # Yield *Is typing...*
     yield shared.processing_message
@@ -179,36 +181,30 @@ def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typ
     yield reply

-def cai_chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1):
-    for history in chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts):
-        yield generate_chat_html(history, name1, name2)
+def cai_chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, mode="cai-chat", end_of_turn=""):
+    for history in chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts, regenerate=False, mode=mode, end_of_turn=end_of_turn):
+        yield chat_html_wrapper(history, name1, name2, mode)

-def regenerate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1):
+def regenerate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, mode="cai-chat", end_of_turn=""):
     if (shared.character != 'None' and len(shared.history['visible']) == 1) or len(shared.history['internal']) == 0:
-        yield generate_chat_output(shared.history['visible'], name1, name2)
+        yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
     else:
         last_visible = shared.history['visible'].pop()
         last_internal = shared.history['internal'].pop()
         # Yield '*Is typing...*'
-        yield generate_chat_output(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2)
-        for history in chatbot_wrapper(last_internal[0], max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts, regenerate=True):
-            if shared.args.cai_chat:
-                shared.history['visible'][-1] = [last_visible[0], history[-1][1]]
-            else:
-                shared.history['visible'][-1] = (last_visible[0], history[-1][1])
-        yield generate_chat_output(shared.history['visible'], name1, name2)
+        yield chat_html_wrapper(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, mode)
+        for history in chatbot_wrapper(last_internal[0], max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts, regenerate=True, mode=mode, end_of_turn=end_of_turn):
+            shared.history['visible'][-1] = [last_visible[0], history[-1][1]]
+        yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)

-def remove_last_message(name1, name2):
+def remove_last_message(name1, name2, mode):
     if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
         last = shared.history['visible'].pop()
         shared.history['internal'].pop()
     else:
         last = ['', '']

-    if shared.args.cai_chat:
-        return generate_chat_html(shared.history['visible'], name1, name2), last[0]
-    else:
-        return shared.history['visible'], last[0]
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode), last[0]

 def send_last_reply_to_input():
     if len(shared.history['internal']) > 0:
@@ -216,20 +212,17 @@ def send_last_reply_to_input():
     else:
         return ''

-def replace_last_reply(text, name1, name2):
+def replace_last_reply(text, name1, name2, mode):
     if len(shared.history['visible']) > 0:
-        if shared.args.cai_chat:
-            shared.history['visible'][-1][1] = text
-        else:
-            shared.history['visible'][-1] = (shared.history['visible'][-1][0], text)
+        shared.history['visible'][-1][1] = text
         shared.history['internal'][-1][1] = apply_extensions(text, "input")

-    return generate_chat_output(shared.history['visible'], name1, name2)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)

 def clear_html():
-    return generate_chat_html([], "", "")
+    return chat_html_wrapper([], "", "")

-def clear_chat_log(name1, name2, greeting):
+def clear_chat_log(name1, name2, greeting, mode):
     shared.history['visible'] = []
     shared.history['internal'] = []
@@ -237,12 +230,12 @@ def clear_chat_log(name1, name2, greeting):
         shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
         shared.history['visible'] += [['', apply_extensions(greeting, "output")]]

-    return generate_chat_output(shared.history['visible'], name1, name2)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)

-def redraw_html(name1, name2):
-    return generate_chat_html(shared.history['visible'], name1, name2)
+def redraw_html(name1, name2, mode):
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)

-def tokenize_dialogue(dialogue, name1, name2):
+def tokenize_dialogue(dialogue, name1, name2, mode):
     history = []

     dialogue = re.sub('<START>', '', dialogue)
@@ -339,11 +332,12 @@ def generate_pfp_cache(character):
         return img
     return None

-def load_character(character, name1, name2):
+def load_character(character, name1, name2, instruct=False):
     shared.character = character
     shared.history['internal'] = []
     shared.history['visible'] = []
-    greeting = ""
+    context = greeting = end_of_turn = ""
+    greeting_field = 'greeting'
     picture = None

     # Deleting the profile picture cache, if any
@@ -351,9 +345,10 @@ def load_character(character, name1, name2):
         Path("cache/pfp_character.png").unlink()

     if character != 'None':
+        folder = "characters" if not instruct else "characters/instruction-following"
         picture = generate_pfp_cache(character)
         for extension in ["yml", "yaml", "json"]:
-            filepath = Path(f'characters/{character}.{extension}')
+            filepath = Path(f'{folder}/{character}.{extension}')
             if filepath.exists():
                 break
         file_contents = open(filepath, 'r', encoding='utf-8').read()
@@ -369,19 +364,21 @@ def load_character(character, name1, name2):
         if 'context' in data:
             context = f"{data['context'].strip()}\n\n"
-            greeting_field = 'greeting'
-        else:
+        elif "char_persona" in data:
             context = build_pygmalion_style_context(data)
             greeting_field = 'char_greeting'

-        if 'example_dialogue' in data and data['example_dialogue'] != '':
+        if 'example_dialogue' in data:
             context += f"{data['example_dialogue'].strip()}\n"
-        if greeting_field in data and len(data[greeting_field].strip()) > 0:
+        if greeting_field in data:
             greeting = data[greeting_field]
+        if 'end_of_turn' in data:
+            end_of_turn = data['end_of_turn']
     else:
         context = shared.settings['context']
         name2 = shared.settings['name2']
         greeting = shared.settings['greeting']
+        end_of_turn = shared.settings['end_of_turn']

     if Path(f'logs/{shared.character}_persistent.json').exists():
         load_history(open(Path(f'logs/{shared.character}_persistent.json'), 'rb').read(), name1, name2)
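Taken together, the two hunks above mean an instruction-following character is just a character file placed under characters/instruction-following/ that may carry an extra end_of_turn field. A hypothetical minimal file (the exact key set used by real instruct characters is not shown in this diff, so treat the keys and values as illustrative):

    # hypothetical characters/instruction-following/Example.yaml
    context: 'Below is an instruction that describes a task.'
    greeting: ''
    end_of_turn: ''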
@@ -389,10 +386,7 @@ def load_character(character, name1, name2):
         shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
         shared.history['visible'] += [['', apply_extensions(greeting, "output")]]

-    if shared.args.cai_chat:
-        return name1, name2, picture, greeting, context, generate_chat_html(shared.history['visible'], name1, name2, reset_cache=True)
-    else:
-        return name1, name2, picture, greeting, context, shared.history['visible']
+    return name1, name2, picture, greeting, context, end_of_turn, chat_html_wrapper(shared.history['visible'], name1, name2, reset_cache=True)

 def load_default_history(name1, name2):
     load_character("None", name1, name2)
@@ -423,7 +417,7 @@ def upload_tavern_character(img, name1, name2):
     _json = {"char_name": _json['name'], "char_persona": _json['description'], "char_greeting": _json["first_mes"], "example_dialogue": _json['mes_example'], "world_scenario": _json['scenario']}
     return upload_character(json.dumps(_json), img, tavern=True)

-def upload_your_profile_picture(img, name1, name2):
+def upload_your_profile_picture(img, name1, name2, mode):
     cache_folder = Path("cache")
     if not cache_folder.exists():
         cache_folder.mkdir()
@@ -436,7 +430,4 @@ def upload_your_profile_picture(img, name1, name2):
         img.save(Path('cache/pfp_me.png'))
         print('Profile picture saved to "cache/pfp_me.png"')

-    if shared.args.cai_chat:
-        return generate_chat_html(shared.history['visible'], name1, name2, reset_cache=True)
-    else:
-        return shared.history['visible']
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True)

modules/html_generator.py
@@ -21,6 +21,8 @@ with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r')
     _4chan_css = css_f.read()
 with open(Path(__file__).resolve().parent / '../css/html_cai_style.css', 'r') as f:
     cai_css = f.read()
+with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
+    instruct_css = f.read()

 def fix_newlines(string):
     string = string.replace('\n', '\n\n')
@@ -117,7 +119,39 @@ def get_image_cache(path):
     return image_cache[path][1]

-def generate_chat_html(history, name1, name2, reset_cache=False):
+def generate_instruct_html(history):
+    output = f'<style>{instruct_css}</style><div class="chat" id="chat">'
+    for i, _row in enumerate(history[::-1]):
+        row = [convert_to_markdown(entry) for entry in _row]
+
+        output += f"""
+              <div class="assistant-message">
+                <div class="text">
+                  <div class="message-body">
+                    {row[1]}
+                  </div>
+                </div>
+              </div>
+            """
+
+        if len(row[0]) == 0:  # don't display empty user messages
+            continue
+
+        output += f"""
+              <div class="user-message">
+                <div class="text">
+                  <div class="message-body">
+                    {row[0]}
+                  </div>
+                </div>
+              </div>
+            """
+
+    output += "</div>"
+
+    return output
+
+def generate_cai_chat_html(history, name1, name2, reset_cache=False):
     output = f'<style>{cai_css}</style><div class="chat" id="chat">'

     # The time.time() is to prevent the brower from caching the image
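A quick sanity check of the new renderer; the history value here is invented, and this assumes the module and its CSS files are importable as set up above:

    # Illustrative usage of generate_instruct_html (history is invented).
    from modules.html_generator import generate_instruct_html

    history = [["Write a haiku.", "Code flows like water..."]]
    html = generate_instruct_html(history)
    # html is a single string: a <style> block followed by a
    # <div class="chat" id="chat"> containing one assistant-message
    # and one user-message block per turn.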
@@ -165,3 +199,17 @@ def generate_chat_html(history, name1, name2, reset_cache=False):
     output += "</div>"
     return output
+
+def generate_chat_html(history, name1, name2):
+    return generate_cai_chat_html(history, name1, name2)
+
+def chat_html_wrapper(history, name1, name2, mode="cai-chat", reset_cache=False):
+
+    if mode == "cai-chat":
+        return generate_cai_chat_html(history, name1, name2, reset_cache)
+    elif mode == "chat":
+        return generate_chat_html(history, name1, name2)
+    elif mode == "instruct":
+        return generate_instruct_html(history)
+    else:
+        return ''
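chat_html_wrapper becomes the single dispatch point that chat.py now calls everywhere. A usage sketch (inputs invented):

    from modules.html_generator import chat_html_wrapper

    history = [["Hi!", "Hello there!"]]
    chat_html_wrapper(history, "You", "Assistant", mode="cai-chat")  # Character.AI-style boxes
    chat_html_wrapper(history, "You", "Assistant", mode="instruct")  # new instruct style
    chat_html_wrapper(history, "You", "Assistant", mode="bogus")     # unknown mode -> ''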

modules/shared.py
@@ -33,6 +33,7 @@ settings = {
     'name2': 'Assistant',
     'context': 'This is a conversation with your Assistant. The Assistant is very helpful and is eager to chat with you and answer your questions.',
     'greeting': 'Hello there!',
+    'end_of_turn': '',
     'stop_at_newline': False,
     'chat_prompt_size': 2048,
     'chat_prompt_size_min': 0,
@@ -73,8 +74,8 @@ parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpForma

 # Basic settings
 parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
-parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
-parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.')
+parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.')
+parser.add_argument('--cai-chat', action='store_true', help='DEPRECATED: use --chat instead.')
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
 parser.add_argument('--lora', type=str, help='Name of the LoRA to apply to the model by default.')
 parser.add_argument("--model-dir", type=str, default='models/', help="Path to directory with all the models")
@@ -131,12 +132,17 @@ parser.add_argument("--gradio-auth-path", type=str, help='Set the gradio authent
 args = parser.parse_args()

-# Provisional, this will be deleted later
+# Deprecation warnings for parameters that have been renamed
 deprecated_dict = {'gptq_bits': ['wbits', 0], 'gptq_model_type': ['model_type', None], 'gptq_pre_layer': ['prelayer', 0]}
 for k in deprecated_dict:
     if eval(f"args.{k}") != deprecated_dict[k][1]:
         print(f"Warning: --{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.")
         exec(f"args.{deprecated_dict[k][0]} = args.{k}")

+# Deprecation warnings for parameters that have been removed
+if args.cai_chat:
+    print("Warning: --cai-chat is deprecated. Use --chat instead.")
+    args.chat = True
+
 def is_chat():
-    return any((args.chat, args.cai_chat))
+    return args.chat
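The rename shim above reaches for eval/exec to build attribute names from strings. getattr/setattr can express the same mapping without executing generated code; a sketch of the equivalent logic (not part of the commit, assumes args is the parsed namespace):

    # Equivalent to the eval/exec loop above, using getattr/setattr.
    deprecated = {'gptq_bits': ('wbits', 0), 'gptq_model_type': ('model_type', None), 'gptq_pre_layer': ('prelayer', 0)}
    for old, (new, default) in deprecated.items():
        if getattr(args, old) != default:
            print(f"Warning: --{old} is deprecated and will be removed. Use --{new} instead.")
            setattr(args, new, getattr(args, old))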