diff --git a/README.md b/README.md
index 979706e..adb9060 100644
--- a/README.md
+++ b/README.md
@@ -63,6 +63,10 @@ If I get enough ⭐s on this repository, I will make the process of loading mode
 
 Then browse to `http://localhost:7860/?__theme=dark`
 
+## Presets
+
+Inference settings presets can be created under `presets/` as text files. They are detected automatically at startup and listed in the inference settings dropdown menu.
+
 ## Contributing
 
 Pull requests are welcome.
diff --git a/presets/Default.txt b/presets/Default.txt
new file mode 100644
index 0000000..1f66840
--- /dev/null
+++ b/presets/Default.txt
@@ -0,0 +1,5 @@
+do_sample=True,
+max_new_tokens=max_length,
+top_p=1,
+typical_p=0.3,
+temperature=temperature,
diff --git a/presets/Verbose.txt b/presets/Verbose.txt
new file mode 100644
index 0000000..7321a9c
--- /dev/null
+++ b/presets/Verbose.txt
@@ -0,0 +1,10 @@
+num_beams=10,
+min_length=max_length,
+max_new_tokens=max_length,
+length_penalty=1.4,
+no_repeat_ngram_size=2,
+early_stopping=True,
+temperature=0.7,
+top_k=150,
+top_p=0.92,
+repetition_penalty=4.5,
diff --git a/server.py b/server.py
index 75bc027..5bf9a3b 100644
--- a/server.py
+++ b/server.py
@@ -1,5 +1,6 @@
-import time
 import re
+import time
+import glob
 import torch
 import gradio as gr
 import transformers
@@ -16,6 +17,10 @@ model_name = 'galactica-6.7b'
 #model_name = 'flan-t5'
 #model_name = 'OPT-13B-Erebus'
 
+settings_name = "Default"
+with open(f'presets/{settings_name}.txt', 'r') as infile:
+    preset = infile.read()
+
 def load_model(model_name):
     print(f"Loading {model_name}")
 
@@ -48,7 +53,7 @@ def fix_gpt4chan(s):
     return s
 
 def fn(question, temperature, max_length, inference_settings, selected_model):
-    global model, tokenizer, model_name
+    global model, tokenizer, model_name, settings_name, preset
 
     if selected_model != model_name:
         model_name = selected_model
@@ -56,35 +61,17 @@ def fn(question, temperature, max_length, inference_settings, selected_model):
         tokenier = None
         torch.cuda.empty_cache()
         model, tokenizer = load_model(model_name)
+    if inference_settings != settings_name:
+        with open(f'presets/{inference_settings}.txt', 'r') as infile:
+            preset = infile.read()
+        settings_name = inference_settings
 
     torch.cuda.empty_cache()
 
     input_text = question
     input_ids = tokenizer.encode(str(input_text), return_tensors='pt').cuda()
-    if inference_settings == 'Default':
-        output = model.generate(
-            input_ids,
-            do_sample=True,
-            max_new_tokens=max_length,
-            #max_length=max_length+len(input_ids[0]),
-            top_p=1,
-            typical_p=0.3,
-            temperature=temperature,
-        ).cuda()
-    elif inference_settings == 'Verbose':
-        output = model.generate(
-            input_ids,
-            num_beams=10,
-            min_length=max_length,
-            max_new_tokens=max_length,
-            length_penalty=1.4,
-            no_repeat_ngram_size=2,
-            early_stopping=True,
-            temperature=0.7,
-            top_k=150,
-            top_p=0.92,
-            repetition_penalty=4.5,
-        ).cuda()
+
+    output = eval(f"model.generate(input_ids, {preset}).cuda()")
 
     reply = tokenizer.decode(output[0], skip_special_tokens=True)
     if model_name.startswith('gpt4chan'):
@@ -104,7 +91,7 @@ interface = gr.Interface(
         gr.Textbox(value=default_text, lines=15),
         gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Temperature', value=0.7),
         gr.Slider(minimum=1, maximum=2000, step=1, label='max_length', value=200),
-        gr.Dropdown(choices=["Default", "Verbose"], value="Default"),
+        gr.Dropdown(choices=list(map(lambda x: x.split('/')[-1].split('.')[0], glob.glob("presets/*.txt"))), value="Default"),
         gr.Dropdown(choices=["gpt4chan_model_float16", "galactica-6.7b", "opt-6.7b", "opt-13b", "gpt-neox-20b", "gpt-j-6B-float16", "flan-t5", "bloomz-7b1-p3", "OPT-13B-Erebus"], value=model_name),
     ],
     outputs=[
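
The preset mechanism above works by splicing the raw text of the selected preset file into a `model.generate(...)` call and evaluating it, so names such as `max_length` and `temperature` inside a preset resolve against the local variables of `fn`. As a sketch, with `presets/Default.txt` selected and hypothetical slider values of `max_length=200` and `temperature=0.7`, the `eval` line effectively runs:

```python
# The call that eval builds for presets/Default.txt
# (the concrete values 200 and 0.7 below are hypothetical slider settings):
output = model.generate(
    input_ids,
    do_sample=True,
    max_new_tokens=200,    # preset line: max_new_tokens=max_length
    top_p=1,
    typical_p=0.3,
    temperature=0.7,       # preset line: temperature=temperature
).cuda()
```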
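
Because `eval` executes whatever a preset file contains as Python, presets carry the same trust level as the code itself, so only trusted files should be placed under `presets/`. A safer variant, which is not part of this diff (the helper name `load_preset_kwargs` is hypothetical), would parse the `key=value,` lines with `ast.literal_eval` and resolve only the two variable names the bundled presets rely on:

```python
import ast

def load_preset_kwargs(path, variables):
    """Parse 'key=value,' preset lines into model.generate() kwargs
    without eval'ing arbitrary code. `variables` supplies the values
    for the names presets may reference, e.g. max_length, temperature."""
    kwargs = {}
    with open(path) as f:
        for line in f:
            line = line.strip().rstrip(',')
            if not line:
                continue
            key, _, value = line.partition('=')
            key, value = key.strip(), value.strip()
            try:
                kwargs[key] = ast.literal_eval(value)   # literals: True, 10, 0.92
            except (ValueError, SyntaxError):
                kwargs[key] = variables[value]          # names: max_length, temperature
    return kwargs

# Sketch of the call site inside fn():
# kwargs = load_preset_kwargs(f'presets/{inference_settings}.txt',
#                             {'max_length': max_length, 'temperature': temperature})
# output = model.generate(input_ids, **kwargs)
```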
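
One portability note: the dropdown derives preset names with `x.split('/')[-1].split('.')[0]`, which assumes `/` as the path separator and truncates names containing dots. A sketch of an equivalent that avoids both issues, assuming the same `presets/*.txt` layout:

```python
import glob
from pathlib import Path

# Path.stem drops only the final suffix and is separator-agnostic,
# so this also works on Windows and with dotted preset names.
available_presets = [Path(p).stem for p in glob.glob('presets/*.txt')]
# e.g. ['Default', 'Verbose']
```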