diff --git a/README.md b/README.md
index de6cc2e..30564e2 100644
--- a/README.md
+++ b/README.md
@@ -113,6 +113,8 @@
 Then browse to `http://localhost:7860/?__theme=dark`
 
+
+
 Optionally, you can use the following command-line flags:
 
 | Flag | Description |
 |--------|-------------|
@@ -121,7 +123,7 @@ Optionally, you can use the following command-line flags:
 | `--model MODEL` | Name of the model to load by default. |
 | `--notebook` | Launch the web UI in notebook mode, where the output is written to the same text box as the input. |
 | `--chat` | Launch the web UI in chat mode.|
-| `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file profile.png or profile.jpg exists in the same folder as server.py, this image will be used as the bot's profile picture. |
+| `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture. |
 | `--cpu` | Use the CPU to generate text.|
 | `--load-in-8bit` | Load the model with 8-bit precision.|
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
diff --git a/modules/html_generator.py b/modules/html_generator.py
index e5b6f82..11da068 100644
--- a/modules/html_generator.py
+++ b/modules/html_generator.py
@@ -196,7 +196,7 @@ def generate_chat_html(history, name1, name2, character):
               border-radius: 50%;
             }
 
-            .circle-bot img {
+            .circle-bot img, .circle-you img {
               border-radius: 50%;
               width: 100%;
               height: 100%;
@@ -225,15 +225,21 @@ def generate_chat_html(history, name1, name2, character):
             f"characters/{character}.png",
             f"characters/{character}.jpg",
             f"characters/{character}.jpeg",
-            "profile.png",
-            "profile.jpg",
-            "profile.jpeg",
+            "img_bot.png",
+            "img_bot.jpg",
+            "img_bot.jpeg"
             ]:
         if Path(i).exists():
             img = f'<img src="file/{i}">'
             break
 
+    img_you = ''
+    for i in ["img_me.png", "img_me.jpg", "img_me.jpeg"]:
+        if Path(i).exists():
+            img_you = f'<img src="file/{i}">'
+            break
+
     for i,_row in enumerate(history[::-1]):
         row = _row.copy()
         row[0] = re.sub(r"[\\]*\*", r"*", row[0])
@@ -262,6 +268,7 @@ def generate_chat_html(history, name1, name2, character):
         output += f"""
               <div class="message">
                 <div class="circle-you">
+                  {img_you}
                 </div>
                 <div class="text">
                   <div class="username">
diff --git a/requirements.txt b/requirements.txt
index d59ee7c..0338b0f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,7 +20,7 @@ filelock==3.9.0
 fonttools==4.38.0
 frozenlist==1.3.3
 fsspec==2022.11.0
-gradio==3.16.2
+gradio==3.15.0
 h11==0.14.0
 httpcore==0.16.3
 httpx==0.23.1
diff --git a/server.py b/server.py
index d08b6c8..654cd01 100644
--- a/server.py
+++ b/server.py
@@ -22,7 +22,7 @@ parser = argparse.ArgumentParser()
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
 parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
 parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
-parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file profile.png or profile.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture.')
+parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
@@ -80,7 +80,6 @@ def load_model(model_name):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()
-
     # Custom
     else:
         settings = ["low_cpu_mem_usage=True"]
@@ -186,8 +185,9 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
         t = encode(stopping_string, 0, add_special_tokens=False)
         stopping_criteria_list = transformers.StoppingCriteriaList([
             _SentinelTokenStoppingCriteria(
-                                           sentinel_token_ids=t,
-                                           starting_idx=len(input_ids[0]))
+                sentinel_token_ids=t,
+                starting_idx=len(input_ids[0])
+            )
         ])
     else:
         stopping_criteria_list = None
@@ -366,7 +366,6 @@ if args.chat or args.cai_chat:
             load_character(_character, name1, name2)
     else:
         history = []
-
     _history = remove_example_dialogue_from_history(history)
     if args.cai_chat:
         return generate_chat_html(_history, name1, name2, character)
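For reference, the bot-picture lookup that this patch edits in `generate_chat_html()` boils down to a first-match search over a list of candidate paths. A minimal standalone sketch of that logic (`find_bot_picture` is a hypothetical helper name, not a function in this repo):

```python
from pathlib import Path

def find_bot_picture(character):
    # Candidate paths in priority order: a character-specific image under
    # characters/ wins over the generic img_bot.* fallbacks next to server.py.
    candidates = [f"characters/{character}.{ext}" for ext in ("png", "jpg", "jpeg")]
    candidates += [f"img_bot.{ext}" for ext in ("png", "jpg", "jpeg")]
    # Return an <img> tag for the first file that exists, or '' for no picture.
    for path in candidates:
        if Path(path).exists():
            return f'<img src="file/{path}">'
    return ''
```

The user-picture search added in this patch works the same way, minus the character-specific entries: it only tries img_me.png, img_me.jpg, and img_me.jpeg.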
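The `_SentinelTokenStoppingCriteria` hunk in server.py only reindents the call; the behavior is unchanged. For context, a criterion of this kind is typically implemented along these lines. This is a sketch under the assumption that the sentinel is compared against the tail of the newly generated tokens; the repo's private class may differ in detail:

```python
import torch
import transformers

class SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
    def __init__(self, sentinel_token_ids, starting_idx):
        super().__init__()
        self.sentinel_token_ids = sentinel_token_ids  # shape (1, n): the encoded stopping string
        self.starting_idx = starting_idx              # prompt length in tokens, i.e. len(input_ids[0])

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        n = self.sentinel_token_ids.shape[-1]
        for sample in input_ids:
            generated = sample[self.starting_idx:]  # look only at newly generated tokens
            # Stop as soon as a sample's generated text ends with the sentinel sequence.
            if generated.shape[0] >= n and torch.equal(generated[-n:], self.sentinel_token_ids[0]):
                return True
        return False
```

As in the hunk above, an instance is wrapped in `transformers.StoppingCriteriaList([...])` and passed to `model.generate()` via its `stopping_criteria` argument.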