Add extension example, replace input_hijack with chat_input_modifier (#3307)

oobabooga 2023-07-25 18:49:56 -03:00 committed by GitHub
parent 08c622df2e
commit ef8637e32d
10 changed files with 335 additions and 100 deletions
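
Per the commit title, the old input_hijack mechanism is replaced by a chat_input_modifier hook that an extension can define in its script.py. A minimal sketch of what such a hook can look like, modeled on the extension example this commit adds (the docstring wording and the example modification below are illustrative, not taken from the commit):

def chat_input_modifier(text, visible_text, state):
    """
    Modifies the user input in chat mode. 'text' is the string that goes
    into the prompt, 'visible_text' is what appears in the chat log, and
    'state' holds the current interface/generation parameters.
    """
    # Illustrative only: tag the internal prompt without changing the
    # text shown to the user.
    return text + ' [via chat_input_modifier]', visible_text

Roughly speaking, the old input_hijack approach had an extension set a module-level dict that modules/chat.py then inspected; the new hook simply returns the modified (text, visible_text) pair.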

@@ -7,10 +7,15 @@ from modules import shared
 from modules.chat import generate_chat_reply
 from modules.LoRA import add_lora_to_model
 from modules.models import load_model, unload_model
-from modules.models_settings import (get_model_settings_from_yamls,
-                                     update_model_parameters)
-from modules.text_generation import (encode, generate_reply,
-                                     stop_everything_event)
+from modules.models_settings import (
+    get_model_settings_from_yamls,
+    update_model_parameters
+)
+from modules.text_generation import (
+    encode,
+    generate_reply,
+    stop_everything_event
+)
 from modules.utils import get_available_models

@@ -2,12 +2,15 @@ import asyncio
 import json
 from threading import Thread
-from websockets.server import serve
-from extensions.api.util import build_parameters, try_start_cloudflared, with_api_lock
+from extensions.api.util import (
+    build_parameters,
+    try_start_cloudflared,
+    with_api_lock
+)
 from modules import shared
 from modules.chat import generate_chat_reply
 from modules.text_generation import generate_reply
+from websockets.server import serve
 PATH = '/api/v1/stream'
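
For reference, PATH above is the websocket route this extension serves (on port 5005 by default in this era of the project, if the defaults are remembered correctly). A hedged client sketch, adapted from the repo's api-examples; the payload fields and event names ('text_stream', 'stream_end') are assumptions based on that example code:

import asyncio
import json

import websockets  # third-party client library, not part of this repo

URI = 'ws://localhost:5005/api/v1/stream'  # assumed default host/port

async def stream(prompt):
    # Any generation parameters left out of the request fall back to server defaults.
    request = {'prompt': prompt, 'max_new_tokens': 200}
    async with websockets.connect(URI, ping_interval=None) as websocket:
        await websocket.send(json.dumps(request))
        while True:
            message = json.loads(await websocket.recv())
            if message['event'] == 'text_stream':
                print(message['text'], end='', flush=True)
            elif message['event'] == 'stream_end':
                break

asyncio.run(stream('Hello, how are you?'))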

@@ -10,7 +10,6 @@ from modules import shared
from modules.chat import load_character_memoized
from modules.presets import load_preset_memoized
# We use a thread local to store the asyncio lock, so that each thread
# has its own lock. This isn't strictly necessary, but it makes it
# such that if we can support multiple worker threads in the future,