Merge branch 'main' into Brawlence-main

This commit is contained in:
oobabooga 2023-03-19 13:09:59 -03:00
commit eab8de0d4a
34 changed files with 1038 additions and 645 deletions

View file

@@ -0,0 +1 @@
flask_cloudflared==0.0.12

90
extensions/api/script.py Normal file
View file

@@ -0,0 +1,90 @@
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from threading import Thread
from modules import shared
from modules.text_generation import generate_reply, encode
import json
# Extension settings read by run_server(); 'port' is the TCP port the
# KoboldAI-compatible API listens on (the cloudflared tunnel in
# run_server() additionally uses port + 1 as its metrics port).
params = {
    'port': 5000,
}
class Handler(BaseHTTPRequestHandler):
    """Minimal KoboldAI-compatible HTTP API handler.

    Endpoints:
      GET  /api/v1/model    -> {"result": <name of the loaded model>}
      POST /api/v1/generate -> {"results": [{"text": <generated reply>}]}

    Any other path gets a 404.
    """

    def do_GET(self):
        if self.path == '/api/v1/model':
            self.send_response(200)
            # Consistency fix: declare JSON like do_POST does.
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            response = json.dumps({
                'result': shared.model_name
            })
            self.wfile.write(response.encode('utf-8'))
        else:
            self.send_error(404)

    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        body = json.loads(self.rfile.read(content_length).decode('utf-8'))

        if self.path == '/api/v1/generate':
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()

            prompt = body['prompt']
            prompt_lines = [l.strip() for l in prompt.split('\n')]
            max_context = body.get('max_context_length', 2048)

            # Drop the oldest lines until the prompt fits the context window.
            # BUG FIX: the original condition was `len(prompt_lines) >= 0`,
            # which is always true, so termination relied solely on the token
            # count; a single over-long line would eventually pop() from an
            # empty list and raise IndexError. `> 0` guarantees termination.
            while len(prompt_lines) > 0 and len(encode('\n'.join(prompt_lines))) > max_context:
                prompt_lines.pop(0)

            prompt = '\n'.join(prompt_lines)

            # Sampling parameters mirror the KoboldAI request schema, with
            # its defaults, mapped onto generate_reply()'s keyword names.
            generator = generate_reply(
                question=prompt,
                max_new_tokens=body.get('max_length', 200),
                do_sample=True,
                temperature=body.get('temperature', 0.5),
                top_p=body.get('top_p', 1),
                typical_p=body.get('typical', 1),
                repetition_penalty=body.get('rep_pen', 1.1),
                encoder_repetition_penalty=1,
                top_k=body.get('top_k', 0),
                min_length=0,
                no_repeat_ngram_size=0,
                num_beams=1,
                penalty_alpha=0,
                length_penalty=1,
                early_stopping=False,
            )

            # generate_reply() streams; keep only the final answer.
            answer = ''
            for a in generator:
                answer = a[0]

            # Strip the echoed prompt so only the new text is returned.
            response = json.dumps({
                'results': [{
                    'text': answer[len(prompt):]
                }]
            })
            self.wfile.write(response.encode('utf-8'))
        else:
            self.send_error(404)
def run_server():
    """Start the KoboldAI-compatible HTTP server; blocks forever.

    Binds to all interfaces when --listen is set, loopback otherwise.
    With --share, a cloudflared tunnel is opened and its public URL is
    printed instead of the local address.
    """
    host = '0.0.0.0' if shared.args.listen else '127.0.0.1'
    server_addr = (host, params['port'])
    server = ThreadingHTTPServer(server_addr, Handler)

    if not shared.args.share:
        print(f'Starting KoboldAI compatible api at http://{server_addr[0]}:{server_addr[1]}/api')
    else:
        try:
            from flask_cloudflared import _run_cloudflared
            public_url = _run_cloudflared(params['port'], params['port'] + 1)
            print(f'Starting KoboldAI compatible api at {public_url}/api')
        except ImportError:
            print('You should install flask_cloudflared manually')

    server.serve_forever()
def ui():
    """Launch the API server on a daemon thread so it never blocks shutdown."""
    server_thread = Thread(target=run_server, daemon=True)
    server_thread.start()

View file

@@ -1,8 +1,8 @@
from pathlib import Path
import gradio as gr
from elevenlabslib import *
from elevenlabslib.helpers import *
from elevenlabslib import ElevenLabsUser
from elevenlabslib.helpers import save_bytes_to_path
params = {
'activate': True,

View file

@@ -76,7 +76,7 @@ def generate_html():
return container_html
def ui():
    """Build the character-gallery accordion (collapsed) with a refresh button."""
    with gr.Accordion("Character gallery", open=False):
        refresh_button = gr.Button("Refresh")
        gallery_html = gr.HTML(value=generate_html())
        # Re-render the gallery HTML whenever the button is pressed.
        refresh_button.click(generate_html, [], gallery_html)

View file

@@ -0,0 +1,4 @@
git+https://github.com/Uberi/speech_recognition.git@010382b
openai-whisper
soundfile
ffmpeg

View file

@@ -0,0 +1,54 @@
import gradio as gr
import speech_recognition as sr
# Module-level hook written by do_stt() and update_hijack(): 'state' is set
# to True and 'value' to a [text, text] pair holding the transcription.
# NOTE(review): presumably read (and reset) by the host application to
# substitute the next chat input — confirm against the caller.
input_hijack = {
    'state': False,
    'value': ["", ""]
}
def do_stt(audio, text_state=""):
    """Transcribe a gradio microphone capture with Whisper.

    audio: (sample_rate, frame_data) tuple as produced by gr.Audio.
    text_state: accumulated transcript; the new text is appended to it.
    Returns (state, state) so the same value feeds both the preview
    textbox and the gr.State holder. Also arms the input_hijack hook.
    """
    recognizer = sr.Recognizer()

    # Wrap the raw capture for the speech_recognition library.
    # NOTE(review): sample_width=4 assumes 4-byte samples and frame_data is
    # forwarded as-is from gradio — confirm the mic component really
    # delivers data in that format.
    audio_data = sr.AudioData(sample_rate=audio[0], frame_data=audio[1], sample_width=4)

    transcription = ""
    try:
        transcription = recognizer.recognize_whisper(audio_data, language="english", model="base.en")
    except sr.UnknownValueError:
        print("Whisper could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Whisper", e)

    input_hijack.update({"state": True, "value": [transcription, transcription]})
    text_state += transcription + " "
    return text_state, text_state
def update_hijack(val):
    """Mirror a manual edit of the preview textbox into the input hijack."""
    input_hijack["state"] = True
    input_hijack["value"] = [val, val]
    return val
def auto_transcribe(audio, audio_auto, text_state=""):
    """Transcribe on microphone stop, but only when auto mode is enabled.

    Returns the (textbox, state) pair from do_stt(), or two empty strings
    when there is no capture or auto-transcription is switched off.
    """
    if audio is None or not audio_auto:
        return "", ""
    return do_stt(audio, text_state)
def ui():
    """Wire the STT widgets: preview textbox, auto toggle, mic, and button."""
    transcript_state = gr.State(value="")

    preview = gr.Textbox(label="STT-Input",
                         placeholder="Speech Preview. Click \"Generate\" to send",
                         interactive=True)
    # Manual edits of the preview must also arm the input hijack.
    preview.change(fn=update_hijack, inputs=[preview], outputs=[transcript_state])

    auto_checkbox = gr.Checkbox(label="Auto-Transcribe", value=True)

    with gr.Row():
        mic = gr.Audio(source="microphone")
        mic.change(fn=auto_transcribe,
                   inputs=[mic, auto_checkbox, transcript_state],
                   outputs=[preview, transcript_state])
        transcribe_btn = gr.Button(value="Transcribe")
        transcribe_btn.click(do_stt,
                             inputs=[mic, transcript_state],
                             outputs=[preview, transcript_state])