New universal API with streaming/blocking endpoints (#990)
Previous title: Add api_streaming extension and update api-example-stream to use it * Merge with latest main * Add parameter capturing encoder_repetition_penalty * Change some defaults, minor fixes * Add --api, --public-api flags * remove unneeded/broken comment from blocking API startup. The comment is already correctly emitted in try_start_cloudflared by calling the lambda we pass in. * Update on_start message for blocking_api, it should say 'non-streaming' and not 'streaming' * Update the API examples * Change a comment * Update README * Remove the gradio API * Remove unused import * Minor change * Remove unused import --------- Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
This commit is contained in:
parent
459e725af9
commit
654933c634
12 changed files with 346 additions and 286 deletions
|
@ -1,57 +1,42 @@
|
|||
'''
This is an example on how to use the API for oobabooga/text-generation-webui.

Make sure to start the web UI with the following flags:

python server.py --model MODEL --listen --api

Optionally, you can also add the --public-api flag to expose the API through
a public tunnel, allowing you to use the API remotely.
'''
|
||||
import json
|
||||
|
||||
import requests
|
||||
|
||||
# Address of the legacy gradio server (still referenced by the old
# /run/textgen code path).
server = "127.0.0.1"

# Locally the API is served over plain http:// (no ssl); a reverse proxy
# in front of it would typically use https:// instead.
HOST = 'localhost:5000'
URI = 'http://' + HOST + '/api/v1/generate'
|
||||
|
||||
# Default generation parameters for the API request.
# Reference: https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig
params = dict(
    max_new_tokens=200,
    do_sample=True,
    temperature=0.72,
    top_p=0.73,
    typical_p=1,
    repetition_penalty=1.1,
    encoder_repetition_penalty=1.0,
    top_k=0,
    min_length=0,
    no_repeat_ngram_size=0,
    num_beams=1,
    penalty_alpha=0,
    length_penalty=1,
    early_stopping=False,
    seed=-1,
    add_bos_token=True,
    truncation_length=2048,
    ban_eos_token=False,
    skip_special_tokens=True,
    stopping_strings=[],
)

# For reverse-proxied streaming, the remote will likely host with ssl - https://
# URI = 'https://your-uri-here.trycloudflare.com/api/v1/generate'

# Input prompt
prompt = "What I would like to say is the following: "
|
||||
def run(context):
    """Send *context* to the blocking generation endpoint and print the result.

    Posts the prompt together with a full set of generation parameters to
    ``URI`` (``/api/v1/generate``) and, on HTTP 200, prints the prompt
    followed by the generated continuation. Non-200 responses are silently
    ignored, matching the example's best-effort style.
    """
    # Bug fix: the original ignored the `context` parameter and used the
    # global `prompt` instead; it also still contained a stale gradio
    # /run/textgen call that clobbered `response` with a dict before
    # `response.status_code` was read (AttributeError). The gradio path
    # and the unused json.dumps payload have been removed.
    request = {
        'prompt': context,
        'max_new_tokens': 250,
        'do_sample': True,
        'temperature': 1.3,
        'top_p': 0.1,
        'typical_p': 1,
        'repetition_penalty': 1.18,
        'top_k': 40,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0,
        'length_penalty': 1,
        'early_stopping': False,
        'seed': -1,
        'add_bos_token': True,
        'truncation_length': 2048,
        'ban_eos_token': False,
        'skip_special_tokens': True,
        'stopping_strings': []
    }

    response = requests.post(URI, json=request)

    if response.status_code == 200:
        result = response.json()['results'][0]['text']
        print(context + result)
|
||||
if __name__ == '__main__':
    # Replace the default prompt with a concrete example and send it
    # through the blocking generation endpoint.
    prompt = "In order to make homemade bread, follow these steps:\n1)"
    run(prompt)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue