Create llamacpp_HF loader (#3062)

oobabooga 2023-07-16 02:21:13 -03:00 committed by GitHub
parent 7c4d4fc7d3
commit 5e3f7e00a9
5 changed files with 143 additions and 1 deletion

@@ -204,7 +204,7 @@ def create_model_menus():
with gr.Row():
with gr.Column():
shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=["Transformers", "ExLlama_HF", "AutoGPTQ", "llama.cpp", "ExLlama", "GPTQ-for-LLaMa"], value=None)
shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=["Transformers", "ExLlama_HF", "AutoGPTQ", "llama.cpp", "ExLlama", "llama.cpp_HF", "GPTQ-for-LLaMa"], value=None)
with gr.Box():
with gr.Row():
with gr.Column():
@@ -250,6 +250,7 @@ def create_model_menus():
shared.gradio['gptq_for_llama_info'] = gr.Markdown('GPTQ-for-LLaMa is currently 2x faster than AutoGPTQ on some systems. It is installed by default with the one-click installers. Otherwise, it has to be installed manually following the instructions here: [instructions](https://github.com/oobabooga/text-generation-webui/blob/main/docs/GPTQ-models-(4-bit-mode).md#installation-1).')
shared.gradio['exllama_info'] = gr.Markdown('For more information, consult the [docs](https://github.com/oobabooga/text-generation-webui/blob/main/docs/ExLlama.md).')
shared.gradio['exllama_HF_info'] = gr.Markdown('ExLlama_HF is a wrapper that lets you use ExLlama like a Transformers model, which means it can use the Transformers samplers. It\'s a bit slower than the regular ExLlama.')
+ shared.gradio['llamacpp_HF_info'] = gr.Markdown('llamacpp_HF is a wrapper that lets you use llama.cpp like a Transformers model, which means it can use the Transformers samplers. It works, but it\'s experimental and slow. Contributions are welcome.\n\nTo use it, make sure to first download oobabooga/llama-tokenizer under "Download custom model or LoRA".')
with gr.Column():
with gr.Row():
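
The new `llamacpp_HF_info` text above summarizes the idea behind the loader: llama.cpp runs the forward pass and produces logits, while the Transformers sampling machinery shapes the output distribution, with tokenization handled by the separately downloaded oobabooga/llama-tokenizer. The sketch below is a rough illustration of that split under assumed names, not the wrapper added in this commit; the GGML model path and generation settings are placeholders, and it assumes llama-cpp-python and transformers are installed.

```python
# Minimal sketch (assumed names and paths, not the code added by this commit) of
# the split described in llamacpp_HF_info: llama.cpp computes the logits and the
# Transformers sampling utilities ("samplers") turn them into the next token.
import torch
from llama_cpp import Llama
from transformers import (AutoTokenizer, LogitsProcessorList,
                          TemperatureLogitsWarper, TopPLogitsWarper)

# Tokenization happens on the Transformers side, which is why the info text asks
# you to download oobabooga/llama-tokenizer first.
tokenizer = AutoTokenizer.from_pretrained("oobabooga/llama-tokenizer")
llm = Llama(model_path="models/llama-7b.ggmlv3.q4_0.bin",  # placeholder path
            logits_all=True)

# Any Transformers logits processors/warpers can be plugged in here.
samplers = LogitsProcessorList([TemperatureLogitsWarper(0.7), TopPLogitsWarper(0.9)])

input_ids = tokenizer("The capital of France is", return_tensors="pt").input_ids[0].tolist()
for _ in range(32):
    # Naive: re-evaluate the whole prompt every step instead of reusing the KV cache.
    llm.reset()
    llm.eval(input_ids)
    logits = torch.tensor(llm.scores[llm.n_tokens - 1]).unsqueeze(0)

    # Transformers samplers reshape the distribution before the token is drawn.
    scores = samplers(torch.tensor([input_ids]), logits)
    probs = torch.softmax(scores, dim=-1)
    next_token = torch.multinomial(probs, num_samples=1).item()
    if next_token == tokenizer.eos_token_id:
        break
    input_ids.append(next_token)

print(tokenizer.decode(input_ids))
```

Re-running the full prompt on every step keeps the sketch short but is much slower than reusing llama.cpp's evaluation state, which is in the spirit of the "experimental and slow" caveat in the info text.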