Bump llama-cpp-python to 0.2.24 (#5001)

parent 83cf1a6b67
commit 0a299d5959

15 changed files with 104 additions and 96 deletions
@@ -204,6 +204,7 @@ class LlamacppHF(PreTrainedModel):
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'logits_all': shared.args.logits_all,
+            'offload_kqv': not shared.args.no_offload_kqv
         }
 
         Llama = llama_cpp_lib().Llama
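
The new 'offload_kqv' entry is forwarded straight to llama_cpp.Llama, so it relies on the bumped llama-cpp-python build exposing that constructor argument. As a hedged sketch (not part of this commit), the params dict could be filtered against the constructor signature so older builds do not reject the unknown keyword:

# Illustrative sketch only: drop dict keys that the installed llama-cpp-python
# constructor does not accept. 'params' here is a reduced stand-in for the
# dict built in the diff above.
import inspect

from llama_cpp import Llama

params = {
    'logits_all': False,
    'offload_kqv': True,  # only understood by newer llama-cpp-python builds
}

accepted = inspect.signature(Llama.__init__).parameters
params = {k: v for k, v in params.items() if k in accepted}
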
@@ -86,6 +86,7 @@ class LlamaCppModel:
             'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
+            'offload_kqv': not shared.args.no_offload_kqv
         }
 
         result.model = Llama(**params)
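
For reference, the same setting can be exercised directly against llama-cpp-python; only offload_kqv mirrors the diff, and the model path and layer counts below are placeholders, not values from this commit:

# Standalone usage sketch; path, context size and GPU layers are hypothetical.
from llama_cpp import Llama

no_offload_kqv = False  # stands in for shared.args.no_offload_kqv

llm = Llama(
    model_path='models/example-7b.Q4_K_M.gguf',  # placeholder path
    n_ctx=2048,
    n_gpu_layers=35,
    offload_kqv=not no_offload_kqv,  # keep K, Q, V on the GPU unless disabled
)
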
@@ -43,6 +43,7 @@ loaders_and_params = OrderedDict({
         'compress_pos_emb',
         'cpu',
         'numa',
+        'no_offload_kqv'
     ],
     'llamacpp_HF': [
         'n_ctx',

@@ -63,6 +64,7 @@ loaders_and_params = OrderedDict({
         'trust_remote_code',
         'no_use_fast',
         'logits_all',
+        'no_offload_kqv',
         'llamacpp_HF_info',
     ],
     'ExLlamav2_HF': [
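
loaders_and_params maps each loader name to the UI element names it exposes, which is why 'no_offload_kqv' is added to both llama.cpp entries above. Below is a minimal sketch of how such a mapping can drive element visibility; the helper name and the reduced mapping are hypothetical, not the repo's actual functions:

# Hypothetical helper, not from the repository: derive which elements a
# loader should display from a reduced loaders_and_params mapping.
from collections import OrderedDict

loaders_and_params = OrderedDict({
    'llama.cpp': ['n_ctx', 'numa', 'no_offload_kqv'],
    'llamacpp_HF': ['n_ctx', 'logits_all', 'no_offload_kqv'],
})

def visible_elements(loader: str) -> set:
    # Elements not listed for the loader would be hidden in the UI.
    return set(loaders_and_params.get(loader, []))

assert 'no_offload_kqv' in visible_elements('llama.cpp')
assert 'no_offload_kqv' in visible_elements('llamacpp_HF')
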
@@ -117,6 +117,7 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--tensor_split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17.')
 parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp.')
 parser.add_argument('--logits_all', action='store_true', help='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.')
+parser.add_argument('--no_offload_kqv', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 
 # ExLlama
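
The flag itself is a plain argparse store_true switch. A reduced sketch showing just the new argument in isolation (the real parser defines many more options than this):

# Reduced sketch: only the new flag, outside the project's full parser.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--no_offload_kqv', action='store_true',
                    help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')

args = parser.parse_args(['--no_offload_kqv'])
print(not args.no_offload_kqv)  # the value handed to llama.cpp as offload_kqv; prints False here
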
@@ -91,6 +91,7 @@ def list_model_elements():
         'rope_freq_base',
         'numa',
         'logits_all',
+        'no_offload_kqv',
         'hqq_backend',
     ]
     if is_torch_xpu_available():
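
The names returned by list_model_elements() line up with attribute names on the parsed arguments (compare 'numa', 'logits_all' and 'no_offload_kqv' in the other hunks), so settings can be collected generically. The snippet below is an illustration with stand-in values, not code from the repo:

# Illustration only: gather current values for a few element names using a
# stand-in namespace instead of shared.args.
from types import SimpleNamespace

args = SimpleNamespace(numa=False, logits_all=False, no_offload_kqv=True)
elements = ['numa', 'logits_all', 'no_offload_kqv']

current = {name: getattr(args, name) for name in elements}
# -> {'numa': False, 'logits_all': False, 'no_offload_kqv': True}
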
@@ -115,6 +115,7 @@ def create_ui():
                     shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
                     shared.gradio['numa'] = gr.Checkbox(label="numa", value=shared.args.numa, info='NUMA support can help on some systems with non-uniform memory access.')
                     shared.gradio['cpu'] = gr.Checkbox(label="cpu", value=shared.args.cpu)
+                    shared.gradio['no_offload_kqv'] = gr.Checkbox(label="no_offload_kqv", value=shared.args.no_offload_kqv, info='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
                     shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
                     shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
                     shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
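
On the UI side the change is a single gr.Checkbox stored under the same 'no_offload_kqv' key. A self-contained sketch of that wiring, with a placeholder default instead of shared.args:

# Standalone sketch: the checkbox label, default value and info text mirror
# the diff, while the surrounding rows/columns of the real layout are omitted.
import gradio as gr

no_offload_kqv_default = False  # placeholder for shared.args.no_offload_kqv

with gr.Blocks() as demo:
    no_offload_kqv = gr.Checkbox(
        label="no_offload_kqv",
        value=no_offload_kqv_default,
        info='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.',
    )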