Bump llama-cpp-python, +tensor_split by @shouyiwang, +mul_mat_q (#3610)

commit 7cba000421
parent 4b69f4f6ae
Author: oobabooga
Date: 2023-08-18 12:03:34 -03:00
Committed by: GitHub
8 changed files with 31 additions and 2 deletions

modules/llamacpp_hf.py

@@ -102,6 +102,12 @@ class LlamacppHF(PreTrainedModel):
             model_file = list(path.glob('*ggml*.bin'))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
+
+        if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '':
+            tensor_split_list = None
+        else:
+            tensor_split_list = [float(x) for x in shared.args.tensor_split.strip().split(",")]
+
         params = {
             'model_path': str(model_file),
             'n_ctx': shared.args.n_ctx,
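
The added block parses the new tensor_split flag, a comma-separated string of per-GPU split proportions, into the list of floats that llama.cpp expects, or None when the flag is unset. A minimal standalone sketch of the same parsing logic; the function name and the "60,40" input are illustrative, not from the commit:

    def parse_tensor_split(value):
        # Unset or blank flag: let llama.cpp choose its own split.
        if value is None or value.strip() == '':
            return None
        # "60,40" -> [60.0, 40.0]: relative share of the model per GPU.
        return [float(x) for x in value.strip().split(",")]

    assert parse_tensor_split(None) is None
    assert parse_tensor_split("60,40") == [60.0, 40.0]

A malformed entry such as "60,forty" raises ValueError from float(); the code in the commit applies the same strict parsing.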
@@ -110,9 +116,11 @@ class LlamacppHF(PreTrainedModel):
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
             'use_mlock': shared.args.mlock,
+            'mul_mat_q': shared.args.mul_mat_q,
             'low_vram': shared.args.low_vram,
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64 / 63.),
+            'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'n_gqa': shared.args.n_gqa or None,
             'rms_norm_eps': shared.args.rms_norm_eps or None,
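
The params dict built above is presumably unpacked into the llama-cpp-python constructor outside the hunks shown. A sketch of that call, assuming the bumped llama-cpp-python accepts these keyword arguments; the values below are placeholders, not the commit's:

    from llama_cpp import Llama

    params = {
        'model_path': '/models/example.ggml.bin',  # placeholder path
        'n_gpu_layers': 35,                        # layers offloaded to GPU
        'tensor_split': [60.0, 40.0],              # proportional split across two GPUs
        'mul_mat_q': True,                         # enable llama.cpp's quantized matmul kernels
    }
    model = Llama(**params)  # each key must match a Llama.__init__ parameter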