Add rope_freq_base parameter for CodeLlama
This commit is contained in:
parent
feecd8190f
commit
52ab2a6b9e
10 changed files with 26 additions and 17 deletions
|
@@ -3,7 +3,7 @@ from functools import partial
|
|||
|
||||
import torch
|
||||
|
||||
from modules import shared
|
||||
from modules import RoPE, shared
|
||||
from modules.callbacks import Iteratorize
|
||||
from modules.logging_colors import logger
|
||||
from modules.text_generation import get_max_prompt_length
|
||||
|
@@ -72,7 +72,7 @@ class LlamaCppModel:
|
|||
'mul_mat_q': shared.args.mul_mat_q,
|
||||
'low_vram': shared.args.low_vram,
|
||||
'n_gpu_layers': shared.args.n_gpu_layers,
|
||||
'rope_freq_base': 10000 * shared.args.alpha_value ** (64 / 63.),
|
||||
'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
|
||||
'tensor_split': tensor_split_list,
|
||||
'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
|
||||
'n_gqa': shared.args.n_gqa or None,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue