Add llama-2-70b GGML support (#3285)
parent 6f4830b4d3
commit a07d070b6c
8 changed files with 20 additions and 4 deletions
@@ -106,6 +106,8 @@ class LlamacppHF(PreTrainedModel):
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64/63.),
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
+            'n_gqa': shared.args.n_gqa or None,
+            'rms_norm_eps': shared.args.rms_norm_eps or None,
             'logits_all': True,
         }
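Both new keys reuse the `or None` convention already applied to other optional llama.cpp settings: the command-line defaults are 0, which is falsy, so unset flags are forwarded as None and the backend keeps its own built-in values. A minimal, self-contained sketch of that fallback (the helper name is made up for illustration; only the `or None` expressions come from the diff):

# Illustrative helper (not part of the codebase): a default of 0 becomes None,
# so llama.cpp falls back to its own defaults when the flags are not set.
def optional_gqa_settings(n_gqa: int = 0, rms_norm_eps: float = 0.0) -> dict:
    return {
        'n_gqa': n_gqa or None,
        'rms_norm_eps': rms_norm_eps or None,
    }

print(optional_gqa_settings())         # {'n_gqa': None, 'rms_norm_eps': None}
print(optional_gqa_settings(8, 1e-5))  # values llama-2-70b needs: {'n_gqa': 8, 'rms_norm_eps': 1e-05}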
@@ -53,6 +53,8 @@ class LlamaCppModel:
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64/63.),
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
+            'n_gqa': shared.args.n_gqa or None,
+            'rms_norm_eps': shared.args.rms_norm_eps or None,
         }

         result.model = Llama(**params)
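For context, the assembled params dict is passed straight to llama-cpp-python's Llama constructor in the line above. A hedged sketch of the equivalent direct call for a 70B GGML file; the model path is hypothetical, and the n_gqa/rms_norm_eps keywords assume a llama-cpp-python build recent enough to expose them:

from llama_cpp import Llama

# Hypothetical path; point this at wherever the GGML file actually lives.
model = Llama(
    model_path='models/llama-2-70b.ggmlv3.q4_K_M.bin',
    n_ctx=2048,
    n_gpu_layers=0,
    n_gqa=8,            # must be 8 for llama-2-70b
    rms_norm_eps=1e-5,  # must be 1e-5 for llama-2-70b
)
print(model('Q: What is 2 + 2? A:', max_tokens=8)['choices'][0]['text'])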
@@ -30,6 +30,8 @@ loaders_and_params = {
     ],
     'llama.cpp': [
         'n_ctx',
+        'n_gqa',
+        'rms_norm_eps',
         'n_gpu_layers',
         'n_batch',
         'threads',
@@ -42,6 +44,8 @@ loaders_and_params = {
     ],
     'llamacpp_HF': [
         'n_ctx',
+        'n_gqa',
+        'rms_norm_eps',
         'n_gpu_layers',
         'n_batch',
         'threads',
@@ -127,6 +127,8 @@ parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity.
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
+parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama2 70b.')
+parser.add_argument('--rms_norm_eps', type=float, default=0, help='Must be 1e-5 for llama2 70b.')

 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
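The two flags mirror the keys added to the loader code above. A small stand-alone sketch of how they parse, using a throwaway parser rather than the module's real one, just to make the intended 70B values concrete:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama2 70b.')
parser.add_argument('--rms_norm_eps', type=float, default=0, help='Must be 1e-5 for llama2 70b.')

# Recommended invocation for llama-2-70b GGML models:
args = parser.parse_args(['--n_gqa', '8', '--rms_norm_eps', '1e-5'])
print(args.n_gqa, args.rms_norm_eps)  # -> 8 1e-05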
@@ -61,6 +61,8 @@ def list_model_elements():
         'mlock',
         'n_gpu_layers',
         'n_ctx',
+        'n_gqa',
+        'rms_norm_eps',
         'llama_cpp_seed',
         'gpu_split',
         'max_seq_len',