Make llama.cpp read prompt size and seed from settings (#2299)
parent ee674afa50
commit cf088566f8
5 changed files with 9 additions and 3 deletions
modules/llamacpp_model.py
@@ -39,8 +39,8 @@ class LlamaCppModel:
 
         params = {
             'model_path': str(path),
-            'n_ctx': 2048,
-            'seed': 0,
+            'n_ctx': shared.args.n_ctx,
+            'seed': int(shared.args.llama_cpp_seed),
             'n_threads': shared.args.threads or None,
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
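Below is a minimal sketch, not part of the commit, of how a params dict like the one above is typically consumed. It assumes the llama-cpp-python Llama constructor; the model path and values are hypothetical.

    # Hypothetical sketch (not part of the commit): unpack a params dict like the
    # one above into llama-cpp-python's Llama constructor.
    from llama_cpp import Llama

    params = {
        'model_path': 'models/example.ggml.bin',  # hypothetical path
        'n_ctx': 2048,       # prompt context size, now read from --n_ctx
        'seed': 0,           # per the flag's help text, 0 means a random seed
        'n_threads': None,   # None lets llama.cpp choose a thread count
        'n_batch': 512,
        'use_mmap': True,
    }
    model = Llama(**params)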
modules/shared.py
@@ -134,6 +134,8 @@ parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from be
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
+parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
+parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
 
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
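The two new options are ordinary argparse flags, so on the server command line they would be passed as, for example, --n_ctx 4096 --llama_cpp_seed 42. A small self-contained sketch (not part of the commit) of how they parse:

    # Hypothetical sketch (not part of the commit): the new flags parse like any
    # other argparse options.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
    parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')

    args = parser.parse_args(['--n_ctx', '4096', '--llama_cpp_seed', '42'])
    print(args.n_ctx, args.llama_cpp_seed)  # 4096 42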
modules/ui.py
@@ -30,7 +30,7 @@ theme = gr.themes.Default(
 
 
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
 
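A rough sketch, assuming (this is not shown in the diff) that each name returned by list_model_elements() corresponds to an attribute on shared.args, which is how the UI widgets and the CLI flags stay in sync:

    # Hypothetical sketch (not part of the commit): read the current value of
    # every tracked model element from shared.args; names without a matching
    # attribute (e.g. the per-GPU memory entries) fall back to None.
    from modules import shared, ui

    for name in ui.list_model_elements():
        print(name, getattr(shared.args, name, None))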