Make llama.cpp read prompt size and seed from settings (#2299)

DGdev91 2023-05-25 15:29:31 +02:00 committed by GitHub
parent ee674afa50
commit cf088566f8
5 changed files with 9 additions and 3 deletions


@@ -30,7 +30,7 @@ theme = gr.themes.Default(
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
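The hunk adds 'n_ctx' and 'llama_cpp_seed' to the list of model elements tracked by the UI, so the llama.cpp context size and seed are saved and restored like the other loader settings. As a minimal sketch of how a loader might consume the two new values — assuming a settings dict keyed by the element names above and llama-cpp-python's Llama constructor (which does accept n_ctx and seed parameters); this is illustrative, not the literal code from the commit's other changed files:

# Hypothetical sketch: forward settings captured by list_model_elements()
# to llama-cpp-python. Function and dict names are assumptions for
# illustration, not this repository's actual loader code.
from llama_cpp import Llama

def load_llamacpp_model(model_path, settings):
    return Llama(
        model_path=model_path,
        n_ctx=settings.get('n_ctx', 2048),           # prompt/context size from settings
        seed=settings.get('llama_cpp_seed', 0),      # sampling seed from settings
        n_threads=settings.get('threads') or None,   # another shared loader option
    )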