Make llama.cpp read prompt size and seed from settings (#2299)

DGdev91 2023-05-25 15:29:31 +02:00 committed by GitHub
parent ee674afa50
commit cf088566f8
5 changed files with 9 additions and 3 deletions


@@ -134,6 +134,8 @@ parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from be
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
+parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
+parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
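
The consuming side of this change is in the other changed files, which are not shown in this hunk. The sketch below illustrates how the parsed --n_ctx and --llama_cpp_seed values would typically be forwarded to the llama-cpp-python Llama constructor, which accepts n_ctx and seed keyword arguments; the --model flag and the rest of the script are illustrative assumptions, not part of this diff.

    import argparse

    from llama_cpp import Llama  # llama-cpp-python binding

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, required=True, help='Path to the llama.cpp model file.')
    parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
    parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
    args = parser.parse_args()

    # Pass the user-supplied settings through to the loader
    # instead of relying on the binding's hard-coded defaults.
    model = Llama(
        model_path=args.model,
        n_ctx=args.n_ctx,
        seed=args.llama_cpp_seed,
    )

The point of the commit is exactly this plumbing: before it, the context size and seed used by the llama.cpp loader ignored the webui's settings; after it, both are user-configurable from the command line.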