Add in-memory cache support for llama.cpp (#1936)
parent 0227e738ed
commit e657dd342d
3 changed files with 33 additions and 16 deletions
@@ -123,6 +123,7 @@ parser.add_argument('--threads', type=int, default=0, help='Number of threads to
 parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of prompt tokens to batch together when calling llama_eval.')
 parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
+parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
 
 # GPTQ
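The new flag stores its value as a plain string, so the loader has to turn sizes like 2000MiB or 2GiB into a byte count before the cache is built. A minimal sketch of that conversion under the flag's documented semantics (bare numbers are bytes); parse_cache_capacity is a hypothetical helper name, and the binary 1024-based multipliers are an assumption, not taken from this commit:

import re


def parse_cache_capacity(value: str) -> int:
    # Hypothetical helper (not part of the diff shown): converts a
    # --cache-capacity string such as '2000MiB' or '2GiB' into bytes.
    # A bare number is treated as bytes, matching the flag's help text.
    match = re.fullmatch(r'(\d+)\s*(GiB|MiB)?', value.strip())
    if match is None:
        raise ValueError(f'Unrecognized cache capacity: {value!r}')
    number, unit = int(match.group(1)), match.group(2)
    return number * {'GiB': 1024 ** 3, 'MiB': 1024 ** 2, None: 1}[unit]


assert parse_cache_capacity('2GiB') == 2 * 1024 ** 3
assert parse_cache_capacity('2000MiB') == 2000 * 1024 ** 2
assert parse_cache_capacity('4096') == 4096

In llama-cpp-python, a byte count like this can then be handed to LlamaCache(capacity_bytes=...) and attached via Llama.set_cache(...), so repeated prompts reuse previously evaluated state instead of recomputing it; the exact wiring in this commit's model loader is not shown in the hunk above.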