Enable NUMA feature for llama_cpp_python (#4040)
Parent: 87ea2d96fd
Commit: 7e6ff8d1f0
7 changed files with 21 additions and 12 deletions
@@ -125,6 +125,7 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--tensor_split', type=str, default=None, help="Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17")
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
+parser.add_argument('--numa', action='store_true', help='Activate NUMA task allocation for llama.cpp')

 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
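For context, a minimal sketch of how the new --numa flag can be threaded through to llama-cpp-python: recent versions of its Llama constructor expose a matching numa parameter, which is forwarded to llama.cpp's backend initialization so threads and allocations can be pinned per NUMA node on multi-socket machines. The loader function and the args access pattern below are illustrative assumptions; the actual wiring lives in the other changed files of this commit.

from llama_cpp import Llama

def load_llamacpp_model(model_path: str, args) -> Llama:
    # Hypothetical loader for illustration only. The keyword arguments
    # mirror the argparse flags in the hunk above (argparse converts
    # --n-gpu-layers to args.n_gpu_layers).
    return Llama(
        model_path=model_path,
        n_ctx=args.n_ctx,                # --n_ctx
        seed=args.llama_cpp_seed,        # --llama_cpp_seed
        n_gpu_layers=args.n_gpu_layers,  # --n-gpu-layers
        numa=args.numa,                  # the flag added in this commit
    )

With this in place, a user on a multi-socket server would simply add --numa to their usual launch flags to opt in to NUMA-aware task allocation.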