Bump llama-cpp-python, +tensor_split by @shouyiwang, +mul_mat_q (#3610)
parent 4b69f4f6ae
commit 7cba000421
8 changed files with 31 additions and 2 deletions
@@ -119,8 +119,10 @@ parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of
 parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 parser.add_argument('--low-vram', action='store_true', help='Low VRAM Mode')
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
+parser.add_argument('--mul_mat_q', action='store_true', help='Activate new mulmat kernels.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
+parser.add_argument('--tensor_split', type=str, default=None, help="Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17")
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
 parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama-2 70b.')
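For reference, a minimal sketch of how the two new flags could be wired through to llama-cpp-python. This is not code from the commit: the model path is hypothetical, and it assumes a llama-cpp-python version whose Llama constructor accepts tensor_split and mul_mat_q (the point of this version bump). The parser object is the one defined in the diff above.

# Sketch only: forward the new CLI flags to llama-cpp-python.
from llama_cpp import Llama

args = parser.parse_args()

# '--tensor_split 18,17' arrives as a comma-separated string; the Llama
# constructor expects a list of floats giving each GPU's share of the model.
tensor_split = None
if args.tensor_split:
    tensor_split = [float(x) for x in args.tensor_split.split(',')]

model = Llama(
    model_path='models/ggml-model-q4_0.bin',  # hypothetical path
    n_ctx=args.n_ctx,
    n_gpu_layers=args.n_gpu_layers,
    tensor_split=tensor_split,
    mul_mat_q=args.mul_mat_q,  # opt in to the new mulmat kernels
)

With no --tensor_split given, tensor_split stays None and llama.cpp falls back to its default split across visible GPUs.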