Add llama-cpp-python wheels with tensor cores support (#5003)
parent 0a299d5959
commit de138b8ba6

9 changed files with 69 additions and 21 deletions
@@ -106,6 +106,7 @@ parser.add_argument('--compute_dtype', type=str, default='float16', help='comput
 parser.add_argument('--quant_type', type=str, default='nf4', help='quant_type for 4-bit. Valid options: nf4, fp4.')
 
 # llama.cpp
+parser.add_argument('--tensorcores', action='store_true', help='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.')
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--threads', type=int, default=0, help='Number of threads to use.')
 parser.add_argument('--threads-batch', type=int, default=0, help='Number of threads to use for batches/prompt processing.')
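For context, a minimal sketch of how a flag like --tensorcores might be used to choose between two prebuilt llama-cpp-python wheels at import time. The module name llama_cpp_cuda_tensorcores and the helper load_llama_cpp are assumptions for illustration only; the actual selection logic lives elsewhere in this commit, not in the hunk above.

import importlib

def load_llama_cpp(tensorcores: bool):
    """Return the llama-cpp-python module matching the --tensorcores flag.

    Hypothetical sketch: the package name 'llama_cpp_cuda_tensorcores' is an
    assumed name for the wheel compiled with tensor cores support.
    """
    if tensorcores:
        try:
            return importlib.import_module('llama_cpp_cuda_tensorcores')
        except ImportError:
            # Fall back to the default build if the tensor-cores wheel is absent.
            pass
    return importlib.import_module('llama_cpp')

With a gate like this, users on RTX cards can opt into the tensor-cores build via the new command-line flag, while everyone else keeps loading the standard llama-cpp-python package.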