Experimental jank multiGPU inference that's 2x faster than native somehow (#2100)
parent fd743a0207
commit 1f50dbe352
4 changed files with 10 additions and 3 deletions
@@ -172,7 +172,12 @@ def load_quantized(model_name):
     # qwopqwop200's offload
     if model_type == 'llama' and shared.args.pre_layer:
-        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, shared.args.pre_layer)
+        if len(shared.args.pre_layer) == 1:
+            pre_layer = shared.args.pre_layer[0]
+        else:
+            pre_layer = shared.args.pre_layer
+
+        model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, pre_layer)
     else:
         threshold = False if model_type == 'gptj' else 128
         model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, kernel_switch_threshold=threshold)
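For context, the loader change boils down to this value handling: argparse with nargs="+" always hands over a list, and the patch collapses it back to a scalar when only one number is given so the original single-GPU path is unchanged. A minimal sketch of that logic (resolve_pre_layer is a hypothetical helper for illustration, not part of the patch; load_quant is not reproduced here):

def resolve_pre_layer(pre_layer_arg):
    # argparse nargs="+" always yields a list, e.g. [30] or [30, 60].
    if len(pre_layer_arg) == 1:
        # One value: keep the old scalar behaviour (single-GPU CPU offload).
        return pre_layer_arg[0]
    # Several values: pass the list through so layers can be split across GPUs.
    return pre_layer_arg

assert resolve_pre_layer([30]) == 30
assert resolve_pre_layer([30, 60]) == [30, 60]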
@@ -130,7 +130,7 @@ parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layer
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
 parser.add_argument('--model_type', type=str, help='Model type of pre-quantized model. Currently LLaMA, OPT, and GPT-J are supported.')
 parser.add_argument('--groupsize', type=int, default=-1, help='Group size.')
-parser.add_argument('--pre_layer', type=int, default=0, help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.')
+parser.add_argument('--pre_layer', type=int, nargs="+", help='The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models. For multi-gpu, write the numbers separated by spaces, eg --pre_layer 30 60.')
 parser.add_argument('--checkpoint', type=str, help='The path to the quantized checkpoint file. If not specified, it will be automatically detected.')
 parser.add_argument('--monkey-patch', action='store_true', help='Apply the monkey patch for using LoRAs with quantized models.')
 parser.add_argument('--quant_attn', action='store_true', help='(triton) Enable quant attention.')
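For reference, a self-contained sketch of how the updated --pre_layer definition parses on the command line (a stand-alone parser for illustration only, not the project's full argument list):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--pre_layer', type=int, nargs="+",
                    help='Number of layers to allocate to the GPU(s); for multi-GPU, separate the numbers with spaces, e.g. --pre_layer 30 60.')

print(parser.parse_args(['--pre_layer', '30']).pre_layer)        # [30]      -> single GPU
print(parser.parse_args(['--pre_layer', '30', '60']).pre_layer)  # [30, 60]  -> split across two GPUs
print(parser.parse_args([]).pre_layer)                           # None when the flag is omitted

Note that the old default=0 is gone, so an omitted flag now yields None instead of 0; both are falsy, so the check if model_type == 'llama' and shared.args.pre_layer: in the loader behaves the same when the flag is not set.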