Some minor fixes to the GPTQ loader
parent 8778b756e6
commit 518e5c4244

1 changed file with 5 additions and 3 deletions
@@ -7,6 +7,8 @@ import torch
 import modules.shared as shared

 sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
+import llama
+import opt


 def load_quantized(model_name):
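The two new imports rely on the sys.path.insert just above them: GPTQ-for-LLaMa is a sibling checkout rather than an installed package, so prepending its directory to the import search path lets `import llama` and `import opt` resolve to modules at the root of that repository. A minimal sketch of the mechanism, assuming the repository ships llama.py and opt.py at its top level:

import sys
from pathlib import Path

# Make the GPTQ-for-LLaMa checkout importable as top-level modules.
sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
import llama  # resolves to repositories/GPTQ-for-LLaMa/llama.py
import opt    # resolves to repositories/GPTQ-for-LLaMa/opt.py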
@@ -21,9 +23,9 @@ def load_quantized(model_name):
     model_type = shared.args.gptq_model_type.lower()

     if model_type == 'llama':
-        from llama import load_quant
+        load_quant = llama.load_quant
     elif model_type == 'opt':
-        from opt import load_quant
+        load_quant = opt.load_quant
     else:
         print("Unknown pre-quantized model type specified. Only 'llama' and 'opt' are supported")
         exit()
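With both backends now imported at module level, the branch no longer performs an import inside the function; it only binds a reference to the right backend's same-named load_quant. A sketch of that selection pattern, using a hypothetical pick_loader helper and assuming the module-level imports from the first hunk:

def pick_loader(model_type):
    # No import work happens per call; the branch just chooses which
    # already-imported module's load_quant to hand back.
    if model_type == 'llama':
        return llama.load_quant
    elif model_type == 'opt':
        return opt.load_quant
    raise ValueError("Only 'llama' and 'opt' are supported")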
@@ -50,7 +52,7 @@ def load_quantized(model_name):
         print(f"Could not find {pt_model}, exiting...")
         exit()

-    model = load_quant(path_to_model, str(pt_path), shared.args.gptq_bits)
+    model = load_quant(str(path_to_model), str(pt_path), shared.args.gptq_bits)

     # Multiple GPUs or GPU+CPU
     if shared.args.gpu_memory:
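The last change mirrors the str(pt_path) conversion already on the same line: path_to_model is presumably a pathlib.Path, and str() hands load_quant the plain-string form that downstream path handling expects. A self-contained illustration with a hypothetical model directory:

from pathlib import Path

path_to_model = Path("models") / "my-model"  # hypothetical model directory

# str() recovers the plain string ("models/my-model" on POSIX), the form
# expected by call sites that do string operations on the path.
print(str(path_to_model))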