add mps support on apple silicon

Author: Wojtek Kowaluk
Date: 2023-03-18 00:56:23 +01:00
parent 7d97da1dcb
commit 30939e2aee
2 changed files with 12 additions and 1 deletion

@@ -46,6 +46,13 @@ def load_model(model_name):
     if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
+        if torch.has_mps:
+            model = AutoModelForCausalLM.from_pretrained(
+                Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True,
+                torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
+            )
+            device = torch.device('mps')
+            model = model.to(device)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
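Note on this hunk: the new `if torch.has_mps:` branch sits at the same indentation as the 13B/20B/30B size check, so the pre-existing `else:` now binds to the MPS test rather than to the size test. Below is a minimal sketch of the device-selection logic the hunk is aiming for, not the repository's code; the function name and parameters are illustrative, and it uses `torch.backends.mps.is_available()`, which newer PyTorch releases recommend over the legacy `torch.has_mps` attribute.

# Minimal sketch, assuming transformers is installed; not the repo's code.
import torch
from transformers import AutoModelForCausalLM

def load_model_on_best_device(model_path, bf16=False):  # illustrative name
    dtype = torch.bfloat16 if bf16 else torch.float16
    model = AutoModelForCausalLM.from_pretrained(
        model_path, low_cpu_mem_usage=True, torch_dtype=dtype
    )
    if torch.backends.mps.is_available():  # Apple Silicon (Metal) GPU
        return model.to(torch.device('mps'))
    if torch.cuda.is_available():          # NVIDIA GPU
        return model.cuda()
    return model                           # CPU fallback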
@@ -97,7 +104,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
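This hunk widens the CPU-fallback guard so that an Apple Silicon machine without CUDA is no longer forced into CPU mode. A hedged sketch of the equivalent probe follows; the helper name is hypothetical, and `torch.backends.mps.is_available()` is the modern spelling of the `torch.has_mps` check used in the diff.

# Hypothetical helper: report which GPU backend, if any, torch can use.
import torch

def available_gpu_backend():
    if torch.cuda.is_available():
        return 'cuda'
    if torch.backends.mps.is_available():  # Apple Silicon
        return 'mps'
    return None  # caller should fall back to CPU mode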

@@ -33,9 +33,13 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()

 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
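For completeness: the encode() change moves tokenized prompts to the MPS device so they end up on the same device as the model, which torch requires before generation. A short usage sketch follows; the 'gpt2' model name is illustrative only.

# Usage sketch: tokenize a prompt and move the ids to MPS when available.
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # illustrative model
input_ids = tokenizer('Hello, world', return_tensors='pt').input_ids
device = torch.device('mps' if torch.backends.mps.is_available() else 'cpu')
input_ids = input_ids.to(device)  # must match the model's device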