Use str(Path) instead of os.path.abspath(Path)

This commit is contained in:
oobabooga 2023-03-13 00:08:01 -03:00
parent b9e0712b92
commit 77294b27dd
3 changed files with 7 additions and 9 deletions

View file

@@ -25,10 +25,10 @@ class RWKVModel:
tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")
if shared.args.rwkv_strategy is None:
model = RWKV(model=os.path.abspath(path), strategy=f'{device} {dtype}')
model = RWKV(model=str(path), strategy=f'{device} {dtype}')
else:
model = RWKV(model=os.path.abspath(path), strategy=shared.args.rwkv_strategy)
pipeline = PIPELINE(model, os.path.abspath(tokenizer_path))
model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy)
pipeline = PIPELINE(model, str(tokenizer_path))
result = self()
result.pipeline = pipeline
@@ -61,7 +61,7 @@ class RWKVTokenizer:
@classmethod
def from_pretrained(self, path):
tokenizer_path = path / "20B_tokenizer.json"
tokenizer = Tokenizer.from_file(os.path.abspath(tokenizer_path))
tokenizer = Tokenizer.from_file(str(tokenizer_path))
result = self()
result.tokenizer = tokenizer

View file

@@ -1,4 +1,3 @@
import os
import sys
from pathlib import Path
@@ -7,7 +6,7 @@ import torch
import modules.shared as shared
sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
from llama import load_quant
@@ -41,7 +40,7 @@ def load_quantized_LLaMA(model_name):
print(f"Could not find {pt_model}, exiting...")
exit()
model = load_quant(path_to_model, os.path.abspath(pt_path), bits)
model = load_quant(path_to_model, str(pt_path), bits)
# Multi-GPU setup
if shared.args.gpu_memory: