Use separate llama-cpp-python packages for GGML support

This commit is contained in:
jllllll 2023-08-26 09:15:11 -05:00
parent 6e6431e73f
commit 4a999e3bcd
No known key found for this signature in database
GPG key ID: 7FCD00C417935797
7 changed files with 74 additions and 17 deletions

View file

@ -2,6 +2,7 @@ import os
import re
from datetime import datetime
from pathlib import Path
from typing import Union
from modules import shared
from modules.logging_colors import logger
@ -124,3 +125,11 @@ def get_datasets(path: str, ext: str):
def get_available_chat_styles():
    """Return the sorted list of available chat style names.

    Scans the 'css' directory for files matching 'chat_style*.css' and
    strips the leading 'chat_style-' prefix from each stem, deduplicating
    and sorting with natural (human) ordering.
    """
    names = {'-'.join(css_file.stem.split('-')[1:]) for css_file in Path('css').glob('chat_style*.css')}
    return sorted(names, key=natural_keys)
# Determines if a llama.cpp model is in GGUF format
# Copied from ctransformers utils.py
def is_gguf(path: Union[str, Path]) -> bool:
    """Return True if the file at *path* starts with the GGUF magic bytes.

    A GGUF-format llama.cpp model begins with the 4-byte magic b"GGUF";
    anything else (including GGML files and short files) returns False.
    """
    resolved = str(Path(path).resolve())
    with open(resolved, "rb") as model_file:
        header = model_file.read(4)
    return header == b"GGUF"