diff --git a/README.md b/README.md
index 822deba..f36a8b9 100644
--- a/README.md
+++ b/README.md
@@ -156,7 +156,7 @@ text-generation-webui
 
 In the "Model" tab of the UI, those models can be automatically downloaded from Hugging Face. You can also download them via the command-line with `python download-model.py organization/model`.
 
-* GGML models are a single file and should be placed directly into `models`. Example:
+* GGUF models are a single file and should be placed directly into `models`. Example:
 
 ```
 text-generation-webui
@@ -258,7 +258,7 @@ Optionally, you can use the following command-line flags:
 | `--quant_type QUANT_TYPE` | quant_type for 4-bit. Valid options: nf4, fp4. |
 | `--use_double_quant`      | use_double_quant for 4-bit. |
 
-#### GGML (for llama.cpp and ctransformers)
+#### GGUF (for llama.cpp and ctransformers)
 
 | Flag        | Description |
 |-------------|-------------|
diff --git a/download-model.py b/download-model.py
index a65f82c..be8d59f 100644
--- a/download-model.py
+++ b/download-model.py
@@ -57,7 +57,7 @@ class ModelDownloader:
         classifications = []
         has_pytorch = False
         has_pt = False
-        # has_ggml = False
+        # has_gguf = False
         has_safetensors = False
         is_lora = False
         while True:
@@ -78,10 +78,10 @@ class ModelDownloader:
                 is_pytorch = re.match(r"(pytorch|adapter|gptq)_model.*\.bin", fname)
                 is_safetensors = re.match(r".*\.safetensors", fname)
                 is_pt = re.match(r".*\.pt", fname)
-                is_ggml = re.match(r".*ggml.*\.bin", fname)
+                is_gguf = re.match(r'.*\.gguf', fname)
                 is_tokenizer = re.match(r"(tokenizer|ice|spiece).*\.model", fname)
                 is_text = re.match(r".*\.(txt|json|py|md)", fname) or is_tokenizer
-                if any((is_pytorch, is_safetensors, is_pt, is_ggml, is_tokenizer, is_text)):
+                if any((is_pytorch, is_safetensors, is_pt, is_gguf, is_tokenizer, is_text)):
 
                     if 'lfs' in dict[i]:
                         sha256.append([fname, dict[i]['lfs']['oid']])
@@ -101,9 +101,9 @@ class ModelDownloader:
                     elif is_pt:
                         has_pt = True
                         classifications.append('pt')
-                    elif is_ggml:
-                        # has_ggml = True
-                        classifications.append('ggml')
+                    elif is_gguf:
+                        # has_gguf = True
+                        classifications.append('gguf')
 
         cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
         cursor = base64.b64encode(cursor)
diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index 4d42394..0608cb0 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -165,7 +165,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = list(path.glob('*ggml*.bin'))[0]
+            model_file = list(path.glob('*.gguf*'))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
 
diff --git a/modules/models.py b/modules/models.py
index ea9cc52..5268a2f 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -241,7 +241,7 @@ def llamacpp_loader(model_name):
     if path.is_file():
         model_file = path
     else:
-        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*ggml*.bin'))[0]
+        model_file = list(Path(f'{shared.args.model_dir}/{model_name}').glob('*.gguf*'))[0]
 
     logger.info(f"llama.cpp weights detected: {model_file}")
     model, tokenizer = LlamaCppModel.from_pretrained(model_file)
diff --git a/modules/models_settings.py b/modules/models_settings.py
index 5efde34..2ed658b 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -24,9 +24,9 @@ def infer_loader(model_name):
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
         loader = 'AutoGPTQ'
-    elif len(list(path_to_model.glob('*ggml*.bin'))) > 0:
+    elif len(list(path_to_model.glob('*.gguf*'))) > 0:
         loader = 'llama.cpp'
-    elif re.match(r'.*ggml.*\.bin', model_name.lower()):
+    elif re.match(r'.*\.gguf', model_name.lower()):
        loader = 'llama.cpp'
    elif re.match(r'.*rwkv.*\.pth', model_name.lower()):
        loader = 'RWKV'
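For reference, the loader inference after this change reduces to two extension-based checks: a model folder containing a `*.gguf` file, or a bare model name matching `.*\.gguf`, both of which map to llama.cpp. Below is a minimal standalone sketch of that logic; the helper name `is_llamacpp_model` and the example paths are illustrative, not part of the patch.

```python
import re
from pathlib import Path


def is_llamacpp_model(path_to_model: Path, model_name: str) -> bool:
    # A folder containing a GGUF file maps to llama.cpp (the patched glob).
    if list(path_to_model.glob('*.gguf*')):
        return True
    # A bare single-file name ending in .gguf also maps to llama.cpp
    # (the patched regex in infer_loader).
    return re.match(r'.*\.gguf', model_name.lower()) is not None


# Hypothetical usage; the directory and file name are examples only.
print(is_llamacpp_model(Path('models'), 'llama-2-13b.Q4_K_M.gguf'))  # True
```

Both checks trust the file name; a stricter variant could read the first four bytes of the file and compare them against the GGUF magic (`b'GGUF'`) before handing it to llama.cpp.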