Add settings UI for llama.cpp and fixed reloading of llama.cpp models (#2087)
This commit is contained in:
parent
10869de0f4
commit
0227e738ed
3 changed files with 18 additions and 2 deletions
|
@ -16,6 +16,9 @@ class LlamaCppModel:
|
|||
def __init__(self):
    """Construct an empty wrapper; no model is loaded yet.

    The only state set here is the `initialized` flag — the actual
    llama.cpp model object is attached later (see `from_pretrained`).
    """
    self.initialized = False
|
||||
|
||||
def __del__(self):
    """Explicitly release the underlying llama.cpp model.

    Guarded with getattr(): __init__ only sets `self.initialized`, so if
    this wrapper is garbage-collected before a model was ever attached,
    `self.model` does not exist and the original unconditional call would
    raise AttributeError (which CPython prints as an ignored exception
    during interpreter shutdown).
    """
    model = getattr(self, 'model', None)
    if model is not None:
        # llama.cpp objects free native memory in their own __del__.
        model.__del__()
|
||||
|
||||
@classmethod
|
||||
def from_pretrained(self, path):
|
||||
result = self()
|
||||
|
|
|
@ -27,7 +27,7 @@ theme = gr.themes.Default(
|
|||
|
||||
|
||||
def list_model_elements():
    """Return the names of all UI elements that affect model loading.

    Includes the llama.cpp-specific settings ('threads', 'n_batch',
    'no-mmap', 'mlock', 'n_gpu_layers') plus one 'gpu_memory_<i>' entry
    per visible CUDA device.  The pre-change duplicate assignment to
    `elements` (a dead store immediately overwritten) has been removed.
    """
    elements = [
        'cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16',
        'load_in_8bit', 'wbits', 'groupsize', 'model_type', 'pre_layer',
        'threads', 'n_batch', 'no-mmap', 'mlock', 'n_gpu_layers',
    ]
    # One memory slider per CUDA device (empty range on CPU-only hosts).
    for i in range(torch.cuda.device_count()):
        elements.append(f'gpu_memory_{i}')
    return elements
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue