Don't require llama.cpp models to be placed in subfolders
This commit is contained in:
parent
06b6ff6c2e
commit
fcb594b90e
4 changed files with 41 additions and 39 deletions
|
@ -6,11 +6,10 @@ import yaml
|
|||
# Shared module-level state for the loaded model and chat session.
# (Reconstructed from a diff-view scrape; the interleaved table artifacts
# were not part of the original source.)

# Currently loaded model and tokenizer; None until a model is loaded.
model = None
tokenizer = None
# Display name of the loaded model ("None" is the literal placeholder string).
model_name = "None"
model_type = None

# LoRA adapters currently applied (presumably adapter names — confirm against loaders).
lora_names = []

# Soft-prompt state: the prompt tensor and whether a soft prompt is active.
soft_prompt_tensor = None
soft_prompt = False

# Backend flags: which special model loader is in use.
is_RWKV = False
is_llamacpp = False

# Chat variables
history = {'internal': [], 'visible': []}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue