Don't require llama.cpp models to be placed in subfolders

This commit is contained in:
oobabooga 2023-04-22 14:56:48 -03:00
parent 06b6ff6c2e
commit fcb594b90e
4 changed files with 41 additions and 39 deletions

View file

@@ -6,11 +6,10 @@ import yaml
# Module-level shared state for the UI. Other modules import these names and
# mutate them directly, so the names themselves are the public interface.

model = None                # currently loaded model object; None until a model is loaded
tokenizer = None            # tokenizer paired with `model`; None until a model is loaded
model_name = "None"         # name of the loaded model; the literal string "None" when unloaded
model_type = None           # backend/type of the loaded model -- TODO confirm possible values
lora_names = []             # names of the LoRA adapters currently applied
soft_prompt_tensor = None   # tensor for the active soft prompt, if any
soft_prompt = False         # whether a soft prompt is currently active
is_RWKV = False             # True when the loaded model uses the RWKV backend
is_llamacpp = False         # True when the loaded model uses the llama.cpp backend
# Chat variables
history = {'internal': [], 'visible': []}  # chat log; presumably raw vs. rendered turns -- verify against chat module