Bump llama-cpp-python to use LlamaCache
parent ac189011cb
commit d2ea925fa5
2 changed files with 4 additions and 3 deletions
@@ -6,7 +6,7 @@ Documentation:
 https://abetlen.github.io/llama-cpp-python/
 '''
 
-from llama_cpp import Llama
+from llama_cpp import Llama, LlamaCache
 
 from modules import shared
 from modules.callbacks import Iteratorize
@@ -27,6 +27,7 @@ class LlamaCppModel:
             'n_threads': shared.args.threads or None
         }
         self.model = Llama(**params)
+        self.model.set_cache(LlamaCache)
 
         # This is ugly, but the model and the tokenizer are the same object in this library.
         return result, result
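For context, a minimal sketch of how prompt caching is typically enabled in llama-cpp-python. The model path and prompt below are placeholders, and note that set_cache() takes a cache object, so current releases of the library are usually called with an instance (LlamaCache()) rather than the bare class as in the diff above:

from llama_cpp import Llama, LlamaCache

# Placeholder model path; any model file supported by llama.cpp works here.
llm = Llama(model_path="./models/ggml-model.bin", n_ctx=2048, seed=0)

# The cache stores evaluated model states keyed by prompt tokens, so a
# follow-up prompt that shares a prefix with an earlier one can reuse the
# cached state instead of re-evaluating the shared tokens.
llm.set_cache(LlamaCache())

output = llm("Q: What does LlamaCache speed up? A:", max_tokens=64)
print(output["choices"][0]["text"])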