Add llama.cpp GPU offload option (#2060)
parent eee986348c
commit 071f0776ad
4 changed files with 23 additions and 5 deletions
@@ -27,7 +27,8 @@ class LlamaCppModel:
             'n_threads': shared.args.threads or None,
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
-            'use_mlock': shared.args.mlock
+            'use_mlock': shared.args.mlock,
+            'n_gpu_layers': shared.args.n_gpu_layers
         }
         self.model = Llama(**params)
         self.model.set_cache(LlamaCache)