Add llama.cpp GPU offload option (#2060)

This commit is contained in:
AlphaAtlas 2023-05-14 21:58:11 -04:00 committed by GitHub
parent eee986348c
commit 071f0776ad
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 23 additions and 5 deletions

View file

@@ -27,7 +27,8 @@ class LlamaCppModel:
             'n_threads': shared.args.threads or None,
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
-            'use_mlock': shared.args.mlock
+            'use_mlock': shared.args.mlock,
+            'n_gpu_layers': shared.args.n_gpu_layers
         }
         self.model = Llama(**params)
         self.model.set_cache(LlamaCache)