Add ChatGLM support (#1256)

---------

Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
Authored by Forkoz on 2023-04-16 22:15:03 +00:00; committed by GitHub.
parent 6a03ad0824
commit c6fe1ced01
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 31 additions and 10 deletions

View file

@ -113,6 +113,7 @@ parser.add_argument('--bf16', action='store_true', help='Load the model with bfl
# Memory / attention tweaks applied when loading transformers models.
parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
parser.add_argument('--xformers', action='store_true', help="Use xformer's memory efficient attention. This should increase your tokens/s.")
parser.add_argument('--sdp-attention', action='store_true', help="Use torch 2.0's sdp attention.")
# Needed by models such as ChatGLM that ship custom modeling code in the
# checkpoint repo; executing that code is a security risk (a warning is
# printed at startup when this flag is set).
parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM.")
# llama.cpp
# NOTE(review): default=0 presumably means "let llama.cpp pick" — confirm against the loader.
parser.add_argument('--threads', type=int, default=0, help='Number of threads to use in llama.cpp.')
@ -162,6 +163,10 @@ if args.cai_chat:
print("Warning: --cai-chat is deprecated. Use --chat instead.")
args.chat = True
# Security warnings: surface dangerous CLI flags to the user at startup.
if args.trust_remote_code:
    # Remote checkpoint code runs with full local privileges when this flag is on.
    warning_text = "Warning: trust_remote_code is enabled. This is dangerous."
    print(warning_text)
def is_chat():
    """Return the current value of the chat-mode flag (set by --chat, or by --cai-chat via its deprecation shim)."""
    chat_enabled = args.chat
    return chat_enabled