Falcon support (trust-remote-code and autogptq checkboxes) (#2367)
---------

Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
parent 60ae80cf28
commit 204731952a

6 changed files with 9 additions and 5 deletions
@@ -110,7 +110,7 @@ parser.add_argument('--bf16', action='store_true', help='Load the model with bfl
 parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
 parser.add_argument('--xformers', action='store_true', help="Use xformer's memory efficient attention. This should increase your tokens/s.")
 parser.add_argument('--sdp-attention', action='store_true', help="Use torch 2.0's sdp attention.")
-parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM.")
+parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM and Falcon.")
 
 # Accelerate 4-bit
 parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision (using bitsandbytes).')
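For context, the flag touched in this hunk is the one transformers requires before it will execute the custom modeling code bundled with ChatGLM and Falcon checkpoints. The sketch below shows one way the parsed flag is typically threaded into a loader call; the load_model function and its parameters are illustrative assumptions, not the repository's exact loader code.

# Minimal sketch, assuming the parsed CLI flag is forwarded to the loader.
# The function name and signature are hypothetical; only the transformers
# calls and the trust_remote_code keyword come from the library itself.
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model(model_path, trust_remote_code=False):
    # trust_remote_code=True lets transformers run the modeling code shipped
    # inside the checkpoint repo, which ChatGLM and Falcon models rely on.
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=trust_remote_code)
    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=trust_remote_code)
    return model, tokenizer

With the argparse change above, a user would pass --trust-remote-code on the command line (or tick the corresponding checkbox in the UI) and the resulting boolean would be handed through to calls like these.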