transformers: add use_flash_attention_2 option (#4373)
This commit is contained in:
parent add359379e
commit 4766a57352
6 changed files with 9 additions and 1 deletion
@@ -53,6 +53,7 @@ def list_model_elements():
     'load_in_8bit',
     'trust_remote_code',
     'use_fast',
+    'use_flash_attention_2',
     'load_in_4bit',
     'compute_dtype',
     'quant_type',
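For context, a minimal sketch of what enabling this option typically does downstream, assuming the new UI element is forwarded as a keyword argument to transformers' from_pretrained(); the model name and dtype below are illustrative placeholders, not taken from this commit:

from transformers import AutoModelForCausalLM
import torch

# Hypothetical usage: with the new checkbox enabled, the loader would pass
# use_flash_attention_2=True through to from_pretrained(). This kwarg exists
# in transformers >= 4.34 and requires the flash-attn package plus a
# supported GPU; the model name is a placeholder.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    torch_dtype=torch.float16,
    use_flash_attention_2=True,
)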