Remove GGML support

This commit is contained in:
oobabooga 2023-09-11 07:30:56 -07:00
parent cc7b7ba153
commit ed86878f02
15 changed files with 24 additions and 123 deletions

View file

@@ -63,7 +63,6 @@ llama-65b-gptq-3bit:
.*vicuna.*(1.5|1_5):
instruction_template: 'Vicuna-v1.1'
truncation_length: 4096
rms_norm_eps: 5.0e-6
.*stable.*vicuna:
instruction_template: 'StableVicuna'
(?!.*chat).*chinese-vicuna:
@@ -211,24 +210,19 @@ llama-65b-gptq-3bit:
instruction_template: 'Alpaca'
.*llama-(2|v2):
truncation_length: 4096
rms_norm_eps: 5.0e-6
.*llama-(2|v2).*chat:
instruction_template: 'Llama-v2'
.*70b.*ggml.*\.bin:
n_gqa: 8
.*newhope:
instruction_template: 'NewHope'
.*stablebeluga2:
instruction_template: 'StableBeluga2'
truncation_length: 4096
rms_norm_eps: 5.0e-6
.*openchat:
instruction_template: 'OpenChat'
.*falcon.*-instruct:
.*(openorca-platypus2):
instruction_template: 'OpenOrca-Platypus2'
custom_stopping_strings: '"### Instruction:", "### Response:"'
rms_norm_eps: 5.0e-6
.*codellama:
rope_freq_base: 1000000
.*codellama.*instruct: