Support for MPT, INCITE, WizardLM, StableLM, Galactica, Vicuna, Guanaco, and Baize instruction following (#1596)

Carl Kenner 2023-05-10 09:07:31 +09:30 committed by GitHub
parent 06c7db017d
commit 814f754451
51 changed files with 352 additions and 28 deletions

models/config.yaml

@@ -6,27 +6,53 @@
     mode: 'chat'
     skip_special_tokens: true
     custom_stopping_strings: ''
-llama-[0-9]*b-4bit$:
-    wbits: 4
+.*llama:
     model_type: 'llama'
-.*-(4bit|int4)-(gr128|128g):
+.*gptq(?!u|arl|v2):
     wbits: 4
     groupsize: 128
-.*-(gr128|128g)-(4bit|int4):
+.*(4bit|int4):
     wbits: 4
-    groupsize: 128
-.*-3bit-(gr128|128g):
+.*(3bit|int3):
     wbits: 3
+.*(-2bit|_2bit|int2-):
+    wbits: 2
+.*(-1bit|_1bit|int1-):
+    wbits: 1
+.*(8bit|int8):
+    wbits: 8
+.*(-7bit|_7bit|int7-):
+    wbits: 7
+.*(-6bit|_6bit|int6-):
+    wbits: 6
+.*(-5bit|_5bit|int5-):
+    wbits: 5
+.*gptqv2:
+    groupsize: 'None'
+.*(-gr32-|-32g-|groupsize32):
+    groupsize: 32
+.*(-gr64-|-64g-|groupsize64):
+    groupsize: 64
+.*(gr128|128g|groupsize128):
     groupsize: 128
-.*-(gr128|128g)-3bit:
-    wbits: 3
-    groupsize: 128
-.*(oasst-sft-1-pythia-12b|oasst-sft-6-llama-30b):
+.*(gr1024|1024g|groupsize1024):
+    groupsize: 1024
+.*(oasst|stablelm-7b-sft-v7-epoch-3):
     mode: 'instruct'
     instruction_template: 'Open Assistant'
-.*vicuna:
+    skip_special_tokens: false
+(?!.*v0)(?!.*1.1)(?!.*1_1)(?!.*stable).*vicuna:
     mode: 'instruct'
     instruction_template: 'Vicuna-v0'
+.*vicuna.*v0:
+    mode: 'instruct'
+    instruction_template: 'Vicuna-v0'
+.*vicuna.*(1.1|1_1):
+    mode: 'instruct'
+    instruction_template: 'Vicuna-v1.1'
+.*stable.*vicuna:
+    mode: 'instruct'
+    instruction_template: 'StableVicuna'
 .*alpaca:
     mode: 'instruct'
     instruction_template: 'Alpaca'
@@ -35,7 +61,7 @@ llama-[0-9]*b-4bit$:
     instruction_template: 'Alpaca'
     wbits: 4
     groupsize: 128
-.*(galactica|oasst):
+.*galactica:
     skip_special_tokens: false
 .*dolly-v[0-9]-[0-9]*b:
     mode: 'instruct'
@@ -59,7 +85,51 @@ llama-[0-9]*b-4bit$:
 .*moss-moon.*sft:
     mode: 'instruct'
     instruction_template: 'MOSS'
+.*stablelm-tuned:
+    mode: 'instruct'
+    instruction_template: 'StableLM'
+    truncation_length: 4096
+    chat_prompt_size: 4096
+    chat_prompt_size_max: 4096
+.*stablelm-base:
+    truncation_length: 4096
+    chat_prompt_size: 4096
+    chat_prompt_size_max: 4096
+.*wizardlm:
+    mode: 'instruct'
+    model_type: 'llama'
+    instruction_template: 'WizardLM'
+.*galactica.*finetuned:
+    mode: 'instruct'
+    instruction_template: 'Galactica Finetuned'
+.*galactica.*-v2:
+    mode: 'instruct'
+    instruction_template: 'Galactica v2'
+(?!.*finetuned)(?!.*-v2).*galactica:
+    mode: 'instruct'
+    instruction_template: 'Galactica'
+.*guanaco:
+    mode: 'instruct'
+    instruction_template: 'Guanaco non-chat'
+.*baize:
+    mode: 'instruct'
+    instruction_template: 'Baize'
+.*mpt-.*instruct:
+    mode: 'instruct'
+    instruction_template: 'Alpaca'
+.*mpt-.*chat:
+    mode: 'instruct'
+    instruction_template: 'MPT-Chat'
+(?!.*-flan-)(?!.*-t5-).*lamini-:
+    mode: 'instruct'
+    instruction_template: 'Alpaca'
+.*incite.*chat:
+    mode: 'instruct'
+    instruction_template: 'INCITE-Chat'
+.*incite.*instruct:
+    mode: 'instruct'
+    instruction_template: 'INCITE-Instruct'
 .*pygmalion-7b:
     model_type: 'llama'
 .*metharme-7b:
     model_type: 'llama'
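
Each top-level key in models/config.yaml is a regular expression matched against the model's name, and every entry that matches contributes its settings. That is why the commit leans on negative lookaheads such as `.*gptq(?!u|arl|v2)` and `(?!.*v0)(?!.*1.1)(?!.*1_1)(?!.*stable).*vicuna` to keep broad and specific patterns from colliding. Below is a minimal Python sketch of that matching scheme, not the webui's actual loader: `settings_for` is a hypothetical helper, and it assumes case-insensitive matching with later entries overriding earlier ones.

```python
import re

import yaml

# A few entries copied from the models/config.yaml diff above.
CONFIG_YAML = """
.*llama:
    model_type: 'llama'
.*gptq(?!u|arl|v2):
    wbits: 4
    groupsize: 128
.*stable.*vicuna:
    mode: 'instruct'
    instruction_template: 'StableVicuna'
(?!.*v0)(?!.*1.1)(?!.*1_1)(?!.*stable).*vicuna:
    mode: 'instruct'
    instruction_template: 'Vicuna-v0'
"""


def settings_for(model_name: str, config: dict) -> dict:
    """Merge the settings of every regex entry that matches the model name.

    Hypothetical helper: entries are tried in file order, so a later match
    overrides keys already set by an earlier one.
    """
    merged = {}
    for pattern, values in config.items():
        if re.match(pattern.lower(), model_name.lower()):
            merged.update(values)
    return merged


config = yaml.safe_load(CONFIG_YAML)

# 'stable-vicuna-13B-GPTQ' hits .*gptq(?!u|arl|v2) (the lookahead only
# rules out names that continue 'gptq' with 'u', 'arl', or 'v2', such as
# 'gptqv2') and .*stable.*vicuna, while the (?!.*stable) lookahead keeps
# it out of the generic Vicuna-v0 entry.
print(settings_for("stable-vicuna-13B-GPTQ", config))
# {'wbits': 4, 'groupsize': 128, 'mode': 'instruct',
#  'instruction_template': 'StableVicuna'}
```

Under that assumption, ordering in the file matters: the generic quantization entries near the top set `wbits` and `groupsize` defaults, and later, more specific entries win on any key they also set.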