Add additive_repetition_penalty sampler setting. (#3627)
This commit is contained in:
parent 6086768309
commit 4440f87722
13 changed files with 34 additions and 8 deletions
modules/loaders.py

@@ -153,6 +153,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
@@ -186,6 +187,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
@@ -244,6 +246,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
@@ -273,6 +276,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
@@ -306,6 +310,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
@@ -353,6 +358,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
@@ -389,6 +395,7 @@ loaders_samplers = {
         'tfs',
         'top_a',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
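Each entry in loaders_samplers lists the sampler keys a given loader supports, so adding 'additive_repetition_penalty' to a loader's set is what makes the new setting available for that loader. A minimal sketch of how such a registry can be queried; the loader names, sets, and helper below are illustrative stand-ins, not the project's actual data or API:

# Illustrative registry in the same shape as loaders_samplers above;
# the contents here are placeholders.
samplers_by_loader = {
    'Transformers': {'temperature', 'repetition_penalty', 'additive_repetition_penalty'},
    'ExLlama_HF': {'temperature', 'repetition_penalty', 'additive_repetition_penalty'},
}

def supports(loader: str, sampler: str) -> bool:
    # Hypothetical helper: a loader missing from the registry supports nothing extra.
    return sampler in samplers_by_loader.get(loader, set())

print(supports('Transformers', 'additive_repetition_penalty'))  # True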
modules/presets.py

@@ -16,6 +16,7 @@ def default_preset():
         'tfs': 1,
         'top_a': 0,
         'repetition_penalty': 1,
+        'additive_repetition_penalty': 0,
         'repetition_penalty_range': 0,
         'encoder_repetition_penalty': 1,
         'no_repeat_ngram_size': 0,
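The default of 0 turns the additive penalty off, so existing presets behave exactly as before until they opt in. A quick sketch of opting in, with a stub standing in for the full default_preset() shown above:

# Stub with only the relevant keys; modules/presets.default_preset returns more.
def default_preset():
    return {'repetition_penalty': 1, 'additive_repetition_penalty': 0}

params = default_preset()
params['additive_repetition_penalty'] = 0.25  # any value > 0 activates the new penalty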
modules/sampler_hijack.py

@@ -139,11 +139,12 @@ class RepetitionPenaltyLogitsProcessorWithRange(LogitsProcessor):
     Copied from the transformers library
     '''
 
-    def __init__(self, penalty: float, _range: int):
-        if not isinstance(penalty, float) or not (penalty > 0):
-            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
+    def __init__(self, penalty: float, additive_penalty: float, _range: int):
+        if not (penalty > 0):
+            raise ValueError(f"`penalty` has to be strictly positive, but is {penalty}")
 
         self.penalty = penalty
+        self.additive_penalty = additive_penalty
         self._range = _range
 
     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
@@ -153,6 +154,7 @@ class RepetitionPenaltyLogitsProcessorWithRange(LogitsProcessor):
 
         # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
         score = torch.where(score < 0, score * self.penalty, score / self.penalty)
+        score -= self.additive_penalty
 
         scores.scatter_(1, input_ids, score)
         return scores
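The combined update above first applies the multiplicative penalty exactly as transformers does (dividing positive logits, multiplying negative ones) and then subtracts a flat amount from every token seen in the penalty range. A self-contained sketch of the same arithmetic on toy values; the numbers are illustrative:

import torch

# Toy logits over a 5-token vocabulary; tokens 1 and 3 have already been generated.
scores = torch.tensor([[2.0, 1.0, -0.5, 3.0, 0.0]])
input_ids = torch.tensor([[1, 3]])
penalty, additive_penalty = 1.15, 0.5

score = torch.gather(scores, 1, input_ids)
# Multiplicative part (as in the diff): divide positive logits, multiply
# negative ones, so the penalty always pushes probability down.
score = torch.where(score < 0, score * penalty, score / penalty)
# Additive part: subtract a flat amount. Unlike the multiplicative penalty,
# this still bites on tokens whose logits are near zero or negative.
score -= additive_penalty
scores.scatter_(1, input_ids, score)
print(scores)  # tensor([[ 2.0000,  0.3696, -0.5000,  2.1087,  0.0000]])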
@@ -185,14 +187,20 @@ def get_logits_warper_patch(self, generation_config):
 
 
 def get_logits_processor_patch(self, **kwargs):
-    result = self._get_logits_processor_old(**kwargs)
-    repetition_penalty_range = kwargs['generation_config'].repetition_penalty_range
     repetition_penalty = kwargs['generation_config'].repetition_penalty
+    additive_repetition_penalty = kwargs['generation_config'].additive_repetition_penalty
+    repetition_penalty_range = kwargs['generation_config'].repetition_penalty_range
+    do_rep_pen_hijack = (repetition_penalty > 1) or (additive_repetition_penalty > 0)
+    if do_rep_pen_hijack:
+        # Make sure that a RepetitionPenaltyLogitsProcessor will be created
+        kwargs['generation_config'].repetition_penalty = 1.1  # must set to some value > 1
 
-    if repetition_penalty_range > 0:
+    result = self._get_logits_processor_old(**kwargs)
+
+    if do_rep_pen_hijack:
         for i in range(len(result)):
             if result[i].__class__.__name__ == 'RepetitionPenaltyLogitsProcessor':
-                result[i] = RepetitionPenaltyLogitsProcessorWithRange(repetition_penalty, repetition_penalty_range)
+                result[i] = RepetitionPenaltyLogitsProcessorWithRange(repetition_penalty, additive_repetition_penalty, repetition_penalty_range)
 
     return result
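The forced repetition_penalty = 1.1 exists because transformers only instantiates a RepetitionPenaltyLogitsProcessor when the penalty differs from 1; with a purely additive configuration (multiplicative penalty exactly 1) the processor list would otherwise contain nothing to swap out. The placeholder value never reaches the logits, since the replacement is constructed from the repetition_penalty read before the override. A minimal sketch of that swap in isolation, assuming the RepetitionPenaltyLogitsProcessorWithRange class from the diff above is in scope; the constructor arguments are illustrative:

from transformers import LogitsProcessorList, RepetitionPenaltyLogitsProcessor

# Stand-in for the list returned by the original _get_logits_processor.
result = LogitsProcessorList([RepetitionPenaltyLogitsProcessor(penalty=1.1)])

for i in range(len(result)):
    if result[i].__class__.__name__ == 'RepetitionPenaltyLogitsProcessor':
        # penalty=1.0 means no multiplicative effect; only the additive 0.5 applies.
        result[i] = RepetitionPenaltyLogitsProcessorWithRange(1.0, 0.5, 1024)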
@@ -205,6 +213,7 @@ def generation_config_init_patch(self, **kwargs):
     self.mirostat_eta = kwargs.pop("mirostat_eta", 0.1)
     self.mirostat_tau = kwargs.pop("mirostat_tau", 5)
     self.repetition_penalty_range = kwargs.pop("repetition_penalty_range", 0)
+    self.additive_repetition_penalty = kwargs.pop("additive_repetition_penalty", 0)
 
 
 def hijack_samplers():
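Popping the custom keys with defaults in __init__ guarantees every GenerationConfig carries them, whether or not a caller passes them. A rough sanity check, assuming hijack_samplers() has already applied the patches in this module:

from transformers import GenerationConfig

gc = GenerationConfig()  # patched __init__ fills in the default
assert gc.additive_repetition_penalty == 0

gc = GenerationConfig(additive_repetition_penalty=0.4)
assert gc.additive_repetition_penalty == 0.4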
modules/text_generation.py

@@ -273,7 +273,7 @@ def apply_stopping_strings(reply, all_stop_strings):
 
 def generate_reply_HF(question, original_question, seed, state, stopping_strings=None, is_chat=False):
     generate_params = {}
-    for k in ['max_new_tokens', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'repetition_penalty_range', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'tfs', 'top_a', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'guidance_scale']:
+    for k in ['max_new_tokens', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'additive_repetition_penalty', 'repetition_penalty_range', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'tfs', 'top_a', 'mirostat_mode', 'mirostat_tau', 'mirostat_eta', 'guidance_scale']:
         generate_params[k] = state[k]
 
     if state['negative_prompt'] != '':
modules/ui.py

@@ -105,6 +105,7 @@ def list_interface_input_elements():
         'epsilon_cutoff',
         'eta_cutoff',
         'repetition_penalty',
+        'additive_repetition_penalty',
         'repetition_penalty_range',
         'encoder_repetition_penalty',
         'no_repeat_ngram_size',
modules/ui_parameters.py

@@ -31,6 +31,7 @@ def create_ui(default_preset):
     shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
     shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')
     shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty')
+    shared.gradio['additive_repetition_penalty'] = gr.Slider(0, 4, value=generate_params['additive_repetition_penalty'], step=0.05, label='additive_repetition_penalty')
     shared.gradio['repetition_penalty_range'] = gr.Slider(0, 4096, step=64, value=generate_params['repetition_penalty_range'], label='repetition_penalty_range')
     shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p')
     shared.gradio['tfs'] = gr.Slider(0.0, 1.0, value=generate_params['tfs'], step=0.01, label='tfs')
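A note on the slider ranges: the multiplicative penalty divides positive logits, so its useful window is narrow (1.0 to 1.5), while the additive penalty is subtracted in raw logit units, where even small values bite, since shifting a logit down by a scales that token's unnormalized softmax weight by exp(-a). A back-of-the-envelope illustration, assuming the penalized logits feed a standard softmax:

import math

# Effect of subtracting `a` from a token's logit on its pre-normalization weight.
for a in (0.05, 0.5, 2.0, 4.0):
    print(f"additive penalty {a}: token weight scaled by ~{math.exp(-a):.3f}")
# 0.05 -> ~0.951x, 0.5 -> ~0.607x, 2.0 -> ~0.135x, 4.0 (slider max) -> ~0.018x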