Merge pull request from GHSA-hv5m-3rp9-xcpf
* Remove eval of API input * Remove unnecessary eval/exec for security * Use ast.literal_eval * Use ast.literal_eval --------- Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>
This commit is contained in:
parent
d2ea925fa5
commit
16a3a5b039
5 changed files with 15 additions and 13 deletions
|
@@ -1,3 +1,4 @@
|
|||
import ast
|
||||
import base64
|
||||
import copy
|
||||
import io
|
||||
|
@@ -81,7 +82,7 @@ def get_stopping_strings(state):
|
|||
stopping_strings = [f"\n{state['name1']}", f"\n{state['name2']}"]
|
||||
else:
|
||||
stopping_strings = [f"\n{state['name1']}:", f"\n{state['name2']}:"]
|
||||
stopping_strings += eval(f"[{state['custom_stopping_strings']}]")
|
||||
stopping_strings += ast.literal_eval(f"[{state['custom_stopping_strings']}]")
|
||||
return stopping_strings
|
||||
|
||||
|
||||
|
@@ -525,4 +526,4 @@ def upload_your_profile_picture(img, name1, name2, mode):
|
|||
img.save(Path('cache/pfp_me.png'))
|
||||
print('Profile picture saved to "cache/pfp_me.png"')
|
||||
|
||||
return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True)
|
||||
return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True)
|
||||
|
|
|
@@ -17,7 +17,7 @@ def load_extensions():
|
|||
print(f'Loading the extension "{name}"... ', end='')
|
||||
try:
|
||||
exec(f"import extensions.{name}.script")
|
||||
extension = eval(f"extensions.{name}.script")
|
||||
extension = getattr(extensions, name).script
|
||||
if extension not in setup_called and hasattr(extension, "setup"):
|
||||
setup_called.add(extension)
|
||||
extension.setup()
|
||||
|
@@ -32,7 +32,7 @@ def load_extensions():
|
|||
def iterator():
|
||||
for name in sorted(state, key=lambda x: state[x][1]):
|
||||
if state[name][0]:
|
||||
yield eval(f"extensions.{name}.script"), name
|
||||
yield getattr(extensions, name).script, name
|
||||
|
||||
|
||||
# Extension functions that map string -> string
|
||||
|
|
|
@@ -152,9 +152,9 @@ args_defaults = parser.parse_args([])
|
|||
# Deprecation warnings for parameters that have been renamed
|
||||
deprecated_dict = {}
|
||||
for k in deprecated_dict:
|
||||
if eval(f"args.{k}") != deprecated_dict[k][1]:
|
||||
if getattr(args, k) != deprecated_dict[k][1]:
|
||||
print(f"Warning: --{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.")
|
||||
exec(f"args.{deprecated_dict[k][0]} = args.{k}")
|
||||
setattr(args, deprecated_dict[k][0], getattr(args, k))
|
||||
|
||||
# Deprecation warnings for parameters that have been removed
|
||||
if args.cai_chat:
|
||||
|
|
|
@@ -1,3 +1,4 @@
|
|||
import ast
|
||||
import random
|
||||
import re
|
||||
import time
|
||||
|
@@ -192,7 +193,7 @@ def generate_reply(question, state, eos_token=None, stopping_strings=[]):
|
|||
|
||||
# Handling the stopping strings
|
||||
stopping_criteria_list = transformers.StoppingCriteriaList()
|
||||
for st in [stopping_strings, eval(f"[{state['custom_stopping_strings']}]")]:
|
||||
for st in (stopping_strings, ast.literal_eval(f"[{state['custom_stopping_strings']}]")):
|
||||
if type(st) is list and len(st) > 0:
|
||||
sentinel_token_ids = [encode(string, add_special_tokens=False) for string in st]
|
||||
stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=sentinel_token_ids, starting_idx=len(input_ids[0])))
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue