Clear cache while switching LoRAs

This commit is contained in:
oobabooga 2023-03-23 21:56:26 -03:00 committed by GitHub
parent 4578e88ffd
commit bf22d16ebc
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 13 additions and 24 deletions

View file

@@ -1,11 +1,10 @@
import gc
from queue import Queue
from threading import Thread
import torch
import transformers
import modules.shared as shared
from modules.text_generation import clear_torch_cache
# Copied from https://github.com/PygmalionAI/gradio-ui/
@@ -90,8 +89,3 @@ class Iteratorize:
def __exit__(self, exc_type, exc_val, exc_tb):
    """Context-manager exit for Iteratorize.

    Sets the stop flag first so the background generation thread can
    terminate, then frees torch caches via clear_torch_cache().
    Exception info (exc_type, exc_val, exc_tb) is ignored and not
    suppressed — returning None lets any in-flight exception propagate.
    """
    self.stop_now = True
    clear_torch_cache()
def clear_torch_cache():
    """Run the Python garbage collector and release cached CUDA memory.

    The CUDA step is skipped when the application is running in
    CPU-only mode (shared.args.cpu).
    """
    gc.collect()
    if shared.args.cpu:
        return
    torch.cuda.empty_cache()