Clear cache while switching LoRAs

parent 4578e88ffd
commit bf22d16ebc

3 changed files with 13 additions and 24 deletions
@@ -1,11 +1,10 @@
-import gc
 from queue import Queue
 from threading import Thread
 
 import torch
 import transformers
 
 import modules.shared as shared
+from modules.text_generation import clear_torch_cache
 
-
 # Copied from https://github.com/PygmalionAI/gradio-ui/
@@ -90,8 +89,3 @@ class Iteratorize:
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.stop_now = True
         clear_torch_cache()
-
-def clear_torch_cache():
-    gc.collect()
-    if not shared.args.cpu:
-        torch.cuda.empty_cache()
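The five lines deleted above are the local helper that this commit consolidates into modules/text_generation; the Iteratorize context manager now imports clear_torch_cache from there instead of defining its own copy. A minimal standalone sketch of the same technique, with torch.cuda.is_available() standing in for the repository's shared.args.cpu flag so the snippet runs outside the repo:

import gc

import torch


def clear_torch_cache():
    """Release cached GPU memory, e.g. before attaching a different LoRA."""
    # Drop unreachable Python objects first so their tensors become freeable.
    gc.collect()
    # Then hand PyTorch's cached, unused CUDA blocks back to the driver.
    # (The repository gates this on `not shared.args.cpu` instead.)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

Calling gc.collect() before torch.cuda.empty_cache() matters: empty_cache() can only release allocator blocks that are no longer referenced, so collecting dead Python references first maximizes the memory actually returned. Per the commit title, the other changed files (not shown in this hunk) presumably call this helper before loading a new LoRA so the previous adapter's allocations are freed first.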