From ddb62470e94ec78d70d9e138acfdb543b84dd331 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sun, 19 Mar 2023 19:21:41 -0300
Subject: [PATCH] --no-cache and --gpu-memory in MiB for fine VRAM control

---
 README.md                  | 3 ++-
 modules/models.py          | 8 +++++---
 modules/shared.py          | 5 +++--
 modules/text_generation.py | 4 +++-
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index ded9b35..330f36b 100644
--- a/README.md
+++ b/README.md
@@ -183,7 +183,8 @@ Optionally, you can use the following command-line flags:
 | `--disk-cache-dir DISK_CACHE_DIR` | Directory to save the disk cache to. Defaults to `cache/`. |
 | `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maxmimum GPU memory in GiB to be allocated per GPU. Example: `--gpu-memory 10` for a single GPU, `--gpu-memory 10 5` for two GPUs. |
 | `--cpu-memory CPU_MEMORY` | Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.|
-| `--flexgen` | Enable the use of FlexGen offloading. |
+| `--no-cache` | Set `use_cache` to False while generating text. This reduces the VRAM usage a bit at a performance cost. |
+| `--flexgen` | Enable the use of FlexGen offloading. |
 | `--percent PERCENT [PERCENT ...]` | FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0). |
 | `--compress-weight` | FlexGen: Whether to compress weight (default: False).|
 | `--pin-weight [PIN_WEIGHT]` | FlexGen: whether to pin weights (setting this to False reduces CPU memory by 20%). |
diff --git a/modules/models.py b/modules/models.py
index f07e738..ccb97da 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -1,5 +1,6 @@
 import json
 import os
+import re
 import time
 import zipfile
 from pathlib import Path
@@ -120,11 +121,12 @@ def load_model(model_name):
                 params["torch_dtype"] = torch.float16
 
             if shared.args.gpu_memory:
-                memory_map = shared.args.gpu_memory
+                memory_map = list(map(lambda x : x.strip(), shared.args.gpu_memory))
+                max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
                 max_memory = {}
                 for i in range(len(memory_map)):
-                    max_memory[i] = f'{memory_map[i]}GiB'
-                max_memory['cpu'] = f'{shared.args.cpu_memory or 99}GiB'
+                    max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
+                max_memory['cpu'] = max_cpu_memory
                 params['max_memory'] = max_memory
             elif shared.args.auto_devices:
                 total_mem = (torch.cuda.get_device_properties(0).total_memory / (1024*1024))
diff --git a/modules/shared.py b/modules/shared.py
index e3920f2..8cae107 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -85,8 +85,9 @@ parser.add_argument('--bf16', action='store_true', help='Load the model with bfl
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
 parser.add_argument('--disk-cache-dir', type=str, default="cache", help='Directory to save the disk cache to. Defaults to "cache".')
-parser.add_argument('--gpu-memory', type=int, nargs="+", help='Maxmimum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs.')
-parser.add_argument('--cpu-memory', type=int, help='Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.')
+parser.add_argument('--gpu-memory', type=str, nargs="+", help='Maxmimum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs.')
+parser.add_argument('--cpu-memory', type=str, help='Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.')
+parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
 parser.add_argument('--flexgen', action='store_true', help='Enable the use of FlexGen offloading.')
 parser.add_argument('--percent', type=int, nargs="+", default=[0, 100, 100, 0, 100, 0], help='FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0).')
 parser.add_argument("--compress-weight", action="store_true", help="FlexGen: activate weight compression.")
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 1d11de1..9159975 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -136,7 +136,9 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         t = encode(stopping_string, 0, add_special_tokens=False)
         stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=t, starting_idx=len(input_ids[0])))
 
-    generate_params = {}
+    generate_params = {
+        'use_cache': not shared.args.no_cache,
+    }
     if not shared.args.flexgen:
         generate_params.update({
             "max_new_tokens": max_new_tokens,
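
For reference, the parsing introduced in modules/models.py treats plain `--gpu-memory` values as GiB and passes values with an explicit `MiB`/`GiB` suffix through unchanged, which is what enables fine-grained control such as `--gpu-memory 3500MiB`. A minimal standalone sketch of that logic follows; the helper name `build_max_memory` is illustrative and not part of the patch:

```python
import re

def build_max_memory(gpu_memory, cpu_memory=None):
    # Mirrors the patched logic in modules/models.py: bare numbers are
    # interpreted as GiB, while values already ending in 'MiB'/'GiB'
    # are passed through to the max_memory map unchanged.
    memory_map = [x.strip() for x in gpu_memory]
    max_cpu_memory = cpu_memory.strip() if cpu_memory is not None else '99GiB'
    max_memory = {}
    for i, entry in enumerate(memory_map):
        max_memory[i] = entry if re.match('.*ib$', entry.lower()) else f'{entry}GiB'
    max_memory['cpu'] = max_cpu_memory
    return max_memory

print(build_max_memory(['3500MiB', '5']))
# {0: '3500MiB', 1: '5GiB', 'cpu': '99GiB'}
```

A typical invocation combining both new options (assuming the usual `server.py` entry point) would be `python server.py --gpu-memory 3500MiB --no-cache`.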