Create alternative requirements.txt with AMD and Metal wheels (#4052)

parent 9de2dfa887
commit 2e7b6b0014

13 changed files with 336 additions and 66 deletions

one_click.py (86 changed lines)
@@ -1,6 +1,7 @@
 import argparse
 import glob
 import os
+import platform
 import site
 import subprocess
 import sys
@@ -34,6 +35,25 @@ def is_macos():
     return sys.platform.startswith("darwin")
 
 
+def is_x86_64():
+    return platform.machine() == "x86_64"
+
+
+def cpu_has_avx2():
+    import cpuinfo
+
+    info = cpuinfo.get_cpu_info()
+    if 'avx2' in info['flags']:
+        return True
+
+    return False
+
+
+def torch_version():
+    from torch import __version__ as torver
+    return torver
+
+
 def is_installed():
     for sitedir in site.getsitepackages():
         if "site-packages" in sitedir and conda_env_path in sitedir:
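
The three helpers added here centralize the hardware and framework probes that the requirements-file selection relies on later. A minimal sketch of what they report, assuming torch and the py-cpuinfo package are importable in the active environment:

import platform

import cpuinfo  # provided by the py-cpuinfo package
from torch import __version__ as torver

# Same probes as is_x86_64(), cpu_has_avx2() and torch_version() above.
print("machine:", platform.machine())                      # e.g. x86_64 or arm64
print("avx2:", 'avx2' in cpuinfo.get_cpu_info()['flags'])  # True on most modern x86 CPUs
print("torch:", torver)                                    # e.g. 2.0.1+cu117 or 2.0.1+rocm5.4.2
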
@@ -137,7 +157,7 @@ def install_webui():
         install_pytorch = "python -m pip install torch==2.0.1a0 torchvision==0.15.2a0 intel_extension_for_pytorch==2.0.110+xpu -f https://developer.intel.com/ipex-whl-stable-xpu"
 
     # Install Git and then Pytorch
-    run_cmd(f"{install_git} && {install_pytorch}", assert_success=True, environment=True)
+    run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo", assert_success=True, environment=True)
 
     # Install the webui requirements
     update_requirements(initial_installation=True)
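
py-cpuinfo is appended to the very first install command so that cpu_has_avx2() already works before any requirements file has been chosen. A hypothetical defensive variant, not part of the commit, would fall back gracefully if the package were missing:

def cpu_has_avx2_safe():
    # Hypothetical fallback: assume no AVX2 when py-cpuinfo is unavailable.
    try:
        import cpuinfo
    except ImportError:
        return False

    return 'avx2' in cpuinfo.get_cpu_info().get('flags', [])
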
@@ -162,7 +182,37 @@ def update_requirements(initial_installation=False):
         if os.path.exists(extension_req_path):
             run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True)
 
-    textgen_requirements = open("requirements.txt").read().splitlines()
+    # Detect the PyTorch version
+    torver = torch_version()
+    is_cuda = '+cu' in torver  # 2.0.1+cu117
+    is_rocm = '+rocm' in torver  # 2.0.1+rocm5.4.2
+    is_intel = '+cxx11' in torver  # 2.0.1a0+cxx11.abi
+    is_cpu = '+cpu' in torver  # 2.0.1+cpu
+
+    if is_rocm:
+        if cpu_has_avx2():
+            requirements_file = "requirements_amd.txt"
+        else:
+            requirements_file = "requirements_amd_noavx2.txt"
+    elif is_cpu:
+        if cpu_has_avx2():
+            requirements_file = "requirements_cpu_only.txt"
+        else:
+            requirements_file = "requirements_cpu_only_noavx2.txt"
+    elif is_macos():
+        if is_x86_64():
+            requirements_file = "requirements_apple_intel.txt"
+        else:
+            requirements_file = "requirements_apple_silicon.txt"
+    else:
+        if cpu_has_avx2():
+            requirements_file = "requirements.txt"
+        else:
+            requirements_file = "requirements_noavx2.txt"
+
+    print(f"Using the following requirements file: {requirements_file}")
+
+    textgen_requirements = open(requirements_file).read().splitlines()
 
     # Workaround for git+ packages not updating properly. Also store requirements.txt for later use
     git_requirements = [req for req in textgen_requirements if req.startswith("git+")]
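
The new branch maps the torch build string and the host CPU onto one of eight requirements files. A standalone mirror of that mapping, with hypothetical inputs, for quick reference (illustration only, not part of one_click.py):

def pick_requirements_file(torver, macos=False, x86_64=True, avx2=True):
    # Mirrors the branch above: ROCm, then CPU-only, then macOS, then the CUDA/default case.
    if '+rocm' in torver:
        return "requirements_amd.txt" if avx2 else "requirements_amd_noavx2.txt"
    elif '+cpu' in torver:
        return "requirements_cpu_only.txt" if avx2 else "requirements_cpu_only_noavx2.txt"
    elif macos:
        return "requirements_apple_intel.txt" if x86_64 else "requirements_apple_silicon.txt"
    else:
        return "requirements.txt" if avx2 else "requirements_noavx2.txt"


print(pick_requirements_file("2.0.1+rocm5.4.2"))                  # requirements_amd.txt
print(pick_requirements_file("2.0.1+cpu", avx2=False))            # requirements_cpu_only_noavx2.txt
print(pick_requirements_file("2.0.1", macos=True, x86_64=False))  # requirements_apple_silicon.txt
print(pick_requirements_file("2.0.1+cu117"))                      # requirements.txt
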
@@ -178,14 +228,7 @@ def update_requirements(initial_installation=False):
        print(f"Uninstalled {package_name}")
 
     # Install/update the project requirements
-    run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True)
-
-    # The following requirements are for CUDA, not CPU
-    # Parse output of 'pip show torch' to determine torch version
-    torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True)
-    torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0]
-    is_cuda = '+cu' in torver
-    is_rocm = '+rocm' in torver
+    run_cmd(f"python -m pip install -r {requirements_file} --upgrade", assert_success=True, environment=True)
 
     # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility
     if not any((is_cuda, is_rocm)) and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1:
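
The update step now installs from the selected file, and the torch version comes straight from the module (via torch_version() above) instead of being parsed out of pip show torch. A minimal comparison of the two probes, assuming torch is importable in the current interpreter:

import subprocess
import sys

# New style: ask the module itself (what torch_version() does).
from torch import __version__ as torver_import

# Old style: parse `pip show torch` output (what the removed lines did).
out = subprocess.run([sys.executable, "-m", "pip", "show", "torch"],
                     capture_output=True, text=True).stdout
torver_pip = next(line.split()[1] for line in out.splitlines() if line.startswith("Version:"))

assert torver_import == torver_pip
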
@@ -216,29 +259,6 @@ def update_requirements(initial_installation=False):
            # Install the correct version of g++
            run_cmd("conda install -y -k conda-forge::gxx_linux-64=11.2.0", environment=True)
 
-    if is_rocm:
-        # Pre-installed ExLlama module does not support AMD GPU
-        run_cmd("python -m pip uninstall -y exllama", environment=True)
-        # Get download URL for latest ExLlama ROCm wheel
-        exllama_rocm = run_cmd('curl -s https://api.github.com/repos/jllllll/exllama/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
-        if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm:
-            run_cmd("python -m pip install " + exllama_rocm, environment=True)
-
-        # Install/Update ROCm AutoGPTQ for AMD GPUs
-        auto_gptq_version = [req for req in textgen_requirements if req.startswith('https://github.com/PanQiWei/AutoGPTQ/releases/download/')][0].split('/')[7]
-        auto_gptq_wheel = run_cmd(f'curl -s https://api.github.com/repos/PanQiWei/AutoGPTQ/releases/tags/{auto_gptq_version} | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
-        if not auto_gptq_wheel and run_cmd(f"python -m pip install {auto_gptq_wheel} --force-reinstall --no-deps", environment=True).returncode != 0:
-            print_big_message("ERROR: AutoGPTQ wheel installation failed!\n You will not be able to use GPTQ-based models with AutoGPTQ.")
-
-        # Install GPTQ-for-LLaMa for ROCm
-        gptq_wheel = run_cmd('curl -s https://api.github.com/repos/jllllll/GPTQ-for-LLaMa-CUDA/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
-        install_gptq = run_cmd("python -m pip install " + gptq_wheel, environment=True).returncode == 0
-        if install_gptq:
-            print("Wheel installation success!")
-        else:
-            print("ERROR: GPTQ wheel installation failed.")
-            print("You will not be able to use GPTQ-based models with GPTQ-for-LLaMa.")
-
     clear_cache()
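
With the per-platform files in place, the ROCm-specific block that fetched ExLlama, AutoGPTQ and GPTQ-for-LLaMa wheels at runtime via curl and grep is dropped. As the commit title suggests, the AMD (and Metal) wheels are expected to come pinned inside the new requirements_amd*.txt and requirements_apple_*.txt files, so the plain pip install -r {requirements_file} step above already covers them.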