diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..b40f196
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,27 @@
+FROM nvcr.io/nvidia/pytorch:23.05-py3
+
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    make \
+    pkgconf \
+    xz-utils \
+    xorg-dev \
+    libgl1-mesa-dev \
+    libglu1-mesa-dev \
+    libxrandr-dev \
+    libxinerama-dev \
+    libxcursor-dev \
+    libxi-dev \
+    libxxf86vm-dev
+
+RUN pip install --upgrade pip
+
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+
+WORKDIR /workspace
+
+RUN printf '#!/bin/bash\nexec "$@"\n' > /entry.sh && chmod a+x /entry.sh
+ENTRYPOINT ["/entry.sh"]
diff --git a/README.md b/README.md
index b26a764..55b5a29 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ If you have CUDA graphic card, please follow the requirements of [NVlabs/stylega
 The usual installation steps involve the following commands, they should set up the correct CUDA version and all the python packages
 
 ```
-conda env create python=3.7 -f environment.yml
+conda env create -f environment.yml
 conda activate stylegan3
 ```
@@ -64,6 +64,19 @@ cat environment.yml | \
   grep -v -E 'nvidia|cuda' > environment-no-nvidia.yml && \
   conda env create -f environment-no-nvidia.yml
 conda activate stylegan3
+```
+
+## Run Gradio visualizer in Docker
+
+The provided Docker image is based on the NGC PyTorch image. To quickly try out the visualizer in Docker, run the following:
+
+```sh
+docker build . -t draggan:latest
+docker run -v "$PWD":/workspace/src -it draggan:latest bash
+cd src && python visualizer_drag_gradio.py
+```
+Now you can open the shared link from Gradio (printed in the terminal).
+Beware: the Docker image takes about 25 GB of disk space!
 
 # On MacOS
 export PYTORCH_ENABLE_MPS_FALLBACK=1
@@ -75,6 +88,10 @@ To download pre-trained weights, simply run:
 ```sh
 sh scripts/download_model.sh
 ```
+Or, for Windows:
+```
+.\scripts\download_model.bat
+```
 If you want to try StyleGAN-Human and the Landscapes HQ (LHQ) dataset, please download weights from these links: [StyleGAN-Human](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing), [LHQ](https://drive.google.com/file/d/16twEf0T9QINAEoMsWefoWiyhcTd-aiWc/view?usp=sharing), and put them under `./checkpoints`.
 
 Feel free to try other pretrained StyleGAN.
@@ -85,10 +102,14 @@ To start the DragGAN GUI, simply run:
 ```sh
 sh scripts/gui.sh
 ```
+If you are using Windows, you can run:
+```
+.\scripts\gui.bat
+```
 
 This GUI supports editing GAN-generated images. To edit a real image, you need to first perform GAN inversion using tools like [PTI](https://github.com/danielroich/PTI). Then load the new latent code and model weights to the GUI.
 
-You can run DragGAN Gradio demo as well:
+You can also run the DragGAN Gradio demo, which works on both Windows and Linux:
 ```sh
 python visualizer_drag_gradio.py
 ```
@@ -97,6 +118,7 @@ python visualizer_drag_gradio.py
 ```
 
 This code is developed based on [StyleGAN3](https://github.com/NVlabs/stylegan3). Part of the code is borrowed from [StyleGAN-Human](https://github.com/stylegan-human/StyleGAN-Human).
+(cheers to the community as well)
 
 ## License
 The code related to the DragGAN algorithm is licensed under [CC-BY-NC](https://creativecommons.org/licenses/by-nc/4.0/).
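If the container should use the GPU (the base image is CUDA-enabled), the `docker run` line in the README snippet above can be extended; a minimal sketch, assuming the NVIDIA Container Toolkit is installed on the host and that Gradio serves on its default port 7860 (both are assumptions about the host setup, not part of this patch):

```sh
docker build . -t draggan:latest
# --gpus all exposes the host GPUs to the container;
# -p 7860:7860 publishes Gradio's default port in case the share link is not used
docker run --gpus all -p 7860:7860 -v "$PWD":/workspace/src -it draggan:latest bash
cd src && python visualizer_drag_gradio.py
```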
diff --git a/requirements.txt b/requirements.txt
index 1c34039..badd453 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,9 +3,13 @@
-torch
-torchvision
-Ninja
-gradio
+torch>=2.0.0
+torchvision>=0.15.2
+scipy==1.11.0
+Ninja==1.10.2
+gradio>=3.35.2
+imageio-ffmpeg>=0.4.3
 huggingface_hub
 hf_transfer
 pyopengl
 imgui
-glfw
+glfw==2.6.1
+pillow>=9.4.0
+imageio>=2.9.0
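To check that the pinned requirements install cleanly (pip rejects a requirements file that lists the same package twice, with a "Double requirement" error), a minimal smoke test in a throwaway virtual environment; the environment path and the import list here are illustrative, not part of the patch:

```sh
# Create and activate a disposable environment
python -m venv /tmp/draggan-env
. /tmp/draggan-env/bin/activate
# Fails fast if any pins conflict or a package is listed twice
pip install -r requirements.txt
# Import the GUI-facing packages as a quick sanity check
python -c "import torch, torchvision, gradio, glfw, imgui; print(torch.__version__)"
```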