finegrain-image-enhancer / requirements.lock
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
# universal: false
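#
# for reference, a minimal sketch of how this lock would be refreshed with the
# flags above (all at their defaults; exact flag names assume a recent rye):
#
#   rye lock                      # re-resolve and rewrite requirements.lock
#   rye sync                      # re-resolve and install the result into the project .venv
#   rye lock --universal          # example: would flip `universal` above to true
#   rye lock --generate-hashes    # example: would flip `generate-hashes` above to true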
-e file:.
aiofiles==24.1.0
# via gradio
annotated-types==0.7.0
# via pydantic
anyio==4.9.0
# via gradio
# via httpx
# via starlette
certifi==2025.4.26
# via httpcore
# via httpx
# via requests
charset-normalizer==3.4.1
# via requests
click==8.1.8
# via typer
# via uvicorn
fastapi==0.115.12
# via gradio
ffmpy==0.5.0
# via gradio
filelock==3.18.0
# via huggingface-hub
# via torch
fsspec==2025.3.2
# via gradio-client
# via huggingface-hub
# via torch
gradio==5.27.1
# via enhancer
# via spaces
gradio-client==1.9.1
# via gradio
groovy==0.1.2
# via gradio
h11==0.16.0
# via httpcore
# via uvicorn
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via gradio
# via gradio-client
# via safehttpx
# via spaces
huggingface-hub==0.30.2
# via gradio
# via gradio-client
idna==3.10
# via anyio
# via httpx
# via requests
jaxtyping==0.3.2
# via refiners
jinja2==3.1.6
# via gradio
# via torch
markdown-it-py==3.0.0
# via rich
markupsafe==3.0.2
# via gradio
# via jinja2
mdurl==0.1.2
# via markdown-it-py
mpmath==1.3.0
# via sympy
networkx==3.4.2
# via torch
numpy==1.26.4
# via enhancer
# via gradio
# via pandas
# via refiners
nvidia-cublas-cu12==12.6.4.1
# via nvidia-cudnn-cu12
# via nvidia-cusolver-cu12
# via torch
nvidia-cuda-cupti-cu12==12.6.80
# via torch
nvidia-cuda-nvrtc-cu12==12.6.77
# via torch
nvidia-cuda-runtime-cu12==12.6.77
# via torch
nvidia-cudnn-cu12==9.5.1.17
# via torch
nvidia-cufft-cu12==11.3.0.4
# via torch
nvidia-cufile-cu12==1.11.1.6
# via torch
nvidia-curand-cu12==10.3.7.77
# via torch
nvidia-cusolver-cu12==11.7.1.2
# via torch
nvidia-cusparse-cu12==12.5.4.2
# via nvidia-cusolver-cu12
# via torch
nvidia-cusparselt-cu12==0.6.3
# via torch
nvidia-nccl-cu12==2.26.2
# via torch
nvidia-nvjitlink-cu12==12.6.85
# via nvidia-cufft-cu12
# via nvidia-cusolver-cu12
# via nvidia-cusparse-cu12
# via torch
nvidia-nvtx-cu12==12.6.77
# via torch
orjson==3.10.17
# via gradio
packaging==25.0
# via gradio
# via gradio-client
# via huggingface-hub
# via refiners
# via spaces
pandas==2.2.3
# via gradio
pillow==11.2.1
# via enhancer
# via gradio
# via pillow-heif
# via refiners
pillow-heif==0.22.0
# via enhancer
psutil==5.9.8
# via spaces
pydantic==2.11.3
# via fastapi
# via gradio
# via spaces
pydantic-core==2.33.1
# via pydantic
pydub==0.25.1
# via gradio
pygments==2.19.1
# via rich
python-dateutil==2.9.0.post0
# via pandas
python-multipart==0.0.20
# via gradio
pytz==2025.2
# via pandas
pyyaml==6.0.2
# via gradio
# via huggingface-hub
refiners @ git+https://github.com/finegrain-ai/refiners@cfe8b66ba4f8a906583850ac25e9e89cb83a44b9
# via enhancer
requests==2.32.3
# via huggingface-hub
# via spaces
rich==14.0.0
# via typer
ruff==0.11.7
# via gradio
safehttpx==0.1.6
# via gradio
safetensors==0.5.3
# via refiners
semantic-version==2.10.0
# via gradio
setuptools==80.0.0
# via torch
# via triton
shellingham==1.5.4
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via anyio
spaces==0.35.0
# via enhancer
starlette==0.46.2
# via fastapi
# via gradio
sympy==1.14.0
# via torch
tomlkit==0.13.2
# via gradio
torch==2.7.0
# via refiners
tqdm==4.67.1
# via huggingface-hub
triton==3.3.0
# via torch
typer==0.15.3
# via gradio
typing-extensions==4.13.2
# via anyio
# via fastapi
# via gradio
# via gradio-client
# via huggingface-hub
# via pydantic
# via pydantic-core
# via spaces
# via torch
# via typer
# via typing-inspection
typing-inspection==0.4.0
# via pydantic
tzdata==2025.2
# via pandas
urllib3==2.4.0
# via requests
uvicorn==0.34.2
# via gradio
wadler-lindig==0.1.5
# via jaxtyping
websockets==15.0.1
# via gradio-client