hysts (HF staff) committed
Commit de3f458 · 1 Parent(s): 2fd3108
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v5.0.0
     hooks:
       - id: check-executables-have-shebangs
       - id: check-json
@@ -13,48 +13,21 @@ repos:
         args: ["--fix=lf"]
       - id: requirements-txt-fixer
      - id: trailing-whitespace
-  - repo: https://github.com/myint/docformatter
-    rev: v1.7.5
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.9.9
     hooks:
-      - id: docformatter
-        args: ["--in-place"]
-  - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
-    hooks:
-      - id: isort
-        args: ["--profile", "black"]
+      - id: ruff
+        args: ["--fix"]
+      - id: ruff-format
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.8.0
+    rev: v1.15.0
     hooks:
       - id: mypy
         args: ["--ignore-missing-imports"]
         additional_dependencies:
           [
             "types-python-slugify",
-            "types-requests",
-            "types-PyYAML",
             "types-pytz",
+            "types-PyYAML",
+            "types-requests",
           ]
-  - repo: https://github.com/psf/black
-    rev: 24.2.0
-    hooks:
-      - id: black
-        language_version: python3.10
-        args: ["--line-length", "119"]
-  - repo: https://github.com/kynan/nbstripout
-    rev: 0.7.1
-    hooks:
-      - id: nbstripout
-        args:
-          [
-            "--extra-keys",
-            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
-          ]
-  - repo: https://github.com/nbQA-dev/nbQA
-    rev: 1.7.1
-    hooks:
-      - id: nbqa-black
-      - id: nbqa-pyupgrade
-        args: ["--py37-plus"]
-      - id: nbqa-isort
-        args: ["--float-to-top"]
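For context, the updated hook set above is driven by pre-commit's standard CLI. Below is a minimal sketch of invoking it programmatically; it assumes the `pre-commit` CLI is installed and on PATH, and is illustrative only (not part of this commit).

# Minimal sketch: run every hook from .pre-commit-config.yaml against the whole repo.
# Assumption: the `pre-commit` CLI is installed (e.g. via `pip install pre-commit`).
import subprocess

result = subprocess.run(["pre-commit", "run", "--all-files"], check=False)
# A non-zero exit code means at least one hook reformatted a file or reported an issue.
print("all hooks passed" if result.returncode == 0 else "hooks reported changes or issues")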
.python-version ADDED
@@ -0,0 +1 @@
+3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+{
+  "recommendations": [
+    "ms-python.python",
+    "charliermarsh.ruff",
+    "streetsidesoftware.code-spell-checker",
+    "tamasfe.even-better-toml"
+  ]
+}
.vscode/settings.json CHANGED
@@ -2,29 +2,16 @@
   "editor.formatOnSave": true,
   "files.insertFinalNewline": false,
   "[python]": {
-    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.defaultFormatter": "charliermarsh.ruff",
     "editor.formatOnType": true,
     "editor.codeActionsOnSave": {
+      "source.fixAll.ruff": "explicit",
       "source.organizeImports": "explicit"
     }
   },
   "[jupyter]": {
     "files.insertFinalNewline": false
   },
-  "black-formatter.args": [
-    "--line-length=119"
-  ],
-  "isort.args": ["--profile", "black"],
-  "flake8.args": [
-    "--max-line-length=119"
-  ],
-  "ruff.lint.args": [
-    "--line-length=119"
-  ],
   "notebook.output.scrolling": true,
-  "notebook.formatOnCellExecution": true,
-  "notebook.formatOnSave.enabled": true,
-  "notebook.codeActionsOnSave": {
-    "source.organizeImports": "explicit"
-  }
+  "notebook.formatOnSave.enabled": true
 }
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐢
 colorFrom: yellow
 colorTo: indigo
 sdk: gradio
-sdk_version: 4.36.1
+sdk_version: 5.20.0
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -1,14 +1,12 @@
 #!/usr/bin/env python

-from __future__ import annotations
-
 import functools
 import os
 import pathlib
 import sys
 import tarfile
 import urllib.request
-from typing import Callable
+from collections.abc import Callable

 import cv2
 import gradio as gr
@@ -16,7 +14,7 @@ import huggingface_hub
 import numpy as np
 import PIL.Image
 import torch
-import torchvision.transforms as T
+import torchvision.transforms as T  # noqa: N812

 sys.path.insert(0, "anime_face_landmark_detection")

@@ -34,7 +32,7 @@ def load_sample_image_paths() -> list[pathlib.Path]:
     dataset_repo = "hysts/sample-images-TADNE"
     path = huggingface_hub.hf_hub_download(dataset_repo, "images.tar.gz", repo_type="dataset")
     with tarfile.open(path) as f:
-        f.extractall()
+        f.extractall()  # noqa: S202
     return sorted(image_dir.glob("*"))


@@ -42,7 +40,7 @@ def load_face_detector() -> cv2.CascadeClassifier:
     url = "https://raw.githubusercontent.com/nagadomi/lbpcascade_animeface/master/lbpcascade_animeface.xml"
     path = pathlib.Path("lbpcascade_animeface.xml")
     if not path.exists():
-        urllib.request.urlretrieve(url, path.as_posix())
+        urllib.request.urlretrieve(url, path.as_posix())  # noqa: S310
     return cv2.CascadeClassifier(path.as_posix())


@@ -73,7 +71,6 @@ def detect(

     res = image.copy()
     for x_orig, y_orig, w_orig, h_orig in preds:
-
         x0 = round(max(x_orig - w_orig / 8, 0))
         x1 = round(min(x_orig + w_orig * 9 / 8, image_w))
         y0 = round(max(y_orig - h_orig / 4, 0))
@@ -123,7 +120,7 @@ fn = functools.partial(
     landmark_detector=landmark_detector,
 )

-with gr.Blocks(css="style.css") as demo:
+with gr.Blocks(css_paths="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
         with gr.Column():
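The app.py hunks above swap `from typing import Callable` for `from collections.abc import Callable` and drop the `from __future__ import annotations` shim, which works because the Space now pins Python 3.10 (see `.python-version`). A minimal sketch of that annotation style follows; `apply_twice` is a hypothetical example, not code from this repository.

# Minimal sketch of the import/annotation style used by the updated app.py.
# `apply_twice` is a hypothetical helper for illustration only.
from collections.abc import Callable


def apply_twice(fn: Callable[[int], int], value: int) -> int:
    # collections.abc.Callable is subscriptable at runtime on Python >= 3.9,
    # so no `from __future__ import annotations` is needed here.
    return fn(fn(value))


if __name__ == "__main__":
    print(apply_twice(lambda x: x + 1, 3))  # prints 5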
pyproject.toml ADDED
@@ -0,0 +1,55 @@
+[project]
+name = "anime-face-landmark-detection-app"
+version = "0.1.0"
+description = ""
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "gradio>=5.20.0",
+    "hf-transfer>=0.1.9",
+    "opencv-python-headless>=4.11.0.86",
+    "torch==2.4.0",
+    "torchvision>=0.19.0",
+]
+
+[tool.ruff]
+line-length = 119
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+    "COM812", # missing-trailing-comma
+    "D203", # one-blank-line-before-class
+    "D213", # multi-line-summary-second-line
+    "E501", # line-too-long
+    "SIM117", # multiple-with-statements
+]
+extend-ignore = [
+    "D100", # undocumented-public-module
+    "D101", # undocumented-public-class
+    "D102", # undocumented-public-method
+    "D103", # undocumented-public-function
+    "D104", # undocumented-public-package
+    "D105", # undocumented-magic-method
+    "D107", # undocumented-public-init
+    "EM101", # raw-string-in-exception
+    "FBT001", # boolean-type-hint-positional-argument
+    "FBT002", # boolean-default-value-positional-argument
+    "PD901", # pandas-df-variable-name
+    "PGH003", # blanket-type-ignore
+    "PLR0913", # too-many-arguments
+    "PLR0915", # too-many-statements
+    "TRY003", # raise-vanilla-args
+]
+unfixable = [
+    "F401", # unused-import
+]
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.ruff.lint.per-file-ignores]
+"*.ipynb" = ["T201", "T203"]
+
+[tool.ruff.format]
+docstring-code-format = true
requirements.txt CHANGED
@@ -1,4 +1,211 @@
-gradio==4.36.1
-opencv-python-headless==4.10.0.82
-torch==2.0.1
-torchvision==0.15.2
+# This file was autogenerated by uv via the following command:
+#    uv pip compile pyproject.toml -o requirements.txt
+aiofiles==23.2.1
+    # via gradio
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.8.0
+    # via
+    #   gradio
+    #   httpx
+    #   starlette
+certifi==2025.1.31
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+charset-normalizer==3.4.1
+    # via requests
+click==8.1.8
+    # via
+    #   typer
+    #   uvicorn
+exceptiongroup==1.2.2
+    # via anyio
+fastapi==0.115.11
+    # via gradio
+ffmpy==0.5.0
+    # via gradio
+filelock==3.17.0
+    # via
+    #   huggingface-hub
+    #   torch
+    #   triton
+fsspec==2025.2.0
+    # via
+    #   gradio-client
+    #   huggingface-hub
+    #   torch
+gradio==5.20.0
+    # via anime-face-landmark-detection-app (pyproject.toml)
+gradio-client==1.7.2
+    # via gradio
+groovy==0.1.2
+    # via gradio
+h11==0.14.0
+    # via
+    #   httpcore
+    #   uvicorn
+hf-transfer==0.1.9
+    # via anime-face-landmark-detection-app (pyproject.toml)
+httpcore==1.0.7
+    # via httpx
+httpx==0.28.1
+    # via
+    #   gradio
+    #   gradio-client
+    #   safehttpx
+huggingface-hub==0.29.2
+    # via
+    #   gradio
+    #   gradio-client
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+jinja2==3.1.6
+    # via
+    #   gradio
+    #   torch
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==2.1.5
+    # via
+    #   gradio
+    #   jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+mpmath==1.3.0
+    # via sympy
+networkx==3.4.2
+    # via torch
+numpy==2.2.3
+    # via
+    #   gradio
+    #   opencv-python-headless
+    #   pandas
+    #   torchvision
+nvidia-cublas-cu12==12.1.3.1
+    # via
+    #   nvidia-cudnn-cu12
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cuda-cupti-cu12==12.1.105
+    # via torch
+nvidia-cuda-nvrtc-cu12==12.1.105
+    # via torch
+nvidia-cuda-runtime-cu12==12.1.105
+    # via torch
+nvidia-cudnn-cu12==9.1.0.70
+    # via torch
+nvidia-cufft-cu12==11.0.2.54
+    # via torch
+nvidia-curand-cu12==10.3.2.106
+    # via torch
+nvidia-cusolver-cu12==11.4.5.107
+    # via torch
+nvidia-cusparse-cu12==12.1.0.106
+    # via
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-nccl-cu12==2.20.5
+    # via torch
+nvidia-nvjitlink-cu12==12.8.61
+    # via
+    #   nvidia-cusolver-cu12
+    #   nvidia-cusparse-cu12
+nvidia-nvtx-cu12==12.1.105
+    # via torch
+opencv-python-headless==4.11.0.86
+    # via anime-face-landmark-detection-app (pyproject.toml)
+orjson==3.10.15
+    # via gradio
+packaging==24.2
+    # via
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+pandas==2.2.3
+    # via gradio
+pillow==11.1.0
+    # via
+    #   gradio
+    #   torchvision
+pydantic==2.10.6
+    # via
+    #   fastapi
+    #   gradio
+pydantic-core==2.27.2
+    # via pydantic
+pydub==0.25.1
+    # via gradio
+pygments==2.19.1
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-multipart==0.0.20
+    # via gradio
+pytz==2025.1
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   gradio
+    #   huggingface-hub
+requests==2.32.3
+    # via huggingface-hub
+rich==13.9.4
+    # via typer
+ruff==0.9.9
+    # via gradio
+safehttpx==0.1.6
+    # via gradio
+semantic-version==2.10.0
+    # via gradio
+shellingham==1.5.4
+    # via typer
+six==1.17.0
+    # via python-dateutil
+sniffio==1.3.1
+    # via anyio
+starlette==0.46.0
+    # via
+    #   fastapi
+    #   gradio
+sympy==1.13.3
+    # via torch
+tomlkit==0.13.2
+    # via gradio
+torch==2.4.0
+    # via
+    #   anime-face-landmark-detection-app (pyproject.toml)
+    #   torchvision
+torchvision==0.19.0
+    # via anime-face-landmark-detection-app (pyproject.toml)
+tqdm==4.67.1
+    # via huggingface-hub
+triton==3.0.0
+    # via torch
+typer==0.15.2
+    # via gradio
+typing-extensions==4.12.2
+    # via
+    #   anyio
+    #   fastapi
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   rich
+    #   torch
+    #   typer
+    #   uvicorn
+tzdata==2025.1
+    # via pandas
+urllib3==2.3.0
+    # via requests
+uvicorn==0.34.0
+    # via gradio
+websockets==15.0.1
+    # via gradio-client
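The header of the regenerated requirements.txt records the command that produced it: `uv pip compile pyproject.toml -o requirements.txt`. Below is a minimal sketch of re-running that same command from Python, assuming the `uv` CLI is installed (illustrative only, not part of this commit).

# Minimal sketch: regenerate requirements.txt with the command recorded in the
# generated file's header. Assumption: the `uv` CLI is installed and on PATH.
import subprocess

subprocess.run(
    ["uv", "pip", "compile", "pyproject.toml", "-o", "requirements.txt"],
    check=True,  # raise if uv exits with a non-zero status
)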
uv.lock ADDED
The diff for this file is too large to render. See raw diff