Compare commits
19 Commits
feat/unifa...v3.5.3
| SHA1 |
|---|
| 400bb72217 |
| a0a12d5eca |
| a34f376da0 |
| 2b29706615 |
| f6d3cf33f0 |
| 0eb042425c |
| 35c0b6d539 |
| 13c4ac83d8 |
| 6ce397b811 |
| 9bf54f5f78 |
| c87ec1ad0f |
| 9e56a86963 |
| 426bd71505 |
| ede8b27091 |
| 02c77ce5db |
| d70d6a254f |
| 7d37633b1a |
| bc413df4a8 |
| 8db0577991 |
24  .github/workflows/ci.yml (vendored)

```diff
@@ -1,4 +1,4 @@
-name: Build
+name: CI

 on:
   push:
@@ -17,10 +17,10 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 5
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@v5
+      - uses: actions/setup-python@v6
        with:
-          python-version: "3.10"
+          python-version: "3.11"
      - uses: pre-commit/action@v3.0.1

  test:
@@ -35,8 +35,14 @@ jobs:
          # Full Python range on Linux (fastest runner)
          - os: ubuntu-latest
            python-version: "3.10"
+          - os: ubuntu-latest
+            python-version: "3.11"
+          - os: ubuntu-latest
+            python-version: "3.12"
          - os: ubuntu-latest
            python-version: "3.13"
+          - os: ubuntu-latest
+            python-version: "3.14"
          - os: macos-latest
            python-version: "3.13"
          - os: windows-latest
@@ -44,10 +50,10 @@ jobs:

     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: ${{ matrix.python-version }}
           cache: "pip"
@@ -55,7 +61,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install .[dev]
+          python -m pip install ".[cpu,dev]"

       - name: Check ONNX Runtime providers
         run: |
@@ -74,10 +80,10 @@ jobs:

     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: "3.11"
           cache: "pip"
```
10  .github/workflows/docs.yml (vendored)

```diff
@@ -1,8 +1,6 @@
-name: Deploy docs
+name: Deploy Documentation

 on:
   push:
     branches: [main]
   workflow_dispatch:

 permissions:
@@ -12,11 +10,11 @@ jobs:
   deploy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
-          fetch-depth: 0 # Fetch full history for git-committers and git-revision-date plugins
+          fetch-depth: 0

-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
        with:
          python-version: "3.11"
```
221  .github/workflows/pipeline.yml (vendored, new file, @@ -0,0 +1,221 @@)

```yaml
name: Release Pipeline

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version (e.g. 3.6.0, 3.6.0b1, 3.6.0rc1)'
        required: true

concurrency:
  group: pipeline
  cancel-in-progress: false

jobs:
  validate:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      is_prerelease: ${{ steps.prerelease.outputs.is_prerelease }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"

      - name: Validate version (PEP 440)
        run: |
          python - <<'EOF'
          import re, sys
          v = "${{ inputs.version }}"
          if not re.fullmatch(r'\d+\.\d+\.\d+((a|b|rc)\d+|\.dev\d+)?', v):
              print(f"Invalid version: {v}")
              print("Expected forms: 3.6.0, 3.6.0a1, 3.6.0b1, 3.6.0rc1, 3.6.0.dev1")
              sys.exit(1)
          EOF

      - name: Check tag does not exist
        run: |
          if git rev-parse "v${{ inputs.version }}" >/dev/null 2>&1; then
            echo "Tag v${{ inputs.version }} already exists."
            exit 1
          fi

      - name: Detect pre-release
        id: prerelease
        run: |
          if [[ "${{ inputs.version }}" =~ (a|b|rc|\.dev)[0-9]+ ]]; then
            echo "is_prerelease=true" >> $GITHUB_OUTPUT
          else
            echo "is_prerelease=false" >> $GITHUB_OUTPUT
          fi

  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    needs: validate

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]

    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v6
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install ".[cpu,dev]"

      - name: Run tests
        run: pytest -v --tb=short

  release:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    needs: test
    permissions:
      contents: write

    steps:
      - name: Checkout code
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
          token: ${{ secrets.RELEASE_TOKEN }}

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"

      - name: Update pyproject.toml
        run: |
          python - <<'EOF'
          import re, pathlib
          p = pathlib.Path('pyproject.toml')
          text = p.read_text()
          new = re.sub(r'^version\s*=\s*".*"', f'version = "${{ inputs.version }}"', text, count=1, flags=re.M)
          if new == text:
              raise SystemExit("Failed to update version in pyproject.toml")
          p.write_text(new)
          EOF

      - name: Update uniface/__init__.py
        run: |
          python - <<'EOF'
          import re, pathlib
          p = pathlib.Path('uniface/__init__.py')
          text = p.read_text()
          new = re.sub(r"^__version__\s*=\s*'.*'", f"__version__ = '${{ inputs.version }}'", text, count=1, flags=re.M)
          if new == text:
              raise SystemExit("Failed to update __version__ in uniface/__init__.py")
          p.write_text(new)
          EOF

      - name: Commit, tag, push
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git add pyproject.toml uniface/__init__.py
          git commit -m "chore: Release v${{ inputs.version }}"
          git tag "v${{ inputs.version }}"
          git push origin HEAD:${{ github.ref_name }}
          git push origin "v${{ inputs.version }}"

  publish:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    needs: [validate, release]
    permissions:
      contents: write
      id-token: write
    environment:
      name: pypi
      url: https://pypi.org/project/uniface/

    steps:
      - name: Checkout tag
        uses: actions/checkout@v5
        with:
          ref: v${{ inputs.version }}

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"
          cache: 'pip'

      - name: Install build tools
        run: |
          python -m pip install --upgrade pip
          python -m pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Publish to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: twine upload dist/*

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          tag_name: v${{ inputs.version }}
          files: dist/*
          generate_release_notes: true
          prerelease: ${{ needs.validate.outputs.is_prerelease }}

  docs:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    needs: [validate, publish]
    if: needs.validate.outputs.is_prerelease == 'false'
    permissions:
      contents: write

    steps:
      - name: Checkout tag
        uses: actions/checkout@v5
        with:
          ref: v${{ inputs.version }}
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install mkdocs-material pymdown-extensions mkdocs-git-committers-plugin-2 mkdocs-git-revision-date-localized-plugin

      - name: Build docs
        env:
          MKDOCS_GIT_COMMITTERS_APIKEY: ${{ secrets.MKDOCS_GIT_COMMITTERS_APIKEY }}
        run: mkdocs build --strict

      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./site
          destination_dir: docs
```
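The validate job accepts only a narrow, release-friendly subset of PEP 440. A quick local sanity check of that same regex (illustrative; the pattern is copied verbatim from the workflow above):

```python
import re

# Same pattern as the "Validate version (PEP 440)" step above.
PATTERN = r'\d+\.\d+\.\d+((a|b|rc)\d+|\.dev\d+)?'

for v in ['3.6.0', '3.6.0b1', '3.6.0rc1', '3.6.0.dev1', 'v3.6.0', '3.6', '3.6.0.post1']:
    print(f'{v!r}: {"ok" if re.fullmatch(PATTERN, v) else "rejected"}')
# 'v3.6.0' and '3.6' are rejected, and so is '3.6.0.post1' — post releases
# are valid PEP 440 but deliberately outside what this pipeline accepts.
```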
119  .github/workflows/publish.yml (vendored, deleted, @@ -1,119 +0,0 @@)

```yaml
name: Publish to PyPI

on:
  push:
    tags:
      - "v*.*.*" # Trigger only on version tags like v0.1.9

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  validate:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      version: ${{ steps.get_version.outputs.version }}
      tag_version: ${{ steps.get_version.outputs.tag_version }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11" # Needs 3.11+ for tomllib

      - name: Get version from tag and pyproject.toml
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/v}
          echo "tag_version=$TAG_VERSION" >> $GITHUB_OUTPUT

          PYPROJECT_VERSION=$(python -c "import tomllib; print(tomllib.load(open('pyproject.toml','rb'))['project']['version'])")
          echo "version=$PYPROJECT_VERSION" >> $GITHUB_OUTPUT

          echo "Tag version: v$TAG_VERSION"
          echo "pyproject.toml version: $PYPROJECT_VERSION"

      - name: Verify version match
        run: |
          if [ "${{ steps.get_version.outputs.tag_version }}" != "${{ steps.get_version.outputs.version }}" ]; then
            echo "Error: Tag version (${{ steps.get_version.outputs.tag_version }}) does not match pyproject.toml version (${{ steps.get_version.outputs.version }})"
            exit 1
          fi
          echo "Version validation passed: ${{ steps.get_version.outputs.version }}"

  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    needs: validate

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.13"]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install .[dev]

      - name: Run tests
        run: pytest -v

  publish:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    needs: [validate, test]
    permissions:
      contents: write
      id-token: write
    environment:
      name: pypi
      url: https://pypi.org/project/uniface/

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          cache: 'pip'

      - name: Install build tools
        run: |
          python -m pip install --upgrade pip
          python -m pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Publish to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: twine upload dist/*

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          files: dist/*
          generate_release_notes: true
```
```diff
@@ -18,6 +18,13 @@ repos:
       - id: debug-statements
       - id: check-ast

+  # Strip Jupyter notebook outputs
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.9.1
+    hooks:
+      - id: nbstripout
+        files: ^examples/
+
   # Ruff - Fast Python linter and formatter
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.14.10
```
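With the hook added, existing notebook outputs can also be stripped on demand rather than waiting for the next commit — a small sketch, assuming pre-commit is already installed:

```bash
# Run only the new hook against every matching file (examples/*.ipynb).
pre-commit run nbstripout --all-files
```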
6  AGENTS.md (new file, @@ -0,0 +1,6 @@)

```markdown
<!-- Cursor agent instructions — shared with CLAUDE.md -->
<!-- See CLAUDE.md for full project instructions for AI coding agents. -->

# AGENTS.md

Please read and follow all instructions in [CLAUDE.md](./CLAUDE.md).
```
81  CLAUDE.md (new file, @@ -0,0 +1,81 @@)

````markdown
# CLAUDE.md

Project instructions for AI coding agents.

## Project Overview

UniFace is a Python library for face detection, recognition, tracking, landmark analysis, face parsing, gaze estimation, age/gender detection. It uses ONNX Runtime for inference.

## Code Style

- Python 3.10+ with type hints
- Line length: 120
- Single quotes for strings, double quotes for docstrings
- Google-style docstrings
- Formatter/linter: Ruff (config in `pyproject.toml`)
- Run `ruff format .` and `ruff check . --fix` before committing

## Commit Messages

Follow [Conventional Commits](https://www.conventionalcommits.org/) with a **capitalized** description:

```
<type>: <Capitalized short description>
```

Types: `feat`, `fix`, `docs`, `style`, `refactor`, `perf`, `test`, `build`, `ci`, `chore`

Examples:

- `feat: Add gaze estimation model`
- `fix: Correct bounding box scaling for non-square images`
- `ci: Add nbstripout pre-commit hook`
- `docs: Update installation instructions`
- `refactor: Unify attribute/detector base classes`

## Testing

```bash
pytest -v --tb=short
```

Tests live in `tests/`. Run the full suite before submitting changes.

## Pre-commit

Pre-commit hooks handle formatting, linting, security checks, and notebook output stripping. Always run:

```bash
pre-commit install
pre-commit run --all-files
```

## Project Structure

```
uniface/            # Main package
  detection/        # Face detection models (SCRFD, RetinaFace, YOLOv5, YOLOv8)
  recognition/      # Face recognition/verification (AdaFace, ArcFace, EdgeFace, MobileFace, SphereFace)
  landmark/         # Facial landmark models
  tracking/         # Object tracking (ByteTrack)
  parsing/          # Face parsing/segmentation (BiSeNet, XSeg)
  gaze/             # Gaze estimation
  headpose/         # Head pose estimation
  attribute/        # Age, gender, emotion detection
  spoofing/         # Anti-spoofing (MiniFASNet)
  privacy/          # Face anonymization
  stores/           # Vector stores (FAISS)
  constants.py      # Model weight URLs and checksums
  model_store.py    # Model download/cache management
  analyzer.py       # High-level FaceAnalyzer API
  types.py          # Shared type definitions
tests/              # Unit tests
examples/           # Jupyter notebooks (outputs are auto-stripped)
docs/               # MkDocs documentation
```

## Key Conventions

- New models: add class in submodule, register weights in `constants.py`, export in `__init__.py`
- Dependencies: managed in `pyproject.toml`
- All ONNX models are downloaded on demand with SHA256 verification
- Do not commit notebook outputs; `nbstripout` pre-commit hook handles this
````
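The SHA256 convention above is conceptually simple — a minimal sketch of the check (illustrative only; the project's real download/verification logic lives in `model_store.py`, and `sha256_matches` is a hypothetical helper name):

```python
import hashlib
from pathlib import Path

def sha256_matches(path: Path, expected_hex: str) -> bool:
    """Hash the downloaded file in chunks and compare to the pinned digest."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_hex
```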
````diff
@@ -184,6 +184,48 @@ Example notebooks demonstrating library usage:
 | Face Parsing | [06_face_parsing.ipynb](examples/06_face_parsing.ipynb) |
 | Face Anonymization | [07_face_anonymization.ipynb](examples/07_face_anonymization.ipynb) |
 | Gaze Estimation | [08_gaze_estimation.ipynb](examples/08_gaze_estimation.ipynb) |
+| Face Segmentation | [09_face_segmentation.ipynb](examples/09_face_segmentation.ipynb) |
+| Face Vector Store | [10_face_vector_store.ipynb](examples/10_face_vector_store.ipynb) |
+| Head Pose Estimation | [11_head_pose_estimation.ipynb](examples/11_head_pose_estimation.ipynb) |
+
+## Release Process
+
+Releases are fully automated via GitHub Actions. Only maintainers with branch-protection bypass privileges on `main` can trigger a release.
+
+### Cutting a release
+
+1. Go to **Actions → Release Pipeline → Run workflow** on GitHub.
+2. Enter the version following [PEP 440](https://peps.python.org/pep-0440/):
+   - Stable: `0.7.0`, `1.0.0`
+   - Pre-release: `0.7.0rc1`, `0.7.0b1`, `0.7.0a1`, `0.7.0.dev1`
+3. Click **Run workflow**.
+
+### What happens automatically
+
+The `Release Pipeline` workflow runs all stages in sequence:
+
+1. **Validate** — checks the version string against PEP 440 and confirms the tag does not already exist.
+2. **Test** — runs the test suite on Python 3.10–3.14.
+3. **Release** — updates `pyproject.toml` and `uniface/__init__.py`, commits `chore: Release vX.Y.Z` to `main`, creates and pushes tag `vX.Y.Z`.
+4. **Publish** — builds the package, uploads to PyPI, and creates a GitHub Release (flagged as pre-release for `a`/`b`/`rc`/`.dev` versions).
+5. **Deploy docs** — runs only for **stable** versions. Pre-releases do not update the live documentation site.
+
+### Verifying a release
+
+- PyPI: <https://pypi.org/project/uniface/>
+- GitHub Releases: <https://github.com/yakhyo/uniface/releases>
+- Docs (stable only): <https://yakhyo.github.io/uniface/>
+
+### Installing a pre-release
+
+End users can opt in to pre-releases with the `--pre` flag:
+
+```bash
+pip install uniface --pre        # latest pre-release
+pip install uniface==0.7.0rc1    # specific pre-release
+```
+
+Without `--pre`, `pip install uniface` always resolves to the latest stable version.
+
 ## Questions?
````
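For maintainers who prefer the terminal, the same dispatch can be triggered with the GitHub CLI — a hedged sketch, assuming `gh` is installed and authenticated against this repo (the UI steps above remain the documented path):

```bash
# Equivalent to Actions → Release Pipeline → Run workflow with a version input.
gh workflow run "Release Pipeline" -f version=0.7.0rc1

# Follow the run that was just started.
gh run watch
```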
166  README.md

````diff
@@ -1,4 +1,4 @@
-<h1 align="center">UniFace: All-in-One Face Analysis Library</h1>
+<h1 align="center">UniFace: A Unified Face Analysis Library for Python</h1>

 <div align="center">

@@ -14,53 +14,90 @@
 </div>

 <div align="center">
-  <img src="https://raw.githubusercontent.com/yakhyo/uniface/main/.github/logos/uniface_rounded_q80.webp" width="90%" alt="UniFace - All-in-One Open-Source Face Analysis Library">
+  <img src="https://raw.githubusercontent.com/yakhyo/uniface/main/.github/logos/uniface_rounded_q80.webp" width="90%" alt="UniFace - A Unified Face Analysis Library for Python">
 </div>

 ---

-**UniFace** is a lightweight, production-ready face analysis library built on ONNX Runtime. It provides high-performance face detection, recognition, landmark detection, face parsing, gaze estimation, and attribute analysis with hardware acceleration support across platforms.
+**UniFace** is a lightweight, production-ready Python library for face detection, recognition, tracking, landmark analysis, face parsing, gaze estimation, and face attributes.

 ---

 ## Features

 - **Face Detection** — RetinaFace, SCRFD, YOLOv5-Face, and YOLOv8-Face with 5-point landmarks
-- **Face Recognition** — ArcFace, MobileFace, and SphereFace embeddings
+- **Face Recognition** — AdaFace, ArcFace, EdgeFace, MobileFace, and SphereFace embeddings
 - **Face Tracking** — Multi-object tracking with [BYTETracker](https://github.com/yakhyo/bytetrack-tracker) for persistent IDs across video frames
 - **Facial Landmarks** — 106-point landmark localization module (separate from 5-point detector landmarks)
 - **Face Parsing** — BiSeNet semantic segmentation (19 classes), XSeg face masking
+- **Portrait Matting** — Trimap-free alpha matte with MODNet (background removal, green screen, compositing)
 - **Gaze Estimation** — Real-time gaze direction with MobileGaze
+- **Head Pose Estimation** — 3D head orientation (pitch, yaw, roll) with 6D rotation representation
 - **Attribute Analysis** — Age, gender, race (FairFace), and emotion
-- **Vector Indexing** — FAISS-backed embedding store for fast multi-identity search
+- **Vector Store** — FAISS-backed embedding store for fast multi-identity search
 - **Anti-Spoofing** — Face liveness detection with MiniFASNet
 - **Face Anonymization** — 5 blur methods for privacy protection
 - **Hardware Acceleration** — ARM64 (Apple Silicon), CUDA (NVIDIA), CPU

 ---

+## Visual Examples
+
+<table>
+  <tr>
+    <td align="center"><b>Face Detection</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/detection.jpg" width="100%"></td>
+    <td align="center"><b>Gaze Estimation</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/gaze.jpg" width="100%"></td>
+  </tr>
+  <tr>
+    <td align="center"><b>Head Pose Estimation</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/headpose.jpg" width="100%"></td>
+    <td align="center"><b>Age & Gender</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/age_gender.jpg" width="100%"></td>
+  </tr>
+  <tr>
+    <td align="center" colspan="2"><b>Face Verification</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/verification.jpg" width="80%"></td>
+  </tr>
+  <tr>
+    <td align="center" colspan="2"><b>106-Point Landmarks</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/landmarks.jpg" width="36%"></td>
+  </tr>
+  <tr>
+    <td align="center" colspan="2"><b>Face Parsing</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/parsing.jpg" width="80%"></td>
+  </tr>
+  <tr>
+    <td align="center" colspan="2"><b>Face Segmentation</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/segmentation.jpg" width="80%"></td>
+  </tr>
+  <tr>
+    <td align="center" colspan="2"><b>Portrait Matting</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/matting.jpg" width="100%"></td>
+  </tr>
+  <tr>
+    <td align="center" colspan="2"><b>Face Anonymization</b><br><img src="https://raw.githubusercontent.com/yakhyo/uniface/main/assets/demos/anonymization.jpg" width="100%"></td>
+  </tr>
+</table>
+
+---
+
 ## Installation

-**Standard installation**
+**CPU / Apple Silicon**

 ```bash
-pip install uniface
+pip install uniface[cpu]
 ```

-**GPU support (CUDA)**
+**GPU support (NVIDIA CUDA)**

 ```bash
 pip install uniface[gpu]
 ```

+> **Why separate extras?** `onnxruntime` and `onnxruntime-gpu` conflict when both are installed — they own the same Python namespace. Installing only the extra you need prevents that conflict entirely.
+
 **From source (latest version)**

 ```bash
 git clone https://github.com/yakhyo/uniface.git
-cd uniface && pip install -e .
+cd uniface && pip install -e ".[cpu]"   # or .[gpu] for CUDA
 ```

-**FAISS vector indexing**
+**FAISS vector store**

 ```bash
 pip install faiss-cpu   # or faiss-gpu for CUDA
@@ -126,14 +163,10 @@ for face in faces:

 ```python
 import cv2
-from uniface.analyzer import FaceAnalyzer
-from uniface.detection import RetinaFace
-from uniface.recognition import ArcFace
+from uniface import FaceAnalyzer

-detector = RetinaFace()
-recognizer = ArcFace()
-
-analyzer = FaceAnalyzer(detector, recognizer=recognizer)
+# Zero-config: uses SCRFD (500M) + ArcFace (MobileNet) by default
+analyzer = FaceAnalyzer()

 image = cv2.imread("photo.jpg")
 if image is None:
@@ -145,19 +178,63 @@ for face in faces:
     print(face.bbox, face.embedding.shape if face.embedding is not None else None)
 ```

- ---
-
-## Execution Providers (ONNX Runtime)
+With attributes:

 ```python
-from uniface.detection import RetinaFace
+from uniface import FaceAnalyzer, AgeGender

-# Force CPU-only inference
-detector = RetinaFace(providers=["CPUExecutionProvider"])
+analyzer = FaceAnalyzer(attributes=[AgeGender()])
+faces = analyzer.analyze(image)
+
+for face in faces:
+    print(f"{face.sex}, {face.age}y, embedding={face.embedding.shape}")
 ```

-See more in the docs:
-https://yakhyo.github.io/uniface/concepts/execution-providers/
 ---

+## Example (Portrait Matting)
+
+```python
+import cv2
+import numpy as np
+from uniface.matting import MODNet
+
+matting = MODNet()
+
+image = cv2.imread("portrait.jpg")
+matte = matting.predict(image)   # (H, W) float32 in [0, 1]
+
+# Transparent PNG
+rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
+rgba[:, :, 3] = (matte * 255).astype(np.uint8)
+cv2.imwrite("transparent.png", rgba)
+
+# Green screen
+matte_3ch = matte[:, :, np.newaxis]
+bg = np.full_like(image, (0, 177, 64), dtype=np.uint8)
+result = (image * matte_3ch + bg * (1 - matte_3ch)).astype(np.uint8)
+cv2.imwrite("green_screen.jpg", result)
+```
+
+---
+
+## Jupyter Notebooks
+
+| Example | Colab | Description |
+|---------|:-----:|-------------|
+| [01_face_detection.ipynb](examples/01_face_detection.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/01_face_detection.ipynb) | Face detection and landmarks |
+| [02_face_alignment.ipynb](examples/02_face_alignment.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/02_face_alignment.ipynb) | Face alignment for recognition |
+| [03_face_verification.ipynb](examples/03_face_verification.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/03_face_verification.ipynb) | Compare faces for identity |
+| [04_face_search.ipynb](examples/04_face_search.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/04_face_search.ipynb) | Find a person in group photos |
+| [05_face_analyzer.ipynb](examples/05_face_analyzer.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/05_face_analyzer.ipynb) | Unified face analysis |
+| [06_face_parsing.ipynb](examples/06_face_parsing.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/06_face_parsing.ipynb) | Semantic face segmentation |
+| [07_face_anonymization.ipynb](examples/07_face_anonymization.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/07_face_anonymization.ipynb) | Privacy-preserving blur |
+| [08_gaze_estimation.ipynb](examples/08_gaze_estimation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/08_gaze_estimation.ipynb) | Gaze direction estimation |
+| [09_face_segmentation.ipynb](examples/09_face_segmentation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/09_face_segmentation.ipynb) | Face segmentation with XSeg |
+| [10_face_vector_store.ipynb](examples/10_face_vector_store.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/10_face_vector_store.ipynb) | FAISS-backed face database |
+| [11_head_pose_estimation.ipynb](examples/11_head_pose_estimation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/11_head_pose_estimation.ipynb) | Head pose estimation (pitch, yaw, roll) |
+| [12_face_recognition.ipynb](examples/12_face_recognition.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/12_face_recognition.ipynb) | Standalone face recognition pipeline |
+| [13_portrait_matting.ipynb](examples/13_portrait_matting.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/13_portrait_matting.ipynb) | Portrait matting with MODNet |
+
 ---

@@ -176,6 +253,20 @@ Full documentation: https://yakhyo.github.io/uniface/

 ---

+## Execution Providers (ONNX Runtime)
+
+```python
+from uniface.detection import RetinaFace
+
+# Force CPU-only inference
+detector = RetinaFace(providers=["CPUExecutionProvider"])
+```
+
+See more in the docs:
+https://yakhyo.github.io/uniface/concepts/execution-providers/
+
+---
+
 ## Datasets

 | Task | Training Dataset | Models |
@@ -184,7 +275,9 @@ Full documentation: https://yakhyo.github.io/uniface/
 | Recognition | MS1MV2 | MobileFace, SphereFace |
 | Recognition | WebFace600K | ArcFace |
+| Recognition | WebFace4M / 12M | AdaFace |
+| Recognition | MS1MV2 | EdgeFace |
 | Gaze | Gaze360 | MobileGaze |
 | Head Pose | 300W-LP | HeadPose (ResNet, MobileNet) |
 | Parsing | CelebAMask-HQ | BiSeNet |
 | Attributes | CelebA, FairFace, AffectNet | AgeGender, FairFace, Emotion |

@@ -192,23 +285,6 @@ Full documentation: https://yakhyo.github.io/uniface/

 ---

-## Jupyter Notebooks
-
-| Example | Colab | Description |
-|---------|:-----:|-------------|
-| [01_face_detection.ipynb](examples/01_face_detection.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/01_face_detection.ipynb) | Face detection and landmarks |
-| [02_face_alignment.ipynb](examples/02_face_alignment.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/02_face_alignment.ipynb) | Face alignment for recognition |
-| [03_face_verification.ipynb](examples/03_face_verification.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/03_face_verification.ipynb) | Compare faces for identity |
-| [04_face_search.ipynb](examples/04_face_search.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/04_face_search.ipynb) | Find a person in group photos |
-| [05_face_analyzer.ipynb](examples/05_face_analyzer.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/05_face_analyzer.ipynb) | All-in-one analysis |
-| [06_face_parsing.ipynb](examples/06_face_parsing.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/06_face_parsing.ipynb) | Semantic face segmentation |
-| [07_face_anonymization.ipynb](examples/07_face_anonymization.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/07_face_anonymization.ipynb) | Privacy-preserving blur |
-| [08_gaze_estimation.ipynb](examples/08_gaze_estimation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/08_gaze_estimation.ipynb) | Gaze direction estimation |
-| [09_face_segmentation.ipynb](examples/09_face_segmentation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/09_face_segmentation.ipynb) | Face segmentation with XSeg |
-| [10_face_vector_store.ipynb](examples/10_face_vector_store.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/10_face_vector_store.ipynb) | FAISS-backed face database |
-
 ---

 ## Licensing and Model Usage

 UniFace is MIT-licensed, but several pretrained models carry their own licenses.
@@ -231,9 +307,12 @@ If you plan commercial use, verify model license compatibility.
 | Detection | [yolov8-face-onnx-inference](https://github.com/yakhyo/yolov8-face-onnx-inference) | - | YOLOv8-Face ONNX Inference |
+| Tracking | [bytetrack-tracker](https://github.com/yakhyo/bytetrack-tracker) | - | BYTETracker Multi-Object Tracking |
 | Recognition | [face-recognition](https://github.com/yakhyo/face-recognition) | ✓ | MobileFace, SphereFace Training |
+| Recognition | [edgeface-onnx](https://github.com/yakhyo/edgeface-onnx) | - | EdgeFace ONNX Inference |
 | Parsing | [face-parsing](https://github.com/yakhyo/face-parsing) | ✓ | BiSeNet Face Parsing |
 | Parsing | [face-segmentation](https://github.com/yakhyo/face-segmentation) | - | XSeg Face Segmentation |
 | Gaze | [gaze-estimation](https://github.com/yakhyo/gaze-estimation) | ✓ | MobileGaze Training |
 | Head Pose | [head-pose-estimation](https://github.com/yakhyo/head-pose-estimation) | ✓ | Head Pose Training (6DRepNet-style) |
+| Matting | [modnet](https://github.com/yakhyo/modnet) | - | MODNet Portrait Matting |
 | Anti-Spoofing | [face-anti-spoofing](https://github.com/yakhyo/face-anti-spoofing) | - | MiniFASNet Inference |
 | Attributes | [fairface-onnx](https://github.com/yakhyo/fairface-onnx) | - | FairFace ONNX Inference |

@@ -257,3 +336,6 @@ Questions or feedback:

 ## License

 This project is licensed under the [MIT License](LICENSE).
+
+> **Disclaimer:** This project is not affiliated with or related to
+> [Uniface](https://uniface.com/) by Rocket Software.
````
BIN — new binary files

| File | Size |
|---|---|
| assets/demos/age_gender.jpg | 206 KiB |
| assets/demos/anonymization.jpg | 1.5 MiB |
| assets/demos/detection.jpg | 341 KiB |
| assets/demos/gaze.jpg | 212 KiB |
| assets/demos/headpose.jpg | 233 KiB |
| assets/demos/landmarks.jpg | 428 KiB |
| assets/demos/matting.jpg | 938 KiB |
| assets/demos/parsing.jpg | 712 KiB |
| assets/demos/segmentation.jpg | 851 KiB |
| assets/demos/src_friends.jpg | 171 KiB |
| assets/demos/src_man1.jpg | 63 KiB |
| assets/demos/src_man2.jpg | 220 KiB |
| assets/demos/src_man3.jpg | 146 KiB |
| assets/demos/src_meeting.jpg | 96 KiB |
| assets/demos/src_portrait1.jpg | 208 KiB |
| assets/demos/verification.jpg | 121 KiB |
| assets/test_images/image5.jpg | 5.8 KiB |
````diff
@@ -39,16 +39,20 @@ recognizer = ArcFace(providers=['CPUExecutionProvider'])
 detector = RetinaFace(providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
 ```

-All model classes accept the `providers` parameter:
+All **ONNX-based** model classes accept the `providers` parameter:

 - Detection: `RetinaFace`, `SCRFD`, `YOLOv5Face`, `YOLOv8Face`
 - Recognition: `ArcFace`, `AdaFace`, `MobileFace`, `SphereFace`
 - Landmarks: `Landmark106`
 - Gaze: `MobileGaze`
-- Parsing: `BiSeNet`
+- Parsing: `BiSeNet`, `XSeg`
 - Attributes: `AgeGender`, `FairFace`
 - Anti-Spoofing: `MiniFASNet`

+!!! note "Non-ONNX components"
+    - **Emotion** uses TorchScript and selects its device automatically (`mps` / `cuda` / `cpu`). It does **not** accept the `providers` parameter.
+    - **BlurFace** is a pure OpenCV utility and does not load any model.

 ---

 ## Check Available Providers
@@ -89,7 +93,7 @@ print("Available providers:", providers)
 No additional setup required. ARM64 optimizations are built into `onnxruntime`:

 ```bash
-pip install uniface
+pip install uniface[cpu]
 ```

 Verify ARM64:
@@ -106,7 +110,7 @@ python -c "import platform; print(platform.machine())"

 ### NVIDIA GPU (CUDA)

-Install with GPU support:
+Install with GPU support (this installs `onnxruntime-gpu`, which already includes CPU fallback):

 ```bash
 pip install uniface[gpu]
@@ -136,7 +140,7 @@ else:
 CPU execution is always available:

 ```bash
-pip install uniface
+pip install uniface[cpu]
 ```

 Works on all platforms without additional configuration.
@@ -211,7 +215,7 @@ for image_path in image_paths:

 3. Reinstall with GPU support:
    ```bash
-   pip uninstall onnxruntime onnxruntime-gpu
+   pip uninstall onnxruntime onnxruntime-gpu -y
    pip install uniface[gpu]
    ```
````
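The automatic device selection mentioned in the Non-ONNX note follows a simple fallback order — a minimal sketch, assuming PyTorch is installed (illustrative only; the real logic lives inside the Emotion model, and `pick_device` is a hypothetical name):

```python
import torch

def pick_device() -> torch.device:
    """Prefer Apple's MPS, then CUDA, then CPU — the order noted above."""
    if torch.backends.mps.is_available():
        return torch.device('mps')
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
```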
````diff
@@ -106,6 +106,27 @@ print(f"Yaw: {np.degrees(result.yaw):.1f}°")

 ---

+### HeadPoseResult
+
+```python
+@dataclass(frozen=True)
+class HeadPoseResult:
+    pitch: float  # Rotation around X-axis (degrees), + = looking down
+    yaw: float    # Rotation around Y-axis (degrees), + = looking right
+    roll: float   # Rotation around Z-axis (degrees), + = tilting clockwise
+```
+
+**Usage:**
+
+```python
+result = head_pose.estimate(face_crop)
+print(f"Pitch: {result.pitch:.1f}°")
+print(f"Yaw: {result.yaw:.1f}°")
+print(f"Roll: {result.roll:.1f}°")
+```
+
+---
+
 ### SpoofingResult

 ```python
@@ -144,11 +165,11 @@ class AttributeResult:

 ```python
 # AgeGender model
-result = age_gender.predict(image, face.bbox)
+result = age_gender.predict(image, face)
 print(f"{result.sex}, {result.age} years old")

 # FairFace model
-result = fairface.predict(image, face.bbox)
+result = fairface.predict(image, face)
 print(f"{result.sex}, {result.age_group}, {result.race}")
 ```
@@ -171,7 +192,7 @@ Face recognition models return normalized 512-dimensional embeddings:

 ```python
 embedding = recognizer.get_normalized_embedding(image, landmarks)
-print(f"Shape: {embedding.shape}")  # (1, 512)
+print(f"Shape: {embedding.shape}")  # (512,)
 print(f"Norm: {np.linalg.norm(embedding):.4f}")  # ~1.0
 ```
````
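Because the embeddings are L2-normalized (norm ≈ 1.0), cosine similarity reduces to a plain dot product — a small sketch using the `recognizer` from the snippet above (the `image_a`/`landmarks_a` inputs and the 0.4 threshold are illustrative, not official defaults):

```python
import numpy as np

emb_a = recognizer.get_normalized_embedding(image_a, landmarks_a)  # shape (512,)
emb_b = recognizer.get_normalized_embedding(image_b, landmarks_b)

similarity = float(np.dot(emb_a, emb_b))  # in [-1, 1]; higher = more alike
print('same person' if similarity > 0.4 else 'different person')  # example threshold
```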
````diff
@@ -23,8 +23,10 @@ graph TB
         LMK[Landmarks]
         ATTR[Attributes]
         GAZE[Gaze]
+        HPOSE[Head Pose]
         PARSE[Parsing]
         SPOOF[Anti-Spoofing]
+        MATT[Matting]
         PRIV[Privacy]
     end

@@ -32,7 +34,7 @@ graph TB
         TRK[BYTETracker]
     end

-    subgraph Indexing
+    subgraph Stores
         IDX[FAISS Vector Store]
     end

@@ -41,10 +43,12 @@ graph TB
     end

     IMG --> DET
+    IMG --> MATT
     DET --> REC
     DET --> LMK
     DET --> ATTR
     DET --> GAZE
+    DET --> HPOSE
     DET --> PARSE
     DET --> SPOOF
     DET --> PRIV
@@ -60,16 +64,14 @@ graph TB

 ## Design Principles

-### 1. ONNX-First
+### 1. Cross-Platform Inference

-UniFace runs inference primarily via ONNX Runtime for core components:
+UniFace uses portable model runtimes to provide consistent inference across macOS, Linux, and Windows. Most core components run through ONNX Runtime, while optional components may use PyTorch where appropriate.

-- **Cross-platform**: Same models work on macOS, Linux, Windows
-- **Hardware acceleration**: Automatic selection of optimal provider
-- **Production-ready**: No Python-only dependencies for inference
-
 Some optional components (e.g., emotion TorchScript, torchvision NMS) require PyTorch.

 ### 2. Minimal Dependencies

 Core dependencies are kept minimal:
@@ -113,16 +115,18 @@ def detect(self, image: np.ndarray) -> list[Face]:
 ```
 uniface/
 ├── detection/     # Face detection (RetinaFace, SCRFD, YOLOv5Face, YOLOv8Face)
-├── recognition/   # Face recognition (AdaFace, ArcFace, MobileFace, SphereFace)
+├── recognition/   # Face recognition (AdaFace, ArcFace, EdgeFace, MobileFace, SphereFace)
+├── tracking/      # Multi-object tracking (BYTETracker)
 ├── landmark/      # 106-point landmarks
 ├── attribute/     # Age, gender, emotion, race
 ├── parsing/       # Face semantic segmentation
+├── matting/       # Portrait matting (MODNet)
 ├── gaze/          # Gaze estimation
 ├── headpose/      # Head pose estimation
 ├── spoofing/      # Anti-spoofing
 ├── privacy/       # Face anonymization
-├── indexing/      # Vector indexing (FAISS)
-├── types.py       # Dataclasses (Face, GazeResult, etc.)
+├── stores/        # Vector stores (FAISS)
+├── types.py       # Dataclasses (Face, GazeResult, HeadPoseResult, etc.)
 ├── constants.py   # Model weights and URLs
 ├── model_store.py # Model download and caching
 ├── onnx_utils.py  # ONNX Runtime utilities
@@ -158,7 +162,7 @@ for face in faces:
     embedding = recognizer.get_normalized_embedding(image, face.landmarks)

     # Attributes
-    attrs = age_gender.predict(image, face.bbox)
+    attrs = age_gender.predict(image, face)

     print(f"Face: {attrs.sex}, {attrs.age} years")
 ```
@@ -183,8 +187,7 @@ fairface = FairFace()
 analyzer = FaceAnalyzer(
     detector,
     recognizer=recognizer,
-    age_gender=age_gender,
-    fairface=fairface,
+    attributes=[age_gender, fairface],
 )

 faces = analyzer.analyze(image)

@@ -201,17 +201,11 @@ For drawing detections, filter by confidence:
 ```python
 from uniface.draw import draw_detections

-# Only draw high-confidence detections
-bboxes = [f.bbox for f in faces if f.confidence > 0.7]
-scores = [f.confidence for f in faces if f.confidence > 0.7]
-landmarks = [f.landmarks for f in faces if f.confidence > 0.7]
-
+# Only draw high-confidence detections (confidence ≥ vis_threshold)
 draw_detections(
     image=image,
-    bboxes=bboxes,
-    scores=scores,
-    landmarks=landmarks,
-    vis_threshold=0.6  # Additional visualization filter
+    faces=faces,
+    vis_threshold=0.7,
 )
 ```
````
````diff
@@ -47,6 +47,38 @@ pre-commit run --all-files

 ---

+## Commit Messages
+
+We follow [Conventional Commits](https://www.conventionalcommits.org/):
+
+```
+<type>: <short description>
+```
+
+| Type         | When to use                                   |
+|--------------|-----------------------------------------------|
+| **feat**     | New feature or capability                     |
+| **fix**      | Bug fix                                       |
+| **docs**     | Documentation changes                         |
+| **style**    | Formatting, whitespace (no logic change)      |
+| **refactor** | Code restructuring without changing behavior  |
+| **perf**     | Performance improvement                       |
+| **test**     | Adding or updating tests                      |
+| **build**    | Build system or dependencies                  |
+| **ci**       | CI/CD and pre-commit configuration            |
+| **chore**    | Routine maintenance and tooling               |
+
+**Examples:**
+
+```
+feat: Add gaze estimation model
+fix: Correct bounding box scaling for non-square images
+ci: Add nbstripout pre-commit hook
+docs: Update installation instructions
+```
+
+---
+
 ## Pull Request Process

 1. Fork the repository
@@ -67,6 +99,14 @@ pre-commit run --all-files

 ---

+## Releases
+
+Releases are automated via GitHub Actions. Maintainers trigger **Actions → Release Pipeline → Run workflow** with a [PEP 440](https://peps.python.org/pep-0440/) version (e.g. `0.7.0`, `0.7.0rc1`). The pipeline runs tests, bumps `pyproject.toml` + `uniface/__init__.py`, tags the commit, publishes to PyPI, and creates a GitHub Release. Docs redeploy only for stable releases.
+
+See [CONTRIBUTING.md](https://github.com/yakhyo/uniface/blob/main/CONTRIBUTING.md#release-process) for the full process.
+
+---
+
 ## Questions?

 Open an issue on [GitHub](https://github.com/yakhyo/uniface/issues).
````
```diff
@@ -183,6 +183,30 @@ data/

 ---

+### Head Pose Estimation
+
+#### 300W-LP
+
+Large-scale synthesized face dataset with large pose variations, generated from 300W by face profiling. Used for training head pose estimation models.
+
+| Property    | Value                  |
+| ----------- | ---------------------- |
+| Images      | ~122,000 (synthesized) |
+| Source      | 300W (profiled)        |
+| Pose range  | ±90° yaw               |
+| Evaluation  | AFLW2000               |
+| Used by     | All HeadPose models    |
+
+!!! info "Download & Reference"
+    **Paper**: [Face Alignment Across Large Poses: A 3D Solution](https://arxiv.org/abs/1511.07212)
+
+    **Training code**: [yakhyo/head-pose-estimation](https://github.com/yakhyo/head-pose-estimation)
+
+!!! note "UniFace Models"
+    All HeadPose models shipped with UniFace are trained on 300W-LP and evaluated on AFLW2000.
+
+---
+
 ### Face Parsing

 #### CelebAMask-HQ
```
````diff
@@ -10,7 +10,7 @@ template: home.html

 # UniFace { .hero-title }

-<p class="hero-subtitle">All-in-One Open-Source Face Analysis Library</p>
+<p class="hero-subtitle">A Unified Face Analysis Library for Python</p>

 [](https://pypi.org/project/uniface/)
 [](https://www.python.org/)
@@ -20,7 +20,7 @@ template: home.html
 [](https://www.kaggle.com/yakhyokhuja/code)
 [](https://discord.gg/wdzrjr7R5j)

-<!-- <img src="https://raw.githubusercontent.com/yakhyo/uniface/main/.github/logos/uniface_rounded_q80.webp" alt="UniFace - All-in-One Open-Source Face Analysis Library" style="max-width: 70%; margin: 1rem 0;"> -->
+<!-- <img src="https://raw.githubusercontent.com/yakhyo/uniface/main/.github/logos/uniface_rounded_q80.webp" alt="UniFace - A Unified Face Analysis Library for Python" style="max-width: 70%; margin: 1rem 0;"> -->

 [Get Started](quickstart.md){ .md-button .md-button--primary }
 [View on GitHub](https://github.com/yakhyo/uniface){ .md-button }
@@ -31,12 +31,12 @@ template: home.html

 <div class="feature-card" markdown>
 ### :material-face-recognition: Face Detection
-ONNX-optimized detectors (RetinaFace, SCRFD, YOLO) with 5-point landmarks.
+RetinaFace, SCRFD, and YOLO detectors with 5-point landmarks.
 </div>

 <div class="feature-card" markdown>
 ### :material-account-check: Face Recognition
-AdaFace, ArcFace, MobileFace, and SphereFace embeddings for identity verification.
+AdaFace, ArcFace, EdgeFace, MobileFace, and SphereFace embeddings for identity verification.
 </div>

 <div class="feature-card" markdown>
@@ -59,6 +59,11 @@ BiSeNet semantic segmentation with 19 facial component classes.
 Real-time gaze direction prediction with MobileGaze models.
 </div>

+<div class="feature-card" markdown>
+### :material-axis-arrow: Head Pose
+3D head orientation (pitch, yaw, roll) estimation with 6D rotation models.
+</div>
+
 <div class="feature-card" markdown>
 ### :material-motion-play: Tracking
 Multi-object tracking with BYTETracker for persistent face IDs across video frames.
@@ -85,14 +90,14 @@ FAISS-backed embedding store for fast multi-identity face search.

 ## Installation

-UniFace runs inference primarily via **ONNX Runtime**; some optional components (e.g., emotion TorchScript, torchvision NMS) require **PyTorch**.
+UniFace uses portable model runtimes for consistent inference across macOS, Linux, and Windows. Most core components run through **ONNX Runtime**, while optional components may use **PyTorch** where appropriate.

-**Standard**
+**CPU / Apple Silicon**
 ```bash
-pip install uniface
+pip install uniface[cpu]
 ```

-**GPU (CUDA)**
+**GPU (NVIDIA CUDA)**
 ```bash
 pip install uniface[gpu]
 ```
@@ -101,7 +106,7 @@ pip install uniface[gpu]
 ```bash
 git clone https://github.com/yakhyo/uniface.git
 cd uniface
-pip install -e .
+pip install -e ".[cpu]"   # or .[gpu] for CUDA
 ```

 ---
````
@@ -11,15 +11,27 @@ This guide covers all installation options for UniFace.
|
||||
|
||||
---
|
||||
|
||||
## Why Two Extras?
|
||||
|
||||
`onnxruntime` (CPU) and `onnxruntime-gpu` (CUDA) both own the same Python namespace.
|
||||
Installing both at the same time causes file conflicts and silent provider mismatches.
|
||||
UniFace exposes them as separate, mutually exclusive extras so you install exactly one.
|
||||
|
||||
---
|
||||
|
||||
## Quick Install
|
||||
|
||||
The simplest way to install UniFace:
|
||||
=== "CPU / Apple Silicon"
|
||||
|
||||
```bash
|
||||
pip install uniface
|
||||
```
|
||||
```bash
|
||||
pip install uniface[cpu]
|
||||
```
|
||||
|
||||
This installs the CPU version with all core dependencies.
|
||||
=== "NVIDIA GPU (CUDA)"
|
||||
|
||||
```bash
|
||||
pip install uniface[gpu]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -27,14 +39,16 @@ This installs the CPU version with all core dependencies.
|
||||
|
||||
### macOS (Apple Silicon - M1/M2/M3/M4)
|
||||
|
||||
For Apple Silicon Macs, the standard installation automatically includes ARM64 optimizations:
|
||||
The `[cpu]` extra pulls in the standard `onnxruntime` package, which has native ARM64 support
|
||||
built in since version 1.13. No additional setup is needed for CoreML acceleration.
|
||||
|
||||
```bash
|
||||
pip install uniface
|
||||
pip install uniface[cpu]
|
||||
```
|
||||
|
||||
!!! tip "Native Performance"
|
||||
The base `onnxruntime` package has native Apple Silicon support with ARM64 optimizations built-in since version 1.13+. No additional configuration needed.
|
||||
`onnxruntime` 1.13+ includes ARM64 optimizations out of the box.
|
||||
UniFace automatically detects and enables `CoreMLExecutionProvider` on Apple Silicon.
|
||||
|
||||
Verify ARM64 installation:
|
||||
|
||||
@@ -47,18 +61,22 @@ python -c "import platform; print(platform.machine())"
|
||||
|
||||
### Linux/Windows with NVIDIA GPU
|
||||
|
||||
For CUDA acceleration on NVIDIA GPUs:
|
||||
|
||||
```bash
|
||||
pip install uniface[gpu]
|
||||
```
|
||||
|
||||
This installs `onnxruntime-gpu`, which includes both `CUDAExecutionProvider` and
|
||||
`CPUExecutionProvider` — no separate CPU package is needed.
|
||||
|
||||
**Requirements:**
|
||||
|
||||
- `uniface[gpu]` automatically installs `onnxruntime-gpu`. Requirements depend on the ORT version and execution provider.
|
||||
- NVIDIA driver compatible with your CUDA version
|
||||
- CUDA 11.x or 12.x toolkit
|
||||
- cuDNN 8.x
|
||||
|
||||
!!! info "CUDA Compatibility"
|
||||
See the [ONNX Runtime GPU compatibility matrix](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) for matching CUDA and cuDNN versions.
|
||||
See the [ONNX Runtime GPU compatibility matrix](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html)
|
||||
for matching CUDA and cuDNN versions.
|
||||
|
||||
Verify GPU installation:
|
||||
|
||||
@@ -70,23 +88,10 @@ print("Available providers:", ort.get_available_providers())
|
||||
|
||||
---
|
||||
|
||||
### FAISS Vector Indexing
|
||||
|
||||
For fast multi-identity face search using a FAISS index:
|
||||
|
||||
```bash
|
||||
pip install faiss-cpu # CPU
|
||||
pip install faiss-gpu # NVIDIA GPU (CUDA)
|
||||
```
|
||||
|
||||
See the [Indexing module](modules/indexing.md) for usage.
|
||||
|
||||
---
|
||||
|
||||
### CPU-Only (All Platforms)
|
||||
|
||||
```bash
|
||||
pip install uniface
|
||||
pip install uniface[cpu]
|
||||
```
|
||||
|
||||
Works on all platforms with automatic CPU fallback.
|
||||
@@ -100,37 +105,58 @@ For development or the latest features:

```bash
git clone https://github.com/yakhyo/uniface.git
cd uniface
pip install -e .

pip install -e ".[cpu]"  # CPU / Apple Silicon
pip install -e ".[gpu]"  # NVIDIA GPU
```

With development dependencies:

```bash
pip install -e ".[dev]"
pip install -e ".[cpu,dev]"
```

---

## FAISS Vector Store

For fast multi-identity face search using a FAISS vector store:

```bash
pip install faiss-cpu  # CPU
pip install faiss-gpu  # NVIDIA GPU (CUDA)
```

See the [Stores module](modules/stores.md) for usage.

---

## Dependencies

UniFace has minimal dependencies:
UniFace has minimal core dependencies:

| Package | Purpose |
|---------|---------|
| `numpy` | Array operations |
| `opencv-python` | Image processing |
| `onnxruntime` | Model inference |
| `scikit-image` | Geometric transforms |
| `scipy` | Signal processing |
| `requests` | Model download |
| `tqdm` | Progress bars |

**Optional:**
**Runtime extras (install exactly one):**

| Package | Install extra | Purpose |
|---------|---------------|---------|
| `faiss-cpu` / `faiss-gpu` | `pip install faiss-cpu` | FAISS vector indexing |
| `onnxruntime-gpu` | `uniface[gpu]` | CUDA acceleration |
| `torch` | `pip install torch` | Emotion model (TorchScript) |
| Extra | Package | Use case |
|-------|---------|----------|
| `uniface[cpu]` | `onnxruntime` | CPU inference, Apple Silicon |
| `uniface[gpu]` | `onnxruntime-gpu` | NVIDIA CUDA inference |

**Other optional packages:**

| Package | Install | Purpose |
|---------|---------|---------|
| `faiss-cpu` / `faiss-gpu` | `pip install faiss-cpu` | FAISS vector store |
| `torch` | `pip install torch` | Emotion model (TorchScript) |
| `torchvision` | `pip install torchvision` | Faster NMS for YOLO detectors |

---

@@ -155,17 +181,81 @@ print("Installation successful!")

---

## Upgrading

When upgrading UniFace, stay consistent with your runtime extra:

```bash
pip install --upgrade uniface[cpu]  # or uniface[gpu]
```

If you are switching from CPU to GPU (or vice versa):

```bash
pip uninstall onnxruntime onnxruntime-gpu -y
pip install uniface[gpu]  # install the one you want
```

---

## Pre-release Versions

UniFace ships release candidates and betas to PyPI ahead of stable releases (versions like `0.7.0rc1`, `0.7.0b1`, `0.7.0a1`). These let you try upcoming features before they're finalized.

`pip install uniface` always installs the latest **stable** release. To opt in to pre-releases:

```bash
# Latest pre-release (if newer than latest stable)
pip install uniface[cpu] --pre

# A specific pre-release
pip install uniface[cpu]==0.7.0rc1
```

Pre-releases are not recommended for production — APIs may still change before the stable release.

---

## Troubleshooting

### onnxruntime Not Found

If you see:

```
ImportError: onnxruntime is not installed. Install it with one of:
  pip install uniface[cpu]  # CPU / Apple Silicon
  pip install uniface[gpu]  # NVIDIA GPU (CUDA)
```

You installed `uniface` without a runtime extra. Run the appropriate command above.

---

### Both onnxruntime and onnxruntime-gpu Installed

If you previously ran `pip install uniface[gpu]` on top of a `pip install uniface[cpu]` (or vice versa), you may have both packages installed simultaneously, which causes conflicts. Fix it with:

```bash
pip uninstall onnxruntime onnxruntime-gpu -y
pip install uniface[gpu]  # or uniface[cpu]
```
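To confirm the environment is clean afterwards (a quick check, assuming a Unix-like shell; on Windows use `findstr` instead of `grep`):

```bash
pip list | grep onnxruntime
# Exactly one line should remain: onnxruntime OR onnxruntime-gpu
```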
---

### Import Errors

If you encounter import errors, ensure you're using Python 3.10+:
Ensure you're using Python 3.10+:

```bash
python --version
# Should show: Python 3.10.x or higher
```

---

### Model Download Issues

Models are automatically downloaded on first use. If downloads fail:

@@ -179,6 +269,25 @@ model_path = verify_model_weights(RetinaFaceWeights.MNET_V2)
print(f"Model downloaded to: {model_path}")
```

---

### CUDA Not Detected

1. Verify CUDA installation:

   ```bash
   nvidia-smi
   ```

2. Check CUDA version compatibility with ONNX Runtime.

3. Reinstall the GPU extra cleanly:

   ```bash
   pip uninstall onnxruntime onnxruntime-gpu -y
   pip install uniface[gpu]
   ```

---

### Performance Issues on Mac

Verify you're using the ARM64 build (not x86_64 via Rosetta):
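One way to check the interpreter's architecture (a sketch using only the standard library):

```bash
python -c "import platform; print(platform.machine())"
# 'arm64' = native Apple Silicon, 'x86_64' = Intel or running under Rosetta
```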
@@ -20,5 +20,6 @@ UniFace is released under the [MIT License](https://opensource.org/licenses/MIT)

| SphereFace | [yakhyo/face-recognition](https://github.com/yakhyo/face-recognition) | MIT |
| BiSeNet | [yakhyo/face-parsing](https://github.com/yakhyo/face-parsing) | MIT |
| MobileGaze | [yakhyo/gaze-estimation](https://github.com/yakhyo/gaze-estimation) | MIT |
| MODNet | [yakhyo/modnet](https://github.com/yakhyo/modnet) | Apache-2.0 |
| MiniFASNet | [yakhyo/face-anti-spoofing](https://github.com/yakhyo/face-anti-spoofing) | Apache-2.0 |
| FairFace | [yakhyo/fairface-onnx](https://github.com/yakhyo/fairface-onnx) | CC BY 4.0 |

@@ -156,6 +156,24 @@ Face recognition using angular softmax loss.

---

### EdgeFace

Efficient face recognition designed for edge devices, using an EdgeNeXt backbone with optional LoRA compression.

| Model Name | Backbone | Params | MFLOPs | Size | LFW | CALFW | CPLFW | CFP-FP | AgeDB-30 |
| --------------- | -------- | ------ | ------ | ----- | ------ | ------ | ------ | ------ | -------- |
| `XXS` :material-check-circle: | EdgeNeXt | 1.24M | 94 | ~5 MB | 99.57% | 94.83% | 90.27% | 93.63% | 94.92% |
| `XS_GAMMA_06` | EdgeNeXt | 1.77M | 154 | ~7 MB | 99.73% | 95.28% | 91.58% | 94.71% | 96.08% |
| `S_GAMMA_05` | EdgeNeXt | 3.65M | 306 | ~14 MB | 99.78% | 95.55% | 92.48% | 95.74% | 97.03% |
| `BASE` | EdgeNeXt | 18.2M | 1399 | ~70 MB | 99.83% | 96.07% | 93.75% | 97.01% | 97.60% |

!!! info "Training Data & Reference"
    **Paper**: [EdgeFace: Efficient Face Recognition Model for Edge Devices](https://arxiv.org/abs/2307.01838v2) (IEEE T-BIOM 2024)

    **Source**: [github.com/otroshi/edgeface](https://github.com/otroshi/edgeface) | [github.com/yakhyo/edgeface-onnx](https://github.com/yakhyo/edgeface-onnx)
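Usage mirrors the other recognizers (see the [Recognition module](modules/recognition.md) for the full API); a minimal sketch, with `photo.jpg` as a placeholder path:

```python
import cv2
from uniface.detection import RetinaFace
from uniface.recognition import EdgeFace
from uniface.constants import EdgeFaceWeights

detector = RetinaFace()
recognizer = EdgeFace(model_name=EdgeFaceWeights.XXS)  # default variant

image = cv2.imread("photo.jpg")
faces = detector.detect(image)
if faces:
    embedding = recognizer.get_normalized_embedding(image, faces[0].landmarks)  # (512,)
```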
---

## Facial Landmark Models

### 106-Point Landmark Detection

@@ -257,6 +275,33 @@ Gaze direction prediction models trained on [Gaze360](datasets.md#gaze360) datas

---

## Head Pose Estimation Models

### HeadPose Family

Head pose estimation models using a 6D rotation representation, trained on the [300W-LP](datasets.md#300w-lp) dataset and evaluated on AFLW2000. Returns pitch, yaw, and roll angles in degrees.

| Model Name | Backbone | Size | MAE* |
| -------------- | -------- | ------- | ----- |
| `RESNET18` :material-check-circle: | ResNet18 | 43 MB | 5.22° |
| `RESNET34` | ResNet34 | 82 MB | 5.07° |
| `RESNET50` | ResNet50 | 91 MB | 4.83° |
| `MOBILENET_V2` | MobileNetV2 | 9.6 MB | 5.72° |
| `MOBILENET_V3_SMALL` | MobileNetV3-Small | 4.8 MB | 6.31° |
| `MOBILENET_V3_LARGE` | MobileNetV3-Large | 16 MB | 5.58° |

*MAE (Mean Absolute Error) in degrees on the AFLW2000 test set — lower is better

!!! info "Training Data"
    **Dataset**: Trained on [300W-LP](datasets.md#300w-lp) (synthesized large-pose faces from 300W)

    **Method**: 6D rotation representation (rotation matrix → Euler angles)

!!! note "Input Requirements"
    Requires a face crop as input. Use face detection first to obtain bounding boxes.
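The final step of the 6D method maps a predicted rotation matrix to Euler angles. A minimal sketch of that conversion (illustrative only; the axis convention shown is an assumption and may differ from the library's internals):

```python
import numpy as np

def rotation_matrix_to_euler_degrees(R: np.ndarray) -> tuple[float, float, float]:
    """Convert a 3x3 rotation matrix to (pitch, yaw, roll) in degrees (ZYX convention)."""
    pitch = np.degrees(np.arctan2(R[2, 1], R[2, 2]))           # rotation around X
    yaw = np.degrees(np.arcsin(np.clip(-R[2, 0], -1.0, 1.0)))  # rotation around Y
    roll = np.degrees(np.arctan2(R[1, 0], R[0, 0]))            # rotation around Z
    return pitch, yaw, roll
```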
---

## Face Parsing Models

### BiSeNet Family

@@ -326,6 +371,36 @@ XSeg from DeepFaceLab outputs masks for face regions. Requires 5-point landmarks

---

## Portrait Matting Models

### MODNet

MODNet (Real-Time Trimap-Free Portrait Matting) produces soft alpha mattes from full images without requiring a trimap. It uses a MobileNetV2 backbone with low-resolution, high-resolution, and fusion branches.

| Model Name | Variant | Size | Use Case |
| ---------- | ------- | ---- | -------- |
| `PHOTOGRAPHIC` :material-check-circle: | High-quality | 25 MB | Portrait photos |
| `WEBCAM` | Real-time | 25 MB | Webcam feeds |

!!! info "Model Details"
    **Paper**: [MODNet: Real-Time Trimap-Free Portrait Matting via Objective Decomposition](https://arxiv.org/abs/2011.11961) (AAAI 2022)

    **Source**: [yakhyo/modnet](https://github.com/yakhyo/modnet) — ported weights and a clean inference codebase

    **Output**: Alpha matte `(H, W)` in `[0, 1]`

**Applications:**

- Background removal / replacement
- Green screen compositing
- Video conferencing virtual backgrounds
- Portrait editing

!!! note "Input Requirements"
    Operates on full images (not face crops). No trimap or face detection required.
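In code, the whole pipeline is one call (see the [Matting module](modules/matting.md) for the full API); a minimal sketch, with `photo.jpg` as a placeholder path:

```python
import cv2
from uniface.matting import MODNet

matting = MODNet()  # PHOTOGRAPHIC variant by default
matte = matting.predict(cv2.imread("photo.jpg"))  # (H, W) float32 in [0, 1]
```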
---

## Anti-Spoofing Models

### MiniFASNet Family

@@ -372,8 +447,10 @@ See [Model Cache & Offline Use](concepts/model-cache-offline.md) for full detail

- **AdaFace ONNX**: [yakhyo/adaface-onnx](https://github.com/yakhyo/adaface-onnx) - ONNX export and inference
- **Face Recognition Training**: [yakhyo/face-recognition](https://github.com/yakhyo/face-recognition) - ArcFace, MobileFace, SphereFace training code
- **Gaze Estimation Training**: [yakhyo/gaze-estimation](https://github.com/yakhyo/gaze-estimation) - MobileGaze training code and pretrained weights
- **Head Pose Estimation**: [yakhyo/head-pose-estimation](https://github.com/yakhyo/head-pose-estimation) - 6D rotation head pose estimation training and ONNX models
- **Face Parsing Training**: [yakhyo/face-parsing](https://github.com/yakhyo/face-parsing) - BiSeNet training code and pretrained weights
- **Face Segmentation**: [yakhyo/face-segmentation](https://github.com/yakhyo/face-segmentation) - XSeg ONNX inference
- **Portrait Matting**: [yakhyo/modnet](https://github.com/yakhyo/modnet) - MODNet ported weights and inference (from [ZHKKKe/MODNet](https://github.com/ZHKKKe/MODNet))
- **Face Anti-Spoofing**: [yakhyo/face-anti-spoofing](https://github.com/yakhyo/face-anti-spoofing) - MiniFASNet ONNX inference (weights from [minivision-ai/Silent-Face-Anti-Spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing))
- **FairFace**: [yakhyo/fairface-onnx](https://github.com/yakhyo/fairface-onnx) - FairFace ONNX inference for race, gender, and age prediction
- **InsightFace**: [deepinsight/insightface](https://github.com/deepinsight/insightface) - Model architectures and pretrained weights

@@ -386,4 +463,5 @@ See [Model Cache & Offline Use](concepts/model-cache-offline.md) for full detail

- **AdaFace**: [AdaFace: Quality Adaptive Margin for Face Recognition](https://arxiv.org/abs/2204.00964)
- **ArcFace**: [Additive Angular Margin Loss for Deep Face Recognition](https://arxiv.org/abs/1801.07698)
- **SphereFace**: [Deep Hypersphere Embedding for Face Recognition](https://arxiv.org/abs/1704.08063)
- **MODNet**: [Real-Time Trimap-Free Portrait Matting via Objective Decomposition](https://arxiv.org/abs/2011.11961)
- **BiSeNet**: [Bilateral Segmentation Network for Real-time Semantic Segmentation](https://arxiv.org/abs/1808.00897)
@@ -2,6 +2,11 @@

Facial attribute analysis for age, gender, race, and emotion detection.

<figure markdown="span">
  { width="100%" }
  <figcaption>Age and gender prediction with detection bounding boxes</figcaption>
</figure>

---

## Available Models

@@ -30,9 +35,10 @@ age_gender = AgeGender()
faces = detector.detect(image)

for face in faces:
    result = age_gender.predict(image, face.bbox)
    result = age_gender.predict(image, face)
    print(f"Gender: {result.sex}")  # "Female" or "Male"
    print(f"Age: {result.age} years")
    # face.gender and face.age are also set automatically
```

### Output

@@ -64,10 +70,11 @@ fairface = FairFace()
faces = detector.detect(image)

for face in faces:
    result = fairface.predict(image, face.bbox)
    result = fairface.predict(image, face)
    print(f"Gender: {result.sex}")
    print(f"Age Group: {result.age_group}")
    print(f"Race: {result.race}")
    # face.gender, face.age_group, face.race are also set automatically
```

### Output

@@ -132,7 +139,7 @@ emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7)
faces = detector.detect(image)

for face in faces:
    result = emotion.predict(image, face.landmarks)
    result = emotion.predict(image, face)
    print(f"Emotion: {result.emotion}")
    print(f"Confidence: {result.confidence:.2%}")
```

@@ -179,6 +186,22 @@ emotion = Emotion(model_name=DDAMFNWeights.AFFECNET8)

---

## Factory Function

Use `create_attribute_predictor()` for dynamic model selection:

```python
from uniface import create_attribute_predictor

age_gender = create_attribute_predictor('age_gender')
fairface = create_attribute_predictor('fairface')
emotion = create_attribute_predictor('emotion')
```

Available model names: `'age_gender'`, `'fairface'`, `'emotion'`.

---

## Combining Models

### Full Attribute Analysis

@@ -195,10 +218,10 @@ faces = detector.detect(image)

for face in faces:
    # Get exact age from AgeGender
    ag_result = age_gender.predict(image, face.bbox)
    ag_result = age_gender.predict(image, face)

    # Get race from FairFace
    ff_result = fairface.predict(image, face.bbox)
    ff_result = fairface.predict(image, face)

    print(f"Gender: {ag_result.sex}")
    print(f"Exact Age: {ag_result.age}")

@@ -215,7 +238,7 @@ from uniface.detection import RetinaFace

analyzer = FaceAnalyzer(
    RetinaFace(),
    age_gender=AgeGender(),
    attributes=[AgeGender()],
)

faces = analyzer.analyze(image)

@@ -257,7 +280,7 @@ def draw_attributes(image, face, result):

# Usage
for face in faces:
    result = age_gender.predict(image, face.bbox)
    result = age_gender.predict(image, face)
    image = draw_attributes(image, face, result)

cv2.imwrite("attributes.jpg", image)
@@ -2,6 +2,11 @@

Face detection is the first step in any face analysis pipeline. UniFace provides four detection models.

<figure markdown="span">
  { width="100%" }
  <figcaption>SCRFD detection with corner-style bounding boxes and 5-point landmarks</figcaption>
</figure>

---

## Available Models

@@ -264,10 +269,8 @@ from uniface.draw import draw_detections

draw_detections(
    image=image,
    bboxes=[f.bbox for f in faces],
    scores=[f.confidence for f in faces],
    landmarks=[f.landmarks for f in faces],
    vis_threshold=0.6
    faces=faces,
    vis_threshold=0.6,
)

cv2.imwrite("result.jpg", image)
@@ -2,6 +2,11 @@

Gaze estimation predicts where a person is looking (pitch and yaw angles).

<figure markdown="span">
  { width="100%" }
  <figcaption>Gaze direction arrows with pitch/yaw angle labels</figcaption>
</figure>

---

## Available Models

@@ -267,6 +272,7 @@ gaze = create_gaze_estimator()  # Returns MobileGaze

## Next Steps

- [Head Pose Estimation](headpose.md) - 3D head orientation
- [Anti-Spoofing](spoofing.md) - Face liveness detection
- [Privacy](privacy.md) - Face anonymization
- [Video Recipe](../recipes/video-webcam.md) - Real-time processing
237
docs/modules/headpose.md
Normal file
@@ -0,0 +1,237 @@

# Head Pose Estimation

Head pose estimation predicts the 3D orientation of a person's head (pitch, yaw, and roll angles).

<figure markdown="span">
  { width="100%" }
  <figcaption>3D head pose visualization with pitch, yaw, and roll angles</figcaption>
</figure>

---

## Available Models

| Model | Backbone | Size | MAE* |
|-------|----------|------|------|
| **ResNet18** :material-check-circle: | ResNet18 | 43 MB | 5.22° |
| ResNet34 | ResNet34 | 82 MB | 5.07° |
| ResNet50 | ResNet50 | 91 MB | 4.83° |
| MobileNetV2 | MobileNetV2 | 9.6 MB | 5.72° |
| MobileNetV3-Small | MobileNetV3 | 4.8 MB | 6.31° |
| MobileNetV3-Large | MobileNetV3 | 16 MB | 5.58° |

*MAE = Mean Absolute Error on the AFLW2000 test set (lower is better)

---

## Basic Usage

```python
import cv2
from uniface.detection import RetinaFace
from uniface.headpose import HeadPose

detector = RetinaFace()
head_pose = HeadPose()

image = cv2.imread("photo.jpg")
faces = detector.detect(image)

for face in faces:
    # Crop face
    x1, y1, x2, y2 = map(int, face.bbox)
    face_crop = image[y1:y2, x1:x2]

    if face_crop.size > 0:
        # Estimate head pose
        result = head_pose.estimate(face_crop)
        print(f"Pitch: {result.pitch:.1f}°, Yaw: {result.yaw:.1f}°, Roll: {result.roll:.1f}°")
```

---

## Model Variants

```python
from uniface.headpose import HeadPose
from uniface.constants import HeadPoseWeights

# Default (ResNet18, recommended balance of speed and accuracy)
hp = HeadPose()

# Lightweight for mobile/edge
hp = HeadPose(model_name=HeadPoseWeights.MOBILENET_V3_SMALL)

# Higher accuracy
hp = HeadPose(model_name=HeadPoseWeights.RESNET50)
```

---

## Output Format

```python
result = head_pose.estimate(face_crop)

# HeadPoseResult dataclass
result.pitch  # Rotation around X-axis in degrees
result.yaw    # Rotation around Y-axis in degrees
result.roll   # Rotation around Z-axis in degrees
```

### Angle Convention

```
          pitch > 0 (looking down)
                 │
                 │
 yaw < 0 ────────┼──────── yaw > 0
 (looking left)  │  (looking right)
                 │
          pitch < 0 (looking up)

 roll > 0 = clockwise tilt
 roll < 0 = counter-clockwise tilt
```

- **Pitch**: Rotation around the X-axis (positive = looking down)
- **Yaw**: Rotation around the Y-axis (positive = looking right)
- **Roll**: Rotation around the Z-axis (positive = tilting clockwise)
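A small helper that turns these angles into a human-readable label (a hypothetical convenience function following the convention above, not part of the library):

```python
def describe_pose(result, threshold: float = 15.0) -> str:
    """Map pitch/yaw angles to a rough direction label."""
    parts = []
    if result.pitch > threshold:
        parts.append("looking down")
    elif result.pitch < -threshold:
        parts.append("looking up")
    if result.yaw > threshold:
        parts.append("looking right")
    elif result.yaw < -threshold:
        parts.append("looking left")
    return ", ".join(parts) or "facing forward"

# Example: print(describe_pose(head_pose.estimate(face_crop)))
```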
---

## Visualization

### 3D Cube (default)

The default visualization draws a wireframe cube oriented to match the head pose.

```python
from uniface.draw import draw_head_pose

faces = detector.detect(image)

for face in faces:
    x1, y1, x2, y2 = map(int, face.bbox)
    face_crop = image[y1:y2, x1:x2]

    if face_crop.size > 0:
        result = head_pose.estimate(face_crop)

        # Draw cube on image (default)
        draw_head_pose(image, face.bbox, result.pitch, result.yaw, result.roll)

cv2.imwrite("headpose_output.jpg", image)
```

### Axis Visualization

```python
from uniface.draw import draw_head_pose

# X/Y/Z coordinate axes
draw_head_pose(image, face.bbox, result.pitch, result.yaw, result.roll, draw_type='axis')
```

### Low-Level Drawing Functions

```python
from uniface.draw import draw_head_pose_cube, draw_head_pose_axis

# Draw cube directly
draw_head_pose_cube(image, yaw=10.0, pitch=-5.0, roll=2.0, bbox=[100, 100, 250, 280])

# Draw axes directly
draw_head_pose_axis(image, yaw=10.0, pitch=-5.0, roll=2.0, bbox=[100, 100, 250, 280])
```

---

## Real-Time Head Pose Tracking

```python
import cv2
from uniface.detection import RetinaFace
from uniface.headpose import HeadPose
from uniface.draw import draw_head_pose

detector = RetinaFace()
head_pose = HeadPose()

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    faces = detector.detect(frame)

    for face in faces:
        x1, y1, x2, y2 = map(int, face.bbox)
        face_crop = frame[y1:y2, x1:x2]

        if face_crop.size > 0:
            result = head_pose.estimate(face_crop)
            draw_head_pose(frame, face.bbox, result.pitch, result.yaw, result.roll)

    cv2.imshow("Head Pose Estimation", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```

---

## Use Cases

### Driver Drowsiness Detection

```python
def is_head_drooping(result, pitch_threshold=15):
    """Check if the head is drooping (pitch > 0 means looking down)."""
    return result.pitch > pitch_threshold

result = head_pose.estimate(face_crop)
if is_head_drooping(result):
    print("Warning: Head drooping detected")
```

### Attention Monitoring

```python
def is_facing_forward(result, threshold=20):
    """Check if the person is facing roughly forward."""
    return (
        abs(result.pitch) < threshold
        and abs(result.yaw) < threshold
        and abs(result.roll) < threshold
    )

result = head_pose.estimate(face_crop)
if is_facing_forward(result):
    print("Facing forward")
else:
    print("Looking away")
```

---

## Factory Function

```python
from uniface.headpose import create_head_pose_estimator

hp = create_head_pose_estimator()  # Returns HeadPose
```

---

## Next Steps

- [Gaze Estimation](gaze.md) - Eye gaze direction
- [Anti-Spoofing](spoofing.md) - Face liveness detection
- [Video Recipe](../recipes/video-webcam.md) - Real-time processing
@@ -2,6 +2,11 @@

Facial landmark detection provides precise localization of facial features.

<figure markdown="span">
  { width="50%" }
  <figcaption>106-point facial landmark localization</figcaption>
</figure>

---

## Available Models
157
docs/modules/matting.md
Normal file
@@ -0,0 +1,157 @@

# Portrait Matting

Portrait matting produces a soft alpha matte separating the foreground (person) from the background — no trimap needed.

<figure markdown="span">
  { width="100%" }
  <figcaption>MODNet: Input → Matte → Green Screen</figcaption>
</figure>

---

## Available Models

| Model | Variant | Size | Use Case |
|-------|---------|------|----------|
| **MODNet Photographic** :material-check-circle: | PHOTOGRAPHIC | 25 MB | High-quality portrait photos |
| MODNet Webcam | WEBCAM | 25 MB | Real-time webcam feeds |

---

## Basic Usage

```python
import cv2
from uniface.matting import MODNet

matting = MODNet()

image = cv2.imread("photo.jpg")
matte = matting.predict(image)

print(f"Matte shape: {matte.shape}")  # (H, W)
print(f"Matte dtype: {matte.dtype}")  # float32
print(f"Matte range: [{matte.min():.2f}, {matte.max():.2f}]")  # [0, 1]
```

---

## Model Variants

```python
from uniface.matting import MODNet
from uniface.constants import MODNetWeights

# Photographic (default) — best for photos
matting = MODNet()

# Webcam — optimized for real-time
matting = MODNet(model_name=MODNetWeights.WEBCAM)

# Custom input size
matting = MODNet(input_size=256)
```

| Parameter | Default | Description |
|-----------|---------|-------------|
| `model_name` | `PHOTOGRAPHIC` | Model variant to load |
| `input_size` | `512` | Target shorter-side size for preprocessing |
| `providers` | `None` | ONNX Runtime execution providers |
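For example, to pin inference to the CPU (a sketch, assuming `providers` is forwarded to the ONNX Runtime session as in the other UniFace models):

```python
from uniface.matting import MODNet

# Assumption: providers is passed through to ONNX Runtime unchanged
matting = MODNet(providers=['CPUExecutionProvider'])
```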
---

## Applications

### Transparent Background (RGBA)

```python
import cv2
import numpy as np

matting = MODNet()
image = cv2.imread("photo.jpg")
matte = matting.predict(image)

rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
rgba[:, :, 3] = (matte * 255).astype(np.uint8)
cv2.imwrite("transparent.png", rgba)
```

### Green Screen

```python
import numpy as np

matte_3ch = matte[:, :, np.newaxis]
bg = np.full_like(image, (0, 177, 64), dtype=np.uint8)
green = (image * matte_3ch + bg * (1 - matte_3ch)).astype(np.uint8)
cv2.imwrite("green_screen.jpg", green)
```

### Custom Background

```python
import cv2
import numpy as np

background = cv2.imread("beach.jpg")
background = cv2.resize(background, (image.shape[1], image.shape[0]))

matte_3ch = matte[:, :, np.newaxis]
result = (image * matte_3ch + background * (1 - matte_3ch)).astype(np.uint8)
cv2.imwrite("custom_bg.jpg", result)
```

### Webcam Matting

```python
import cv2
import numpy as np
from uniface.matting import MODNet

matting = MODNet(model_name="modnet_webcam")
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    matte = matting.predict(frame)
    matte_3ch = matte[:, :, np.newaxis]
    bg = np.full_like(frame, (0, 177, 64), dtype=np.uint8)
    result = (frame * matte_3ch + bg * (1 - matte_3ch)).astype(np.uint8)

    cv2.imshow("Matting", np.hstack([frame, result]))
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
```

---

## Factory Function

```python
from uniface.matting import create_matting_model
from uniface.constants import MODNetWeights

# Default (Photographic)
matting = create_matting_model()

# With enum
matting = create_matting_model(MODNetWeights.WEBCAM)

# With string
matting = create_matting_model("modnet_webcam")
```

---

## Next Steps

- [Parsing](parsing.md) - Face semantic segmentation
- [Privacy](privacy.md) - Face anonymization
- [Detection](detection.md) - Face detection
@@ -2,6 +2,16 @@

Face parsing segments faces into semantic components or face regions.

<figure markdown="span">
  { width="80%" }
  <figcaption>BiSeNet face parsing with 19 semantic component classes</figcaption>
</figure>

<figure markdown="span">
  { width="80%" }
  <figcaption>XSeg face region segmentation mask</figcaption>
</figure>

---

## Available Models

@@ -2,6 +2,11 @@

Face anonymization protects privacy by blurring or obscuring faces in images and videos.

<figure markdown="span">
  { width="100%" }
  <figcaption>Five anonymization methods: pixelate, gaussian, blackout, elliptical, and median</figcaption>
</figure>

---

## Available Methods

@@ -2,6 +2,11 @@

Face recognition extracts embeddings for identity verification and face search.

<figure markdown="span">
  { width="80%" }
  <figcaption>Pairwise face verification with cosine similarity scores</figcaption>
</figure>

---

## Available Models

@@ -10,6 +15,7 @@ Face recognition extracts embeddings for identity verification and face search.

|-------|----------|------|---------------|
| **AdaFace** | IR-18/IR-101 | 92-249 MB | 512 |
| **ArcFace** | MobileNet/ResNet | 8-166 MB | 512 |
| **EdgeFace** | EdgeNeXt/LoRA | 5-70 MB | 512 |
| **MobileFace** | MobileNet V2/V3 | 1-10 MB | 512 |
| **SphereFace** | Sphere20/36 | 50-92 MB | 512 |
@@ -113,6 +119,64 @@ recognizer = ArcFace(providers=['CPUExecutionProvider'])

---

## EdgeFace

Efficient face recognition designed for edge devices, using an EdgeNeXt backbone with optional LoRA low-rank compression. Winning entry of the compact track at the EFaR 2023 competition (IJCB).

### Basic Usage

```python
from uniface.detection import RetinaFace
from uniface.recognition import EdgeFace

detector = RetinaFace()
recognizer = EdgeFace()

# Detect face
faces = detector.detect(image)

# Extract embedding
if faces:
    embedding = recognizer.get_normalized_embedding(image, faces[0].landmarks)
    print(f"Embedding shape: {embedding.shape}")  # (512,)
```
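EdgeFace embeddings drop into the same verification flow as the other recognizers; a short sketch using `compute_similarity` (the image paths are placeholders):

```python
import cv2
from uniface import compute_similarity
from uniface.detection import RetinaFace
from uniface.recognition import EdgeFace

detector = RetinaFace()
recognizer = EdgeFace()

img1, img2 = cv2.imread("person1.jpg"), cv2.imread("person2.jpg")
faces1, faces2 = detector.detect(img1), detector.detect(img2)

if faces1 and faces2:
    emb1 = recognizer.get_normalized_embedding(img1, faces1[0].landmarks)
    emb2 = recognizer.get_normalized_embedding(img2, faces2[0].landmarks)
    similarity = compute_similarity(emb1, emb2, normalized=True)
    print(f"Cosine similarity: {similarity:.3f}")
```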
### Model Variants

```python
from uniface.recognition import EdgeFace
from uniface.constants import EdgeFaceWeights

# Ultra-compact (default)
recognizer = EdgeFace(model_name=EdgeFaceWeights.XXS)

# Compact with LoRA
recognizer = EdgeFace(model_name=EdgeFaceWeights.XS_GAMMA_06)

# Small with LoRA
recognizer = EdgeFace(model_name=EdgeFaceWeights.S_GAMMA_05)

# Full-size
recognizer = EdgeFace(model_name=EdgeFaceWeights.BASE)

# Force CPU execution
recognizer = EdgeFace(providers=['CPUExecutionProvider'])
```

| Variant | Params | MFLOPs | Size | LFW | CALFW | CPLFW | CFP-FP | AgeDB-30 |
|---------|--------|--------|------|-----|-------|-------|--------|----------|
| **XXS** :material-check-circle: | 1.24M | 94 | ~5 MB | 99.57% | 94.83% | 90.27% | 93.63% | 94.92% |
| XS_GAMMA_06 | 1.77M | 154 | ~7 MB | 99.73% | 95.28% | 91.58% | 94.71% | 96.08% |
| S_GAMMA_05 | 3.65M | 306 | ~14 MB | 99.78% | 95.55% | 92.48% | 95.74% | 97.03% |
| BASE | 18.2M | 1399 | ~70 MB | 99.83% | 96.07% | 93.75% | 97.01% | 97.60% |

!!! info "Reference"
    **Paper**: [EdgeFace: Efficient Face Recognition Model for Edge Devices](https://arxiv.org/abs/2307.01838v2) (IEEE T-BIOM 2024)

    **Source**: [github.com/otroshi/edgeface](https://github.com/otroshi/edgeface)

---

## MobileFace

Lightweight face recognition models with MobileNet backbones.

@@ -287,9 +351,10 @@ else:

```python
from uniface.recognition import create_recognizer

# Available methods: 'arcface', 'adaface', 'mobileface', 'sphereface'
# Available methods: 'arcface', 'adaface', 'edgeface', 'mobileface', 'sphereface'
recognizer = create_recognizer('arcface')
recognizer = create_recognizer('adaface')
recognizer = create_recognizer('edgeface')
```

---

@@ -1,4 +1,4 @@

# Indexing
# Stores

FAISS-backed vector store for fast similarity search over embeddings.

@@ -12,7 +12,7 @@ FAISS-backed vector store for fast similarity search over embeddings.

## FAISS

```python
from uniface.indexing import FAISS
from uniface.stores import FAISS
```

A thin wrapper around a FAISS `IndexFlatIP` (inner-product) index. Vectors
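For intuition, this is roughly what the wrapper builds on (raw FAISS, shown for illustration; the store adds persistence via `save()`/`load()` on top):

```python
import numpy as np
import faiss  # pip install faiss-cpu

index = faiss.IndexFlatIP(512)                    # inner-product index over 512-D vectors
vecs = np.random.rand(10, 512).astype("float32")  # dummy embeddings
faiss.normalize_L2(vecs)                          # normalized vectors -> IP equals cosine
index.add(vecs)
scores, ids = index.search(vecs[:1], 3)           # top-3 nearest neighbors
```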
@@ -134,7 +134,7 @@ loaded = store.load()  # True if files exist

import cv2
from uniface.detection import RetinaFace
from uniface.recognition import ArcFace
from uniface.indexing import FAISS
from uniface.stores import FAISS

detector = RetinaFace()
recognizer = ArcFace()

@@ -12,12 +12,15 @@ Run UniFace examples directly in your browser with Google Colab, or download and

| [Face Alignment](https://github.com/yakhyo/uniface/blob/main/examples/02_face_alignment.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/02_face_alignment.ipynb) | Align faces for recognition |
| [Face Verification](https://github.com/yakhyo/uniface/blob/main/examples/03_face_verification.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/03_face_verification.ipynb) | Compare faces for identity |
| [Face Search](https://github.com/yakhyo/uniface/blob/main/examples/04_face_search.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/04_face_search.ipynb) | Find a person in group photos |
| [Face Analyzer](https://github.com/yakhyo/uniface/blob/main/examples/05_face_analyzer.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/05_face_analyzer.ipynb) | All-in-one face analysis |
| [Face Analyzer](https://github.com/yakhyo/uniface/blob/main/examples/05_face_analyzer.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/05_face_analyzer.ipynb) | Unified face analysis |
| [Face Parsing](https://github.com/yakhyo/uniface/blob/main/examples/06_face_parsing.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/06_face_parsing.ipynb) | Semantic face segmentation |
| [Face Anonymization](https://github.com/yakhyo/uniface/blob/main/examples/07_face_anonymization.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/07_face_anonymization.ipynb) | Privacy-preserving blur |
| [Gaze Estimation](https://github.com/yakhyo/uniface/blob/main/examples/08_gaze_estimation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/08_gaze_estimation.ipynb) | Gaze direction estimation |
| [Face Segmentation](https://github.com/yakhyo/uniface/blob/main/examples/09_face_segmentation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/09_face_segmentation.ipynb) | Face segmentation with XSeg |
| [Face Vector Store](https://github.com/yakhyo/uniface/blob/main/examples/10_face_vector_store.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/10_face_vector_store.ipynb) | FAISS-backed face database |
| [Head Pose Estimation](https://github.com/yakhyo/uniface/blob/main/examples/11_head_pose_estimation.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/11_head_pose_estimation.ipynb) | 3D head orientation estimation |
| [Face Recognition](https://github.com/yakhyo/uniface/blob/main/examples/12_face_recognition.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/12_face_recognition.ipynb) | Standalone face recognition pipeline |
| [Portrait Matting](https://github.com/yakhyo/uniface/blob/main/examples/13_portrait_matting.ipynb) | [](https://colab.research.google.com/github/yakhyo/uniface/blob/main/examples/13_portrait_matting.ipynb) | Portrait matting with MODNet |

---

@@ -31,7 +34,7 @@ git clone https://github.com/yakhyo/uniface.git
cd uniface

# Install dependencies
pip install uniface jupyter
pip install "uniface[cpu]" jupyter  # or uniface[gpu] for CUDA

# Launch Jupyter
jupyter notebook examples/
@@ -54,19 +54,8 @@ detector = RetinaFace()
image = cv2.imread("photo.jpg")
faces = detector.detect(image)

# Extract visualization data
bboxes = [f.bbox for f in faces]
scores = [f.confidence for f in faces]
landmarks = [f.landmarks for f in faces]

# Draw on image
draw_detections(
    image=image,
    bboxes=bboxes,
    scores=scores,
    landmarks=landmarks,
    vis_threshold=0.6,
)
draw_detections(image=image, faces=faces, vis_threshold=0.6)

# Save result
cv2.imwrite("output.jpg", image)

@@ -80,7 +69,6 @@ Compare two faces:

```python
import cv2
import numpy as np
from uniface.detection import RetinaFace
from uniface.recognition import ArcFace

@@ -97,12 +85,13 @@ faces1 = detector.detect(image1)
faces2 = detector.detect(image2)

if faces1 and faces2:
    # Extract embeddings
    # Extract embeddings (normalized 1-D vectors)
    emb1 = recognizer.get_normalized_embedding(image1, faces1[0].landmarks)
    emb2 = recognizer.get_normalized_embedding(image2, faces2[0].landmarks)

    # Compute similarity (cosine similarity)
    similarity = np.dot(emb1, emb2.T)[0][0]
    # Compute cosine similarity
    from uniface import compute_similarity
    similarity = compute_similarity(emb1, emb2, normalized=True)

    # Interpret result
    if similarity > 0.6:

@@ -135,7 +124,7 @@ faces = detector.detect(image)

# Predict attributes
for i, face in enumerate(faces):
    result = age_gender.predict(image, face.bbox)
    result = age_gender.predict(image, face)
    print(f"Face {i+1}: {result.sex}, {result.age} years old")
```

@@ -164,7 +153,7 @@ image = cv2.imread("photo.jpg")
faces = detector.detect(image)

for i, face in enumerate(faces):
    result = fairface.predict(image, face.bbox)
    result = fairface.predict(image, face)
    print(f"Face {i+1}: {result.sex}, {result.age_group}, {result.race}")
```

@@ -234,6 +223,36 @@ cv2.imwrite("gaze_output.jpg", image)

---

## Head Pose Estimation

```python
import cv2
from uniface.detection import RetinaFace
from uniface.headpose import HeadPose
from uniface.draw import draw_head_pose

detector = RetinaFace()
head_pose = HeadPose()

image = cv2.imread("photo.jpg")
faces = detector.detect(image)

for i, face in enumerate(faces):
    x1, y1, x2, y2 = map(int, face.bbox[:4])
    face_crop = image[y1:y2, x1:x2]

    if face_crop.size > 0:
        result = head_pose.estimate(face_crop)
        print(f"Face {i+1}: pitch={result.pitch:.1f}°, yaw={result.yaw:.1f}°, roll={result.roll:.1f}°")

        # Draw 3D cube visualization
        draw_head_pose(image, face.bbox, result.pitch, result.yaw, result.roll)

cv2.imwrite("headpose_output.jpg", image)
```

---

## Face Parsing

Segment a face into semantic components:
@@ -261,6 +280,34 @@ print(f"Detected {len(np.unique(mask))} facial components")

---

## Portrait Matting

Remove backgrounds without a trimap:

```python
import cv2
import numpy as np
from uniface.matting import MODNet

matting = MODNet()

image = cv2.imread("portrait.jpg")
matte = matting.predict(image)  # (H, W) float32 in [0, 1]

# Transparent PNG
rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
rgba[:, :, 3] = (matte * 255).astype(np.uint8)
cv2.imwrite("transparent.png", rgba)

# Green screen
matte_3ch = matte[:, :, np.newaxis]
bg = np.full_like(image, (0, 177, 64), dtype=np.uint8)
result = (image * matte_3ch + bg * (1 - matte_3ch)).astype(np.uint8)
cv2.imwrite("green_screen.jpg", result)
```

---

## Face Anonymization

Blur faces for privacy protection:

@@ -342,10 +389,7 @@ while True:

    faces = detector.detect(frame)

    bboxes = [f.bbox for f in faces]
    scores = [f.confidence for f in faces]
    landmarks = [f.landmarks for f in faces]
    draw_detections(image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks)
    draw_detections(image=frame, faces=faces)

    cv2.imshow("UniFace - Press 'q' to quit", frame)

@@ -424,6 +468,7 @@ For detailed model comparisons and benchmarks, see the [Model Zoo](models.md).

| Recognition | `ArcFace`, `AdaFace`, `MobileFace`, `SphereFace` |
| Tracking | `BYTETracker` |
| Gaze | `MobileGaze` (ResNet18/34/50, MobileNetV2, MobileOneS0) |
| Head Pose | `HeadPose` (ResNet18/34/50, MobileNetV2/V3) |
| Parsing | `BiSeNet` (ResNet18/34) |
| Attributes | `AgeGender`, `FairFace`, `Emotion` |
| Anti-Spoofing | `MiniFASNet` (V1SE, V2) |

@@ -470,12 +515,13 @@ from uniface.recognition import ArcFace, AdaFace
from uniface.attribute import AgeGender, FairFace
from uniface.landmark import Landmark106
from uniface.gaze import MobileGaze
from uniface.headpose import HeadPose
from uniface.parsing import BiSeNet, XSeg
from uniface.privacy import BlurFace
from uniface.spoofing import MiniFASNet
from uniface.tracking import BYTETracker
from uniface.analyzer import FaceAnalyzer
from uniface.indexing import FAISS  # pip install faiss-cpu
from uniface.stores import FAISS  # pip install faiss-cpu
from uniface.draw import draw_detections, draw_tracks
```
@@ -52,7 +52,7 @@ python tools/search.py --reference ref.jpg --source 0  # webcam

## Vector Search (FAISS index)

For identifying faces against a database of many known people, use the
[`FAISS`](../modules/indexing.md) vector store.
[`FAISS`](../modules/stores.md) vector store.

!!! info "Install extra"
    ```bash

@@ -80,7 +80,7 @@ import cv2
from pathlib import Path
from uniface.detection import RetinaFace
from uniface.recognition import ArcFace
from uniface.indexing import FAISS
from uniface.stores import FAISS

detector = RetinaFace()
recognizer = ArcFace()

@@ -112,7 +112,7 @@ python tools/faiss_search.py build --faces-dir dataset/ --db-path ./my_index
import cv2
from uniface.detection import RetinaFace
from uniface.recognition import ArcFace
from uniface.indexing import FAISS
from uniface.stores import FAISS

detector = RetinaFace()
recognizer = ArcFace()

@@ -143,7 +143,7 @@ python tools/faiss_search.py run --db-path ./my_index --source 0  # webcam

### Manage the index

```python
from uniface.indexing import FAISS
from uniface.stores import FAISS

store = FAISS(db_path="./my_index")
store.load()

@@ -160,7 +160,7 @@ store.save()

## See Also

- [Indexing Module](../modules/indexing.md) - Full `FAISS` API reference
- [Stores Module](../modules/stores.md) - Full `FAISS` API reference
- [Recognition Module](../modules/recognition.md) - Face recognition details
- [Video & Webcam](video-webcam.md) - Real-time processing
- [Concepts: Thresholds](../concepts/thresholds-calibration.md) - Tuning similarity thresholds
@@ -34,7 +34,7 @@ def process_image(image_path):
        embedding = recognizer.get_normalized_embedding(image, face.landmarks)

        # Step 3: Predict attributes
        attrs = age_gender.predict(image, face.bbox)
        attrs = age_gender.predict(image, face)

        results.append({
            'face_id': i,

@@ -48,12 +48,7 @@ def process_image(image_path):
        print(f"  Face {i+1}: {attrs.sex}, {attrs.age} years old")

    # Visualize
    draw_detections(
        image=image,
        bboxes=[f.bbox for f in faces],
        scores=[f.confidence for f in faces],
        landmarks=[f.landmarks for f in faces]
    )
    draw_detections(image=image, faces=faces)

    return image, results

@@ -83,7 +78,7 @@ age_gender = AgeGender()
analyzer = FaceAnalyzer(
    detector,
    recognizer=recognizer,
    age_gender=age_gender,
    attributes=[age_gender],
)

# Process image

@@ -109,11 +104,12 @@ import numpy as np
from uniface.attribute import AgeGender, FairFace
from uniface.detection import RetinaFace
from uniface.gaze import MobileGaze
from uniface.headpose import HeadPose
from uniface.landmark import Landmark106
from uniface.recognition import ArcFace
from uniface.parsing import BiSeNet
from uniface.spoofing import MiniFASNet
from uniface.draw import draw_detections, draw_gaze
from uniface.draw import draw_detections, draw_gaze, draw_head_pose

class FaceAnalysisPipeline:
    def __init__(self):

@@ -124,6 +120,7 @@ class FaceAnalysisPipeline:
        self.fairface = FairFace()
        self.landmarker = Landmark106()
        self.gaze = MobileGaze()
        self.head_pose = HeadPose()
        self.parser = BiSeNet()
        self.spoofer = MiniFASNet()

@@ -145,12 +142,12 @@ class FaceAnalysisPipeline:
            )

            # Attributes
            ag_result = self.age_gender.predict(image, face.bbox)
            ag_result = self.age_gender.predict(image, face)
            result['age'] = ag_result.age
            result['gender'] = ag_result.sex

            # FairFace attributes
            ff_result = self.fairface.predict(image, face.bbox)
            ff_result = self.fairface.predict(image, face)
            result['age_group'] = ff_result.age_group
            result['race'] = ff_result.race

@@ -167,6 +164,13 @@ class FaceAnalysisPipeline:
            result['gaze_pitch'] = gaze_result.pitch
            result['gaze_yaw'] = gaze_result.yaw

            # Head pose estimation
            if face_crop.size > 0:
                hp_result = self.head_pose.estimate(face_crop)
                result['head_pitch'] = hp_result.pitch
                result['head_yaw'] = hp_result.yaw
                result['head_roll'] = hp_result.roll

            # Face parsing
            if face_crop.size > 0:
                result['parsing_mask'] = self.parser.parse(face_crop)

@@ -189,6 +193,7 @@ for i, r in enumerate(results):
    print(f"  Gender: {r['gender']}, Age: {r['age']}")
    print(f"  Race: {r['race']}, Age Group: {r['age_group']}")
    print(f"  Gaze: pitch={np.degrees(r['gaze_pitch']):.1f}°")
    print(f"  Head Pose: P={r['head_pitch']:.1f}° Y={r['head_yaw']:.1f}° R={r['head_roll']:.1f}°")
    print(f"  Real: {r['is_real']} ({r['spoof_confidence']:.1%})")
```

@@ -220,7 +225,7 @@ def visualize_analysis(image_path, output_path):
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)

        # Age and gender
        attrs = age_gender.predict(image, face.bbox)
        attrs = age_gender.predict(image, face)
        label = f"{attrs.sex}, {attrs.age}y"
        cv2.putText(image, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

@@ -268,6 +273,11 @@ def results_to_json(results):
            'gaze': {
                'pitch_deg': float(np.degrees(r['gaze_pitch'])) if 'gaze_pitch' in r else None,
                'yaw_deg': float(np.degrees(r['gaze_yaw'])) if 'gaze_yaw' in r else None
            },
            'head_pose': {
                'pitch': float(r['head_pitch']) if 'head_pitch' in r else None,
                'yaw': float(r['head_yaw']) if 'head_yaw' in r else None,
                'roll': float(r['head_roll']) if 'head_roll' in r else None
            }
        }
        output.append(item)

@@ -291,3 +301,4 @@ with open('results.json', 'w') as f:

- [Face Search](face-search.md) - Build a search system
- [Detection Module](../modules/detection.md) - Detection options
- [Recognition Module](../modules/recognition.md) - Recognition details
- [Head Pose Module](../modules/headpose.md) - Head orientation estimation

@@ -26,12 +26,7 @@ while True:

    faces = detector.detect(frame)

    draw_detections(
        image=frame,
        bboxes=[f.bbox for f in faces],
        scores=[f.confidence for f in faces],
        landmarks=[f.landmarks for f in faces]
    )
    draw_detections(image=frame, faces=faces)

    cv2.imshow("Face Detection", frame)

@@ -175,3 +170,4 @@ while True:

- [Batch Processing](batch-processing.md) - Process multiple files
- [Detection Module](../modules/detection.md) - Detection options
- [Gaze Module](../modules/gaze.md) - Gaze estimation
- [Head Pose Module](../modules/headpose.md) - Head orientation estimation
223
examples/11_head_pose_estimation.ipynb
Normal file
@@ -0,0 +1,223 @@

{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Head Pose Estimation with UniFace\n",
    "\n",
    "<div style=\"display:flex; flex-wrap:wrap; align-items:center;\">\n",
    "  <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://pepy.tech/projects/uniface\"><img alt=\"PyPI Downloads\" src=\"https://static.pepy.tech/personalized-badge/uniface?period=total&units=international_system&left_color=grey&right_color=blue&left_text=Downloads\"></a>\n",
    "  <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://pypi.org/project/uniface/\"><img alt=\"PyPI Version\" src=\"https://img.shields.io/pypi/v/uniface.svg\"></a>\n",
    "  <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://opensource.org/licenses/MIT\"><img alt=\"License\" src=\"https://img.shields.io/badge/License-MIT-blue.svg\"></a>\n",
    "  <a style=\"margin-bottom:6px;\" href=\"https://github.com/yakhyo/uniface\"><img alt=\"GitHub Stars\" src=\"https://img.shields.io/github/stars/yakhyo/uniface.svg?style=social\"></a>\n",
    "</div>\n",
    "\n",
    "**UniFace** is a lightweight, production-ready Python library for face detection, recognition, tracking, landmark analysis, face parsing, gaze estimation, and face attributes.\n",
    "\n",
    "🔗 **GitHub**: [github.com/yakhyo/uniface](https://github.com/yakhyo/uniface) | 📚 **Docs**: [yakhyo.github.io/uniface](https://yakhyo.github.io/uniface)\n",
    "\n",
    "---\n",
    "\n",
    "This notebook demonstrates head pose estimation using the **UniFace** library.\n",
    "\n",
    "## 1. Install UniFace"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -q \"uniface[cpu]\"\n",
    "\n",
    "# Clone repo for assets (Colab only)\n",
    "import os\n",
    "if 'COLAB_GPU' in os.environ or 'COLAB_RELEASE_TAG' in os.environ:\n",
    "    if not os.path.exists('uniface'):\n",
    "        !git clone --depth 1 https://github.com/yakhyo/uniface.git\n",
    "    os.chdir('uniface/examples')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. Import Libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from pathlib import Path\n",
    "from PIL import Image\n",
    "\n",
    "import uniface\n",
    "from uniface.detection import RetinaFace\n",
    "from uniface.headpose import HeadPose\n",
    "from uniface.draw import draw_head_pose\n",
    "\n",
    "print(f\"UniFace version: {uniface.__version__}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. Initialize Models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize face detector\n",
    "detector = RetinaFace(confidence_threshold=0.5)\n",
    "\n",
    "# Initialize head pose estimator (default: ResNet18 backbone)\n",
    "head_pose = HeadPose()\n",
    "\n",
    "print(\"Models initialized successfully!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. Process All Test Images\n",
    "\n",
    "Display original images in the first row and head-pose-annotated images in the second row."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get all test images\n",
    "test_images_dir = Path('../assets/test_images')\n",
    "test_images = sorted(test_images_dir.glob('*.jpg'))\n",
    "\n",
    "original_images = []\n",
    "annotated_images = []\n",
    "\n",
    "for img_path in test_images:\n",
    "    image = cv2.imread(str(img_path))\n",
    "    if image is None:\n",
    "        continue\n",
    "\n",
    "    # Store original (BGR -> RGB for display)\n",
    "    original_images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n",
    "\n",
    "    # Detect faces and estimate head pose\n",
    "    faces = detector.detect(image)\n",
    "\n",
    "    for face in faces:\n",
    "        x1, y1, x2, y2 = map(int, face.bbox)\n",
    "        face_crop = image[y1:y2, x1:x2]\n",
    "\n",
    "        if face_crop.size == 0:\n",
    "            continue\n",
    "\n",
    "        result = head_pose.estimate(face_crop)\n",
    "        draw_head_pose(image, face.bbox, result.pitch, result.yaw, result.roll)\n",
    "\n",
    "        print(f\"{img_path.name}: pitch={result.pitch:.1f}°, yaw={result.yaw:.1f}°, roll={result.roll:.1f}°\")\n",
    "\n",
    "    annotated_images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n",
    "\n",
    "print(f\"\\nProcessed {len(original_images)} images\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5. Visualize Results\n",
    "\n",
    "**First row**: Original images  \n",
    "**Second row**: Images with head pose 3D cube overlay"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_images = len(original_images)\n",
    "\n",
    "# Create figure with 2 rows\n",
    "fig, axes = plt.subplots(2, num_images, figsize=(5 * num_images, 10))\n",
    "\n",
    "if num_images == 1:\n",
    "    axes = axes.reshape(2, 1)\n",
    "\n",
    "for i in range(num_images):\n",
    "    axes[0, i].imshow(original_images[i])\n",
    "    axes[0, i].set_title('Original', fontsize=12)\n",
    "    axes[0, i].axis('off')\n",
    "\n",
    "    axes[1, i].imshow(annotated_images[i])\n",
    "    axes[1, i].set_title('Head Pose', fontsize=12)\n",
    "    axes[1, i].axis('off')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Notes\n",
    "\n",
    "- **Input**: Head pose estimation requires a face crop (obtained from face detection)\n",
    "- **Output**: `HeadPoseResult` with pitch, yaw, and roll angles in **degrees**\n",
    "- **Visualization**: Two modes available — `'cube'` (3D wireframe) and `'axis'` (X/Y/Z coordinate axes)\n",
    "- **Models**: 6 backbone variants available via `HeadPoseWeights` enum\n",
    "- **Method**: Uses 6D rotation representation converted to Euler angles\n",
    "\n",
    "### Available Backbones\n",
    "\n",
    "```python\n",
    "from uniface.constants import HeadPoseWeights\n",
    "\n",
    "# Options: RESNET18, RESNET34, RESNET50, MOBILENET_V2, MOBILENET_V3_SMALL, MOBILENET_V3_LARGE\n",
    "head_pose = HeadPose(model_name=HeadPoseWeights.RESNET50)\n",
    "```"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
356
examples/12_face_recognition.ipynb
Normal file
@@ -0,0 +1,356 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Face Recognition: RetinaFace → Align → ArcFace\n",
|
||||
"\n",
|
||||
"<div style=\"display:flex; flex-wrap:wrap; align-items:center;\">\n",
|
||||
" <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://pepy.tech/projects/uniface\"><img alt=\"PyPI Downloads\" src=\"https://static.pepy.tech/personalized-badge/uniface?period=total&units=international_system&left_color=grey&right_color=blue&left_text=Downloads\"></a>\n",
|
||||
" <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://pypi.org/project/uniface/\"><img alt=\"PyPI Version\" src=\"https://img.shields.io/pypi/v/uniface.svg\"></a>\n",
|
||||
" <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://opensource.org/licenses/MIT\"><img alt=\"License\" src=\"https://img.shields.io/badge/License-MIT-blue.svg\"></a>\n",
|
||||
" <a style=\"margin-bottom:6px;\" href=\"https://github.com/yakhyo/uniface\"><img alt=\"GitHub Stars\" src=\"https://img.shields.io/github/stars/yakhyo/uniface.svg?style=social\"></a>\n",
|
||||
"</div>\n",
|
||||
"\n",
|
||||
"**UniFace** is a lightweight, production-ready Python library for face detection, recognition, tracking, landmark analysis, face parsing, gaze estimation, and face attributes.\n",
|
||||
"\n",
|
||||
"🔗 **GitHub**: [github.com/yakhyo/uniface](https://github.com/yakhyo/uniface) | 📚 **Docs**: [yakhyo.github.io/uniface](https://yakhyo.github.io/uniface)\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"This notebook demonstrates face recognition **without** the high-level `FaceAnalyzer` wrapper. Each step is handled manually:\n",
|
||||
"\n",
|
||||
"1. **RetinaFace**: Detects faces and extracts 5-point landmarks.\n",
|
||||
"2. **Face Alignment**: Warps each face into a standardized 112x112 crop using the landmarks.\n",
|
||||
"3. **ArcFace**: Generates a 512-D L2-normalized embedding from the aligned crop.\n",
|
||||
"\n",
|
||||
"We compare three test images: `image0.jpg`, `image1.jpg`, and `image5.jpg`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Install UniFace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -q \"uniface[cpu]\"\n",
|
||||
"\n",
|
||||
"# Clone repo for assets (Colab only)\n",
|
||||
"import os\n",
|
||||
"if 'COLAB_GPU' in os.environ or 'COLAB_RELEASE_TAG' in os.environ:\n",
|
||||
" if not os.path.exists('uniface'):\n",
|
||||
" !git clone --depth 1 https://github.com/yakhyo/uniface.git\n",
|
||||
" os.chdir('uniface/examples')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Import Libraries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import cv2\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import matplotlib.patches as patches\n",
|
||||
"\n",
|
||||
"import uniface\n",
|
||||
"from uniface.detection import RetinaFace\n",
|
||||
"from uniface.recognition import ArcFace\n",
|
||||
"from uniface.face_utils import face_alignment\n",
|
||||
"\n",
|
||||
"print(f\"UniFace version: {uniface.__version__}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Configuration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"IMAGE_PATHS = {\n",
|
||||
" \"image0\": \"../assets/test_images/image0.jpg\",\n",
|
||||
" \"image1\": \"../assets/test_images/image1.jpg\",\n",
|
||||
" \"image5\": \"../assets/test_images/image5.jpg\",\n",
|
||||
"}\n",
|
||||
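"# 0.4 is a common starting point for ArcFace cosine similarity; the best cutoff depends on your data.\n",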
"THRESHOLD = 0.4 # Cosine similarity threshold for \"same person\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Initialize Models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"detector = RetinaFace(confidence_threshold=0.5)\n",
|
||||
"recognizer = ArcFace()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5. Load Images & Detect Faces\n",
|
||||
"\n",
|
||||
"We use the detector to find faces and their landmarks in each image."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "10",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"images = {}\n",
|
||||
"faces = {}\n",
|
||||
"\n",
|
||||
"for name, path in IMAGE_PATHS.items():\n",
|
||||
" img = cv2.imread(path)\n",
|
||||
" if img is None:\n",
|
||||
" raise FileNotFoundError(f\"Cannot read: {path}\")\n",
|
||||
"\n",
|
||||
" detected = detector.detect(img)\n",
|
||||
" if not detected:\n",
|
||||
" raise RuntimeError(f\"No face detected in: {path}\")\n",
|
||||
"\n",
|
||||
" images[name] = img\n",
|
||||
" faces[name] = detected[0] # Keep highest-confidence face\n",
|
||||
" print(f\"{name:8s} | {len(detected)} face(s) detected | confidence={faces[name].confidence:.3f}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "11",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 6. Visualize Detections"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "12",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"LM_COLORS = [\"red\", \"blue\", \"green\", \"cyan\", \"magenta\"]\n",
|
||||
"\n",
|
||||
"fig, axes = plt.subplots(1, 3, figsize=(15, 5))\n",
|
||||
"fig.suptitle(\"Detected Faces & 5-Point Landmarks\", fontweight=\"bold\", fontsize=16)\n",
|
||||
"\n",
|
||||
"for ax, (name, img) in zip(axes, images.items()):\n",
|
||||
" face = faces[name]\n",
|
||||
" ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n",
|
||||
" ax.set_title(f\"{name}\\nconf={face.confidence:.3f}\", fontsize=12)\n",
|
||||
" ax.axis(\"off\")\n",
|
||||
"\n",
|
||||
" # Bounding box\n",
|
||||
" x1, y1, x2, y2 = face.bbox.astype(int)\n",
|
||||
" ax.add_patch(patches.Rectangle(\n",
|
||||
" (x1, y1), x2 - x1, y2 - y1,\n",
|
||||
" linewidth=2, edgecolor=\"lime\", facecolor=\"none\"))\n",
|
||||
"\n",
|
||||
" # Landmarks\n",
|
||||
" for (lx, ly), c in zip(face.landmarks, LM_COLORS):\n",
|
||||
" ax.plot(lx, ly, \"o\", color=c, markersize=6)\n",
|
||||
"\n",
|
||||
"plt.tight_layout()\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "13",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 7. Face Alignment\n",
|
||||
"\n",
|
||||
"We warp the detected faces into a standardized 112x112 size. This improves recognition accuracy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "14",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"aligned = {}\n",
|
||||
"\n",
|
||||
"for name, img in images.items():\n",
|
||||
" lm = faces[name].landmarks\n",
|
||||
" crop, _ = face_alignment(img, lm, image_size=(112, 112))\n",
|
||||
" aligned[name] = crop\n",
|
||||
"\n",
|
||||
"fig, axes = plt.subplots(1, 3, figsize=(12, 4))\n",
|
||||
"fig.suptitle(\"Aligned Face Crops (112x112)\", fontweight=\"bold\", fontsize=14)\n",
|
||||
"\n",
|
||||
"for ax, (name, crop) in zip(axes, aligned.items()):\n",
|
||||
" ax.imshow(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB))\n",
|
||||
" ax.set_title(name, fontsize=12)\n",
|
||||
" ax.axis(\"off\")\n",
|
||||
"\n",
|
||||
"plt.tight_layout()\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 8. Extract Embeddings\n",
|
||||
"\n",
|
||||
"We pass the aligned crops to ArcFace to get the 512-D vectors."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "16",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = {}\n",
|
||||
"\n",
|
||||
"for name, crop in aligned.items():\n",
|
||||
" # landmarks=None because image is already aligned\n",
|
||||
" emb = recognizer.get_normalized_embedding(crop, landmarks=None)\n",
|
||||
" embeddings[name] = emb\n",
|
||||
" print(f\"{name:8s} | embedding shape={emb.shape} | L2-norm={np.linalg.norm(emb):.4f}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 9. Pairwise Cosine Similarity\n",
|
||||
"\n",
|
||||
"Since embeddings are normalized, cosine similarity is just the dot product."
|
||||
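"\n",
"$$\\text{sim}(\\mathbf{a}, \\mathbf{b}) = \\frac{\\mathbf{a} \\cdot \\mathbf{b}}{\\|\\mathbf{a}\\| \\, \\|\\mathbf{b}\\|} = \\mathbf{a} \\cdot \\mathbf{b} \\quad \\text{when } \\|\\mathbf{a}\\| = \\|\\mathbf{b}\\| = 1$$"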
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"names = list(embeddings.keys())\n",
|
||||
"n = len(names)\n",
|
||||
"sim_matrix = np.zeros((n, n))\n",
|
||||
"\n",
|
||||
"for i, ni in enumerate(names):\n",
|
||||
" for j, nj in enumerate(names):\n",
|
||||
" # Use squeeze() to handle (1, 512) shapes if present\n",
|
||||
" sim_matrix[i, j] = float(np.dot(embeddings[ni].squeeze(), embeddings[nj].squeeze()))\n",
|
||||
"\n",
|
||||
"# Print comparison results\n",
|
||||
"pairs = [(names[i], names[j]) for i in range(n) for j in range(i + 1, n)]\n",
|
||||
"for a, b in pairs:\n",
|
||||
" s = float(np.dot(embeddings[a].squeeze(), embeddings[b].squeeze()))\n",
|
||||
" verdict = \"✓ Same person\" if s >= THRESHOLD else \"✗ Different people\"\n",
|
||||
" print(f\"{a} vs {b}: similarity={s:.4f} → {verdict}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "19",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 10. Similarity Heatmap"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "20",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fig, ax = plt.subplots(figsize=(8, 6))\n",
|
||||
"im = ax.imshow(sim_matrix, vmin=0, vmax=1, cmap=\"viridis\")\n",
|
||||
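"# Cosine similarity can dip slightly below 0 for unrelated faces; vmin=0 clips such values in the display\n",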
"plt.colorbar(im, ax=ax, label=\"Cosine similarity\")\n",
|
||||
"\n",
|
||||
"ax.set_xticks(range(n))\n",
|
||||
"ax.set_yticks(range(n))\n",
|
||||
"ax.set_xticklabels(names, rotation=30, ha=\"right\")\n",
|
||||
"ax.set_yticklabels(names)\n",
|
||||
"ax.set_title(\"Pairwise Face Similarity (ArcFace)\", fontweight=\"bold\")\n",
|
||||
"\n",
|
||||
"for i in range(n):\n",
|
||||
" for j in range(n):\n",
|
||||
" val = sim_matrix[i, j]\n",
|
||||
" ax.text(j, i, f\"{val:.2f}\",\n",
|
||||
" ha=\"center\", va=\"center\",\n",
|
||||
" color=\"black\" if val >= 0.6 else \"white\",\n",
|
||||
" fontsize=12, fontweight=\"bold\")\n",
|
||||
"\n",
|
||||
"plt.tight_layout()\n",
|
||||
"plt.show()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "base",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
265
examples/13_portrait_matting.ipynb
Normal file
@@ -0,0 +1,265 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Portrait Matting with MODNet\n",
|
||||
"\n",
|
||||
"<div style=\"display:flex; flex-wrap:wrap; align-items:center;\">\n",
|
||||
" <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://pepy.tech/projects/uniface\"><img alt=\"PyPI Downloads\" src=\"https://static.pepy.tech/personalized-badge/uniface?period=total&units=international_system&left_color=grey&right_color=blue&left_text=Downloads\"></a>\n",
|
||||
" <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://pypi.org/project/uniface/\"><img alt=\"PyPI Version\" src=\"https://img.shields.io/pypi/v/uniface.svg\"></a>\n",
|
||||
" <a style=\"margin-right:10px; margin-bottom:6px;\" href=\"https://opensource.org/licenses/MIT\"><img alt=\"License\" src=\"https://img.shields.io/badge/License-MIT-blue.svg\"></a>\n",
|
||||
" <a style=\"margin-bottom:6px;\" href=\"https://github.com/yakhyo/uniface\"><img alt=\"GitHub Stars\" src=\"https://img.shields.io/github/stars/yakhyo/uniface.svg?style=social\"></a>\n",
|
||||
"</div>\n",
|
||||
"\n",
|
||||
"**UniFace** is a lightweight, production-ready Python library for face detection, recognition, tracking, landmark analysis, face parsing, gaze estimation, and face attributes.\n",
|
||||
"\n",
|
||||
"🔗 **GitHub**: [github.com/yakhyo/uniface](https://github.com/yakhyo/uniface) | 📚 **Docs**: [yakhyo.github.io/uniface](https://yakhyo.github.io/uniface)\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"This notebook demonstrates portrait matting using **MODNet** — a trimap-free model that produces soft alpha mattes from full images. No face detection or cropping required.\n",
|
||||
"\n",
|
||||
"## 1. Install UniFace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -q \"uniface[cpu]\"\n",
|
||||
"\n",
|
||||
"# Clone repo for assets (Colab only)\n",
|
||||
"import os\n",
|
||||
"if 'COLAB_GPU' in os.environ or 'COLAB_RELEASE_TAG' in os.environ:\n",
|
||||
" if not os.path.exists('uniface'):\n",
|
||||
" !git clone --depth 1 https://github.com/yakhyo/uniface.git\n",
|
||||
" os.chdir('uniface/examples')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Import Libraries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import cv2\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"import uniface\n",
|
||||
"from uniface.matting import MODNet\n",
|
||||
"\n",
|
||||
"print(f\"UniFace version: {uniface.__version__}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Initialize Model\n",
|
||||
"\n",
|
||||
"MODNet has two variants:\n",
|
||||
"- **PHOTOGRAPHIC** (default): optimized for high-quality portrait photos\n",
|
||||
"- **WEBCAM**: optimized for real-time webcam feeds"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"matting = MODNet()\n",
|
||||
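"\n",
"# For real-time streams, the webcam-tuned variant can be selected instead:\n",
"# from uniface.constants import MODNetWeights\n",
"# matting = MODNet(model_name=MODNetWeights.WEBCAM)\n",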
"\n",
|
||||
"print(f\"Input size: {matting.input_size}\")\n",
|
||||
"print(f\"Input name: {matting.input_name}\")\n",
|
||||
"print(f\"Output names: {matting.output_names}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Helper Functions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def compose(image, matte, background=None):\n",
|
||||
" \"\"\"Composite foreground over a background using the alpha matte.\"\"\"\n",
|
||||
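"    # Standard alpha compositing: out = alpha * fg + (1 - alpha) * bg,\n",
"    # with the matte acting as a per-pixel alpha in [0, 1]\n",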
" h, w = image.shape[:2]\n",
|
||||
" matte_3ch = matte[:, :, np.newaxis]\n",
|
||||
"\n",
|
||||
" if background is None:\n",
|
||||
" bg = np.full_like(image, (0, 177, 64), dtype=np.uint8)\n",
|
||||
" else:\n",
|
||||
" bg = cv2.resize(background, (w, h), interpolation=cv2.INTER_AREA)\n",
|
||||
"\n",
|
||||
" return (image * matte_3ch + bg * (1 - matte_3ch)).astype(np.uint8)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def show_results(image, matte):\n",
|
||||
" \"\"\"Display original, matte, and green screen as a single merged image.\"\"\"\n",
|
||||
" matte_vis = cv2.cvtColor((matte * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)\n",
|
||||
" green = compose(image, matte)\n",
|
||||
" merged = np.hstack([image, matte_vis, green])\n",
|
||||
"\n",
|
||||
" plt.figure(figsize=(18, 6))\n",
|
||||
" plt.imshow(cv2.cvtColor(merged, cv2.COLOR_BGR2RGB))\n",
|
||||
" plt.axis(\"off\")\n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5. Basic Matting"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image = cv2.imread(\"../assets/demos/src_portrait1.jpg\")\n",
|
||||
"print(f\"Image shape: {image.shape}\")\n",
|
||||
"\n",
|
||||
"matte = matting.predict(image)\n",
|
||||
"print(f\"Matte shape: {matte.shape}\")\n",
|
||||
"print(f\"Matte dtype: {matte.dtype}\")\n",
|
||||
"print(f\"Matte range: [{matte.min():.3f}, {matte.max():.3f}]\")\n",
|
||||
"\n",
|
||||
"show_results(image, matte)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 6. Transparent Background (RGBA)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"alpha = (matte * 255).astype(np.uint8)\n",
|
||||
"rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)\n",
|
||||
"rgba[:, :, 3] = alpha\n",
|
||||
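"# The RGBA image keeps its transparency when saved as PNG, e.g.: cv2.imwrite(\"portrait_rgba.png\", rgba)\n",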
"\n",
|
||||
"# Checkerboard background to visualize transparency\n",
|
||||
"h, w = image.shape[:2]\n",
|
||||
"checker = np.zeros((h, w, 3), dtype=np.uint8)\n",
|
||||
"block = 20\n",
|
||||
"for y in range(0, h, block):\n",
|
||||
" for x in range(0, w, block):\n",
|
||||
" if (y // block + x // block) % 2 == 0:\n",
|
||||
" checker[y:y+block, x:x+block] = 200\n",
|
||||
" else:\n",
|
||||
" checker[y:y+block, x:x+block] = 255\n",
|
||||
"\n",
|
||||
"matte_3ch = matte[:, :, np.newaxis]\n",
|
||||
"rgba_vis = (image * matte_3ch + checker * (1 - matte_3ch)).astype(np.uint8)\n",
|
||||
"\n",
|
||||
"merged = np.hstack([image, rgba_vis])\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(16, 5))\n",
|
||||
"plt.imshow(cv2.cvtColor(merged, cv2.COLOR_BGR2RGB))\n",
|
||||
"plt.axis(\"off\")\n",
|
||||
"plt.tight_layout()\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"print(f\"RGBA shape: {rgba.shape}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 7. Custom Background"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a gradient background\n",
|
||||
"h, w = image.shape[:2]\n",
|
||||
"gradient = np.zeros((h, w, 3), dtype=np.uint8)\n",
|
||||
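"# Each row linearly blends the BGR channels from the top (ratio=0) to the bottom (ratio=1)\n",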
"for y in range(h):\n",
|
||||
" ratio = y / h\n",
|
||||
" gradient[y, :] = [int(180 * (1 - ratio)), int(100 + 80 * ratio), int(220 * ratio)]\n",
|
||||
"\n",
|
||||
"custom_bg = compose(image, matte, gradient)\n",
|
||||
"green_bg = compose(image, matte)\n",
|
||||
"\n",
|
||||
"merged = np.hstack([image, green_bg, custom_bg])\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(18, 6))\n",
|
||||
"plt.imshow(cv2.cvtColor(merged, cv2.COLOR_BGR2RGB))\n",
|
||||
"plt.axis(\"off\")\n",
|
||||
"plt.tight_layout()\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Summary\n",
|
||||
"\n",
|
||||
"MODNet provides trimap-free portrait matting:\n",
|
||||
"\n",
|
||||
"- **`predict(image)`** — returns `(H, W)` float32 alpha matte in `[0, 1]`\n",
|
||||
"- **No face detection needed** — works on full images directly\n",
|
||||
"- **Two variants** — `PHOTOGRAPHIC` for photos, `WEBCAM` for real-time\n",
|
||||
"- **Compositing** — use the matte for transparent PNGs, green screen, or custom backgrounds\n",
|
||||
"\n",
|
||||
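"A minimal end-to-end sketch (the image path is a placeholder):\n",
"\n",
"```python\n",
"import cv2\n",
"from uniface.matting import MODNet\n",
"\n",
"matting = MODNet()\n",
"image = cv2.imread(\"portrait.jpg\")\n",
"matte = matting.predict(image)  # (H, W) float32 alpha matte in [0, 1]\n",
"```\n",
"\n",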
"For more details, see the [Matting docs](https://yakhyo.github.io/uniface/modules/matting/)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "base",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
site_name: UniFace
|
||||
site_description: All-in-One Face Analysis Library with ONNX Runtime
|
||||
site_description: A Unified Face Analysis Library for Python
|
||||
site_author: Yakhyokhuja Valikhujaev
|
||||
site_url: https://yakhyo.github.io/uniface
|
||||
|
||||
@@ -150,10 +150,12 @@ nav:
|
||||
- Landmarks: modules/landmarks.md
|
||||
- Attributes: modules/attributes.md
|
||||
- Parsing: modules/parsing.md
|
||||
- Matting: modules/matting.md
|
||||
- Gaze: modules/gaze.md
|
||||
- Head Pose: modules/headpose.md
|
||||
- Anti-Spoofing: modules/spoofing.md
|
||||
- Privacy: modules/privacy.md
|
||||
- Indexing: modules/indexing.md
|
||||
- Stores: modules/stores.md
|
||||
- Guides:
|
||||
- Overview: concepts/overview.md
|
||||
- Inputs & Outputs: concepts/inputs-outputs.md
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[project]
|
||||
name = "uniface"
|
||||
version = "3.1.0"
|
||||
description = "UniFace: A Comprehensive Library for Face Detection, Recognition, Tracking, Landmark Analysis, Face Parsing, Gaze Estimation, Age, and Gender Detection"
|
||||
version = "3.5.3"
|
||||
description = "UniFace: A Unified Face Analysis Library for Python"
|
||||
readme = "README.md"
|
||||
license = "MIT"
|
||||
authors = [{ name = "Yakhyokhuja Valikhujaev", email = "yakhyo9696@gmail.com" }]
|
||||
@@ -9,7 +9,7 @@ maintainers = [
|
||||
{ name = "Yakhyokhuja Valikhujaev", email = "yakhyo9696@gmail.com" },
|
||||
]
|
||||
|
||||
requires-python = ">=3.10,<3.14"
|
||||
requires-python = ">=3.10,<3.15"
|
||||
keywords = [
|
||||
"face-detection",
|
||||
"face-recognition",
|
||||
@@ -38,21 +38,28 @@ classifiers = [
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Programming Language :: Python :: 3.13",
|
||||
"Programming Language :: Python :: 3.14",
|
||||
]
|
||||
|
||||
dependencies = [
|
||||
"numpy>=1.21.0",
|
||||
"opencv-python>=4.5.0",
|
||||
"onnxruntime>=1.16.0",
|
||||
"scikit-image>=0.19.0",
|
||||
"scikit-image>=0.22.0",
|
||||
"scipy>=1.7.0",
|
||||
"requests>=2.28.0",
|
||||
"tqdm>=4.64.0",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
dev = ["pytest>=7.0.0", "ruff>=0.4.0"]
|
||||
cpu = ["onnxruntime>=1.16.0"]
|
||||
gpu = ["onnxruntime-gpu>=1.16.0"]
|
||||
dev = ["pytest>=7.0.0", "ruff>=0.4.0", "pre-commit>=3.0.0"]
|
||||
docs = [
|
||||
"mkdocs-material>=9.0",
|
||||
"pymdown-extensions>=10.0",
|
||||
"mkdocs-git-committers-plugin-2>=1.0",
|
||||
"mkdocs-git-revision-date-localized-plugin>=2.0",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/yakhyo/uniface"
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
numpy>=1.21.0
|
||||
opencv-python>=4.5.0
|
||||
onnxruntime>=1.16.0
|
||||
scikit-image>=0.19.0
|
||||
scikit-image>=0.22.0
|
||||
scipy>=1.7.0
|
||||
requests>=2.28.0
|
||||
tqdm>=4.64.0
|
||||
# Install ONE of the following (not both):
|
||||
# onnxruntime>=1.16.0 # CPU / Apple Silicon → pip install uniface[cpu]
|
||||
# onnxruntime-gpu>=1.16.0 # NVIDIA CUDA → pip install uniface[gpu]
|
||||
|
||||
@@ -9,6 +9,14 @@ import numpy as np
|
||||
import pytest
|
||||
|
||||
from uniface.attribute import AgeGender, AttributeResult
|
||||
from uniface.types import Face
|
||||
|
||||
|
||||
def _make_face(bbox: list[int] | np.ndarray) -> Face:
|
||||
"""Helper: build a minimal Face from a bounding box."""
|
||||
bbox = np.asarray(bbox)
|
||||
landmarks = np.zeros((5, 2), dtype=np.float32)
|
||||
return Face(bbox=bbox, confidence=0.99, landmarks=landmarks)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -22,30 +30,30 @@ def mock_image():
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_bbox():
|
||||
return [100, 100, 300, 300]
|
||||
def mock_face():
|
||||
return _make_face([100, 100, 300, 300])
|
||||
|
||||
|
||||
def test_model_initialization(age_gender_model):
|
||||
assert age_gender_model is not None, 'AgeGender model initialization failed.'
|
||||
|
||||
|
||||
def test_prediction_output_format(age_gender_model, mock_image, mock_bbox):
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
def test_prediction_output_format(age_gender_model, mock_image, mock_face):
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
assert isinstance(result, AttributeResult), f'Result should be AttributeResult, got {type(result)}'
|
||||
assert isinstance(result.gender, int), f'Gender should be int, got {type(result.gender)}'
|
||||
assert isinstance(result.age, int), f'Age should be int, got {type(result.age)}'
|
||||
assert isinstance(result.sex, str), f'Sex should be str, got {type(result.sex)}'
|
||||
|
||||
|
||||
def test_gender_values(age_gender_model, mock_image, mock_bbox):
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
def test_gender_values(age_gender_model, mock_image, mock_face):
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
assert result.gender in [0, 1], f'Gender should be 0 (Female) or 1 (Male), got {result.gender}'
|
||||
assert result.sex in ['Female', 'Male'], f'Sex should be Female or Male, got {result.sex}'
|
||||
|
||||
|
||||
def test_age_range(age_gender_model, mock_image, mock_bbox):
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
def test_age_range(age_gender_model, mock_image, mock_face):
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
assert 0 <= result.age <= 120, f'Age should be between 0 and 120, got {result.age}'
|
||||
|
||||
|
||||
@@ -57,39 +65,52 @@ def test_different_bbox_sizes(age_gender_model, mock_image):
|
||||
]
|
||||
|
||||
for bbox in test_bboxes:
|
||||
result = age_gender_model.predict(mock_image, bbox)
|
||||
face = _make_face(bbox)
|
||||
result = age_gender_model.predict(mock_image, face)
|
||||
assert result.gender in [0, 1], f'Failed for bbox {bbox}'
|
||||
assert 0 <= result.age <= 120, f'Age out of range for bbox {bbox}'
|
||||
|
||||
|
||||
def test_different_image_sizes(age_gender_model, mock_bbox):
|
||||
def test_different_image_sizes(age_gender_model):
|
||||
test_sizes = [(480, 640, 3), (720, 1280, 3), (1080, 1920, 3)]
|
||||
face = _make_face([100, 100, 300, 300])
|
||||
|
||||
for size in test_sizes:
|
||||
mock_image = np.random.randint(0, 255, size, dtype=np.uint8)
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
result = age_gender_model.predict(mock_image, face)
|
||||
assert result.gender in [0, 1], f'Failed for image size {size}'
|
||||
assert 0 <= result.age <= 120, f'Age out of range for image size {size}'
|
||||
|
||||
|
||||
def test_consistency(age_gender_model, mock_image, mock_bbox):
|
||||
result1 = age_gender_model.predict(mock_image, mock_bbox)
|
||||
result2 = age_gender_model.predict(mock_image, mock_bbox)
|
||||
def test_consistency(age_gender_model, mock_image, mock_face):
|
||||
result1 = age_gender_model.predict(mock_image, mock_face)
|
||||
result2 = age_gender_model.predict(mock_image, mock_face)
|
||||
|
||||
assert result1.gender == result2.gender, 'Same input should produce same gender prediction'
|
||||
assert result1.age == result2.age, 'Same input should produce same age prediction'
|
||||
|
||||
|
||||
def test_face_enrichment(age_gender_model, mock_image, mock_face):
|
||||
"""predict() must write gender & age back to the Face object."""
|
||||
assert mock_face.gender is None
|
||||
assert mock_face.age is None
|
||||
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
|
||||
assert mock_face.gender == result.gender
|
||||
assert mock_face.age == result.age
|
||||
|
||||
|
||||
def test_bbox_list_format(age_gender_model, mock_image):
|
||||
bbox_list = [100, 100, 300, 300]
|
||||
result = age_gender_model.predict(mock_image, bbox_list)
|
||||
face = _make_face([100, 100, 300, 300])
|
||||
result = age_gender_model.predict(mock_image, face)
|
||||
assert result.gender in [0, 1], 'Should work with bbox as list'
|
||||
assert 0 <= result.age <= 120, 'Age should be in valid range'
|
||||
|
||||
|
||||
def test_bbox_array_format(age_gender_model, mock_image):
|
||||
bbox_array = np.array([100, 100, 300, 300])
|
||||
result = age_gender_model.predict(mock_image, bbox_array)
|
||||
face = _make_face(np.array([100, 100, 300, 300]))
|
||||
result = age_gender_model.predict(mock_image, face)
|
||||
assert result.gender in [0, 1], 'Should work with bbox as numpy array'
|
||||
assert 0 <= result.age <= 120, 'Age should be in valid range'
|
||||
|
||||
@@ -103,7 +124,8 @@ def test_multiple_predictions(age_gender_model, mock_image):
|
||||
|
||||
results = []
|
||||
for bbox in bboxes:
|
||||
result = age_gender_model.predict(mock_image, bbox)
|
||||
face = _make_face(bbox)
|
||||
result = age_gender_model.predict(mock_image, face)
|
||||
results.append(result)
|
||||
|
||||
assert len(results) == 3, 'Should have 3 predictions'
|
||||
@@ -112,28 +134,26 @@ def test_multiple_predictions(age_gender_model, mock_image):
|
||||
assert 0 <= result.age <= 120
|
||||
|
||||
|
||||
def test_age_is_positive(age_gender_model, mock_image, mock_bbox):
|
||||
def test_age_is_positive(age_gender_model, mock_image, mock_face):
|
||||
for _ in range(5):
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
assert result.age >= 0, f'Age should be non-negative, got {result.age}'
|
||||
|
||||
|
||||
def test_output_format_for_visualization(age_gender_model, mock_image, mock_bbox):
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
def test_output_format_for_visualization(age_gender_model, mock_image, mock_face):
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
text = f'{result.sex}, {result.age}y'
|
||||
assert isinstance(text, str), 'Should be able to format as string'
|
||||
assert 'Male' in text or 'Female' in text, 'Text should contain gender'
|
||||
assert 'y' in text, "Text should contain 'y' for years"
|
||||
|
||||
|
||||
def test_attribute_result_fields(age_gender_model, mock_image, mock_bbox):
|
||||
def test_attribute_result_fields(age_gender_model, mock_image, mock_face):
|
||||
"""Test that AttributeResult has correct fields for AgeGender model."""
|
||||
result = age_gender_model.predict(mock_image, mock_bbox)
|
||||
result = age_gender_model.predict(mock_image, mock_face)
|
||||
|
||||
# AgeGender should set gender and age
|
||||
assert result.gender is not None
|
||||
assert result.age is not None
|
||||
|
||||
# AgeGender should NOT set race and age_group (FairFace only)
|
||||
assert result.race is None
|
||||
assert result.age_group is None
|
||||
|
||||
@@ -9,12 +9,14 @@ import numpy as np
|
||||
import pytest
|
||||
|
||||
from uniface import (
|
||||
create_attribute_predictor,
|
||||
create_detector,
|
||||
create_landmarker,
|
||||
create_recognizer,
|
||||
list_available_detectors,
|
||||
)
|
||||
from uniface.constants import RetinaFaceWeights, SCRFDWeights
|
||||
from uniface.attribute import AgeGender, FairFace
|
||||
from uniface.constants import AgeGenderWeights, FairFaceWeights, RetinaFaceWeights, SCRFDWeights
|
||||
from uniface.spoofing import MiniFASNet, create_spoofer
|
||||
|
||||
|
||||
@@ -89,6 +91,12 @@ def test_create_recognizer_sphereface():
|
||||
assert recognizer is not None, 'Failed to create SphereFace recognizer'
|
||||
|
||||
|
||||
def test_create_recognizer_edgeface():
|
||||
"""Test creating an EdgeFace recognizer using factory function."""
|
||||
recognizer = create_recognizer('edgeface')
|
||||
assert recognizer is not None, 'Failed to create EdgeFace recognizer'
|
||||
|
||||
|
||||
def test_create_recognizer_invalid_method():
|
||||
"""
|
||||
Test that invalid recognizer method raises an error.
|
||||
@@ -165,7 +173,7 @@ def test_recognizer_inference_from_factory():
|
||||
|
||||
embedding = recognizer.get_embedding(mock_image)
|
||||
assert embedding is not None, 'Recognizer should return embedding'
|
||||
assert embedding.shape[1] == 512, 'Should return 512-dimensional embedding'
|
||||
assert embedding.shape == (1, 512), 'get_embedding should return (1, 512) with batch dimension'
|
||||
|
||||
|
||||
def test_landmarker_inference_from_factory():
|
||||
@@ -236,3 +244,19 @@ def test_create_spoofer_with_providers():
|
||||
"""Test that create_spoofer forwards providers kwarg without TypeError."""
|
||||
spoofer = create_spoofer(providers=['CPUExecutionProvider'])
|
||||
assert isinstance(spoofer, MiniFASNet), 'Should return MiniFASNet instance'
|
||||
|
||||
|
||||
# create_attribute_predictor tests
|
||||
def test_create_attribute_predictor_age_gender():
|
||||
predictor = create_attribute_predictor(AgeGenderWeights.DEFAULT)
|
||||
assert isinstance(predictor, AgeGender), 'Should return AgeGender instance'
|
||||
|
||||
|
||||
def test_create_attribute_predictor_fairface():
|
||||
predictor = create_attribute_predictor(FairFaceWeights.DEFAULT)
|
||||
assert isinstance(predictor, FairFace), 'Should return FairFace instance'
|
||||
|
||||
|
||||
def test_create_attribute_predictor_invalid():
|
||||
with pytest.raises(ValueError, match='Unsupported attribute model'):
|
||||
create_attribute_predictor('invalid_model')
|
||||
|
||||
115
tests/test_headpose.py
Normal file
@@ -0,0 +1,115 @@
|
||||
# Copyright 2025-2026 Yakhyokhuja Valikhujaev
|
||||
# Author: Yakhyokhuja Valikhujaev
|
||||
# GitHub: https://github.com/yakhyo
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from uniface import HeadPose, HeadPoseResult, create_head_pose_estimator
|
||||
from uniface.headpose import BaseHeadPoseEstimator
|
||||
from uniface.headpose.models import HeadPose as HeadPoseModel
|
||||
|
||||
|
||||
def test_create_head_pose_estimator_default():
|
||||
"""Test creating a head pose estimator with default parameters."""
|
||||
estimator = create_head_pose_estimator()
|
||||
assert isinstance(estimator, HeadPose), 'Should return HeadPose instance'
|
||||
|
||||
|
||||
def test_create_head_pose_estimator_aliases():
|
||||
"""Test that factory accepts all documented aliases."""
|
||||
for alias in ('headpose', 'head_pose', '6drepnet'):
|
||||
estimator = create_head_pose_estimator(alias)
|
||||
assert isinstance(estimator, HeadPose), f"Alias '{alias}' should return HeadPose"
|
||||
|
||||
|
||||
def test_create_head_pose_estimator_invalid():
|
||||
"""Test that invalid method raises ValueError."""
|
||||
with pytest.raises(ValueError, match='Unsupported head pose estimation method'):
|
||||
create_head_pose_estimator('invalid_method')
|
||||
|
||||
|
||||
def test_head_pose_inference():
|
||||
"""Test that HeadPose can run inference on a mock image."""
|
||||
estimator = HeadPose()
|
||||
mock_image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
|
||||
result = estimator.estimate(mock_image)
|
||||
|
||||
assert isinstance(result, HeadPoseResult), 'Should return HeadPoseResult'
|
||||
assert isinstance(result.pitch, float), 'pitch should be float'
|
||||
assert isinstance(result.yaw, float), 'yaw should be float'
|
||||
assert isinstance(result.roll, float), 'roll should be float'
|
||||
|
||||
|
||||
def test_head_pose_callable():
|
||||
"""Test that HeadPose is callable via __call__."""
|
||||
estimator = HeadPose()
|
||||
mock_image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
|
||||
result = estimator(mock_image)
|
||||
|
||||
assert isinstance(result, HeadPoseResult), '__call__ should return HeadPoseResult'
|
||||
|
||||
|
||||
def test_head_pose_result_repr():
|
||||
"""Test HeadPoseResult repr formatting."""
|
||||
result = HeadPoseResult(pitch=10.5, yaw=-20.3, roll=5.1)
|
||||
repr_str = repr(result)
|
||||
assert 'HeadPoseResult' in repr_str
|
||||
assert '10.5' in repr_str
|
||||
assert '-20.3' in repr_str
|
||||
assert '5.1' in repr_str
|
||||
|
||||
|
||||
def test_head_pose_result_frozen():
|
||||
"""Test that HeadPoseResult is immutable."""
|
||||
result = HeadPoseResult(pitch=1.0, yaw=2.0, roll=3.0)
|
||||
with pytest.raises(AttributeError):
|
||||
result.pitch = 99.0 # type: ignore[misc]
|
||||
|
||||
|
||||
def test_rotation_matrix_to_euler_identity():
|
||||
"""Test that identity rotation matrix gives zero angles."""
|
||||
identity = np.eye(3).reshape(1, 3, 3)
|
||||
euler = HeadPoseModel.rotation_matrix_to_euler(identity)
|
||||
|
||||
assert euler.shape == (1, 3), 'Should return (1, 3) shaped array'
|
||||
np.testing.assert_allclose(euler[0], [0.0, 0.0, 0.0], atol=1e-5)
|
||||
|
||||
|
||||
def test_rotation_matrix_to_euler_90deg_yaw():
|
||||
"""Test 90-degree yaw rotation."""
|
||||
angle = np.radians(90)
|
||||
R = np.array(
|
||||
[
|
||||
[np.cos(angle), 0, np.sin(angle)],
|
||||
[0, 1, 0],
|
||||
[-np.sin(angle), 0, np.cos(angle)],
|
||||
]
|
||||
).reshape(1, 3, 3)
|
||||
euler = HeadPoseModel.rotation_matrix_to_euler(R)
|
||||
|
||||
np.testing.assert_allclose(euler[0, 1], 90.0, atol=1e-3)
|
||||
|
||||
|
||||
def test_rotation_matrix_to_euler_batch():
|
||||
"""Test batch processing of rotation matrices."""
|
||||
batch = np.stack([np.eye(3), np.eye(3), np.eye(3)], axis=0)
|
||||
euler = HeadPoseModel.rotation_matrix_to_euler(batch)
|
||||
|
||||
assert euler.shape == (3, 3), 'Batch of 3 should return (3, 3)'
|
||||
np.testing.assert_allclose(euler, 0.0, atol=1e-5)
|
||||
|
||||
|
||||
def test_factory_returns_correct_type():
|
||||
"""Test that factory function returns BaseHeadPoseEstimator subclass."""
|
||||
estimator = create_head_pose_estimator()
|
||||
assert isinstance(estimator, BaseHeadPoseEstimator), 'Should be BaseHeadPoseEstimator subclass'
|
||||
|
||||
|
||||
def test_head_pose_with_providers():
|
||||
"""Test that HeadPose accepts providers kwarg."""
|
||||
estimator = HeadPose(providers=['CPUExecutionProvider'])
|
||||
assert isinstance(estimator, HeadPose), 'Should create with explicit providers'
|
||||
158
tests/test_matting.py
Normal file
@@ -0,0 +1,158 @@
|
||||
# Copyright 2025-2026 Yakhyokhuja Valikhujaev
|
||||
# Author: Yakhyokhuja Valikhujaev
|
||||
# GitHub: https://github.com/yakhyo
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from uniface.constants import MODNetWeights
|
||||
from uniface.matting import MODNet, create_matting_model
|
||||
|
||||
|
||||
def test_modnet_initialization():
|
||||
"""Test MODNet initialization with default weights."""
|
||||
matting = MODNet()
|
||||
assert matting is not None
|
||||
assert matting.input_size == 512
|
||||
|
||||
|
||||
def test_modnet_with_webcam_weights():
|
||||
"""Test MODNet initialization with webcam variant."""
|
||||
matting = MODNet(model_name=MODNetWeights.WEBCAM)
|
||||
assert matting is not None
|
||||
assert matting.input_size == 512
|
||||
|
||||
|
||||
def test_modnet_custom_input_size():
|
||||
"""Test MODNet with custom input size."""
|
||||
matting = MODNet(input_size=256)
|
||||
assert matting.input_size == 256
|
||||
|
||||
|
||||
def test_modnet_preprocess():
|
||||
"""Test preprocessing produces correct tensor shape and dtype."""
|
||||
matting = MODNet()
|
||||
|
||||
image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
|
||||
tensor, orig_h, orig_w = matting.preprocess(image)
|
||||
|
||||
assert tensor.dtype == np.float32
|
||||
assert tensor.ndim == 4
|
||||
assert tensor.shape[0] == 1
|
||||
assert tensor.shape[1] == 3
|
||||
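    # Spatial dims should be multiples of 32 after preprocessing (presumably MODNet's stride constraint)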
assert tensor.shape[2] % 32 == 0
|
||||
assert tensor.shape[3] % 32 == 0
|
||||
assert orig_h == 480
|
||||
assert orig_w == 640
|
||||
|
||||
|
||||
def test_modnet_preprocess_small_image():
|
||||
"""Test preprocessing with image smaller than input_size."""
|
||||
matting = MODNet(input_size=512)
|
||||
|
||||
image = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
|
||||
tensor, orig_h, orig_w = matting.preprocess(image)
|
||||
|
||||
assert tensor.shape[2] % 32 == 0
|
||||
assert tensor.shape[3] % 32 == 0
|
||||
assert orig_h == 128
|
||||
assert orig_w == 128
|
||||
|
||||
|
||||
def test_modnet_preprocess_large_image():
|
||||
"""Test preprocessing with image larger than input_size."""
|
||||
matting = MODNet(input_size=512)
|
||||
|
||||
image = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
|
||||
tensor, orig_h, orig_w = matting.preprocess(image)
|
||||
|
||||
assert tensor.shape[2] % 32 == 0
|
||||
assert tensor.shape[3] % 32 == 0
|
||||
assert orig_h == 1080
|
||||
assert orig_w == 1920
|
||||
|
||||
|
||||
def test_modnet_postprocess():
|
||||
"""Test postprocessing resizes matte to original dimensions."""
|
||||
matting = MODNet()
|
||||
|
||||
dummy_output = np.random.rand(1, 1, 512, 672).astype(np.float32)
|
||||
matte = matting.postprocess(dummy_output, original_size=(640, 480))
|
||||
|
||||
assert matte.shape == (480, 640)
|
||||
assert matte.dtype == np.float32
|
||||
|
||||
|
||||
def test_modnet_predict():
|
||||
"""Test end-to-end prediction."""
|
||||
matting = MODNet()
|
||||
|
||||
image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
|
||||
matte = matting.predict(image)
|
||||
|
||||
assert matte.shape == (480, 640)
|
||||
assert matte.dtype == np.float32
|
||||
assert matte.min() >= 0.0
|
||||
assert matte.max() <= 1.0
|
||||
|
||||
|
||||
def test_modnet_callable():
|
||||
"""Test that MODNet is callable via __call__."""
|
||||
matting = MODNet()
|
||||
image = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
|
||||
|
||||
matte = matting(image)
|
||||
|
||||
assert matte.shape == (256, 256)
|
||||
assert matte.dtype == np.float32
|
||||
|
||||
|
||||
def test_modnet_different_input_sizes():
|
||||
"""Test prediction with various image dimensions."""
|
||||
matting = MODNet()
|
||||
|
||||
sizes = [(256, 256), (480, 640), (720, 1280), (300, 500)]
|
||||
|
||||
for h, w in sizes:
|
||||
image = np.random.randint(0, 255, (h, w, 3), dtype=np.uint8)
|
||||
matte = matting.predict(image)
|
||||
|
||||
assert matte.shape == (h, w), f'Failed for size {h}x{w}'
|
||||
assert matte.dtype == np.float32
|
||||
|
||||
|
||||
# Factory tests
|
||||
|
||||
|
||||
def test_create_matting_model_default():
|
||||
"""Test factory with default parameters."""
|
||||
matting = create_matting_model()
|
||||
assert matting is not None
|
||||
assert isinstance(matting, MODNet)
|
||||
|
||||
|
||||
def test_create_matting_model_with_enum():
|
||||
"""Test factory with enum."""
|
||||
matting = create_matting_model(MODNetWeights.WEBCAM)
|
||||
assert isinstance(matting, MODNet)
|
||||
|
||||
|
||||
def test_create_matting_model_with_string():
|
||||
"""Test factory with string model name."""
|
||||
matting = create_matting_model('modnet_photographic')
|
||||
assert isinstance(matting, MODNet)
|
||||
|
||||
|
||||
def test_create_matting_model_webcam_string():
|
||||
"""Test factory with webcam string model name."""
|
||||
matting = create_matting_model('modnet_webcam')
|
||||
assert isinstance(matting, MODNet)
|
||||
|
||||
|
||||
def test_create_matting_model_invalid():
|
||||
"""Test factory with invalid model name."""
|
||||
with pytest.raises(ValueError, match='Unknown matting model'):
|
||||
create_matting_model('invalid_model')
|
||||
@@ -8,7 +8,7 @@ from __future__ import annotations
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from uniface.recognition import ArcFace, MobileFace, SphereFace
|
||||
from uniface.recognition import ArcFace, EdgeFace, MobileFace, SphereFace
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -35,6 +35,12 @@ def sphereface_model():
|
||||
return SphereFace()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def edgeface_model():
|
||||
"""Fixture to initialize the EdgeFace model for testing."""
|
||||
return EdgeFace()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_aligned_face():
|
||||
"""
|
||||
@@ -74,7 +80,7 @@ def test_arcface_embedding_shape(arcface_model, mock_aligned_face):
|
||||
"""
|
||||
embedding = arcface_model.get_embedding(mock_aligned_face)
|
||||
|
||||
# ArcFace typically produces 512-dimensional embeddings
|
||||
# ArcFace get_embedding returns raw ONNX output with batch dimension
|
||||
assert embedding.shape[1] == 512, f'Expected 512-dim embedding, got {embedding.shape[1]}'
|
||||
assert embedding.shape[0] == 1, 'Embedding should have batch dimension of 1'
|
||||
|
||||
@@ -88,7 +94,8 @@ def test_arcface_normalized_embedding(arcface_model, mock_landmarks):
|
||||
|
||||
embedding = arcface_model.get_normalized_embedding(mock_image, mock_landmarks)
|
||||
|
||||
# Check that embedding is normalized (L2 norm ≈ 1.0)
|
||||
# Check shape and normalization
|
||||
assert embedding.shape == (512,), f'Expected shape (512,), got {embedding.shape}'
|
||||
norm = np.linalg.norm(embedding)
|
||||
assert np.isclose(norm, 1.0, atol=1e-5), f'Normalized embedding should have norm 1.0, got {norm}'
|
||||
|
||||
@@ -125,7 +132,7 @@ def test_mobileface_embedding_shape(mobileface_model, mock_aligned_face):
|
||||
"""
|
||||
embedding = mobileface_model.get_embedding(mock_aligned_face)
|
||||
|
||||
# MobileFace typically produces 512-dimensional embeddings
|
||||
# MobileFace get_embedding returns raw ONNX output with batch dimension
|
||||
assert embedding.shape[1] == 512, f'Expected 512-dim embedding, got {embedding.shape[1]}'
|
||||
assert embedding.shape[0] == 1, 'Embedding should have batch dimension of 1'
|
||||
|
||||
@@ -138,6 +145,7 @@ def test_mobileface_normalized_embedding(mobileface_model, mock_landmarks):
|
||||
|
||||
embedding = mobileface_model.get_normalized_embedding(mock_image, mock_landmarks)
|
||||
|
||||
assert embedding.shape == (512,), f'Expected shape (512,), got {embedding.shape}'
|
||||
norm = np.linalg.norm(embedding)
|
||||
assert np.isclose(norm, 1.0, atol=1e-5), f'Normalized embedding should have norm 1.0, got {norm}'
|
||||
|
||||
@@ -156,7 +164,7 @@ def test_sphereface_embedding_shape(sphereface_model, mock_aligned_face):
|
||||
"""
|
||||
embedding = sphereface_model.get_embedding(mock_aligned_face)
|
||||
|
||||
# SphereFace typically produces 512-dimensional embeddings
|
||||
# SphereFace get_embedding returns raw ONNX output with batch dimension
|
||||
assert embedding.shape[1] == 512, f'Expected 512-dim embedding, got {embedding.shape[1]}'
|
||||
assert embedding.shape[0] == 1, 'Embedding should have batch dimension of 1'
|
||||
|
||||
@@ -169,10 +177,50 @@ def test_sphereface_normalized_embedding(sphereface_model, mock_landmarks):
|
||||
|
||||
embedding = sphereface_model.get_normalized_embedding(mock_image, mock_landmarks)
|
||||
|
||||
assert embedding.shape == (512,), f'Expected shape (512,), got {embedding.shape}'
|
||||
norm = np.linalg.norm(embedding)
|
||||
assert np.isclose(norm, 1.0, atol=1e-5), f'Normalized embedding should have norm 1.0, got {norm}'
|
||||
|
||||
|
||||
# EdgeFace Tests
|
||||
def test_edgeface_initialization(edgeface_model):
|
||||
"""Test that the EdgeFace model initializes correctly."""
|
||||
assert edgeface_model is not None, 'EdgeFace model initialization failed.'
|
||||
|
||||
|
||||
def test_edgeface_embedding_shape(edgeface_model, mock_aligned_face):
|
||||
"""Test that EdgeFace produces embeddings with the correct shape."""
|
||||
embedding = edgeface_model.get_embedding(mock_aligned_face)
|
||||
|
||||
assert embedding.shape[1] == 512, f'Expected 512-dim embedding, got {embedding.shape[1]}'
|
||||
assert embedding.shape[0] == 1, 'Embedding should have batch dimension of 1'
|
||||
|
||||
|
||||
def test_edgeface_normalized_embedding(edgeface_model, mock_landmarks):
|
||||
"""Test that EdgeFace normalized embeddings have unit length."""
|
||||
mock_image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
|
||||
|
||||
embedding = edgeface_model.get_normalized_embedding(mock_image, mock_landmarks)
|
||||
|
||||
assert embedding.shape == (512,), f'Expected shape (512,), got {embedding.shape}'
|
||||
norm = np.linalg.norm(embedding)
|
||||
assert np.isclose(norm, 1.0, atol=1e-5), f'Normalized embedding should have norm 1.0, got {norm}'
|
||||
|
||||
|
||||
def test_edgeface_embedding_dtype(edgeface_model, mock_aligned_face):
|
||||
"""Test that EdgeFace embeddings have the correct data type."""
|
||||
embedding = edgeface_model.get_embedding(mock_aligned_face)
|
||||
assert embedding.dtype == np.float32, f'Expected float32, got {embedding.dtype}'
|
||||
|
||||
|
||||
def test_edgeface_consistency(edgeface_model, mock_aligned_face):
|
||||
"""Test that the same input produces the same EdgeFace embedding."""
|
||||
embedding1 = edgeface_model.get_embedding(mock_aligned_face)
|
||||
embedding2 = edgeface_model.get_embedding(mock_aligned_face)
|
||||
|
||||
assert np.allclose(embedding1, embedding2), 'Same input should produce same embedding'
|
||||
|
||||
|
||||
# Cross-model comparison tests
|
||||
def test_different_models_different_embeddings(arcface_model, mobileface_model, mock_aligned_face):
|
||||
"""
|
||||
|
||||
@@ -12,9 +12,11 @@ CLI utilities for testing and running UniFace features.
|
||||
| `anonymize.py` | Face anonymization/blurring for privacy |
|
||||
| `emotion.py` | Emotion detection (7 or 8 emotions) |
|
||||
| `gaze.py` | Gaze direction estimation |
|
||||
| `headpose.py` | Head pose estimation (pitch, yaw, roll) |
|
||||
| `landmarks.py` | 106-point facial landmark detection |
|
||||
| `recognize.py` | Face embedding extraction and comparison |
|
||||
| `search.py` | Real-time face matching against reference |
|
||||
| `faiss_search.py` | FAISS index build and multi-identity face search |
|
||||
| `fairface.py` | FairFace attribute prediction (race, gender, age) |
|
||||
| `attribute.py` | Age and gender prediction |
|
||||
| `spoofing.py` | Face anti-spoofing detection |
|
||||
@@ -61,6 +63,11 @@ python tools/emotion.py --source 0
|
||||
python tools/gaze.py --source assets/test.jpg
|
||||
python tools/gaze.py --source 0
|
||||
|
||||
# Head pose estimation
|
||||
python tools/headpose.py --source assets/test.jpg
|
||||
python tools/headpose.py --source 0
|
||||
python tools/headpose.py --source 0 --draw-type axis
|
||||
|
||||
# Landmarks
|
||||
python tools/landmarks.py --source assets/test.jpg
|
||||
python tools/landmarks.py --source 0
|
||||
@@ -108,7 +115,7 @@ python tools/download_model.py # downloads all
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--source` | Input source: image/video path or camera ID (0, 1, ...) |
|
||||
| `--detector` | Choose detector: `retinaface`, `scrfd`, `yolov5face` |
|
||||
| `--detector` | Choose detector: `retinaface`, `scrfd`, `yolov5face`, `yolov8face` |
|
||||
| `--threshold` | Visualization confidence threshold (default: varies) |
|
||||
| `--save-dir` | Output directory (default: `outputs`) |
|
||||
|
||||
|
||||
@@ -27,12 +27,17 @@ from uniface.draw import draw_detections
|
||||
from uniface.recognition import ArcFace
|
||||
|
||||
|
||||
def draw_face_info(image, face, face_id):
|
||||
"""Draw face ID and attributes above bounding box."""
|
||||
def draw_face_info(image, face):
|
||||
"""Draw face attributes above bounding box."""
|
||||
x1, y1, _x2, y2 = map(int, face.bbox)
|
||||
lines = [f'ID: {face_id}', f'Conf: {face.confidence:.2f}']
|
||||
if face.age and face.sex:
|
||||
lines = []
|
||||
if face.age is not None and face.sex is not None:
|
||||
lines.append(f'{face.sex}, {face.age}y')
|
||||
if face.emotion is not None:
|
||||
lines.append(face.emotion)
|
||||
|
||||
if not lines:
|
||||
return
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
y_pos = y1 - 10 - (len(lines) - 1 - i) * 25
|
||||
@@ -95,13 +100,10 @@ def process_image(analyzer, image_path: str, save_dir: str = 'outputs', show_sim
|
||||
status = 'Same' if sim > 0.4 else 'Different'
|
||||
print(f' Face {i + 1} ↔ Face {j + 1}: {sim:.3f} ({status})')
|
||||
|
||||
bboxes = [f.bbox for f in faces]
|
||||
scores = [f.confidence for f in faces]
|
||||
landmarks = [f.landmarks for f in faces]
|
||||
draw_detections(image=image, bboxes=bboxes, scores=scores, landmarks=landmarks, corner_bbox=True)
|
||||
draw_detections(image=image, faces=faces, corner_bbox=True)
|
||||
|
||||
for i, face in enumerate(faces, 1):
|
||||
draw_face_info(image, face, i)
|
||||
for face in faces:
|
||||
draw_face_info(image, face)
|
||||
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
output_path = os.path.join(save_dir, f'{Path(image_path).stem}_analysis.jpg')
|
||||
@@ -137,13 +139,10 @@ def process_video(analyzer, video_path: str, save_dir: str = 'outputs'):
|
||||
frame_count += 1
|
||||
faces = analyzer.analyze(frame)
|
||||
|
||||
bboxes = [f.bbox for f in faces]
|
||||
scores = [f.confidence for f in faces]
|
||||
landmarks = [f.landmarks for f in faces]
|
||||
draw_detections(image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, corner_bbox=True)
|
||||
draw_detections(image=frame, faces=faces, corner_bbox=True)
|
||||
|
||||
for i, face in enumerate(faces, 1):
|
||||
draw_face_info(frame, face, i)
|
||||
for face in faces:
|
||||
draw_face_info(frame, face)
|
||||
|
||||
cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
|
||||
out.write(frame)
|
||||
@@ -167,19 +166,16 @@ def run_camera(analyzer, camera_id: int = 0):
|
||||
|
||||
while True:
|
||||
ret, frame = cap.read()
|
||||
frame = cv2.flip(frame, 1)
|
||||
if not ret:
|
||||
break
|
||||
frame = cv2.flip(frame, 1)
|
||||
|
||||
faces = analyzer.analyze(frame)
|
||||
|
||||
bboxes = [f.bbox for f in faces]
|
||||
scores = [f.confidence for f in faces]
|
||||
landmarks = [f.landmarks for f in faces]
|
||||
draw_detections(image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, corner_bbox=True)
|
||||
draw_detections(image=frame, faces=faces, corner_bbox=True)
|
||||
|
||||
for i, face in enumerate(faces, 1):
|
||||
draw_face_info(frame, face, i)
|
||||
for face in faces:
|
||||
draw_face_info(frame, face)
|
||||
|
||||
cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
|
||||
cv2.imshow('Face Analyzer', frame)
|
||||
@@ -201,7 +197,7 @@ def main():
|
||||
detector = RetinaFace()
|
||||
recognizer = ArcFace()
|
||||
age_gender = AgeGender()
|
||||
analyzer = FaceAnalyzer(detector, recognizer, age_gender)
|
||||
analyzer = FaceAnalyzer(detector, recognizer=recognizer, attributes=[age_gender])
|
||||
|
||||
source_type = get_source_type(args.source)
|
||||
|
||||
|
||||
@@ -43,10 +43,7 @@ def process_image(
|
||||
from uniface.draw import draw_detections
|
||||
|
||||
preview = image.copy()
|
||||
bboxes = [face.bbox for face in faces]
|
||||
scores = [face.confidence for face in faces]
|
||||
landmarks = [face.landmarks for face in faces]
|
||||
draw_detections(preview, bboxes, scores, landmarks)
|
||||
draw_detections(image=preview, faces=faces)
|
||||
|
||||
cv2.imshow('Detections (Press any key to continue)', preview)
|
||||
cv2.waitKey(0)
|
||||
@@ -121,9 +118,9 @@ def run_camera(detector, blurrer: BlurFace, camera_id: int = 0):
|
||||
|
||||
while True:
|
||||
ret, frame = cap.read()
|
||||
frame = cv2.flip(frame, 1)
|
||||
if not ret:
|
||||
break
|
||||
frame = cv2.flip(frame, 1)
|
||||
|
||||
faces = detector.detect(frame)
|
||||
if faces:
|
||||
|
||||
@@ -52,15 +52,10 @@ def process_image(
    if not faces:
        return

    bboxes = [f.bbox for f in faces]
    scores = [f.confidence for f in faces]
    landmarks = [f.landmarks for f in faces]
    draw_detections(
        image=image, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
    )
    draw_detections(image=image, faces=faces, vis_threshold=threshold, corner_bbox=True)

    for i, face in enumerate(faces):
        result = age_gender.predict(image, face.bbox)
        result = age_gender.predict(image, face)
        print(f' Face {i + 1}: {result.sex}, {result.age} years old')
        draw_age_gender_label(image, face.bbox, result.sex, result.age)

@@ -104,15 +99,10 @@ def process_video(
        frame_count += 1
        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
        )
        draw_detections(image=frame, faces=faces, vis_threshold=threshold, corner_bbox=True)

        for face in faces:
            result = age_gender.predict(frame, face.bbox)
            result = age_gender.predict(frame, face)
            draw_age_gender_label(frame, face.bbox, result.sex, result.age)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
@@ -137,21 +127,16 @@ def run_camera(detector, age_gender, camera_id: int = 0, threshold: float = 0.6)

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
        )
        draw_detections(image=frame, faces=faces, vis_threshold=threshold, corner_bbox=True)

        for face in faces:
            result = age_gender.predict(frame, face.bbox)
            result = age_gender.predict(frame, face)
            draw_age_gender_label(frame, face.bbox, result.sex, result.age)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

@@ -34,13 +34,7 @@ def process_image(detector, image_path: Path, output_path: Path, threshold: floa

    faces = detector.detect(image)

    # unpack face data for visualization
    bboxes = [f.bbox for f in faces]
    scores = [f.confidence for f in faces]
    landmarks = [f.landmarks for f in faces]
    draw_detections(
        image=image, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
    )
    draw_detections(image=image, faces=faces, vis_threshold=threshold, corner_bbox=True)

    cv2.putText(
        image,

@@ -35,10 +35,7 @@ def process_image(detector, image_path: str, threshold: float = 0.6, save_dir: s
    faces = detector.detect(image)

    if faces:
        bboxes = [face.bbox for face in faces]
        scores = [face.confidence for face in faces]
        landmarks = [face.landmarks for face in faces]
        draw_detections(image=image, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold)
        draw_detections(image=image, faces=faces, vis_threshold=threshold)

    os.makedirs(save_dir, exist_ok=True)
    output_path = os.path.join(save_dir, f'{os.path.splitext(os.path.basename(image_path))[0]}_out.jpg')
@@ -89,14 +86,9 @@ def process_video(
        faces = detector.detect(frame)
        total_faces += len(faces)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame,
            bboxes=bboxes,
            scores=scores,
            landmarks=landmarks,
            faces=faces,
            vis_threshold=threshold,
            draw_score=True,
            corner_bbox=True,
@@ -135,20 +127,15 @@ def run_camera(detector, camera_id: int = 0, threshold: float = 0.6):
    prev_time = time.perf_counter()
    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame,
            bboxes=bboxes,
            scores=scores,
            landmarks=landmarks,
            faces=faces,
            vis_threshold=threshold,
            draw_score=True,
            corner_bbox=True,

@@ -1,9 +1,12 @@
import argparse

from uniface.constants import (
    AdaFaceWeights,
    AgeGenderWeights,
    ArcFaceWeights,
    DDAMFNWeights,
    EdgeFaceWeights,
    HeadPoseWeights,
    LandmarkWeights,
    MobileFaceWeights,
    RetinaFaceWeights,
@@ -14,13 +17,16 @@ from uniface.model_store import verify_model_weights

MODEL_TYPES = {
    'retinaface': RetinaFaceWeights,
    'sphereface': SphereFaceWeights,
    'mobileface': MobileFaceWeights,
    'adaface': AdaFaceWeights,
    'arcface': ArcFaceWeights,
    'edgeface': EdgeFaceWeights,
    'mobileface': MobileFaceWeights,
    'sphereface': SphereFaceWeights,
    'scrfd': SCRFDWeights,
    'ddamfn': DDAMFNWeights,
    'agegender': AgeGenderWeights,
    'landmark': LandmarkWeights,
    'headpose': HeadPoseWeights,
}

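A sketch of how the registry above can drive bulk downloads (a hypothetical helper; the actual CLI wiring lives in the rest of this file):

    from uniface.model_store import verify_model_weights

    def download_all(model_type: str) -> None:
        weights_enum = MODEL_TYPES[model_type]  # e.g. 'headpose' -> HeadPoseWeights
        for weight in weights_enum:  # str-based Enums iterate over their members
            verify_model_weights(weight)  # assumed to fetch the weight file and verify it
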
@@ -52,15 +52,10 @@ def process_image(
    if not faces:
        return

    bboxes = [f.bbox for f in faces]
    scores = [f.confidence for f in faces]
    landmarks = [f.landmarks for f in faces]
    draw_detections(
        image=image, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
    )
    draw_detections(image=image, faces=faces, vis_threshold=threshold, corner_bbox=True)

    for i, face in enumerate(faces):
        result = emotion_predictor.predict(image, face.landmarks)
        result = emotion_predictor.predict(image, face)
        print(f' Face {i + 1}: {result.emotion} (confidence: {result.confidence:.3f})')
        draw_emotion_label(image, face.bbox, result.emotion, result.confidence)

@@ -104,15 +99,10 @@ def process_video(
        frame_count += 1
        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
        )
        draw_detections(image=frame, faces=faces, vis_threshold=threshold, corner_bbox=True)

        for face in faces:
            result = emotion_predictor.predict(frame, face.landmarks)
            result = emotion_predictor.predict(frame, face)
            draw_emotion_label(frame, face.bbox, result.emotion, result.confidence)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
@@ -137,21 +127,16 @@ def run_camera(detector, emotion_predictor, camera_id: int = 0, threshold: float

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
        )
        draw_detections(image=frame, faces=faces, vis_threshold=threshold, corner_bbox=True)

        for face in faces:
            result = emotion_predictor.predict(frame, face.landmarks)
            result = emotion_predictor.predict(frame, face)
            draw_emotion_label(frame, face.bbox, result.emotion, result.confidence)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

@@ -52,15 +52,10 @@ def process_image(
    if not faces:
        return

    bboxes = [f.bbox for f in faces]
    scores = [f.confidence for f in faces]
    landmarks = [f.landmarks for f in faces]
    draw_detections(
        image=image, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
    )
    draw_detections(image=image, faces=faces, vis_threshold=threshold, corner_bbox=True)

    for i, face in enumerate(faces):
        result = fairface.predict(image, face.bbox)
        result = fairface.predict(image, face)
        print(f' Face {i + 1}: {result.sex}, {result.age_group}, {result.race}')
        draw_fairface_label(image, face.bbox, result.sex, result.age_group, result.race)

@@ -104,15 +99,10 @@ def process_video(
        frame_count += 1
        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
        )
        draw_detections(image=frame, faces=faces, vis_threshold=threshold, corner_bbox=True)

        for face in faces:
            result = fairface.predict(frame, face.bbox)
            result = fairface.predict(frame, face)
            draw_fairface_label(frame, face.bbox, result.sex, result.age_group, result.race)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
@@ -137,21 +127,16 @@ def run_camera(detector, fairface, camera_id: int = 0, threshold: float = 0.6):

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        faces = detector.detect(frame)

        bboxes = [f.bbox for f in faces]
        scores = [f.confidence for f in faces]
        landmarks = [f.landmarks for f in faces]
        draw_detections(
            image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks, vis_threshold=threshold, corner_bbox=True
        )
        draw_detections(image=frame, faces=faces, vis_threshold=threshold, corner_bbox=True)

        for face in faces:
            result = fairface.predict(frame, face.bbox)
            result = fairface.predict(frame, face)
            draw_fairface_label(frame, face.bbox, result.sex, result.age_group, result.race)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

@@ -24,7 +24,7 @@ import cv2

from uniface import create_detector, create_recognizer
from uniface.draw import draw_corner_bbox, draw_text_label
from uniface.indexing import FAISS
from uniface.stores import FAISS


def _draw_face(image, bbox, text: str, color: tuple[int, int, int]) -> None:
@@ -97,9 +97,9 @@ def run_camera(detector, recognizer, store: FAISS, camera_id: int = 0, threshold

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        frame = process_frame(frame, detector, recognizer, store, threshold)

181
tools/headpose.py
Normal file
@@ -0,0 +1,181 @@
# Copyright 2025-2026 Yakhyokhuja Valikhujaev
# Author: Yakhyokhuja Valikhujaev
# GitHub: https://github.com/yakhyo

"""Head pose estimation on detected faces.

Usage:
    python tools/headpose.py --source path/to/image.jpg
    python tools/headpose.py --source path/to/video.mp4
    python tools/headpose.py --source 0  # webcam
    python tools/headpose.py --source path/to/image.jpg --draw-type axis
"""

from __future__ import annotations

import argparse
import os
from pathlib import Path

from _common import get_source_type
import cv2

from uniface.detection import RetinaFace
from uniface.draw import draw_head_pose
from uniface.headpose import HeadPose


def process_image(detector, head_pose_estimator, image_path: str, save_dir: str = 'outputs', draw_type: str = 'cube'):
    """Process a single image."""
    image = cv2.imread(image_path)
    if image is None:
        print(f"Error: Failed to load image from '{image_path}'")
        return

    faces = detector.detect(image)
    print(f'Detected {len(faces)} face(s)')

    for i, face in enumerate(faces):
        bbox = face.bbox
        x1, y1, x2, y2 = map(int, bbox[:4])
        face_crop = image[y1:y2, x1:x2]

        if face_crop.size == 0:
            continue

        result = head_pose_estimator.estimate(face_crop)
        print(f' Face {i + 1}: pitch={result.pitch:.1f}°, yaw={result.yaw:.1f}°, roll={result.roll:.1f}°')

        draw_head_pose(image, bbox, result.pitch, result.yaw, result.roll, draw_type=draw_type)

    os.makedirs(save_dir, exist_ok=True)
    output_path = os.path.join(save_dir, f'{Path(image_path).stem}_headpose.jpg')
    cv2.imwrite(output_path, image)
    print(f'Output saved: {output_path}')


def process_video(detector, head_pose_estimator, video_path: str, save_dir: str = 'outputs', draw_type: str = 'cube'):
    """Process a video file."""
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"Error: Cannot open video file '{video_path}'")
        return

    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    os.makedirs(save_dir, exist_ok=True)
    output_path = os.path.join(save_dir, f'{Path(video_path).stem}_headpose.mp4')
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    print(f'Processing video: {video_path} ({total_frames} frames)')
    frame_count = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame_count += 1
        faces = detector.detect(frame)

        for face in faces:
            bbox = face.bbox
            x1, y1, x2, y2 = map(int, bbox[:4])
            face_crop = frame[y1:y2, x1:x2]

            if face_crop.size == 0:
                continue

            result = head_pose_estimator.estimate(face_crop)
            draw_head_pose(frame, bbox, result.pitch, result.yaw, result.roll, draw_type=draw_type)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        out.write(frame)

        if frame_count % 100 == 0:
            print(f' Processed {frame_count}/{total_frames} frames...')

    cap.release()
    out.release()
    print(f'Done! Output saved: {output_path}')


def run_camera(detector, head_pose_estimator, camera_id: int = 0, draw_type: str = 'cube'):
    """Run real-time detection on webcam."""
    cap = cv2.VideoCapture(camera_id)
    if not cap.isOpened():
        print(f'Cannot open camera {camera_id}')
        return

    print("Press 'q' to quit")

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame = cv2.flip(frame, 1)
        faces = detector.detect(frame)

        for face in faces:
            bbox = face.bbox
            x1, y1, x2, y2 = map(int, bbox[:4])
            face_crop = frame[y1:y2, x1:x2]

            if face_crop.size == 0:
                continue

            result = head_pose_estimator.estimate(face_crop)
            draw_head_pose(frame, bbox, result.pitch, result.yaw, result.roll, draw_type=draw_type)

        cv2.putText(frame, f'Faces: {len(faces)}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Head Pose Estimation', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def main():
    parser = argparse.ArgumentParser(description='Run head pose estimation')
    parser.add_argument('--source', type=str, required=True, help='Image/video path or camera ID (0, 1, ...)')
    parser.add_argument('--save-dir', type=str, default='outputs', help='Output directory')
    parser.add_argument(
        '--draw-type',
        type=str,
        default='cube',
        choices=['cube', 'axis'],
        help='Visualization type: cube (default) or axis',
    )
    args = parser.parse_args()

    detector = RetinaFace()
    head_pose_estimator = HeadPose()

    source_type = get_source_type(args.source)

    if source_type == 'camera':
        run_camera(detector, head_pose_estimator, int(args.source), args.draw_type)
    elif source_type == 'image':
        if not os.path.exists(args.source):
            print(f'Error: Image not found: {args.source}')
            return
        process_image(detector, head_pose_estimator, args.source, args.save_dir, args.draw_type)
    elif source_type == 'video':
        if not os.path.exists(args.source):
            print(f'Error: Video not found: {args.source}')
            return
        process_video(detector, head_pose_estimator, args.source, args.save_dir, args.draw_type)
    else:
        print(f"Error: Unknown source type for '{args.source}'")
        print('Supported formats: images (.jpg, .png, ...), videos (.mp4, .avi, ...), or camera ID (0, 1, ...)')


if __name__ == '__main__':
    main()
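Distilled from the new tool above, the programmatic surface is small; a minimal sketch using the same calls:

    import cv2
    from uniface.detection import RetinaFace
    from uniface.headpose import HeadPose

    detector = RetinaFace()
    estimator = HeadPose()  # backbone variants are listed in HeadPoseWeights

    image = cv2.imread('face.jpg')
    for face in detector.detect(image):
        x1, y1, x2, y2 = map(int, face.bbox[:4])
        crop = image[y1:y2, x1:x2]  # head pose is estimated on the face crop
        if crop.size == 0:
            continue
        pose = estimator.estimate(crop)
        print(pose.pitch, pose.yaw, pose.roll)  # Euler angles in degrees
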
@@ -114,9 +114,9 @@ def run_camera(detector, landmarker, camera_id: int = 0):

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        faces = detector.detect(frame)

@@ -16,16 +16,22 @@ import numpy as np

from uniface.detection import SCRFD, RetinaFace
from uniface.face_utils import compute_similarity
from uniface.recognition import ArcFace, MobileFace, SphereFace
from uniface.recognition import AdaFace, ArcFace, EdgeFace, MobileFace, SphereFace

RECOGNIZERS = {
    'arcface': ArcFace,
    'adaface': AdaFace,
    'edgeface': EdgeFace,
    'mobileface': MobileFace,
    'sphereface': SphereFace,
}


def get_recognizer(name: str):
    if name == 'arcface':
        return ArcFace()
    elif name == 'mobileface':
        return MobileFace()
    else:
        return SphereFace()
    cls = RECOGNIZERS.get(name)
    if cls is None:
        raise ValueError(f"Unknown recognizer: '{name}'. Available: {list(RECOGNIZERS)}")
    return cls()


def run_inference(detector, recognizer, image_path: str):
@@ -41,12 +47,13 @@ def run_inference(detector, recognizer, image_path: str):

    print(f'Detected {len(faces)} face(s). Extracting embedding for the first face...')

    landmarks = faces[0].landmarks  # 5-point landmarks for alignment (already np.ndarray)
    landmarks = faces[0].landmarks
    embedding = recognizer.get_embedding(image, landmarks)
    norm_embedding = recognizer.get_normalized_embedding(image, landmarks)  # L2 normalized
    raw_norm = np.linalg.norm(embedding)
    norm_embedding = embedding.ravel() / raw_norm if raw_norm > 0 else embedding.ravel()

    print(f' Embedding shape: {embedding.shape}')
    print(f' L2 norm (raw): {np.linalg.norm(embedding):.4f}')
    print(f' L2 norm (raw): {raw_norm:.4f}')
    print(f' L2 norm (normalized): {np.linalg.norm(norm_embedding):.4f}')

@@ -90,7 +97,7 @@ def main():
        '--recognizer',
        type=str,
        default='arcface',
        choices=['arcface', 'mobileface', 'sphereface'],
        choices=list(RECOGNIZERS),
    )
    args = parser.parse_args()

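With get_normalized_embedding gone from this tool, matching reduces to a dot product of unit vectors; a short sketch of the equivalent manual check (the library's compute_similarity, imported above, covers the same ground):

    import numpy as np

    def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
        a = a.ravel() / np.linalg.norm(a)  # L2-normalize both embeddings
        b = b.ravel() / np.linalg.norm(b)
        return float(np.dot(a, b))  # 1.0 means identical direction

    # emb1 = recognizer.get_embedding(img1, faces1[0].landmarks)
    # emb2 = recognizer.get_embedding(img2, faces2[0].landmarks)
    # same = cosine_similarity(emb1, emb2) > threshold  # threshold is model-dependent
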
@@ -109,9 +109,9 @@ def run_camera(detector, recognizer, ref_embedding: np.ndarray, camera_id: int =

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        frame = process_frame(frame, detector, recognizer, ref_embedding, threshold)

@@ -134,9 +134,9 @@ def run_camera(

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        if not ret:
            break
        frame = cv2.flip(frame, 1)

        # Detect faces
        faces = detector.detect(frame)

@@ -15,11 +15,13 @@

This library provides unified APIs for:
- Face detection (RetinaFace, SCRFD, YOLOv5Face, YOLOv8Face)
- Face recognition (AdaFace, ArcFace, MobileFace, SphereFace)
- Face recognition (AdaFace, ArcFace, EdgeFace, MobileFace, SphereFace)
- Face tracking (ByteTrack with Kalman filtering)
- Facial landmarks (106-point detection)
- Face parsing (semantic segmentation)
- Portrait matting (trimap-free alpha matte)
- Gaze estimation
- Head pose estimation
- Age, gender, and emotion prediction
- Face anti-spoofing
- Privacy/anonymization
@@ -29,7 +31,7 @@ from __future__ import annotations

__license__ = 'MIT'
__author__ = 'Yakhyokhuja Valikhujaev'
__version__ = '3.1.0'
__version__ = '3.5.3'

import contextlib

@@ -38,7 +40,7 @@ from uniface.log import Logger, enable_logging
from uniface.model_store import download_models, get_cache_dir, set_cache_dir, verify_model_weights

from .analyzer import FaceAnalyzer
from .attribute import AgeGender, Emotion, FairFace
from .attribute import AgeGender, Emotion, FairFace, create_attribute_predictor
from .detection import (
    SCRFD,
    RetinaFace,
@@ -48,17 +50,19 @@ from .detection import (
    list_available_detectors,
)
from .gaze import MobileGaze, create_gaze_estimator
from .headpose import HeadPose, create_head_pose_estimator
from .landmark import Landmark106, create_landmarker
from .matting import MODNet, create_matting_model
from .parsing import BiSeNet, XSeg, create_face_parser
from .privacy import BlurFace
from .recognition import AdaFace, ArcFace, MobileFace, SphereFace, create_recognizer
from .recognition import AdaFace, ArcFace, EdgeFace, MobileFace, SphereFace, create_recognizer
from .spoofing import MiniFASNet, create_spoofer
from .tracking import BYTETracker
from .types import AttributeResult, EmotionResult, Face, GazeResult, SpoofingResult
from .types import AttributeResult, EmotionResult, Face, GazeResult, HeadPoseResult, SpoofingResult

# Optional: FAISS vector store (requires `pip install faiss-cpu`)
with contextlib.suppress(ImportError):
    from .indexing import FAISS
    from .stores import FAISS

__all__ = [
    # Metadata
@@ -72,6 +76,8 @@ __all__ = [
    'create_detector',
    'create_face_parser',
    'create_gaze_estimator',
    'create_matting_model',
    'create_head_pose_estimator',
    'create_landmarker',
    'create_recognizer',
    'create_spoofer',
@@ -84,6 +90,7 @@ __all__ = [
    # Recognition models
    'AdaFace',
    'ArcFace',
    'EdgeFace',
    'MobileFace',
    'SphereFace',
    # Landmark models
@@ -91,12 +98,18 @@ __all__ = [
    # Gaze models
    'GazeResult',
    'MobileGaze',
    # Head pose models
    'HeadPose',
    'HeadPoseResult',
    # Matting models
    'MODNet',
    # Parsing models
    'BiSeNet',
    'XSeg',
    # Attribute models
    'AgeGender',
    'AttributeResult',
    'create_attribute_predictor',
    'Emotion',
    'EmotionResult',
    'FairFace',
@@ -107,7 +120,7 @@ __all__ = [
    'BYTETracker',
    # Privacy
    'BlurFace',
    # Indexing (optional)
    # Stores (optional)
    'FAISS',
    # Utilities
    'Logger',

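Because the FAISS re-export above is wrapped in contextlib.suppress(ImportError), importing it downstream should be guarded the same way; a minimal sketch:

    try:
        from uniface import FAISS  # present only when faiss-cpu is installed
    except ImportError:
        FAISS = None

    if FAISS is None:
        print('Vector search disabled; install faiss-cpu to enable it.')
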
@@ -4,10 +4,11 @@

from __future__ import annotations

from typing import Any

import numpy as np

from uniface.attribute.age_gender import AgeGender
from uniface.attribute.fairface import FairFace
from uniface.attribute.base import Attribute
from uniface.detection.base import BaseDetector
from uniface.log import Logger
from uniface.recognition.base import BaseRecognizer
@@ -15,53 +16,73 @@ from uniface.types import Face

__all__ = ['FaceAnalyzer']

_UNSET: Any = object()


class FaceAnalyzer:
    """Unified face analyzer combining detection, recognition, and attributes.

    This class provides a high-level interface for face analysis by combining
    multiple components: face detection, recognition (embedding extraction),
    and attribute prediction (age, gender, race).
    and an extensible list of attribute predictors (age, gender, race,
    emotion, etc.).

    Any :class:`~uniface.attribute.base.Attribute` subclass can be passed
    via the ``attributes`` list. Each predictor's ``predict(image, face)``
    is called once per detected face, enriching the :class:`Face` in-place.

    When called with no arguments, uses SCRFD (500M) for detection and
    ArcFace (MobileNet) for recognition — the smallest and fastest variants.

    Args:
        detector: Face detector instance for detecting faces in images.
        recognizer: Optional face recognizer for extracting embeddings.
        age_gender: Optional age/gender predictor.
        fairface: Optional FairFace predictor for demographics.
        detector: Face detector instance. Defaults to ``SCRFD(SCRFD_500M_KPS)``.
        recognizer: Face recognizer for extracting embeddings.
            Defaults to ``ArcFace(MNET)``. Pass ``None`` to disable recognition.
        attributes: Optional list of ``Attribute`` predictors to run on
            each detected face (e.g. ``[AgeGender()]``).

    Example:
        >>> from uniface import RetinaFace, ArcFace, FaceAnalyzer
        >>> detector = RetinaFace()
        >>> recognizer = ArcFace()
        >>> analyzer = FaceAnalyzer(detector, recognizer=recognizer)
    Examples:
        >>> from uniface import FaceAnalyzer
        >>> analyzer = FaceAnalyzer()
        >>> faces = analyzer.analyze(image)

        >>> from uniface import FaceAnalyzer, AgeGender
        >>> analyzer = FaceAnalyzer(attributes=[AgeGender()])
        >>> faces = analyzer.analyze(image)
    """

    def __init__(
        self,
        detector: BaseDetector,
        recognizer: BaseRecognizer | None = None,
        age_gender: AgeGender | None = None,
        fairface: FairFace | None = None,
        detector: BaseDetector | None = None,
        recognizer: BaseRecognizer | None = _UNSET,
        attributes: list[Attribute] | None = None,
    ) -> None:
        if detector is None:
            from uniface.constants import SCRFDWeights
            from uniface.detection import SCRFD

            detector = SCRFD(model_name=SCRFDWeights.SCRFD_500M_KPS)

        if recognizer is _UNSET:
            from uniface.recognition import ArcFace

            recognizer = ArcFace()

        self.detector = detector
        self.recognizer = recognizer
        self.age_gender = age_gender
        self.fairface = fairface
        self.attributes: list[Attribute] = attributes or []

        Logger.info(f'Initialized FaceAnalyzer with detector={detector.__class__.__name__}')
        if recognizer:
            Logger.info(f' - Recognition enabled: {recognizer.__class__.__name__}')
        if age_gender:
            Logger.info(f' - Age/Gender enabled: {age_gender.__class__.__name__}')
        if fairface:
            Logger.info(f' - FairFace enabled: {fairface.__class__.__name__}')
            Logger.info(f'Recognition enabled: {recognizer.__class__.__name__}')
        for attr in self.attributes:
            Logger.info(f'Attribute enabled: {attr.__class__.__name__}')

    def analyze(self, image: np.ndarray) -> list[Face]:
        """Analyze faces in an image.

        Performs face detection and optionally extracts embeddings and
        predicts attributes for each detected face.
        Performs face detection, optionally extracts embeddings, and runs
        every registered attribute predictor on each detected face.

        Args:
            image: Input image as numpy array with shape (H, W, C) in BGR format.
@@ -76,28 +97,17 @@ class FaceAnalyzer:
            if self.recognizer is not None:
                try:
                    face.embedding = self.recognizer.get_normalized_embedding(image, face.landmarks)
                    Logger.debug(f' Face {idx + 1}: Extracted embedding with shape {face.embedding.shape}')
                    Logger.debug(f'Face {idx + 1}: Extracted embedding with shape {face.embedding.shape}')
                except Exception as e:
                    Logger.warning(f' Face {idx + 1}: Failed to extract embedding: {e}')
                    Logger.warning(f'Face {idx + 1}: Failed to extract embedding: {e}')

            if self.age_gender is not None:
            for attr in self.attributes:
                attr_name = attr.__class__.__name__
                try:
                    result = self.age_gender.predict(image, face.bbox)
                    face.gender = result.gender
                    face.age = result.age
                    Logger.debug(f' Face {idx + 1}: Age={face.age}, Gender={face.sex}')
                    attr.predict(image, face)
                    Logger.debug(f'Face {idx + 1}: {attr_name} prediction succeeded')
                except Exception as e:
                    Logger.warning(f' Face {idx + 1}: Failed to predict age/gender: {e}')

            if self.fairface is not None:
                try:
                    result = self.fairface.predict(image, face.bbox)
                    face.gender = result.gender
                    face.age_group = result.age_group
                    face.race = result.race
                    Logger.debug(f' Face {idx + 1}: AgeGroup={face.age_group}, Gender={face.sex}, Race={face.race}')
                except Exception as e:
                    Logger.warning(f' Face {idx + 1}: Failed to predict FairFace attributes: {e}')
                    Logger.warning(f'Face {idx + 1}: {attr_name} prediction failed: {e}')

        Logger.info(f'Analysis complete: {len(faces)} face(s) processed')
        return faces
@@ -106,8 +116,6 @@ class FaceAnalyzer:
        parts = [f'FaceAnalyzer(detector={self.detector.__class__.__name__}']
        if self.recognizer:
            parts.append(f'recognizer={self.recognizer.__class__.__name__}')
        if self.age_gender:
            parts.append(f'age_gender={self.age_gender.__class__.__name__}')
        if self.fairface:
            parts.append(f'fairface={self.fairface.__class__.__name__}')
        for attr in self.attributes:
            parts.append(f'{attr.__class__.__name__}')
        return ', '.join(parts) + ')'

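Under the rewritten analyze() loop, each registered predictor enriches the Face in place, so results are read straight off the dataclass; a sketch (Emotion additionally needs the optional torch dependency):

    from uniface import AgeGender, Emotion, FaceAnalyzer

    analyzer = FaceAnalyzer(attributes=[AgeGender(), Emotion()])  # default SCRFD + ArcFace
    for face in analyzer.analyze(image):  # image: BGR np.ndarray
        print(face.age, face.gender, face.emotion)
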
@@ -12,7 +12,7 @@ from uniface.attribute.age_gender import AgeGender
from uniface.attribute.base import Attribute
from uniface.attribute.fairface import FairFace
from uniface.constants import AgeGenderWeights, DDAMFNWeights, FairFaceWeights
from uniface.types import AttributeResult, EmotionResult
from uniface.types import AttributeResult, EmotionResult, Face

try:
    from uniface.attribute.emotion import Emotion
@@ -30,7 +30,7 @@ except ImportError:
        def _initialize_model(self) -> None: ...
        def preprocess(self, image: np.ndarray, *args: Any) -> Any: ...
        def postprocess(self, prediction: Any) -> Any: ...
        def predict(self, image: np.ndarray, *args: Any) -> Any: ...
        def predict(self, image: np.ndarray, face: Face) -> Any: ...


__all__ = [

@@ -12,7 +12,7 @@ from uniface.face_utils import bbox_center_alignment
from uniface.log import Logger
from uniface.model_store import verify_model_weights
from uniface.onnx_utils import create_onnx_session
from uniface.types import AttributeResult
from uniface.types import AttributeResult, Face

__all__ = ['AgeGender']

@@ -133,17 +133,20 @@ class AgeGender(Attribute):
        age = int(np.round(prediction[2] * 100))
        return AttributeResult(gender=gender, age=age)

    def predict(self, image: np.ndarray, bbox: list | np.ndarray) -> AttributeResult:
        """
        Predicts age and gender for a single face specified by a bounding box.
    def predict(self, image: np.ndarray, face: Face) -> AttributeResult:
        """Predict age and gender and enrich the Face in-place.

        Args:
            image (np.ndarray): The full input image in BGR format.
            bbox (Union[List, np.ndarray]): The face bounding box coordinates [x1, y1, x2, y2].
            image: The full input image in BGR format.
            face: Detected face; ``face.bbox`` is used for alignment.

        Returns:
            AttributeResult: Result containing gender (0=Female, 1=Male) and age (in years).
            ``AttributeResult`` with gender (0=Female, 1=Male) and age (years).
        """
        face_blob = self.preprocess(image, bbox)
        face_blob = self.preprocess(image, face.bbox)
        prediction = self.session.run(self.output_names, {self.input_name: face_blob})[0][0]
        return self.postprocess(prediction)
        result = self.postprocess(prediction)

        face.gender = result.gender
        face.age = result.age
        return result

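The reworked predict above now has a dual contract, returning the result and mutating the Face; both access paths in a sketch:

    from uniface import AgeGender

    age_gender = AgeGender()
    result = age_gender.predict(image, face)  # face: a detected uniface Face

    print(result.age, result.gender)  # via the returned AttributeResult
    print(face.age, face.gender)      # same values, written back onto the Face
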
@@ -2,95 +2,78 @@
# Author: Yakhyokhuja Valikhujaev
# GitHub: https://github.com/yakhyo

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any

import numpy as np

from uniface.types import AttributeResult, EmotionResult
from uniface.types import AttributeResult, EmotionResult, Face

__all__ = ['Attribute', 'AttributeResult', 'EmotionResult']


class Attribute(ABC):
    """
    Abstract base class for face attribute models.
    """Abstract base class for face attribute models.

    This class defines the common interface that all attribute models
    (e.g., age-gender, emotion) must implement. It ensures a consistent API
    across different attribute prediction modules in the library, making them
    interchangeable and easy to use.
    All attribute models (age-gender, emotion, FairFace, etc.) implement this
    interface so they can be used interchangeably inside ``FaceAnalyzer``.

    The ``predict`` method accepts an image and a :class:`Face` object. Each
    subclass extracts what it needs (bbox, landmarks) from the Face, runs
    inference, writes the results back to the Face **and** returns a typed
    result dataclass.
    """

    @abstractmethod
    def _initialize_model(self) -> None:
        """
        Initializes the underlying model for inference.

        This method should handle loading model weights, creating the
        inference session (e.g., ONNX Runtime, PyTorch), and any necessary
        warm-up procedures to prepare the model for prediction.
        """
        """Load model weights and create the inference session."""
        raise NotImplementedError('Subclasses must implement the _initialize_model method.')

    @abstractmethod
    def preprocess(self, image: np.ndarray, *args: Any) -> Any:
        """
        Preprocesses the input data for the model.

        This method should take a raw image and any other necessary data
        (like bounding boxes or landmarks) and convert it into the format
        expected by the model's inference engine (e.g., a blob or tensor).
        """Preprocess the input data for the model.

        Args:
            image (np.ndarray): The input image containing the face, typically
                in BGR format.
            *args: Additional arguments required for preprocessing, such as
                bounding boxes or facial landmarks.
            image: The input image in BGR format.
            *args: Subclass-specific data (bbox, landmarks, etc.).

        Returns:
            The preprocessed data ready for model inference.
            Preprocessed data ready for model inference.
        """
        raise NotImplementedError('Subclasses must implement the preprocess method.')

    @abstractmethod
    def postprocess(self, prediction: Any) -> Any:
        """
        Postprocesses the raw model output into a human-readable format.

        This method takes the raw output from the model's inference and
        converts it into a meaningful result, such as an age value, a gender
        label, or an emotion category.
        """Convert raw model output into a typed result dataclass.

        Args:
            prediction (Any): The raw output from the model's inference.
            prediction: Raw output from the model.

        Returns:
            The final, processed attributes.
            An ``AttributeResult`` or ``EmotionResult``.
        """
        raise NotImplementedError('Subclasses must implement the postprocess method.')

    @abstractmethod
    def predict(self, image: np.ndarray, *args: Any) -> Any:
        """
        Performs end-to-end attribute prediction on a given image.
    def predict(self, image: np.ndarray, face: Face) -> AttributeResult | EmotionResult:
        """Run end-to-end prediction and enrich the Face in-place.

        This method orchestrates the full pipeline: it calls the preprocess,
        inference, and postprocess steps to return the final, user-friendly
        attribute prediction.
        Each subclass extracts what it needs from *face* (e.g. ``face.bbox``
        or ``face.landmarks``), runs the full preprocess-infer-postprocess
        pipeline, writes relevant fields back to *face*, and returns the
        result dataclass.

        Args:
            image (np.ndarray): The input image containing the face.
            *args: Additional data required for prediction, such as a bounding
                box or landmarks.
            image: The full input image in BGR format.
            face: Detected face whose attribute fields will be populated.

        Returns:
            The final predicted attributes.
            The prediction result (``AttributeResult`` or ``EmotionResult``).
        """
        raise NotImplementedError('Subclasses must implement the predict method.')

    def __call__(self, *args, **kwargs) -> Any:
        """
        Provides a convenient, callable shortcut for the `predict` method.
        """
        return self.predict(*args, **kwargs)
    def __call__(self, image: np.ndarray, face: Face) -> AttributeResult | EmotionResult:
        """Callable shortcut for :meth:`predict`."""
        return self.predict(image, face)

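The tightened interface makes third-party predictors drop-in; a deliberately toy subclass, hypothetical but honoring the new contract (assuming Attribute imposes no constructor requirements beyond what is shown):

    import numpy as np
    from uniface.attribute.base import Attribute
    from uniface.types import AttributeResult, Face

    class CropBrightness(Attribute):
        # Toy predictor: reports mean crop brightness through the age field.
        def _initialize_model(self) -> None:
            pass  # nothing to load for this toy

        def preprocess(self, image: np.ndarray, *args):
            x1, y1, x2, y2 = map(int, args[0][:4])  # args[0] is the bbox
            return image[y1:y2, x1:x2]

        def postprocess(self, prediction) -> AttributeResult:
            return AttributeResult(gender=0, age=int(prediction))

        def predict(self, image: np.ndarray, face: Face) -> AttributeResult:
            crop = self.preprocess(image, face.bbox)
            return self.postprocess(crop.mean() if crop.size else 0)
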
@@ -12,7 +12,7 @@ from uniface.constants import DDAMFNWeights
from uniface.face_utils import face_alignment
from uniface.log import Logger
from uniface.model_store import verify_model_weights
from uniface.types import EmotionResult
from uniface.types import EmotionResult, Face

__all__ = ['Emotion']

@@ -116,14 +116,23 @@ class Emotion(Attribute):
        confidence = float(probabilities[pred_index])
        return EmotionResult(emotion=emotion_label, confidence=confidence)

    def predict(self, image: np.ndarray, landmark: list | np.ndarray) -> EmotionResult:
    def predict(self, image: np.ndarray, face: Face) -> EmotionResult:
        """Predict emotion and enrich the Face in-place.

        Args:
            image: The full input image in BGR format.
            face: Detected face; ``face.landmarks`` is used for alignment.

        Returns:
            ``EmotionResult`` with emotion label and confidence score.
        """
        """
        Predicts the emotion from a single face specified by its landmarks.
        """
        input_tensor = self.preprocess(image, landmark)
        input_tensor = self.preprocess(image, face.landmarks)
        with torch.no_grad():
            output = self.model(input_tensor)
            if isinstance(output, tuple):
                output = output[0]

        return self.postprocess(output)
        result = self.postprocess(output)
        face.emotion = result.emotion
        face.emotion_confidence = result.confidence
        return result

@@ -11,7 +11,7 @@ from uniface.constants import FairFaceWeights
from uniface.log import Logger
from uniface.model_store import verify_model_weights
from uniface.onnx_utils import create_onnx_session
from uniface.types import AttributeResult
from uniface.types import AttributeResult, Face

__all__ = ['AGE_LABELS', 'RACE_LABELS', 'FairFace']

@@ -168,29 +168,24 @@ class FairFace(Attribute):
            race=RACE_LABELS[race_idx],
        )

    def predict(self, image: np.ndarray, bbox: list | np.ndarray | None = None) -> AttributeResult:
        """
        Predicts race, gender, and age for a face.
    def predict(self, image: np.ndarray, face: Face) -> AttributeResult:
        """Predict race, gender, and age and enrich the Face in-place.

        Args:
            image (np.ndarray): The input image in BGR format.
            bbox (Optional[Union[List, np.ndarray]]): Face bounding box [x1, y1, x2, y2].
                If None, uses the entire image.
            image: The full input image in BGR format.
            face: Detected face; ``face.bbox`` is used for cropping.

        Returns:
            AttributeResult: Result containing:
                - gender: 0=Female, 1=Male
                - age_group: Age range string like "20-29"
                - race: Race/ethnicity label
            ``AttributeResult`` with gender, age_group, and race.
        """
        # Preprocess
        input_blob = self.preprocess(image, bbox)

        # Inference
        input_blob = self.preprocess(image, face.bbox)
        outputs = self.session.run(self.output_names, {self.input_name: input_blob})
        result = self.postprocess(outputs)

        # Postprocess
        return self.postprocess(outputs)
        face.gender = result.gender
        face.age_group = result.age_group
        face.race = result.race
        return result

    @staticmethod
    def _softmax(x: np.ndarray) -> np.ndarray:

@@ -16,6 +16,7 @@ __all__ = [
    'distance2bbox',
    'distance2kps',
    'generate_anchors',
    'letterbox_resize',
    'non_max_suppression',
    'resize_image',
    'xyxy_to_cxcywh',
@@ -277,3 +278,70 @@ def distance2kps(
        preds.append(px)
        preds.append(py)
    return np.stack(preds, axis=-1)


def letterbox_resize(
    image: np.ndarray,
    target_size: int,
    fill_value: int = 114,
) -> tuple[np.ndarray, float, tuple[int, int]]:
    """Letterbox resize with center padding for YOLO-style detectors.

    Maintains aspect ratio by scaling the image to fit within target_size,
    then center-pads with a constant fill value. Converts BGR to RGB,
    normalizes to [0, 1], and transposes to NCHW format.

    This preprocessing strategy is standard for YOLO models and ensures
    no distortion while maintaining a square input size.

    Args:
        image: Input image in BGR format with shape (H, W, C).
        target_size: Target square size (e.g., 640 for 640x640 input).
        fill_value: Padding fill value (default: 114 for gray background).

    Returns:
        Tuple of (preprocessed_tensor, scale_ratio, padding):
            - preprocessed_tensor: Shape (1, 3, target_size, target_size),
              RGB, normalized [0, 1], NCHW format, float32, contiguous.
            - scale_ratio: Resize scale factor for coordinate transformation.
            - padding: Padding offsets as (pad_w, pad_h) for coordinate transformation.

    Example:
        >>> image = cv2.imread('face.jpg')  # (480, 640, 3)
        >>> tensor, scale, (pad_w, pad_h) = letterbox_resize(image, 640)
        >>> tensor.shape
        (1, 3, 640, 640)
        >>> # To transform coordinates back to original:
        >>> x_orig = (x_detected - pad_w) / scale
        >>> y_orig = (y_detected - pad_h) / scale
    """
    # Get original image shape
    img_h, img_w = image.shape[:2]

    # Calculate scale ratio to fit within target_size
    scale = min(target_size / img_h, target_size / img_w)
    new_h, new_w = int(img_h * scale), int(img_w * scale)

    # Resize image maintaining aspect ratio
    img_resized = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

    # Create padded canvas with fill_value
    img_padded = np.full((target_size, target_size, 3), fill_value, dtype=np.uint8)

    # Calculate padding to center the image
    pad_h = (target_size - new_h) // 2
    pad_w = (target_size - new_w) // 2

    # Place resized image in center of canvas
    img_padded[pad_h : pad_h + new_h, pad_w : pad_w + new_w] = img_resized

    # Convert BGR to RGB and normalize to [0, 1]
    img_rgb = cv2.cvtColor(img_padded, cv2.COLOR_BGR2RGB)
    img_normalized = img_rgb.astype(np.float32) / 255.0

    # Transpose to CHW format and add batch dimension (NCHW)
    img_transposed = np.transpose(img_normalized, (2, 0, 1))
    img_batch = np.expand_dims(img_transposed, axis=0)
    img_batch = np.ascontiguousarray(img_batch)

    return img_batch, scale, (pad_w, pad_h)

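A worked instance of the arithmetic above, letterboxing a 1080x1920 frame to 640:

    # scale = min(640/1080, 640/1920) = 1/3   -> resized to 640x360
    # pad_w = (640 - 640) // 2 = 0
    # pad_h = (640 - 360) // 2 = 140          -> gray bars above and below
    # a detection at (x=320, y=340) in tensor space maps back to
    #   x_orig = (320 - 0) * 3 = 960,  y_orig = (340 - 140) * 3 = 600
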
@@ -57,6 +57,18 @@ class AdaFaceWeights(str, Enum):
    IR_18 = "adaface_ir_18"
    IR_101 = "adaface_ir_101"


class EdgeFaceWeights(str, Enum):
    """
    EdgeFace: Efficient Face Recognition Model for Edge Devices.
    Based on EdgeNeXt backbone with optional LoRA low-rank compression.
    All models output 512-D embeddings from 112x112 aligned face crops.
    https://github.com/yakhyo/edgeface-onnx
    """

    XXS = "edgeface_xxs"
    XS_GAMMA_06 = "edgeface_xs_gamma_06"
    S_GAMMA_05 = "edgeface_s_gamma_05"
    BASE = "edgeface_base"


class RetinaFaceWeights(str, Enum):
    """
    Trained on WIDER FACE dataset.
@@ -156,6 +168,20 @@ class GazeWeights(str, Enum):
    MOBILEONE_S0 = "gaze_mobileone_s0"


class HeadPoseWeights(str, Enum):
    """
    Head pose estimation models using 6D rotation representation.
    Trained on 300W-LP dataset, evaluated on AFLW2000.
    https://github.com/yakhyo/head-pose-estimation
    """

    RESNET18 = "headpose_resnet18"
    RESNET34 = "headpose_resnet34"
    RESNET50 = "headpose_resnet50"
    MOBILENET_V2 = "headpose_mobilenetv2"
    MOBILENET_V3_SMALL = "headpose_mobilenetv3_small"
    MOBILENET_V3_LARGE = "headpose_mobilenetv3_large"


class ParsingWeights(str, Enum):
    """
    Face Parsing: Semantic Segmentation of Facial Components.
@@ -175,6 +201,15 @@ class XSegWeights(str, Enum):
    DEFAULT = "xseg"


class MODNetWeights(str, Enum):
    """
    MODNet: Real-Time Trimap-Free Portrait Matting via Objective Decomposition.
    https://github.com/yakhyo/modnet
    """

    PHOTOGRAPHIC = "modnet_photographic"
    WEBCAM = "modnet_webcam"


class MiniFASNetWeights(str, Enum):
    """
    MiniFASNet: Lightweight Face Anti-Spoofing models.
@@ -264,6 +299,24 @@ MODEL_REGISTRY: dict[Enum, ModelInfo] = {
        sha256='f2eb07d03de0af560a82e1214df799fec5e09375d43521e2868f9dc387e5a43e'
    ),
    # EdgeFace
    EdgeFaceWeights.XXS: ModelInfo(
        url='https://github.com/yakhyo/edgeface-onnx/releases/download/weights/edgeface_xxs.onnx',
        sha256='dc674de4cbc77fa0bf9a82d5149558ab8581d82a2cd3bb60f28fd1a5d3ff8a2f'
    ),
    EdgeFaceWeights.XS_GAMMA_06: ModelInfo(
        url='https://github.com/yakhyo/edgeface-onnx/releases/download/weights/edgeface_xs_gamma_06.onnx',
        sha256='9206e2eb13a2761d7b5b76e13016d4b9acd3fa3535a9a09939f3adacd139a5ff'
    ),
    EdgeFaceWeights.S_GAMMA_05: ModelInfo(
        url='https://github.com/yakhyo/edgeface-onnx/releases/download/weights/edgeface_s_gamma_05.onnx',
        sha256='b850767cf791bda585600b5c4c7d7432b2f998ccd862caae34ef1afa967d2e54'
    ),
    EdgeFaceWeights.BASE: ModelInfo(
        url='https://github.com/yakhyo/edgeface-onnx/releases/download/weights/edgeface_base.onnx',
        sha256='b56942f072c67385f44734b9458b0ccc4a2226888a113f77e0c802ad0c77b4c3'
    ),
    # SCRFD
    SCRFDWeights.SCRFD_10G_KPS: ModelInfo(
        url='https://github.com/yakhyo/uniface/releases/download/weights/scrfd_10g_kps.onnx',
@@ -348,6 +401,32 @@ MODEL_REGISTRY: dict[Enum, ModelInfo] = {
        sha256='8b4fdc4e3da44733c9a82e7776b411e4a39f94e8e285aee0fc85a548a55f7d9f'
    ),
    # Head Pose
    HeadPoseWeights.RESNET18: ModelInfo(
        url='https://github.com/yakhyo/head-pose-estimation/releases/download/weights/resnet18.onnx',
        sha256='61c34e877989412980d1ea80c52391250b074abc00d19a6100de5c8e999212ee'
    ),
    HeadPoseWeights.RESNET34: ModelInfo(
        url='https://github.com/yakhyo/head-pose-estimation/releases/download/weights/resnet34.onnx',
        sha256='8da9f2ce4810298ebea68bd85fba1b6bd11716060c10534596f46be52cc908c9'
    ),
    HeadPoseWeights.RESNET50: ModelInfo(
        url='https://github.com/yakhyo/head-pose-estimation/releases/download/weights/resnet50.onnx',
        sha256='50c74d57b7663361b8ede83b0e4122546171119ef502ec55b790dbd7fc360260'
    ),
    HeadPoseWeights.MOBILENET_V2: ModelInfo(
        url='https://github.com/yakhyo/head-pose-estimation/releases/download/weights/mobilenetv2.onnx',
        sha256='1e902872868e483bd0e4f8f4a8ff2a4d61c2ccbca9dadf748e5479b5cc86a9e9'
    ),
    HeadPoseWeights.MOBILENET_V3_SMALL: ModelInfo(
        url='https://github.com/yakhyo/head-pose-estimation/releases/download/weights/mobilenetv3_small.onnx',
        sha256='e8ae4d932b3d13221638fc72e171603e020c6da28b770753f76146867f40e190'
    ),
    HeadPoseWeights.MOBILENET_V3_LARGE: ModelInfo(
        url='https://github.com/yakhyo/head-pose-estimation/releases/download/weights/mobilenetv3_large.onnx',
        sha256='3a68815fa00aba41ddc4e014bf631b637caba8619df71160383f1fee8c15a3c9'
    ),
    # Parsing
    ParsingWeights.RESNET18: ModelInfo(
        url='https://github.com/yakhyo/face-parsing/releases/download/weights/resnet18.onnx',
@@ -373,6 +452,16 @@ MODEL_REGISTRY: dict[Enum, ModelInfo] = {
        url='https://github.com/yakhyo/face-segmentation/releases/download/weights/xseg.onnx',
        sha256='0b57328efcb839d85973164b617ceee9dfe6cfcb2c82e8a033bba9f4f09b27e5'
    ),
    # MODNet (Portrait Matting)
    MODNetWeights.PHOTOGRAPHIC: ModelInfo(
        url='https://github.com/yakhyo/modnet/releases/download/weights/modnet_photographic.onnx',
        sha256='5069a5e306b9f5e9f4f2b0360264c9f8ea13b257c7c39943c7cf6a2ec3a102ae'
    ),
    MODNetWeights.WEBCAM: ModelInfo(
        url='https://github.com/yakhyo/modnet/releases/download/weights/modnet_webcam.onnx',
        sha256='de03cc16f3c91f25b7c2f0b42ea1a8d34f40a752234f3887572655e744e55306'
    ),
}

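Each registry entry pins a release URL to a SHA-256 digest; a sketch of the check those hashes enable (hypothetical helper, independent of the library's own verify_model_weights):

    import hashlib

    def sha256_of(path: str) -> str:
        digest = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):  # hash in 1 MiB chunks
                digest.update(chunk)
        return digest.hexdigest()

    # info = MODEL_REGISTRY[EdgeFaceWeights.XXS]
    # assert sha256_of('edgeface_xxs.onnx') == info.sha256  # reject tampered downloads
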
@@ -5,7 +5,7 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any
from typing import Any, Literal

import numpy as np

@@ -119,3 +119,77 @@ class BaseDetector(ABC):
            List of detected Face objects.
        """
        return self.detect(image, **kwargs)

    def _select_top_detections(
        self,
        detections: np.ndarray,
        landmarks: np.ndarray,
        max_num: int,
        original_shape: tuple[int, int],
        metric: Literal['default', 'max'] = 'max',
        center_weight: float = 2.0,
    ) -> tuple[np.ndarray, np.ndarray]:
        """Filter detections to keep only top max_num faces.

        Ranks faces by area and/or distance from image center, then selects
        the top max_num detections.

        Args:
            detections: Array of shape (N, 5) as [x1, y1, x2, y2, confidence].
            landmarks: Array of shape (N, 5, 2) for 5-point landmarks.
            max_num: Maximum number of faces to keep. If 0 or >= N, returns all.
            original_shape: Original image shape as (height, width).
            metric: Ranking metric:
                - 'max': Rank by bounding box area only.
                - 'default': Rank by area minus center distance penalty.
            center_weight: Weight for center distance penalty (only used with 'default' metric).

        Returns:
            Filtered (detections, landmarks) tuple with at most max_num faces.
        """
        if max_num <= 0 or detections.shape[0] <= max_num:
            return detections, landmarks

        # Calculate bounding box areas
        area = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])

        # Calculate offsets from image center
        center_y, center_x = original_shape[0] // 2, original_shape[1] // 2
        offsets = np.vstack(
            [
                (detections[:, 0] + detections[:, 2]) / 2 - center_x,
                (detections[:, 1] + detections[:, 3]) / 2 - center_y,
            ]
        )
        offset_dist_squared = np.sum(np.power(offsets, 2.0), axis=0)

        # Calculate ranking scores based on metric
        if metric == 'max':
            scores = area
        else:
            scores = area - offset_dist_squared * center_weight

        # Select top max_num by score
        top_indices = np.argsort(scores)[::-1][:max_num]
        return detections[top_indices], landmarks[top_indices]

    @staticmethod
    def _detections_to_faces(detections: np.ndarray, landmarks: np.ndarray) -> list[Face]:
        """Convert detection arrays to Face objects.

        Args:
            detections: Array of shape (N, 5) as [x1, y1, x2, y2, confidence].
            landmarks: Array of shape (N, 5, 2) for 5-point landmarks.

        Returns:
            List of Face objects.
        """
        faces = []
        for i in range(detections.shape[0]):
            face = Face(
                bbox=detections[i, :4],
                confidence=float(detections[i, 4]),
                landmarks=landmarks[i],
            )
            faces.append(face)
        return faces

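A small numeric check of the two metrics above: with center_weight=2.0, a large corner face loses to a smaller centered one under 'default', while 'max' keeps it.

    import numpy as np

    detections = np.array([
        [0, 0, 100, 100, 0.99],      # big face in the corner of a 640x640 image
        [300, 300, 360, 360, 0.95],  # small face near the center
    ])
    area = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
    cx = cy = 320
    offsets = np.vstack([(detections[:, 0] + detections[:, 2]) / 2 - cx,
                         (detections[:, 1] + detections[:, 3]) / 2 - cy])
    d2 = np.sum(offsets ** 2, axis=0)
    print(area)             # [10000.  3600.]   -> 'max' ranks the corner face first
    print(area - 2.0 * d2)  # [-281600.  3200.] -> 'default' ranks the centered face first
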
@@ -208,42 +208,12 @@ class RetinaFace(BaseDetector):
|
||||
# Postprocessing
|
||||
detections, landmarks = self.postprocess(outputs, resize_factor, shape=(width, height))
|
||||
|
||||
if max_num > 0 and detections.shape[0] > max_num:
|
||||
# Calculate area of detections
|
||||
areas = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
|
||||
# Filter to top max_num faces if requested
|
||||
detections, landmarks = self._select_top_detections(
|
||||
detections, landmarks, max_num, (original_height, original_width), metric, center_weight
|
||||
)
|
||||
|
||||
# Calculate offsets from image center
|
||||
center = (original_height // 2, original_width // 2)
|
||||
offsets = np.vstack(
|
||||
[
|
||||
(detections[:, 0] + detections[:, 2]) / 2 - center[1],
|
||||
(detections[:, 1] + detections[:, 3]) / 2 - center[0],
|
||||
]
|
||||
)
|
||||
offset_dist_squared = np.sum(np.power(offsets, 2.0), axis=0)
|
||||
|
||||
# Calculate scores based on the chosen metric
|
||||
if metric == 'max':
|
||||
scores = areas
|
||||
else:
|
||||
scores = areas - offset_dist_squared * center_weight
|
||||
|
||||
# Sort by scores and select top `max_num`
|
||||
sorted_indices = np.argsort(scores)[::-1][:max_num]
|
||||
|
||||
detections = detections[sorted_indices]
|
||||
landmarks = landmarks[sorted_indices]
|
||||
|
||||
faces = []
|
||||
for i in range(detections.shape[0]):
|
||||
face = Face(
|
||||
bbox=detections[i, :4],
|
||||
confidence=float(detections[i, 4]),
|
||||
landmarks=landmarks[i],
|
||||
)
|
||||
faces.append(face)
|
||||
|
||||
return faces
|
||||
return self._detections_to_faces(detections, landmarks)
|
||||
|
||||
def postprocess(
|
||||
self,
|
||||
|
||||
@@ -272,38 +272,9 @@ class SCRFD(BaseDetector):
        landmarks = landmarks[order, :, :]
        landmarks = landmarks[keep, :, :].astype(np.float32)

        if 0 < max_num < detections.shape[0]:
            # Calculate area of detections
            area = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
        # Filter to top max_num faces if requested
        detections, landmarks = self._select_top_detections(
            detections, landmarks, max_num, (original_height, original_width), metric, center_weight
        )

            # Calculate offsets from image center
            center = (original_height // 2, original_width // 2)
            offsets = np.vstack(
                [
                    (detections[:, 0] + detections[:, 2]) / 2 - center[1],
                    (detections[:, 1] + detections[:, 3]) / 2 - center[0],
                ]
            )

            # Calculate scores based on the chosen metric
            offset_dist_squared = np.sum(np.power(offsets, 2.0), axis=0)
            if metric == 'max':
                values = area
            else:
                values = area - offset_dist_squared * center_weight

            # Sort by scores and select top `max_num`
            sorted_indices = np.argsort(values)[::-1][:max_num]
            detections = detections[sorted_indices]
            landmarks = landmarks[sorted_indices]

        faces = []
        for i in range(detections.shape[0]):
            face = Face(
                bbox=detections[i, :4],
                confidence=float(detections[i, 4]),
                landmarks=landmarks[i],
            )
            faces.append(face)

        return faces
        return self._detections_to_faces(detections, landmarks)
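For intuition, the non-'max' metric scores each box by its area minus the squared distance of its center from the image center, scaled by `center_weight`. A small self-contained check with invented numbers (both boxes and the weight are illustrative only):

import numpy as np

# Two candidate boxes in a 640x480 image; center is (240, 320) as (h // 2, w // 2)
detections = np.array(
    [
        [300.0, 220.0, 340.0, 260.0, 0.90],  # small box exactly at the center
        [10.0, 10.0, 90.0, 90.0, 0.95],      # larger box far in the corner
    ]
)
area = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
center = (240, 320)
offsets = np.vstack(
    [
        (detections[:, 0] + detections[:, 2]) / 2 - center[1],
        (detections[:, 1] + detections[:, 3]) / 2 - center[0],
    ]
)
offset_dist_squared = np.sum(np.power(offsets, 2.0), axis=0)
values = area - offset_dist_squared * 2.0  # center_weight = 2.0, illustrative
print(np.argsort(values)[::-1])  # [0 1]: the centered box wins despite its smaller area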
@@ -4,10 +4,9 @@

from typing import Any, Literal

import cv2
import numpy as np

from uniface.common import non_max_suppression
from uniface.common import letterbox_resize, non_max_suppression
from uniface.constants import YOLOv5FaceWeights
from uniface.log import Logger
from uniface.model_store import verify_model_weights
@@ -140,45 +139,15 @@ class YOLOv5Face(BaseDetector):
            raise RuntimeError(f"Failed to initialize model session for '{model_path}'") from e

    def preprocess(self, image: np.ndarray) -> tuple[np.ndarray, float, tuple[int, int]]:
        """
        Preprocess image for inference.
        """Preprocess image using letterbox resize.

        Args:
            image (np.ndarray): Input image (BGR format)
            image: Input image in BGR format.

        Returns:
            Tuple[np.ndarray, float, Tuple[int, int]]: Preprocessed image, scale ratio, and padding
            Tuple of (preprocessed_tensor, scale_ratio, padding).
        """
        # Get original image shape
        img_h, img_w = image.shape[:2]

        # Calculate scale ratio
        scale = min(self.input_size / img_h, self.input_size / img_w)
        new_h, new_w = int(img_h * scale), int(img_w * scale)

        # Resize image
        img_resized = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

        # Create padded image
        img_padded = np.full((self.input_size, self.input_size, 3), 114, dtype=np.uint8)

        # Calculate padding
        pad_h = (self.input_size - new_h) // 2
        pad_w = (self.input_size - new_w) // 2

        # Place resized image in center
        img_padded[pad_h : pad_h + new_h, pad_w : pad_w + new_w] = img_resized

        # Convert to RGB and normalize
        img_rgb = cv2.cvtColor(img_padded, cv2.COLOR_BGR2RGB)
        img_normalized = img_rgb.astype(np.float32) / 255.0

        # Transpose to CHW format (HWC -> CHW) and add batch dimension
        img_transposed = np.transpose(img_normalized, (2, 0, 1))
        img_batch = np.expand_dims(img_transposed, axis=0)
        img_batch = np.ascontiguousarray(img_batch)

        return img_batch, scale, (pad_w, pad_h)
        return letterbox_resize(image, self.input_size)

    def inference(self, input_tensor: np.ndarray) -> list[np.ndarray]:
        """Perform model inference on the preprocessed image tensor.
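Note: `letterbox_resize` itself is not shown in this diff. Given the inline code removed above and the call `letterbox_resize(image, self.input_size)`, a sketch consistent with both might be (a reconstruction, not the library's confirmed implementation):

import cv2
import numpy as np

def letterbox_resize(image: np.ndarray, input_size: int) -> tuple[np.ndarray, float, tuple[int, int]]:
    """Resize with preserved aspect ratio, pad to a square, return an NCHW float tensor."""
    img_h, img_w = image.shape[:2]

    # Scale so the image fits inside input_size x input_size
    scale = min(input_size / img_h, input_size / img_w)
    new_h, new_w = int(img_h * scale), int(img_w * scale)
    img_resized = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

    # Center the resized image on a gray (114) canvas
    img_padded = np.full((input_size, input_size, 3), 114, dtype=np.uint8)
    pad_h = (input_size - new_h) // 2
    pad_w = (input_size - new_w) // 2
    img_padded[pad_h : pad_h + new_h, pad_w : pad_w + new_w] = img_resized

    # BGR -> RGB, normalize to [0, 1], HWC -> CHW, add batch dimension
    img_rgb = cv2.cvtColor(img_padded, cv2.COLOR_BGR2RGB)
    img_chw = np.transpose(img_rgb.astype(np.float32) / 255.0, (2, 0, 1))
    img_batch = np.ascontiguousarray(np.expand_dims(img_chw, axis=0))
    return img_batch, scale, (pad_w, pad_h)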
@@ -337,38 +306,9 @@ class YOLOv5Face(BaseDetector):
        if len(detections) == 0:
            return []

        if 0 < max_num < detections.shape[0]:
            # Calculate area of detections
            area = (detections[:, 2] - detections[:, 0]) * (detections[:, 3] - detections[:, 1])
        # Filter to top max_num faces if requested
        detections, landmarks = self._select_top_detections(
            detections, landmarks, max_num, (original_height, original_width), metric, center_weight
        )

            # Calculate offsets from image center
            center = (original_height // 2, original_width // 2)
            offsets = np.vstack(
                [
                    (detections[:, 0] + detections[:, 2]) / 2 - center[1],
                    (detections[:, 1] + detections[:, 3]) / 2 - center[0],
                ]
            )

            # Calculate scores based on the chosen metric
            offset_dist_squared = np.sum(np.power(offsets, 2.0), axis=0)
            if metric == 'max':
                values = area
            else:
                values = area - offset_dist_squared * center_weight

            # Sort by scores and select top `max_num`
            sorted_indices = np.argsort(values)[::-1][:max_num]
            detections = detections[sorted_indices]
            landmarks = landmarks[sorted_indices]

        faces = []
        for i in range(detections.shape[0]):
            face = Face(
                bbox=detections[i, :4],
                confidence=float(detections[i, 4]),
                landmarks=landmarks[i],
            )
            faces.append(face)

        return faces
        return self._detections_to_faces(detections, landmarks)
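Taken together, each detector's `detect` path now runs preprocess, inference, postprocess, `_select_top_detections`, and `_detections_to_faces`. A hedged usage sketch (the top-level import and the exact `detect` keyword arguments are assumptions based on this diff, not verified against the released API):

import cv2
from uniface import RetinaFace  # import path assumed

detector = RetinaFace()                 # loads default weights
image = cv2.imread("group_photo.jpg")   # BGR, as the detectors expect
faces = detector.detect(image, max_num=5, metric='default', center_weight=2.0)
for face in faces:
    print(face.bbox, face.confidence, face.landmarks.shape)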