diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..256c185
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,34 @@
+name: Deploy docs
+
+on:
+ push:
+ branches: [main]
+ workflow_dispatch:
+
+permissions:
+ contents: write
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install mkdocs-material pymdown-extensions
+
+ - name: Build docs
+ run: mkdocs build --strict
+
+ - name: Deploy to GitHub Pages
+ uses: peaceiris/actions-gh-pages@v4
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: ./site
+ destination_dir: docs
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c023331..84afcf8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,6 +10,7 @@ repos:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
+ exclude: ^mkdocs\.yml$
- id: check-toml
- id: check-added-large-files
args: ['--maxkb=1000']
diff --git a/docs/api/reference.md b/docs/api/reference.md
new file mode 100644
index 0000000..71eeeb3
--- /dev/null
+++ b/docs/api/reference.md
@@ -0,0 +1,200 @@
+# API Reference
+
+Quick reference for all UniFace classes and functions.
+
+---
+
+## Detection
+
+### RetinaFace
+
+```python
+from uniface import RetinaFace
+from uniface.constants import RetinaFaceWeights
+
+detector = RetinaFace(
+ model_name=RetinaFaceWeights.MNET_V2, # Model variant
+ confidence_threshold=0.5, # Min confidence
+ nms_threshold=0.4, # NMS IoU threshold
+ input_size=(640, 640) # Input resolution
+)
+
+faces = detector.detect(image) # Returns list[Face]
+```
+
+### SCRFD
+
+```python
+from uniface import SCRFD
+from uniface.constants import SCRFDWeights
+
+detector = SCRFD(
+ model_name=SCRFDWeights.SCRFD_10G_KPS,
+ confidence_threshold=0.5,
+ nms_threshold=0.4,
+ input_size=(640, 640)
+)
+```
+
+### YOLOv5Face
+
+```python
+from uniface import YOLOv5Face
+from uniface.constants import YOLOv5FaceWeights
+
+detector = YOLOv5Face(
+ model_name=YOLOv5FaceWeights.YOLOV5S,
+ confidence_threshold=0.6,
+ nms_threshold=0.5
+)
+```
+
+---
+
+## Recognition
+
+### ArcFace
+
+```python
+from uniface import ArcFace
+from uniface.constants import ArcFaceWeights
+
+recognizer = ArcFace(model_name=ArcFaceWeights.MNET)
+
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+# Returns: np.ndarray (1, 512)
+```
+
+### MobileFace / SphereFace
+
+```python
+from uniface import MobileFace, SphereFace
+from uniface.constants import MobileFaceWeights, SphereFaceWeights
+
+recognizer = MobileFace(model_name=MobileFaceWeights.MNET_V2)
+recognizer = SphereFace(model_name=SphereFaceWeights.SPHERE20)
+```
+
+---
+
+## Landmarks
+
+```python
+from uniface import Landmark106
+
+landmarker = Landmark106()
+landmarks = landmarker.get_landmarks(image, bbox)
+# Returns: np.ndarray (106, 2)
+```
+
+---
+
+## Attributes
+
+### AgeGender
+
+```python
+from uniface import AgeGender
+
+predictor = AgeGender()
+result = predictor.predict(image, bbox)
+# Returns: AttributeResult with gender and age set (sex is a derived property)
+```
+
+### FairFace
+
+```python
+from uniface import FairFace
+
+predictor = FairFace()
+result = predictor.predict(image, bbox)
+# Returns: AttributeResult with gender, age_group, and race set (sex is a derived property)
+```
+
+---
+
+## Gaze
+
+```python
+from uniface import MobileGaze
+from uniface.constants import GazeWeights
+
+gaze = MobileGaze(model_name=GazeWeights.RESNET34)
+result = gaze.estimate(face_crop)
+# Returns: GazeResult(pitch, yaw) in radians
+```
+
+---
+
+## Parsing
+
+```python
+from uniface.parsing import BiSeNet
+from uniface.constants import ParsingWeights
+
+parser = BiSeNet(model_name=ParsingWeights.RESNET18)
+mask = parser.parse(face_image)
+# Returns: np.ndarray (H, W) with values 0-18
+```
+
+---
+
+## Anti-Spoofing
+
+```python
+from uniface.spoofing import MiniFASNet
+from uniface.constants import MiniFASNetWeights
+
+spoofer = MiniFASNet(model_name=MiniFASNetWeights.V2)
+result = spoofer.predict(image, bbox)
+# Returns: SpoofingResult(is_real, confidence)
+```
+
+---
+
+## Privacy
+
+```python
+from uniface.privacy import BlurFace, anonymize_faces
+
+# One-liner
+anonymized = anonymize_faces(image, method='pixelate')
+
+# Manual control
+blurrer = BlurFace(method='gaussian', blur_strength=3.0)
+anonymized = blurrer.anonymize(image, faces)
+```
+
+---
+
+## Types
+
+### Face
+
+```python
+@dataclass
+class Face:
+ bbox: np.ndarray # [x1, y1, x2, y2]
+ confidence: float # 0.0 to 1.0
+ landmarks: np.ndarray # (5, 2)
+ embedding: np.ndarray | None = None
+ gender: int | None = None
+ age: int | None = None
+ age_group: str | None = None
+ race: str | None = None
+```
+
+### Result Types
+
+```python
+GazeResult(pitch: float, yaw: float)
+SpoofingResult(is_real: bool, confidence: float)
+AttributeResult(gender: int, age: int | None, age_group: str | None, race: str | None)
+EmotionResult(emotion: str, confidence: float)
+```
+
+---
+
+## Utilities
+
+```python
+from uniface import (
+ compute_similarity, # Compare embeddings
+ face_alignment, # Align face for recognition
+ draw_detections, # Visualize detections
+ vis_parsing_maps, # Visualize parsing
+ verify_model_weights, # Download/verify models
+)
+```
diff --git a/docs/assets/logo.png b/docs/assets/logo.png
new file mode 100644
index 0000000..534d976
Binary files /dev/null and b/docs/assets/logo.png differ
diff --git a/docs/assets/logo.webp b/docs/assets/logo.webp
new file mode 100644
index 0000000..534d976
Binary files /dev/null and b/docs/assets/logo.webp differ
diff --git a/docs/changelog.md b/docs/changelog.md
new file mode 100644
index 0000000..3b7e8c6
--- /dev/null
+++ b/docs/changelog.md
@@ -0,0 +1,46 @@
+# Changelog
+
+All notable changes to UniFace.
+
+---
+
+## [2.0.0] - 2025
+
+### Added
+
+- YOLOv5-Face detection models (N/S/M variants)
+- FairFace attribute prediction (race, gender, age group)
+- Face parsing with BiSeNet (ResNet18/34)
+- Gaze estimation with MobileGaze
+- Anti-spoofing with MiniFASNet
+- Face anonymization with 5 blur methods
+- FaceAnalyzer for combined analysis
+- Type hints throughout
+- Comprehensive documentation
+
+### Changed
+
+- Unified API across all modules
+- Improved model download with SHA-256 verification
+- Better error messages
+
+---
+
+## [1.0.0] - 2024
+
+### Added
+
+- RetinaFace detection
+- SCRFD detection
+- ArcFace recognition
+- MobileFace recognition
+- SphereFace recognition
+- 106-point landmarks
+- Age/Gender prediction
+- Emotion detection
+
+---
+
+## Contributing
+
+See [Contributing Guide](contributing.md) for how to contribute.
diff --git a/docs/concepts/coordinate-systems.md b/docs/concepts/coordinate-systems.md
new file mode 100644
index 0000000..d480d1f
--- /dev/null
+++ b/docs/concepts/coordinate-systems.md
@@ -0,0 +1,191 @@
+# Coordinate Systems
+
+This page explains the coordinate formats used in UniFace.
+
+---
+
+## Image Coordinates
+
+All coordinates use **pixel-based, top-left origin**:
+
+```
+(0, 0) ────────────────► x (width)
+ │
+ │ Image
+ │
+ ▼
+ y (height)
+```
+
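+Since NumPy arrays are indexed as `[row, column]`, a point `(x, y)` in this coordinate system maps to `image[y, x]`. A minimal sketch:
+
+```python
+import cv2
+
+image = cv2.imread("photo.jpg")
+height, width = image.shape[:2]
+
+# NumPy indexing is [row, col], i.e. [y, x] — note the reversed order
+pixel_bgr = image[10, 25]  # the pixel at x=25, y=10
+```
+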
+---
+
+## Bounding Box Format
+
+Bounding boxes use `[x1, y1, x2, y2]` format (top-left and bottom-right corners):
+
+```
+(x1, y1) ─────────────────┐
+ │ │
+ │ Face │
+ │ │
+ └─────────────────────┘ (x2, y2)
+```
+
+### Accessing Coordinates
+
+```python
+face = faces[0]
+
+# Direct access
+x1, y1, x2, y2 = face.bbox
+
+# As properties
+bbox_xyxy = face.bbox_xyxy # [x1, y1, x2, y2]
+bbox_xywh = face.bbox_xywh # [x1, y1, width, height]
+```
+
+### Conversion
+
+```python
+import numpy as np
+
+# xyxy → xywh
+def xyxy_to_xywh(bbox):
+ x1, y1, x2, y2 = bbox
+ return np.array([x1, y1, x2 - x1, y2 - y1])
+
+# xywh → xyxy
+def xywh_to_xyxy(bbox):
+ x, y, w, h = bbox
+ return np.array([x, y, x + w, y + h])
+```
+
+---
+
+## Landmarks
+
+### 5-Point Landmarks (Detection)
+
+Returned by all detection models:
+
+```python
+landmarks = face.landmarks # Shape: (5, 2)
+```
+
+| Index | Point |
+|-------|-------|
+| 0 | Left Eye |
+| 1 | Right Eye |
+| 2 | Nose Tip |
+| 3 | Left Mouth Corner |
+| 4 | Right Mouth Corner |
+
+```
+ 0 ● ● 1
+
+ ● 2
+
+ 3 ● ● 4
+```
+
+### 106-Point Landmarks
+
+Returned by `Landmark106`:
+
+```python
+from uniface import Landmark106
+
+landmarker = Landmark106()
+landmarks = landmarker.get_landmarks(image, face.bbox)
+# Shape: (106, 2)
+```
+
+**Landmark Groups:**
+
+| Range | Group | Points |
+|-------|-------|--------|
+| 0-32 | Face Contour | 33 |
+| 33-50 | Eyebrows | 18 |
+| 51-62 | Nose | 12 |
+| 63-86 | Eyes | 24 |
+| 87-105 | Mouth | 19 |
+
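+The ranges above can be sliced directly from the `(106, 2)` array:
+
+```python
+# Slice the landmark groups listed above
+contour = landmarks[0:33]
+eyebrows = landmarks[33:51]
+nose = landmarks[51:63]
+eyes = landmarks[63:87]
+mouth = landmarks[87:106]
+```
+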
+---
+
+## Face Crop
+
+To crop a face from an image:
+
+```python
+def crop_face(image, bbox, margin=0):
+ """Crop face with optional margin."""
+ h, w = image.shape[:2]
+ x1, y1, x2, y2 = map(int, bbox)
+
+ # Add margin
+ if margin > 0:
+ bw, bh = x2 - x1, y2 - y1
+ x1 = max(0, x1 - int(bw * margin))
+ y1 = max(0, y1 - int(bh * margin))
+ x2 = min(w, x2 + int(bw * margin))
+ y2 = min(h, y2 + int(bh * margin))
+
+ return image[y1:y2, x1:x2]
+
+# Usage
+face_crop = crop_face(image, face.bbox, margin=0.1)
+```
+
+---
+
+## Gaze Angles
+
+Gaze estimation returns pitch and yaw in **radians**:
+
+```python
+result = gaze_estimator.estimate(face_crop)
+
+# Angles in radians
+pitch = result.pitch # Vertical: + = up, - = down
+yaw = result.yaw # Horizontal: + = right, - = left
+
+# Convert to degrees
+import numpy as np
+pitch_deg = np.degrees(pitch)
+yaw_deg = np.degrees(yaw)
+```
+
+**Angle Reference:**
+
+```
+ pitch = +90° (up)
+ │
+ │
+yaw = -90° ────┼──── yaw = +90°
+(left) │ (right)
+ │
+ pitch = -90° (down)
+```
+
+---
+
+## Face Alignment
+
+Face alignment uses 5-point landmarks to normalize face orientation:
+
+```python
+from uniface import face_alignment
+
+# Align face to standard template
+aligned_face = face_alignment(image, face.landmarks)
+# Output: 112x112 aligned face image
+```
+
+The alignment transforms faces to a canonical pose for better recognition accuracy.
+
+---
+
+## Next Steps
+
+- [Inputs & Outputs](inputs-outputs.md) - Data types reference
+- [Recognition Module](../modules/recognition.md) - Face recognition details
diff --git a/docs/concepts/execution-providers.md b/docs/concepts/execution-providers.md
new file mode 100644
index 0000000..0b30751
--- /dev/null
+++ b/docs/concepts/execution-providers.md
@@ -0,0 +1,204 @@
+# Execution Providers
+
+UniFace uses ONNX Runtime for model inference, which supports multiple hardware acceleration backends.
+
+---
+
+## Automatic Provider Selection
+
+UniFace automatically selects the optimal execution provider based on available hardware:
+
+```python
+from uniface import RetinaFace
+
+# Automatically uses best available provider
+detector = RetinaFace()
+```
+
+**Priority order:**
+
+1. **CUDAExecutionProvider** - NVIDIA GPU
+2. **CoreMLExecutionProvider** - Apple Silicon
+3. **CPUExecutionProvider** - Fallback
+
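+If you need to pin a specific order yourself, ONNX Runtime's public API accepts an explicit provider list (a sketch against `onnxruntime` directly, not a UniFace option; `"model.onnx"` is a placeholder path):
+
+```python
+import onnxruntime as ort
+
+# Request CUDA first and fall back to CPU
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+)
+print(session.get_providers())  # Providers actually in use for this session
+```
+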
+---
+
+## Check Available Providers
+
+```python
+import onnxruntime as ort
+
+providers = ort.get_available_providers()
+print("Available providers:", providers)
+```
+
+**Example outputs:**
+
+=== "macOS (Apple Silicon)"
+
+ ```
+ ['CoreMLExecutionProvider', 'CPUExecutionProvider']
+ ```
+
+=== "Linux (NVIDIA GPU)"
+
+ ```
+ ['CUDAExecutionProvider', 'CPUExecutionProvider']
+ ```
+
+=== "Windows (CPU)"
+
+ ```
+ ['CPUExecutionProvider']
+ ```
+
+---
+
+## Platform-Specific Setup
+
+### Apple Silicon (M1/M2/M3/M4)
+
+No additional setup required. ARM64 optimizations are built into `onnxruntime`:
+
+```bash
+pip install uniface
+```
+
+Verify ARM64:
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64
+```
+
+!!! tip "Performance"
+ Apple Silicon Macs use CoreML acceleration automatically, providing excellent performance for face analysis tasks.
+
+---
+
+### NVIDIA GPU (CUDA)
+
+Install with GPU support:
+
+```bash
+pip install uniface[gpu]
+```
+
+**Requirements:**
+
+- CUDA 11.x or 12.x
+- cuDNN 8.x
+- Compatible NVIDIA driver
+
+Verify CUDA:
+
+```python
+import onnxruntime as ort
+
+if 'CUDAExecutionProvider' in ort.get_available_providers():
+ print("CUDA is available!")
+else:
+ print("CUDA not available, using CPU")
+```
+
+---
+
+### CPU Fallback
+
+CPU execution is always available:
+
+```bash
+pip install uniface
+```
+
+Works on all platforms without additional configuration.
+
+---
+
+## Internal API
+
+For advanced use cases, you can access the provider utilities:
+
+```python
+from uniface.onnx_utils import get_available_providers, create_onnx_session
+
+# Check available providers
+providers = get_available_providers()
+print(f"Available: {providers}")
+
+# Models use create_onnx_session() internally
+# which auto-selects the best provider
+```
+
+---
+
+## Performance Tips
+
+### 1. Use GPU When Available
+
+For batch processing or real-time applications, GPU acceleration provides significant speedups:
+
+```bash
+pip install uniface[gpu]
+```
+
+### 2. Optimize Input Size
+
+Smaller input sizes are faster but may reduce accuracy:
+
+```python
+from uniface import RetinaFace
+
+# Faster, lower accuracy
+detector = RetinaFace(input_size=(320, 320))
+
+# Balanced (default)
+detector = RetinaFace(input_size=(640, 640))
+```
+
+### 3. Reuse Sessions Across Images
+
+Create the detector once and reuse it for every image; the ONNX session setup cost is paid a single time and the GPU stays busy across iterations:
+
+```python
+import cv2
+
+# One detector instance serves all images (no per-image model setup)
+for image_path in image_paths:
+    image = cv2.imread(image_path)
+    faces = detector.detect(image)
+    # ...
+```
+
+---
+
+## Troubleshooting
+
+### CUDA Not Detected
+
+1. Verify CUDA installation:
+ ```bash
+ nvidia-smi
+ ```
+
+2. Check CUDA version compatibility with ONNX Runtime
+
+3. Reinstall with GPU support:
+ ```bash
+ pip uninstall onnxruntime onnxruntime-gpu
+ pip install uniface[gpu]
+ ```
+
+### Slow Performance on Mac
+
+Verify you're using ARM64 Python (not Rosetta):
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64 (not x86_64)
+```
+
+---
+
+## Next Steps
+
+- [Model Cache & Offline](model-cache-offline.md) - Model management
+- [Thresholds & Calibration](thresholds-calibration.md) - Tuning parameters
diff --git a/docs/concepts/inputs-outputs.md b/docs/concepts/inputs-outputs.md
new file mode 100644
index 0000000..7a2f68b
--- /dev/null
+++ b/docs/concepts/inputs-outputs.md
@@ -0,0 +1,218 @@
+# Inputs & Outputs
+
+This page describes the data types used throughout UniFace.
+
+---
+
+## Input: Images
+
+All models accept NumPy arrays in **BGR format** (OpenCV default):
+
+```python
+import cv2
+
+# Load image (BGR format)
+image = cv2.imread("photo.jpg")
+print(f"Shape: {image.shape}") # (H, W, 3)
+print(f"Dtype: {image.dtype}") # uint8
+```
+
+!!! warning "Color Format"
+ UniFace expects **BGR** format (OpenCV default). If using PIL or other libraries, convert first:
+
+ ```python
+ from PIL import Image
+ import numpy as np
+
+ pil_image = Image.open("photo.jpg")
+ bgr_image = np.array(pil_image)[:, :, ::-1] # RGB → BGR
+ ```
+
+---
+
+## Output: Face Dataclass
+
+Detection returns a list of `Face` objects:
+
+```python
+from dataclasses import dataclass
+import numpy as np
+
+@dataclass
+class Face:
+ # Required (from detection)
+ bbox: np.ndarray # [x1, y1, x2, y2]
+ confidence: float # 0.0 to 1.0
+ landmarks: np.ndarray # (5, 2) or (106, 2)
+
+ # Optional (enriched by analyzers)
+ embedding: np.ndarray | None = None
+ gender: int | None = None # 0=Female, 1=Male
+ age: int | None = None # Years
+ age_group: str | None = None # "20-29", etc.
+ race: str | None = None # "East Asian", etc.
+ emotion: str | None = None # "Happy", etc.
+ emotion_confidence: float | None = None
+```
+
+### Properties
+
+```python
+face = faces[0]
+
+# Bounding box formats
+face.bbox_xyxy # [x1, y1, x2, y2] - same as bbox
+face.bbox_xywh # [x1, y1, width, height]
+
+# Gender as string
+face.sex # "Female" or "Male" (None if not predicted)
+```
+
+### Methods
+
+```python
+# Compute similarity with another face
+similarity = face1.compute_similarity(face2)
+
+# Convert to dictionary
+face_dict = face.to_dict()
+```
+
+---
+
+## Result Types
+
+### GazeResult
+
+```python
+from dataclasses import dataclass
+
+@dataclass(frozen=True)
+class GazeResult:
+ pitch: float # Vertical angle (radians), + = up
+ yaw: float # Horizontal angle (radians), + = right
+```
+
+**Usage:**
+
+```python
+import numpy as np
+
+result = gaze_estimator.estimate(face_crop)
+print(f"Pitch: {np.degrees(result.pitch):.1f}°")
+print(f"Yaw: {np.degrees(result.yaw):.1f}°")
+```
+
+---
+
+### SpoofingResult
+
+```python
+@dataclass(frozen=True)
+class SpoofingResult:
+ is_real: bool # True = real, False = fake
+ confidence: float # 0.0 to 1.0
+```
+
+**Usage:**
+
+```python
+result = spoofer.predict(image, face.bbox)
+label = "Real" if result.is_real else "Fake"
+print(f"{label}: {result.confidence:.1%}")
+```
+
+---
+
+### AttributeResult
+
+```python
+@dataclass(frozen=True)
+class AttributeResult:
+ gender: int # 0=Female, 1=Male
+ age: int | None # Years (AgeGender model)
+ age_group: str | None # "20-29" (FairFace model)
+ race: str | None # Race label (FairFace model)
+
+ @property
+ def sex(self) -> str:
+ return "Female" if self.gender == 0 else "Male"
+```
+
+**Usage:**
+
+```python
+# AgeGender model
+result = age_gender.predict(image, face.bbox)
+print(f"{result.sex}, {result.age} years old")
+
+# FairFace model
+result = fairface.predict(image, face.bbox)
+print(f"{result.sex}, {result.age_group}, {result.race}")
+```
+
+---
+
+### EmotionResult
+
+```python
+@dataclass(frozen=True)
+class EmotionResult:
+ emotion: str # "Happy", "Sad", etc.
+ confidence: float # 0.0 to 1.0
+```
+
+---
+
+## Embeddings
+
+Face recognition models return normalized 512-dimensional embeddings:
+
+```python
+import numpy as np
+
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+print(f"Shape: {embedding.shape}") # (1, 512)
+print(f"Norm: {np.linalg.norm(embedding):.4f}") # ~1.0
+```
+
+### Similarity Computation
+
+```python
+from uniface import compute_similarity
+
+similarity = compute_similarity(embedding1, embedding2)
+# Returns: float between -1 and 1 (cosine similarity)
+```
+
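+For normalized embeddings, cosine similarity reduces to a dot product. A minimal sketch of the equivalent computation:
+
+```python
+import numpy as np
+
+def cosine_similarity(e1: np.ndarray, e2: np.ndarray) -> float:
+    """Cosine similarity between two embeddings, robust to non-normalized inputs."""
+    e1, e2 = e1.ravel(), e2.ravel()
+    return float(np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2)))
+```
+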
+---
+
+## Parsing Masks
+
+Face parsing returns a segmentation mask:
+
+```python
+import numpy as np
+
+mask = parser.parse(face_image)
+print(f"Shape: {mask.shape}") # (H, W)
+print(f"Classes: {np.unique(mask)}") # [0, 1, 2, ...]
+```
+
+**19 Classes:**
+
+| ID | Class | ID | Class |
+|----|-------|----|-------|
+| 0 | Background | 10 | Earring |
+| 1 | Skin | 11 | Nose |
+| 2 | Left Eyebrow | 12 | Mouth |
+| 3 | Right Eyebrow | 13 | Upper Lip |
+| 4 | Left Eye | 14 | Lower Lip |
+| 5 | Right Eye | 15 | Neck |
+| 6 | Eyeglasses | 16 | Necklace |
+| 7 | Left Ear | 17 | Cloth |
+| 8 | Right Ear | 18 | Hair |
+| 9 | Hat | | |
+
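+The mask can be used directly as a boolean index, e.g. to highlight one class from the table above:
+
+```python
+# A sketch: highlight the hair region (class 18 per the table above)
+hair = mask == 18
+overlay = face_image.copy()
+overlay[hair] = (0, 0, 255)  # paint hair red (BGR)
+```
+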
+---
+
+## Next Steps
+
+- [Coordinate Systems](coordinate-systems.md) - Bbox and landmark formats
+- [Thresholds & Calibration](thresholds-calibration.md) - Tuning confidence thresholds
diff --git a/docs/concepts/model-cache-offline.md b/docs/concepts/model-cache-offline.md
new file mode 100644
index 0000000..eb05c64
--- /dev/null
+++ b/docs/concepts/model-cache-offline.md
@@ -0,0 +1,218 @@
+# Model Cache & Offline Use
+
+UniFace automatically downloads and caches models. This page explains how model management works.
+
+---
+
+## Automatic Download
+
+Models are downloaded on first use:
+
+```python
+from uniface import RetinaFace
+
+# First run: downloads model to cache
+detector = RetinaFace() # ~3.5 MB download
+
+# Subsequent runs: loads from cache
+detector = RetinaFace() # Instant
+```
+
+---
+
+## Cache Location
+
+Default cache directory:
+
+```
+~/.uniface/models/
+```
+
+**Example structure:**
+
+```
+~/.uniface/models/
+├── retinaface_mv2.onnx
+├── w600k_mbf.onnx
+├── 2d106det.onnx
+├── gaze_resnet34.onnx
+├── parsing_resnet18.onnx
+└── ...
+```
+
+---
+
+## Custom Cache Directory
+
+Specify a custom cache location:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+# Download to custom directory
+model_path = verify_model_weights(
+ RetinaFaceWeights.MNET_V2,
+ root='./my_models'
+)
+print(f"Model at: {model_path}")
+```
+
+---
+
+## Pre-Download Models
+
+Download models before deployment:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import (
+ RetinaFaceWeights,
+ ArcFaceWeights,
+ AgeGenderWeights,
+)
+
+# Download all needed models
+models = [
+ RetinaFaceWeights.MNET_V2,
+ ArcFaceWeights.MNET,
+ AgeGenderWeights.DEFAULT,
+]
+
+for model in models:
+ path = verify_model_weights(model)
+ print(f"Downloaded: {path}")
+```
+
+Or use the CLI tool:
+
+```bash
+python tools/download_model.py
+```
+
+---
+
+## Offline Use
+
+For air-gapped or offline environments:
+
+### 1. Pre-download models
+
+On a connected machine:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+path = verify_model_weights(RetinaFaceWeights.MNET_V2)
+print(f"Copy from: {path}")
+```
+
+### 2. Copy to target machine
+
+```bash
+# Copy the entire cache directory
+scp -r ~/.uniface/models/ user@offline-machine:~/.uniface/models/
+```
+
+### 3. Use normally
+
+```python
+# Models load from local cache
+from uniface import RetinaFace
+detector = RetinaFace() # No network required
+```
+
+---
+
+## Model Verification
+
+Models are verified with SHA-256 checksums:
+
+```python
+from uniface.constants import MODEL_SHA256, RetinaFaceWeights
+
+# Check expected checksum
+expected = MODEL_SHA256[RetinaFaceWeights.MNET_V2]
+print(f"Expected SHA256: {expected}")
+```
+
+If a model fails verification, it's re-downloaded automatically.
+
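+To check a cached file by hand, a sketch using only the standard library (`model_path` as returned by `verify_model_weights`):
+
+```python
+import hashlib
+
+def sha256_of(path: str) -> str:
+    """SHA-256 digest of a file, read in 1 MB chunks."""
+    digest = hashlib.sha256()
+    with open(path, "rb") as f:
+        for chunk in iter(lambda: f.read(1 << 20), b""):
+            digest.update(chunk)
+    return digest.hexdigest()
+
+assert sha256_of(model_path) == expected
+```
+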
+---
+
+## Available Models
+
+### Detection Models
+
+| Model | Size | Download |
+|-------|------|----------|
+| RetinaFace MNET_025 | 1.7 MB | ✅ |
+| RetinaFace MNET_V2 | 3.5 MB | ✅ |
+| RetinaFace RESNET34 | 56 MB | ✅ |
+| SCRFD 500M | 2.5 MB | ✅ |
+| SCRFD 10G | 17 MB | ✅ |
+| YOLOv5n-Face | 11 MB | ✅ |
+| YOLOv5s-Face | 28 MB | ✅ |
+| YOLOv5m-Face | 82 MB | ✅ |
+
+### Recognition Models
+
+| Model | Size | Download |
+|-------|------|----------|
+| ArcFace MNET | 8 MB | ✅ |
+| ArcFace RESNET | 166 MB | ✅ |
+| MobileFace MNET_V2 | 4 MB | ✅ |
+| SphereFace SPHERE20 | 50 MB | ✅ |
+
+### Other Models
+
+| Model | Size | Download |
+|-------|------|----------|
+| Landmark106 | 14 MB | ✅ |
+| AgeGender | 8 MB | ✅ |
+| FairFace | 44 MB | ✅ |
+| Gaze ResNet34 | 82 MB | ✅ |
+| BiSeNet ResNet18 | 51 MB | ✅ |
+| MiniFASNet V2 | 1.2 MB | ✅ |
+
+---
+
+## Clear Cache
+
+Remove cached models:
+
+```bash
+# Remove all cached models
+rm -rf ~/.uniface/models/
+
+# Remove specific model
+rm ~/.uniface/models/retinaface_mv2.onnx
+```
+
+Models will be re-downloaded on next use.
+
+---
+
+## Environment Variables
+
+Set custom cache location via environment variable:
+
+```bash
+export UNIFACE_CACHE_DIR=/path/to/custom/cache
+```
+
+```python
+import os
+os.environ['UNIFACE_CACHE_DIR'] = '/path/to/custom/cache'
+
+from uniface import RetinaFace
+detector = RetinaFace() # Uses custom cache
+```
+
+---
+
+## Next Steps
+
+- [Thresholds & Calibration](thresholds-calibration.md) - Tune model parameters
+- [Detection Module](../modules/detection.md) - Detection model details
diff --git a/docs/concepts/overview.md b/docs/concepts/overview.md
new file mode 100644
index 0000000..747a940
--- /dev/null
+++ b/docs/concepts/overview.md
@@ -0,0 +1,195 @@
+# Overview
+
+UniFace is designed as a modular, production-ready face analysis library. This page explains the architecture and design principles.
+
+---
+
+## Architecture
+
+UniFace follows a modular architecture where each face analysis task is handled by a dedicated module:
+
+```mermaid
+graph TB
+ subgraph Input
+ IMG[Image/Frame]
+ end
+
+ subgraph Detection
+ DET[RetinaFace / SCRFD / YOLOv5Face]
+ end
+
+ subgraph Analysis
+ REC[Recognition]
+ LMK[Landmarks]
+ ATTR[Attributes]
+ GAZE[Gaze]
+ PARSE[Parsing]
+ SPOOF[Anti-Spoofing]
+ PRIV[Privacy]
+ end
+
+ subgraph Output
+ FACE[Face Objects]
+ end
+
+ IMG --> DET
+ DET --> REC
+ DET --> LMK
+ DET --> ATTR
+ DET --> GAZE
+ DET --> PARSE
+ DET --> SPOOF
+ DET --> PRIV
+ REC --> FACE
+ LMK --> FACE
+ ATTR --> FACE
+```
+
+---
+
+## Design Principles
+
+### 1. ONNX-First
+
+All models use ONNX Runtime for inference:
+
+- **Cross-platform**: Same models work on macOS, Linux, Windows
+- **Hardware acceleration**: Automatic selection of optimal provider
+- **Production-ready**: No Python-only dependencies for inference
+
+### 2. Minimal Dependencies
+
+Core dependencies are kept minimal:
+
+```
+numpy # Array operations
+opencv-python # Image processing
+onnxruntime # Model inference
+requests # Model download
+tqdm # Progress bars
+```
+
+### 3. Simple API
+
+Factory functions and direct instantiation:
+
+```python
+# Factory function
+from uniface import create_detector
+detector = create_detector('retinaface')
+
+# Direct instantiation (recommended)
+from uniface import RetinaFace
+detector = RetinaFace()
+```
+
+### 4. Type Safety
+
+Full type hints throughout:
+
+```python
+def detect(self, image: np.ndarray) -> list[Face]:
+ ...
+```
+
+---
+
+## Module Structure
+
+```
+uniface/
+├── detection/ # Face detection (RetinaFace, SCRFD, YOLOv5Face)
+├── recognition/ # Face recognition (ArcFace, MobileFace, SphereFace)
+├── landmark/ # 106-point landmarks
+├── attribute/ # Age, gender, emotion, race
+├── parsing/ # Face semantic segmentation
+├── gaze/ # Gaze estimation
+├── spoofing/ # Anti-spoofing
+├── privacy/ # Face anonymization
+├── types.py # Dataclasses (Face, GazeResult, etc.)
+├── constants.py # Model weights and URLs
+├── model_store.py # Model download and caching
+├── onnx_utils.py # ONNX Runtime utilities
+└── visualization.py # Drawing utilities
+```
+
+---
+
+## Workflow
+
+A typical face analysis workflow:
+
+```python
+import cv2
+from uniface import RetinaFace, ArcFace, AgeGender
+
+# 1. Initialize models
+detector = RetinaFace()
+recognizer = ArcFace()
+age_gender = AgeGender()
+
+# 2. Load image
+image = cv2.imread("photo.jpg")
+
+# 3. Detect faces
+faces = detector.detect(image)
+
+# 4. Analyze each face
+for face in faces:
+ # Recognition embedding
+ embedding = recognizer.get_normalized_embedding(image, face.landmarks)
+
+ # Attributes
+ attrs = age_gender.predict(image, face.bbox)
+
+ print(f"Face: {attrs.sex}, {attrs.age} years")
+```
+
+---
+
+## FaceAnalyzer
+
+For convenience, `FaceAnalyzer` combines multiple modules:
+
+```python
+from uniface import FaceAnalyzer
+
+analyzer = FaceAnalyzer(
+ detect=True,
+ recognize=True,
+ attributes=True
+)
+
+faces = analyzer.analyze(image)
+for face in faces:
+ print(f"Age: {face.age}, Gender: {face.sex}")
+ print(f"Embedding: {face.embedding.shape}")
+```
+
+---
+
+## Model Lifecycle
+
+1. **First use**: Model is downloaded from GitHub releases
+2. **Cached**: Stored in `~/.uniface/models/`
+3. **Verified**: SHA-256 checksum validation
+4. **Loaded**: ONNX Runtime session created
+5. **Inference**: Hardware-accelerated execution
+
+```python
+# Models auto-download on first use
+detector = RetinaFace() # Downloads if not cached
+
+# Or manually pre-download
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+path = verify_model_weights(RetinaFaceWeights.MNET_V2)
+```
+
+---
+
+## Next Steps
+
+- [Inputs & Outputs](inputs-outputs.md) - Understand data types
+- [Execution Providers](execution-providers.md) - Hardware acceleration
+- [Detection Module](../modules/detection.md) - Start with face detection
diff --git a/docs/concepts/thresholds-calibration.md b/docs/concepts/thresholds-calibration.md
new file mode 100644
index 0000000..1917d94
--- /dev/null
+++ b/docs/concepts/thresholds-calibration.md
@@ -0,0 +1,234 @@
+# Thresholds & Calibration
+
+This page explains how to tune detection and recognition thresholds for your use case.
+
+---
+
+## Detection Thresholds
+
+### Confidence Threshold
+
+Controls minimum confidence for face detection:
+
+```python
+from uniface import RetinaFace
+
+# Default (balanced)
+detector = RetinaFace(confidence_threshold=0.5)
+
+# High precision (fewer false positives)
+detector = RetinaFace(confidence_threshold=0.8)
+
+# High recall (catch more faces)
+detector = RetinaFace(confidence_threshold=0.3)
+```
+
+**Guidelines:**
+
+| Threshold | Use Case |
+|-----------|----------|
+| 0.3 - 0.4 | Maximum recall (research, analysis) |
+| 0.5 - 0.6 | Balanced (default, general use) |
+| 0.7 - 0.9 | High precision (production, security) |
+
+---
+
+### NMS Threshold
+
+Non-Maximum Suppression removes overlapping detections:
+
+```python
+# Default
+detector = RetinaFace(nms_threshold=0.4)
+
+# Stricter (fewer overlapping boxes)
+detector = RetinaFace(nms_threshold=0.3)
+
+# Looser (for crowded scenes)
+detector = RetinaFace(nms_threshold=0.5)
+```
+
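+The NMS threshold is an IoU (intersection-over-union) cutoff: when two candidate boxes overlap more than this value, the lower-scoring one is suppressed. A sketch of the underlying test:
+
+```python
+import numpy as np
+
+def iou(a: np.ndarray, b: np.ndarray) -> float:
+    """IoU of two [x1, y1, x2, y2] boxes."""
+    x1, y1 = np.maximum(a[:2], b[:2])
+    x2, y2 = np.minimum(a[2:], b[2:])
+    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
+    area_a = (a[2] - a[0]) * (a[3] - a[1])
+    area_b = (b[2] - b[0]) * (b[3] - b[1])
+    return inter / (area_a + area_b - inter)
+```
+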
+---
+
+### Input Size
+
+Affects detection accuracy and speed:
+
+```python
+# Faster, lower accuracy
+detector = RetinaFace(input_size=(320, 320))
+
+# Balanced (default)
+detector = RetinaFace(input_size=(640, 640))
+
+# Higher accuracy, slower
+detector = RetinaFace(input_size=(1280, 1280))
+```
+
+!!! tip "Dynamic Size"
+ For RetinaFace, enable dynamic input for variable image sizes:
+ ```python
+ detector = RetinaFace(dynamic_size=True)
+ ```
+
+---
+
+## Recognition Thresholds
+
+### Similarity Threshold
+
+For identity verification (same person check):
+
+```python
+import numpy as np
+from uniface import compute_similarity
+
+similarity = compute_similarity(embedding1, embedding2)
+
+# Threshold interpretation
+if similarity > 0.6:
+ print("Same person (high confidence)")
+elif similarity > 0.4:
+ print("Uncertain (manual review)")
+else:
+ print("Different people")
+```
+
+**Recommended thresholds:**
+
+| Threshold | Decision | False Accept Rate |
+|-----------|----------|-------------------|
+| 0.4 | Low security | Higher FAR |
+| 0.5 | Balanced | Moderate FAR |
+| 0.6 | High security | Lower FAR |
+| 0.7 | Very strict | Very low FAR |
+
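+In practice this decision is usually wrapped behind a single tunable parameter:
+
+```python
+from uniface import compute_similarity
+
+def is_same_person(emb1, emb2, threshold: float = 0.6) -> bool:
+    """Verify identity at a configurable operating point."""
+    return compute_similarity(emb1, emb2) >= threshold
+```
+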
+---
+
+### Calibration for Your Dataset
+
+Test on your data to find optimal thresholds:
+
+```python
+import cv2
+import numpy as np
+
+def pair_similarity(img1_path, img2_path, recognizer, detector):
+    """Similarity between the first face in each image (None if a face is missing)."""
+    img1 = cv2.imread(img1_path)
+    img2 = cv2.imread(img2_path)
+
+    faces1 = detector.detect(img1)
+    faces2 = detector.detect(img2)
+    if not faces1 or not faces2:
+        return None
+
+    emb1 = recognizer.get_normalized_embedding(img1, faces1[0].landmarks)
+    emb2 = recognizer.get_normalized_embedding(img2, faces2[0].landmarks)
+    return float(np.dot(emb1, emb2.T)[0][0])
+
+def calibrate_threshold(same_pairs, diff_pairs, recognizer, detector):
+    """Find the threshold that best separates same/different pairs on your data."""
+    same_scores = [s for p in same_pairs
+                   if (s := pair_similarity(*p, recognizer, detector)) is not None]
+    diff_scores = [s for p in diff_pairs
+                   if (s := pair_similarity(*p, recognizer, detector)) is not None]
+
+    best_threshold, best_accuracy = 0.5, 0.0
+    for thresh in np.arange(0.3, 0.8, 0.05):
+        tp = sum(1 for s in same_scores if s >= thresh)
+        tn = sum(1 for s in diff_scores if s < thresh)
+        accuracy = (tp + tn) / (len(same_scores) + len(diff_scores))
+        if accuracy > best_accuracy:
+            best_accuracy, best_threshold = accuracy, thresh
+
+    return best_threshold, best_accuracy
+```
+
+---
+
+## Anti-Spoofing Thresholds
+
+The MiniFASNet model returns a confidence score:
+
+```python
+from uniface.spoofing import MiniFASNet
+
+spoofer = MiniFASNet()
+result = spoofer.predict(image, face.bbox)
+
+# Default threshold (0.5)
+if result.is_real: # confidence > 0.5
+ print("Real face")
+
+# Custom threshold for high security
+SPOOF_THRESHOLD = 0.7
+if result.confidence > SPOOF_THRESHOLD:
+ print("Real face (high confidence)")
+else:
+ print("Potentially fake")
+```
+
+---
+
+## Attribute Model Confidence
+
+### Emotion
+
+```python
+result = emotion_predictor.predict(image, landmarks)
+
+# Filter low-confidence predictions
+if result.confidence > 0.6:
+ print(f"Emotion: {result.emotion}")
+else:
+ print("Uncertain emotion")
+```
+
+---
+
+## Visualization Threshold
+
+For drawing detections, filter by confidence:
+
+```python
+from uniface.visualization import draw_detections
+
+# Only draw high-confidence detections (filter once, then unpack)
+visible = [f for f in faces if f.confidence > 0.7]
+bboxes = [f.bbox for f in visible]
+scores = [f.confidence for f in visible]
+landmarks = [f.landmarks for f in visible]
+
+draw_detections(
+ image=image,
+ bboxes=bboxes,
+ scores=scores,
+ landmarks=landmarks,
+ vis_threshold=0.6 # Additional visualization filter
+)
+```
+
+---
+
+## Summary
+
+| Parameter | Default | Range | Lower = | Higher = |
+|-----------|---------|-------|---------|----------|
+| `confidence_threshold` | 0.5 | 0.1-0.9 | More detections | Fewer false positives |
+| `nms_threshold` | 0.4 | 0.1-0.7 | Fewer overlaps | More overlapping boxes |
+| Similarity threshold | 0.6 | 0.3-0.8 | More matches (FAR↑) | Fewer matches (FRR↑) |
+| Spoof confidence | 0.5 | 0.3-0.9 | More "real" | Stricter liveness |
+
+---
+
+## Next Steps
+
+- [Detection Module](../modules/detection.md) - Detection model options
+- [Recognition Module](../modules/recognition.md) - Recognition model options
diff --git a/docs/contributing.md b/docs/contributing.md
new file mode 100644
index 0000000..f4900a7
--- /dev/null
+++ b/docs/contributing.md
@@ -0,0 +1,72 @@
+# Contributing
+
+Thank you for contributing to UniFace!
+
+---
+
+## Quick Start
+
+```bash
+# Clone
+git clone https://github.com/yakhyo/uniface.git
+cd uniface
+
+# Install dev dependencies
+pip install -e ".[dev]"
+
+# Run tests
+pytest
+```
+
+---
+
+## Code Style
+
+We use [Ruff](https://docs.astral.sh/ruff/) for formatting:
+
+```bash
+ruff format .
+ruff check . --fix
+```
+
+**Guidelines:**
+
+- Line length: 120
+- Python 3.11+ type hints
+- Google-style docstrings
+
+---
+
+## Pre-commit Hooks
+
+```bash
+pip install pre-commit
+pre-commit install
+pre-commit run --all-files
+```
+
+---
+
+## Pull Request Process
+
+1. Fork the repository
+2. Create a feature branch
+3. Write tests for new features
+4. Ensure tests pass
+5. Submit PR with clear description
+
+---
+
+## Adding New Models
+
+1. Create model class in appropriate submodule
+2. Add weight constants to `uniface/constants.py` (see the sketch below)
+3. Export in `__init__.py` files
+4. Write tests in `tests/`
+5. Add example in `tools/` or notebooks
+
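+For step 2, a hypothetical sketch of what a weight constant might look like (`MyModelWeights` and its value are placeholders, not real UniFace names):
+
+```python
+# uniface/constants.py (hypothetical addition)
+from enum import Enum
+
+class MyModelWeights(str, Enum):
+    DEFAULT = "mymodel_default"  # key the model store uses to resolve the download
+```
+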
+---
+
+## Questions?
+
+Open an issue on [GitHub](https://github.com/yakhyo/uniface/issues).
diff --git a/docs/faq.md b/docs/faq.md
new file mode 100644
index 0000000..e7689f3
--- /dev/null
+++ b/docs/faq.md
@@ -0,0 +1,138 @@
+# FAQ
+
+Frequently asked questions.
+
+---
+
+## General
+
+### What is UniFace?
+
+A Python library for face analysis: detection, recognition, landmarks, attributes, parsing, gaze estimation, anti-spoofing, and privacy protection.
+
+### What are the requirements?
+
+- Python 3.11+
+- Works on macOS, Linux, Windows
+
+### Is GPU required?
+
+No. CPU works fine. GPU (CUDA) provides faster inference.
+
+---
+
+## Models
+
+### Where are models stored?
+
+```
+~/.uniface/models/
+```
+
+### How to use offline?
+
+Pre-download models:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+verify_model_weights(RetinaFaceWeights.MNET_V2)
+```
+
+### Which detection model is best?
+
+| Use Case | Model |
+|----------|-------|
+| Balanced | RetinaFace MNET_V2 |
+| Accuracy | SCRFD 10G |
+| Speed | YOLOv5n-Face |
+
+---
+
+## Usage
+
+### What image format?
+
+BGR (OpenCV default):
+
+```python
+image = cv2.imread("photo.jpg") # BGR
+```
+
+### How to compare faces?
+
+```python
+from uniface import compute_similarity
+
+similarity = compute_similarity(emb1, emb2)
+if similarity > 0.6:
+ print("Same person")
+```
+
+### How to get age and gender?
+
+```python
+from uniface import AgeGender
+
+predictor = AgeGender()
+result = predictor.predict(image, face.bbox)
+print(f"{result.sex}, {result.age}")
+```
+
+---
+
+## Performance
+
+### How to speed up detection?
+
+1. Use smaller input:
+ ```python
+ detector = RetinaFace(input_size=(320, 320))
+ ```
+
+2. Skip frames in video:
+ ```python
+ if frame_count % 3 == 0:
+ faces = detector.detect(frame)
+ ```
+
+3. Use GPU:
+ ```bash
+ pip install uniface[gpu]
+ ```
+
+---
+
+## Accuracy
+
+### Detection threshold?
+
+Default: 0.5
+
+- Higher (0.7+): Fewer false positives
+- Lower (0.3): More detections
+
+### Similarity threshold?
+
+| Threshold | Meaning |
+|-----------|---------|
+| > 0.6 | Same person |
+| 0.4-0.6 | Uncertain |
+| < 0.4 | Different |
+
+---
+
+## Privacy
+
+### How to blur faces?
+
+```python
+from uniface.privacy import anonymize_faces
+
+result = anonymize_faces(image, method='pixelate')
+```
+
+### Available blur methods?
+
+`pixelate`, `gaussian`, `blackout`, `elliptical`, `median`
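+
+For example, to compare them on the same image:
+
+```python
+from uniface.privacy import anonymize_faces
+
+# Try each documented method on the same input
+for method in ('pixelate', 'gaussian', 'blackout', 'elliptical', 'median'):
+    result = anonymize_faces(image, method=method)
+```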
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..87e692f
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,137 @@
+---
+hide:
+ - toc
+ - navigation
+---
+
+
+
+# :material-face-recognition: UniFace { .hero-title }
+
+A lightweight, production-ready face analysis library built on ONNX Runtime.
+
+[PyPI](https://pypi.org/project/uniface/)
+[Python](https://www.python.org/)
+[License: MIT](https://opensource.org/licenses/MIT)
+[Downloads](https://pepy.tech/project/uniface)
+
+![UniFace logo](assets/logo.webp){ width="60%" }
+
+[Get Started](quickstart.md){ .md-button .md-button--primary }
+[View on GitHub](https://github.com/yakhyo/uniface){ .md-button }
+
+
+
+---
+
+## Features
+
+
+
+
+### :material-face-recognition: Face Detection
+ONNX-optimized RetinaFace, SCRFD, and YOLOv5-Face models with 5-point landmarks.
+
+
+
+### :material-account-check: Face Recognition
+ArcFace, MobileFace, and SphereFace embeddings for identity verification.
+
+
+
+### :material-map-marker: Landmarks
+Accurate 106-point facial landmark localization for detailed face analysis.
+
+
+
+### :material-account-details: Attributes
+Age, gender, race (FairFace), and emotion detection from faces.
+
+
+
+### :material-face-man-shimmer: Face Parsing
+BiSeNet semantic segmentation with 19 facial component classes.
+
+
+
+### :material-eye: Gaze Estimation
+Real-time gaze direction prediction with MobileGaze models.
+
+
+
+### :material-shield-check: Anti-Spoofing
+Face liveness detection with MiniFASNet to prevent fraud.
+
+
+
+### :material-blur: Privacy
+Face anonymization with 5 blur methods for privacy protection.
+
+
+
+
+---
+
+## Installation
+
+=== "Standard"
+
+ ```bash
+ pip install uniface
+ ```
+
+=== "GPU (CUDA)"
+
+ ```bash
+ pip install uniface[gpu]
+ ```
+
+=== "From Source"
+
+ ```bash
+ git clone https://github.com/yakhyo/uniface.git
+ cd uniface
+ pip install -e .
+ ```
+
+---
+
+## Next Steps
+
+
+
+
+### :material-rocket-launch: Quickstart
+Get up and running in 5 minutes with common use cases.
+
+[Quickstart Guide →](quickstart.md)
+
+
+
+### :material-book-open-variant: Concepts
+Learn about the architecture and design principles.
+
+[Read Concepts →](concepts/overview.md)
+
+
+
+### :material-puzzle: Modules
+Explore individual modules and their APIs.
+
+[Browse Modules →](modules/detection.md)
+
+
+
+### :material-chef-hat: Recipes
+Complete examples for common workflows.
+
+[View Recipes →](recipes/image-pipeline.md)
+
+
+
+
+---
+
+## License
+
+UniFace is released under the [MIT License](https://opensource.org/licenses/MIT).
diff --git a/docs/installation.md b/docs/installation.md
new file mode 100644
index 0000000..f4b277a
--- /dev/null
+++ b/docs/installation.md
@@ -0,0 +1,174 @@
+# Installation
+
+This guide covers all installation options for UniFace.
+
+---
+
+## Requirements
+
+- **Python**: 3.11 or higher
+- **Operating Systems**: macOS, Linux, Windows
+
+---
+
+## Quick Install
+
+The simplest way to install UniFace:
+
+```bash
+pip install uniface
+```
+
+This installs the CPU version with all core dependencies.
+
+---
+
+## Platform-Specific Installation
+
+### macOS (Apple Silicon - M1/M2/M3/M4)
+
+For Apple Silicon Macs, the standard installation automatically includes ARM64 optimizations:
+
+```bash
+pip install uniface
+```
+
+!!! tip "Native Performance"
+ The base `onnxruntime` package has native Apple Silicon support with ARM64 optimizations built-in since version 1.13+. No additional configuration needed.
+
+Verify ARM64 installation:
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64
+```
+
+---
+
+### Linux/Windows with NVIDIA GPU
+
+For CUDA acceleration on NVIDIA GPUs:
+
+```bash
+pip install uniface[gpu]
+```
+
+**Requirements:**
+
+- CUDA 11.x or 12.x
+- cuDNN 8.x
+
+!!! info "CUDA Compatibility"
+ See [ONNX Runtime GPU requirements](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html) for detailed compatibility matrix.
+
+Verify GPU installation:
+
+```python
+import onnxruntime as ort
+print("Available providers:", ort.get_available_providers())
+# Should include: 'CUDAExecutionProvider'
+```
+
+---
+
+### CPU-Only (All Platforms)
+
+```bash
+pip install uniface
+```
+
+Works on all platforms with automatic CPU fallback.
+
+---
+
+## Install from Source
+
+For development or the latest features:
+
+```bash
+git clone https://github.com/yakhyo/uniface.git
+cd uniface
+pip install -e .
+```
+
+With development dependencies:
+
+```bash
+pip install -e ".[dev]"
+```
+
+---
+
+## Dependencies
+
+UniFace has minimal dependencies:
+
+| Package | Purpose |
+|---------|---------|
+| `numpy` | Array operations |
+| `opencv-python` | Image processing |
+| `onnxruntime` | Model inference |
+| `requests` | Model download |
+| `tqdm` | Progress bars |
+
+---
+
+## Verify Installation
+
+Test your installation:
+
+```python
+import uniface
+print(f"UniFace version: {uniface.__version__}")
+
+# Check available ONNX providers
+import onnxruntime as ort
+print(f"Available providers: {ort.get_available_providers()}")
+
+# Quick test
+from uniface import RetinaFace
+detector = RetinaFace()
+print("Installation successful!")
+```
+
+---
+
+## Troubleshooting
+
+### Import Errors
+
+If you encounter import errors, ensure you're using Python 3.11+:
+
+```bash
+python --version
+# Should show: Python 3.11.x or higher
+```
+
+### Model Download Issues
+
+Models are automatically downloaded on first use. If downloads fail:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+# Manually download a model
+model_path = verify_model_weights(RetinaFaceWeights.MNET_V2)
+print(f"Model downloaded to: {model_path}")
+```
+
+### Performance Issues on Mac
+
+Verify you're using the ARM64 build (not x86_64 via Rosetta):
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64 (not x86_64)
+```
+
+---
+
+## Next Steps
+
+- [Quickstart](quickstart.md) - Get started with common use cases
+- [Concepts Overview](concepts/overview.md) - Understand the architecture
diff --git a/docs/license-attribution.md b/docs/license-attribution.md
new file mode 100644
index 0000000..73919cf
--- /dev/null
+++ b/docs/license-attribution.md
@@ -0,0 +1,43 @@
+# Licenses & Attribution
+
+## UniFace License
+
+UniFace is released under the [MIT License](https://opensource.org/licenses/MIT).
+
+---
+
+## Model Credits
+
+| Model | Source | License |
+|-------|--------|---------|
+| RetinaFace | [yakhyo/retinaface-pytorch](https://github.com/yakhyo/retinaface-pytorch) | MIT |
+| SCRFD | [InsightFace](https://github.com/deepinsight/insightface) | MIT |
+| YOLOv5-Face | [deepcam-cn/yolov5-face](https://github.com/deepcam-cn/yolov5-face) | GPL-3.0 |
+| ArcFace | [InsightFace](https://github.com/deepinsight/insightface) | MIT |
+| MobileFace | [yakhyo/face-recognition](https://github.com/yakhyo/face-recognition) | MIT |
+| SphereFace | [yakhyo/face-recognition](https://github.com/yakhyo/face-recognition) | MIT |
+| BiSeNet | [yakhyo/face-parsing](https://github.com/yakhyo/face-parsing) | MIT |
+| MobileGaze | [yakhyo/gaze-estimation](https://github.com/yakhyo/gaze-estimation) | MIT |
+| MiniFASNet | [minivision-ai/Silent-Face-Anti-Spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing) | Apache-2.0 |
+| FairFace | [yakhyo/fairface-onnx](https://github.com/yakhyo/fairface-onnx) | CC BY 4.0 |
+
+---
+
+## Papers
+
+- **RetinaFace**: [arXiv:1905.00641](https://arxiv.org/abs/1905.00641)
+- **SCRFD**: [arXiv:2105.04714](https://arxiv.org/abs/2105.04714)
+- **YOLOv5-Face**: [arXiv:2105.12931](https://arxiv.org/abs/2105.12931)
+- **ArcFace**: [arXiv:1801.07698](https://arxiv.org/abs/1801.07698)
+- **SphereFace**: [arXiv:1704.08063](https://arxiv.org/abs/1704.08063)
+- **BiSeNet**: [arXiv:1808.00897](https://arxiv.org/abs/1808.00897)
+
+---
+
+## Third-Party Libraries
+
+| Library | License |
+|---------|---------|
+| ONNX Runtime | MIT |
+| OpenCV | Apache-2.0 |
+| NumPy | BSD-3-Clause |
diff --git a/docs/modules/attributes.md b/docs/modules/attributes.md
new file mode 100644
index 0000000..87dfd76
--- /dev/null
+++ b/docs/modules/attributes.md
@@ -0,0 +1,279 @@
+# Attributes
+
+Facial attribute analysis for age, gender, race, and emotion detection.
+
+---
+
+## Available Models
+
+| Model | Attributes | Size | Notes |
+|-------|------------|------|-------|
+| **AgeGender** | Age, Gender | 8 MB | Exact age prediction |
+| **FairFace** | Gender, Age Group, Race | 44 MB | Balanced demographics |
+| **Emotion** | 7-8 emotions | 2 MB | Requires PyTorch |
+
+---
+
+## AgeGender
+
+Predicts exact age and binary gender.
+
+### Basic Usage
+
+```python
+from uniface import RetinaFace, AgeGender
+
+detector = RetinaFace()
+age_gender = AgeGender()
+
+faces = detector.detect(image)
+
+for face in faces:
+ result = age_gender.predict(image, face.bbox)
+ print(f"Gender: {result.sex}") # "Female" or "Male"
+ print(f"Age: {result.age} years")
+```
+
+### Output
+
+```python
+# AttributeResult fields
+result.gender # 0=Female, 1=Male
+result.sex # "Female" or "Male" (property)
+result.age # int, age in years
+result.age_group # None (not provided by this model)
+result.race # None (not provided by this model)
+```
+
+---
+
+## FairFace
+
+Predicts gender, age group, and race with balanced demographics.
+
+### Basic Usage
+
+```python
+from uniface import RetinaFace, FairFace
+
+detector = RetinaFace()
+fairface = FairFace()
+
+faces = detector.detect(image)
+
+for face in faces:
+ result = fairface.predict(image, face.bbox)
+ print(f"Gender: {result.sex}")
+ print(f"Age Group: {result.age_group}")
+ print(f"Race: {result.race}")
+```
+
+### Output
+
+```python
+# AttributeResult fields
+result.gender # 0=Female, 1=Male
+result.sex # "Female" or "Male"
+result.age # None (not provided by this model)
+result.age_group # "20-29", "30-39", etc.
+result.race # Race/ethnicity label
+```
+
+### Race Categories
+
+| Label |
+|-------|
+| White |
+| Black |
+| Latino Hispanic |
+| East Asian |
+| Southeast Asian |
+| Indian |
+| Middle Eastern |
+
+### Age Groups
+
+| Group |
+|-------|
+| 0-2 |
+| 3-9 |
+| 10-19 |
+| 20-29 |
+| 30-39 |
+| 40-49 |
+| 50-59 |
+| 60-69 |
+| 70+ |
+
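+These labels make simple demographic tallies straightforward:
+
+```python
+from collections import Counter
+
+# Count detected faces per FairFace age group
+age_counts = Counter()
+for face in faces:
+    result = fairface.predict(image, face.bbox)
+    age_counts[result.age_group] += 1
+print(age_counts.most_common())
+```
+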
+---
+
+## Emotion
+
+Predicts facial emotions. Requires PyTorch.
+
+!!! warning "Optional Dependency"
+ Emotion detection requires PyTorch. Install with:
+ ```bash
+ pip install torch
+ ```
+
+### Basic Usage
+
+```python
+from uniface import RetinaFace
+from uniface.attribute import Emotion
+from uniface.constants import DDAMFNWeights
+
+detector = RetinaFace()
+emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7)
+
+faces = detector.detect(image)
+
+for face in faces:
+ result = emotion.predict(image, face.landmarks)
+ print(f"Emotion: {result.emotion}")
+ print(f"Confidence: {result.confidence:.2%}")
+```
+
+### Emotion Classes
+
+=== "7-Class (AFFECNET7)"
+
+ | Label |
+ |-------|
+ | Neutral |
+ | Happy |
+ | Sad |
+ | Surprise |
+ | Fear |
+ | Disgust |
+ | Anger |
+
+=== "8-Class (AFFECNET8)"
+
+ | Label |
+ |-------|
+ | Neutral |
+ | Happy |
+ | Sad |
+ | Surprise |
+ | Fear |
+ | Disgust |
+ | Anger |
+ | Contempt |
+
+### Model Variants
+
+```python
+from uniface.attribute import Emotion
+from uniface.constants import DDAMFNWeights
+
+# 7-class emotion
+emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7)
+
+# 8-class emotion
+emotion = Emotion(model_name=DDAMFNWeights.AFFECNET8)
+```
+
+---
+
+## Combining Models
+
+### Full Attribute Analysis
+
+```python
+from uniface import RetinaFace, AgeGender, FairFace
+
+detector = RetinaFace()
+age_gender = AgeGender()
+fairface = FairFace()
+
+faces = detector.detect(image)
+
+for face in faces:
+ # Get exact age from AgeGender
+ ag_result = age_gender.predict(image, face.bbox)
+
+ # Get race from FairFace
+ ff_result = fairface.predict(image, face.bbox)
+
+ print(f"Gender: {ag_result.sex}")
+ print(f"Exact Age: {ag_result.age}")
+ print(f"Age Group: {ff_result.age_group}")
+ print(f"Race: {ff_result.race}")
+```
+
+### Using FaceAnalyzer
+
+```python
+from uniface import FaceAnalyzer
+
+analyzer = FaceAnalyzer(
+ detect=True,
+ recognize=False,
+ attributes=True # Uses AgeGender
+)
+
+faces = analyzer.analyze(image)
+
+for face in faces:
+ print(f"Age: {face.age}, Gender: {face.sex}")
+```
+
+---
+
+## Visualization
+
+```python
+import cv2
+
+def draw_attributes(image, face, result):
+ """Draw attributes on image."""
+ x1, y1, x2, y2 = map(int, face.bbox)
+
+ # Draw bounding box
+ cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ # Build label
+ label = f"{result.sex}"
+    if result.age is not None:
+        label += f", {result.age}y"
+ if result.age_group:
+ label += f", {result.age_group}"
+ if result.race:
+ label += f", {result.race}"
+
+ # Draw label
+ cv2.putText(
+ image, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2
+ )
+
+ return image
+
+# Usage
+for face in faces:
+ result = age_gender.predict(image, face.bbox)
+ image = draw_attributes(image, face, result)
+
+cv2.imwrite("attributes.jpg", image)
+```
+
+---
+
+## Accuracy Notes
+
+!!! note "Model Limitations"
+ - **AgeGender**: Trained on CelebA; accuracy varies by demographic
+ - **FairFace**: Trained for balanced demographics; better cross-racial accuracy
+ - **Emotion**: Accuracy depends on facial expression clarity
+
+ Always test on your specific use case and consider cultural context.
+
+---
+
+## Next Steps
+
+- [Parsing](parsing.md) - Face semantic segmentation
+- [Gaze](gaze.md) - Gaze estimation
+- [Image Pipeline Recipe](../recipes/image-pipeline.md) - Complete workflow
diff --git a/docs/modules/detection.md b/docs/modules/detection.md
new file mode 100644
index 0000000..3d49b3b
--- /dev/null
+++ b/docs/modules/detection.md
@@ -0,0 +1,251 @@
+# Detection
+
+Face detection is the first step in any face analysis pipeline. UniFace provides three detection models.
+
+---
+
+## Available Models
+
+| Model | Backbone | Size | WIDER FACE (Easy/Medium/Hard) | Best For |
+|-------|----------|------|-------------------------------|----------|
+| **RetinaFace** | MobileNet V2 | 3.5 MB | 91.7% / 91.0% / 86.6% | Balanced (recommended) |
+| **SCRFD** | SCRFD-10G | 17 MB | 95.2% / 93.9% / 83.1% | High accuracy |
+| **YOLOv5-Face** | YOLOv5s | 28 MB | 94.3% / 92.6% / 83.2% | Real-time |
+
+---
+
+## RetinaFace
+
+The recommended detector for most use cases.
+
+### Basic Usage
+
+```python
+from uniface import RetinaFace
+
+detector = RetinaFace()
+faces = detector.detect(image)
+
+for face in faces:
+ print(f"Confidence: {face.confidence:.2f}")
+ print(f"BBox: {face.bbox}")
+ print(f"Landmarks: {face.landmarks.shape}") # (5, 2)
+```
+
+### Model Variants
+
+```python
+from uniface import RetinaFace
+from uniface.constants import RetinaFaceWeights
+
+# Lightweight (mobile/edge)
+detector = RetinaFace(model_name=RetinaFaceWeights.MNET_025)
+
+# Balanced (default)
+detector = RetinaFace(model_name=RetinaFaceWeights.MNET_V2)
+
+# High accuracy
+detector = RetinaFace(model_name=RetinaFaceWeights.RESNET34)
+```
+
+| Variant | Params | Size | Easy | Medium | Hard |
+|---------|--------|------|------|--------|------|
+| MNET_025 | 0.4M | 1.7 MB | 88.5% | 87.0% | 80.6% |
+| MNET_050 | 1.0M | 2.6 MB | 89.4% | 88.0% | 82.4% |
+| MNET_V1 | 3.5M | 3.8 MB | 90.6% | 89.1% | 84.1% |
+| **MNET_V2** ⭐ | 3.2M | 3.5 MB | 91.7% | 91.0% | 86.6% |
+| RESNET18 | 11.7M | 27 MB | 92.5% | 91.0% | 86.6% |
+| RESNET34 | 24.8M | 56 MB | 94.2% | 93.1% | 88.9% |
+
+### Configuration
+
+```python
+detector = RetinaFace(
+ model_name=RetinaFaceWeights.MNET_V2,
+ confidence_threshold=0.5, # Min confidence
+ nms_threshold=0.4, # NMS IoU threshold
+ input_size=(640, 640), # Input resolution
+ dynamic_size=False # Enable dynamic input size
+)
+```
+
+---
+
+## SCRFD
+
+State-of-the-art detection with excellent accuracy-speed tradeoff.
+
+### Basic Usage
+
+```python
+from uniface import SCRFD
+
+detector = SCRFD()
+faces = detector.detect(image)
+```
+
+### Model Variants
+
+```python
+from uniface import SCRFD
+from uniface.constants import SCRFDWeights
+
+# Real-time (lightweight)
+detector = SCRFD(model_name=SCRFDWeights.SCRFD_500M_KPS)
+
+# High accuracy (default)
+detector = SCRFD(model_name=SCRFDWeights.SCRFD_10G_KPS)
+```
+
+| Variant | Params | Size | Easy | Medium | Hard |
+|---------|--------|------|------|--------|------|
+| SCRFD_500M_KPS | 0.6M | 2.5 MB | 90.6% | 88.1% | 68.5% |
+| **SCRFD_10G_KPS** ⭐ | 4.2M | 17 MB | 95.2% | 93.9% | 83.1% |
+
+### Configuration
+
+```python
+detector = SCRFD(
+ model_name=SCRFDWeights.SCRFD_10G_KPS,
+ confidence_threshold=0.5,
+ nms_threshold=0.4,
+ input_size=(640, 640)
+)
+```
+
+---
+
+## YOLOv5-Face
+
+YOLO-based detection optimized for faces.
+
+### Basic Usage
+
+```python
+from uniface import YOLOv5Face
+
+detector = YOLOv5Face()
+faces = detector.detect(image)
+```
+
+### Model Variants
+
+```python
+from uniface import YOLOv5Face
+from uniface.constants import YOLOv5FaceWeights
+
+# Lightweight
+detector = YOLOv5Face(model_name=YOLOv5FaceWeights.YOLOV5N)
+
+# Balanced (default)
+detector = YOLOv5Face(model_name=YOLOv5FaceWeights.YOLOV5S)
+
+# High accuracy
+detector = YOLOv5Face(model_name=YOLOv5FaceWeights.YOLOV5M)
+```
+
+| Variant | Size | Easy | Medium | Hard |
+|---------|------|------|--------|------|
+| YOLOV5N | 11 MB | 93.6% | 91.5% | 80.5% |
+| **YOLOV5S** ⭐ | 28 MB | 94.3% | 92.6% | 83.2% |
+| YOLOV5M | 82 MB | 95.3% | 93.8% | 85.3% |
+
+!!! note "Fixed Input Size"
+ YOLOv5-Face uses a fixed input size of 640×640.
+
+### Configuration
+
+```python
+detector = YOLOv5Face(
+ model_name=YOLOv5FaceWeights.YOLOV5S,
+ confidence_threshold=0.6,
+ nms_threshold=0.5
+)
+```
+
+---
+
+## Factory Function
+
+Create detectors dynamically:
+
+```python
+from uniface import create_detector
+
+detector = create_detector('retinaface')
+# or
+detector = create_detector('scrfd')
+# or
+detector = create_detector('yolov5face')
+```
+
+---
+
+## High-Level API
+
+One-line detection:
+
+```python
+from uniface import detect_faces
+
+faces = detect_faces(
+ image,
+ method='retinaface',
+ confidence_threshold=0.5
+)
+```
+
+---
+
+## Output Format
+
+All detectors return `list[Face]`:
+
+```python
+for face in faces:
+ # Bounding box [x1, y1, x2, y2]
+ bbox = face.bbox
+
+ # Detection confidence (0-1)
+ confidence = face.confidence
+
+ # 5-point landmarks (5, 2)
+ landmarks = face.landmarks
+ # [left_eye, right_eye, nose, left_mouth, right_mouth]
+```
+
+---
+
+## Visualization
+
+```python
+import cv2
+
+from uniface.visualization import draw_detections
+
+draw_detections(
+ image=image,
+ bboxes=[f.bbox for f in faces],
+ scores=[f.confidence for f in faces],
+ landmarks=[f.landmarks for f in faces],
+ vis_threshold=0.6
+)
+
+cv2.imwrite("result.jpg", image)
+```
+
+---
+
+## Performance Comparison
+
+Benchmark on your hardware:
+
+```bash
+python tools/detection.py --source image.jpg --iterations 100
+```
+
+---
+
+## Next Steps
+
+- [Recognition](recognition.md) - Extract face embeddings
+- [Landmarks](landmarks.md) - 106-point landmarks
+- [Image Pipeline Recipe](../recipes/image-pipeline.md) - Complete workflow
diff --git a/docs/modules/gaze.md b/docs/modules/gaze.md
new file mode 100644
index 0000000..c55737a
--- /dev/null
+++ b/docs/modules/gaze.md
@@ -0,0 +1,270 @@
+# Gaze Estimation
+
+Gaze estimation predicts where a person is looking (pitch and yaw angles).
+
+---
+
+## Available Models
+
+| Model | Backbone | Size | MAE* | Best For |
+|-------|----------|------|------|----------|
+| ResNet18 | ResNet18 | 43 MB | 12.84° | Balanced |
+| **ResNet34** ⭐ | ResNet34 | 82 MB | 11.33° | Recommended |
+| ResNet50 | ResNet50 | 91 MB | 11.34° | High accuracy |
+| MobileNetV2 | MobileNetV2 | 9.6 MB | 13.07° | Mobile |
+| MobileOne-S0 | MobileOne | 4.8 MB | 12.58° | Lightweight |
+
+*MAE = Mean Absolute Error on Gaze360 test set (lower is better)
+
+---
+
+## Basic Usage
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, MobileGaze
+
+detector = RetinaFace()
+gaze_estimator = MobileGaze()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+for face in faces:
+ # Crop face
+ x1, y1, x2, y2 = map(int, face.bbox)
+ face_crop = image[y1:y2, x1:x2]
+
+ if face_crop.size > 0:
+ # Estimate gaze
+ result = gaze_estimator.estimate(face_crop)
+
+ # Convert to degrees
+ pitch_deg = np.degrees(result.pitch)
+ yaw_deg = np.degrees(result.yaw)
+
+ print(f"Pitch: {pitch_deg:.1f}°, Yaw: {yaw_deg:.1f}°")
+```
+
+---
+
+## Model Variants
+
+```python
+from uniface import MobileGaze
+from uniface.constants import GazeWeights
+
+# Default (ResNet34, recommended)
+gaze = MobileGaze()
+
+# Lightweight for mobile/edge
+gaze = MobileGaze(model_name=GazeWeights.MOBILEONE_S0)
+
+# Higher accuracy
+gaze = MobileGaze(model_name=GazeWeights.RESNET50)
+```
+
+---
+
+## Output Format
+
+```python
+result = gaze_estimator.estimate(face_crop)
+
+# GazeResult dataclass
+result.pitch # Vertical angle in radians
+result.yaw # Horizontal angle in radians
+```
+
+### Angle Convention
+
+```
+ pitch = +90° (looking up)
+ │
+ │
+yaw = -90° ────┼──── yaw = +90°
+(looking left) │ (looking right)
+ │
+ pitch = -90° (looking down)
+```
+
+- **Pitch**: Vertical gaze angle
+ - Positive = looking up
+ - Negative = looking down
+
+- **Yaw**: Horizontal gaze angle
+ - Positive = looking right
+ - Negative = looking left
+
+---
+
+## Visualization
+
+```python
+from uniface.visualization import draw_gaze
+
+# Detect faces
+faces = detector.detect(image)
+
+for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ face_crop = image[y1:y2, x1:x2]
+
+ if face_crop.size > 0:
+ result = gaze_estimator.estimate(face_crop)
+
+ # Draw gaze arrow on image
+ draw_gaze(image, face.bbox, result.pitch, result.yaw)
+
+cv2.imwrite("gaze_output.jpg", image)
+```
+
+### Custom Visualization
+
+```python
+import cv2
+import numpy as np
+
+def draw_gaze_custom(image, bbox, pitch, yaw, length=100, color=(0, 255, 0)):
+ """Draw custom gaze arrow."""
+ x1, y1, x2, y2 = map(int, bbox)
+
+ # Face center
+ cx = (x1 + x2) // 2
+ cy = (y1 + y2) // 2
+
+ # Calculate endpoint
+ dx = -length * np.sin(yaw) * np.cos(pitch)
+ dy = -length * np.sin(pitch)
+
+ # Draw arrow
+ end_x = int(cx + dx)
+ end_y = int(cy + dy)
+
+ cv2.arrowedLine(image, (cx, cy), (end_x, end_y), color, 2, tipLength=0.3)
+
+ return image
+```
+
+---
+
+## Real-Time Gaze Tracking
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, MobileGaze
+from uniface.visualization import draw_gaze
+
+detector = RetinaFace()
+gaze_estimator = MobileGaze()
+
+cap = cv2.VideoCapture(0)
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ face_crop = frame[y1:y2, x1:x2]
+
+ if face_crop.size > 0:
+ result = gaze_estimator.estimate(face_crop)
+
+ # Draw bounding box
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ # Draw gaze
+ draw_gaze(frame, face.bbox, result.pitch, result.yaw)
+
+ # Display angles
+ pitch_deg = np.degrees(result.pitch)
+ yaw_deg = np.degrees(result.yaw)
+ label = f"P:{pitch_deg:.0f} Y:{yaw_deg:.0f}"
+ cv2.putText(frame, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+ cv2.imshow("Gaze Estimation", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Use Cases
+
+### Attention Detection
+
+```python
+def is_looking_at_camera(result, threshold=15):
+ """Check if person is looking at camera."""
+ pitch_deg = abs(np.degrees(result.pitch))
+ yaw_deg = abs(np.degrees(result.yaw))
+
+ return pitch_deg < threshold and yaw_deg < threshold
+
+# Usage
+result = gaze_estimator.estimate(face_crop)
+if is_looking_at_camera(result):
+ print("Looking at camera")
+else:
+ print("Looking away")
+```
+
+### Gaze Direction Classification
+
+```python
+def classify_gaze_direction(result, threshold=20):
+ """Classify gaze into directions."""
+ pitch_deg = np.degrees(result.pitch)
+ yaw_deg = np.degrees(result.yaw)
+
+ directions = []
+
+ if pitch_deg > threshold:
+ directions.append("up")
+ elif pitch_deg < -threshold:
+ directions.append("down")
+
+ if yaw_deg > threshold:
+ directions.append("right")
+ elif yaw_deg < -threshold:
+ directions.append("left")
+
+ if not directions:
+ return "center"
+
+ return " ".join(directions)
+
+# Usage
+result = gaze_estimator.estimate(face_crop)
+direction = classify_gaze_direction(result)
+print(f"Looking: {direction}")
+```
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_gaze_estimator
+
+gaze = create_gaze_estimator() # Returns MobileGaze
+```
+
+---
+
+## Next Steps
+
+- [Anti-Spoofing](spoofing.md) - Face liveness detection
+- [Privacy](privacy.md) - Face anonymization
+- [Video Recipe](../recipes/video-webcam.md) - Real-time processing
diff --git a/docs/modules/landmarks.md b/docs/modules/landmarks.md
new file mode 100644
index 0000000..bfe5407
--- /dev/null
+++ b/docs/modules/landmarks.md
@@ -0,0 +1,250 @@
+# Landmarks
+
+Facial landmark detection provides precise localization of facial features.
+
+---
+
+## Available Models
+
+| Model | Points | Size | Use Case |
+|-------|--------|------|----------|
+| **Landmark106** | 106 | 14 MB | Detailed face analysis |
+
+!!! info "5-Point Landmarks"
+ Basic 5-point landmarks are included with all detection models (RetinaFace, SCRFD, YOLOv5-Face).
+
+---
+
+## 106-Point Landmarks
+
+### Basic Usage
+
+```python
+from uniface import RetinaFace, Landmark106
+
+detector = RetinaFace()
+landmarker = Landmark106()
+
+# Detect face
+faces = detector.detect(image)
+
+# Get detailed landmarks
+if faces:
+ landmarks = landmarker.get_landmarks(image, faces[0].bbox)
+ print(f"Landmarks shape: {landmarks.shape}") # (106, 2)
+```
+
+### Landmark Groups
+
+| Range | Group | Points |
+|-------|-------|--------|
+| 0-32 | Face Contour | 33 |
+| 33-50 | Eyebrows | 18 |
+| 51-62 | Nose | 12 |
+| 63-86 | Eyes | 24 |
+| 87-105 | Mouth | 19 |
+
+### Extract Specific Features
+
+```python
+landmarks = landmarker.get_landmarks(image, face.bbox)
+
+# Face contour
+contour = landmarks[0:33]
+
+# Left eyebrow
+left_eyebrow = landmarks[33:42]
+
+# Right eyebrow
+right_eyebrow = landmarks[42:51]
+
+# Nose
+nose = landmarks[51:63]
+
+# Left eye (approximate range)
+left_eye = landmarks[63:72]
+
+# Right eye (approximate range)
+right_eye = landmarks[76:84]
+
+# Mouth
+mouth = landmarks[87:106]
+```
+
+---
+
+## 5-Point Landmarks (Detection)
+
+All detection models provide 5-point landmarks:
+
+```python
+from uniface import RetinaFace
+
+detector = RetinaFace()
+faces = detector.detect(image)
+
+if faces:
+ landmarks_5 = faces[0].landmarks
+ print(f"Shape: {landmarks_5.shape}") # (5, 2)
+
+ left_eye = landmarks_5[0]
+ right_eye = landmarks_5[1]
+ nose = landmarks_5[2]
+ left_mouth = landmarks_5[3]
+ right_mouth = landmarks_5[4]
+```
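+
+The 5-point layout is enough for simple geometry. For example, a small sketch estimating in-plane head roll from the eye positions:
+
+```python
+import numpy as np
+
+def estimate_roll(landmarks_5):
+    """Estimate head roll (in-plane rotation) from the two eye positions."""
+    dx, dy = landmarks_5[1] - landmarks_5[0]  # right eye minus left eye
+    return np.degrees(np.arctan2(dy, dx))
+
+roll = estimate_roll(faces[0].landmarks)
+print(f"Roll: {roll:.1f} deg")
+```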
+
+---
+
+## Visualization
+
+### Draw 106 Landmarks
+
+```python
+import cv2
+
+def draw_landmarks(image, landmarks, color=(0, 255, 0), radius=2):
+ """Draw landmarks on image."""
+ for x, y in landmarks.astype(int):
+ cv2.circle(image, (x, y), radius, color, -1)
+ return image
+
+# Usage
+landmarks = landmarker.get_landmarks(image, face.bbox)
+image_with_landmarks = draw_landmarks(image.copy(), landmarks)
+cv2.imwrite("landmarks.jpg", image_with_landmarks)
+```
+
+### Draw with Connections
+
+```python
+def draw_landmarks_with_connections(image, landmarks):
+ """Draw landmarks with facial feature connections."""
+ landmarks = landmarks.astype(int)
+
+ # Face contour (0-32)
+ for i in range(32):
+ cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (255, 255, 0), 1)
+
+ # Left eyebrow (33-41)
+ for i in range(33, 41):
+ cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (0, 255, 0), 1)
+
+ # Right eyebrow (42-50)
+ for i in range(42, 50):
+ cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (0, 255, 0), 1)
+
+ # Nose (51-62)
+ for i in range(51, 62):
+ cv2.line(image, tuple(landmarks[i]), tuple(landmarks[i+1]), (0, 0, 255), 1)
+
+ # Draw points
+ for x, y in landmarks:
+ cv2.circle(image, (x, y), 2, (0, 255, 255), -1)
+
+ return image
+```
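+
+Usage mirrors the simpler helper above:
+
+```python
+landmarks = landmarker.get_landmarks(image, face.bbox)
+annotated = draw_landmarks_with_connections(image.copy(), landmarks)
+cv2.imwrite("landmarks_connected.jpg", annotated)
+```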
+
+---
+
+## Use Cases
+
+### Face Alignment
+
+```python
+from uniface import face_alignment
+
+# Align face using 5-point landmarks
+aligned = face_alignment(image, faces[0].landmarks)
+# Returns: 112x112 aligned face
+```
+
+### Eye Aspect Ratio (Blink Detection)
+
+```python
+import numpy as np
+
+def eye_aspect_ratio(eye_landmarks):
+ """Calculate eye aspect ratio for blink detection."""
+ # Vertical distances
+ v1 = np.linalg.norm(eye_landmarks[1] - eye_landmarks[5])
+ v2 = np.linalg.norm(eye_landmarks[2] - eye_landmarks[4])
+
+ # Horizontal distance
+ h = np.linalg.norm(eye_landmarks[0] - eye_landmarks[3])
+
+ ear = (v1 + v2) / (2.0 * h)
+ return ear
+
+# Usage with 106-point landmarks
+left_eye = landmarks[63:72] # Approximate eye points
+ear = eye_aspect_ratio(left_eye)
+
+if ear < 0.2:
+ print("Eye closed (blink detected)")
+```
+
+### Head Pose Estimation
+
+```python
+import cv2
+import numpy as np
+
+def estimate_head_pose(landmarks, image_shape):
+ """Estimate head pose from facial landmarks."""
+ # 3D model points (generic face model)
+ model_points = np.array([
+ (0.0, 0.0, 0.0), # Nose tip
+ (0.0, -330.0, -65.0), # Chin
+ (-225.0, 170.0, -135.0), # Left eye corner
+ (225.0, 170.0, -135.0), # Right eye corner
+ (-150.0, -150.0, -125.0), # Left mouth corner
+ (150.0, -150.0, -125.0) # Right mouth corner
+ ], dtype=np.float64)
+
+ # 2D image points (from 106 landmarks)
+ image_points = np.array([
+ landmarks[51], # Nose tip
+ landmarks[16], # Chin
+ landmarks[63], # Left eye corner
+ landmarks[76], # Right eye corner
+ landmarks[87], # Left mouth corner
+ landmarks[93] # Right mouth corner
+ ], dtype=np.float64)
+
+ # Camera matrix
+ h, w = image_shape[:2]
+ focal_length = w
+ center = (w / 2, h / 2)
+ camera_matrix = np.array([
+ [focal_length, 0, center[0]],
+ [0, focal_length, center[1]],
+ [0, 0, 1]
+ ], dtype=np.float64)
+
+ # Solve PnP
+ dist_coeffs = np.zeros((4, 1))
+ success, rotation_vector, translation_vector = cv2.solvePnP(
+ model_points, image_points, camera_matrix, dist_coeffs
+ )
+
+ return rotation_vector, translation_vector
+```
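+
+The rotation vector can be converted to Euler angles with OpenCV. A sketch, continuing from the imports above:
+
+```python
+def rotation_to_euler(rotation_vector):
+    """Convert a Rodrigues rotation vector to Euler angles in degrees."""
+    rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
+    # decomposeProjectionMatrix returns the Euler angles as its last output
+    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(
+        np.hstack((rotation_matrix, np.zeros((3, 1))))
+    )
+    pitch, yaw, roll = euler_angles.flatten()
+    return pitch, yaw, roll
+```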
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_landmarker
+
+landmarker = create_landmarker() # Returns Landmark106
+```
+
+---
+
+## Next Steps
+
+- [Attributes](attributes.md) - Age, gender, emotion
+- [Gaze](gaze.md) - Gaze estimation
+- [Detection](detection.md) - Face detection with 5-point landmarks
diff --git a/docs/modules/parsing.md b/docs/modules/parsing.md
new file mode 100644
index 0000000..46c7dd3
--- /dev/null
+++ b/docs/modules/parsing.md
@@ -0,0 +1,265 @@
+# Parsing
+
+Face parsing segments faces into semantic components (skin, eyes, nose, mouth, hair, etc.).
+
+---
+
+## Available Models
+
+| Model | Backbone | Size | Classes | Best For |
+|-------|----------|------|---------|----------|
+| **BiSeNet ResNet18** ⭐ | ResNet18 | 51 MB | 19 | Balanced (recommended) |
+| **BiSeNet ResNet34** | ResNet34 | 89 MB | 19 | Higher accuracy |
+
+---
+
+## Basic Usage
+
+```python
+import cv2
+from uniface.parsing import BiSeNet
+from uniface.visualization import vis_parsing_maps
+
+# Initialize parser
+parser = BiSeNet()
+
+# Load face image (cropped)
+face_image = cv2.imread("face.jpg")
+
+# Parse face
+mask = parser.parse(face_image)
+print(f"Mask shape: {mask.shape}") # (H, W)
+
+# Visualize
+face_rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
+vis_result = vis_parsing_maps(face_rgb, mask, save_image=False)
+
+# Save result
+vis_bgr = cv2.cvtColor(vis_result, cv2.COLOR_RGB2BGR)
+cv2.imwrite("parsed.jpg", vis_bgr)
+```
+
+---
+
+## 19 Facial Component Classes
+
+| ID | Class | ID | Class |
+|----|-------|----|-------|
+| 0 | Background | 10 | Ear Ring |
+| 1 | Skin | 11 | Nose |
+| 2 | Left Eyebrow | 12 | Mouth |
+| 3 | Right Eyebrow | 13 | Upper Lip |
+| 4 | Left Eye | 14 | Lower Lip |
+| 5 | Right Eye | 15 | Neck |
+| 6 | Eye Glasses | 16 | Neck Lace |
+| 7 | Left Ear | 17 | Cloth |
+| 8 | Right Ear | 18 | Hair |
+| 9 | Hat | | |
+
+---
+
+## Model Variants
+
+```python
+from uniface.parsing import BiSeNet
+from uniface.constants import ParsingWeights
+
+# Default (ResNet18)
+parser = BiSeNet()
+
+# Higher accuracy (ResNet34)
+parser = BiSeNet(model_name=ParsingWeights.RESNET34)
+```
+
+| Variant | Params | Size | Notes |
+|---------|--------|------|-------|
+| **RESNET18** ⭐ | 13.3M | 51 MB | Recommended |
+| RESNET34 | 24.1M | 89 MB | Higher accuracy |
+
+---
+
+## Full Pipeline
+
+### With Face Detection
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.parsing import BiSeNet
+from uniface.visualization import vis_parsing_maps
+
+detector = RetinaFace()
+parser = BiSeNet()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+for i, face in enumerate(faces):
+ # Crop face
+ x1, y1, x2, y2 = map(int, face.bbox)
+ face_crop = image[y1:y2, x1:x2]
+
+ # Parse
+ mask = parser.parse(face_crop)
+
+ # Visualize
+ face_rgb = cv2.cvtColor(face_crop, cv2.COLOR_BGR2RGB)
+ vis_result = vis_parsing_maps(face_rgb, mask, save_image=False)
+
+ # Save
+ vis_bgr = cv2.cvtColor(vis_result, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(f"face_{i}_parsed.jpg", vis_bgr)
+```
+
+---
+
+## Extract Specific Components
+
+### Get Single Component Mask
+
+```python
+import numpy as np
+
+# Parse face
+mask = parser.parse(face_image)
+
+# Extract specific component
+SKIN = 1
+HAIR = 18
+LEFT_EYE = 4
+RIGHT_EYE = 5
+
+# Binary mask for skin
+skin_mask = (mask == SKIN).astype(np.uint8) * 255
+
+# Binary mask for hair
+hair_mask = (mask == HAIR).astype(np.uint8) * 255
+
+# Binary mask for eyes
+eyes_mask = ((mask == LEFT_EYE) | (mask == RIGHT_EYE)).astype(np.uint8) * 255
+```
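+
+Continuing the snippet above, a binary mask can be applied back to the image with `cv2.bitwise_and`:
+
+```python
+import cv2
+
+# Keep only the skin pixels from the original image
+skin_only = cv2.bitwise_and(face_image, face_image, mask=skin_mask)
+cv2.imwrite("skin_only.jpg", skin_only)
+```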
+
+### Count Pixels per Component
+
+```python
+import numpy as np
+
+mask = parser.parse(face_image)
+
+component_names = {
+ 0: 'Background', 1: 'Skin', 2: 'L-Eyebrow', 3: 'R-Eyebrow',
+ 4: 'L-Eye', 5: 'R-Eye', 6: 'Glasses', 7: 'L-Ear', 8: 'R-Ear',
+ 9: 'Hat', 10: 'Earring', 11: 'Nose', 12: 'Mouth',
+ 13: 'U-Lip', 14: 'L-Lip', 15: 'Neck', 16: 'Necklace',
+ 17: 'Cloth', 18: 'Hair'
+}
+
+for class_id in np.unique(mask):
+ pixel_count = np.sum(mask == class_id)
+ name = component_names.get(class_id, f'Class {class_id}')
+ print(f"{name}: {pixel_count} pixels")
+```
+
+---
+
+## Applications
+
+### Face Makeup
+
+Apply virtual makeup using component masks:
+
+```python
+import cv2
+import numpy as np
+
+def apply_lip_color(image, mask, color=(180, 50, 50), alpha=0.4):
+    """Apply lip color using the parsing mask."""
+    # Binary mask covering upper lip (13) and lower lip (14)
+    lip_mask = ((mask == 13) | (mask == 14)).astype(np.uint8)
+
+    # Solid color overlay blended with the whole image
+    overlay = np.zeros_like(image)
+    overlay[:] = color
+    blended = cv2.addWeighted(image, 1 - alpha, overlay, alpha, 0)
+
+    # Keep the blend only inside the lip region
+    result = image.copy()
+    result[lip_mask == 1] = blended[lip_mask == 1]
+
+    return result
+
+### Background Replacement
+
+```python
+def replace_background(image, mask, background):
+ """Replace background using parsing mask."""
+ # Create foreground mask (everything except background)
+ foreground_mask = (mask != 0).astype(np.uint8)
+
+ # Resize background to match image
+ background = cv2.resize(background, (image.shape[1], image.shape[0]))
+
+ # Combine
+ result = image.copy()
+ result[foreground_mask == 0] = background[foreground_mask == 0]
+
+ return result
+```
+
+### Hair Segmentation
+
+```python
+def get_hair_mask(mask):
+ """Extract clean hair mask."""
+ hair_mask = (mask == 18).astype(np.uint8) * 255
+
+ # Clean up with morphological operations
+ kernel = np.ones((5, 5), np.uint8)
+ hair_mask = cv2.morphologyEx(hair_mask, cv2.MORPH_CLOSE, kernel)
+ hair_mask = cv2.morphologyEx(hair_mask, cv2.MORPH_OPEN, kernel)
+
+ return hair_mask
+```
+
+---
+
+## Visualization Options
+
+```python
+from uniface.visualization import vis_parsing_maps
+
+# Default visualization
+vis_result = vis_parsing_maps(face_rgb, mask)
+
+# Return the overlay without writing it to disk
+vis_result = vis_parsing_maps(
+ face_rgb,
+ mask,
+ save_image=False, # Don't save to file
+)
+```
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_face_parser
+
+parser = create_face_parser() # Returns BiSeNet
+```
+
+---
+
+## Next Steps
+
+- [Gaze](gaze.md) - Gaze estimation
+- [Privacy](privacy.md) - Face anonymization
+- [Detection](detection.md) - Face detection
diff --git a/docs/modules/privacy.md b/docs/modules/privacy.md
new file mode 100644
index 0000000..fc8d820
--- /dev/null
+++ b/docs/modules/privacy.md
@@ -0,0 +1,277 @@
+# Privacy
+
+Face anonymization protects privacy by blurring or obscuring faces in images and videos.
+
+---
+
+## Available Methods
+
+| Method | Description | Use Case |
+|--------|-------------|----------|
+| **pixelate** | Blocky pixelation | News media standard |
+| **gaussian** | Smooth blur | Natural appearance |
+| **blackout** | Solid color fill | Maximum privacy |
+| **elliptical** | Oval-shaped blur | Natural face shape |
+| **median** | Edge-preserving blur | Artistic effect |
+
+---
+
+## Quick Start
+
+### One-Line Anonymization
+
+```python
+from uniface.privacy import anonymize_faces
+import cv2
+
+image = cv2.imread("group_photo.jpg")
+anonymized = anonymize_faces(image, method='pixelate')
+cv2.imwrite("anonymized.jpg", anonymized)
+```
+
+---
+
+## BlurFace Class
+
+For more control, use the `BlurFace` class:
+
+```python
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+import cv2
+
+detector = RetinaFace()
+blurrer = BlurFace(method='gaussian', blur_strength=5.0)
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+anonymized = blurrer.anonymize(image, faces)
+
+cv2.imwrite("anonymized.jpg", anonymized)
+```
+
+---
+
+## Blur Methods
+
+### Pixelate
+
+Blocky pixelation effect (common in news media):
+
+```python
+blurrer = BlurFace(method='pixelate', pixel_blocks=10)
+```
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `pixel_blocks` | 10 | Number of blocks (lower = more pixelated) |
+
+### Gaussian
+
+Smooth, natural-looking blur:
+
+```python
+blurrer = BlurFace(method='gaussian', blur_strength=3.0)
+```
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `blur_strength` | 3.0 | Blur intensity (higher = more blur) |
+
+### Blackout
+
+Solid color fill for maximum privacy:
+
+```python
+blurrer = BlurFace(method='blackout', color=(0, 0, 0))
+```
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `color` | (0, 0, 0) | Fill color (BGR format) |
+
+### Elliptical
+
+Oval-shaped blur matching natural face shape:
+
+```python
+blurrer = BlurFace(method='elliptical', blur_strength=3.0, margin=20)
+```
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `blur_strength` | 3.0 | Blur intensity |
+| `margin` | 20 | Margin around face |
+
+### Median
+
+Edge-preserving blur with artistic effect:
+
+```python
+blurrer = BlurFace(method='median', blur_strength=3.0)
+```
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| `blur_strength` | 3.0 | Blur intensity |
+
+---
+
+## In-Place Processing
+
+Modify image directly (faster, saves memory):
+
+```python
+blurrer = BlurFace(method='pixelate')
+
+# In-place modification
+result = blurrer.anonymize(image, faces, inplace=True)
+# 'image' and 'result' point to the same array
+```
+
+---
+
+## Real-Time Anonymization
+
+### Webcam
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='pixelate')
+
+cap = cv2.VideoCapture(0)
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+ frame = blurrer.anonymize(frame, faces, inplace=True)
+
+ cv2.imshow('Anonymized', frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+### Video File
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='gaussian')
+
+cap = cv2.VideoCapture("input_video.mp4")
+fps = cap.get(cv2.CAP_PROP_FPS)
+width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+ frame = blurrer.anonymize(frame, faces, inplace=True)
+ out.write(frame)
+
+cap.release()
+out.release()
+```
+
+---
+
+## Selective Anonymization
+
+### Exclude Specific Faces
+
+```python
+import numpy as np
+
+def anonymize_except(image, all_faces, exclude_embeddings, recognizer, blurrer, threshold=0.6):
+ """Anonymize all faces except those matching exclude_embeddings."""
+ faces_to_blur = []
+
+ for face in all_faces:
+ # Get embedding
+ embedding = recognizer.get_normalized_embedding(image, face.landmarks)
+
+ # Check if should be excluded
+ should_exclude = False
+ for ref_emb in exclude_embeddings:
+ similarity = np.dot(embedding, ref_emb.T)[0][0]
+ if similarity > threshold:
+ should_exclude = True
+ break
+
+ if not should_exclude:
+ faces_to_blur.append(face)
+
+ # Blur remaining faces
+ return blurrer.anonymize(image, faces_to_blur)
+```
+
+### Confidence-Based
+
+```python
+def anonymize_low_confidence(image, faces, blurrer, confidence_threshold=0.8):
+ """Anonymize faces below confidence threshold."""
+ faces_to_blur = [f for f in faces if f.confidence < confidence_threshold]
+ return blurrer.anonymize(image, faces_to_blur)
+```
+
+---
+
+## Comparison
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+methods = ['pixelate', 'gaussian', 'blackout', 'elliptical', 'median']
+
+for method in methods:
+ blurrer = BlurFace(method=method)
+ result = blurrer.anonymize(image.copy(), faces)
+ cv2.imwrite(f"anonymized_{method}.jpg", result)
+```
+
+---
+
+## Command-Line Tool
+
+```bash
+# Anonymize image with pixelation
+python tools/face_anonymize.py --source photo.jpg
+
+# Real-time webcam
+python tools/face_anonymize.py --source 0 --method gaussian
+
+# Custom blur strength
+python tools/face_anonymize.py --source photo.jpg --method gaussian --blur-strength 5.0
+```
+
+---
+
+## Next Steps
+
+- [Anonymize Stream Recipe](../recipes/anonymize-stream.md) - Video pipeline
+- [Detection](detection.md) - Face detection options
+- [Batch Processing Recipe](../recipes/batch-processing.md) - Process multiple files
diff --git a/docs/modules/recognition.md b/docs/modules/recognition.md
new file mode 100644
index 0000000..fc4ef94
--- /dev/null
+++ b/docs/modules/recognition.md
@@ -0,0 +1,240 @@
+# Recognition
+
+Face recognition extracts embeddings for identity verification and face search.
+
+---
+
+## Available Models
+
+| Model | Backbone | Size | Embedding Dim | Best For |
+|-------|----------|------|---------------|----------|
+| **ArcFace** | MobileNet/ResNet | 8-166 MB | 512 | General use (recommended) |
+| **MobileFace** | MobileNet V2/V3 | 1-10 MB | 512 | Mobile/Edge |
+| **SphereFace** | Sphere20/36 | 50-92 MB | 512 | Research |
+
+---
+
+## ArcFace
+
+State-of-the-art recognition using additive angular margin loss.
+
+### Basic Usage
+
+```python
+from uniface import RetinaFace, ArcFace
+
+detector = RetinaFace()
+recognizer = ArcFace()
+
+# Detect face
+faces = detector.detect(image)
+
+# Extract embedding
+if faces:
+ embedding = recognizer.get_normalized_embedding(image, faces[0].landmarks)
+ print(f"Embedding shape: {embedding.shape}") # (1, 512)
+```
+
+### Model Variants
+
+```python
+from uniface import ArcFace
+from uniface.constants import ArcFaceWeights
+
+# Lightweight (default)
+recognizer = ArcFace(model_name=ArcFaceWeights.MNET)
+
+# High accuracy
+recognizer = ArcFace(model_name=ArcFaceWeights.RESNET)
+```
+
+| Variant | Backbone | Size | Use Case |
+|---------|----------|------|----------|
+| **MNET** ⭐ | MobileNet | 8 MB | Balanced (recommended) |
+| RESNET | ResNet50 | 166 MB | Maximum accuracy |
+
+---
+
+## MobileFace
+
+Lightweight recognition for resource-constrained environments.
+
+### Basic Usage
+
+```python
+from uniface import MobileFace
+
+recognizer = MobileFace()
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+```
+
+### Model Variants
+
+```python
+from uniface import MobileFace
+from uniface.constants import MobileFaceWeights
+
+# Ultra-lightweight
+recognizer = MobileFace(model_name=MobileFaceWeights.MNET_025)
+
+# Balanced (default)
+recognizer = MobileFace(model_name=MobileFaceWeights.MNET_V2)
+
+# Higher accuracy
+recognizer = MobileFace(model_name=MobileFaceWeights.MNET_V3_LARGE)
+```
+
+| Variant | Params | Size | LFW | Use Case |
+|---------|--------|------|-----|----------|
+| MNET_025 | 0.36M | 1 MB | 98.8% | Ultra-lightweight |
+| **MNET_V2** ⭐ | 2.29M | 4 MB | 99.6% | Mobile/Edge |
+| MNET_V3_SMALL | 1.25M | 3 MB | 99.3% | Mobile optimized |
+| MNET_V3_LARGE | 3.52M | 10 MB | 99.5% | Balanced mobile |
+
+---
+
+## SphereFace
+
+Recognition using angular softmax loss (A-Softmax).
+
+### Basic Usage
+
+```python
+from uniface import SphereFace
+from uniface.constants import SphereFaceWeights
+
+recognizer = SphereFace(model_name=SphereFaceWeights.SPHERE20)
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+```
+
+| Variant | Params | Size | LFW | Use Case |
+|---------|--------|------|-----|----------|
+| SPHERE20 | 24.5M | 50 MB | 99.7% | Research |
+| SPHERE36 | 34.6M | 92 MB | 99.7% | Research |
+
+---
+
+## Face Comparison
+
+### Compute Similarity
+
+```python
+from uniface import compute_similarity
+import numpy as np
+
+# Extract embeddings
+emb1 = recognizer.get_normalized_embedding(image1, landmarks1)
+emb2 = recognizer.get_normalized_embedding(image2, landmarks2)
+
+# Method 1: Using utility function
+similarity = compute_similarity(emb1, emb2)
+
+# Method 2: Direct computation
+similarity = np.dot(emb1, emb2.T)[0][0]
+
+print(f"Similarity: {similarity:.4f}")
+```
+
+### Threshold Guidelines
+
+| Threshold | Decision | Use Case |
+|-----------|----------|----------|
+| > 0.7 | Very high confidence | Security-critical |
+| > 0.6 | Same person | General verification |
+| 0.4 - 0.6 | Uncertain | Manual review needed |
+| < 0.4 | Different people | Rejection |
+
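+As a sketch, the table translates into a small decision helper (cutoffs should be calibrated for your model and data):
+
+```python
+def interpret_similarity(similarity: float) -> str:
+    """Map a cosine similarity to a verification decision."""
+    if similarity > 0.7:
+        return "match (very high confidence)"
+    if similarity > 0.6:
+        return "match"
+    if similarity >= 0.4:
+        return "uncertain - manual review"
+    return "no match"
+```
+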
+---
+
+## Face Alignment
+
+Recognition models require aligned faces. UniFace handles this internally:
+
+```python
+# Alignment is done automatically
+embedding = recognizer.get_normalized_embedding(image, landmarks)
+
+# Or manually align
+from uniface import face_alignment
+
+aligned_face = face_alignment(image, landmarks)
+# Returns: 112x112 aligned face image
+```
+
+---
+
+## Building a Face Database
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, ArcFace
+
+detector = RetinaFace()
+recognizer = ArcFace()
+
+# Build database from a mapping of person_id -> image path
+database = {}
+for person_id, image_path in person_images.items():
+ image = cv2.imread(image_path)
+ faces = detector.detect(image)
+
+ if faces:
+ embedding = recognizer.get_normalized_embedding(image, faces[0].landmarks)
+ database[person_id] = embedding
+
+# Save for later use
+np.savez('face_database.npz', **database)
+
+# Load database
+data = np.load('face_database.npz')
+database = {key: data[key] for key in data.files}
+```
+
+---
+
+## Face Search
+
+Find a person in a database:
+
+```python
+def search_face(query_embedding, database, threshold=0.6):
+ """Find best match in database."""
+ best_match = None
+ best_similarity = -1
+
+    for person_id, db_embedding in database.items():
+        similarity = np.dot(query_embedding, db_embedding.T)[0][0]
+
+        if similarity > best_similarity:
+            best_similarity = similarity
+            best_match = person_id
+
+    # Apply the threshold after scanning every entry
+    if best_similarity < threshold:
+        return None, best_similarity
+
+    return best_match, best_similarity
+
+# Usage
+query_embedding = recognizer.get_normalized_embedding(query_image, landmarks)
+match, similarity = search_face(query_embedding, database)
+
+if match:
+ print(f"Found: {match} (similarity: {similarity:.4f})")
+else:
+ print("No match found")
+```
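+
+For larger databases, stacking the embeddings into one matrix turns the scan into a single matrix multiply. An illustrative sketch (names are assumptions, not library API):
+
+```python
+import numpy as np
+
+# Stack once, then reuse for every query
+ids = list(database.keys())
+matrix = np.vstack([database[pid] for pid in ids])  # (N, 512)
+
+def search_face_fast(query_embedding, threshold=0.6):
+    """Vectorized search over all database embeddings."""
+    similarities = matrix @ query_embedding.ravel()  # (N,) cosine similarities
+    best = int(np.argmax(similarities))
+    if similarities[best] < threshold:
+        return None, float(similarities[best])
+    return ids[best], float(similarities[best])
+```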
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_recognizer
+
+recognizer = create_recognizer('arcface')
+```
+
+---
+
+## Next Steps
+
+- [Landmarks](landmarks.md) - 106-point landmarks
+- [Face Search Recipe](../recipes/face-search.md) - Complete search system
+- [Thresholds](../concepts/thresholds-calibration.md) - Calibration guide
diff --git a/docs/modules/spoofing.md b/docs/modules/spoofing.md
new file mode 100644
index 0000000..1d2ff34
--- /dev/null
+++ b/docs/modules/spoofing.md
@@ -0,0 +1,266 @@
+# Anti-Spoofing
+
+Face anti-spoofing detects whether a face is real (live) or fake (photo, video replay, mask).
+
+---
+
+## Available Models
+
+| Model | Size | Notes |
+|-------|------|-------|
+| MiniFASNet V1SE | 1.2 MB | Squeeze-and-Excitation variant |
+| **MiniFASNet V2** ⭐ | 1.2 MB | Improved version (recommended) |
+
+---
+
+## Basic Usage
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.spoofing import MiniFASNet
+
+detector = RetinaFace()
+spoofer = MiniFASNet()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+for face in faces:
+ result = spoofer.predict(image, face.bbox)
+
+ label = "Real" if result.is_real else "Fake"
+ print(f"{label}: {result.confidence:.1%}")
+```
+
+---
+
+## Output Format
+
+```python
+result = spoofer.predict(image, face.bbox)
+
+# SpoofingResult dataclass
+result.is_real # True = real, False = fake
+result.confidence # 0.0 to 1.0
+```
+
+---
+
+## Model Variants
+
+```python
+from uniface.spoofing import MiniFASNet
+from uniface.constants import MiniFASNetWeights
+
+# Default (V2, recommended)
+spoofer = MiniFASNet()
+
+# V1SE variant
+spoofer = MiniFASNet(model_name=MiniFASNetWeights.V1SE)
+```
+
+| Variant | Size | Scale Factor |
+|---------|------|--------------|
+| V1SE | 1.2 MB | 4.0 |
+| **V2** ⭐ | 1.2 MB | 2.7 |
+
+---
+
+## Confidence Thresholds
+
+The default threshold is 0.5. Adjust for your use case:
+
+```python
+result = spoofer.predict(image, face.bbox)
+
+# High security (fewer false accepts)
+HIGH_THRESHOLD = 0.7
+if result.confidence > HIGH_THRESHOLD:
+ print("Real (high confidence)")
+else:
+ print("Suspicious")
+
+# Balanced
+if result.is_real: # Uses default 0.5 threshold
+ print("Real")
+else:
+ print("Fake")
+```
+
+---
+
+## Visualization
+
+```python
+import cv2
+
+def draw_spoofing_result(image, face, result):
+ """Draw spoofing result on image."""
+ x1, y1, x2, y2 = map(int, face.bbox)
+
+ # Color based on result
+ color = (0, 255, 0) if result.is_real else (0, 0, 255)
+ label = "Real" if result.is_real else "Fake"
+
+ # Draw bounding box
+ cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
+
+ # Draw label
+ text = f"{label}: {result.confidence:.1%}"
+ cv2.putText(image, text, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+ return image
+
+# Usage
+for face in faces:
+ result = spoofer.predict(image, face.bbox)
+ image = draw_spoofing_result(image, face, result)
+
+cv2.imwrite("spoofing_result.jpg", image)
+```
+
+---
+
+## Real-Time Liveness Detection
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.spoofing import MiniFASNet
+
+detector = RetinaFace()
+spoofer = MiniFASNet()
+
+cap = cv2.VideoCapture(0)
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+
+ for face in faces:
+ result = spoofer.predict(frame, face.bbox)
+
+ # Draw result
+ x1, y1, x2, y2 = map(int, face.bbox)
+ color = (0, 255, 0) if result.is_real else (0, 0, 255)
+ label = f"{'Real' if result.is_real else 'Fake'}: {result.confidence:.0%}"
+
+ cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+ cv2.putText(frame, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+ cv2.imshow("Liveness Detection", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Use Cases
+
+### Access Control
+
+```python
+def verify_liveness(image, face, spoofer, threshold=0.6):
+ """Verify face is real for access control."""
+ result = spoofer.predict(image, face.bbox)
+
+ if result.is_real and result.confidence > threshold:
+ return True, result.confidence
+ return False, result.confidence
+
+# Usage
+is_live, confidence = verify_liveness(image, face, spoofer)
+if is_live:
+ print(f"Access granted (confidence: {confidence:.1%})")
+else:
+ print(f"Access denied - possible spoof attempt")
+```
+
+### Multi-Frame Verification
+
+For higher security, verify across multiple frames:
+
+```python
+def verify_liveness_multiframe(frames, detector, spoofer, min_real=3):
+ """Verify liveness across multiple frames."""
+ real_count = 0
+
+ for frame in frames:
+ faces = detector.detect(frame)
+ if not faces:
+ continue
+
+ result = spoofer.predict(frame, faces[0].bbox)
+ if result.is_real:
+ real_count += 1
+
+ return real_count >= min_real
+
+# Collect frames and verify
+frames = []
+for _ in range(5):
+ ret, frame = cap.read()
+ if ret:
+ frames.append(frame)
+
+is_verified = verify_liveness_multiframe(frames, detector, spoofer)
+```
+
+---
+
+## Attack Types Detected
+
+MiniFASNet can detect various spoof attacks:
+
+| Attack Type | Detection |
+|-------------|-----------|
+| Printed photos | ✅ |
+| Screen replay | ✅ |
+| Video replay | ✅ |
+| Paper masks | ✅ |
+| 3D masks | Limited |
+
+!!! warning "Limitations"
+ - High-quality 3D masks may not be detected
+ - Performance varies with lighting and image quality
+ - Always combine with other verification methods for high-security applications
+
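+As an illustrative sketch of combining checks, liveness can gate an identity match. The helper below is an assumption built from the APIs shown above; `db_embedding` comes from your own database:
+
+```python
+import numpy as np
+
+def liveness_gated_match(image, face, spoofer, recognizer, db_embedding,
+                         spoof_threshold=0.6, match_threshold=0.6):
+    """Accept only faces that pass both liveness and identity checks."""
+    spoof = spoofer.predict(image, face.bbox)
+    if not (spoof.is_real and spoof.confidence > spoof_threshold):
+        return False  # Reject suspected spoofs before matching
+
+    embedding = recognizer.get_normalized_embedding(image, face.landmarks)
+    similarity = float(np.dot(embedding, db_embedding.T)[0][0])
+    return similarity > match_threshold
+```
+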
+---
+
+## Command-Line Tool
+
+```bash
+# Image
+python tools/spoofing.py --source photo.jpg
+
+# Webcam
+python tools/spoofing.py --source 0
+```
+
+---
+
+## Factory Function
+
+```python
+from uniface import create_spoofer
+
+spoofer = create_spoofer() # Returns MiniFASNet
+```
+
+---
+
+## Next Steps
+
+- [Privacy](privacy.md) - Face anonymization
+- [Detection](detection.md) - Face detection
+- [Recognition](recognition.md) - Face recognition
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 0000000..2825ed6
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,362 @@
+# Quickstart
+
+Get up and running with UniFace in 5 minutes. This guide covers the most common use cases.
+
+---
+
+## 1. Face Detection
+
+Detect faces in an image:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+# Load image
+image = cv2.imread("photo.jpg")
+
+# Initialize detector (models auto-download on first use)
+detector = RetinaFace()
+
+# Detect faces
+faces = detector.detect(image)
+
+# Print results
+for i, face in enumerate(faces):
+ print(f"Face {i+1}:")
+ print(f" Confidence: {face.confidence:.2f}")
+ print(f" BBox: {face.bbox}")
+ print(f" Landmarks: {len(face.landmarks)} points")
+```
+
+**Output:**
+
+```
+Face 1:
+ Confidence: 0.99
+ BBox: [120.5, 85.3, 245.8, 210.6]
+ Landmarks: 5 points
+```
+
+---
+
+## 2. Visualize Detections
+
+Draw bounding boxes and landmarks:
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+# Detect faces
+detector = RetinaFace()
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+# Extract visualization data
+bboxes = [f.bbox for f in faces]
+scores = [f.confidence for f in faces]
+landmarks = [f.landmarks for f in faces]
+
+# Draw on image
+draw_detections(
+ image=image,
+ bboxes=bboxes,
+ scores=scores,
+ landmarks=landmarks,
+ vis_threshold=0.6,
+)
+
+# Save result
+cv2.imwrite("output.jpg", image)
+```
+
+---
+
+## 3. Face Recognition
+
+Compare two faces:
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, ArcFace
+
+# Initialize models
+detector = RetinaFace()
+recognizer = ArcFace()
+
+# Load two images
+image1 = cv2.imread("person1.jpg")
+image2 = cv2.imread("person2.jpg")
+
+# Detect faces
+faces1 = detector.detect(image1)
+faces2 = detector.detect(image2)
+
+if faces1 and faces2:
+ # Extract embeddings
+ emb1 = recognizer.get_normalized_embedding(image1, faces1[0].landmarks)
+ emb2 = recognizer.get_normalized_embedding(image2, faces2[0].landmarks)
+
+ # Compute similarity (cosine similarity)
+ similarity = np.dot(emb1, emb2.T)[0][0]
+
+ # Interpret result
+ if similarity > 0.6:
+ print(f"Same person (similarity: {similarity:.3f})")
+ else:
+ print(f"Different people (similarity: {similarity:.3f})")
+```
+
+!!! tip "Similarity Thresholds"
+ - `> 0.6`: Same person (high confidence)
+ - `0.4 - 0.6`: Uncertain (manual review)
+ - `< 0.4`: Different people
+
+---
+
+## 4. Age & Gender Detection
+
+```python
+import cv2
+from uniface import RetinaFace, AgeGender
+
+# Initialize models
+detector = RetinaFace()
+age_gender = AgeGender()
+
+# Load image
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+# Predict attributes
+for i, face in enumerate(faces):
+ result = age_gender.predict(image, face.bbox)
+ print(f"Face {i+1}: {result.sex}, {result.age} years old")
+```
+
+**Output:**
+
+```
+Face 1: Male, 32 years old
+Face 2: Female, 28 years old
+```
+
+---
+
+## 5. FairFace Attributes
+
+Detect race, gender, and age group:
+
+```python
+import cv2
+from uniface import RetinaFace, FairFace
+
+detector = RetinaFace()
+fairface = FairFace()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+for i, face in enumerate(faces):
+ result = fairface.predict(image, face.bbox)
+ print(f"Face {i+1}: {result.sex}, {result.age_group}, {result.race}")
+```
+
+**Output:**
+
+```
+Face 1: Male, 30-39, East Asian
+Face 2: Female, 20-29, White
+```
+
+---
+
+## 6. Facial Landmarks (106 Points)
+
+```python
+import cv2
+from uniface import RetinaFace, Landmark106
+
+detector = RetinaFace()
+landmarker = Landmark106()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+if faces:
+ landmarks = landmarker.get_landmarks(image, faces[0].bbox)
+ print(f"Detected {len(landmarks)} landmarks")
+
+ # Draw landmarks
+ for x, y in landmarks.astype(int):
+ cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
+
+ cv2.imwrite("landmarks.jpg", image)
+```
+
+---
+
+## 7. Gaze Estimation
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, MobileGaze
+from uniface.visualization import draw_gaze
+
+detector = RetinaFace()
+gaze_estimator = MobileGaze()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+for i, face in enumerate(faces):
+ x1, y1, x2, y2 = map(int, face.bbox[:4])
+ face_crop = image[y1:y2, x1:x2]
+
+ if face_crop.size > 0:
+ result = gaze_estimator.estimate(face_crop)
+ print(f"Face {i+1}: pitch={np.degrees(result.pitch):.1f}°, yaw={np.degrees(result.yaw):.1f}°")
+
+ # Draw gaze direction
+ draw_gaze(image, face.bbox, result.pitch, result.yaw)
+
+cv2.imwrite("gaze_output.jpg", image)
+```
+
+---
+
+## 8. Face Parsing
+
+Segment face into semantic components:
+
+```python
+import cv2
+import numpy as np
+from uniface.parsing import BiSeNet
+from uniface.visualization import vis_parsing_maps
+
+parser = BiSeNet()
+
+# Load face image (already cropped)
+face_image = cv2.imread("face.jpg")
+
+# Parse face into 19 components
+mask = parser.parse(face_image)
+
+# Visualize with overlay
+face_rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
+vis_result = vis_parsing_maps(face_rgb, mask, save_image=False)
+
+print(f"Detected {len(np.unique(mask))} facial components")
+```
+
+---
+
+## 9. Face Anonymization
+
+Blur faces for privacy protection:
+
+```python
+from uniface.privacy import anonymize_faces
+import cv2
+
+# One-liner: automatic detection and blurring
+image = cv2.imread("group_photo.jpg")
+anonymized = anonymize_faces(image, method='pixelate')
+cv2.imwrite("anonymized.jpg", anonymized)
+```
+
+**Manual control:**
+
+```python
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='gaussian', blur_strength=5.0)
+
+faces = detector.detect(image)
+anonymized = blurrer.anonymize(image, faces)
+```
+
+**Available methods:**
+
+| Method | Description |
+|--------|-------------|
+| `pixelate` | Blocky effect (news media standard) |
+| `gaussian` | Smooth, natural blur |
+| `blackout` | Solid color boxes (maximum privacy) |
+| `elliptical` | Soft oval blur (natural face shape) |
+| `median` | Edge-preserving blur |
+
+---
+
+## 10. Face Anti-Spoofing
+
+Detect real vs. fake faces:
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.spoofing import MiniFASNet
+
+detector = RetinaFace()
+spoofer = MiniFASNet()
+
+image = cv2.imread("photo.jpg")
+faces = detector.detect(image)
+
+for i, face in enumerate(faces):
+ result = spoofer.predict(image, face.bbox)
+ label = 'Real' if result.is_real else 'Fake'
+ print(f"Face {i+1}: {label} ({result.confidence:.1%})")
+```
+
+---
+
+## 11. Webcam Demo
+
+Real-time face detection:
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+print("Press 'q' to quit")
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+
+ bboxes = [f.bbox for f in faces]
+ scores = [f.confidence for f in faces]
+ landmarks = [f.landmarks for f in faces]
+ draw_detections(image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks)
+
+ cv2.imshow("UniFace - Press 'q' to quit", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Next Steps
+
+- [Concepts Overview](concepts/overview.md) - Understand the architecture
+- [Detection Module](modules/detection.md) - Deep dive into detection models
+- [Recipes](recipes/image-pipeline.md) - Complete workflow examples
diff --git a/docs/recipes/anonymize-stream.md b/docs/recipes/anonymize-stream.md
new file mode 100644
index 0000000..f361bda
--- /dev/null
+++ b/docs/recipes/anonymize-stream.md
@@ -0,0 +1,88 @@
+# Anonymize Stream
+
+Blur faces in real-time video streams for privacy protection.
+
+---
+
+## Webcam
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='pixelate')
+cap = cv2.VideoCapture(0)
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+ frame = blurrer.anonymize(frame, faces, inplace=True)
+
+ cv2.imshow('Anonymized', frame)
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Video File
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.privacy import BlurFace
+
+detector = RetinaFace()
+blurrer = BlurFace(method='gaussian')
+
+cap = cv2.VideoCapture("input.mp4")
+fps = cap.get(cv2.CAP_PROP_FPS)
+w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        break
+
+ faces = detector.detect(frame)
+ blurrer.anonymize(frame, faces, inplace=True)
+ out.write(frame)
+
+cap.release()
+out.release()
+```
+
+---
+
+## One-Liner
+
+```python
+from uniface.privacy import anonymize_faces
+import cv2
+
+image = cv2.imread("photo.jpg")
+result = anonymize_faces(image, method='pixelate')
+cv2.imwrite("anonymized.jpg", result)
+```
+
+---
+
+## Blur Methods
+
+| Method | Code |
+|--------|------|
+| Pixelate | `BlurFace(method='pixelate', pixel_blocks=10)` |
+| Gaussian | `BlurFace(method='gaussian', blur_strength=3.0)` |
+| Blackout | `BlurFace(method='blackout', color=(0,0,0))` |
+| Elliptical | `BlurFace(method='elliptical', margin=20)` |
+| Median | `BlurFace(method='median', blur_strength=3.0)` |
diff --git a/docs/recipes/batch-processing.md b/docs/recipes/batch-processing.md
new file mode 100644
index 0000000..7fa8880
--- /dev/null
+++ b/docs/recipes/batch-processing.md
@@ -0,0 +1,353 @@
+# Batch Processing
+
+Process multiple images efficiently.
+
+---
+
+## Basic Batch Processing
+
+```python
+import cv2
+from pathlib import Path
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+detector = RetinaFace()
+
+def process_directory(input_dir, output_dir):
+ """Process all images in a directory."""
+ input_path = Path(input_dir)
+ output_path = Path(output_dir)
+ output_path.mkdir(parents=True, exist_ok=True)
+
+ # Supported image formats
+ extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp']
+ image_files = []
+ for ext in extensions:
+ image_files.extend(input_path.glob(ext))
+ image_files.extend(input_path.glob(ext.upper()))
+
+ print(f"Found {len(image_files)} images")
+
+ results = {}
+
+ for image_path in image_files:
+ print(f"Processing {image_path.name}...")
+
+ image = cv2.imread(str(image_path))
+ if image is None:
+ print(f" Failed to load {image_path.name}")
+ continue
+
+ faces = detector.detect(image)
+ print(f" Found {len(faces)} face(s)")
+
+ # Store results
+ results[image_path.name] = {
+ 'num_faces': len(faces),
+ 'faces': [
+ {
+ 'bbox': face.bbox.tolist(),
+ 'confidence': float(face.confidence)
+ }
+ for face in faces
+ ]
+ }
+
+ # Visualize and save
+ if faces:
+ draw_detections(
+ image=image,
+ bboxes=[f.bbox for f in faces],
+ scores=[f.confidence for f in faces],
+ landmarks=[f.landmarks for f in faces]
+ )
+
+ output_file = output_path / image_path.name
+ cv2.imwrite(str(output_file), image)
+
+ return results
+
+# Usage
+results = process_directory("input_images/", "output_images/")
+print(f"\nProcessed {len(results)} images")
+```
+
+---
+
+## Parallel Processing
+
+Use multiprocessing for faster batch processing:
+
+```python
+import cv2
+from pathlib import Path
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from uniface import RetinaFace
+
+def process_single_image(image_path, output_dir):
+ """Process a single image (runs in worker process)."""
+ # Create detector in each process
+ detector = RetinaFace()
+
+ image = cv2.imread(str(image_path))
+ if image is None:
+ return image_path.name, {'error': 'Failed to load'}
+
+ faces = detector.detect(image)
+
+ result = {
+ 'num_faces': len(faces),
+ 'faces': [
+ {
+ 'bbox': face.bbox.tolist(),
+ 'confidence': float(face.confidence)
+ }
+ for face in faces
+ ]
+ }
+
+ # Save result
+ output_path = Path(output_dir) / image_path.name
+ cv2.imwrite(str(output_path), image)
+
+ return image_path.name, result
+
+def batch_process_parallel(input_dir, output_dir, max_workers=4):
+ """Process images in parallel."""
+ input_path = Path(input_dir)
+ output_path = Path(output_dir)
+ output_path.mkdir(parents=True, exist_ok=True)
+
+ image_files = list(input_path.glob("*.jpg")) + list(input_path.glob("*.png"))
+
+ results = {}
+
+ with ProcessPoolExecutor(max_workers=max_workers) as executor:
+ futures = {
+ executor.submit(process_single_image, img, output_dir): img
+ for img in image_files
+ }
+
+ for future in as_completed(futures):
+ name, result = future.result()
+ results[name] = result
+ print(f"Completed: {name} - {result.get('num_faces', 'error')} faces")
+
+ return results
+
+# Usage
+results = batch_process_parallel("input_images/", "output_images/", max_workers=4)
+```
+
+---
+
+## Progress Tracking
+
+Use tqdm for progress bars:
+
+```python
+from tqdm import tqdm
+
+def process_with_progress(input_dir, output_dir):
+ """Process with progress bar."""
+ detector = RetinaFace()
+
+ input_path = Path(input_dir)
+ output_path = Path(output_dir)
+ output_path.mkdir(parents=True, exist_ok=True)
+
+ image_files = list(input_path.glob("*.jpg")) + list(input_path.glob("*.png"))
+
+ results = {}
+
+ for image_path in tqdm(image_files, desc="Processing images"):
+ image = cv2.imread(str(image_path))
+ if image is None:
+ continue
+
+ faces = detector.detect(image)
+ results[image_path.name] = len(faces)
+
+ cv2.imwrite(str(output_path / image_path.name), image)
+
+ return results
+
+# Usage
+results = process_with_progress("input/", "output/")
+print(f"Total faces found: {sum(results.values())}")
+```
+
+---
+
+## Batch Embedding Extraction
+
+Extract embeddings for a face database:
+
+```python
+import numpy as np
+from pathlib import Path
+from uniface import RetinaFace, ArcFace
+
+def extract_embeddings(image_dir):
+ """Extract embeddings from all faces."""
+ detector = RetinaFace()
+ recognizer = ArcFace()
+
+ embeddings = {}
+
+ for image_path in Path(image_dir).glob("*.jpg"):
+ image = cv2.imread(str(image_path))
+ if image is None:
+ continue
+
+ faces = detector.detect(image)
+
+ if faces:
+ # Use first face
+ embedding = recognizer.get_normalized_embedding(
+ image, faces[0].landmarks
+ )
+ embeddings[image_path.stem] = embedding
+ print(f"Extracted: {image_path.stem}")
+
+ return embeddings
+
+def save_embeddings(embeddings, output_path):
+ """Save embeddings to file."""
+ np.savez(output_path, **embeddings)
+ print(f"Saved {len(embeddings)} embeddings to {output_path}")
+
+def load_embeddings(input_path):
+ """Load embeddings from file."""
+ data = np.load(input_path)
+ return {key: data[key] for key in data.files}
+
+# Usage
+embeddings = extract_embeddings("faces/")
+save_embeddings(embeddings, "embeddings.npz")
+
+# Later...
+loaded = load_embeddings("embeddings.npz")
+```
+
+---
+
+## CSV Output
+
+Export results to CSV:
+
+```python
+import csv
+from pathlib import Path
+
+def export_to_csv(results, output_path):
+ """Export detection results to CSV."""
+ with open(output_path, 'w', newline='') as f:
+ writer = csv.writer(f)
+ writer.writerow(['filename', 'face_id', 'x1', 'y1', 'x2', 'y2', 'confidence'])
+
+ for filename, data in results.items():
+ for i, face in enumerate(data['faces']):
+ bbox = face['bbox']
+ writer.writerow([
+ filename, i,
+ bbox[0], bbox[1], bbox[2], bbox[3],
+ face['confidence']
+ ])
+
+ print(f"Exported to {output_path}")
+
+# Usage
+results = process_directory("input/", "output/")
+export_to_csv(results, "detections.csv")
+```
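+
+The same `results` dictionary serializes cleanly, so JSON export is a one-liner with the standard library:
+
+```python
+import json
+
+def export_to_json(results, output_path):
+    """Export detection results to JSON."""
+    with open(output_path, 'w') as f:
+        json.dump(results, f, indent=2)
+
+export_to_json(results, "detections.json")
+```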
+
+---
+
+## Memory-Efficient Processing
+
+For large batches, process in chunks:
+
+```python
+def process_in_chunks(image_files, chunk_size=100):
+ """Process images in memory-efficient chunks."""
+ detector = RetinaFace()
+
+ all_results = {}
+
+ for i in range(0, len(image_files), chunk_size):
+ chunk = image_files[i:i + chunk_size]
+ print(f"Processing chunk {i//chunk_size + 1}/{(len(image_files)-1)//chunk_size + 1}")
+
+ for image_path in chunk:
+ image = cv2.imread(str(image_path))
+ if image is None:
+ continue
+
+ faces = detector.detect(image)
+ all_results[image_path.name] = len(faces)
+
+ # Free memory
+ del image
+
+ # Optional: force garbage collection
+ import gc
+ gc.collect()
+
+ return all_results
+```
+
+---
+
+## Error Handling
+
+Robust batch processing with error handling:
+
+```python
+import logging
+from pathlib import Path
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def robust_batch_process(input_dir, output_dir):
+ """Batch process with error handling."""
+ detector = RetinaFace()
+
+ input_path = Path(input_dir)
+ output_path = Path(output_dir)
+ output_path.mkdir(parents=True, exist_ok=True)
+
+ image_files = list(input_path.glob("*.[jJ][pP][gG]"))
+
+ success_count = 0
+ error_count = 0
+
+ for image_path in image_files:
+ try:
+ image = cv2.imread(str(image_path))
+ if image is None:
+ raise ValueError("Failed to load image")
+
+ faces = detector.detect(image)
+
+ cv2.imwrite(str(output_path / image_path.name), image)
+ success_count += 1
+ logger.info(f"Processed {image_path.name}: {len(faces)} faces")
+
+ except Exception as e:
+ error_count += 1
+ logger.error(f"Error processing {image_path.name}: {e}")
+
+ logger.info(f"Completed: {success_count} success, {error_count} errors")
+ return success_count, error_count
+```
+
+---
+
+## Next Steps
+
+- [Video & Webcam](video-webcam.md) - Real-time processing
+- [Face Search](face-search.md) - Search through embeddings
+- [Image Pipeline](image-pipeline.md) - Full analysis pipeline
diff --git a/docs/recipes/custom-models.md b/docs/recipes/custom-models.md
new file mode 100644
index 0000000..fd077c5
--- /dev/null
+++ b/docs/recipes/custom-models.md
@@ -0,0 +1,96 @@
+# Custom Models
+
+Add your own ONNX models to UniFace.
+
+---
+
+## Add Detection Model
+
+```python
+from uniface.detection.base import BaseDetector
+from uniface.onnx_utils import create_onnx_session
+from uniface.types import Face
+import numpy as np
+
+class MyDetector(BaseDetector):
+ def __init__(self, model_path: str, confidence_threshold: float = 0.5):
+ self.session = create_onnx_session(model_path)
+ self.threshold = confidence_threshold
+
+ def detect(self, image: np.ndarray) -> list[Face]:
+ # Preprocess
+ input_tensor = self._preprocess(image)
+
+ # Inference
+ outputs = self.session.run(None, {'input': input_tensor})
+
+ # Postprocess
+ faces = self._postprocess(outputs, image.shape)
+ return faces
+
+ def _preprocess(self, image):
+ # Your preprocessing logic
+ pass
+
+ def _postprocess(self, outputs, shape):
+ # Your postprocessing logic
+ pass
+```
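+
+As a sketch of what `_preprocess` might look like for a detector with a fixed 640×640 input (the exact resize, color order, and normalization are model-specific assumptions):
+
+```python
+import cv2
+import numpy as np
+
+def _preprocess(self, image: np.ndarray) -> np.ndarray:
+    # Resize to the model's fixed input and convert BGR -> RGB
+    resized = cv2.resize(image, (640, 640))
+    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
+    # NCHW float32 tensor scaled to [0, 1]
+    return rgb.astype(np.float32).transpose(2, 0, 1)[None] / 255.0
+```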
+
+---
+
+## Add Recognition Model
+
+```python
+from uniface.recognition.base import BaseRecognizer
+from uniface.onnx_utils import create_onnx_session
+from uniface import face_alignment
+import numpy as np
+
+class MyRecognizer(BaseRecognizer):
+ def __init__(self, model_path: str):
+ self.session = create_onnx_session(model_path)
+
+ def get_normalized_embedding(self, image: np.ndarray, landmarks: np.ndarray) -> np.ndarray:
+ # Align face
+ aligned = face_alignment(image, landmarks)
+
+ # Preprocess
+ input_tensor = self._preprocess(aligned)
+
+ # Inference
+ embedding = self.session.run(None, {'input': input_tensor})[0]
+
+ # Normalize
+ embedding = embedding / np.linalg.norm(embedding)
+ return embedding
+
+ def _preprocess(self, image):
+ # Your preprocessing logic
+ pass
+```
+
+---
+
+## Register Weights
+
+Add to `uniface/constants.py`:
+
+```python
+class MyModelWeights(str, Enum):
+ DEFAULT = "my_model"
+
+MODEL_URLS[MyModelWeights.DEFAULT] = 'https://...'
+MODEL_SHA256[MyModelWeights.DEFAULT] = 'sha256hash...'
+```
+
+---
+
+## Use Custom Model
+
+```python
+from my_module import MyDetector
+
+detector = MyDetector("path/to/model.onnx")
+faces = detector.detect(image)
+```
diff --git a/docs/recipes/face-search.md b/docs/recipes/face-search.md
new file mode 100644
index 0000000..ffedae2
--- /dev/null
+++ b/docs/recipes/face-search.md
@@ -0,0 +1,340 @@
+# Face Search
+
+Build a face search system for finding people in images.
+
+---
+
+## Build Face Database
+
+```python
+import numpy as np
+import cv2
+from pathlib import Path
+from uniface import RetinaFace, ArcFace
+
+class FaceDatabase:
+ def __init__(self):
+ self.detector = RetinaFace()
+ self.recognizer = ArcFace()
+ self.embeddings = {}
+ self.metadata = {}
+
+ def add_face(self, person_id, image, metadata=None):
+ """Add a face to the database."""
+ faces = self.detector.detect(image)
+
+ if not faces:
+ raise ValueError(f"No face found for {person_id}")
+
+ # Use highest confidence face
+ face = max(faces, key=lambda f: f.confidence)
+ embedding = self.recognizer.get_normalized_embedding(image, face.landmarks)
+
+ self.embeddings[person_id] = embedding
+ self.metadata[person_id] = metadata or {}
+
+ return True
+
+ def add_from_directory(self, directory):
+ """Add faces from a directory (filename = person_id)."""
+ dir_path = Path(directory)
+
+ for image_path in dir_path.glob("*.jpg"):
+ person_id = image_path.stem
+ image = cv2.imread(str(image_path))
+
+ try:
+ self.add_face(person_id, image, {'source': str(image_path)})
+ print(f"Added: {person_id}")
+ except ValueError as e:
+ print(f"Skipped {person_id}: {e}")
+
+ def search(self, image, threshold=0.6):
+ """Search for faces in an image."""
+ faces = self.detector.detect(image)
+ results = []
+
+ for face in faces:
+ embedding = self.recognizer.get_normalized_embedding(image, face.landmarks)
+
+ best_match = None
+ best_similarity = -1
+
+ for person_id, db_embedding in self.embeddings.items():
+ similarity = np.dot(embedding, db_embedding.T)[0][0]
+
+ if similarity > best_similarity:
+ best_similarity = similarity
+ best_match = person_id
+
+ results.append({
+ 'bbox': face.bbox,
+ 'confidence': face.confidence,
+ 'match': best_match if best_similarity >= threshold else None,
+ 'similarity': best_similarity,
+ 'metadata': self.metadata.get(best_match, {})
+ })
+
+ return results
+
+ def save(self, path):
+ """Save database to file."""
+ np.savez(
+ path,
+ embeddings=dict(self.embeddings),
+ metadata=self.metadata
+ )
+ print(f"Saved database to {path}")
+
+ def load(self, path):
+ """Load database from file."""
+ data = np.load(path, allow_pickle=True)
+ self.embeddings = data['embeddings'].item()
+ self.metadata = data['metadata'].item()
+ print(f"Loaded {len(self.embeddings)} faces from {path}")
+
+# Usage
+db = FaceDatabase()
+
+# Add faces from directory
+db.add_from_directory("known_faces/")
+
+# Save for later
+db.save("face_database.npz")
+
+# Search for person
+query_image = cv2.imread("group_photo.jpg")
+results = db.search(query_image)
+
+for r in results:
+ if r['match']:
+ print(f"Found: {r['match']} (similarity: {r['similarity']:.3f})")
+ else:
+ print(f"Unknown face (best similarity: {r['similarity']:.3f})")
+```
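+
+`search` compares the query against every identity in a Python loop, which is fine for a few hundred faces. For larger databases, stacking the stored embeddings into one matrix turns matching into a single matrix multiply. A sketch (embeddings are unit-normalized, so the dot product is cosine similarity):
+
+```python
+import numpy as np
+
+def search_vectorized(db, embedding, threshold=0.6):
+    """Match one embedding against the whole database with a single matmul."""
+    ids = list(db.embeddings.keys())
+    matrix = np.vstack([db.embeddings[i] for i in ids])  # (N, 512)
+    sims = (matrix @ embedding.reshape(-1, 1)).ravel()   # (N,) similarities
+    best = int(np.argmax(sims))
+    if sims[best] >= threshold:
+        return ids[best], float(sims[best])
+    return None, float(sims[best])
+```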
+
+---
+
+## Visualization
+
+```python
+import cv2
+
+def visualize_search_results(image, results):
+ """Draw search results on image."""
+ for r in results:
+ x1, y1, x2, y2 = map(int, r['bbox'])
+
+ if r['match']:
+ color = (0, 255, 0) # Green for match
+ label = f"{r['match']} ({r['similarity']:.2f})"
+ else:
+ color = (0, 0, 255) # Red for unknown
+ label = f"Unknown ({r['similarity']:.2f})"
+
+ cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
+ cv2.putText(image, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+ return image
+
+# Usage
+results = db.search(image)
+annotated = visualize_search_results(image.copy(), results)
+cv2.imwrite("search_result.jpg", annotated)
+```
+
+---
+
+## Real-Time Search
+
+```python
+import cv2
+
+def realtime_search(db):
+ """Real-time face search from webcam."""
+ cap = cv2.VideoCapture(0)
+
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ results = db.search(frame, threshold=0.5)
+
+ for r in results:
+ x1, y1, x2, y2 = map(int, r['bbox'])
+
+ if r['match']:
+ color = (0, 255, 0)
+ label = r['match']
+ else:
+ color = (0, 0, 255)
+ label = "Unknown"
+
+ cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
+ cv2.putText(frame, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
+
+ cv2.imshow("Face Search", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+ cap.release()
+ cv2.destroyAllWindows()
+
+# Usage
+db = FaceDatabase()
+db.load("face_database.npz")
+realtime_search(db)
+```
+
+---
+
+## Top-K Search
+
+Find top K matches instead of best match only:
+
+```python
+def search_top_k(db, embedding, k=5):
+    """Find the top K matches for an embedding."""
+    similarities = []
+
+    for person_id, db_embedding in db.embeddings.items():
+        similarity = np.dot(embedding, db_embedding.T)[0][0]
+        similarities.append((person_id, similarity))
+
+    # Sort by similarity (descending)
+    similarities.sort(key=lambda x: x[1], reverse=True)
+
+    return similarities[:k]
+
+# Usage
+image = cv2.imread("query.jpg")
+face = db.detector.detect(image)[0]
+query_embedding = db.recognizer.get_normalized_embedding(image, face.landmarks)
+top_matches = search_top_k(db, query_embedding, k=3)
+
+for person_id, similarity in top_matches:
+ print(f"{person_id}: {similarity:.4f}")
+```
+
+---
+
+## Batch Search
+
+Search through multiple query images:
+
+```python
+from pathlib import Path
+
+def batch_search(db, query_dir, threshold=0.6):
+ """Search for faces in multiple images."""
+ all_results = {}
+
+ for image_path in Path(query_dir).glob("*.jpg"):
+ image = cv2.imread(str(image_path))
+ results = db.search(image, threshold)
+
+ matches = [r['match'] for r in results if r['match']]
+ all_results[image_path.name] = matches
+
+ print(f"{image_path.name}: {matches}")
+
+ return all_results
+
+# Usage
+results = batch_search(db, "query_images/")
+```
+
+---
+
+## Find Person in Group Photo
+
+```python
+def find_person_in_group(db, person_id, group_image, threshold=0.6):
+ """Find a specific person in a group photo."""
+ if person_id not in db.embeddings:
+ raise ValueError(f"Person {person_id} not in database")
+
+ reference_embedding = db.embeddings[person_id]
+ faces = db.detector.detect(group_image)
+
+ best_match = None
+ best_similarity = -1
+
+ for face in faces:
+ embedding = db.recognizer.get_normalized_embedding(
+ group_image, face.landmarks
+ )
+ similarity = np.dot(embedding, reference_embedding.T)[0][0]
+
+ if similarity > best_similarity:
+ best_similarity = similarity
+ best_match = face
+
+ if best_match and best_similarity >= threshold:
+ return {
+ 'found': True,
+ 'face': best_match,
+ 'similarity': best_similarity
+ }
+
+ return {'found': False, 'similarity': best_similarity}
+
+# Usage
+group = cv2.imread("group_photo.jpg")
+result = find_person_in_group(db, "john_doe", group)
+
+if result['found']:
+ print(f"Found with similarity: {result['similarity']:.3f}")
+ # Draw the found face
+ x1, y1, x2, y2 = map(int, result['face'].bbox)
+ cv2.rectangle(group, (x1, y1), (x2, y2), (0, 255, 0), 3)
+ cv2.imwrite("found.jpg", group)
+```
+
+---
+
+## Update Database
+
+Add or update faces:
+
+```python
+def update_face(db, person_id, new_image):
+ """Update a person's face in the database."""
+ faces = db.detector.detect(new_image)
+
+ if not faces:
+ print(f"No face found in new image for {person_id}")
+ return False
+
+ face = max(faces, key=lambda f: f.confidence)
+ new_embedding = db.recognizer.get_normalized_embedding(
+ new_image, face.landmarks
+ )
+
+ if person_id in db.embeddings:
+ # Average with existing embedding
+ old_embedding = db.embeddings[person_id]
+ db.embeddings[person_id] = (old_embedding + new_embedding) / 2
+ # Re-normalize
+ db.embeddings[person_id] /= np.linalg.norm(db.embeddings[person_id])
+ print(f"Updated: {person_id}")
+ else:
+ db.embeddings[person_id] = new_embedding
+ print(f"Added: {person_id}")
+
+ return True
+
+# Usage
+update_face(db, "john_doe", cv2.imread("john_new.jpg"))
+db.save("face_database.npz")
+```
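+
+Removing an identity is plain dictionary bookkeeping; a small helper for completeness:
+
+```python
+def remove_face(db, person_id):
+    """Remove a person from the database, if present."""
+    if db.embeddings.pop(person_id, None) is not None:
+        db.metadata.pop(person_id, None)
+        print(f"Removed: {person_id}")
+        return True
+    return False
+```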
+
+---
+
+## Next Steps
+
+- [Anonymize Stream](anonymize-stream.md) - Privacy protection
+- [Batch Processing](batch-processing.md) - Process multiple files
+- [Recognition Module](../modules/recognition.md) - Model details
diff --git a/docs/recipes/image-pipeline.md b/docs/recipes/image-pipeline.md
new file mode 100644
index 0000000..3e9745b
--- /dev/null
+++ b/docs/recipes/image-pipeline.md
@@ -0,0 +1,279 @@
+# Image Pipeline
+
+A complete pipeline for processing images with detection, recognition, and attribute analysis.
+
+---
+
+## Basic Pipeline
+
+```python
+import cv2
+from uniface import RetinaFace, ArcFace, AgeGender
+from uniface.visualization import draw_detections
+
+# Initialize models
+detector = RetinaFace()
+recognizer = ArcFace()
+age_gender = AgeGender()
+
+def process_image(image_path):
+ """Process a single image through the full pipeline."""
+ # Load image
+ image = cv2.imread(image_path)
+
+ # Step 1: Detect faces
+ faces = detector.detect(image)
+ print(f"Found {len(faces)} face(s)")
+
+ results = []
+
+ for i, face in enumerate(faces):
+ # Step 2: Extract embedding
+ embedding = recognizer.get_normalized_embedding(image, face.landmarks)
+
+ # Step 3: Predict attributes
+ attrs = age_gender.predict(image, face.bbox)
+
+ results.append({
+ 'face_id': i,
+ 'bbox': face.bbox,
+ 'confidence': face.confidence,
+ 'embedding': embedding,
+ 'gender': attrs.sex,
+ 'age': attrs.age
+ })
+
+ print(f" Face {i+1}: {attrs.sex}, {attrs.age} years old")
+
+ # Visualize
+ draw_detections(
+ image=image,
+ bboxes=[f.bbox for f in faces],
+ scores=[f.confidence for f in faces],
+ landmarks=[f.landmarks for f in faces]
+ )
+
+ return image, results
+
+# Usage
+result_image, results = process_image("photo.jpg")
+cv2.imwrite("result.jpg", result_image)
+```
+
+---
+
+## Using FaceAnalyzer
+
+For convenience, use the built-in `FaceAnalyzer`:
+
+```python
+from uniface import FaceAnalyzer
+import cv2
+
+# Initialize with desired modules
+analyzer = FaceAnalyzer(
+ detect=True,
+ recognize=True,
+ attributes=True
+)
+
+# Process image
+image = cv2.imread("photo.jpg")
+faces = analyzer.analyze(image)
+
+# Access enriched Face objects
+for face in faces:
+ print(f"Confidence: {face.confidence:.2f}")
+ print(f"Embedding: {face.embedding.shape}")
+ print(f"Age: {face.age}, Gender: {face.sex}")
+```
+
+---
+
+## Full Analysis Pipeline
+
+Complete pipeline with all modules:
+
+```python
+import cv2
+import numpy as np
+from uniface import (
+ RetinaFace, ArcFace, AgeGender, FairFace,
+ Landmark106, MobileGaze
+)
+from uniface.parsing import BiSeNet
+from uniface.spoofing import MiniFASNet
+
+class FaceAnalysisPipeline:
+ def __init__(self):
+ # Initialize all models
+ self.detector = RetinaFace()
+ self.recognizer = ArcFace()
+ self.age_gender = AgeGender()
+ self.fairface = FairFace()
+ self.landmarker = Landmark106()
+ self.gaze = MobileGaze()
+ self.parser = BiSeNet()
+ self.spoofer = MiniFASNet()
+
+ def analyze(self, image):
+ """Run full analysis pipeline."""
+ faces = self.detector.detect(image)
+ results = []
+
+ for face in faces:
+ result = {
+ 'bbox': face.bbox,
+ 'confidence': face.confidence,
+ 'landmarks_5': face.landmarks
+ }
+
+ # Recognition embedding
+ result['embedding'] = self.recognizer.get_normalized_embedding(
+ image, face.landmarks
+ )
+
+ # Attributes
+ ag_result = self.age_gender.predict(image, face.bbox)
+ result['age'] = ag_result.age
+ result['gender'] = ag_result.sex
+
+ # FairFace attributes
+ ff_result = self.fairface.predict(image, face.bbox)
+ result['age_group'] = ff_result.age_group
+ result['race'] = ff_result.race
+
+ # 106-point landmarks
+ result['landmarks_106'] = self.landmarker.get_landmarks(
+ image, face.bbox
+ )
+
+ # Gaze estimation
+ x1, y1, x2, y2 = map(int, face.bbox)
+ face_crop = image[y1:y2, x1:x2]
+ if face_crop.size > 0:
+ gaze_result = self.gaze.estimate(face_crop)
+ result['gaze_pitch'] = gaze_result.pitch
+ result['gaze_yaw'] = gaze_result.yaw
+
+ # Face parsing
+ if face_crop.size > 0:
+ result['parsing_mask'] = self.parser.parse(face_crop)
+
+ # Anti-spoofing
+ spoof_result = self.spoofer.predict(image, face.bbox)
+ result['is_real'] = spoof_result.is_real
+ result['spoof_confidence'] = spoof_result.confidence
+
+ results.append(result)
+
+ return results
+
+# Usage
+pipeline = FaceAnalysisPipeline()
+results = pipeline.analyze(cv2.imread("photo.jpg"))
+
+for i, r in enumerate(results):
+    print(f"\nFace {i+1}:")
+    print(f"  Gender: {r['gender']}, Age: {r['age']}")
+    print(f"  Race: {r['race']}, Age Group: {r['age_group']}")
+    if 'gaze_pitch' in r:  # gaze is only computed when the face crop is non-empty
+        print(f"  Gaze: pitch={np.degrees(r['gaze_pitch']):.1f}°")
+    print(f"  Real: {r['is_real']} ({r['spoof_confidence']:.1%})")
+```
+
+---
+
+## Visualization Pipeline
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, AgeGender, MobileGaze
+from uniface.visualization import draw_detections, draw_gaze
+
+def visualize_analysis(image_path, output_path):
+ """Create annotated visualization of face analysis."""
+ detector = RetinaFace()
+ age_gender = AgeGender()
+ gaze = MobileGaze()
+
+ image = cv2.imread(image_path)
+ faces = detector.detect(image)
+
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+
+ # Draw bounding box
+ cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ # Age and gender
+ attrs = age_gender.predict(image, face.bbox)
+ label = f"{attrs.sex}, {attrs.age}y"
+ cv2.putText(image, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+
+ # Gaze
+ face_crop = image[y1:y2, x1:x2]
+ if face_crop.size > 0:
+ gaze_result = gaze.estimate(face_crop)
+ draw_gaze(image, face.bbox, gaze_result.pitch, gaze_result.yaw)
+
+ # Confidence
+ conf_label = f"{face.confidence:.0%}"
+ cv2.putText(image, conf_label, (x1, y2 + 20),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
+
+ cv2.imwrite(output_path, image)
+ print(f"Saved to {output_path}")
+
+# Usage
+visualize_analysis("input.jpg", "output.jpg")
+```
+
+---
+
+## JSON Output
+
+Export results to JSON:
+
+```python
+import json
+import numpy as np
+
+def results_to_json(results):
+ """Convert analysis results to JSON-serializable format."""
+ output = []
+
+ for r in results:
+ item = {
+ 'bbox': r['bbox'].tolist(),
+ 'confidence': float(r['confidence']),
+            'age': int(r['age']) if r.get('age') is not None else None,
+ 'gender': r.get('gender'),
+ 'race': r.get('race'),
+ 'is_real': r.get('is_real'),
+ 'gaze': {
+ 'pitch_deg': float(np.degrees(r['gaze_pitch'])) if 'gaze_pitch' in r else None,
+ 'yaw_deg': float(np.degrees(r['gaze_yaw'])) if 'gaze_yaw' in r else None
+ }
+ }
+ output.append(item)
+
+ return output
+
+# Usage
+results = pipeline.analyze(image)
+json_data = results_to_json(results)
+
+with open('results.json', 'w') as f:
+ json.dump(json_data, f, indent=2)
+```
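+
+Embeddings are omitted from the JSON on purpose: 512-float vectors belong in a binary format. If you need them later, save them next to the JSON (a sketch; assumes each result dict still carries its `embedding` key from the pipeline above):
+
+```python
+import numpy as np
+
+embeddings = np.vstack([r['embedding'] for r in results])  # (N, 512)
+np.savez('results_embeddings.npz', embeddings=embeddings)
+```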
+
+---
+
+## Next Steps
+
+- [Batch Processing](batch-processing.md) - Process multiple images
+- [Video & Webcam](video-webcam.md) - Real-time processing
+- [Face Search](face-search.md) - Build a search system
diff --git a/docs/recipes/video-webcam.md b/docs/recipes/video-webcam.md
new file mode 100644
index 0000000..713faf9
--- /dev/null
+++ b/docs/recipes/video-webcam.md
@@ -0,0 +1,392 @@
+# Video & Webcam
+
+Real-time face analysis for video streams.
+
+---
+
+## Webcam Detection
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+print("Press 'q' to quit")
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ # Detect faces
+ faces = detector.detect(frame)
+
+ # Draw results
+ draw_detections(
+ image=frame,
+ bboxes=[f.bbox for f in faces],
+ scores=[f.confidence for f in faces],
+ landmarks=[f.landmarks for f in faces]
+ )
+
+ cv2.imshow("Face Detection", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Video File Processing
+
+```python
+import cv2
+from uniface import RetinaFace
+from uniface.visualization import draw_detections
+
+def process_video(input_path, output_path):
+ """Process a video file."""
+ detector = RetinaFace()
+
+ cap = cv2.VideoCapture(input_path)
+
+ # Get video properties
+ fps = cap.get(cv2.CAP_PROP_FPS)
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+ # Setup output video
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+ out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+ frame_count = 0
+
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ # Detect and draw
+ faces = detector.detect(frame)
+ draw_detections(
+ image=frame,
+ bboxes=[f.bbox for f in faces],
+ scores=[f.confidence for f in faces],
+ landmarks=[f.landmarks for f in faces]
+ )
+
+ out.write(frame)
+
+ frame_count += 1
+ if frame_count % 100 == 0:
+ print(f"Processed {frame_count}/{total_frames} frames")
+
+ cap.release()
+ out.release()
+ print(f"Saved to {output_path}")
+
+# Usage
+process_video("input.mp4", "output.mp4")
+```
+
+---
+
+## FPS Counter
+
+Add frame rate display:
+
+```python
+import cv2
+import time
+from uniface import RetinaFace
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+prev_time = time.time()
+fps = 0
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ # Calculate FPS
+ curr_time = time.time()
+ fps = 1 / (curr_time - prev_time)
+ prev_time = curr_time
+
+ # Detect faces
+ faces = detector.detect(frame)
+
+ # Draw FPS
+ cv2.putText(frame, f"FPS: {fps:.1f}", (10, 30),
+ cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+ cv2.putText(frame, f"Faces: {len(faces)}", (10, 70),
+ cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+ # Draw detections
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ cv2.imshow("Face Detection", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
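+
+The instantaneous value jumps around from frame to frame. An exponential moving average gives a steadier readout; a small sketch to drop into the loop above:
+
+```python
+def smooth_fps(prev_fps, dt, alpha=0.1):
+    """Blend the newest frame time into a running FPS estimate."""
+    inst = 1.0 / dt if dt > 0 else 0.0
+    return inst if prev_fps == 0 else (1 - alpha) * prev_fps + alpha * inst
+
+# In the loop, replace the direct calculation with:
+# fps = smooth_fps(fps, curr_time - prev_time)
+```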
+
+---
+
+## Skip Frames for Performance
+
+Process every N frames for better performance:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+PROCESS_EVERY_N = 3 # Process every 3rd frame
+frame_count = 0
+last_faces = []
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+    # Only run the detector every N frames (including the first); reuse results in between
+    if frame_count % PROCESS_EVERY_N == 0:
+        last_faces = detector.detect(frame)
+
+    frame_count += 1
+
+ # Draw last detection results
+ for face in last_faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ cv2.imshow("Detection", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
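+
+Frame skipping still blocks the loop while the detector runs. Moving capture into a background thread keeps the preview smooth even when detection is slow; a minimal sketch using only the standard library:
+
+```python
+import threading
+
+import cv2
+from uniface import RetinaFace
+
+class ThreadedCapture:
+    """Grab frames continuously so the main loop always sees the latest one."""
+
+    def __init__(self, src=0):
+        self.cap = cv2.VideoCapture(src)
+        self.frame = None
+        self.running = True
+        self.lock = threading.Lock()
+        threading.Thread(target=self._reader, daemon=True).start()
+
+    def _reader(self):
+        while self.running:
+            ret, frame = self.cap.read()
+            if ret:
+                with self.lock:
+                    self.frame = frame
+
+    def read(self):
+        with self.lock:
+            return None if self.frame is None else self.frame.copy()
+
+    def release(self):
+        self.running = False
+        self.cap.release()
+
+# Usage: detection always runs on the freshest frame, never a stale queue
+cap = ThreadedCapture(0)
+detector = RetinaFace()
+
+while True:
+    frame = cap.read()
+    if frame is None:
+        continue
+
+    for face in detector.detect(frame):
+        x1, y1, x2, y2 = map(int, face.bbox)
+        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+    cv2.imshow("Threaded Detection", frame)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
+```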
+
+---
+
+## Full Analysis Pipeline
+
+Real-time detection with age/gender:
+
+```python
+import cv2
+from uniface import RetinaFace, AgeGender
+
+detector = RetinaFace()
+age_gender = AgeGender()
+cap = cv2.VideoCapture(0)
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+
+ # Draw box
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ # Predict age/gender
+ result = age_gender.predict(frame, face.bbox)
+ label = f"{result.sex}, {result.age}y"
+
+ cv2.putText(frame, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+
+ cv2.imshow("Age/Gender Detection", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Gaze Tracking
+
+Real-time gaze estimation:
+
+```python
+import cv2
+import numpy as np
+from uniface import RetinaFace, MobileGaze
+from uniface.visualization import draw_gaze
+
+detector = RetinaFace()
+gaze = MobileGaze()
+cap = cv2.VideoCapture(0)
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ face_crop = frame[y1:y2, x1:x2]
+
+ if face_crop.size > 0:
+ result = gaze.estimate(face_crop)
+
+ # Draw box
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ # Draw gaze arrow
+ draw_gaze(frame, face.bbox, result.pitch, result.yaw)
+
+ # Display angles
+ label = f"P:{np.degrees(result.pitch):.0f} Y:{np.degrees(result.yaw):.0f}"
+ cv2.putText(frame, label, (x1, y1 - 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+ cv2.imshow("Gaze Estimation", frame)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+cap.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Recording Output
+
+Record processed video:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+cap = cv2.VideoCapture(0)
+
+# Get camera properties (webcams often report 0 FPS; fall back to 30)
+fps = cap.get(cv2.CAP_PROP_FPS) or 30
+width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+# Setup recording
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+out = cv2.VideoWriter('recording.mp4', fourcc, fps, (width, height))
+
+is_recording = False
+
+print("Press 'r' to start/stop recording, 'q' to quit")
+
+while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ faces = detector.detect(frame)
+
+ # Draw detections
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ # Recording indicator
+ if is_recording:
+ cv2.circle(frame, (30, 30), 10, (0, 0, 255), -1)
+ out.write(frame)
+
+ cv2.imshow("Detection", frame)
+
+ key = cv2.waitKey(1) & 0xFF
+ if key == ord('r'):
+ is_recording = not is_recording
+ print(f"Recording: {is_recording}")
+ elif key == ord('q'):
+ break
+
+cap.release()
+out.release()
+cv2.destroyAllWindows()
+```
+
+---
+
+## Multi-Camera
+
+Process multiple cameras:
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+
+# Open multiple cameras
+caps = [
+ cv2.VideoCapture(0),
+ cv2.VideoCapture(1) # Second camera
+]
+
+while True:
+ frames = []
+
+ for i, cap in enumerate(caps):
+ ret, frame = cap.read()
+ if ret:
+ faces = detector.detect(frame)
+
+ for face in faces:
+ x1, y1, x2, y2 = map(int, face.bbox)
+ cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+ frames.append(frame)
+
+ # Display side by side
+ if len(frames) == 2:
+ combined = cv2.hconcat(frames)
+ cv2.imshow("Multi-Camera", combined)
+
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
+
+for cap in caps:
+ cap.release()
+cv2.destroyAllWindows()
+```
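+
+Note that `cv2.hconcat` requires frames with identical heights and dtypes. If your cameras deliver different resolutions, resize to a common height first:
+
+```python
+def hconcat_resize(frames, height=480):
+    """Resize frames to a shared height (keeping aspect ratio) before concatenating."""
+    resized = [
+        cv2.resize(f, (int(f.shape[1] * height / f.shape[0]), height))
+        for f in frames
+    ]
+    return cv2.hconcat(resized)
+```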
+
+---
+
+## Next Steps
+
+- [Anonymize Stream](anonymize-stream.md) - Privacy in video
+- [Face Search](face-search.md) - Identity search
+- [Image Pipeline](image-pipeline.md) - Full analysis
diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css
new file mode 100644
index 0000000..7fad898
--- /dev/null
+++ b/docs/stylesheets/extra.css
@@ -0,0 +1,43 @@
+/* UniFace Documentation - Custom Styles */
+
+/* Hero section */
+.hero {
+ text-align: center;
+ padding: 2rem 0;
+}
+
+.hero-title {
+ font-size: 3rem !important;
+ font-weight: 700 !important;
+ margin-bottom: 0.5rem !important;
+}
+
+/* Feature grid */
+.feature-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
+ gap: 1rem;
+ margin: 1.5rem 0;
+}
+
+.feature-card {
+ padding: 1rem;
+ border-radius: 8px;
+ background: var(--md-code-bg-color);
+ border: 1px solid var(--md-default-fg-color--lightest);
+}
+
+.feature-card h3 {
+ margin-top: 0;
+ font-size: 0.9rem;
+}
+
+@media (max-width: 768px) {
+ .hero-title {
+ font-size: 2rem !important;
+ }
+
+ .feature-grid {
+ grid-template-columns: 1fr;
+ }
+}
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
new file mode 100644
index 0000000..6009e59
--- /dev/null
+++ b/docs/troubleshooting.md
@@ -0,0 +1,159 @@
+# Troubleshooting
+
+Common issues and solutions.
+
+---
+
+## Installation Issues
+
+### Import Error
+
+```
+ModuleNotFoundError: No module named 'uniface'
+```
+
+**Solution:** Install the package:
+
+```bash
+pip install uniface
+```
+
+### Python Version
+
+```
+Python 3.10+ required
+```
+
+**Solution:** Check your Python version:
+
+```bash
+python --version  # Should be 3.10 or newer
+```
+
+---
+
+## Model Issues
+
+### Model Download Failed
+
+```
+Failed to download model
+```
+
+**Solution:** Trigger the download (with checksum verification) manually:
+
+```python
+from uniface.model_store import verify_model_weights
+from uniface.constants import RetinaFaceWeights
+
+path = verify_model_weights(RetinaFaceWeights.MNET_V2)
+```
+
+### Model Not Found
+
+**Solution:** Check cache directory:
+
+```bash
+ls ~/.uniface/models/
+```
+
+---
+
+## Performance Issues
+
+### Slow on Mac
+
+**Check:** Verify ARM64 Python:
+
+```bash
+python -c "import platform; print(platform.machine())"
+# Should show: arm64
+```
+
+### No GPU Acceleration
+
+**Check:** Verify CUDA:
+
+```python
+import onnxruntime as ort
+print(ort.get_available_providers())
+# Should include 'CUDAExecutionProvider'
+```
+
+**Solution:** Install GPU version:
+
+```bash
+pip install uniface[gpu]
+```
+
+---
+
+## Detection Issues
+
+### No Faces Detected
+
+**Try:**
+
+1. Lower confidence threshold:
+ ```python
+ detector = RetinaFace(confidence_threshold=0.3)
+ ```
+
+2. Check image format (should be BGR):
+ ```python
+ image = cv2.imread("photo.jpg") # BGR format
+ ```
+
+### Wrong Bounding Boxes
+
+**Check:** Image orientation. Some cameras return rotated images.
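+
+A quick way to test: try the three 90° rotations and keep the first orientation in which faces are found (a sketch, reusing the detector from the snippets above):
+
+```python
+import cv2
+from uniface import RetinaFace
+
+detector = RetinaFace()
+image = cv2.imread("photo.jpg")
+
+rotations = (None, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_180, cv2.ROTATE_90_COUNTERCLOCKWISE)
+for rot in rotations:
+    candidate = image if rot is None else cv2.rotate(image, rot)
+    if detector.detect(candidate):
+        image = candidate
+        break
+```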
+
+---
+
+## Recognition Issues
+
+### Low Similarity Scores
+
+**Try:**
+
+1. Ensure face alignment is working
+2. Use higher quality images
+3. Check lighting conditions
+
+### Different Results Each Time
+
+**Note:** Results should be deterministic. If not, check:
+
+- Image preprocessing
+- Model loading
+
+---
+
+## Memory Issues
+
+### Out of Memory
+
+**Solutions:**
+
+1. Process images in batches
+2. Use smaller input size:
+ ```python
+ detector = RetinaFace(input_size=(320, 320))
+ ```
+3. Release resources:
+ ```python
+ del detector
+ import gc
+ gc.collect()
+ ```
+
+---
+
+## Still Having Issues?
+
+1. Check [GitHub Issues](https://github.com/yakhyo/uniface/issues)
+2. Open a new issue with:
+ - Python version
+ - UniFace version
+ - Error message
+ - Minimal code to reproduce
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..b55d392
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,152 @@
+site_name: UniFace
+site_description: All-in-One Face Analysis Library with ONNX Runtime
+site_author: Yakhyokhuja Valikhujaev
+site_url: https://yakhyo.github.io/uniface
+
+repo_name: yakhyo/uniface
+repo_url: https://github.com/yakhyo/uniface
+edit_uri: edit/main/docs/
+
+copyright: Copyright © 2025 Yakhyokhuja Valikhujaev
+
+theme:
+ name: material
+ palette:
+ - media: "(prefers-color-scheme: light)"
+ scheme: default
+ primary: custom
+ accent: custom
+ toggle:
+ icon: material/brightness-7
+ name: Switch to dark mode
+ - media: "(prefers-color-scheme: dark)"
+ scheme: slate
+ primary: custom
+ accent: custom
+ toggle:
+ icon: material/brightness-4
+ name: Switch to light mode
+ font:
+ text: Roboto
+ code: Roboto Mono
+ features:
+ - navigation.tabs
+ - navigation.sections
+ - navigation.path
+ - navigation.top
+ - navigation.footer
+ - navigation.indexes
+ - navigation.instant
+ - navigation.tracking
+ - search.suggest
+ - search.highlight
+ - content.code.copy
+ - content.code.annotate
+ - content.action.edit
+ - content.tabs.link
+ - toc.follow
+  icon:
+    logo: material/book-open-page-variant
+    repo: fontawesome/brands/github
+    admonition:
+      note: octicons/tag-16
+      abstract: octicons/checklist-16
+      info: octicons/info-16
+      tip: octicons/squirrel-16
+      success: octicons/check-16
+      question: octicons/question-16
+      warning: octicons/alert-16
+      failure: octicons/x-circle-16
+      danger: octicons/zap-16
+      bug: octicons/bug-16
+      example: octicons/beaker-16
+      quote: octicons/quote-16
+
+extra:
+ social:
+ - icon: fontawesome/brands/github
+ link: https://github.com/yakhyo
+ - icon: fontawesome/brands/python
+ link: https://pypi.org/project/uniface/
+ version:
+ provider: mike
+ analytics:
+ provider: google
+ property: G-XXXXXXXXXX
+
+extra_css:
+ - stylesheets/extra.css
+
+markdown_extensions:
+ - admonition
+ - footnotes
+ - attr_list
+ - md_in_html
+ - def_list
+ - tables
+ - toc:
+ permalink: true
+ toc_depth: 3
+ - pymdownx.superfences:
+ custom_fences:
+ - name: mermaid
+ class: mermaid
+ format: !!python/name:pymdownx.superfences.fence_code_format
+ - pymdownx.details
+ - pymdownx.highlight:
+ anchor_linenums: true
+ line_spans: __span
+ pygments_lang_class: true
+ - pymdownx.inlinehilite
+ - pymdownx.snippets
+ - pymdownx.tabbed:
+ alternate_style: true
+ - pymdownx.emoji:
+ emoji_index: !!python/name:material.extensions.emoji.twemoji
+ emoji_generator: !!python/name:material.extensions.emoji.to_svg
+ - pymdownx.tasklist:
+ custom_checkbox: true
+ - pymdownx.keys
+ - pymdownx.mark
+ - pymdownx.critic
+ - pymdownx.caret
+ - pymdownx.tilde
+
+plugins:
+ - search
+
+nav:
+ - Home: index.md
+ - Getting started:
+ - Installation: installation.md
+ - Quickstart: quickstart.md
+ - Concepts:
+ - Overview: concepts/overview.md
+ - Inputs & Outputs: concepts/inputs-outputs.md
+ - Coordinate Systems: concepts/coordinate-systems.md
+ - Execution Providers: concepts/execution-providers.md
+ - Model Cache: concepts/model-cache-offline.md
+ - Thresholds: concepts/thresholds-calibration.md
+ - API:
+ - Detection: modules/detection.md
+ - Recognition: modules/recognition.md
+ - Landmarks: modules/landmarks.md
+ - Attributes: modules/attributes.md
+ - Parsing: modules/parsing.md
+ - Gaze: modules/gaze.md
+ - Anti-Spoofing: modules/spoofing.md
+ - Privacy: modules/privacy.md
+ - Examples:
+ - Image Pipeline: recipes/image-pipeline.md
+ - Batch Processing: recipes/batch-processing.md
+ - Video & Webcam: recipes/video-webcam.md
+ - Face Search: recipes/face-search.md
+ - Anonymize Stream: recipes/anonymize-stream.md
+ - Custom Models: recipes/custom-models.md
+ - Reference:
+ - API Reference: api/reference.md
+ - Troubleshooting: troubleshooting.md
+ - FAQ: faq.md
+ - Changelog: changelog.md
+ - Contributing: contributing.md
+ - License: license-attribution.md