feat: Add face blurring for privacy (#39)

* feat: Add face blurring for privacy

* chore: Revert back the version
Yakhyokhuja Valikhujaev
2025-12-20 20:57:42 +09:00
committed by GitHub
parent 13b518e96d
commit d085c6a822
11 changed files with 901 additions and 14 deletions


@@ -51,6 +51,7 @@ Example notebooks demonstrating library usage:
| Face Recognition | [face_analyzer.ipynb](examples/face_analyzer.ipynb) |
| Face Verification | [face_verification.ipynb](examples/face_verification.ipynb) |
| Face Search | [face_search.ipynb](examples/face_search.ipynb) |
| Face Anonymization | [face_anonymization.ipynb](examples/face_anonymization.ipynb) |

## Questions?


@@ -328,7 +328,86 @@ Detected 12 facial components
---

## 9. Face Anonymization (2 minutes)
Automatically blur faces for privacy protection:
```python
from uniface.privacy import anonymize_faces
import cv2
# One-liner: automatic detection and blurring
image = cv2.imread("group_photo.jpg")
anonymized = anonymize_faces(image, method='pixelate')
cv2.imwrite("anonymized.jpg", anonymized)
print("Faces anonymized successfully!")
```
**Manual control with custom parameters:**
```python
from uniface import RetinaFace
from uniface.privacy import BlurFace
# Initialize detector and blurrer
detector = RetinaFace()
blurrer = BlurFace(method='gaussian', blur_strength=5.0)
# Detect and anonymize
faces = detector.detect(image)
anonymized = blurrer.anonymize(image, faces)
cv2.imwrite("output.jpg", anonymized)
```
**Available blur methods:**
```python
# Pixelation (news media standard)
blurrer = BlurFace(method='pixelate', pixel_blocks=8)
# Gaussian blur (smooth, natural)
blurrer = BlurFace(method='gaussian', blur_strength=4.0)
# Black boxes (maximum privacy)
blurrer = BlurFace(method='blackout', color=(0, 0, 0))
# Elliptical blur (natural face shape)
blurrer = BlurFace(method='elliptical', blur_strength=3.0, margin=30)
# Median blur (edge-preserving)
blurrer = BlurFace(method='median', blur_strength=3.0)
```
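**Blurring arbitrary regions:**

`BlurFace.blur_regions()` accepts raw `[x1, y1, x2, y2]` boxes, so you can also anonymize regions that are not detected faces. A minimal sketch (the file name and coordinates below are placeholders):

```python
import cv2
from uniface.privacy import BlurFace

image = cv2.imread("street.jpg")  # placeholder input image
blurrer = BlurFace(method='pixelate')

# Hand-picked regions, e.g. a name tag and a license plate
regions = [[50, 80, 200, 160], [300, 400, 420, 460]]
blurred = blurrer.blur_regions(image, regions)
cv2.imwrite("street_blurred.jpg", blurred)
```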
**Webcam anonymization:**
```python
import cv2
from uniface import RetinaFace
from uniface.privacy import BlurFace
detector = RetinaFace()
blurrer = BlurFace(method='pixelate')
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break

    faces = detector.detect(frame)
    frame = blurrer.anonymize(frame, faces, inplace=True)

    cv2.imshow('Anonymized', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
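**Video file anonymization (sketch):**

The same loop works on video files. Below is a sketch using OpenCV's `VideoWriter`; the input/output paths and codec are placeholders you would adapt:

```python
import cv2
from uniface import RetinaFace
from uniface.privacy import BlurFace

detector = RetinaFace()
blurrer = BlurFace(method='gaussian')

cap = cv2.VideoCapture("input.mp4")  # placeholder path
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
writer = cv2.VideoWriter("anonymized.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, size)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    faces = detector.detect(frame)
    frame = blurrer.anonymize(frame, faces, inplace=True)
    writer.write(frame)

cap.release()
writer.release()
```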
---
## 10. Batch Processing (3 minutes)
Process multiple images:
@@ -361,7 +440,7 @@ print("Done!")
---

## 11. Model Selection

Choose the right model for your use case:


@@ -23,6 +23,7 @@
- **Face Parsing**: BiSeNet-based semantic segmentation with 19 facial component classes
- **Gaze Estimation**: Real-time gaze direction prediction with MobileGaze
- **Attribute Analysis**: Age, gender, and emotion detection
- **Face Anonymization**: Privacy-preserving face blurring with multiple methods
- **Face Alignment**: Precise alignment for downstream tasks
- **Hardware Acceleration**: ARM64 optimizations (Apple Silicon), CUDA (NVIDIA), CPU fallback
- **Simple API**: Intuitive factory functions and clean interfaces
@@ -198,6 +199,34 @@ vis_result = vis_parsing_maps(face_rgb, mask, save_image=False)
print(f"Unique classes: {len(np.unique(mask))}") print(f"Unique classes: {len(np.unique(mask))}")
``` ```
### Face Anonymization
Blur or pixelate faces for privacy protection:
```python
from uniface import RetinaFace
from uniface.privacy import BlurFace, anonymize_faces
# Method 1: One-liner with automatic detection
anonymized = anonymize_faces(image, method='pixelate')
# Method 2: Manual control with custom parameters
detector = RetinaFace()
blurrer = BlurFace(method='gaussian', blur_strength=4.0)
faces = detector.detect(image)
anonymized = blurrer.anonymize(image, faces)
# Available methods with examples
methods = {
    'gaussian': BlurFace(method='gaussian', blur_strength=3.0),  # Smooth, natural blur
    'pixelate': BlurFace(method='pixelate', pixel_blocks=10),    # Blocky effect (news media)
    'blackout': BlurFace(method='blackout', color=(0, 0, 0)),    # Solid color (max privacy)
    'elliptical': BlurFace(method='elliptical', margin=20),      # Soft oval blur
    'median': BlurFace(method='median', blur_strength=3.0),      # Edge-preserving
}
```
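For many images, you can create the detector once and pass it to `anonymize_faces` so the model is not re-initialized on every call. A short sketch (the glob pattern and output naming are placeholders):

```python
import glob

import cv2

from uniface import RetinaFace
from uniface.privacy import anonymize_faces

detector = RetinaFace()
for path in glob.glob("photos/*.jpg"):  # placeholder pattern
    image = cv2.imread(path)
    if image is None:
        continue
    anonymized = anonymize_faces(image, detector=detector, method='pixelate')
    cv2.imwrite(path.replace(".jpg", "_anon.jpg"), anonymized)
```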
---

## Documentation
@@ -216,6 +245,7 @@ print(f"Unique classes: {len(np.unique(mask))}")
from uniface.detection import RetinaFace, SCRFD
from uniface.recognition import ArcFace
from uniface.landmark import Landmark106
from uniface.privacy import BlurFace, anonymize_faces
from uniface.constants import SCRFDWeights

File diff suppressed because one or more lines are too long


@@ -7,6 +7,7 @@ Scripts for testing UniFace features.
| Script | Description |
|--------|-------------|
| `run_detection.py` | Face detection on image or webcam |
| `run_anonymization.py` | Face anonymization/blurring for privacy |
| `run_age_gender.py` | Age and gender prediction |
| `run_emotion.py` | Emotion detection (7 or 8 emotions) |
| `run_gaze_estimation.py` | Gaze direction estimation |
@@ -26,6 +27,11 @@ Scripts for testing UniFace features.
python scripts/run_detection.py --image assets/test.jpg
python scripts/run_detection.py --webcam
# Face anonymization
python scripts/run_anonymization.py --image assets/test.jpg --method pixelate
python scripts/run_anonymization.py --webcam --method gaussian
python scripts/run_anonymization.py --image photo.jpg --method pixelate --pixel-blocks 5
# Age and gender
python scripts/run_age_gender.py --image assets/test.jpg
python scripts/run_age_gender.py --webcam


@@ -0,0 +1,207 @@
# Face anonymization/blurring for privacy
# Usage: python run_anonymization.py --image path/to/image.jpg --method pixelate
# python run_anonymization.py --webcam --method gaussian
import argparse
import os
import cv2
from uniface import RetinaFace
from uniface.privacy import BlurFace


def process_image(
    detector,
    blurrer: BlurFace,
    image_path: str,
    save_dir: str = 'outputs',
    show_detections: bool = False,
):
    """Process a single image."""
    image = cv2.imread(image_path)
    if image is None:
        print(f"Error: Failed to load image from '{image_path}'")
        return

    # Detect faces
    faces = detector.detect(image)
    print(f'Detected {len(faces)} face(s)')

    # Optionally draw detection boxes before blurring
    if show_detections and faces:
        from uniface.visualization import draw_detections

        preview = image.copy()
        bboxes = [face['bbox'] for face in faces]
        scores = [face['confidence'] for face in faces]
        landmarks = [face['landmarks'] for face in faces]
        draw_detections(preview, bboxes, scores, landmarks)

        # Show preview
        cv2.imshow('Detections (Press any key to continue)', preview)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    # Anonymize faces
    if faces:
        anonymized = blurrer.anonymize(image, faces)
    else:
        anonymized = image

    # Save output
    os.makedirs(save_dir, exist_ok=True)
    basename = os.path.splitext(os.path.basename(image_path))[0]
    output_path = os.path.join(save_dir, f'{basename}_anonymized.jpg')
    cv2.imwrite(output_path, anonymized)
    print(f'Output saved: {output_path}')


def run_webcam(detector, blurrer: BlurFace):
    """Run real-time anonymization on webcam."""
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print('Cannot open webcam')
        return

    print("Press 'q' to quit")
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.flip(frame, 1)  # mirror for natural interaction

        # Detect and anonymize
        faces = detector.detect(frame)
        if faces:
            frame = blurrer.anonymize(frame, faces, inplace=True)

        # Display info
        cv2.putText(
            frame,
            f'Faces blurred: {len(faces)} | Method: {blurrer.method}',
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 255, 0),
            2,
        )

        cv2.imshow('Face Anonymization (Press q to quit)', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def main():
    parser = argparse.ArgumentParser(
        description='Face anonymization using various blur methods',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Anonymize image with pixelation (default)
  python run_anonymization.py --image photo.jpg

  # Use Gaussian blur with custom strength
  python run_anonymization.py --image photo.jpg --method gaussian --blur-strength 5.0

  # Real-time webcam anonymization
  python run_anonymization.py --webcam --method pixelate

  # Black boxes for maximum privacy
  python run_anonymization.py --image photo.jpg --method blackout

  # Custom pixelation intensity
  python run_anonymization.py --image photo.jpg --method pixelate --pixel-blocks 5
""",
    )

    # Input/output
    parser.add_argument('--image', type=str, help='Path to input image')
    parser.add_argument('--webcam', action='store_true', help='Use webcam for real-time anonymization')
    parser.add_argument('--save-dir', type=str, default='outputs', help='Output directory (default: outputs)')

    # Blur method
    parser.add_argument(
        '--method',
        type=str,
        default='pixelate',
        choices=['gaussian', 'pixelate', 'blackout', 'elliptical', 'median'],
        help='Blur method (default: pixelate)',
    )

    # Method-specific parameters
    parser.add_argument(
        '--blur-strength',
        type=float,
        default=3.0,
        help='Blur strength for gaussian/elliptical/median (default: 3.0)',
    )
    parser.add_argument(
        '--pixel-blocks',
        type=int,
        default=20,
        help='Number of pixel blocks for pixelate (default: 20, lower=more pixelated)',
    )
    parser.add_argument(
        '--color',
        type=str,
        default='0,0,0',
        help='Fill color for blackout as R,G,B (default: 0,0,0 for black)',
    )
    parser.add_argument('--margin', type=int, default=20, help='Margin for elliptical blur (default: 20)')

    # Detection
    parser.add_argument(
        '--conf-thresh',
        type=float,
        default=0.5,
        help='Detection confidence threshold (default: 0.5)',
    )

    # Visualization
    parser.add_argument(
        '--show-detections',
        action='store_true',
        help='Show detection boxes before blurring (image mode only)',
    )

    args = parser.parse_args()

    # Validate input
    if not args.image and not args.webcam:
        parser.error('Either --image or --webcam must be specified')

    # Parse color
    color_values = [int(x) for x in args.color.split(',')]
    if len(color_values) != 3:
        parser.error('--color must be in format R,G,B (e.g., 0,0,0)')
    color = tuple(color_values)

    # Initialize detector
    print(f'Initializing face detector (conf_thresh={args.conf_thresh})...')
    detector = RetinaFace(conf_thresh=args.conf_thresh)

    # Initialize blurrer
    print(f'Initializing blur method: {args.method}')
    blurrer = BlurFace(
        method=args.method,
        blur_strength=args.blur_strength,
        pixel_blocks=args.pixel_blocks,
        color=color,
        margin=args.margin,
    )

    # Run
    if args.webcam:
        run_webcam(detector, blurrer)
    else:
        process_image(detector, blurrer, args.image, args.save_dir, args.show_detections)


if __name__ == '__main__':
    main()


@@ -40,6 +40,7 @@ from .detection import (
from .gaze import MobileGaze, create_gaze_estimator
from .landmark import Landmark106, create_landmarker
from .parsing import BiSeNet, create_face_parser
from .privacy import BlurFace, anonymize_faces
from .recognition import ArcFace, MobileFace, SphereFace, create_recognizer

__all__ = [
@@ -74,6 +75,9 @@ __all__ = [
    # Attribute models
    'AgeGender',
    'Emotion',
    # Privacy
    'BlurFace',
    'anonymize_faces',
    # Utilities
    'compute_similarity',
    'draw_detections',


@@ -51,8 +51,4 @@ def create_gaze_estimator(method: str = 'mobilegaze', **kwargs) -> BaseGazeEstim
raise ValueError(f"Unsupported gaze estimation method: '{method}'. Available: {available}") raise ValueError(f"Unsupported gaze estimation method: '{method}'. Available: {available}")
__all__ = [ __all__ = ['create_gaze_estimator', 'MobileGaze', 'BaseGazeEstimator']
'create_gaze_estimator',
'MobileGaze',
'BaseGazeEstimator',
]


@@ -0,0 +1,52 @@
# Copyright 2025 Yakhyokhuja Valikhujaev
# Author: Yakhyokhuja Valikhujaev
# GitHub: https://github.com/yakhyo
from typing import Optional
import numpy as np
from .blur import BlurFace


def anonymize_faces(
    image: np.ndarray,
    detector: Optional[object] = None,
    method: str = 'pixelate',
    blur_strength: float = 3.0,
    pixel_blocks: int = 10,
    conf_thresh: float = 0.5,
    **kwargs,
) -> np.ndarray:
    """One-line face anonymization with automatic detection.

    Args:
        image (np.ndarray): Input image (BGR format).
        detector: Face detector instance. Creates RetinaFace if None.
        method (str): Blur method name. Defaults to 'pixelate'.
        blur_strength (float): Blur intensity. Defaults to 3.0.
        pixel_blocks (int): Block count for pixelate. Defaults to 10.
        conf_thresh (float): Detection confidence threshold. Defaults to 0.5.
        **kwargs: Additional detector arguments.

    Returns:
        np.ndarray: Anonymized image.

    Example:
        >>> from uniface.privacy import anonymize_faces
        >>> anonymized = anonymize_faces(image, method='pixelate')
    """
    if detector is None:
        try:
            from uniface import RetinaFace

            detector = RetinaFace(conf_thresh=conf_thresh, **kwargs)
        except ImportError as err:
            raise ImportError('Could not import RetinaFace. Please ensure UniFace is properly installed.') from err

    faces = detector.detect(image)
    blurrer = BlurFace(method=method, blur_strength=blur_strength, pixel_blocks=pixel_blocks)
    return blurrer.anonymize(image, faces)


__all__ = ['BlurFace', 'anonymize_faces']

uniface/privacy/blur.py Normal file

@@ -0,0 +1,193 @@
# Copyright 2025 Yakhyokhuja Valikhujaev
# Author: Yakhyokhuja Valikhujaev
# GitHub: https://github.com/yakhyo
from typing import Dict, List, Tuple, Union
import cv2
import numpy as np
__all__ = ['BlurFace']
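# Note: the helpers below scale the blur kernel with the region size and the strength
# multiplier; the trailing `| 1` forces an odd kernel size, as required by
# cv2.GaussianBlur and cv2.medianBlur.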
def _gaussian_blur(region: np.ndarray, strength: float = 3.0) -> np.ndarray:
    """Apply Gaussian blur to a region."""
    h, w = region.shape[:2]
    kernel_size = max(3, int((min(h, w) / 7) * strength)) | 1
    return cv2.GaussianBlur(region, (kernel_size, kernel_size), 0)


def _median_blur(region: np.ndarray, strength: float = 3.0) -> np.ndarray:
    """Apply median blur to a region."""
    h, w = region.shape[:2]
    kernel_size = max(3, int((min(h, w) / 7) * strength)) | 1
    return cv2.medianBlur(region, kernel_size)


def _pixelate_blur(region: np.ndarray, blocks: int = 10) -> np.ndarray:
    """Apply pixelation to a region."""
    h, w = region.shape[:2]
    temp_h, temp_w = max(1, h // blocks), max(1, w // blocks)
    temp = cv2.resize(region, (temp_w, temp_h), interpolation=cv2.INTER_LINEAR)
    return cv2.resize(temp, (w, h), interpolation=cv2.INTER_NEAREST)


def _blackout_blur(region: np.ndarray, color: Tuple[int, int, int] = (0, 0, 0)) -> np.ndarray:
    """Replace region with solid color."""
    return np.full_like(region, color)


class EllipticalBlur:
    """Elliptical blur with soft, feathered edges.

    This blur applies Gaussian blur within an elliptical mask that follows
    the natural oval shape of faces, requiring full image context for proper blending.

    Args:
        blur_strength (float): Blur intensity multiplier. Defaults to 3.0.
        margin (int): Extra pixels to extend ellipse beyond bbox. Defaults to 20.
    """

    def __init__(self, blur_strength: float = 3.0, margin: int = 20):
        self.blur_strength = blur_strength
        self.margin = margin

    def __call__(
        self,
        image: np.ndarray,
        bboxes: List[Union[Tuple, List]],
        inplace: bool = False,
    ) -> np.ndarray:
        if not inplace:
            image = image.copy()

        h, w = image.shape[:2]
        for bbox in bboxes:
            x1, y1, x2, y2 = map(int, bbox)
            center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
            axes_x = (x2 - x1) // 2 + self.margin
            axes_y = (y2 - y1) // 2 + self.margin

            # Create soft elliptical mask
            mask = np.zeros((h, w), dtype=np.float32)
            cv2.ellipse(mask, (center_x, center_y), (axes_x, axes_y), 0, 0, 360, 255, -1)
            mask = cv2.GaussianBlur(mask, (51, 51), 0) / 255.0
            mask = mask[:, :, np.newaxis]

            kernel_size = max(3, int((min(axes_y, axes_x) * 2 / 7) * self.blur_strength)) | 1
            blurred = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
            image = (blurred * mask + image * (1 - mask)).astype(np.uint8)

        return image


class BlurFace:
    """Face blurring with multiple anonymization methods.

    Args:
        method (str): Blur method - 'gaussian', 'pixelate', 'blackout', 'elliptical', or 'median'.
            Defaults to 'pixelate'.
        blur_strength (float): Intensity for gaussian/elliptical/median. Defaults to 3.0.
        pixel_blocks (int): Block count for pixelate. Defaults to 15.
        color (Tuple[int, int, int]): Fill color (BGR) for blackout. Defaults to (0, 0, 0).
        margin (int): Edge margin for elliptical. Defaults to 20.

    Example:
        >>> blurrer = BlurFace(method='pixelate')
        >>> anonymized = blurrer.anonymize(image, faces)
    """

    VALID_METHODS = {'gaussian', 'pixelate', 'blackout', 'elliptical', 'median'}

    def __init__(
        self,
        method: str = 'pixelate',
        blur_strength: float = 3.0,
        pixel_blocks: int = 15,
        color: Tuple[int, int, int] = (0, 0, 0),
        margin: int = 20,
    ):
        self.method = method.lower()
        self._blur_strength = blur_strength
        self._pixel_blocks = pixel_blocks
        self._color = color
        self._margin = margin

        if self.method not in self.VALID_METHODS:
            raise ValueError(f"Invalid blur method: '{method}'. Choose from: {sorted(self.VALID_METHODS)}")

        if self.method == 'elliptical':
            self._elliptical = EllipticalBlur(blur_strength, margin)

    def _blur_region(self, region: np.ndarray) -> np.ndarray:
        if self.method == 'gaussian':
            return _gaussian_blur(region, self._blur_strength)
        elif self.method == 'median':
            return _median_blur(region, self._blur_strength)
        elif self.method == 'pixelate':
            return _pixelate_blur(region, self._pixel_blocks)
        elif self.method == 'blackout':
            return _blackout_blur(region, self._color)

    def anonymize(
        self,
        image: np.ndarray,
        faces: List[Dict],
        inplace: bool = False,
    ) -> np.ndarray:
        """Anonymize faces in an image.

        Args:
            image (np.ndarray): Input image (BGR format).
            faces (List[Dict]): Face detections with 'bbox' key containing [x1, y1, x2, y2].
            inplace (bool): Modify image in-place if True. Defaults to False.

        Returns:
            np.ndarray: Image with anonymized faces.
        """
        if not faces:
            return image if inplace else image.copy()

        bboxes = [face['bbox'] for face in faces]
        return self.blur_regions(image, bboxes, inplace)

    def blur_regions(
        self,
        image: np.ndarray,
        bboxes: List[Union[Tuple, List]],
        inplace: bool = False,
    ) -> np.ndarray:
        """Blur specific rectangular regions in an image.

        Args:
            image (np.ndarray): Input image (BGR format).
            bboxes (List): Bounding boxes as [x1, y1, x2, y2].
            inplace (bool): Modify image in-place if True. Defaults to False.

        Returns:
            np.ndarray: Image with blurred regions.
        """
        if not bboxes:
            return image if inplace else image.copy()

        if self.method == 'elliptical':
            return self._elliptical(image, bboxes, inplace)

        if not inplace:
            image = image.copy()

        h, w = image.shape[:2]
        for bbox in bboxes:
            x1, y1, x2, y2 = map(int, bbox)
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(w, x2), min(h, y2)
            if x2 > x1 and y2 > y1:
                image[y1:y2, x1:x2] = self._blur_region(image[y1:y2, x1:x2])

        return image

    def __repr__(self) -> str:
        return f"BlurFace(method='{self.method}')"


@@ -55,10 +55,4 @@ def create_recognizer(method: str = 'arcface', **kwargs) -> BaseRecognizer:
raise ValueError(f"Unsupported method: '{method}'. Available: {available}") raise ValueError(f"Unsupported method: '{method}'. Available: {available}")
__all__ = [ __all__ = ['create_recognizer', 'BaseRecognizer', 'ArcFace', 'MobileFace', 'SphereFace']
'create_recognizer',
'ArcFace',
'MobileFace',
'SphereFace',
'BaseRecognizer',
]