mirror of
https://github.com/yakhyo/uniface.git
synced 2026-05-15 04:37:49 +00:00
307 lines
6.2 KiB
Markdown
# Attributes
|
|
|
|
Facial attribute analysis for age, gender, race, and emotion detection.
|
|
|
|
<figure markdown="span">
|
|
{ width="100%" }
|
|
<figcaption>Age and gender prediction with detection bounding boxes</figcaption>
|
|
</figure>
|
|
|
|
---
|
|
|
|
## Available Models
|
|
|
|
| Model | Attributes | Size | Notes |
|
|
|-------|------------|------|-------|
|
|
| **AgeGender** | Age, Gender | 8 MB | Exact age prediction |
|
|
| **FairFace** | Gender, Age Group, Race | 44 MB | Balanced demographics |
|
|
| **Emotion** | 7-8 emotions | 2 MB | Requires PyTorch |
|
|
|
|
---
|
|
|
|
## AgeGender
|
|
|
|
Predicts exact age and binary gender.
|
|
|
|
### Basic Usage
|
|
|
|
```python
|
|
from uniface.attribute import AgeGender
|
|
from uniface.detection import RetinaFace
|
|
|
|
detector = RetinaFace()
|
|
age_gender = AgeGender()
|
|
|
|
faces = detector.detect(image)
|
|
|
|
for face in faces:
|
|
result = age_gender.predict(image, face)
|
|
print(f"Gender: {result.sex}") # "Female" or "Male"
|
|
print(f"Age: {result.age} years")
|
|
# face.gender and face.age are also set automatically
|
|
```
|
|
|
|
### Output
|
|
|
|
```python
|
|
# AttributeResult fields
|
|
result.gender # 0=Female, 1=Male
|
|
result.sex # "Female" or "Male" (property)
|
|
result.age # int, age in years
|
|
result.age_group # None (not provided by this model)
|
|
result.race # None (not provided by this model)
|
|
```
|
|
|
|
---
|
|
|
|
## FairFace
|
|
|
|
Predicts gender, age group, and race with balanced demographics.
|
|
|
|
### Basic Usage
|
|
|
|
```python
|
|
from uniface.attribute import FairFace
|
|
from uniface.detection import RetinaFace
|
|
|
|
detector = RetinaFace()
|
|
fairface = FairFace()
|
|
|
|
faces = detector.detect(image)
|
|
|
|
for face in faces:
|
|
result = fairface.predict(image, face)
|
|
print(f"Gender: {result.sex}")
|
|
print(f"Age Group: {result.age_group}")
|
|
print(f"Race: {result.race}")
|
|
# face.gender, face.age_group, face.race are also set automatically
|
|
```
|
|
|
|
### Output
|
|
|
|
```python
|
|
# AttributeResult fields
|
|
result.gender # 0=Female, 1=Male
|
|
result.sex # "Female" or "Male"
|
|
result.age # None (not provided by this model)
|
|
result.age_group # "20-29", "30-39", etc.
|
|
result.race # Race/ethnicity label
|
|
```
|
|
|
|
### Race Categories
|
|
|
|
| Label |
|
|
|-------|
|
|
| White |
|
|
| Black |
|
|
| Latino Hispanic |
|
|
| East Asian |
|
|
| Southeast Asian |
|
|
| Indian |
|
|
| Middle Eastern |
|
|
|
|
### Age Groups
|
|
|
|
| Group |
|
|
|-------|
|
|
| 0-2 |
|
|
| 3-9 |
|
|
| 10-19 |
|
|
| 20-29 |
|
|
| 30-39 |
|
|
| 40-49 |
|
|
| 50-59 |
|
|
| 60-69 |
|
|
| 70+ |
|
|
|
|
---
|
|
|
|
## Emotion
|
|
|
|
Predicts facial emotions. Requires PyTorch.
|
|
|
|
!!! warning "Optional Dependency"

    Emotion detection requires PyTorch. Install with:

    ```bash
    pip install torch
    ```
|
|
|
|
### Basic Usage
|
|
|
|
```python
|
|
from uniface.detection import RetinaFace
|
|
from uniface.attribute import Emotion
|
|
from uniface.constants import DDAMFNWeights
|
|
|
|
detector = RetinaFace()
|
|
emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7)
|
|
|
|
faces = detector.detect(image)
|
|
|
|
for face in faces:
|
|
result = emotion.predict(image, face)
|
|
print(f"Emotion: {result.emotion}")
|
|
print(f"Confidence: {result.confidence:.2%}")
|
|
```
|
|
|
|
### Emotion Classes
|
|
|
|
=== "7-Class (AFFECNET7)"

    | Label |
    |-------|
    | Neutral |
    | Happy |
    | Sad |
    | Surprise |
    | Fear |
    | Disgust |
    | Angry |

=== "8-Class (AFFECNET8)"

    | Label |
    |-------|
    | Neutral |
    | Happy |
    | Sad |
    | Surprise |
    | Fear |
    | Disgust |
    | Angry |
    | Contempt |
|
|
|
|
### Model Variants
|
|
|
|
```python
|
|
from uniface.attribute import Emotion
|
|
from uniface.constants import DDAMFNWeights
|
|
|
|
# 7-class emotion
|
|
emotion = Emotion(model_name=DDAMFNWeights.AFFECNET7)
|
|
|
|
# 8-class emotion
|
|
emotion = Emotion(model_name=DDAMFNWeights.AFFECNET8)
|
|
```
|
|
|
|
---
|
|
|
|
## Factory Function
|
|
|
|
Use `create_attribute_predictor()` for dynamic model selection:
|
|
|
|
```python
|
|
from uniface import create_attribute_predictor
|
|
|
|
age_gender = create_attribute_predictor('age_gender')
|
|
fairface = create_attribute_predictor('fairface')
|
|
emotion = create_attribute_predictor('emotion')
|
|
```
|
|
|
|
Available model names: `'age_gender'`, `'fairface'`, `'emotion'`.
|
|
|
|
---
|
|
|
|
## Combining Models
|
|
|
|
### Full Attribute Analysis
|
|
|
|
```python
|
|
from uniface.attribute import AgeGender, FairFace
|
|
from uniface.detection import RetinaFace
|
|
|
|
detector = RetinaFace()
|
|
age_gender = AgeGender()
|
|
fairface = FairFace()
|
|
|
|
faces = detector.detect(image)
|
|
|
|
for face in faces:
|
|
# Get exact age from AgeGender
|
|
ag_result = age_gender.predict(image, face)
|
|
|
|
# Get race from FairFace
|
|
ff_result = fairface.predict(image, face)
|
|
|
|
print(f"Gender: {ag_result.sex}")
|
|
print(f"Exact Age: {ag_result.age}")
|
|
print(f"Age Group: {ff_result.age_group}")
|
|
print(f"Race: {ff_result.race}")
|
|
```
|
|
|
|
### Using FaceAnalyzer
|
|
|
|
```python
|
|
from uniface.analyzer import FaceAnalyzer
|
|
from uniface.attribute import AgeGender
|
|
from uniface.detection import RetinaFace
|
|
|
|
analyzer = FaceAnalyzer(
|
|
RetinaFace(),
|
|
attributes=[AgeGender()],
|
|
)
|
|
|
|
faces = analyzer.analyze(image)
|
|
|
|
for face in faces:
|
|
print(f"Age: {face.age}, Gender: {face.sex}")
|
|
```
|
|
|
|
---
|
|
|
|
## Visualization
|
|
|
|
```python
|
|
import cv2
|
|
|
|
def draw_attributes(image, face, result):
|
|
"""Draw attributes on image."""
|
|
x1, y1, x2, y2 = map(int, face.bbox)
|
|
|
|
# Draw bounding box
|
|
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
|
|
|
|
# Build label
|
|
label = f"{result.sex}"
|
|
    if result.age is not None:  # explicit None check: age 0 is a valid prediction
        label += f", {result.age}y"
|
|
if result.age_group:
|
|
label += f", {result.age_group}"
|
|
if result.race:
|
|
label += f", {result.race}"
|
|
|
|
# Draw label
|
|
cv2.putText(
|
|
image, label, (x1, y1 - 10),
|
|
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2
|
|
)
|
|
|
|
return image
|
|
|
|
# Usage
|
|
for face in faces:
|
|
result = age_gender.predict(image, face)
|
|
image = draw_attributes(image, face, result)
|
|
|
|
cv2.imwrite("attributes.jpg", image)
|
|
```
|
|
|
|
---
|
|
|
|
## Accuracy Notes
|
|
|
|
!!! note "Model Limitations"

    - **AgeGender**: Trained on CelebA; accuracy varies by demographic
    - **FairFace**: Trained for balanced demographics; better cross-racial accuracy
    - **Emotion**: Accuracy depends on facial expression clarity

    Always test on your specific use case and consider cultural context.
|
|
|
|
---
|
|
|
|
## Next Steps
|
|
|
|
- [Parsing](parsing.md) - Face semantic segmentation
|
|
- [Gaze](gaze.md) - Gaze estimation
|
|
- [Image Pipeline Recipe](../recipes/image-pipeline.md) - Complete workflow
|