Mirror of https://github.com/yakhyo/uniface.git, synced 2025-12-30 09:02:25 +00:00
feat: Enhance emotion inference speed on ARM and add FaceAnalyzer and Face classes for ease of use. (#25)
* feat: Update linting and type annotations, return types in detect
* feat: add face analyzer and face classes
* chore: Update the format and clean up some docstrings
* docs: Update usage documentation
* feat: Change AgeGender model output to 0, 1 instead of string (Female, Male)
* test: Update testing code
* feat: Add Apple silicon backend for torchscript inference
* feat: Add face analyzer example and add run emotion for testing
Committed by GitHub
Parent: 779952e3f8
Commit: 0c93598007
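The commit message above introduces FaceAnalyzer and Face convenience classes and switches the AgeGender output to numeric labels, but the diff below only touches the batch-detection example. The short sketch that follows is therefore purely illustrative of how such an analyzer might be driven; the import path, the analyze() method, and the attribute names on Face are assumptions, not API confirmed by this commit.

# Illustrative sketch only: names below are assumed, not taken from this diff.
import cv2
from uniface import FaceAnalyzer  # hypothetical import path

analyzer = FaceAnalyzer()          # would bundle detection with age/gender/emotion models
image = cv2.imread('group_photo.jpg')
faces = analyzer.analyze(image)    # hypothetical call returning Face objects

for face in faces:
    # per the commit message, gender is now 0/1 rather than the strings 'Female'/'Male'
    print(face.bbox, face.gender, face.emotion)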
@@ -14,8 +14,8 @@ from uniface.visualization import draw_detections
 def get_image_files(input_dir: Path, extensions: tuple) -> list:
     files = []
     for ext in extensions:
-        files.extend(input_dir.glob(f"*.{ext}"))
-        files.extend(input_dir.glob(f"*.{ext.upper()}"))
+        files.extend(input_dir.glob(f'*.{ext}'))
+        files.extend(input_dir.glob(f'*.{ext.upper()}'))
     return sorted(files)


@@ -28,14 +28,14 @@ def process_image(detector, image_path: Path, output_path: Path, threshold: float
     faces = detector.detect(image)

     # unpack face data for visualization
-    bboxes = [f["bbox"] for f in faces]
-    scores = [f["confidence"] for f in faces]
-    landmarks = [f["landmarks"] for f in faces]
+    bboxes = [f['bbox'] for f in faces]
+    scores = [f['confidence'] for f in faces]
+    landmarks = [f['landmarks'] for f in faces]
     draw_detections(image, bboxes, scores, landmarks, vis_threshold=threshold)

     cv2.putText(
         image,
-        f"Faces: {len(faces)}",
+        f'Faces: {len(faces)}',
         (10, 30),
         cv2.FONT_HERSHEY_SIMPLEX,
         1,
@@ -48,12 +48,12 @@ def process_image(detector, image_path: Path, output_path: Path, threshold: float


 def main():
-    parser = argparse.ArgumentParser(description="Batch process images with face detection")
-    parser.add_argument("--input", type=str, required=True, help="Input directory")
-    parser.add_argument("--output", type=str, required=True, help="Output directory")
-    parser.add_argument("--detector", type=str, default="retinaface", choices=["retinaface", "scrfd"])
-    parser.add_argument("--threshold", type=float, default=0.6, help="Visualization threshold")
-    parser.add_argument("--extensions", type=str, default="jpg,jpeg,png,bmp", help="Image extensions")
+    parser = argparse.ArgumentParser(description='Batch process images with face detection')
+    parser.add_argument('--input', type=str, required=True, help='Input directory')
+    parser.add_argument('--output', type=str, required=True, help='Output directory')
+    parser.add_argument('--detector', type=str, default='retinaface', choices=['retinaface', 'scrfd'])
+    parser.add_argument('--threshold', type=float, default=0.6, help='Visualization threshold')
+    parser.add_argument('--extensions', type=str, default='jpg,jpeg,png,bmp', help='Image extensions')
     args = parser.parse_args()

     input_path = Path(args.input)
@@ -65,21 +65,21 @@ def main():
     output_path.mkdir(parents=True, exist_ok=True)

-    extensions = tuple(ext.strip() for ext in args.extensions.split(","))
+    extensions = tuple(ext.strip() for ext in args.extensions.split(','))
     image_files = get_image_files(input_path, extensions)

     if not image_files:
-        print(f"No images found with extensions {extensions}")
+        print(f'No images found with extensions {extensions}')
         return

-    print(f"Found {len(image_files)} images")
+    print(f'Found {len(image_files)} images')

-    detector = RetinaFace() if args.detector == "retinaface" else SCRFD()
+    detector = RetinaFace() if args.detector == 'retinaface' else SCRFD()

     success, errors, total_faces = 0, 0, 0

-    for img_path in tqdm(image_files, desc="Processing", unit="img"):
-        out_path = output_path / f"{img_path.stem}_detected{img_path.suffix}"
+    for img_path in tqdm(image_files, desc='Processing', unit='img'):
+        out_path = output_path / f'{img_path.stem}_detected{img_path.suffix}'
         result = process_image(detector, img_path, out_path, args.threshold)

         if result >= 0:
@@ -87,10 +87,10 @@ def main():
             total_faces += result
         else:
             errors += 1
-            print(f"\nFailed: {img_path.name}")
+            print(f'\nFailed: {img_path.name}')

-    print(f"\nDone! {success} processed, {errors} errors, {total_faces} faces total")
+    print(f'\nDone! {success} processed, {errors} errors, {total_faces} faces total')


-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
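For context, the script patched above wraps a small detection-and-draw flow. A minimal, self-contained sketch of that flow follows; the per-face keys ('bbox', 'confidence', 'landmarks') and draw_detections(..., vis_threshold=...) are taken directly from the diff, while the top-level import path for RetinaFace and the file names are assumptions.

# Minimal sketch of the flow the example script applies per image.
import cv2
from uniface import RetinaFace  # assumed import path; SCRFD is selectable via --detector
from uniface.visualization import draw_detections

detector = RetinaFace()
image = cv2.imread('input.jpg')            # placeholder file name
faces = detector.detect(image)             # list of per-face dicts, as used in the diff

bboxes = [f['bbox'] for f in faces]
scores = [f['confidence'] for f in faces]
landmarks = [f['landmarks'] for f in faces]
draw_detections(image, bboxes, scores, landmarks, vis_threshold=0.6)

cv2.imwrite('input_detected.jpg', image)   # placeholder output name

Run as a script, the example exposes --input, --output, --detector {retinaface, scrfd}, --threshold and --extensions, all visible in the argparse hunk above.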