Mirror of https://github.com/yakhyo/uniface.git (synced 2025-12-30 09:02:25 +00:00)
feat: Enhance emotion inference speed on ARM and add FaceAnalyzer and Face classes for ease of use (#25)
* feat: Update linting, type annotations, and return types in detect
* feat: Add FaceAnalyzer and Face classes (usage sketched below)
* chore: Update formatting and clean up some docstrings
* docs: Update usage documentation
* feat: Change AgeGender model output to 0/1 instead of the strings "Female"/"Male"
* test: Update testing code
* feat: Add Apple silicon backend for TorchScript inference (see the device sketch after the commit metadata)
* feat: Add a face analyzer example and a run-emotion script for testing
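The FaceAnalyzer and Face classes are only named in the message; the diff below touches the recognition example, so their interface is not visible here. A minimal usage sketch under stated assumptions: the class names come from the message, while the analyze() entry point and the bbox/landmarks/gender attributes are hypothetical placeholders, not the confirmed API.

import cv2
from uniface import FaceAnalyzer  # import path is an assumption

analyzer = FaceAnalyzer()  # assumed to bundle detection and per-face attribute models
image = cv2.imread('photo.jpg')  # placeholder image path
for face in analyzer.analyze(image):  # analyze() is a hypothetical entry point
    print(face.bbox, face.landmarks)  # attribute names are assumptions
    # Per the message, AgeGender now returns 0/1 rather than 'Female'/'Male';
    # the 0=Female, 1=Male mapping below is inferred from the message's ordering.
    print('gender:', 'Male' if face.gender == 1 else 'Female')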
parent 779952e3f8
commit 0c93598007
committed by GitHub
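The "Apple silicon backend for TorchScript inference" in the message presumably targets PyTorch's MPS device. A minimal device-selection sketch, assuming a standard PyTorch runtime; the model path is a placeholder, not a file shipped by uniface.

import torch

# Prefer Apple's Metal (MPS) backend when available, otherwise fall back to CPU.
device = torch.device('mps') if torch.backends.mps.is_available() else torch.device('cpu')
model = torch.jit.load('emotion.torchscript.pt', map_location=device)  # placeholder path
model.eval()  # switch the TorchScript module to inference mode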
@@ -13,9 +13,9 @@ from uniface.recognition import ArcFace, MobileFace, SphereFace
 
 
 def get_recognizer(name: str):
-    if name == "arcface":
+    if name == 'arcface':
         return ArcFace()
-    elif name == "mobileface":
+    elif name == 'mobileface':
         return MobileFace()
     else:
         return SphereFace()
@@ -29,18 +29,18 @@ def run_inference(detector, recognizer, image_path: str):
 
     faces = detector.detect(image)
     if not faces:
-        print("No faces detected.")
+        print('No faces detected.')
         return
 
-    print(f"Detected {len(faces)} face(s). Extracting embedding for the first face...")
+    print(f'Detected {len(faces)} face(s). Extracting embedding for the first face...')
 
-    landmarks = np.array(faces[0]["landmarks"])  # 5-point landmarks for alignment
+    landmarks = faces[0]['landmarks']  # 5-point landmarks for alignment (already np.ndarray)
     embedding = recognizer.get_embedding(image, landmarks)
     norm_embedding = recognizer.get_normalized_embedding(image, landmarks)  # L2 normalized
 
-    print(f"  Embedding shape: {embedding.shape}")
-    print(f"  L2 norm (raw): {np.linalg.norm(embedding):.4f}")
-    print(f"  L2 norm (normalized): {np.linalg.norm(norm_embedding):.4f}")
+    print(f'  Embedding shape: {embedding.shape}')
+    print(f'  L2 norm (raw): {np.linalg.norm(embedding):.4f}')
+    print(f'  L2 norm (normalized): {np.linalg.norm(norm_embedding):.4f}')
 
 
 def compare_faces(detector, recognizer, image1_path: str, image2_path: str, threshold: float = 0.35):
@@ -48,18 +48,18 @@ def compare_faces(detector, recognizer, image1_path: str, image2_path: str, thre
     img2 = cv2.imread(image2_path)
 
     if img1 is None or img2 is None:
-        print("Error: Failed to load one or both images")
+        print('Error: Failed to load one or both images')
         return
 
     faces1 = detector.detect(img1)
     faces2 = detector.detect(img2)
 
     if not faces1 or not faces2:
-        print("Error: No faces detected in one or both images")
+        print('Error: No faces detected in one or both images')
         return
 
-    landmarks1 = np.array(faces1[0]["landmarks"])
-    landmarks2 = np.array(faces2[0]["landmarks"])
+    landmarks1 = faces1[0]['landmarks']
+    landmarks2 = faces2[0]['landmarks']
 
     embedding1 = recognizer.get_normalized_embedding(img1, landmarks1)
     embedding2 = recognizer.get_normalized_embedding(img2, landmarks2)
@@ -68,26 +68,26 @@ def compare_faces(detector, recognizer, image1_path: str, image2_path: str, thre
     similarity = compute_similarity(embedding1, embedding2, normalized=True)
     is_match = similarity > threshold
 
-    print(f"Similarity: {similarity:.4f}")
-    print(f"Result: {'Same person' if is_match else 'Different person'} (threshold: {threshold})")
+    print(f'Similarity: {similarity:.4f}')
+    print(f'Result: {"Same person" if is_match else "Different person"} (threshold: {threshold})')
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Face recognition and comparison")
-    parser.add_argument("--image", type=str, help="Single image for embedding extraction")
-    parser.add_argument("--image1", type=str, help="First image for comparison")
-    parser.add_argument("--image2", type=str, help="Second image for comparison")
-    parser.add_argument("--threshold", type=float, default=0.35, help="Similarity threshold")
-    parser.add_argument("--detector", type=str, default="retinaface", choices=["retinaface", "scrfd"])
+    parser = argparse.ArgumentParser(description='Face recognition and comparison')
+    parser.add_argument('--image', type=str, help='Single image for embedding extraction')
+    parser.add_argument('--image1', type=str, help='First image for comparison')
+    parser.add_argument('--image2', type=str, help='Second image for comparison')
+    parser.add_argument('--threshold', type=float, default=0.35, help='Similarity threshold')
+    parser.add_argument('--detector', type=str, default='retinaface', choices=['retinaface', 'scrfd'])
     parser.add_argument(
-        "--recognizer",
+        '--recognizer',
        type=str,
-        default="arcface",
-        choices=["arcface", "mobileface", "sphereface"],
+        default='arcface',
+        choices=['arcface', 'mobileface', 'sphereface'],
     )
     args = parser.parse_args()
 
-    detector = RetinaFace() if args.detector == "retinaface" else SCRFD()
+    detector = RetinaFace() if args.detector == 'retinaface' else SCRFD()
     recognizer = get_recognizer(args.recognizer)
 
     if args.image1 and args.image2:
@@ -95,9 +95,9 @@ def main():
     elif args.image:
         run_inference(detector, recognizer, args.image)
     else:
-        print("Error: Provide --image or both --image1 and --image2")
+        print('Error: Provide --image or both --image1 and --image2')
         parser.print_help()
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
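For context on the comparison path above: compute_similarity(..., normalized=True) operates on L2-normalized embeddings, where cosine similarity reduces to a plain dot product. A minimal reference sketch of that math (not uniface's implementation):

import numpy as np

def cosine_similarity(e1: np.ndarray, e2: np.ndarray) -> float:
    # General form; for L2-normalized inputs the denominator is ~1.0,
    # so the similarity is effectively just np.dot(e1, e2).
    return float(np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2)))

A pair scoring above the example's default 0.35 threshold is reported as the same person.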