Update inspireface to 1.2.0

This commit is contained in:
Jingyu
2025-03-25 00:51:26 +08:00
parent 977ea6795b
commit ca64996b84
388 changed files with 28584 additions and 13036 deletions

View File

@@ -23,9 +23,7 @@ share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
.pytest_cache/
.idea/
*.db
*.db
version.txt

View File

@@ -1,12 +1,33 @@
# PyInspireFace
# InspireFace Python API
We provide a Python API for calling InspireFace, which is implemented by wrapping the dynamic link library using ctypes. You can install the latest release version on your computer via pip from PyPI, or you can configure it using a self-compiled dynamic library with this project.
## Quick Install
For Python users on Linux and MacOS, InspireFace can be quickly installed via pip:
```bash
pip install inspireface
```
## Setup Library
You need to compile the dynamic linking library in the main project and then place it in **inspireface/modules/core**.
#### Copy the compiled dynamic library
You need to compile the dynamic linking library in the main project and then place it in **inspireface/modules/core/SYSTEM/CORE_ARCH/**.
```Bash
# copy or link
cp YOUR_BUILD_DIR/libInspireFace.so inspireface/modules/core
cp YOUR_BUILD_DIR/libInspireFace.so inspireface/modules/core/SYSTEM/CORE_ARCH/
```
#### Install
Run the command to install:
```
python setup.py install
```
## Requirements
@@ -19,23 +40,19 @@ pip install tqdm
pip install opencv-python
```
## Quick Start
## Simple example
You can easily call the API to implement a number of functions:
```Python
import cv2
import inspireface as ifac
from inspireface.param import *
# Step 1: Initialize the SDK and load the algorithm resource files.
resource_path = "pack/Pikachu"
ret = ifac.launch(resource_path)
assert ret, "Launch failure. Please ensure the resource path is correct."
import inspireface as isf
# Optional features, loaded during session creation based on the modules specified.
opt = HF_ENABLE_NONE
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_IMAGE)
opt = isf.HF_ENABLE_NONE
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_ALWAYS_DETECT)
# Set detection confidence threshold
session.set_detection_confidence_threshold(0.5)
# Load the image using OpenCV.
image = cv2.imread(image_path)
@@ -50,14 +67,30 @@ draw = image.copy()
for idx, face in enumerate(faces):
print(f"{'==' * 20}")
print(f"idx: {idx}")
# Print detection confidence.
print(f"detection confidence: {face.detection_confidence}")
# Print Euler angles of the face.
print(f"roll: {face.roll}, yaw: {face.yaw}, pitch: {face.pitch}")
# Draw bounding box around the detected face.
x1, y1, x2, y2 = face.location
cv2.rectangle(draw, (x1, y1), (x2, y2), (0, 0, 255), 2)
# Get face bounding box
x1, y1, x2, y2 = face.location
# Calculate center, size, and angle
center = ((x1 + x2) / 2, (y1 + y2) / 2)
size = (x2 - x1, y2 - y1)
angle = face.roll
# Apply rotation to the bounding box corners
rect = ((center[0], center[1]), (size[0], size[1]), angle)
box = cv2.boxPoints(rect)
box = box.astype(int)
# Draw the rotated bounding box
cv2.drawContours(draw, [box], 0, (100, 180, 29), 2)
# Draw landmarks
lmk = session.get_face_dense_landmark(face)
for x, y in lmk.astype(int):
cv2.circle(draw, (x, y), 0, (220, 100, 0), 2)
```

View File

@@ -1,4 +1,5 @@
from .modules import *
from .param import *
__version__ = version()

View File

@@ -1,6 +1,8 @@
from .inspire_face import ImageStream, FaceExtended, FaceInformation, SessionCustomParameter, InspireFaceSession, \
from .inspireface import ImageStream, FaceExtended, FaceInformation, SessionCustomParameter, InspireFaceSession, \
launch, FeatureHubConfiguration, feature_hub_enable, feature_hub_disable, feature_comparison, \
FaceIdentity, feature_hub_set_search_threshold, feature_hub_face_insert, SearchResult, \
feature_hub_face_search, feature_hub_face_search_top_k, feature_hub_face_update, feature_hub_face_remove, \
feature_hub_get_face_identity, feature_hub_get_face_count, view_table_in_terminal, version, \
set_logging_level, disable_logging, show_system_resource_statistics
feature_hub_get_face_identity, feature_hub_get_face_count, view_table_in_terminal, version, query_launch_status, reload, set_expansive_pack_path, \
set_logging_level, disable_logging, show_system_resource_statistics, get_recommended_cosine_threshold, cosine_similarity_convert_to_percentage, \
get_similarity_converter_config, set_similarity_converter_config, pull_latest_model, \
HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT, HF_SEARCH_MODE_EAGER, HF_SEARCH_MODE_EXHAUSTIVE

View File

@@ -1,12 +1,11 @@
import ctypes
import cv2
import numpy as np
from .core import *
from typing import Tuple, List
from dataclasses import dataclass
from loguru import logger
from .utils import ResourceManager
class ImageStream(object):
"""
@@ -268,6 +267,12 @@ class InspireFaceSession(object):
Raises:
Exception: If session creation fails.
"""
# If InspireFace is not initialized, run launch() using the Pikachu model
if not query_launch_status():
ret = launch()
if not ret:
raise Exception("Launch InspireFace failure")
self.multiple_faces = None
self._sess = HFSession()
self.param = param
@@ -330,6 +335,20 @@ class InspireFaceSession(object):
return infos
else:
return []
def get_face_five_key_points(self, single_face: FaceInformation):
    """Return the five facial key points of a detected face.

    Args:
        single_face (FaceInformation): A face previously returned by detection;
            its internal token is passed to the native SDK.

    Returns:
        np.ndarray: Array of shape (5, 2) of (x, y) coordinates. If the native
        call fails, an error is logged and the (zero-initialized) buffer is
        still returned, matching the behavior of the other landmark getters.
    """
    num_landmarks = 5
    landmarks_array = (HPoint2f * num_landmarks)()
    ret = HFGetFaceFiveKeyPointsFromFaceToken(single_face._token, landmarks_array, num_landmarks)
    if ret != 0:
        # Fixed message: it was copy-pasted from the dense-landmark getter and
        # incorrectly said "dense landmark" for the five-key-point call.
        logger.error(f"An error occurred obtaining the five key points for a single face: {ret}")
    landmark = []
    for point in landmarks_array:
        landmark.append(point.x)
        landmark.append(point.y)
    return np.asarray(landmark).reshape(-1, 2)
def get_face_dense_landmark(self, single_face: FaceInformation):
num_landmarks = HInt32()
@@ -379,6 +398,21 @@ class InspireFaceSession(object):
if ret != 0:
logger.error(f"Set filter minimum face pixel size error: {ret}")
def set_track_mode_smooth_ratio(self, ratio=0.025):
    """Set the smoothing ratio applied to tracked face coordinates.

    Args:
        ratio (float): Smoothing factor forwarded to the native tracker.
    """
    status = HFSessionSetTrackModeSmoothRatio(self._sess, ratio)
    if status != 0:
        logger.error(f"Set track mode smooth ratio error: {status}")
def set_track_mode_num_smooth_cache_frame(self, num=15):
    """Set how many frames the tracker caches for smoothing.

    Args:
        num (int): Number of cached frames forwarded to the native tracker.
    """
    status = HFSessionSetTrackModeNumSmoothCacheFrame(self._sess, num)
    if status != 0:
        logger.error(f"Set track mode num smooth cache frame error: {status}")
def set_track_model_detect_interval(self, num=20):
    """Set the full-detection interval (in frames) used while tracking.

    NOTE(review): "model" in the method name looks like a typo for "mode"
    (the native call is HFSessionSetTrackModeDetectInterval), but the public
    name is kept for backward compatibility.

    Args:
        num (int): Run a full detection every *num* frames.
    """
    status = HFSessionSetTrackModeDetectInterval(self._sess, num)
    if status != 0:
        logger.error(f"Set track model detect interval error: {status}")
def face_pipeline(self, image, faces: List[FaceInformation], exec_param) -> List[FaceExtended]:
"""
Processes detected faces to extract additional attributes based on the provided execution parameters.
@@ -478,22 +512,22 @@ class InspireFaceSession(object):
def _update_face_interact_confidence(self, exec_param, flag, extends):
if (flag == "object" and exec_param.enable_interaction_liveness) or (
flag == "bitmask" and exec_param & HF_ENABLE_INTERACTION):
results = HFFaceIntereactionState()
ret = HFGetFaceIntereactionStateResult(self._sess, PHFFaceIntereactionState(results))
results = HFFaceInteractionState()
ret = HFGetFaceInteractionStateResult(self._sess, PHFFaceInteractionState(results))
if ret == 0:
for i in range(results.num):
extends[i].left_eye_status_confidence = results.leftEyeStatusConfidence[i]
extends[i].right_eye_status_confidence = results.rightEyeStatusConfidence[i]
else:
logger.error(f"Get face interact result error: {ret}")
actions = HFFaceIntereactionsActions()
ret = HFGetFaceIntereactionActionsResult(self._sess, PHFFaceIntereactionsActions(actions))
actions = HFFaceInteractionsActions()
ret = HFGetFaceInteractionActionsResult(self._sess, PHFFaceInteractionsActions(actions))
if ret == 0:
for i in range(results.num):
extends[i].action_normal = actions.normal[i]
extends[i].action_shake = actions.shake[i]
extends[i].action_jaw_open = actions.jawOpen[i]
extends[i].action_head_raise = actions.headRiase[i]
extends[i].action_head_raise = actions.headRaise[i]
extends[i].action_blink = actions.blink[i]
else:
logger.error(f"Get face action result error: {ret}")
@@ -571,12 +605,13 @@ class InspireFaceSession(object):
# == Global API ==
def launch(resource_path: str) -> bool:
def launch(model_name: str = "Pikachu", resource_path: str = None) -> bool:
"""
Launches the InspireFace system with the specified resource directory.
Args:
resource_path (str): The file path to the resource directory necessary for operation.
model_name (str): the name of the model to use.
resource_path (str): if None, use the default model path.
Returns:
bool: True if the system was successfully launched, False otherwise.
@@ -584,6 +619,9 @@ def launch(resource_path: str) -> bool:
Notes:
A specific error is logged if duplicate loading is detected or if there is any other launch failure.
"""
if resource_path is None:
sm = ResourceManager()
resource_path = sm.get_model(model_name)
path_c = String(bytes(resource_path, encoding="utf8"))
ret = HFLaunchInspireFace(path_c)
if ret != 0:
@@ -595,6 +633,48 @@ def launch(resource_path: str) -> bool:
return False
return True
def set_expansive_pack_path(path: str):
    """Point the SDK at the expansive hardware (Apple CoreML) model pack.

    Args:
        path (str): Filesystem path to the expansive pack.

    Returns:
        bool: True on success, False when the native call reports an error.
    """
    encoded = String(bytes(path, encoding="utf8"))
    code = HFSetExpansiveHardwareAppleCoreMLModelPath(encoded)
    if code == 0:
        return True
    logger.error(f"Set expansive pack path error: {code}")
    return False
def pull_latest_model(model_name: str = "Pikachu") -> str:
    """Force a fresh download of the named model pack.

    Args:
        model_name (str): Name of the model to (re-)download.

    Returns:
        str: Local filesystem path of the downloaded model file.
    """
    return ResourceManager().get_model(model_name, re_download=True)
def reload(model_name: str = "Pikachu", resource_path: str = None) -> bool:
    """Reload InspireFace with a (possibly different) model pack.

    Args:
        model_name (str): Name of the bundled model to use when
            ``resource_path`` is not supplied.
        resource_path (str): Explicit path to a model pack; if None the model
            is resolved (and downloaded if missing) via ResourceManager.

    Returns:
        bool: True on success (including the duplicate-load case), False otherwise.
    """
    if resource_path is None:
        sm = ResourceManager()
        resource_path = sm.get_model(model_name)
    path_c = String(bytes(resource_path, encoding="utf8"))
    ret = HFReloadInspireFace(path_c)
    if ret != 0:
        if ret == 1363:
            # Native code 1363 means the pack is already loaded; treat as success.
            logger.warning("Duplicate loading was found")
            return True
        else:
            # Fixed copy-pasted message: this is the reload path, not launch().
            logger.error(f"Reload InspireFace failure: {ret}")
            return False
    return True
def query_launch_status() -> bool:
    """
    Queries the launch status of the InspireFace SDK.

    Returns:
        bool: True if InspireFace is launched, False otherwise.
    """
    state = HInt32()
    code = HFQueryInspireFaceLaunchStatus(byref(state))
    if code == 0:
        # The native side reports 1 when the SDK has been launched.
        return state.value == 1
    logger.error(f"Query launch status error: {code}")
    return False
@dataclass
class FeatureHubConfiguration:
@@ -608,9 +688,9 @@ class FeatureHubConfiguration:
search_threshold (float): The threshold value for considering a match.
search_mode (int): The mode of searching in the database.
"""
feature_block_num: int
enable_use_db: bool
db_path: str
primary_key_mode: int
enable_persistence: bool
persistence_db_path: str
search_threshold: float
search_mode: int
@@ -622,9 +702,9 @@ class FeatureHubConfiguration:
HFFeatureHubConfiguration: A C-structure for feature hub configuration.
"""
return HFFeatureHubConfiguration(
enablePersistence=int(self.enable_use_db),
dbPath=String(bytes(self.db_path, encoding="utf8")),
featureBlockNum=self.feature_block_num,
primaryKeyMode=self.primary_key_mode,
enablePersistence=int(self.enable_persistence),
persistenceDbPath=String(bytes(self.persistence_db_path, encoding="utf8")),
searchThreshold=self.search_threshold,
searchMode=self.search_mode
)
@@ -714,7 +794,7 @@ class FaceIdentity(object):
_c_struct: Converts the instance back to a compatible C structure.
"""
def __init__(self, data: np.ndarray, custom_id: int, tag: str):
def __init__(self, data: np.ndarray, id: int):
"""
Initializes a new FaceIdentity instance with facial feature data and an identifier.
@@ -723,9 +803,12 @@ class FaceIdentity(object):
custom_id (int): A custom identifier for tracking or referencing the face identity.
tag (str): A descriptive tag or label for the face identity.
"""
if data.dtype != np.float32:
logger.error("The input data must be in float32 format")
raise ValueError("The input data must be in float32 format")
self.feature = data
self.custom_id = custom_id
self.tag = tag
self.id = id
@staticmethod
def from_ctypes(raw_identity: HFFaceFeatureIdentity):
@@ -741,10 +824,9 @@ class FaceIdentity(object):
feature_size = raw_identity.feature.contents.size
feature_data_ptr = raw_identity.feature.contents.data
feature_data = np.ctypeslib.as_array(cast(feature_data_ptr, HPFloat), (feature_size,))
custom_id = raw_identity.customId
tag = raw_identity.tag.data.decode('utf-8')
id_ = raw_identity.id
return FaceIdentity(data=feature_data, custom_id=custom_id, tag=tag)
return FaceIdentity(data=feature_data, id=id_)
def _c_struct(self):
"""
@@ -758,8 +840,7 @@ class FaceIdentity(object):
feature.size = HInt32(self.feature.size)
feature.data = data_ptr
return HFFaceFeatureIdentity(
customId=self.custom_id,
tag=String(bytes(self.tag, encoding="utf8")),
customId=HFaceId(self.id),
feature=PHFFaceFeature(feature)
)
@@ -774,7 +855,7 @@ def feature_hub_set_search_threshold(threshold: float):
HFFeatureHubFaceSearchThresholdSetting(threshold)
def feature_hub_face_insert(face_identity: FaceIdentity) -> bool:
def feature_hub_face_insert(face_identity: FaceIdentity) -> Tuple[bool, int]:
"""
Inserts a face identity into the FeatureHub database.
@@ -787,11 +868,12 @@ def feature_hub_face_insert(face_identity: FaceIdentity) -> bool:
Notes:
Logs an error if the insertion process fails.
"""
ret = HFFeatureHubInsertFeature(face_identity._c_struct())
alloc_id = HFaceId()
ret = HFFeatureHubInsertFeature(face_identity._c_struct(), HPFaceId(alloc_id))
if ret != 0:
logger.error(f"Failed to insert face feature data into FeatureHub: {ret}")
return False
return True
return False, -1
return True, int(alloc_id.value)
@dataclass
@@ -820,18 +902,21 @@ def feature_hub_face_search(data: np.ndarray) -> SearchResult:
Notes:
If the search operation fails, logs an error and returns a SearchResult with a confidence of -1.
"""
if data.dtype != np.float32:
logger.error("The input data must be in float32 format")
raise ValueError("The input data must be in float32 format")
feature = HFFaceFeature(size=HInt32(data.size), data=data.ctypes.data_as(HPFloat))
confidence = HFloat()
most_similar = HFFaceFeatureIdentity()
ret = HFFeatureHubFaceSearch(feature, HPFloat(confidence), PHFFaceFeatureIdentity(most_similar))
if ret != 0:
logger.error(f"Failed to search face: {ret}")
return SearchResult(confidence=-1, similar_identity=FaceIdentity(np.zeros(0), most_similar.customId, "None"))
if most_similar.customId != -1:
return SearchResult(confidence=-1, similar_identity=FaceIdentity(np.zeros(0), most_similar.id))
if most_similar.id != -1:
search_identity = FaceIdentity.from_ctypes(most_similar)
return SearchResult(confidence=confidence.value, similar_identity=search_identity)
else:
none = FaceIdentity(np.zeros(0), most_similar.customId, "None")
none = FaceIdentity(np.zeros(0, dtype=np.float32), most_similar.id)
return SearchResult(confidence=confidence.value, similar_identity=none)
@@ -849,6 +934,10 @@ def feature_hub_face_search_top_k(data: np.ndarray, top_k: int) -> List[Tuple]:
Notes:
If the search operation fails, an empty list is returned.
"""
if data.dtype != np.float32:
logger.error("The input data must be in float32 format")
raise ValueError("The input data must be in float32 format")
feature = HFFaceFeature(size=HInt32(data.size), data=data.ctypes.data_as(HPFloat))
results = HFSearchTopKResults()
ret = HFFeatureHubFaceSearchTopK(feature, top_k, PHFSearchTopKResults(results))
@@ -856,8 +945,8 @@ def feature_hub_face_search_top_k(data: np.ndarray, top_k: int) -> List[Tuple]:
if ret == 0:
for idx in range(results.size):
confidence = results.confidence[idx]
customId = results.customIds[idx]
outputs.append((confidence, customId))
id_ = results.ids[idx]
outputs.append((confidence, id_))
return outputs
@@ -894,7 +983,7 @@ def feature_hub_face_remove(custom_id: int) -> bool:
Notes:
Logs an error if the removal operation fails.
"""
ret = HFFeatureHubFaceRemove(custom_id)
ret = HFFeatureHubFaceRemove(HFaceId(custom_id))
if ret != 0:
logger.error(f"Failed to remove face feature data from FeatureHub: {ret}")
return False
@@ -915,7 +1004,7 @@ def feature_hub_get_face_identity(custom_id: int):
Logs an error if retrieving the face identity fails.
"""
identify = HFFaceFeatureIdentity()
ret = HFFeatureHubGetFaceIdentity(custom_id, PHFFaceFeatureIdentity(identify))
ret = HFFeatureHubGetFaceIdentity(HFaceId(custom_id), PHFFaceFeatureIdentity(identify))
if ret != 0:
logger.error("Get face identity errors from FeatureHub")
return None
@@ -952,6 +1041,52 @@ def view_table_in_terminal():
if ret != 0:
logger.error(f"Failed to view DB: {ret}")
def get_recommended_cosine_threshold() -> float:
    """
    Retrieves the recommended cosine threshold for the currently loaded model.

    Returns:
        float: Threshold the SDK suggests for cosine-similarity matching.
    """
    threshold = HFloat()
    ret = HFGetRecommendedCosineThreshold(threshold)
    if ret != 0:
        # The return code was previously ignored; log failures like every
        # other wrapper in this module so errors are not silent.
        logger.error(f"Failed to get recommended cosine threshold: {ret}")
    return float(threshold.value)
def get_similarity_converter_config() -> dict:
    """
    Retrieves the similarity converter configuration.

    Returns:
        dict: Keys ``threshold``, ``middleScore``, ``steepness``, ``outputMin``
        and ``outputMax``, mirroring the native converter struct.
    """
    native_cfg = HFSimilarityConverterConfig()
    status = HFGetCosineSimilarityConverter(PHFSimilarityConverterConfig(native_cfg))
    if status != 0:
        logger.error(f"Failed to get cosine similarity converter config: {status}")
    return {
        "threshold": native_cfg.threshold,
        "middleScore": native_cfg.middleScore,
        "steepness": native_cfg.steepness,
        "outputMin": native_cfg.outputMin,
        "outputMax": native_cfg.outputMax,
    }
def set_similarity_converter_config(cfg: dict):
    """
    Sets the similarity converter configuration.

    Args:
        cfg (dict): Must contain the keys ``threshold``, ``middleScore``,
            ``steepness``, ``outputMin`` and ``outputMax``.

    Raises:
        KeyError: If any required key is missing from *cfg*.
    """
    config = HFSimilarityConverterConfig()
    config.threshold = cfg["threshold"]
    config.middleScore = cfg["middleScore"]
    config.steepness = cfg["steepness"]
    config.outputMin = cfg["outputMin"]
    config.outputMax = cfg["outputMax"]
    ret = HFUpdateCosineSimilarityConverter(config)
    if ret != 0:
        # The return code was previously ignored; surface failures like the
        # sibling wrappers do.
        logger.error(f"Failed to update cosine similarity converter config: {ret}")
def cosine_similarity_convert_to_percentage(similarity: float) -> float:
    """
    Converts a cosine similarity score to a percentage similarity score.

    Args:
        similarity (float): Raw cosine similarity from feature comparison.

    Returns:
        float: The converted percentage-style score.
    """
    converted = HFloat()
    status = HFCosineSimilarityConvertToPercentage(HFloat(similarity), HPFloat(converted))
    if status != 0:
        logger.error(f"Failed to convert cosine similarity to percentage: {status}")
    return float(converted.value)
def version() -> str:
"""
@@ -986,3 +1121,4 @@ def show_system_resource_statistics():
Displays the system resource information.
"""
HFDeBugShowResourceStatistics()

View File

@@ -0,0 +1 @@
from .resource import *

View File

@@ -0,0 +1,116 @@
import os
import sys
from pathlib import Path
import urllib.request
import ssl
import hashlib
def get_file_hash_sha256(file_path):
    """Return the hex SHA-256 digest of the file at *file_path*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    digest = hashlib.sha256()
    with open(file_path, 'rb') as fh:
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.hexdigest()
class ResourceManager:
    """Downloads and caches InspireFace model packs under ``~/.inspireface/models``."""

    def __init__(self):
        """Initialize resource manager and create necessary directories."""
        self.user_home = Path.home()
        self.base_dir = self.user_home / '.inspireface'
        self.models_dir = self.base_dir / 'models'

        # Create directories (idempotent).
        self.base_dir.mkdir(exist_ok=True)
        self.models_dir.mkdir(exist_ok=True)

        # Model download locations. NOTE: the "sha256" field holds a SHA-256
        # digest (64 hex chars, verified via get_file_hash_sha256); the key was
        # previously misnamed "md5".
        self._MODEL_LIST = {
            "Pikachu": {
                "url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu",
                "filename": "Pikachu",
                "sha256": "f2983a2d884902229c1443fdc921b8e5f49cf2daba8a4f103cd127910dc9e7cd"
            },
            "Megatron": {
                "url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron",
                "filename": "Megatron",
                "sha256": "28f2284c5e7cf53b0e152ff524a416c966ab21e724002643b1304aedc4af6b06"
            }
        }

    def get_model(self, name: str, re_download: bool = False) -> str:
        """
        Get model path. Download if not exists or re_download is True.

        Args:
            name: Model name
            re_download: Force re-download if True

        Returns:
            str: Full path to model file

        Raises:
            ValueError: If *name* is not a known model.
            RuntimeError: If the download fails for any reason.
        """
        if name not in self._MODEL_LIST:
            raise ValueError(f"Model '{name}' not found. Available models: {list(self._MODEL_LIST.keys())}")

        model_info = self._MODEL_LIST[name]
        model_file = self.models_dir / model_info["filename"]
        # A leftover ".downloading" flag marks an interrupted download.
        downloading_flag = model_file.with_suffix('.downloading')

        # Reuse the cached file only if it exists, was fully downloaded, and
        # its SHA-256 digest matches the expected value.
        if model_file.exists() and not downloading_flag.exists() and not re_download:
            current_hash = get_file_hash_sha256(model_file)
            if current_hash == model_info["sha256"]:
                return str(model_file)
            else:
                print(f"Model file hash mismatch for '{name}'. Re-downloading...")

        # Start download
        try:
            print(f"Downloading model '{name}'...")
            downloading_flag.touch()

            # SECURITY NOTE(review): certificate verification is disabled here,
            # which allows man-in-the-middle substitution of the model file.
            # Behavior kept for compatibility, but consider verifying certs and
            # relying on the SHA-256 check above as a second line of defense.
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
            req = urllib.request.Request(model_info["url"], headers=headers)

            with urllib.request.urlopen(req, context=ssl_context) as response:
                total_size = int(response.headers.get('content-length', 0))
                block_size = 8192
                downloaded_size = 0

                with open(model_file, 'wb') as f:
                    while True:
                        buffer = response.read(block_size)
                        if not buffer:
                            break
                        downloaded_size += len(buffer)
                        f.write(buffer)
                        if total_size > 0:
                            percent = (downloaded_size / total_size) * 100
                            sys.stdout.write(f"\rDownloading {name}: {percent:.1f}%")
                            sys.stdout.flush()

            print("\nDownload completed")
            downloading_flag.unlink()  # Remove the downloading flag
            return str(model_file)

        except Exception as e:
            # Clean up partial artifacts so a later call retries from scratch.
            if model_file.exists():
                model_file.unlink()
            if downloading_flag.exists():
                downloading_flag.unlink()
            raise RuntimeError(f"Failed to download model: {e}")
# Usage example: fetch (or reuse the cached copy of) the default model pack.
if __name__ == "__main__":
    try:
        manager = ResourceManager()
        path = manager.get_model("Pikachu")
        print(f"Model path: {path}")
    except Exception as exc:
        print(f"Error: {exc}")

View File

@@ -2,7 +2,7 @@
# Session option
from inspireface.modules.core.native import HF_ENABLE_NONE, HF_ENABLE_FACE_RECOGNITION, HF_ENABLE_LIVENESS, HF_ENABLE_IR_LIVENESS, \
HF_ENABLE_MASK_DETECT, HF_ENABLE_FACE_ATTRIBUTE, HF_ENABLE_QUALITY, HF_ENABLE_INTERACTION
HF_ENABLE_MASK_DETECT, HF_ENABLE_FACE_ATTRIBUTE, HF_ENABLE_QUALITY, HF_ENABLE_INTERACTION, HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT
# Face track mode
from inspireface.modules.core.native import HF_DETECT_MODE_ALWAYS_DETECT, HF_DETECT_MODE_LIGHT_TRACK, HF_DETECT_MODE_TRACK_BY_DETECTION

View File

@@ -0,0 +1,4 @@
import inspireface

# Pre-fetch the latest copies of both bundled model packs.
for model_name in ("Pikachu", "Megatron"):
    inspireface.pull_latest_model(model_name)

View File

@@ -0,0 +1,52 @@
import cv2
import inspireface as isf
import click
@click.command()
@click.argument('image_path1')
@click.argument('image_path2')
def case_face_comparison(image_path1, image_path2):
    """
    This is a sample application for comparing two face images.

    Args:
        image_path1 (str): Path to the first face image
        image_path2 (str): Path to the second face image
    """
    # Create a session with the recognition module enabled.
    session = isf.InspireFaceSession(isf.HF_ENABLE_FACE_RECOGNITION, isf.HF_DETECT_MODE_ALWAYS_DETECT)

    # For each image: load it, detect a face, and extract its feature vector.
    features = []
    for ordinal, path in (("first", image_path1), ("second", image_path2)):
        img = cv2.imread(path)
        assert img is not None, f"Failed to load {ordinal} image"
        detected = session.face_detection(img)
        assert detected, f"No face detected in {ordinal} image"
        # Use the first detected face in each image.
        features.append(session.face_feature_extract(img, detected[0]))

    # Cosine similarity between the two feature vectors.
    similarity = isf.feature_comparison(features[0], features[1])
    print(f"The cosine similarity score: {similarity:.4f}")
    verdict = 'Same person' if similarity > isf.get_recommended_cosine_threshold() else 'Different person'
    print(f"{verdict}")

    # Also report the percentage-style score.
    percentage = isf.cosine_similarity_convert_to_percentage(similarity)
    print(f"The percentage similarity: {percentage:.4f}")


if __name__ == '__main__':
    case_face_comparison()

View File

@@ -1,7 +1,6 @@
import os
import cv2
import inspireface as ifac
from inspireface.param import *
import inspireface as isf
import click
import numpy as np
@@ -11,20 +10,15 @@ age_bracket_tags = ["0-2 years old", "3-9 years old", "10-19 years old", "20-29
"40-49 years old", "50-59 years old", "60-69 years old", "more than 70 years old"]
@click.command()
@click.argument("resource_path")
@click.argument('image_path')
def case_face_detection_image(resource_path, image_path):
def case_face_detection_image(image_path):
"""
This is a sample application for face detection and tracking using an image.
It also includes pipeline extensions such as RGB liveness, mask detection, and face quality evaluation.
"""
# Step 1: Initialize the SDK and load the algorithm resource files.
ret = ifac.launch(resource_path)
assert ret, "Launch failure. Please ensure the resource path is correct."
# Optional features, loaded during session creation based on the modules specified.
opt = HF_ENABLE_FACE_RECOGNITION | HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_INTERACTION | HF_ENABLE_FACE_ATTRIBUTE
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_ALWAYS_DETECT)
opt = isf.HF_ENABLE_FACE_RECOGNITION | isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_ALWAYS_DETECT)
# Set detection confidence threshold
session.set_detection_confidence_threshold(0.5)
@@ -68,7 +62,7 @@ def case_face_detection_image(resource_path, image_path):
cv2.circle(draw, (x, y), 0, (220, 100, 0), 2)
# Features must be enabled during session creation to use them here.
select_exec_func = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_INTERACTION | HF_ENABLE_FACE_ATTRIBUTE
select_exec_func = isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE
# Execute the pipeline to obtain richer face information.
extends = session.face_pipeline(image, faces, select_exec_func)
for idx, ext in enumerate(extends):

View File

@@ -1,36 +1,34 @@
import os
import cv2
import inspireface as ifac
from inspireface.param import *
import inspireface as isf
import click
@click.command()
@click.argument("resource_path")
@click.argument('test_data_folder')
def case_face_recognition(resource_path, test_data_folder):
def case_face_recognition(test_data_folder):
"""
Launches the face recognition system, inserts face features into a database, and performs searches.
Args:
resource_path (str): Path to the resource directory for face recognition algorithms.
test_data_folder (str): Path to the test data containing images for insertion and recognition tests.
"""
# Initialize the face recognition system with provided resources.
ret = ifac.launch(resource_path)
# If you need to switch from the default Pikachu model to another model like Megatron, you can use reload
ret = isf.reload("Megatron")
assert ret, "Launch failure. Please ensure the resource path is correct."
# Enable face recognition features.
opt = HF_ENABLE_FACE_RECOGNITION
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_ALWAYS_DETECT)
opt = isf.HF_ENABLE_FACE_RECOGNITION
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_ALWAYS_DETECT)
# Configure the feature management system.
feature_hub_config = ifac.FeatureHubConfiguration(
feature_block_num=10,
enable_use_db=False,
db_path="",
feature_hub_config = isf.FeatureHubConfiguration(
primary_key_mode=isf.HF_PK_AUTO_INCREMENT,
enable_persistence=False,
persistence_db_path="",
search_threshold=0.48,
search_mode=HF_SEARCH_MODE_EAGER,
search_mode=isf.HF_SEARCH_MODE_EAGER,
)
ret = ifac.feature_hub_enable(feature_hub_config)
ret = isf.feature_hub_enable(feature_hub_config)
assert ret, "Failed to enable FeatureHub."
# Insert face features from 'bulk' directory.
@@ -46,11 +44,11 @@ def case_face_recognition(resource_path, test_data_folder):
if faces:
face = faces[0] # Assume the most prominent face is what we want.
feature = session.face_feature_extract(image, face)
identity = ifac.FaceIdentity(feature, custom_id=idx, tag=name)
ret = ifac.feature_hub_face_insert(identity)
identity = isf.FaceIdentity(feature, id=idx)
ret, alloc_id = isf.feature_hub_face_insert(identity)
assert ret, "Failed to insert face."
count = ifac.feature_hub_get_face_count()
count = isf.feature_hub_get_face_count()
print(f"Number of faces inserted: {count}")
# Process faces from 'RD' directory and insert them.
@@ -66,11 +64,11 @@ def case_face_recognition(resource_path, test_data_folder):
if faces:
face = faces[0]
feature = session.face_feature_extract(image, face)
identity = ifac.FaceIdentity(feature, custom_id=idx+count+1, tag=name)
ret = ifac.feature_hub_face_insert(identity)
identity = isf.FaceIdentity(feature, id=idx+count+1)
ret, alloc_id = isf.feature_hub_face_insert(identity)
assert ret, "Failed to insert face."
count = ifac.feature_hub_get_face_count()
count = isf.feature_hub_get_face_count()
print(f"Total number of faces after insertion: {count}")
# Search for a similar face using the last image in RD directory.
@@ -81,18 +79,18 @@ def case_face_recognition(resource_path, test_data_folder):
face = faces[0]
feature = session.face_feature_extract(remain, face)
search = ifac.feature_hub_face_search(feature)
if search.similar_identity.custom_id != -1:
print(f"Found similar identity with ID: {search.similar_identity.custom_id}, Tag: {search.similar_identity.tag}, Confidence: {search.confidence:.2f}")
search = isf.feature_hub_face_search(feature)
if search.similar_identity.id != -1:
print(f"Found similar identity with ID: {search.similar_identity.id}, Confidence: {search.confidence:.2f}")
else:
print("No similar identity found.")
# Display top-k similar face identities.
print("Top-k similar identities:")
search_top_k = ifac.feature_hub_face_search_top_k(feature, 10)
for idx, (conf, custom_id) in enumerate(search_top_k):
identity = ifac.feature_hub_get_face_identity(custom_id)
print(f"Top-{idx + 1}: {identity.tag}, ID: {custom_id}, Confidence: {conf:.2f}")
search_top_k = isf.feature_hub_face_search_top_k(feature, 10)
for idx, (conf, _id) in enumerate(search_top_k):
identity = isf.feature_hub_get_face_identity(_id)
print(f"Top-{idx + 1}: ID: {_id}, Confidence: {conf:.2f}")
if __name__ == '__main__':

View File

@@ -2,8 +2,7 @@ import time
import click
import cv2
import inspireface as ifac
from inspireface.param import *
import inspireface as isf
import numpy as np
@@ -17,6 +16,10 @@ def generate_color(id):
Returns:
tuple: A tuple representing the color in BGR format.
"""
# Handle invalid ID (-1)
if id < 0:
return (128, 128, 128) # Return gray color for invalid ID
max_id = 50 # Number of unique colors
id = id % max_id
@@ -31,11 +34,10 @@ def generate_color(id):
return (int(rgb_color[0]), int(rgb_color[1]), int(rgb_color[2]))
@click.command()
@click.argument("resource_path")
@click.argument('source')
@click.option('--show', is_flag=True, help='Display the video stream or video file in a window.')
@click.option('--out', type=str, default=None, help='Path to save the processed video.')
def case_face_tracker_from_video(resource_path, source, show, out):
def case_face_tracker_from_video(source, show, out):
"""
Launch a face tracking process from a video source. The 'source' can either be a webcam index (0, 1, ...)
or a path to a video file. Use the --show option to display the video.
@@ -46,25 +48,19 @@ def case_face_tracker_from_video(resource_path, source, show, out):
show (bool): If set, the video will be displayed in a window.
out (str): Path to save the processed video.
"""
# Initialize the face tracker or other resources.
print(f"Initializing with resources from: {resource_path}")
# Step 1: Initialize the SDK and load the algorithm resource files.
ret = ifac.launch(resource_path)
assert ret, "Launch failure. Please ensure the resource path is correct."
# Optional features, loaded during session creation based on the modules specified.
opt = HF_ENABLE_NONE | HF_ENABLE_INTERACTION
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_ALWAYS_DETECT, max_detect_num=25, detect_pixel_level=320) # Use video mode
opt = isf.HF_ENABLE_NONE | isf.HF_ENABLE_INTERACTION
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_LIGHT_TRACK, max_detect_num=25, detect_pixel_level=320) # Use video mode
session.set_filter_minimum_face_pixel_size(0)
# Determine if the source is a digital webcam index or a video file path.
try:
source_index = int(source) # Try to convert source to an integer.
cap = cv2.VideoCapture(source_index)
print(f"Using webcam at index {source_index}.")
cap = cv2.VideoCapture(source_index)
except ValueError:
# If conversion fails, treat source as a file path.
cap = cv2.VideoCapture(source)
print(f"Opening video file at {source}.")
cap = cv2.VideoCapture(source)
if not cap.isOpened():
print("Error: Could not open video source.")
@@ -88,8 +84,7 @@ def case_face_tracker_from_video(resource_path, source, show, out):
# Process frame here (e.g., face detection/tracking).
faces = session.face_detection(frame)
exts = session.face_pipeline(frame, faces, HF_ENABLE_INTERACTION)
print(exts)
exts = session.face_pipeline(frame, faces, isf.HF_ENABLE_INTERACTION)
for idx, face in enumerate(faces):
# Get face bounding box
@@ -105,6 +100,19 @@ def case_face_tracker_from_video(resource_path, source, show, out):
box = cv2.boxPoints(rect)
box = box.astype(int)
actions = []
if exts[idx].action_normal:
actions.append("Normal")
if exts[idx].action_jaw_open:
actions.append("Jaw Open")
if exts[idx].action_shake:
actions.append("Shake")
if exts[idx].action_blink:
actions.append("Blink")
if exts[idx].action_head_raise:
actions.append("Head Raise")
print("Actions:", actions)
color = generate_color(face.track_id)
# Draw the rotated bounding box
@@ -115,6 +123,10 @@ def case_face_tracker_from_video(resource_path, source, show, out):
for x, y in lmk.astype(int):
cv2.circle(frame, (x, y), 0, color, 4)
five_key_points = session.get_face_five_key_points(face)
for x, y in five_key_points.astype(int):
cv2.circle(frame, (x, y), 0, (255-color[0], 255-color[1], 255-color[2]), 6)
# Draw track ID at the top of the bounding box
text = f"ID: {face.track_id}"
text_size, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
@@ -126,7 +138,7 @@ def case_face_tracker_from_video(resource_path, source, show, out):
if show:
cv2.imshow("Face Tracker", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
if cv2.waitKey(25) & 0xFF == ord('q'):
break # Exit loop if 'q' is pressed.
if out:

View File

@@ -1,32 +1,82 @@
import os
import cv2
import inspireface as ifac
from inspireface.param import *
import inspireface as isf
import numpy as np
import os
FEATURE = np.asarray([ 0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963,
-0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179,
0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961,
-0.0675362, -0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942,
-0.00625798, 0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462,
0.0465634, 0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792,
-0.102007, 0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103,
-0.00694243, 0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638,
0.00682446, 0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823,
0.0511028, -0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569,
-0.0225608, -0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047,
0.056139, -0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856, 0.0706566, 0.00640248, 0.0418103, -0.00597861,
0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963, -0.00683422, -0.0338589, 0.0533989, -0.0371725,
0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179, 0.0164787, -0.00732871, -0.0458209, -0.0100137,
-0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961, -0.0675362, -0.0117453, 0.0658745, -0.0694139,
-0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942, -0.00625798, 0.00748682, 0.0533557, 0.0314002,
-0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462, 0.0465634, 0.0627244, 0.0449248, -0.037054,
-0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792, -0.102007, 0.0649219, 0.0630833, 0.0421069,
0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103, -0.00694243, 0.0366775, 0.0672882, 0.0122419,
-0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638, 0.00682446, 0.0262906, -0.0407654, -0.0144264,
-0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823, 0.0511028, -0.0310699, -0.0322141, 0.00996936,
0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569, -0.0225608, -0.0332198, 0.00102291, 0.108814,
-0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047, 0.056139, -0.0166783, -0.0803042, -0.014245,
-0.0476374, 0.048495, 0.0378856, 0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162,
-0.0080779, -0.0550556, 0.0229963, -0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743,
-0.0128782, 0.0935529, 0.0588179, 0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471,
-0.00481095, 0.0266868, 0.0712961, -0.0675362, -0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902,
0.00192449, -0.0593105, 0.0191942, -0.00625798, 0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575,
-0.0180252, 0.0150318, -0.0686462, 0.0465634, 0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842,
-0.0161973, 0.0319588, 0.0112792, -0.102007, 0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046,
0.071994, -0.0272229, 0.0167103, -0.00694243, 0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025,
0.000983093, -0.00776073, -0.0268638, 0.00682446, 0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019,
0.000502882, 0.0496892, 0.0126823, 0.0511028, -0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467,
0.0419618, -0.00358901, -0.0309569, -0.0225608, -0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584,
0.0721224, -0.0795082, 0.0340047, 0.056139, -0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856, 0.0706566,
0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963, -0.00683422,
-0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179, 0.0164787,
-0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961, -0.0675362,
-0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942, -0.00625798,
0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462, 0.0465634,
0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792, -0.102007,
0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103, -0.00694243,
0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638, 0.00682446,
0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823, 0.0511028,
-0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569, -0.0225608,
-0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047, 0.056139,
-0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856,], dtype=np.float32)
def case_feature_hub():
db_path = "test.db"
if os.path.exists(db_path):
os.remove(db_path)
# Configure the feature management system.
feature_hub_config = ifac.FeatureHubConfiguration(
feature_block_num=10,
enable_use_db=True,
db_path=db_path,
feature_hub_config = isf.FeatureHubConfiguration(
primary_key_mode=isf.HF_PK_AUTO_INCREMENT,
enable_persistence=True,
persistence_db_path=db_path,
search_threshold=0.48,
search_mode=HF_SEARCH_MODE_EAGER,
search_mode=isf.HF_SEARCH_MODE_EAGER,
)
ret = ifac.feature_hub_enable(feature_hub_config)
ret = isf.feature_hub_enable(feature_hub_config)
assert ret, "Failed to enable FeatureHub."
print(ifac.feature_hub_get_face_count())
print(isf.feature_hub_get_face_count())
for i in range(10):
feature = ifac.FaceIdentity(np.random.rand(512), i, "test")
ifac.feature_hub_face_insert(feature)
print(ifac.feature_hub_get_face_count())
v = np.random.rand(512).astype(np.float32)
feature = isf.FaceIdentity(v, -1)
isf.feature_hub_face_insert(feature)
feature = isf.FaceIdentity(FEATURE, -1)
isf.feature_hub_face_insert(feature)
print(isf.feature_hub_get_face_count())
result = isf.feature_hub_face_search(FEATURE)
print(result.confidence, result.similar_identity.id)
assert os.path.exists(db_path), "FeatureHub database file not found."
if __name__ == "__main__":
case_feature_hub()

View File

@@ -1,6 +1,5 @@
import inspireface as ifac
from inspireface.param import *
import inspireface as isf
import click
@click.command()
@@ -9,12 +8,12 @@ def case_show_system_resource_statistics(resource_path):
"""
This case is used to test the system resource statistics.
"""
ret = ifac.launch(resource_path)
ret = isf.launch(resource_path)
assert ret, "Launch failure. Please ensure the resource path is correct."
print("-" * 100)
print("Initialization state")
print("-" * 100)
ifac.show_system_resource_statistics()
isf.show_system_resource_statistics()
print("-" * 100)
print("Create 10 sessions")
print("-" * 100)
@@ -22,16 +21,16 @@ def case_show_system_resource_statistics(resource_path):
num_created_sessions = 10
sessions = []
for i in range(num_created_sessions):
session = ifac.InspireFaceSession(HF_ENABLE_FACE_RECOGNITION, HF_DETECT_MODE_ALWAYS_DETECT)
session = isf.InspireFaceSession(isf.HF_ENABLE_FACE_RECOGNITION, isf.HF_DETECT_MODE_ALWAYS_DETECT)
sessions.append(session)
ifac.show_system_resource_statistics()
isf.show_system_resource_statistics()
print("-" * 100)
print("Release 10 sessions")
print("-" * 100)
print()
for session in sessions:
session.release()
ifac.show_system_resource_statistics()
isf.show_system_resource_statistics()
if __name__ == "__main__":
case_show_system_resource_statistics()

View File

@@ -0,0 +1,51 @@
import os
import cv2
import inspireface as isf
import numpy as np
import os
import cv2
def get_quality(image, session: isf.InspireFaceSession) -> float:
    """Detect faces in *image*, run the analysis pipeline, and print per-face results.

    Args:
        image: BGR image (as read by OpenCV).
        session: InspireFace session created with the pipeline features
            selected below enabled.

    Returns:
        float: Quality confidence of the first detected face, or 0.0 when
        no face is found. (Bug fix: the original fell off the end and
        returned None whenever faces were present, despite the ``-> float``
        annotation.)
    """
    # Pipeline features to execute for each detected face.
    select_exec_func = isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE
    faces = session.face_detection(image)
    if len(faces) == 0:
        # No face: nothing to analyze. Return early so `extends` below is always bound.
        return 0.0
    extends = session.face_pipeline(image, faces, select_exec_func)
    for idx, ext in enumerate(extends):
        print(f"{'==' * 20}")
        print(f"idx: {idx}")
        # For these pipeline results, you can set thresholds based on the specific scenario to make judgments.
        print(f"quality: {ext.quality_confidence}")
        print(f"rgb liveness: {ext.rgb_liveness_confidence}")
        print(f"face mask: {ext.mask_confidence}")
        print(
            f"face eyes status: left eye: {ext.left_eye_status_confidence} right eye: {ext.right_eye_status_confidence}")
        print(f"gender: {ext.gender}")
        print(f"race: {ext.race}")
        print(f"age: {ext.age_bracket}")
    return float(extends[0].quality_confidence)
if __name__ == "__main__":
    # Features enabled at session creation; must cover everything that
    # get_quality() requests in its face_pipeline call.
    register_exec_func = isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE
    session = isf.InspireFaceSession(register_exec_func, isf.HF_DETECT_MODE_ALWAYS_DETECT)
    # Open the default webcam (device index 0).
    cam = cv2.VideoCapture(0)
    while True:
        ret, frame = cam.read()
        if not ret:
            # Camera closed or frame grab failed: leave the loop.
            break
        # Runs detection + pipeline and prints per-face analysis to stdout.
        quality = get_quality(frame, session)
        # NOTE(review): detection runs a second time here just to draw boxes;
        # get_quality already detected — consider returning the faces from it.
        faces = session.face_detection(frame)
        for face in faces:
            x1, y1, x2, y2 = face.location
            # Draw a green bounding box around each detected face.
            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
        cv2.imshow('Video', frame)
        # Press 'q' to quit the preview loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()

View File

@@ -0,0 +1,121 @@
from setuptools import setup, find_packages
from wheel.bdist_wheel import bdist_wheel
import platform
import subprocess
import os
def get_version():
    """Return the package version string read from ``version.txt`` next to this file.

    Returns:
        str: Version string with surrounding whitespace stripped (e.g. "1.2.0").
    """
    version_path = os.path.join(os.path.dirname(__file__), 'version.txt')
    # Explicit UTF-8: avoids relying on the platform-default encoding.
    with open(version_path, 'r', encoding='utf-8') as f:
        return f.read().strip()
def get_wheel_platform_tag():
    """Get wheel package platform tag.

    Maps the current (machine, system) pair to the PEP 425 platform tag
    used when naming the binary wheel.

    Returns:
        str: Platform tag such as ``manylinux2014_x86_64``.

    Raises:
        RuntimeError: When the machine/system combination is not supported.
    """
    system = platform.system().lower()
    machine = platform.machine().lower()
    # Flat lookup keyed by (machine, system); x86_64/amd64 and
    # arm64/aarch64 are treated as synonyms of the same architecture.
    tag_table = {}
    for machine_alias in ('x86_64', 'amd64'):
        tag_table[(machine_alias, 'windows')] = 'win_amd64'
        tag_table[(machine_alias, 'linux')] = 'manylinux2014_x86_64'
        tag_table[(machine_alias, 'darwin')] = 'macosx_12_0_x86_64'
    for machine_alias in ('arm64', 'aarch64'):
        tag_table[(machine_alias, 'windows')] = 'win_arm64'
        tag_table[(machine_alias, 'linux')] = 'manylinux2014_aarch64'
        tag_table[(machine_alias, 'darwin')] = 'macosx_11_0_arm64'
    platform_arch = tag_table.get((machine, system))
    if not platform_arch:
        print("Unsupported platform: {} {}".format(system, machine))
        raise RuntimeError("Unsupported platform: {} {}".format(system, machine))
    return platform_arch
def get_lib_path_info():
    """Get library file path information.

    Returns:
        tuple: ``(system, arch)`` where *system* is ``'windows'`` | ``'linux'``
        | ``'darwin'`` and *arch* is ``'x64'`` | ``'arm64'``, matching the
        directory layout the native library is packaged under.

    Raises:
        RuntimeError: On an unsupported operating system.
    """
    system = platform.system().lower()
    machine = platform.machine().lower()
    if system == 'windows':
        arch = 'x64' if machine in ['amd64', 'x86_64'] else 'arm64'
    elif system == 'linux':
        arch = 'x64' if machine == 'x86_64' else 'arm64'
    elif system == 'darwin':
        if machine == 'x86_64':
            # An x86_64 report on macOS may come from an Intel Mac or from an
            # Apple Silicon Mac running this interpreter under Rosetta 2.
            try:
                is_rosetta = bool(int(subprocess.check_output(
                    ['sysctl', '-n', 'sysctl.proc_translated']).decode().strip()))
                arch = 'arm64' if is_rosetta else 'x64'
            except (subprocess.SubprocessError, OSError, ValueError):
                # Bug fix: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit. sysctl may be missing or emit
                # non-numeric output on older macOS; fall back to Intel.
                arch = 'x64'
        else:
            arch = 'arm64'
    else:
        raise RuntimeError(f"Unsupported system: {system}")
    return system, arch
class BinaryDistWheel(bdist_wheel):
    # Customizes wheel building so the package is tagged as platform-specific
    # (it bundles a native shared library) rather than as a pure-Python wheel.
    def finalize_options(self):
        super().finalize_options()
        # Mark this is not a pure Python package
        self.root_is_pure = False
        # Set platform tag
        self.plat_name = get_wheel_platform_tag()
        self.universal = False
# Get current platform information
system, arch = get_lib_path_info()
# Build library file path relative to package
lib_path = os.path.join('modules', 'core', 'libs', system, arch, '*')

# Read the long description up front with an explicit encoding. Bug fix: the
# original `open(...).read()` inside the setup() call leaked the file handle
# and decoded with the platform-default encoding.
_readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'README.md')
with open(_readme_path, encoding='utf-8') as _readme:
    _long_description = _readme.read()

setup(
    name='inspireface',
    version=get_version(),
    packages=find_packages(),
    # package_data path should be relative to package directory
    package_data={
        'inspireface': [lib_path]
    },
    install_requires=[
        'numpy',
        'loguru'
    ],
    author='Jingyu Yan',
    author_email='tunmxy@163.com',
    description='InspireFace Python SDK',
    long_description=_long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/HyperInspire/InspireFace',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
    ],
    python_requires='>=3.7',
    cmdclass={
        'bdist_wheel': BinaryDistWheel
    }
)

View File

@@ -0,0 +1,6 @@
import os
import cv2
import inspireface as ifac
from inspireface.param import *
import click
import numpy as np

View File

@@ -119,7 +119,7 @@ def print_benchmark_table(benchmark_results):
header_format = "{:<20} | {:<10} | {:<15} | {:<15}"
row_format = "{:<20} | {:<10} | {:>10.2f} ms | {:>10.4f} ms"
print(header_format.format('Benchmark', 'Loops', 'Total Time', 'Avg Time'))
print("-" * 70) # 调整分割线长度以匹配标题长度
print("-" * 70)
for name, loops, total_time in benchmark_results:
avg_time = total_time / loops

View File

@@ -0,0 +1 @@
@INSPIRE_FACE_VERSION_MAJOR@.@INSPIRE_FACE_VERSION_MINOR@.@INSPIRE_FACE_VERSION_PATCH@