Merge pull request #2790 from tunmx/feature/up123

Update inspireface to 1.2.3
This commit is contained in:
Jia Guo
2025-08-08 14:49:10 +08:00
committed by GitHub
76 changed files with 3766 additions and 919 deletions

View File

@@ -19,7 +19,7 @@ pack/*
.vscode/*
build_local/*
local_build/*
cpp/inspireface/include/inspireface/information.h
cpp/inspireface/meta.cpp
cpp/inspireface/version.txt
.DS_Store
._.DS_Store

View File

@@ -7,6 +7,8 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
set(INSPIRE_FACE_SERIES_NAME "Community Edition" CACHE STRING "Series name")
# Hide symbols
option(ISF_ENABLE_SYMBOL_HIDING "Enable symbol hiding." ON)
if(ISF_ENABLE_SYMBOL_HIDING)
@@ -24,15 +26,15 @@ endif()
# Current version
set(INSPIRE_FACE_VERSION_MAJOR 1)
set(INSPIRE_FACE_VERSION_MINOR 2)
set(INSPIRE_FACE_VERSION_PATCH 2)
set(INSPIRE_FACE_VERSION_PATCH 3)
# Converts the version number to a string
string(CONCAT INSPIRE_FACE_VERSION_MAJOR_STR ${INSPIRE_FACE_VERSION_MAJOR})
string(CONCAT INSPIRE_FACE_VERSION_MINOR_STR ${INSPIRE_FACE_VERSION_MINOR})
string(CONCAT INSPIRE_FACE_VERSION_PATCH_STR ${INSPIRE_FACE_VERSION_PATCH})
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/information.h.in ${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/include/inspireface/information.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/version.txt.in ${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/version.txt)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/python/version.txt.in ${CMAKE_CURRENT_SOURCE_DIR}/python/version.txt)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/meta.cpp.in ${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/meta.cpp)
# Check that the 3rdparty folder exists
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/3rdparty")
@@ -188,8 +190,10 @@ endif()
option(ISF_ENABLE_OPENCV "Depends on opencv." OFF)
if(INSPIRECV_BACKEND_OPENCV OR INSPIRECV_BACKEND_OKCV_USE_OPENCV OR INSPIRECV_BACKEND_OKCV_USE_OPENCV_IO OR INSPIRECV_BACKEND_OKCV_USE_OPENCV_GUI)
set(ISF_ENABLE_OPENCV ON)
add_definitions("-DISF_ENABLE_OPENCV")
endif()
include_directories(${ISF_THIRD_PARTY_DIR}/InspireCV/3rdparty/Eigen-3.4.0-Headers)
if(APPLE)
@@ -299,7 +303,7 @@ elseif(DEFINED ISF_MNN_CUSTOM_SOURCE)
message("Using custom external MNN source path: ${ISF_MNN_CUSTOM_SOURCE}")
# In particular, rknpu2 uses a lower version of mnn
set(MNN_BUILD_SHARED_LIBS OFF CACHE BOOL "Build MNN as a shared library")
add_subdirectory(${ISF_MNN_CUSTOM_SOURCE} EXCLUDE_FROM_ALL)
add_subdirectory(${ISF_MNN_CUSTOM_SOURCE} ${CMAKE_BINARY_DIR}/MNN EXCLUDE_FROM_ALL)
set(MNN_INCLUDE_DIRS "${ISF_MNN_CUSTOM_SOURCE}/include")
set(MNN_LIBS MNN)
@@ -309,7 +313,8 @@ else ()
# MNN Options
set(MNN_BUILD_SHARED_LIBS OFF CACHE BOOL "Build MNN as a shared library")
add_subdirectory(${ISF_THIRD_PARTY_DIR}/MNN EXCLUDE_FROM_ALL)
# Set MNN build output directory to CMAKE_BINARY_DIR/MNN
add_subdirectory(${ISF_THIRD_PARTY_DIR}/MNN ${CMAKE_BINARY_DIR}/MNN EXCLUDE_FROM_ALL)
set(MNN_INCLUDE_DIRS "${ISF_THIRD_PARTY_DIR}/MNN/include")
set(MNN_LIBS MNN)

View File

@@ -24,6 +24,10 @@ We welcome your questions💬, they help guide and accelerate its development.
## Change Logs
**`2025-08-03`** Add a multi-link model download channel for the Python-SDK.
**`2025-06-15`** The [ErrorCode-Table](/doc/Error-Feedback-Codes.md) has been reorganized and streamlined.
**`2025-06-08`** Add facial expression recognition.
**`2025-04-27`** Optimize some issues and provide a stable version.
@@ -69,7 +73,7 @@ The licensing of the open-source models employed by InspireFace adheres to the s
For Python users on **Linux and MacOS**, InspireFace can be quickly installed via pip:
```bash
pip install inspireface
pip install -U inspireface
```
After installation, you can use inspireface like this:
@@ -513,7 +517,7 @@ Please note that the C++ interface has not been fully tested. It is recommended
**More detailed cases**:
- [C Sample](cpp/sample/api/)
- [C/C++ Sample](cpp/sample/cpp_api/)
- [C++ Sample](cpp/sample/cpp_api/)
### Python Native Sample
@@ -610,7 +614,7 @@ We released InspireFace's Android SDK on JitPack, which you can incorporate into
```groovy
dependencies {
implementation 'com.github.HyperInspire:inspireface-android-sdk:1.2.0'
implementation 'com.github.HyperInspire:inspireface-android-sdk:1.2.3.post4'
}
```
@@ -631,7 +635,10 @@ You need to get the resource file from the release [Release Page](https://githu
```
asset/
└── inspireface/
├── Pikachu
├── Megatron
├── Gundam_RK356X
└── Gundam_RK3588
```
#### How to use the Android/Java API
@@ -753,11 +760,11 @@ For different scenarios, we currently provide several Packs, each containing mul
| Name | Supported Devices | Note | Last Update | Link |
| --- | --- | --- | --- | --- |
| Pikachu | CPU | Lightweight edge-side models | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu) |
| Pikachu | CPU | Lightweight edge-side models | Jun 22, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu) |
| Megatron | CPU, GPU | Mobile and server models | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron) |
| Megatron_TRT | GPU | CUDA-based server models | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron_TRT) |
| Gundam-RV1109 | RKNPU | Supports RK1109 and RK1126 | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1109) |
| Gundam-RV1106 | RKNPU | Supports RV1103 and RV1106 | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1106) |
| Gundam-RV1106 | RKNPU | Supports RV1103 and RV1106 | Jul 6, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1106) |
| Gundam-RK356X | RKNPU | Supports RK3566 and RK3568 | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK356X) |
| Gundam-RK3588 | RKNPU | Supports RK3588 | Jun 15, 2025 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK3588) |
@@ -766,6 +773,7 @@ For different scenarios, we currently provide several Packs, each containing mul
- [x] Add TensorRT backend support.
- [x] Add C++ style header files.
- [x] Add the RKNPU backend support for Android.
- [ ] Python packages that support more platforms.
- [ ] Example app project for Android and iOS samples.
- [ ] Add the batch forward feature.
- [ ] Design a scheme that can be adapted to multiple CUDA devices.

View File

@@ -38,6 +38,8 @@ cp build/${BUILD_DIRNAME}/lib/libInspireFace.so python/inspireface/modules/core/
pip install opencv-python
pip install click
pip install loguru
pip install filelock
pip install modelscope
cd python/

View File

@@ -0,0 +1,72 @@
#!/bin/bash
# Build the InspireFace shared library for aarch64 manylinux2014 and
# package Python wheels (3.7–3.12) from it.
#
# Fix: abort on the first failed command. Without `set -e`, a failed
# cmake/make step was silently ignored and a stale libInspireFace.so
# could be packaged into the wheels.
set -e

# Reusable function to handle 'install' directory operations:
# flatten $root_dir so the contents of $root_dir/install become $root_dir itself.
move_install_files() {
    local root_dir="$1"
    local install_dir="$root_dir/install"

    # Step 1: Check if the 'install' directory exists
    if [ ! -d "$install_dir" ]; then
        echo "Error: 'install' directory does not exist in $root_dir"
        exit 1
    fi

    # Step 2: Delete all other files/folders except 'install'
    find "$root_dir" -mindepth 1 -maxdepth 1 -not -name "install" -exec rm -rf {} +

    # Step 3: Move all files from 'install' to the root directory
    # (best effort: errors are suppressed, matching the original behavior)
    mv "$install_dir"/* "$root_dir" 2>/dev/null || true

    # Step 4: Remove the empty 'install' directory
    rmdir "$install_dir"

    echo "Files from 'install' moved to $root_dir, and 'install' directory deleted."
}

# Optional version suffix for the build folder, e.g. VERSION=1.2.3 -> "-1.2.3"
if [ -n "$VERSION" ]; then
    TAG="-$VERSION"
else
    TAG=""
fi

BUILD_FOLDER_PATH="build/inspireface-aarch64-manylinux2014${TAG}/"
SCRIPT_DIR=$(pwd)  # Project dir

# Quote all path expansions so paths containing spaces do not word-split (SC2086).
mkdir -p "${BUILD_FOLDER_PATH}"
cd "${BUILD_FOLDER_PATH}"

cmake -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \
  -DISF_BUILD_WITH_SAMPLE=OFF \
  -DISF_BUILD_WITH_TEST=OFF \
  -DISF_ENABLE_BENCHMARK=OFF \
  -DISF_ENABLE_USE_LFW_DATA=OFF \
  -DISF_ENABLE_TEST_EVALUATION=OFF \
  -DISF_BUILD_SHARED_LIBS=ON \
  -Wno-dev \
  "${SCRIPT_DIR}"

make -j4
make install

move_install_files "$(pwd)"

BUILD_DYLIB_PATH="$(pwd)/InspireFace/lib/libInspireFace.so"

# Copy the library to the python directory (plain cp: the source is a single file,
# so the recursive flag was unnecessary)
DYLIB_DEST_PATH="${SCRIPT_DIR}/python/inspireface/modules/core/libs/linux/arm64/"
mkdir -p "${DYLIB_DEST_PATH}"
cp "${BUILD_DYLIB_PATH}" "${DYLIB_DEST_PATH}"

PYTHON_PRJ_PATH="${SCRIPT_DIR}/python"
cd "${PYTHON_PRJ_PATH}/"

# Build wheels for Python 3.7-3.12
for PYTHON_VERSION in python3.7 python3.8 python3.9 python3.10 python3.11 python3.12; do
    if [[ "${PYTHON_VERSION}" == "python3.12" ]]; then
        ${PYTHON_VERSION} -m pip install setuptools wheel twine
    fi
    ${PYTHON_VERSION} setup.py bdist_wheel
done

echo "Build wheel for Linux arm64, Well Done!"

View File

@@ -0,0 +1,72 @@
#!/bin/bash
# Build the InspireFace shared library for aarch64 manylinux_2_36 and
# package Python wheels (3.7–3.12) from it.
#
# Fix: abort on the first failed command. Without `set -e`, a failed
# cmake/make step was silently ignored and a stale libInspireFace.so
# could be packaged into the wheels.
set -e

# Reusable function to handle 'install' directory operations:
# flatten $root_dir so the contents of $root_dir/install become $root_dir itself.
move_install_files() {
    local root_dir="$1"
    local install_dir="$root_dir/install"

    # Step 1: Check if the 'install' directory exists
    if [ ! -d "$install_dir" ]; then
        echo "Error: 'install' directory does not exist in $root_dir"
        exit 1
    fi

    # Step 2: Delete all other files/folders except 'install'
    find "$root_dir" -mindepth 1 -maxdepth 1 -not -name "install" -exec rm -rf {} +

    # Step 3: Move all files from 'install' to the root directory
    # (best effort: errors are suppressed, matching the original behavior)
    mv "$install_dir"/* "$root_dir" 2>/dev/null || true

    # Step 4: Remove the empty 'install' directory
    rmdir "$install_dir"

    echo "Files from 'install' moved to $root_dir, and 'install' directory deleted."
}

# Optional version suffix for the build folder, e.g. VERSION=1.2.3 -> "-1.2.3"
if [ -n "$VERSION" ]; then
    TAG="-$VERSION"
else
    TAG=""
fi

BUILD_FOLDER_PATH="build/inspireface-aarch64-manylinux_2_36${TAG}/"
SCRIPT_DIR=$(pwd)  # Project dir

# Quote all path expansions so paths containing spaces do not word-split (SC2086).
mkdir -p "${BUILD_FOLDER_PATH}"
cd "${BUILD_FOLDER_PATH}"

cmake -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \
  -DISF_BUILD_WITH_SAMPLE=OFF \
  -DISF_BUILD_WITH_TEST=OFF \
  -DISF_ENABLE_BENCHMARK=OFF \
  -DISF_ENABLE_USE_LFW_DATA=OFF \
  -DISF_ENABLE_TEST_EVALUATION=OFF \
  -DISF_BUILD_SHARED_LIBS=ON \
  -Wno-dev \
  "${SCRIPT_DIR}"

make -j4
make install

move_install_files "$(pwd)"

BUILD_DYLIB_PATH="$(pwd)/InspireFace/lib/libInspireFace.so"

# Copy the library to the python directory (plain cp: the source is a single file,
# so the recursive flag was unnecessary)
DYLIB_DEST_PATH="${SCRIPT_DIR}/python/inspireface/modules/core/libs/linux/arm64/"
mkdir -p "${DYLIB_DEST_PATH}"
cp "${BUILD_DYLIB_PATH}" "${DYLIB_DEST_PATH}"

PYTHON_PRJ_PATH="${SCRIPT_DIR}/python"
cd "${PYTHON_PRJ_PATH}/"

# Build wheels for Python 3.7-3.12
for PYTHON_VERSION in python3.7 python3.8 python3.9 python3.10 python3.11 python3.12; do
    if [[ "${PYTHON_VERSION}" == "python3.12" ]]; then
        ${PYTHON_VERSION} -m pip install setuptools wheel twine
    fi
    ${PYTHON_VERSION} setup.py bdist_wheel
done

echo "Build wheel for Linux arm64, Well Done!"

View File

@@ -7,16 +7,6 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
option(ISF_BUILD_SHARED_LIBS "Build shared libraries (DLLs)." ON)
string(TIMESTAMP BUILD_TIMESTAMP "%Y-%m-%d")
set(EXTENDED_INFORMATION "InspireFace[Community Edition]")
if(INSPIRECV_BACKEND_OPENCV)
set(EXTENDED_INFORMATION "${EXTENDED_INFORMATION}@OpenCV Backend")
else()
set(EXTENDED_INFORMATION "${EXTENDED_INFORMATION}@General")
endif()
set(EXTENDED_INFORMATION "${EXTENDED_INFORMATION} - Build Time: ${BUILD_TIMESTAMP}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/information.h.in ${CMAKE_CURRENT_SOURCE_DIR}/include/inspireface/information.h)
file(GLOB_RECURSE SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
set(SOURCE_FILES ${SOURCE_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/c_api/inspireface.cc) # Add C_API file
@@ -262,9 +252,9 @@ endif()
if (NOT IOS)
if (NOT ISF_BUILD_SHARED_LIBS)
if(MNN_BUILD_SHARED_LIBS)
install(FILES ${CMAKE_BINARY_DIR}/3rdparty/MNN/libMNN.so DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/lib)
install(FILES ${CMAKE_BINARY_DIR}/MNN/libMNN.so DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/lib)
else()
install(FILES ${CMAKE_BINARY_DIR}/3rdparty/MNN/libMNN.a DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/lib)
install(FILES ${CMAKE_BINARY_DIR}/MNN/libMNN.a DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/lib)
endif()
if(RKNN_USE_STATIC_LIBS)
# To be added: The compilation of the RK series needs to be added

View File

@@ -21,7 +21,7 @@
using namespace inspire;
HYPER_CAPI_EXPORT extern HResult HFCreateImageStream(PHFImageData data, HFImageStream *handle) {
HYPER_CAPI_EXPORT extern HResult HFCreateImageStream(PHFImageData data, PHFImageStream handle) {
if (data == nullptr || handle == nullptr) {
return HERR_INVALID_IMAGE_STREAM_HANDLE;
}
@@ -80,7 +80,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageStream(PHFImageData data, HFImageS
return HSUCCEED;
}
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamEmpty(HFImageStream *handle) {
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamEmpty(PHFImageStream handle) {
if (handle == nullptr) {
return HERR_INVALID_IMAGE_STREAM_HANDLE;
}
@@ -166,7 +166,7 @@ HYPER_CAPI_EXPORT extern HResult HFReleaseImageStream(HFImageStream streamHandle
return HSUCCEED;
}
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmap(PHFImageBitmapData data, HFImageBitmap *handle) {
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmap(PHFImageBitmapData data, PHFImageBitmap handle) {
if (data == nullptr || handle == nullptr) {
return HERR_INVALID_IMAGE_BITMAP_HANDLE;
}
@@ -178,7 +178,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmap(PHFImageBitmapData data, HF
return HSUCCEED;
}
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromFilePath(HPath filePath, HInt32 channels, HFImageBitmap *handle) {
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromFilePath(HPath filePath, HInt32 channels, PHFImageBitmap handle) {
if (handle == nullptr) {
return HERR_INVALID_IMAGE_BITMAP_HANDLE;
}
@@ -191,7 +191,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromFilePath(HPath filePath,
return HSUCCEED;
}
HYPER_CAPI_EXPORT extern HResult HFImageBitmapCopy(HFImageBitmap handle, HFImageBitmap *copyHandle) {
HYPER_CAPI_EXPORT extern HResult HFImageBitmapCopy(HFImageBitmap handle, PHFImageBitmap copyHandle) {
if (handle == nullptr || copyHandle == nullptr) {
return HERR_INVALID_IMAGE_BITMAP_HANDLE;
}
@@ -216,7 +216,7 @@ HYPER_CAPI_EXPORT extern HResult HFReleaseImageBitmap(HFImageBitmap handle) {
return HSUCCEED;
}
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitmap handle, HFRotation rotation, HFImageStream *streamHandle) {
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitmap handle, HFRotation rotation, PHFImageStream streamHandle) {
if (handle == nullptr || streamHandle == nullptr) {
return HERR_INVALID_IMAGE_STREAM_HANDLE;
}
@@ -235,7 +235,11 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitma
stream->impl.SetRotationMode(inspirecv::ROTATION_0);
break;
}
stream->impl.SetDataFormat(inspirecv::BGR);
if (((HF_ImageBitmap *)handle)->impl.Channels() == 1) {
stream->impl.SetDataFormat(inspirecv::GRAY);
} else {
stream->impl.SetDataFormat(inspirecv::BGR);
}
stream->impl.SetDataBuffer(((HF_ImageBitmap *)handle)->impl.Data(), ((HF_ImageBitmap *)handle)->impl.Height(),
((HF_ImageBitmap *)handle)->impl.Width());
*streamHandle = (HFImageStream)stream;
@@ -245,8 +249,8 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitma
return HSUCCEED;
}
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromImageStreamProcess(HFImageStream streamHandle, HFImageBitmap *handle, int is_rotate,
float scale) {
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromImageStreamProcess(HFImageStream streamHandle, PHFImageBitmap handle, HInt32 is_rotate,
HFloat scale) {
if (streamHandle == nullptr || handle == nullptr) {
return HERR_INVALID_IMAGE_BITMAP_HANDLE;
}
@@ -263,7 +267,12 @@ HYPER_CAPI_EXPORT extern HResult HFImageBitmapWriteToFile(HFImageBitmap handle,
if (handle == nullptr) {
return HERR_INVALID_IMAGE_BITMAP_HANDLE;
}
return ((HF_ImageBitmap *)handle)->impl.Write(filePath);
auto success = ((HF_ImageBitmap *)handle)->impl.Write(filePath);
if (success) {
return HSUCCEED;
} else {
return HERR_INVALID_IMAGE_BITMAP_HANDLE;
}
}
HYPER_CAPI_EXPORT extern HResult HFImageBitmapDrawRect(HFImageBitmap handle, HFaceRect rect, HColor color, HInt32 thickness) {
@@ -344,7 +353,7 @@ HResult HFDeBugImageStreamDecodeSave(HFImageStream streamHandle, HPath savePath)
return HSUCCEED;
} else {
INSPIRE_LOGE("Failed to save image to %s", savePath);
return -1;
return HERR_IMAGE_STREAM_DECODE_FAILED;
}
}
@@ -360,6 +369,33 @@ HResult HFReleaseInspireFaceSession(HFSession handle) {
return HSUCCEED;
}
// Clear all faces currently being tracked by the session.
// Returns HERR_INVALID_CONTEXT_HANDLE when the session handle is null,
// HSUCCEED otherwise.
HResult HFSessionClearTrackingFace(HFSession session) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
// The opaque handle wraps a HF_FaceAlgorithmSession; forward to its impl.
HF_FaceAlgorithmSession *ctx = (HF_FaceAlgorithmSession *)session;
ctx->impl.ClearTrackingFace();
return HSUCCEED;
}
// Enable or disable track-lost recovery mode on the session.
// `enable` is passed through to the implementation as-is (presumably
// 0 = off, nonzero = on — TODO confirm against the session API docs).
// Returns HERR_INVALID_CONTEXT_HANDLE when the session handle is null.
HResult HFSessionSetTrackLostRecoveryMode(HFSession session, HInt32 enable) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
HF_FaceAlgorithmSession *ctx = (HF_FaceAlgorithmSession *)session;
ctx->impl.SetTrackLostRecoveryMode(enable);
return HSUCCEED;
}
// Set the confidence threshold used by light tracking for this session.
// The value is forwarded unvalidated to the implementation (range not
// checked here — NOTE(review): confirm expected bounds with the impl).
// Returns HERR_INVALID_CONTEXT_HANDLE when the session handle is null.
HResult HFSessionSetLightTrackConfidenceThreshold(HFSession session, HFloat value) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
HF_FaceAlgorithmSession *ctx = (HF_FaceAlgorithmSession *)session;
ctx->impl.SetLightTrackConfidenceThreshold(value);
return HSUCCEED;
}
HResult HFSwitchLandmarkEngine(HFSessionLandmarkEngine engine) {
inspire::Launch::LandmarkEngine type;
if (engine == HF_LANDMARK_HYPLMV2_0_25) {
@@ -386,7 +422,7 @@ HResult HFQuerySupportedPixelLevelsForFaceDetection(PHFFaceDetectPixelList pixel
}
HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectMode detectMode, HInt32 maxDetectFaceNum, HInt32 detectPixelLevel,
HInt32 trackByDetectModeFPS, HFSession *handle) {
HInt32 trackByDetectModeFPS, PHFSession handle) {
inspire::ContextCustomParameter param;
param.enable_mask_detect = parameter.enable_mask_detect;
param.enable_liveness = parameter.enable_liveness;
@@ -419,7 +455,7 @@ HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectM
}
HResult HFCreateInspireFaceSessionOptional(HOption customOption, HFDetectMode detectMode, HInt32 maxDetectFaceNum, HInt32 detectPixelLevel,
HInt32 trackByDetectModeFPS, HFSession *handle) {
HInt32 trackByDetectModeFPS, PHFSession handle) {
inspire::ContextCustomParameter param;
if (customOption & HF_ENABLE_FACE_RECOGNITION) {
param.enable_recognition = true;
@@ -484,7 +520,7 @@ HResult HFTerminateInspireFace() {
return HSUCCEED;
}
HResult HFQueryInspireFaceLaunchStatus(HInt32 *status) {
HResult HFQueryInspireFaceLaunchStatus(HPInt32 status) {
*status = INSPIREFACE_CONTEXT->isMLoad();
return HSUCCEED;
}
@@ -493,6 +529,17 @@ HResult HFFeatureHubDataDisable() {
return INSPIREFACE_FEATURE_HUB->DisableHub();
}
// Report whether the library was compiled with Rockchip RGA support.
// Writes 1 to *enable when ISF_ENABLE_RGA was defined at build time,
// 0 otherwise; the answer is fixed at compile time. Always HSUCCEED.
// NOTE(review): `enable` is dereferenced without a null check — callers
// must pass a valid pointer.
HResult HFQueryExpansiveHardwareRGACompileOption(HPInt32 enable) {
#if defined(ISF_ENABLE_RGA)
INSPIRE_LOGI("RGA is enabled during compilation");
*enable = 1;
#else
INSPIRE_LOGW("RGA is not enabled during compilation");
*enable = 0;
#endif
return HSUCCEED;
}
HResult HFSetExpansiveHardwareRockchipDmaHeapPath(HPath path) {
INSPIREFACE_CONTEXT->SetRockchipDmaHeapPath(path);
return HSUCCEED;
@@ -517,12 +564,29 @@ HResult HFSetAppleCoreMLInferenceMode(HFAppleCoreMLInferenceMode mode) {
return HSUCCEED;
}
HResult HFSetCudaDeviceId(int32_t device_id) {
// Select the global image-processing backend (CPU or Rockchip RGA).
// Maps the public HFImageProcessingBackend enum onto the internal
// inspire::Launch enum and applies it via the global context.
// Returns HERR_INVALID_PARAM for any unrecognized backend value.
HResult HFSwitchImageProcessingBackend(HFImageProcessingBackend backend) {
if (backend == HF_IMAGE_PROCESSING_CPU) {
INSPIREFACE_CONTEXT->SwitchImageProcessingBackend(inspire::Launch::IMAGE_PROCESSING_CPU);
} else if (backend == HF_IMAGE_PROCESSING_RGA) {
INSPIREFACE_CONTEXT->SwitchImageProcessingBackend(inspire::Launch::IMAGE_PROCESSING_RGA);
} else {
INSPIRE_LOGE("Unsupported image processing backend.");
return HERR_INVALID_PARAM;
}
return HSUCCEED;
}
// Set the global image-processing aligned width on the context.
// The width is forwarded unvalidated (no range/positivity check here).
// Always returns HSUCCEED.
HResult HFSetImageProcessAlignedWidth(HInt32 width) {
INSPIREFACE_CONTEXT->SetImageProcessAlignedWidth(width);
return HSUCCEED;
}
HResult HFSetCudaDeviceId(HInt32 device_id) {
INSPIREFACE_CONTEXT->SetCudaDeviceId(device_id);
return HSUCCEED;
}
HResult HFGetCudaDeviceId(int32_t *device_id) {
HResult HFGetCudaDeviceId(HPInt32 device_id) {
*device_id = INSPIREFACE_CONTEXT->GetCudaDeviceId();
return HSUCCEED;
}
@@ -536,7 +600,7 @@ HResult HFPrintCudaDeviceInfo() {
#endif
}
HResult HFGetNumCudaDevices(int32_t *num_devices) {
HResult HFGetNumCudaDevices(HPInt32 num_devices) {
#if defined(ISF_ENABLE_TENSORRT)
return inspire::GetCudaDeviceCount(num_devices);
#else
@@ -545,7 +609,7 @@ HResult HFGetNumCudaDevices(int32_t *num_devices) {
#endif
}
HResult HFCheckCudaDeviceSupport(int32_t *is_support) {
HResult HFCheckCudaDeviceSupport(HPInt32 is_support) {
#if defined(ISF_ENABLE_TENSORRT)
return inspire::CheckCudaUsability(is_support);
#else
@@ -592,7 +656,7 @@ HResult HFSessionSetTrackPreviewSize(HFSession session, HInt32 previewSize) {
return ctx->impl.SetTrackPreviewSize(previewSize);
}
HResult HFSessionGetTrackPreviewSize(HFSession session, HInt32 *previewSize) {
HResult HFSessionGetTrackPreviewSize(HFSession session, HPInt32 previewSize) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
@@ -703,7 +767,7 @@ HResult HFExecuteFaceTrack(HFSession session, HFImageStream streamHandle, PHFMul
return ret;
}
HResult HFSessionLastFaceDetectionGetDebugPreviewImageSize(HFSession session, HInt32 *size) {
HResult HFSessionLastFaceDetectionGetDebugPreviewImageSize(HFSession session, HPInt32 size) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
@@ -733,7 +797,7 @@ HResult HFGetNumOfFaceDenseLandmark(HPInt32 num) {
return HSUCCEED;
}
HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasicToken singleFace, HPoint2f *landmarks, HInt32 num) {
HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasicToken singleFace, PHPoint2f landmarks, HInt32 num) {
if (num != 106) {
return HERR_SESS_LANDMARK_NUM_NOT_MATCH;
}
@@ -746,7 +810,7 @@ HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasicToken singleFace, HPoint2
if (ret != HSUCCEED) {
return ret;
}
if (face.densityLandmarkEnable == 0) {
if (face.densityLandmarkEnable == HF_STATUS_DISABLE) {
INSPIRE_LOGW("To get dense landmarks in always-detect mode, you need to enable HF_ENABLE_DETECT_MODE_LANDMARK");
return HERR_SESS_LANDMARK_NOT_ENABLE;
}
@@ -757,7 +821,7 @@ HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasicToken singleFace, HPoint2
return HSUCCEED;
}
HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, HPoint2f *landmarks, HInt32 num) {
HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, PHPoint2f landmarks, HInt32 num) {
if (num != 5) {
return HERR_SESS_KEY_POINT_NUM_NOT_MATCH;
}
@@ -777,7 +841,7 @@ HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, HPoint2
return HSUCCEED;
}
HResult HFSessionSetEnableTrackCostSpend(HFSession session, int value) {
HResult HFSessionSetEnableTrackCostSpend(HFSession session, HInt32 value) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
@@ -801,7 +865,7 @@ HResult HFSessionPrintTrackCostSpend(HFSession session) {
return HSUCCEED;
}
HResult HFFeatureHubFaceSearchThresholdSetting(float threshold) {
HResult HFFeatureHubFaceSearchThresholdSetting(HFloat threshold) {
INSPIREFACE_FEATURE_HUB->SetRecognitionThreshold(threshold);
return HSUCCEED;
}
@@ -911,7 +975,7 @@ HResult HFReleaseFaceFeature(PHFFaceFeature feature) {
return HSUCCEED;
}
HResult HFFaceGetFaceAlignmentImage(HFSession session, HFImageStream streamHandle, HFFaceBasicToken singleFace, HFImageBitmap *handle) {
HResult HFFaceGetFaceAlignmentImage(HFSession session, HFImageStream streamHandle, HFFaceBasicToken singleFace, PHFImageBitmap handle) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
@@ -1284,7 +1348,7 @@ HResult HFGetFaceQualityConfidence(HFSession session, PHFFaceQualityConfidence c
return HSUCCEED;
}
HResult HFFaceQualityDetect(HFSession session, HFFaceBasicToken singleFace, HFloat *confidence) {
HResult HFFaceQualityDetect(HFSession session, HFFaceBasicToken singleFace, HPFloat confidence) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
@@ -1367,7 +1431,7 @@ HResult HFGetFaceEmotionResult(HFSession session, PHFFaceEmotionResult result) {
return HSUCCEED;
}
HResult HFFeatureHubGetFaceCount(HInt32 *count) {
HResult HFFeatureHubGetFaceCount(HPInt32 count) {
*count = INSPIREFACE_FEATURE_HUB->GetFaceFeatureCount();
return HSUCCEED;
}
@@ -1387,9 +1451,9 @@ HResult HFFeatureHubGetExistingIds(PHFFeatureHubExistingIds ids) {
}
HResult HFQueryInspireFaceVersion(PHFInspireFaceVersion version) {
version->major = std::stoi(INSPIRE_FACE_VERSION_MAJOR_STR);
version->minor = std::stoi(INSPIRE_FACE_VERSION_MINOR_STR);
version->patch = std::stoi(INSPIRE_FACE_VERSION_PATCH_STR);
version->major = atoi(INSPIRE_FACE_VERSION_MAJOR_STR);
version->minor = atoi(INSPIRE_FACE_VERSION_MINOR_STR);
version->patch = atoi(INSPIRE_FACE_VERSION_PATCH_STR);
return HSUCCEED;
}
@@ -1449,12 +1513,12 @@ HResult HFDeBugShowResourceStatistics() {
return HSUCCEED;
}
HResult HFDeBugGetUnreleasedSessionsCount(HInt32 *count) {
HResult HFDeBugGetUnreleasedSessionsCount(HPInt32 count) {
*count = RESOURCE_MANAGE->getUnreleasedSessions().size();
return HSUCCEED;
}
HResult HFDeBugGetUnreleasedSessions(HFSession *sessions, HInt32 count) {
HResult HFDeBugGetUnreleasedSessions(PHFSession sessions, HInt32 count) {
std::vector<long> unreleasedSessions = RESOURCE_MANAGE->getUnreleasedSessions();
for (int i = 0; i < count; ++i) {
sessions[i] = (HFSession)unreleasedSessions[i];
@@ -1462,12 +1526,12 @@ HResult HFDeBugGetUnreleasedSessions(HFSession *sessions, HInt32 count) {
return HSUCCEED;
}
HResult HFDeBugGetUnreleasedStreamsCount(HInt32 *count) {
HResult HFDeBugGetUnreleasedStreamsCount(HPInt32 count) {
*count = RESOURCE_MANAGE->getUnreleasedStreams().size();
return HSUCCEED;
}
HResult HFDeBugGetUnreleasedStreams(HFImageStream *streams, HInt32 count) {
HResult HFDeBugGetUnreleasedStreams(PHFImageStream streams, HInt32 count) {
std::vector<long> unreleasedStreams = RESOURCE_MANAGE->getUnreleasedStreams();
for (int i = 0; i < count; ++i) {
streams[i] = (HFImageStream)unreleasedStreams[i];

View File

@@ -39,6 +39,51 @@ extern "C" {
#define HF_ENABLE_FACE_POSE 0x00000200 ///< Flag to enable face pose estimation feature.
#define HF_ENABLE_FACE_EMOTION 0x00000400 ///< Flag to enable face emotion recognition feature.
/************************************************************************
* Image Stream Function
*
* ImageStream directly interacts with algorithm modules, providing image data streams for algorithm module input;
* ImageStream provides automatic transformation to adapt camera stream rotation angles and common image encoding/decoding format conversion;
* Camera picture rotation mode.
* To accommodate the rotation of certain devices, four image rotation modes are provided.
*
* 1. ROTATION_0 (No Rotation):
* Original Image (w x h): Scaled Image (w*s x h*s):
* A(0,0) ----------- B(w-1,0) A(0,0) ----------- B(w*s-1,0)
* | | | |
* | Original | => | Scaled |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) C(0,h*s-1) ---- D(w*s-1,h*s-1)
* Point Mapping: A->A(0,0), B->B(w*s-1,0), C->C(0,h*s-1), D->D(w*s-1,h*s-1)
*
* 2. ROTATION_90 (90° Counter-Clockwise):
* Original Image (w x h): Rotated Image (h*s x w*s):
* A(0,0) ----------- B(w-1,0) B(0,0) ----------- A(h*s-1,0)
* | | | |
* | Original | => | Rotated |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) D(0,w*s-1) ---- C(h*s-1,w*s-1)
* Point Mapping: A->A(h*s-1,0), B->B(0,0), C->C(h*s-1,w*s-1), D->D(0,w*s-1)
*
* 3. ROTATION_180 (180° Rotation):
* Original Image (w x h): Rotated Image (w*s x h*s):
* A(0,0) ----------- B(w-1,0) D(0,0) ----------- C(w*s-1,0)
* | | | |
* | Original | => | Rotated |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) B(0,h*s-1) ---- A(w*s-1,h*s-1)
* Point Mapping: A->A(w*s-1,h*s-1), B->B(0,h*s-1), C->C(w*s-1,0), D->D(0,0)
*
* 4. ROTATION_270 (270° Counter-Clockwise):
* Original Image (w x h): Rotated Image (h*s x w*s):
* A(0,0) ----------- B(w-1,0) D(0,0) ----------- C(h*s-1,0)
* | | | |
* | Original | => | Rotated |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) B(0,w*s-1) ---- A(h*s-1,w*s-1)
* Point Mapping: A->A(h*s-1,w*s-1), B->B(0,w*s-1), C->C(h*s-1,0), D->D(0,0)
************************************************************************/
/**
* Camera stream format.
* Contains several common camera stream formats available in the market.
@@ -86,7 +131,7 @@ typedef struct HFImageData {
* @param handle Pointer to the stream handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateImageStream(PHFImageData data, HFImageStream *handle);
HYPER_CAPI_EXPORT extern HResult HFCreateImageStream(PHFImageData data, PHFImageStream handle);
/**
* @brief Create an empty image stream instance.
@@ -96,7 +141,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageStream(PHFImageData data, HFImageS
* @param handle Pointer to the stream handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamEmpty(HFImageStream *handle);
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamEmpty(PHFImageStream handle);
/**
* @brief Set the buffer of the image stream.
@@ -137,11 +182,18 @@ HYPER_CAPI_EXPORT extern HResult HFImageStreamSetFormat(HFImageStream handle, HF
*/
HYPER_CAPI_EXPORT extern HResult HFReleaseImageStream(HFImageStream streamHandle);
/************************************************************************
* Image Bitmap Function
*
* Provides a simple Bitmap interface wrapper that copies image data when creating objects and requires manual release.
* Provides interfaces for copying, drawing, displaying (OpenCV-GUI), writing to files, and converting to/from ImageStream.
************************************************************************/
/**
* @brief Struct for image bitmap data.
*/
typedef struct HFImageBitmapData {
uint8_t *data; ///< Pointer to the image data.
HPUInt8 data; ///< Pointer to the image data.
HInt32 width; ///< Width of the image.
HInt32 height; ///< Height of the image.
HInt32 channels; ///< Number of channels in the image, only support 3 channels or 1 channel.
@@ -154,7 +206,7 @@ typedef struct HFImageBitmapData {
* @param handle Pointer to the image bitmap handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmap(PHFImageBitmapData data, HFImageBitmap *handle);
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmap(PHFImageBitmapData data, PHFImageBitmap handle);
/**
* @brief Create a image bitmap from file path, default pixel format is BGR.
@@ -164,7 +216,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmap(PHFImageBitmapData data, HF
* @param handle Pointer to the image bitmap handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromFilePath(HPath filePath, HInt32 channels, HFImageBitmap *handle);
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromFilePath(HPath filePath, HInt32 channels, PHFImageBitmap handle);
/**
* @brief Copy an image bitmap.
@@ -173,7 +225,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromFilePath(HPath filePath,
* @param copyHandle Pointer to the image bitmap handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFImageBitmapCopy(HFImageBitmap handle, HFImageBitmap *copyHandle);
HYPER_CAPI_EXPORT extern HResult HFImageBitmapCopy(HFImageBitmap handle, PHFImageBitmap copyHandle);
/**
* @brief Release the image bitmap.
@@ -191,7 +243,7 @@ HYPER_CAPI_EXPORT extern HResult HFReleaseImageBitmap(HFImageBitmap handle);
* @param streamHandle Pointer to the image stream handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitmap handle, HFRotation rotation, HFImageStream *streamHandle);
HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitmap handle, HFRotation rotation, PHFImageStream streamHandle);
/**
* @brief Create a image bitmap from image stream.
@@ -202,8 +254,8 @@ HYPER_CAPI_EXPORT extern HResult HFCreateImageStreamFromImageBitmap(HFImageBitma
* @param scale The scale of the image.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromImageStreamProcess(HFImageStream streamHandle, HFImageBitmap *handle, int is_rotate,
float scale);
HYPER_CAPI_EXPORT extern HResult HFCreateImageBitmapFromImageStreamProcess(HFImageStream streamHandle, PHFImageBitmap handle, HInt32 is_rotate,
HFloat scale);
/**
* @brief Write the image bitmap to a file.
@@ -259,6 +311,9 @@ HYPER_CAPI_EXPORT extern HResult HFImageBitmapShow(HFImageBitmap handle, HString
/************************************************************************
* Resource Function
*
* The resource module is a system-level module that manages the life cycle of all resources.
* It is responsible for loading and unloading resources, and managing the memory of resources.
************************************************************************/
/**
@@ -293,7 +348,7 @@ HYPER_CAPI_EXPORT extern HResult HFTerminateInspireFace();
* @param status Pointer to the status variable that will be returned.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFQueryInspireFaceLaunchStatus(HInt32 *status);
HYPER_CAPI_EXPORT extern HResult HFQueryInspireFaceLaunchStatus(HPInt32 status);
/************************************************************************
* Extended Interface Based on Third-party Hardware Devices
@@ -304,6 +359,13 @@ HYPER_CAPI_EXPORT extern HResult HFQueryInspireFaceLaunchStatus(HInt32 *status);
* computing, and other features.
************************************************************************/
/**
* @brief Check whether RGA is enabled during compilation
* @param status Pointer to the status variable that will be returned.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFQueryExpansiveHardwareRGACompileOption(HPInt32 enable);
/**
* @brief Set the rockchip dma heap path
* By default, we have already configured the DMA Heap address used by RGA on RK devices.
@@ -321,6 +383,28 @@ HYPER_CAPI_EXPORT extern HResult HFSetExpansiveHardwareRockchipDmaHeapPath(HPath
* */
HYPER_CAPI_EXPORT extern HResult HFQueryExpansiveHardwareRockchipDmaHeapPath(HString path);
/**
* @brief Enum for image processing backend.
*/
typedef enum HFImageProcessingBackend {
HF_IMAGE_PROCESSING_CPU = 0, ///< CPU backend(Default)
HF_IMAGE_PROCESSING_RGA = 1, ///< Rockchip RGA backend(Hardware support is mandatory)
} HFImageProcessingBackend;
/**
* @brief Switch the image processing backend, must be called before HFCreateInspireFaceSession.
* @param backend The image processing backend to be set.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFSwitchImageProcessingBackend(HFImageProcessingBackend backend);
/**
* @brief Set the image process aligned width, must be called before HFCreateInspireFaceSession.
* @param width The image process aligned width to be set.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFSetImageProcessAlignedWidth(HInt32 width);
/**
* @brief Enum for Apple CoreML inference mode.
*/
@@ -342,14 +426,14 @@ HYPER_CAPI_EXPORT extern HResult HFSetAppleCoreMLInferenceMode(HFAppleCoreMLInfe
* @param device_id The device id to be set.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFSetCudaDeviceId(int32_t device_id);
HYPER_CAPI_EXPORT extern HResult HFSetCudaDeviceId(HInt32 device_id);
/**
* @brief Get the CUDA device id, must be called after HFCreateInspireFaceSession.
* @param device_id Pointer to the device id to be returned.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFGetCudaDeviceId(int32_t *device_id);
HYPER_CAPI_EXPORT extern HResult HFGetCudaDeviceId(HPInt32 device_id);
/**
* @brief Print the CUDA device information.
@@ -362,17 +446,22 @@ HYPER_CAPI_EXPORT extern HResult HFPrintCudaDeviceInfo();
* @param num_devices Pointer to the number of CUDA devices to be returned.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFGetNumCudaDevices(int32_t *num_devices);
HYPER_CAPI_EXPORT extern HResult HFGetNumCudaDevices(HPInt32 num_devices);
/**
* @brief Check if the CUDA device is supported.
* @param support The support flag to be checked.
* @return HResult indicating the success or failure of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFCheckCudaDeviceSupport(int32_t *is_support);
HYPER_CAPI_EXPORT extern HResult HFCheckCudaDeviceSupport(HPInt32 is_support);
/************************************************************************
* FaceSession
* FaceSession Function
*
* FaceSession is responsible for all face image algorithm-related functions,
* including face detection, face alignment, face recognition, face quality detection, face attribute prediction, etc.
* FaceSession supports flexible configuration, allowing you to enable or disable certain functions, and also set parameters for certain functions.
* In concurrent scenarios, multiple sessions can be created, each session can run independently without interfering with each other.
************************************************************************/
/**
@@ -455,7 +544,7 @@ HYPER_CAPI_EXPORT extern HResult HFQuerySupportedPixelLevelsForFaceDetection(PHF
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectMode detectMode, HInt32 maxDetectFaceNum,
HInt32 detectPixelLevel, HInt32 trackByDetectModeFPS, HFSession *handle);
HInt32 detectPixelLevel, HInt32 trackByDetectModeFPS, PHFSession handle);
/**
* @brief Create a session from a resource file with additional options.
@@ -472,7 +561,7 @@ HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSession(HFSessionCustomParam
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSessionOptional(HOption customOption, HFDetectMode detectMode, HInt32 maxDetectFaceNum,
HInt32 detectPixelLevel, HInt32 trackByDetectModeFPS, HFSession *handle);
HInt32 detectPixelLevel, HInt32 trackByDetectModeFPS, PHFSession handle);
/**
* @brief Release the session.
@@ -482,6 +571,13 @@ HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSessionOptional(HOption cust
*/
HYPER_CAPI_EXPORT extern HResult HFReleaseInspireFaceSession(HFSession handle);
/************************************************************************
* FaceTrack Module
*
* FaceTrack provides the most basic face image algorithm functions, such as face detection, tracking, landmark detection, etc.
* FaceTrack is independent of FaceSession, and can be used independently.
************************************************************************/
/**
* @brief Struct representing a basic token for face data.
*
@@ -498,9 +594,9 @@ typedef struct HFFaceBasicToken {
* This struct represents the Euler angles (roll, yaw, pitch) for face orientation.
*/
typedef struct HFFaceEulerAngle {
HFloat *roll; ///< Roll angle of the face.
HFloat *yaw; ///< Yaw angle of the face.
HFloat *pitch; ///< Pitch angle of the face.
HPFloat roll; ///< Roll angle of the face.
HPFloat yaw; ///< Yaw angle of the face.
HPFloat pitch; ///< Pitch angle of the face.
} HFFaceEulerAngle;
/**
@@ -511,14 +607,37 @@ typedef struct HFFaceEulerAngle {
*/
typedef struct HFMultipleFaceData {
HInt32 detectedNum; ///< Number of faces detected.
HFaceRect *rects; ///< Array of bounding rectangles for each face.
HInt32 *trackIds; ///< Array of track IDs for each face.
HInt32 *trackCounts; ///< Array of track counts for each face.
HFloat *detConfidence; ///< Array of detection confidence for each face.
PHFaceRect rects; ///< Array of bounding rectangles for each face.
HPInt32 trackIds; ///< Array of track IDs for each face.
HPInt32 trackCounts; ///< Array of track counts for each face.
HPFloat detConfidence; ///< Array of detection confidence for each face.
HFFaceEulerAngle angles; ///< Euler angles for each face.
PHFFaceBasicToken tokens; ///< Tokens associated with each face.
} HFMultipleFaceData, *PHFMultipleFaceData;
/**
* @brief Clear the tracking face
* @param session Handle to the session.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFSessionClearTrackingFace(HFSession session);
/**
* @brief Set the track lost recovery mode(only for LightTrack mode, default is false(0))
* @param session Handle to the session.
* @param enable The track lost recovery mode value (0: disable, 1: enable)
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFSessionSetTrackLostRecoveryMode(HFSession session, HInt32 enable);
/**
* @brief Set the light track confidence threshold(only for LightTrack mode, default is 0.1)
* @param session Handle to the session.
* @param value The light track confidence threshold value
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFSessionSetLightTrackConfidenceThreshold(HFSession session, HFloat value);
/**
* @brief Set the track preview size in the session, it works with face detection and tracking
* algorithms. Default preview size is 192(px).
@@ -535,7 +654,7 @@ HYPER_CAPI_EXPORT extern HResult HFSessionSetTrackPreviewSize(HFSession session,
* @param previewSize The size of the preview for tracking.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFSessionGetTrackPreviewSize(HFSession session, HInt32 *previewSize);
HYPER_CAPI_EXPORT extern HResult HFSessionGetTrackPreviewSize(HFSession session, HPInt32 previewSize);
/**
* @brief Set the minimum number of face pixels that the face detector can capture, and people below
@@ -583,7 +702,6 @@ HYPER_CAPI_EXPORT extern HResult HFSessionSetTrackModeNumSmoothCacheFrame(HFSess
*/
HYPER_CAPI_EXPORT extern HResult HFSessionSetTrackModeDetectInterval(HFSession session, HInt32 num);
/**
* @brief Run face tracking in the session.
*
@@ -600,7 +718,7 @@ HYPER_CAPI_EXPORT extern HResult HFExecuteFaceTrack(HFSession session, HFImageSt
* @param size The size of the preview for tracking.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFSessionLastFaceDetectionGetDebugPreviewImageSize(HFSession session, HInt32 *size);
HYPER_CAPI_EXPORT extern HResult HFSessionLastFaceDetectionGetDebugPreviewImageSize(HFSession session, HPInt32 size);
/**
* @brief Copies the data from a HF_FaceBasicToken to a specified buffer.
@@ -648,7 +766,7 @@ HYPER_CAPI_EXPORT extern HResult HFGetNumOfFaceDenseLandmark(HPInt32 num);
* @param num Number of landmark points
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasicToken singleFace, HPoint2f *landmarks, HInt32 num);
HYPER_CAPI_EXPORT extern HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasicToken singleFace, PHPoint2f landmarks, HInt32 num);
/**
* @brief Get the five key points from the face token.
@@ -657,24 +775,27 @@ HYPER_CAPI_EXPORT extern HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasic
* @param num Number of landmark points
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, HPoint2f *landmarks, HInt32 num);
HYPER_CAPI_EXPORT extern HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, PHPoint2f landmarks, HInt32 num);
/**
* @brief Set the enable cost spend
* @param value The enable cost spend value
* @return int32_t Status code of the operation.
* @return HResult Status code of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFSessionSetEnableTrackCostSpend(HFSession session, int value);
HYPER_CAPI_EXPORT extern HResult HFSessionSetEnableTrackCostSpend(HFSession session, HInt32 value);
/**
* @brief Print the cost spend
* @param session The session handle
* @return int32_t Status code of the operation.
* @return HResult Status code of the operation.
* */
HYPER_CAPI_EXPORT extern HResult HFSessionPrintTrackCostSpend(HFSession session);
/************************************************************************
* Face Recognition
* Face Recognition Module
*
* The interface of the face recognition module depends on FaceSession,
* providing face feature extraction, face alignment image processing, and face comparison interfaces.
************************************************************************/
/**
@@ -744,7 +865,7 @@ HYPER_CAPI_EXPORT extern HResult HFReleaseFaceFeature(PHFFaceFeature feature);
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFFaceGetFaceAlignmentImage(HFSession session, HFImageStream streamHandle, HFFaceBasicToken singleFace,
HFImageBitmap *handle);
PHFImageBitmap handle);
/**
* @brief Use the aligned face image to extract face features to the HFFaceFeature that has applied memory in advance.
@@ -757,6 +878,11 @@ HYPER_CAPI_EXPORT extern HResult HFFaceFeatureExtractWithAlignmentImage(HFSessio
/************************************************************************
* Feature Hub
*
* FeatureHub is a built-in global lightweight face feature vector management functionality
* provided in the InspireFace-SDK. It supports basic face feature search, deletion, and
* modification functions, and offers two optional data storage modes: an in-memory model and a
* persistence model. If you have simple storage needs, you can enable it.
************************************************************************/
/**
@@ -785,7 +911,7 @@ typedef struct HFFeatureHubConfiguration {
HFPKMode primaryKeyMode; ///< Primary key mode(The id increment mode is recommended)
HInt32 enablePersistence; ///< Flag to enable or disable the use of the database.
HString persistenceDbPath; ///< Path to the database file.
float searchThreshold; ///< Threshold for face search
HFloat searchThreshold; ///< Threshold for face search
HFSearchMode searchMode; ///< Mode of face search
} HFFeatureHubConfiguration;
@@ -837,7 +963,7 @@ typedef struct HFSearchTopKResults {
* access control scenarios).
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFFeatureHubFaceSearchThresholdSetting(float threshold);
HYPER_CAPI_EXPORT extern HResult HFFeatureHubFaceSearchThresholdSetting(HFloat threshold);
/**
* @brief Perform a one-to-one comparison of two face features.
@@ -971,7 +1097,7 @@ HYPER_CAPI_EXPORT extern HResult HFFeatureHubGetFaceIdentity(HFaceId customId, P
* @param count Pointer to an integer where the count of features will be stored.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFFeatureHubGetFaceCount(HInt32 *count);
HYPER_CAPI_EXPORT extern HResult HFFeatureHubGetFaceCount(HPInt32 count);
/**
* @brief View the face database table.
@@ -996,7 +1122,10 @@ typedef struct HFFeatureHubExistingIds {
HYPER_CAPI_EXPORT extern HResult HFFeatureHubGetExistingIds(PHFFeatureHubExistingIds ids);
/************************************************************************
* Face Pipeline
* Face Pipeline Module
*
* FacePipeline depends on FaceSession, providing extended business for face image algorithms,
* supporting some face attributes, such as face mask detection, face quality detection, face attribute prediction, etc.
************************************************************************/
/**
@@ -1108,7 +1237,7 @@ HYPER_CAPI_EXPORT extern HResult HFGetFaceQualityConfidence(HFSession session, P
* @param confidence Pointer to a floating-point value where the quality confidence will be stored.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFFaceQualityDetect(HFSession session, HFFaceBasicToken singleFace, HFloat *confidence);
HYPER_CAPI_EXPORT extern HResult HFFaceQualityDetect(HFSession session, HFFaceBasicToken singleFace, HPFloat confidence);
/**
* @brief Facial states in the face interaction module.
@@ -1147,6 +1276,7 @@ typedef struct HFFaceInteractionsActions {
* @return HResult indicating success or failure of the function call.
*/
HYPER_CAPI_EXPORT extern HResult HFGetFaceInteractionActionsResult(HFSession session, PHFFaceInteractionsActions actions);
/**
* @brief Struct representing face attribute results.
*
@@ -1191,15 +1321,15 @@ HYPER_CAPI_EXPORT extern HResult HFGetFaceAttributeResult(HFSession session, PHF
* @brief Struct representing face emotion results.
*/
typedef struct HFFaceEmotionResult {
HInt32 num; ///< Number of faces detected.
HPInt32 emotion; ///< Emotion of the detected face.
///< 0: Neutral;
///< 1: Happy;
///< 2: Sad;
///< 3: Surprise;
///< 4: Fear;
///< 5: Disgust;
///< 6: Anger;
HInt32 num; ///< Number of faces detected.
HPInt32 emotion; ///< Emotion of the detected face.
///< 0: Neutral;
///< 1: Happy;
///< 2: Sad;
///< 3: Surprise;
///< 4: Fear;
///< 5: Disgust;
///< 6: Anger;
} HFFaceEmotionResult, *PHFFaceEmotionResult;
/**
@@ -1218,9 +1348,9 @@ HYPER_CAPI_EXPORT extern HResult HFGetFaceEmotionResult(HFSession session, PHFFa
* @brief Structure representing the version information of the InspireFace library.
*/
typedef struct HFInspireFaceVersion {
int major; ///< Major version number.
int minor; ///< Minor version number.
int patch; ///< Patch version number.
HInt32 major; ///< Major version number.
HInt32 minor; ///< Minor version number.
HInt32 patch; ///< Patch version number.
} HFInspireFaceVersion, *PHFInspireFaceVersion;
/**
@@ -1328,7 +1458,7 @@ HYPER_CAPI_EXPORT extern HResult HFDeBugShowResourceStatistics();
* @param count Pointer to an integer where the count of unreleased sessions will be stored.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedSessionsCount(HInt32 *count);
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedSessionsCount(HPInt32 count);
/**
* @brief Get the list of unreleased sessions.
@@ -1339,7 +1469,7 @@ HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedSessionsCount(HInt32 *count
* @param count The number of sessions to retrieve.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedSessions(HFSession *sessions, HInt32 count);
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedSessions(PHFSession sessions, HInt32 count);
/**
* @brief Get the count of unreleased image streams.
@@ -1349,7 +1479,7 @@ HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedSessions(HFSession *session
* @param count Pointer to an integer where the count of unreleased image streams will be stored.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedStreamsCount(HInt32 *count);
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedStreamsCount(HPInt32 count);
/**
* @brief Get the list of unreleased image streams.
@@ -1360,7 +1490,7 @@ HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedStreamsCount(HInt32 *count)
* @param count The number of image streams to retrieve.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedStreams(HFImageStream *streams, HInt32 count);
HYPER_CAPI_EXPORT extern HResult HFDeBugGetUnreleasedStreams(PHFImageStream streams, HInt32 count);
#ifdef __cplusplus
}

View File

@@ -8,14 +8,29 @@
#include "engine/face_session.h"
/**
* @brief Struct for managing face algorithm session.
*
* This struct holds the implementation of the face algorithm session.
*/
typedef struct HF_FaceAlgorithmSession {
inspire::FaceSession impl; ///< Implementation of the face context.
} HF_FaceAlgorithmSession; ///< Handle for managing face context.
/**
* @brief Struct for managing camera stream.
*
* This struct holds the implementation of the camera stream.
*/
typedef struct HF_CameraStream {
inspirecv::FrameProcess impl; ///< Implementation of the camera stream.
} HF_CameraStream; ///< Handle for managing camera stream.
/**
* @brief Struct for managing image bitmap.
*
* This struct holds the implementation of the image bitmap.
*/
typedef struct HF_ImageBitmap {
inspirecv::Image impl; ///< Implementation of the image bitmap.
} HF_ImageBitmap; ///< Handle for managing image bitmap.

View File

@@ -8,13 +8,16 @@
#include <stdint.h>
// clang-format off
typedef void* HFImageStream; ///< Handle for image.
typedef void** PHFImageStream; ///< Pointer to Handle for image.
typedef void* HFSession; ///< Handle for context.
typedef void** PHFSession; ///< Pointer to Handle for context.
typedef void* HFImageBitmap; ///< Handle for image bitmap.
typedef void** PHFImageBitmap; ///< Pointer to Handle for image bitmap.
typedef void* HPVoid; ///< Pointer to Void.
typedef void* HFImageStream; ///< Handle for image.
typedef void* HFSession; ///< Handle for context.
typedef void* HFImageBitmap; ///< Handle for image bitmap.
typedef long HLong; ///< Long integer.
typedef float HFloat; ///< Single-precision floating point.
typedef float* HPFloat; ///< Pointer to Single-precision floating point.
typedef float HFloat; ///< Single-precision floating point.
typedef float* HPFloat; ///< Pointer to Single-precision floating point.
typedef double HDouble; ///< Double-precision floating point.
typedef unsigned char HUInt8; ///< Unsigned 8-bit integer.
typedef unsigned char* HPUInt8; ///< Pointer to unsigned 8-bit integer.
@@ -26,35 +29,35 @@ typedef int64_t* HPFaceId; ///< Pointer to Fa
typedef long HResult; ///< Result code.
typedef char* HString; ///< String.
typedef const char* HPath; ///< Const String.
typedef const char* HFormat; ///< Const String.
typedef char HBuffer; ///< Character.
typedef const char* HFormat; ///< Const String.
typedef char HBuffer; ///< Character.
typedef char HChar; ///< Character.
typedef char* HPBuffer; ///< Pointer Character.
typedef char* HPBuffer; ///< Pointer Character.
typedef long HSize; ///< Size
typedef long* HPSize; ///< Pointer Size
typedef long* HPSize; ///< Pointer Size
// clang-format on
typedef struct HFaceRect {
HInt32 x; ///< X-coordinate of the top-left corner of the rectangle.
HInt32 y; ///< Y-coordinate of the top-left corner of the rectangle.
HInt32 width; ///< Width of the rectangle.
HInt32 height; ///< Height of the rectangle.
} HFaceRect; ///< Rectangle representing a face region.
HInt32 x; ///< X-coordinate of the top-left corner of the rectangle.
HInt32 y; ///< Y-coordinate of the top-left corner of the rectangle.
HInt32 width; ///< Width of the rectangle.
HInt32 height; ///< Height of the rectangle.
} HFaceRect, *PHFaceRect; ///< Rectangle representing a face region.
typedef struct HPoint2f{
HFloat x; ///< X-coordinate
HFloat y; ///< Y-coordinate
} HPoint2f;
HFloat x; ///< X-coordinate
HFloat y; ///< Y-coordinate
} HPoint2f, *PHPoint2f;
typedef struct HPoint2i{
HInt32 x; ///< X-coordinate
HInt32 y; ///< Y-coordinate
} HPoint2i;
HInt32 x; ///< X-coordinate
HInt32 y; ///< Y-coordinate
} HPoint2i, *PHPoint2i;
typedef struct HColor {
HFloat r; ///< Red component
HFloat g; ///< Green component
HFloat b; ///< Blue component
} HColor;
HFloat r; ///< Red component
HFloat g; ///< Green component
HFloat b; ///< Blue component
} HColor, *PHColor;
#endif //INSPIREFACE_INTYPEDEF_H

View File

@@ -458,6 +458,18 @@ bool FaceSession::IsDetectModeLandmark() const {
return m_face_track_->IsDetectModeLandmark();
}
void FaceSession::ClearTrackingFace() {
m_face_track_->ClearTrackingFace();
}
void FaceSession::SetTrackLostRecoveryMode(bool value) {
m_face_track_->SetTrackLostRecoveryMode(value);
}
void FaceSession::SetLightTrackConfidenceThreshold(float value) {
m_face_track_->SetLightTrackConfidenceThreshold(value);
}
int32_t FaceSession::SetTrackPreviewSize(const int32_t preview_size) {
m_face_track_->SetTrackPreviewSize(preview_size);
return HSUCCEED;

View File

@@ -188,6 +188,23 @@ public:
*/
bool IsDetectModeLandmark() const;
/**
* @brief Clear the tracking face
*/
void ClearTrackingFace();
/**
* @brief Set the track lost recovery mode
* @param value Track lost recovery mode
*/
void SetTrackLostRecoveryMode(bool value);
/**
* @brief Set the light track confidence threshold
* @param value Light track confidence threshold
*/
void SetLightTrackConfidenceThreshold(float value);
public:
// Accessor methods for various cached data
/**

View File

@@ -318,6 +318,21 @@ int32_t FeatureHubDB::GetFaceFeature(int32_t id, std::vector<float> &feature) {
return HSUCCEED;
}
int32_t FeatureHubDB::GetFaceFeature(int32_t id, FaceEmbedding& feature) {
std::lock_guard<std::mutex> lock(mutex_);
if (!pImpl->m_enable_) {
INSPIRE_LOGW("FeatureHub is disabled, please enable it before it can be served");
return HERR_FT_HUB_DISABLE;
}
feature.embedding = EMBEDDING_DB::GetInstance().GetVector(id);
if (feature.embedding.empty()) {
return HERR_FT_HUB_NOT_FOUND_FEATURE;
}
return HSUCCEED;
}
int32_t FeatureHubDB::ViewDBTable() {
if (!pImpl->m_enable_) {
INSPIRE_LOGE("FeatureHub is disabled, please enable it before it can be served");

View File

@@ -1,10 +1,10 @@
#include "image_processor.h"
#include "log.h"
#if defined(ISF_ENABLE_RGA)
#include "image_processor_rga.h"
#else
#include "image_processor_general.h"
#endif
#include "image_processor_general.h"
namespace inspire {
@@ -12,12 +12,21 @@ namespace nexus {
ImageProcessor::~ImageProcessor() = default;
std::unique_ptr<ImageProcessor> ImageProcessor::Create() {
#if defined(ISF_ENABLE_RGA)
return std::make_unique<RgaImageProcessor>();
#else
return std::make_unique<GeneralImageProcessor>();
#endif
std::unique_ptr<ImageProcessor> ImageProcessor::Create(inspire::Launch::ImageProcessingBackend backend) {
switch (backend) {
case inspire::Launch::IMAGE_PROCESSING_RGA:
#if defined(ISF_ENABLE_RGA)
return std::make_unique<RgaImageProcessor>();
#else
INSPIRE_LOGE("RGA backend is not enabled, using CPU backend instead");
return std::make_unique<GeneralImageProcessor>();
#endif
case inspire::Launch::IMAGE_PROCESSING_CPU:
return std::make_unique<GeneralImageProcessor>();
default:
// Default to CPU backend
return std::make_unique<GeneralImageProcessor>();
}
}
} // namespace nexus

View File

@@ -5,6 +5,8 @@
#include <vector>
#include <inspirecv/inspirecv.h>
#include <memory>
#include "launch.h"
#include "data_type.h"
namespace inspire {
@@ -16,15 +18,13 @@ namespace nexus {
* This interface provides common image processing operations like resize, color conversion,
* padding etc. It can be implemented by different backends based on compile options:
* - Default CPU-based implementation using InspireCV (always available)
* - Hardware accelerated implementation like Rockchip RGA (enabled with ISF_ENABLE_RGA)
* - Other potential hardware acceleration backends can be enabled via corresponding compile flags
* - Hardware accelerated implementation like Rockchip RGA (enabled with ISF_ENABLE_RGA)
*
* The backend implementation is selected at compile time based on which acceleration options
* are enabled. Only one backend will be active at runtime.
* The backend implementation is selected at runtime based on the backend parameter.
*/
class ImageProcessor {
class INSPIRE_API_EXPORT ImageProcessor {
public:
static std::unique_ptr<ImageProcessor> Create();
static std::unique_ptr<ImageProcessor> Create(inspire::Launch::ImageProcessingBackend backend = inspire::Launch::IMAGE_PROCESSING_CPU);
public:
// Virtual destructor
@@ -51,6 +51,12 @@ public:
// Display cache status information
virtual void DumpCacheStatus() const = 0;
// Get aligned width of the image
virtual int32_t GetAlignedWidth(int width) const = 0;
// Set aligned width of the image
virtual void SetAlignedWidth(int width) = 0;
}; // class ImageProcessor
} // namespace nexus

View File

@@ -1,4 +1,5 @@
#include "image_processor_general.h"
#include "log.h"
namespace inspire {
@@ -54,6 +55,17 @@ void GeneralImageProcessor::DumpCacheStatus() const {
INSPIRECV_LOG(INFO) << "GeneralImageProcessor has no cache to dump";
}
int32_t GeneralImageProcessor::GetAlignedWidth(int width) const {
// Not Supported
INSPIRE_LOGE("GeneralImageProcessor::GetAlignedWidth is not supported");
return 0;
}
void GeneralImageProcessor::SetAlignedWidth(int width) {
// Not Supported
INSPIRE_LOGE("GeneralImageProcessor::SetAlignedWidth is not supported");
}
} // namespace nexus
} // namespace inspire

View File

@@ -8,7 +8,7 @@ namespace inspire {
namespace nexus {
class GeneralImageProcessor : public ImageProcessor {
class INSPIRE_API_EXPORT GeneralImageProcessor : public ImageProcessor {
public:
GeneralImageProcessor() = default;
~GeneralImageProcessor() override = default;
@@ -27,6 +27,10 @@ public:
void DumpCacheStatus() const override;
int32_t GetAlignedWidth(int width) const override;
void SetAlignedWidth(int width) override;
private:
struct BufferWrapper {
inspirecv::Image image;

View File

@@ -6,15 +6,26 @@ namespace inspire {
namespace nexus {
RgaImageProcessor::RgaImageProcessor() {}
RgaImageProcessor::RgaImageProcessor() {
aligned_width_ = 4;
}
RgaImageProcessor::~RgaImageProcessor() {}
int32_t RgaImageProcessor::GetAlignedWidth(int width) const {
return aligned_width_;
}
void RgaImageProcessor::SetAlignedWidth(int width) {
aligned_width_ = width;
}
int32_t RgaImageProcessor::Resize(const uint8_t* src_data, int src_width, int src_height, int channels, uint8_t** dst_data, int dst_width,
int dst_height) {
// Calculate width aligned to 4 bytes
int aligned_src_width = (src_width + 3) & ~3; // Round up to nearest multiple of 4
int aligned_dst_width = (dst_width + 3) & ~3;
int aligned_src_width = (src_width + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
int aligned_dst_width = (dst_width + aligned_width_ - 1) & ~(aligned_width_ - 1);
// std::cout << "aligned_src_width: " << aligned_src_width << ", aligned_dst_width: " << aligned_dst_width << std::endl;
// 1. Get or create source buffer with aligned width
BufferKey src_key{aligned_src_width, src_height, channels};
@@ -38,14 +49,14 @@ int32_t RgaImageProcessor::Resize(const uint8_t* src_data, int src_width, int sr
int ret = imcheck(src_buffer.buffer, dst_buffer.buffer, {}, {});
if (IM_STATUS_NOERROR != ret) {
INSPIRECV_LOG(ERROR) << "RGA parameter check failed: " << imStrError((IM_STATUS)ret);
return false;
return -1;
}
ret = imresize(src_buffer.buffer, dst_buffer.buffer);
if (ret != IM_STATUS_SUCCESS) {
INSPIRECV_LOG(ERROR) << "RGA resize failed: " << imStrError((IM_STATUS)ret);
return false;
return -1;
}
// 5. Return pointer to destination buffer
@@ -71,7 +82,7 @@ int32_t RgaImageProcessor::MarkDone() {
int32_t RgaImageProcessor::SwapColor(const uint8_t* src_data, int src_width, int src_height, int channels, uint8_t** dst_data) {
// Calculate width aligned to 4 bytes
int aligned_src_width = (src_width + 3) & ~3; // Round up to nearest multiple of 4
int aligned_src_width = (src_width + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
// 1. Get or create source buffer
BufferKey src_key{aligned_src_width, src_height, channels};
auto& src_buffer = GetOrCreateBuffer(src_key);
@@ -119,8 +130,8 @@ int32_t RgaImageProcessor::Padding(const uint8_t* src_data, int src_width, int s
dst_height = src_height + top + bottom;
// Calculate width aligned to 4 bytes
int aligned_src_width = (src_width + 3) & ~3;
int aligned_dst_width = (dst_width + 3) & ~3;
int aligned_src_width = (src_width + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
int aligned_dst_width = (dst_width + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
// 1. Get or create source buffer with aligned width
BufferKey src_key{aligned_src_width, src_height, channels};
@@ -173,8 +184,8 @@ int32_t RgaImageProcessor::Padding(const uint8_t* src_data, int src_width, int s
int32_t RgaImageProcessor::ResizeAndPadding(const uint8_t* src_data, int src_width, int src_height, int channels, int dst_width, int dst_height,
uint8_t** dst_data, float& scale) {
// Ensure target dimensions are multiples of 4
int aligned_dst_width = (dst_width + 3) & ~3;
int aligned_dst_height = (dst_height + 3) & ~3;
int aligned_dst_width = (dst_width + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
int aligned_dst_height = (dst_height + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
// Calculate scale (take minimum to fit target box)
scale = std::min(static_cast<float>(aligned_dst_width) / src_width, static_cast<float>(aligned_dst_height) / src_height);
@@ -184,10 +195,10 @@ int32_t RgaImageProcessor::ResizeAndPadding(const uint8_t* src_data, int src_wid
int resized_h = static_cast<int>(src_height * scale);
// Ensure scaled dimensions are multiples of 4
resized_w = (resized_w + 3) & ~3;
resized_h = (resized_h + 3) & ~3;
resized_w = (resized_w + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
resized_h = (resized_h + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
int aligned_src_width = (src_width + 3) & ~3;
int aligned_src_width = (src_width + aligned_width_ - 1) & ~(aligned_width_ - 1); // Round up to nearest multiple of aligned_width_
// 1. Get source buffer
BufferKey src_key{aligned_src_width, src_height, channels};

View File

@@ -31,7 +31,7 @@ namespace inspire {
namespace nexus {
class RgaImageProcessor : public ImageProcessor {
class INSPIRE_API_EXPORT RgaImageProcessor : public ImageProcessor {
public:
RgaImageProcessor();
~RgaImageProcessor() override;
@@ -48,6 +48,10 @@ public:
int32_t MarkDone() override;
int32_t GetAlignedWidth(int width) const override;
void SetAlignedWidth(int width) override;
public:
struct BufferInfo {
int dma_fd;
@@ -200,6 +204,7 @@ private:
std::unordered_map<BufferKey, RGABuffer, BufferKeyHash> buffer_cache_;
BufferKey last_src_key_{0, 0, 0};
BufferKey last_dst_key_{0, 0, 0};
int32_t aligned_width_{0};
};
} // namespace nexus

View File

@@ -152,6 +152,14 @@ public:
*/
int32_t GetFaceFeature(int32_t id, std::vector<float>& feature);
/**
* @brief Retrieves a face feature by its ID.
* @param id ID of the feature to retrieve.
* @param feature Vector to store the retrieved feature.
* @return int32_t Status code of the retrieval operation.
*/
int32_t GetFaceFeature(int32_t id, FaceEmbedding& feature);
/**
* @brief Views the database table containing face data.
* @return int32_t Status code of the operation.

View File

@@ -19,6 +19,63 @@ enum DATA_FORMAT { NV21 = 0, NV12 = 1, RGBA = 2, RGB = 3, BGR = 4, BGRA = 5 , I4
/**
* @brief A class to handle camera stream and image processing.
*
* This class provides image processing capabilities including rotation, scaling, and format conversion.
* It supports four rotation modes with corresponding transformation matrices.
*
* Rotation Transformation Matrix Diagrams:
*
* 1. ROTATION_0 (No Rotation):
* Original Image (w x h): Scaled Image (w*s x h*s):
* A(0,0) ----------- B(w-1,0) A(0,0) ----------- B(w*s-1,0)
* | | | |
* | Original | => | Scaled |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) C(0,h*s-1) ---- D(w*s-1,h*s-1)
* Point Mapping: A->A(0,0), B->B(w*s-1,0), C->C(0,h*s-1), D->D(w*s-1,h*s-1)
*
* 2. ROTATION_90 (90° Counter-Clockwise):
* Original Image (w x h): Rotated Image (h*s x w*s):
* A(0,0) ----------- B(w-1,0) B(0,0) ----------- A(h*s-1,0)
* | | | |
* | Original | => | Rotated |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) D(0,w*s-1) ---- C(h*s-1,w*s-1)
* Point Mapping: A->A(h*s-1,0), B->B(0,0), C->C(h*s-1,w*s-1), D->D(0,w*s-1)
*
* 3. ROTATION_180 (180° Rotation):
* Original Image (w x h): Rotated Image (w*s x h*s):
* A(0,0) ----------- B(w-1,0) D(0,0) ----------- C(w*s-1,0)
* | | | |
* | Original | => | Rotated |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) B(0,h*s-1) ---- A(w*s-1,h*s-1)
* Point Mapping: A->A(w*s-1,h*s-1), B->B(0,h*s-1), C->C(w*s-1,0), D->D(0,0)
*
* 4. ROTATION_270 (270° Counter-Clockwise):
* Original Image (w x h): Rotated Image (h*s x w*s):
* A(0,0) ----------- B(w-1,0) D(0,0) ----------- C(h*s-1,0)
* | | | |
* | Original | => | Rotated |
* | | | |
* C(0,h-1) --------- D(w-1,h-1) B(0,w*s-1) ---- A(h*s-1,w*s-1)
* Point Mapping: A->A(h*s-1,w*s-1), B->B(0,w*s-1), C->C(h*s-1,0), D->D(0,0)
*
* Where:
* - w = original image width
* - h = original image height
* - s = scale factor
* - A, B, C, D represent the four corner points of the image
*
* The transformation matrices are computed using setPolyToPoly() with source and destination
* point arrays corresponding to the corner mappings shown above.
*
* Usage Example:
* @code
* auto processor = FrameProcess::Create(buffer, height, width, BGR, ROTATION_90);
* processor.SetPreviewScale(0.5f);
* auto rotated_image = processor.ExecutePreviewImageProcessing(true);
* @endcode
*/
class INSPIRE_API_EXPORT FrameProcess {
public:

View File

@@ -8,72 +8,68 @@
// [Anchor-Begin]
#define HSUCCEED (0) // Success
#define HERR_BASIC_BASE 0X0001 // Basic error types
#define HERR_UNKNOWN HERR_BASIC_BASE // Unknown error
#define HERR_INVALID_PARAM (HERR_BASIC_BASE + 1) // Invalid parameter
#define HERR_INVALID_IMAGE_STREAM_HANDLE (HERR_BASIC_BASE + 24) // Invalid image stream handle
#define HERR_INVALID_CONTEXT_HANDLE (HERR_BASIC_BASE + 25) // Invalid context handle
#define HERR_INVALID_FACE_TOKEN (HERR_BASIC_BASE + 30) // Invalid face token
#define HERR_INVALID_FACE_FEATURE (HERR_BASIC_BASE + 31) // Invalid face feature
#define HERR_INVALID_FACE_LIST (HERR_BASIC_BASE + 32) // Invalid face feature list
#define HERR_INVALID_BUFFER_SIZE (HERR_BASIC_BASE + 33) // Invalid copy token
#define HERR_INVALID_IMAGE_STREAM_PARAM (HERR_BASIC_BASE + 34) // Invalid image param
#define HERR_INVALID_SERIALIZATION_FAILED (HERR_BASIC_BASE + 35) // Invalid face serialization failed
#define HERR_INVALID_DETECTION_INPUT (HERR_BASIC_BASE + 36) // Failed to modify detector input size
#define HERR_INVALID_IMAGE_BITMAP_HANDLE (HERR_BASIC_BASE + 37) // Invalid image bitmap handle
#define HSUCCEED (0) // Success
#define HERR_SESS_BASE 0X500 // Session error types
#define HERR_SESS_FUNCTION_UNUSABLE (HERR_SESS_BASE + 2) // Function not usable
#define HERR_SESS_TRACKER_FAILURE (HERR_SESS_BASE + 3) // Tracker module not initialized
#define HERR_SESS_INVALID_RESOURCE (HERR_SESS_BASE + 10) // Invalid static resource
#define HERR_SESS_NUM_OF_MODELS_NOT_MATCH (HERR_SESS_BASE + 11) // Number of models does not match
#define HERR_SESS_LANDMARK_NUM_NOT_MATCH (HERR_SESS_BASE + 20) // The number of input landmark points does not match
#define HERR_SESS_LANDMARK_NOT_ENABLE (HERR_SESS_BASE + 21) // The number of input landmark points does not match
#define HERR_SESS_KEY_POINT_NUM_NOT_MATCH (HERR_SESS_BASE + 22) // The number of input key points does not match
// Basic error types (1-99)
#define HERR_BASIC_BASE 0x0001 // Basic error types
#define HERR_UNKNOWN HERR_BASIC_BASE // Unknown error (1)
#define HERR_INVALID_PARAM (HERR_BASIC_BASE + 1) // Invalid parameter (2)
#define HERR_INVALID_IMAGE_STREAM_HANDLE (HERR_BASIC_BASE + 2) // Invalid image stream handle (3)
#define HERR_INVALID_CONTEXT_HANDLE (HERR_BASIC_BASE + 3) // Invalid context handle (4)
#define HERR_INVALID_FACE_TOKEN (HERR_BASIC_BASE + 4) // Invalid face token (5)
#define HERR_INVALID_FACE_FEATURE (HERR_BASIC_BASE + 5) // Invalid face feature (6)
#define HERR_INVALID_FACE_LIST (HERR_BASIC_BASE + 6) // Invalid face feature list (7)
#define HERR_INVALID_BUFFER_SIZE (HERR_BASIC_BASE + 7) // Invalid copy token (8)
#define HERR_INVALID_IMAGE_STREAM_PARAM (HERR_BASIC_BASE + 8) // Invalid image param (9)
#define HERR_INVALID_SERIALIZATION_FAILED (HERR_BASIC_BASE + 9) // Invalid face serialization failed (10)
#define HERR_INVALID_DETECTION_INPUT (HERR_BASIC_BASE + 10) // Failed to modify detector input size (11)
#define HERR_INVALID_IMAGE_BITMAP_HANDLE (HERR_BASIC_BASE + 11) // Invalid image bitmap handle (12)
#define HERR_IMAGE_STREAM_DECODE_FAILED (HERR_BASIC_BASE + 12) // ImageStream failed to decode the image (13)
#define HERR_SESS_PIPELINE_FAILURE (HERR_SESS_BASE + 8) // Pipeline module not initialized
// Session error types (100-199)
#define HERR_SESS_BASE 0x0064 // Session error types (100)
#define HERR_SESS_FUNCTION_UNUSABLE (HERR_SESS_BASE + 1) // Function not usable (101)
#define HERR_SESS_TRACKER_FAILURE (HERR_SESS_BASE + 2) // Tracker module not initialized (102)
#define HERR_SESS_PIPELINE_FAILURE (HERR_SESS_BASE + 3) // Pipeline module not initialized (103)
#define HERR_SESS_INVALID_RESOURCE (HERR_SESS_BASE + 4) // Invalid static resource (104)
#define HERR_SESS_LANDMARK_NUM_NOT_MATCH (HERR_SESS_BASE + 5) // The number of input landmark points does not match (105)
#define HERR_SESS_LANDMARK_NOT_ENABLE (HERR_SESS_BASE + 6) // The landmark model is not enabled (106)
#define HERR_SESS_KEY_POINT_NUM_NOT_MATCH (HERR_SESS_BASE + 7) // The number of input key points does not match (107)
#define HERR_SESS_REC_EXTRACT_FAILURE (HERR_SESS_BASE + 8) // Face feature extraction not registered (108)
#define HERR_SESS_REC_CONTRAST_FEAT_ERR (HERR_SESS_BASE + 9) // Incorrect length of feature vector for comparison (109)
#define HERR_SESS_FACE_DATA_ERROR (HERR_SESS_BASE + 10) // Face data parsing (110)
#define HERR_SESS_FACE_REC_OPTION_ERROR (HERR_SESS_BASE + 11) // An optional parameter is incorrect (111)
#define HERR_SESS_REC_EXTRACT_FAILURE (HERR_SESS_BASE + 15) // Face feature extraction not registered
#define HERR_SESS_REC_DEL_FAILURE (HERR_SESS_BASE + 16) // Face feature deletion failed due to out of range index
#define HERR_SESS_REC_UPDATE_FAILURE (HERR_SESS_BASE + 17) // Face feature update failed due to out of range index
#define HERR_SESS_REC_ADD_FEAT_EMPTY (HERR_SESS_BASE + 18) // Feature vector for registration cannot be empty
#define HERR_SESS_REC_FEAT_SIZE_ERR (HERR_SESS_BASE + 19) // Incorrect length of feature vector for registration
#define HERR_SESS_REC_INVALID_INDEX (HERR_SESS_BASE + 20) // Invalid index number
#define HERR_SESS_REC_CONTRAST_FEAT_ERR (HERR_SESS_BASE + 23) // Incorrect length of feature vector for comparison
#define HERR_SESS_REC_BLOCK_FULL (HERR_SESS_BASE + 24) // Feature vector block full
#define HERR_SESS_REC_BLOCK_DEL_FAILURE (HERR_SESS_BASE + 25) // Deletion failed
#define HERR_SESS_REC_BLOCK_UPDATE_FAILURE (HERR_SESS_BASE + 26) // Update failed
#define HERR_SESS_REC_ID_ALREADY_EXIST (HERR_SESS_BASE + 27) // ID already exists
// FeatureHub error types (200-249)
#define HERR_FT_HUB_BASE 0x00C8 // FeatureHub error types (200)
#define HERR_FT_HUB_DISABLE (HERR_FT_HUB_BASE + 1) // FeatureHub is disabled (201)
#define HERR_FT_HUB_INSERT_FAILURE (HERR_FT_HUB_BASE + 2) // Data insertion error (202)
#define HERR_FT_HUB_NOT_FOUND_FEATURE (HERR_FT_HUB_BASE + 3) // Get face feature error (203)
#define HERR_SESS_FACE_DATA_ERROR (HERR_SESS_BASE + 30) // Face data parsing
// Archive error types (250-299)
#define HERR_ARCHIVE_BASE 0x00FA // Archive error types (250)
#define HERR_ARCHIVE_LOAD_FAILURE (HERR_ARCHIVE_BASE + 1) // Archive load failure (251)
#define HERR_ARCHIVE_LOAD_MODEL_FAILURE (HERR_ARCHIVE_BASE + 2) // Model load failure (252)
#define HERR_ARCHIVE_FILE_FORMAT_ERROR (HERR_ARCHIVE_BASE + 3) // The archive format is incorrect (253)
#define HERR_ARCHIVE_REPETITION_LOAD (HERR_ARCHIVE_BASE + 4) // Do not reload the model (254)
#define HERR_ARCHIVE_NOT_LOAD (HERR_ARCHIVE_BASE + 5) // Model not loaded (255)
#define HERR_SESS_FACE_REC_OPTION_ERROR (HERR_SESS_BASE + 40) // An optional parameter is incorrect
// Device/Hardware error types (300-349)
#define HERR_DEVICE_BASE 0x012C // Hardware error types (300)
#define HERR_DEVICE_CUDA_NOT_SUPPORT (HERR_DEVICE_BASE + 1) // CUDA not supported (301)
#define HERR_DEVICE_CUDA_TENSORRT_NOT_SUPPORT (HERR_DEVICE_BASE + 2) // CUDA TensorRT not supported (302)
#define HERR_DEVICE_CUDA_UNKNOWN_ERROR (HERR_DEVICE_BASE + 3) // CUDA unknown error (303)
#define HERR_DEVICE_CUDA_DISABLE (HERR_DEVICE_BASE + 4) // CUDA support is disabled (304)
#define HERR_FT_HUB_DISABLE (HERR_SESS_BASE + 49) // FeatureHub is disabled
#define HERR_FT_HUB_OPEN_ERROR (HERR_SESS_BASE + 50) // Database open error
#define HERR_FT_HUB_NOT_OPENED (HERR_SESS_BASE + 51) // Database not opened
#define HERR_FT_HUB_NO_RECORD_FOUND (HERR_SESS_BASE + 52) // No record found
#define HERR_FT_HUB_CHECK_TABLE_ERROR (HERR_SESS_BASE + 53) // Data table check error
#define HERR_FT_HUB_INSERT_FAILURE (HERR_SESS_BASE + 54) // Data insertion error
#define HERR_FT_HUB_PREPARING_FAILURE (HERR_SESS_BASE + 55) // Data preparation error
#define HERR_FT_HUB_EXECUTING_FAILURE (HERR_SESS_BASE + 56) // SQL execution error
#define HERR_FT_HUB_NOT_VALID_FOLDER_PATH (HERR_SESS_BASE + 57) // Invalid folder path
#define HERR_FT_HUB_ENABLE_REPETITION (HERR_SESS_BASE + 58) // Enable db function repeatedly
#define HERR_FT_HUB_DISABLE_REPETITION (HERR_SESS_BASE + 59) // Disable db function repeatedly
#define HERR_FT_HUB_NOT_FOUND_FEATURE (HERR_SESS_BASE + 60) // Get face feature error
// Extension module error types (350-549)
#define HERR_EXTENSION_BASE 0x015E // Extension module error types (350)
#define HERR_EXTENSION_ERROR (HERR_EXTENSION_BASE + 1) // Extension module error (351)
#define HERR_EXTENSION_MLMODEL_LOAD_FAILED (HERR_EXTENSION_BASE + 2) // MLModel load failed (352)
#define HERR_EXTENSION_HETERO_MODEL_TAG_ERROR (HERR_EXTENSION_BASE + 3) // Incorrect heterogeneous model tag (353)
#define HERR_EXTENSION_HETERO_REC_HEAD_CONFIG_ERROR (HERR_EXTENSION_BASE + 4) // Rec head config error (354)
#define HERR_EXTENSION_HETERO_MODEL_NOT_MATCH (HERR_EXTENSION_BASE + 5) // Heterogeneous model dimensions do not match (355)
#define HERR_EXTENSION_HETERO_MODEL_NOT_LOADED (HERR_EXTENSION_BASE + 6) // Heterogeneous model dimensions not loaded (356)
#define HERR_ARCHIVE_LOAD_FAILURE (HERR_SESS_BASE + 80) // Archive load failure
#define HERR_ARCHIVE_LOAD_MODEL_FAILURE (HERR_SESS_BASE + 81) // Model load failure
#define HERR_ARCHIVE_FILE_FORMAT_ERROR (HERR_SESS_BASE + 82) // The archive format is incorrect
#define HERR_ARCHIVE_REPETITION_LOAD (HERR_SESS_BASE + 83) // Do not reload the model
#define HERR_ARCHIVE_NOT_LOAD (HERR_SESS_BASE + 84) // Model not loaded
#define HERR_DEVICE_BASE 0X900 // hardware error
#define HERR_DEVICE_CUDA_NOT_SUPPORT (HERR_DEVICE_BASE + 1) // CUDA not supported
#define HERR_DEVICE_CUDA_TENSORRT_NOT_SUPPORT (HERR_DEVICE_BASE + 2) // CUDA TensorRT not supported
#define HERR_DEVICE_CUDA_UNKNOWN_ERROR (HERR_DEVICE_BASE + 20) // CUDA unknown error
#define HERR_DEVICE_CUDA_DISABLE (HERR_DEVICE_BASE + 21) // CUDA support is disabled
// [Anchor-End]
#endif // INSPIRE_FACE_HERROR_H

View File

@@ -0,0 +1,30 @@
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include "data_type.h"
#ifndef INSPIRE_FACE_INFORMATION_H
#define INSPIRE_FACE_INFORMATION_H
#ifdef __cplusplus
extern "C" {
#endif
// C-style function declarations, using API export macro
INSPIRE_API_EXPORT const char* GetInspireFaceVersionMajorStr();
INSPIRE_API_EXPORT const char* GetInspireFaceVersionMinorStr();
INSPIRE_API_EXPORT const char* GetInspireFaceVersionPatchStr();
INSPIRE_API_EXPORT const char* GetInspireFaceExtendedInformation();
#ifdef __cplusplus
}
#endif
// C-style macro definitions
#define INSPIRE_FACE_VERSION_MAJOR_STR GetInspireFaceVersionMajorStr()
#define INSPIRE_FACE_VERSION_MINOR_STR GetInspireFaceVersionMinorStr()
#define INSPIRE_FACE_VERSION_PATCH_STR GetInspireFaceVersionPatchStr()
#define INSPIRE_FACE_EXTENDED_INFORMATION GetInspireFaceExtendedInformation()
#endif // INSPIRE_FACE_INFORMATION_H

View File

@@ -32,12 +32,19 @@ public:
NN_INFERENCE_TENSORRT_CUDA,
};
// Landmark engine enum
enum LandmarkEngine {
LANDMARK_HYPLMV2_0_25 = 0,
LANDMARK_HYPLMV2_0_50,
LANDMARK_INSIGHTFACE_2D106_TRACK,
};
// Image processing backend engine
enum ImageProcessingBackend {
IMAGE_PROCESSING_CPU = 0, // CPU backend(Default)
IMAGE_PROCESSING_RGA, // Rockchip RGA backend(Hardware support is mandatory)
};
Launch(const Launch&) = delete; // Delete the copy constructor to prevent copying.
Launch& operator=(const Launch&) = delete; // Delete the assignment operator to prevent assignment.
~Launch(); // Destructor needs to be defined where the implementation is complete
@@ -101,9 +108,21 @@ public:
// Get the face detect model list
std::vector<std::string> GetFaceDetectModelList() const;
// Switch the landmark engine
// Switch the landmark engine(It must be used before creating a session)
void SwitchLandmarkEngine(LandmarkEngine engine);
// Switch the image processing backend(It must be used before creating a session)
void SwitchImageProcessingBackend(ImageProcessingBackend backend);
// Get the image processing backend
ImageProcessingBackend GetImageProcessingBackend() const;
// Set the ImageProcess Aligned Width(It must be used before creating a session)
void SetImageProcessAlignedWidth(int32_t width);
// Get the ImageProcess Aligned Width
int32_t GetImageProcessAlignedWidth() const;
private:
// Private constructor for the singleton pattern
Launch();

View File

@@ -0,0 +1,54 @@
#ifndef INSPIRE_FACE_META_H
#define INSPIRE_FACE_META_H
#include <iostream>
#include <string>
#include "data_type.h"
namespace inspire {
/**
* @brief SDK meta information
*/
struct INSPIRE_API_EXPORT SDKInfo {
// version
int version_major;
int version_minor;
int version_patch;
// series name
std::string series;
// build info
std::string build_date;
std::string build_time;
std::string compiler;
std::string platform;
std::string architecture;
// backend info
std::string inference_backend;
std::string inspirecv_backend;
bool rga_backend_enabled;
// description
std::string description;
std::string GetFullVersionInfo() const;
// auxiliary methods: return the string form of the version number
std::string GetVersionMajorStr() const;
std::string GetVersionMinorStr() const;
std::string GetVersionPatchStr() const;
std::string GetVersionString() const;
};
/**
* @brief Get the SDK info
* @return The constant reference of the SDK info
*/
INSPIRE_API_EXPORT const SDKInfo& GetSDKInfo();
} // namespace inspire
#endif // INSPIRE_FACE_META_H

View File

@@ -47,6 +47,23 @@ public:
return new Session(Create(detect_mode, max_detect_face, param, detect_level_px, track_by_detect_mode_fps));
}
/**
* @brief Clear the tracking face
*/
void ClearTrackingFace();
/**
* @brief Set the track lost recovery mode(only for LightTrack mode, default is false)
* @param value The track lost recovery mode value
*/
void SetTrackLostRecoveryMode(bool value);
/**
* @brief Set the light track confidence threshold
* @param value Light track confidence threshold
*/
void SetLightTrackConfidenceThreshold(float value);
/**
* @brief Set the track preview size.
* @param preview_size The preview size.

View File

@@ -1,15 +0,0 @@
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#ifndef INSPIRE_FACE_INFORMATION_H
#define INSPIRE_FACE_INFORMATION_H
#define INSPIRE_FACE_VERSION_MAJOR_STR "@INSPIRE_FACE_VERSION_MAJOR_STR@"
#define INSPIRE_FACE_VERSION_MINOR_STR "@INSPIRE_FACE_VERSION_MINOR_STR@"
#define INSPIRE_FACE_VERSION_PATCH_STR "@INSPIRE_FACE_VERSION_PATCH_STR@"
#define INSPIRE_FACE_EXTENDED_INFORMATION "@EXTENDED_INFORMATION@"
#endif // INSPIRE_FACE_INFORMATION_H

View File

@@ -0,0 +1,137 @@
#include "meta.h"
#include <sstream>
#include <iomanip>
#include <cstdio>
namespace inspire {
// static global SDK info instance
static const SDKInfo g_sdk_info = {
// version
@INSPIRE_FACE_VERSION_MAJOR_STR@, // version_major
@INSPIRE_FACE_VERSION_MINOR_STR@, // version_minor
@INSPIRE_FACE_VERSION_PATCH_STR@, // version_patch
// series name
"@INSPIRE_FACE_SERIES_NAME@", // series
// build info
__DATE__, // build_date
__TIME__, // build_time
#ifdef __GNUC__
"GCC " __VERSION__, // compiler
#elif defined(_MSC_VER)
"MSVC " + std::to_string(_MSC_VER),
#elif defined(__clang__)
"Clang " __VERSION__,
#else
"Unknown Compiler",
#endif
#ifdef _WIN32
"Windows", // platform
#elif defined(__APPLE__)
"MacOS",
#elif defined(__linux__)
"Linux",
#elif defined(__ANDROID__)
"Android",
#else
"Unknown Platform",
#endif
#if defined(__x86_64__) || defined(_M_X64)
"x86_64", // architecture
#elif defined(__i386) || defined(_M_IX86)
"x86",
#elif defined(__aarch64__) || defined(_M_ARM64)
"arm64",
#elif defined(__arm__) || defined(_M_ARM)
"arm",
#else
"unknown",
#endif
// backend info
#if defined(ISF_ENABLE_TENSORRT)
"TensorRT(CUDA)", // inference_backend
#elif defined(ISF_ENABLE_APPLE_EXTENSION)
"CoreML(ANE)",
#elif defined(ISF_ENABLE_RKNN)
"RKNN(RKNPU)",
#else
"MNN(CPU-Only)",
#endif
#if defined(INSPIRECV_BACKEND_OPENCV)
"InspireCV-OpenCV", // inspirecv_backend
#else
"InspireCV",
#endif
#if defined(ISF_ENABLE_RGA)
true, // rga_backend_enabled
#else
false,
#endif
"InspireFace SDK - Advanced Face Recognition Library" // description
};
std::string SDKInfo::GetFullVersionInfo() const {
std::stringstream ss;
ss << "InspireFace SDK " << "[" << series << "]" << " v" << version_major << "." << version_minor << "." << version_patch
<< " (Built: " << build_date << " " << build_time << ")"
<< " [" << platform << "/" << architecture << "/" << compiler << "]"
<< " Backend: " << inference_backend << " " << inspirecv_backend << "[CPU" << (rga_backend_enabled ? ", RGA" : "") << "]";
return ss.str();
}
std::string SDKInfo::GetVersionMajorStr() const {
return std::to_string(version_major);
}
std::string SDKInfo::GetVersionMinorStr() const {
return std::to_string(version_minor);
}
std::string SDKInfo::GetVersionPatchStr() const {
return std::to_string(version_patch);
}
std::string SDKInfo::GetVersionString() const {
return std::to_string(version_major) + "." + std::to_string(version_minor) + "." + std::to_string(version_patch);
}
const SDKInfo& GetSDKInfo() {
return g_sdk_info;
}
} // namespace inspire
extern "C" {
INSPIRE_API_EXPORT const char* GetInspireFaceVersionMajorStr() {
static char buffer[16];
snprintf(buffer, sizeof(buffer), "%d", inspire::g_sdk_info.version_major);
return buffer;
}
INSPIRE_API_EXPORT const char* GetInspireFaceVersionMinorStr() {
static char buffer[16];
snprintf(buffer, sizeof(buffer), "%d", inspire::g_sdk_info.version_minor);
return buffer;
}
INSPIRE_API_EXPORT const char* GetInspireFaceVersionPatchStr() {
static char buffer[16];
snprintf(buffer, sizeof(buffer), "%d", inspire::g_sdk_info.version_patch);
return buffer;
}
INSPIRE_API_EXPORT const char* GetInspireFaceExtendedInformation() {
return inspire::g_sdk_info.description.c_str();
}
}

View File

@@ -37,7 +37,10 @@ public:
* @param name Name of the neural network.
*/
explicit AnyNetAdapter(std::string name) : m_name_(std::move(name)) {
m_processor_ = nexus::ImageProcessor::Create();
m_processor_ = nexus::ImageProcessor::Create(INSPIREFACE_CONTEXT->GetImageProcessingBackend());
#if defined(ISF_ENABLE_RKNN)
m_processor_->SetAlignedWidth(INSPIREFACE_CONTEXT->GetImageProcessAlignedWidth());
#endif
}
~AnyNetAdapter() {

View File

@@ -94,6 +94,27 @@ struct BoundingBox {
int right_bottom_y;
};
inline inspirecv::Rect2i AlignmentBoxToStrideSquareBox(const inspirecv::Rect2i &bbox, int stride) {
// 1. Convert xywh to cxcywh (center point coordinates and width/height)
int center_x = bbox.GetX() + bbox.GetWidth() / 2;
int center_y = bbox.GetY() + bbox.GetHeight() / 2;
int width = bbox.GetWidth();
int height = bbox.GetHeight();
// 2. Get the shortest side of the width and height
int min_side = std::min(width, height);
// 3. Align the shortest side to the stride
int aligned_side = (min_side / stride) * stride;
// 4. Create a square box (keep the center point unchanged)
int half_side = aligned_side / 2;
int new_x = center_x - half_side;
int new_y = center_y - half_side;
// 5. Convert cxcywh back to xywh
return inspirecv::Rect2i(new_x, new_y, aligned_side, aligned_side);
}
inline inspirecv::Rect2i GetNewBox(int src_w, int src_h, inspirecv::Rect2i bbox, float scale) {
// Convert cv::Rect to BoundingBox

View File

@@ -94,6 +94,7 @@ int32_t FacePipelineModule::Process(inspirecv::FrameProcess &processor, const Fa
switch (proc) {
case PROCESS_MASK: {
if (m_mask_predict_ == nullptr) {
INSPIRE_LOGE("Mask detection disabled");
return HERR_SESS_PIPELINE_FAILURE; // uninitialized
}
std::vector<inspirecv::Point2f> pointsFive;
@@ -110,6 +111,7 @@ int32_t FacePipelineModule::Process(inspirecv::FrameProcess &processor, const Fa
}
case PROCESS_RGB_LIVENESS: {
if (m_rgb_anti_spoofing_ == nullptr) {
INSPIRE_LOGE("RGB liveness detection disabled");
return HERR_SESS_PIPELINE_FAILURE; // uninitialized
}
// New scheme: padding differences cause errors in inference results
@@ -139,6 +141,10 @@ int32_t FacePipelineModule::Process(inspirecv::FrameProcess &processor, const Fa
}
inspirecv::Rect2i oriRect(face.rect.x, face.rect.y, face.rect.width, face.rect.height);
auto rect = GetNewBox(originImage.Width(), originImage.Height(), oriRect, 2.7f);
if (Launch::GetInstance()->GetImageProcessingBackend() == Launch::IMAGE_PROCESSING_RGA) {
// RKRGA must be aligned to 16
rect = AlignmentBoxToStrideSquareBox(rect, 16);
}
auto crop = originImage.Crop(rect);
auto score = (*m_rgb_anti_spoofing_)(crop);
// crop.Show();
@@ -148,6 +154,7 @@ int32_t FacePipelineModule::Process(inspirecv::FrameProcess &processor, const Fa
}
case PROCESS_INTERACTION: {
if (m_blink_predict_ == nullptr) {
INSPIRE_LOGE("Interaction action detection disabled");
return HERR_SESS_PIPELINE_FAILURE; // uninitialized
}
std::vector<std::vector<int>> order_list = {m_landmark_param_->semantic_index.left_eye_region, m_landmark_param_->semantic_index.right_eye_region};
@@ -187,6 +194,7 @@ int32_t FacePipelineModule::Process(inspirecv::FrameProcess &processor, const Fa
}
case PROCESS_ATTRIBUTE: {
if (m_attribute_predict_ == nullptr) {
INSPIRE_LOGE("Face attribute detection disabled");
return HERR_SESS_PIPELINE_FAILURE; // uninitialized
}
std::vector<inspirecv::Point2f> pointsFive;
@@ -201,6 +209,7 @@ int32_t FacePipelineModule::Process(inspirecv::FrameProcess &processor, const Fa
}
case PROCESS_FACE_EMOTION: {
if (m_face_emotion_ == nullptr) {
INSPIRE_LOGE("Face emotion detection disabled");
return HERR_SESS_PIPELINE_FAILURE; // uninitialized
}
std::vector<inspirecv::Point2f> pointsFive;

View File

@@ -97,6 +97,7 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_CreateSession)(JNIEnv *env, jobje
jfieldID enableFaceAttributeField = env->GetFieldID(customParamClass, "enableFaceAttribute", "I");
jfieldID enableInteractionLivenessField = env->GetFieldID(customParamClass, "enableInteractionLiveness", "I");
jfieldID enableFacePoseField = env->GetFieldID(customParamClass, "enableFacePose", "I");
jfieldID enableFaceEmotionField = env->GetFieldID(customParamClass, "enableFaceEmotion", "I");
// Create HFSessionCustomParameter struct
HFSessionCustomParameter parameter;
@@ -108,6 +109,7 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_CreateSession)(JNIEnv *env, jobje
parameter.enable_face_attribute = env->GetIntField(customParameter, enableFaceAttributeField);
parameter.enable_interaction_liveness = env->GetIntField(customParameter, enableInteractionLivenessField);
parameter.enable_face_pose = env->GetIntField(customParameter, enableFacePoseField);
parameter.enable_face_emotion = env->GetIntField(customParameter, enableFaceEmotionField);
// Create session
HFSession handle;
@@ -345,9 +347,13 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_ExecuteFaceTrack)(JNIEnv *env, jo
jclass tokenClass = env->FindClass("com/insightface/sdk/inspireface/base/FaceBasicToken");
jobjectArray tokenArray = env->NewObjectArray(results.detectedNum, tokenClass, nullptr);
jmethodID tokenConstructor = env->GetMethodID(tokenClass, "<init>", "()V");
jfieldID tokenHandleField = env->GetFieldID(tokenClass, "handle", "J");
jfieldID tokenDataField = env->GetFieldID(tokenClass, "data", "[B");
jfieldID sizeField = env->GetFieldID(tokenClass, "size", "I");
// Get token size first
HInt32 tokenSize = 0;
HFGetFaceBasicTokenSize(&tokenSize);
for (int i = 0; i < results.detectedNum; i++) {
// Set face rect
jobject rect = env->NewObject(faceRectClass, rectConstructor);
@@ -364,11 +370,28 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_ExecuteFaceTrack)(JNIEnv *env, jo
env->SetFloatField(angle, pitchField, *results.angles.pitch);
env->SetObjectArrayElement(angleArray, i, angle);
// Set token
// Create token object
jobject token = env->NewObject(tokenClass, tokenConstructor);
env->SetLongField(token, tokenHandleField, (jlong)results.tokens[i].data);
env->SetIntField(token, sizeField, results.tokens[i].size);
// Create byte array to hold token data
jbyteArray dataArray = env->NewByteArray(tokenSize);
jbyte* buffer = env->GetByteArrayElements(dataArray, nullptr);
// Copy token data using HFCopyFaceBasicToken
HResult copyResult = HFCopyFaceBasicToken(results.tokens[i], reinterpret_cast<char*>(buffer), tokenSize);
if (copyResult == HSUCCEED) {
// Set data and size fields
env->SetObjectField(token, tokenDataField, dataArray);
env->SetIntField(token, sizeField, tokenSize);
env->ReleaseByteArrayElements(dataArray, buffer, 0);
} else {
INSPIRE_LOGE("Failed to copy token data for face %d, error code: %d", i, copyResult);
env->ReleaseByteArrayElements(dataArray, buffer, JNI_ABORT);
env->DeleteLocalRef(dataArray);
}
env->SetObjectArrayElement(tokenArray, i, token);
// Release local references
env->DeleteLocalRef(rect);
env->DeleteLocalRef(angle);
env->DeleteLocalRef(token);
@@ -400,12 +423,17 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_ExecuteFaceTrack)(JNIEnv *env, jo
* @return The face dense landmark array.
*/
JNIEXPORT jobjectArray INSPIRE_FACE_JNI(InspireFace_GetFaceDenseLandmarkFromFaceToken)(JNIEnv *env, jobject thiz, jobject token) {
// Get token handle and size from FaceBasicToken object
// Get token data and size from FaceBasicToken object
jclass tokenClass = env->GetObjectClass(token);
jfieldID handleField = env->GetFieldID(tokenClass, "handle", "J");
jfieldID sizeField = env->GetFieldID(tokenClass, "size", "I");
jlong handle = env->GetLongField(token, handleField);
jint size = env->GetIntField(token, sizeField);
jfieldID tokenDataField = env->GetFieldID(tokenClass, "data", "[B");
jfieldID tokenSizeField = env->GetFieldID(tokenClass, "size", "I");
jbyteArray tokenDataArray = (jbyteArray)env->GetObjectField(token, tokenDataField);
jint tokenSize = env->GetIntField(token, tokenSizeField);
if (tokenDataArray == nullptr) {
INSPIRE_LOGE("Token data array is null");
return nullptr;
}
// Get number of landmarks
int32_t numLandmarks = 0;
@@ -414,14 +442,17 @@ JNIEXPORT jobjectArray INSPIRE_FACE_JNI(InspireFace_GetFaceDenseLandmarkFromFace
// Allocate memory for landmarks
HPoint2f *landmarks = new HPoint2f[numLandmarks];
// Create face token struct
// Create face token struct from byte array data
HFFaceBasicToken faceToken;
faceToken.size = size;
faceToken.data = reinterpret_cast<void *>(handle);
faceToken.size = tokenSize;
faceToken.data = env->GetByteArrayElements(tokenDataArray, nullptr);
// Get landmarks from token
HResult result = HFGetFaceDenseLandmarkFromFaceToken(faceToken, landmarks, numLandmarks);
// Release byte array elements
env->ReleaseByteArrayElements(tokenDataArray, (jbyte*)faceToken.data, JNI_ABORT);
if (result != HSUCCEED) {
INSPIRE_LOGE("Failed to get face dense landmark from face token, error code: %d", result);
delete[] landmarks;
@@ -468,23 +499,31 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_ExtractFaceFeature)(JNIEnv *env,
jfieldID streamHandleField = env->GetFieldID(streamClass, "handle", "J");
jlong streamHandleValue = env->GetLongField(streamHandle, streamHandleField);
// Get token handle and size
// Get token data and size from FaceBasicToken object
jclass tokenClass = env->GetObjectClass(token);
jfieldID tokenHandleField = env->GetFieldID(tokenClass, "handle", "J");
jfieldID tokenDataField = env->GetFieldID(tokenClass, "data", "[B");
jfieldID tokenSizeField = env->GetFieldID(tokenClass, "size", "I");
jlong tokenHandle = env->GetLongField(token, tokenHandleField);
jbyteArray tokenDataArray = (jbyteArray)env->GetObjectField(token, tokenDataField);
jint tokenSize = env->GetIntField(token, tokenSizeField);
if (tokenDataArray == nullptr) {
INSPIRE_LOGE("Token data array is null");
return nullptr;
}
// Create face token struct
// Create face token struct from byte array data
HFFaceBasicToken faceToken;
faceToken.size = tokenSize;
faceToken.data = reinterpret_cast<void *>(tokenHandle);
faceToken.data = env->GetByteArrayElements(tokenDataArray, nullptr);
// Extract face feature
HFFaceFeature feature;
HResult result =
HFFaceFeatureExtract(reinterpret_cast<HFSession>(sessionHandle), reinterpret_cast<HFImageStream>(streamHandleValue), faceToken, &feature);
// Release byte array elements
env->ReleaseByteArrayElements(tokenDataArray, (jbyte*)faceToken.data, JNI_ABORT);
if (result != HSUCCEED) {
INSPIRE_LOGE("Failed to extract face feature, error code: %d", result);
return nullptr;
@@ -527,23 +566,31 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_GetFaceAlignmentImage)(JNIEnv *en
jfieldID streamHandleField = env->GetFieldID(streamClass, "handle", "J");
jlong streamHandleValue = env->GetLongField(streamHandle, streamHandleField);
// Get token handle and size
// Get token data and size from FaceBasicToken object
jclass tokenClass = env->GetObjectClass(token);
jfieldID tokenHandleField = env->GetFieldID(tokenClass, "handle", "J");
jfieldID tokenDataField = env->GetFieldID(tokenClass, "data", "[B");
jfieldID tokenSizeField = env->GetFieldID(tokenClass, "size", "I");
jlong tokenHandle = env->GetLongField(token, tokenHandleField);
jbyteArray tokenDataArray = (jbyteArray)env->GetObjectField(token, tokenDataField);
jint tokenSize = env->GetIntField(token, tokenSizeField);
if (tokenDataArray == nullptr) {
INSPIRE_LOGE("Token data array is null");
return nullptr;
}
// Create face token struct
// Create face token struct from byte array data
HFFaceBasicToken faceToken;
faceToken.size = tokenSize;
faceToken.data = reinterpret_cast<void *>(tokenHandle);
faceToken.data = env->GetByteArrayElements(tokenDataArray, nullptr);
// Get face alignment image
HFImageBitmap imageBitmap;
HResult result = HFFaceGetFaceAlignmentImage(reinterpret_cast<HFSession>(sessionHandle), reinterpret_cast<HFImageStream>(streamHandleValue),
faceToken, &imageBitmap);
// Release byte array elements
env->ReleaseByteArrayElements(tokenDataArray, (jbyte*)faceToken.data, JNI_ABORT);
if (result != HSUCCEED) {
INSPIRE_LOGE("Failed to get face alignment image, error code: %d", result);
return nullptr;
@@ -1292,16 +1339,34 @@ JNIEXPORT jboolean INSPIRE_FACE_JNI(InspireFace_MultipleFacePipelineProcess)(JNI
// Get token data
HFFaceBasicToken *tokens = nullptr;
std::vector<jbyteArray> tokenDataArrays;
std::vector<jbyte*> tokenBuffers;
if (detectedNum > 0) {
jclass tokenClass = env->FindClass("com/insightface/sdk/inspireface/base/FaceBasicToken");
jfieldID handleTokenField = env->GetFieldID(tokenClass, "handle", "J");
jfieldID tokenDataField = env->GetFieldID(tokenClass, "data", "[B");
jfieldID sizeField = env->GetFieldID(tokenClass, "size", "I");
tokens = new HFFaceBasicToken[detectedNum];
tokenDataArrays.resize(detectedNum);
tokenBuffers.resize(detectedNum);
for (int i = 0; i < detectedNum; i++) {
jobject token = env->GetObjectArrayElement(tokenArray, i);
tokens[i].data = (void *)env->GetLongField(token, handleTokenField);
tokens[i].size = env->GetIntField(token, sizeField);
jbyteArray dataArray = (jbyteArray)env->GetObjectField(token, tokenDataField);
jint size = env->GetIntField(token, sizeField);
if (dataArray != nullptr) {
tokens[i].size = size;
tokens[i].data = env->GetByteArrayElements(dataArray, nullptr);
tokenDataArrays[i] = dataArray;
tokenBuffers[i] = (jbyte*)tokens[i].data;
} else {
tokens[i].size = 0;
tokens[i].data = nullptr;
tokenDataArrays[i] = nullptr;
tokenBuffers[i] = nullptr;
}
env->DeleteLocalRef(token);
}
faceData.tokens = tokens;
@@ -1319,6 +1384,7 @@ JNIEXPORT jboolean INSPIRE_FACE_JNI(InspireFace_MultipleFacePipelineProcess)(JNI
jfieldID enableFaceAttributeField = env->GetFieldID(paramClass, "enableFaceAttribute", "I");
jfieldID enableInteractionLivenessField = env->GetFieldID(paramClass, "enableInteractionLiveness", "I");
jfieldID enableFacePoseField = env->GetFieldID(paramClass, "enableFacePose", "I");
jfieldID enableFaceEmotionField = env->GetFieldID(paramClass, "enableFaceEmotion", "I");
// Get parameter values
HFSessionCustomParameter customParam;
customParam.enable_recognition = env->GetIntField(parameter, enableRecognitionField);
@@ -1329,11 +1395,17 @@ JNIEXPORT jboolean INSPIRE_FACE_JNI(InspireFace_MultipleFacePipelineProcess)(JNI
customParam.enable_face_attribute = env->GetIntField(parameter, enableFaceAttributeField);
customParam.enable_interaction_liveness = env->GetIntField(parameter, enableInteractionLivenessField);
customParam.enable_face_pose = env->GetIntField(parameter, enableFacePoseField);
customParam.enable_face_emotion = env->GetIntField(parameter, enableFaceEmotionField);
// Call native function
HResult ret = HFMultipleFacePipelineProcess((HFSession)sessionHandle, (HFImageStream)streamHandleValue, &faceData, customParam);
// Clean up allocated memory
if (tokens != nullptr) {
for (int i = 0; i < detectedNum; i++) {
if (tokenDataArrays[i] != nullptr && tokenBuffers[i] != nullptr) {
env->ReleaseByteArrayElements(tokenDataArrays[i], tokenBuffers[i], JNI_ABORT);
}
}
delete[] tokens;
}
@@ -1676,6 +1748,79 @@ JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_GetFaceAttributeResult)(JNIEnv *e
return attributeObj;
}
/**
 * @brief Get the face emotion result for the most recently processed frame.
 *
 * Reads the emotion prediction from the native session and marshals it into a
 * Java FaceEmotionResult object ("num" count plus an int[] of emotion codes).
 *
 * @param env The JNI environment.
 * @param thiz The Java object (unused).
 * @param session The Java Session object; its "handle" long field holds the native HFSession.
 * @return A populated FaceEmotionResult object, or nullptr on any failure (errors are logged).
 */
JNIEXPORT jobject INSPIRE_FACE_JNI(InspireFace_GetFaceEmotionResult)(JNIEnv *env, jobject thiz, jobject session) {
    // Validate input parameters
    if (!env || !session) {
        INSPIRE_LOGE("Invalid input parameters");
        return nullptr;
    }
    // Get session handle from the Java object's "handle" long field
    jclass sessionClass = env->GetObjectClass(session);
    jfieldID handleField = env->GetFieldID(sessionClass, "handle", "J");
    jlong sessionHandle = env->GetLongField(session, handleField);
    if (!sessionHandle) {
        INSPIRE_LOGE("Invalid session handle");
        return nullptr;
    }
    // Get face emotion results; value-initialized so num is 0 unless the call fills it.
    // NOTE(review): results.emotion is only read here, never freed — presumably it
    // points at memory owned by the native session; confirm its lifetime.
    HFFaceEmotionResult results = {};
    HResult ret = HFGetFaceEmotionResult((HFSession)sessionHandle, &results);
    if (ret != HSUCCEED) {
        INSPIRE_LOGE("Failed to get face emotion result, error code: %d", ret);
        return nullptr;
    }
    // Create Java FaceEmotionResult object via its no-argument constructor
    jclass emotionClass = env->FindClass("com/insightface/sdk/inspireface/base/FaceEmotionResult");
    if (!emotionClass) {
        INSPIRE_LOGE("Failed to find FaceEmotionResult class");
        return nullptr;
    }
    jmethodID constructor = env->GetMethodID(emotionClass, "<init>", "()V");
    jobject emotionObj = env->NewObject(emotionClass, constructor);
    if (!emotionObj) {
        INSPIRE_LOGE("Failed to create FaceEmotionResult object");
        return nullptr;
    }
    // Look up the "num" (int) and "emotion" (int[]) fields to populate
    jfieldID numField = env->GetFieldID(emotionClass, "num", "I");
    jfieldID emotionField = env->GetFieldID(emotionClass, "emotion", "[I");
    if (!numField || !emotionField) {
        INSPIRE_LOGE("Failed to get field IDs");
        return nullptr;
    }
    // Set num
    env->SetIntField(emotionObj, numField, results.num);
    // Copy the native emotion codes into a fresh Java int[] and attach it
    jintArray emotionArray = env->NewIntArray(results.num);
    if (!emotionArray) {
        INSPIRE_LOGE("Failed to create emotion array");
        return nullptr;
    }
    env->SetIntArrayRegion(emotionArray, 0, results.num, results.emotion);
    env->SetObjectField(emotionObj, emotionField, emotionArray);
    // Local refs are reclaimed on return; this just trims the local frame early
    env->DeleteLocalRef(emotionArray);
    return emotionObj;
}
/**
* @brief Query the InspireFace version.
*

View File

@@ -19,6 +19,7 @@
#if defined(ISF_ENABLE_TENSORRT)
#include "cuda_toolkit.h"
#endif
#include "meta.h"
#define APPLE_EXTENSION_SUFFIX ".bundle"
@@ -27,8 +28,15 @@ namespace inspire {
// Implementation class definition
class Launch::Impl {
public:
Impl() : m_load_(false), m_archive_(nullptr), m_cuda_device_id_(0), m_global_coreml_inference_mode_(InferenceWrapper::COREML_ANE) {
Impl()
: m_load_(false),
m_archive_(nullptr),
m_cuda_device_id_(0),
m_global_coreml_inference_mode_(InferenceWrapper::COREML_ANE),
m_image_processing_backend_(IMAGE_PROCESSING_CPU) {
#if defined(ISF_ENABLE_RGA)
m_image_processing_backend_ = IMAGE_PROCESSING_RGA;
INSPIRE_LOGW("Default image processing backend is RGA.");
#if defined(ISF_RKNPU_RV1106)
m_rockchip_dma_heap_path_ = RV1106_CMA_HEAP_PATH;
#else
@@ -38,6 +46,7 @@ public:
#endif
m_face_detect_pixel_list_ = {160, 320, 640};
m_face_detect_model_list_ = {"face_detect_160", "face_detect_320", "face_detect_640"};
INSPIRE_LOGI("%s", GetSDKInfo().GetFullVersionInfo().c_str());
}
// Face Detection pixel size
std::vector<int32_t> m_face_detect_pixel_list_;
@@ -56,6 +65,8 @@ public:
bool m_load_;
int32_t m_cuda_device_id_;
InferenceWrapper::SpecialBackend m_global_coreml_inference_mode_;
Launch::ImageProcessingBackend m_image_processing_backend_;
int32_t m_image_process_aligned_width_{4};
};
// Initialize static members
@@ -291,4 +302,33 @@ void Launch::SwitchLandmarkEngine(LandmarkEngine engine) {
INSPIREFACE_CHECK_MSG(ret, "Failed to switch landmark engine");
}
// Select the image processing backend. Requesting the RGA backend on a build
// compiled without ISF_ENABLE_RGA is rejected with an error log and leaves the
// current backend unchanged; every other request is applied directly.
void Launch::SwitchImageProcessingBackend(ImageProcessingBackend backend) {
    std::lock_guard<std::mutex> guard(pImpl->mutex_);
#if !defined(ISF_ENABLE_RGA)
    if (backend == IMAGE_PROCESSING_RGA) {
        INSPIRE_LOGE("RKRGA is not enabled, please check the build configuration.");
        return;
    }
#endif  // !ISF_ENABLE_RGA
    pImpl->m_image_processing_backend_ = backend;
}
// Returns the currently selected image processing backend (thread-safe).
Launch::ImageProcessingBackend Launch::GetImageProcessingBackend() const {
    std::lock_guard<std::mutex> lock(pImpl->mutex_);
    return pImpl->m_image_processing_backend_;
}

// Sets the image-processing aligned width (thread-safe).
// NOTE(review): presumably a row/stride alignment in pixels for the image
// pipeline (default 4 per Impl) — confirm against the processing backend.
void Launch::SetImageProcessAlignedWidth(int32_t width) {
    std::lock_guard<std::mutex> lock(pImpl->mutex_);
    pImpl->m_image_process_aligned_width_ = width;
}

// Returns the configured image-processing aligned width (thread-safe).
int32_t Launch::GetImageProcessAlignedWidth() const {
    std::lock_guard<std::mutex> lock(pImpl->mutex_);
    return pImpl->m_image_process_aligned_width_;
}
} // namespace inspire

View File

@@ -14,6 +14,10 @@ public:
return m_face_session_->Configuration(detect_mode, max_detect_face, param, detect_level_px, track_by_detect_mode_fps);
}
void ClearTrackingFace() {
m_face_session_->ClearTrackingFace();
}
~Impl() = default;
void SetTrackPreviewSize(int32_t preview_size) {
@@ -40,6 +44,14 @@ public:
m_face_session_->SetTrackModeDetectInterval(detect_interval);
}
void SetTrackLostRecoveryMode(bool value) {
m_face_session_->SetTrackLostRecoveryMode(value);
}
void SetLightTrackConfidenceThreshold(float value) {
m_face_session_->SetLightTrackConfidenceThreshold(value);
}
int32_t FaceDetectAndTrack(inspirecv::FrameProcess& process, std::vector<FaceTrackWrap>& results) {
int32_t ret = m_face_session_->FaceDetectAndTrack(process);
if (ret < 0) {
@@ -200,6 +212,18 @@ Session Session::Create(DetectModuleMode detect_mode, int32_t max_detect_face, c
return session;
}
// Clears all faces currently tracked by the underlying face session.
void Session::ClearTrackingFace() {
    pImpl->ClearTrackingFace();
}

// Enables/disables track-lost recovery mode; forwarded to the face session.
void Session::SetTrackLostRecoveryMode(bool value) {
    pImpl->SetTrackLostRecoveryMode(value);
}

// Sets the light-track confidence threshold; forwarded to the face session.
void Session::SetLightTrackConfidenceThreshold(float value) {
    pImpl->SetLightTrackConfidenceThreshold(value);
}

// Sets the preview size used during tracking; forwarded to the face session.
void Session::SetTrackPreviewSize(int32_t preview_size) {
    pImpl->SetTrackPreviewSize(preview_size);
}

View File

@@ -11,8 +11,15 @@ float RNetAdapt::operator()(const inspirecv::Image &bgr_affine) {
// auto resized = bgr_affine.Resize(24, 24);
uint8_t *resized_data = nullptr;
float scale;
m_processor_->Resize(bgr_affine.Data(), bgr_affine.Width(), bgr_affine.Height(), bgr_affine.Channels(), &resized_data, 24, 24);
auto resized = inspirecv::Image::Create(24, 24, bgr_affine.Channels(), resized_data, false);
auto ret = m_processor_->Resize(bgr_affine.Data(), bgr_affine.Width(), bgr_affine.Height(), bgr_affine.Channels(), &resized_data, 24, 24);
inspirecv::Image resized;
if (ret == -1) {
// Some RK devices seem unable to resize to 24x24, fallback to CPU processing
resized = bgr_affine.Resize(24, 24);
} else {
// RGA resize success
resized = inspirecv::Image::Create(24, 24, bgr_affine.Channels(), resized_data, false);
}
AnyTensorOutputs outputs;
Forward(resized, outputs);

View File

@@ -61,8 +61,9 @@ float FaceTrackModule::PredictTrackScore(const inspirecv::Image &raw_face_crop)
bool FaceTrackModule::TrackFace(inspirecv::FrameProcess &image, FaceObjectInternal &face) {
COST_TIME_SIMPLE(TrackFace);
// If the face confidence level is below 0.1, disable tracking
if (face.GetConfidence() < 0.1) {
// If the track lost recovery mode is enabled, the lag information of the previous frame will not be used in the current frame
if (face.GetConfidence() < m_light_track_confidence_threshold_ && !m_track_lost_recovery_mode_) {
// If the face confidence level is below the threshold, disable tracking
face.DisableTracking();
return false;
}
@@ -134,6 +135,13 @@ bool FaceTrackModule::TrackFace(inspirecv::FrameProcess &image, FaceObjectIntern
score = PredictTrackScore(track_crop);
// track_crop.Show("track_crop");
// If the track lost recovery mode is enabled,
// it will determine whether to discard the invalid face that has been tracked in the current frame
if (score < m_light_track_confidence_threshold_ && m_track_lost_recovery_mode_) {
face.DisableTracking();
return false;
}
for (int i = 0; i < m_multiscale_landmark_scales_.size(); i++) {
inspirecv::Image crop;
// Get the RGB image after affine transformation
@@ -222,6 +230,9 @@ void FaceTrackModule::UpdateStream(inspirecv::FrameProcess &image) {
detection_index_ += 1;
if (m_mode_ == DETECT_MODE_ALWAYS_DETECT || m_mode_ == DETECT_MODE_TRACK_BY_DETECT)
trackingFace.clear();
// Record whether the detection has been performed in this frame
bool detection_executed = false;
if (trackingFace.empty() || (detection_interval_ > 0 && detection_index_ % detection_interval_ == 0) || m_mode_ == DETECT_MODE_ALWAYS_DETECT ||
m_mode_ == DETECT_MODE_TRACK_BY_DETECT) {
image.SetPreviewSize(track_preview_size_);
@@ -241,6 +252,7 @@ void FaceTrackModule::UpdateStream(inspirecv::FrameProcess &image) {
Timer det_cost_time;
DetectFace(image_detect, image.GetPreviewScale());
detection_executed = true;
}
if (!candidate_faces_.empty()) {
@@ -250,6 +262,9 @@ void FaceTrackModule::UpdateStream(inspirecv::FrameProcess &image) {
candidate_faces_.clear();
}
// Record the number of faces before tracking
size_t faces_before_tracking = trackingFace.size();
for (std::vector<FaceObjectInternal>::iterator iter = trackingFace.begin(); iter != trackingFace.end();) {
if (!TrackFace(image, *iter)) {
iter = trackingFace.erase(iter);
@@ -257,6 +272,35 @@ void FaceTrackModule::UpdateStream(inspirecv::FrameProcess &image) {
iter++;
}
}
// In the track lost recovery mode, if all faces are triggered to be lost, detection will be executed immediately
if (m_track_lost_recovery_mode_ && !detection_executed && faces_before_tracking > 0 && trackingFace.empty()) {
image.SetPreviewSize(track_preview_size_);
inspirecv::Image image_detect = image.ExecutePreviewImageProcessing(true);
m_debug_preview_image_size_ = image_detect.Width();
DetectFace(image_detect, image.GetPreviewScale());
// Reset the detection index to 0
detection_index_ = 0;
// Add the detected faces to the tracking face list
if (!candidate_faces_.empty()) {
for (int i = 0; i < candidate_faces_.size(); i++) {
trackingFace.push_back(candidate_faces_[i]);
}
candidate_faces_.clear();
}
// Track the faces
for (std::vector<FaceObjectInternal>::iterator iter = trackingFace.begin(); iter != trackingFace.end();) {
if (!TrackFace(image, *iter)) {
iter = trackingFace.erase(iter);
} else {
iter++;
}
}
}
total.Stop();
// std::cout << total << std::endl;
}
@@ -530,6 +574,19 @@ void FaceTrackModule::SetMultiscaleLandmarkLoop(int value) {
m_multiscale_landmark_scales_ = GenerateCropScales(m_landmark_crop_ratio_, m_multiscale_landmark_loop_num_);
}
// Enables/disables track-lost recovery mode (only meaningful in LightTrack
// mode, per the member's declaration comment).
void FaceTrackModule::SetTrackLostRecoveryMode(bool value) {
    m_track_lost_recovery_mode_ = value;
}

// Sets the confidence threshold below which light tracking drops a face.
void FaceTrackModule::SetLightTrackConfidenceThreshold(float value) {
    m_light_track_confidence_threshold_ = value;
}

// Drops all tracked faces and pending detection candidates, resetting state.
void FaceTrackModule::ClearTrackingFace() {
    trackingFace.clear();
    candidate_faces_.clear();
}

// Returns the width of the last preview image used for detection (debug aid).
int32_t FaceTrackModule::GetDebugPreviewImageSize() const {
    return m_debug_preview_image_size_;
}

View File

@@ -187,6 +187,23 @@ public:
*/
void SetMultiscaleLandmarkLoop(int value);
/**
* @brief Set the track lost recovery mode
* @param value Track lost recovery mode
*/
void SetTrackLostRecoveryMode(bool value);
/**
* @brief Set the light track confidence threshold
* @param value Light track confidence threshold
*/
void SetLightTrackConfidenceThreshold(float value);
/**
* @brief Clear the tracking face
*/
void ClearTrackingFace();
public:
std::vector<FaceObjectInternal> trackingFace; ///< Vector of FaceObjects currently being tracked.
@@ -234,8 +251,12 @@ private:
float m_landmark_crop_ratio_ = 1.1f;
float m_light_track_confidence_threshold_ = 0.1; ///< Light track confidence threshold
std::vector<float> m_multiscale_landmark_scales_;
bool m_track_lost_recovery_mode_{false}; ///< Track lost recovery mode(only for LightTrack mode)
std::shared_ptr<LandmarkParam> m_landmark_param_;
};

View File

@@ -1 +1 @@
InspireFace Version: 1.2.2
InspireFace Version: 1.2.3

View File

@@ -83,7 +83,35 @@ set_target_properties(FeatureHubPersistenceSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(CPUWatchingSample api/sample_cpu_watching.c)
target_link_libraries(CPUWatchingSample InspireFace ${ext})
set_target_properties(CPUWatchingSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(CreateSessionSample api/sample_create_session.c)
target_link_libraries(CreateSessionSample InspireFace ${ext})
set_target_properties(CreateSessionSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(GrayInput api/sample_gray_input.c)
target_link_libraries(GrayInput InspireFace ${ext})
set_target_properties(GrayInput PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(SearchFaceSample api/sample_search_face.cpp)
target_link_libraries(SearchFaceSample InspireFace ${ext})
set_target_properties(SearchFaceSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(CppMultithreadingSample api/sample_cpp_multithreading.cpp)
target_link_libraries(CppMultithreadingSample InspireFace ${ext})
set_target_properties(CppMultithreadingSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
# --- C++ API ---
@@ -117,6 +145,20 @@ set_target_properties(CppSampleInspireCV PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
add_executable(CppSampleGrayInput cpp_api/cpp_gray_input.cpp)
target_link_libraries(CppSampleGrayInput InspireFace ${ext})
set_target_properties(CppSampleGrayInput PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
add_executable(CppNexusProcessSample cpp_api/cpp_nexus_process.cpp)
target_link_libraries(CppNexusProcessSample InspireFace ${ext})
set_target_properties(CppNexusProcessSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
if(ISF_BUILD_SAMPLE_INTERNAL)
add_executable(FaceTrackerSample source/tracker_sample.cpp)
target_link_libraries(FaceTrackerSample InspireFace ${ext})

View File

@@ -0,0 +1,293 @@
/**
* Created by Jingyu Yan
* @date 2025-07-13
*
* Multi-threading example: One thread performs face tracking and stores tokens,
* another thread monitors tokens and extracts face features.
*/
#include <iostream>
#include <vector>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <chrono>
#include <memory>
#include <inspireface.h>
// Thread-safe token storage using C++ containers
class ThreadSafeTokenStorage {
private:
std::vector<HFFaceBasicToken> tokens_;
mutable std::mutex mutex_; // Make mutex mutable for const member functions
std::condition_variable cv_;
std::atomic<bool> stop_flag_{false};
public:
// Add a token to the storage
void addToken(const HFFaceBasicToken& token) {
std::lock_guard<std::mutex> lock(mutex_);
// Copy token data
HInt32 tokenSize;
HFGetFaceBasicTokenSize(&tokenSize);
char* tokenBuffer = new char[tokenSize];
HFCopyFaceBasicToken(token, tokenBuffer, tokenSize);
// Create copied token
HFFaceBasicToken copiedToken;
copiedToken.size = tokenSize;
copiedToken.data = tokenBuffer;
tokens_.push_back(copiedToken);
cv_.notify_one();
}
// Get the last token and remove it
bool getLastToken(HFFaceBasicToken& token) {
std::unique_lock<std::mutex> lock(mutex_);
if (tokens_.empty()) {
return false;
}
token = tokens_.back();
tokens_.pop_back();
return true;
}
// Wait for a token with timeout
bool waitForToken(HFFaceBasicToken& token, int timeout_ms = 1000) {
std::unique_lock<std::mutex> lock(mutex_);
if (cv_.wait_for(lock, std::chrono::milliseconds(timeout_ms),
[this] { return !tokens_.empty() || stop_flag_.load(); })) {
if (!tokens_.empty()) {
token = tokens_.back();
tokens_.pop_back();
return true;
}
}
return false;
}
// Check if there are tokens available
bool hasTokens() const {
std::lock_guard<std::mutex> lock(mutex_);
return !tokens_.empty();
}
// Get token count
size_t getTokenCount() const {
std::lock_guard<std::mutex> lock(mutex_);
return tokens_.size();
}
// Stop the threads
void stop() {
stop_flag_.store(true);
cv_.notify_all();
}
// Check if should stop
bool shouldStop() const {
return stop_flag_.load();
}
// Cleanup allocated memory
void cleanup() {
std::lock_guard<std::mutex> lock(mutex_);
for (auto& token : tokens_) {
if (token.data != nullptr) {
delete[] static_cast<char*>(token.data);
}
}
tokens_.clear();
}
~ThreadSafeTokenStorage() {
cleanup();
}
};
// Face tracking thread function.
// Producer: repeatedly runs face tracking on a single still image (simulating
// a frame stream) and deep-copies each detected face token into tokenStorage.
// NOTE(review): `session` is shared with the feature-extraction thread —
// assumes concurrent HF* calls on one session are safe; confirm with the SDK.
void faceTrackingThread(ThreadSafeTokenStorage& tokenStorage,
                        HFSession session,
                        const std::string& imagePath,
                        int maxFrames = 100) {
    std::cout << "Face tracking thread started" << std::endl;
    // Load image using C API
    HFImageBitmap imageBitmap = {0};
    HResult ret = HFCreateImageBitmapFromFilePath(imagePath.c_str(), 3, &imageBitmap);
    if (ret != HSUCCEED) {
        std::cerr << "Failed to create image bitmap: " << ret << std::endl;
        return;
    }
    // Create image stream
    HFImageStream stream;
    ret = HFCreateImageStreamFromImageBitmap(imageBitmap, HF_CAMERA_ROTATION_0, &stream);
    if (ret != HSUCCEED) {
        std::cerr << "Failed to create image stream: " << ret << std::endl;
        HFReleaseImageBitmap(imageBitmap);
        return;
    }
    int frameCount = 0;
    // Loop until asked to stop or maxFrames "frames" have been processed
    while (!tokenStorage.shouldStop() && frameCount < maxFrames) {
        // Perform face detection and tracking using C API
        HFMultipleFaceData multipleFaceData = {0};
        ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);
        if (ret == HSUCCEED && multipleFaceData.detectedNum > 0) {
            // Add tokens to storage (addToken deep-copies; the native token
            // memory stays owned by the session)
            for (int i = 0; i < multipleFaceData.detectedNum; i++) {
                tokenStorage.addToken(multipleFaceData.tokens[i]);
                std::cout << "Frame " << frameCount << ": Added token for face "
                          << multipleFaceData.trackIds[i] << std::endl;
            }
        }
        frameCount++;
        std::this_thread::sleep_for(std::chrono::milliseconds(100)); // Simulate frame processing
    }
    // Cleanup
    HFReleaseImageStream(stream);
    HFReleaseImageBitmap(imageBitmap);
    std::cout << "Face tracking thread finished after " << frameCount << " frames" << std::endl;
}
// Face feature extraction thread function.
// Consumer: waits for tokens produced by the tracking thread and extracts a
// face feature for each one. Takes ownership of every token it receives and
// frees token.data after use.
// NOTE(review): shares `session` with the tracking thread — assumes the SDK
// permits concurrent use of one session; confirm.
void featureExtractionThread(ThreadSafeTokenStorage& tokenStorage,
                             HFSession session,
                             const std::string& imagePath) {
    std::cout << "Feature extraction thread started" << std::endl;
    // Load image using C API (same as tracking thread)
    HFImageBitmap imageBitmap = {0};
    HResult ret = HFCreateImageBitmapFromFilePath(imagePath.c_str(), 3, &imageBitmap);
    if (ret != HSUCCEED) {
        std::cerr << "Failed to create image bitmap: " << ret << std::endl;
        return;
    }
    // Create image stream
    HFImageStream stream;
    ret = HFCreateImageStreamFromImageBitmap(imageBitmap, HF_CAMERA_ROTATION_0, &stream);
    if (ret != HSUCCEED) {
        std::cerr << "Failed to create image stream: " << ret << std::endl;
        HFReleaseImageBitmap(imageBitmap);
        return;
    }
    int extractedCount = 0;
    // Run until the storage is told to stop; each iteration blocks up to 2s
    while (!tokenStorage.shouldStop()) {
        HFFaceBasicToken token;
        // Wait for token with timeout (2s); on success we own token.data
        if (tokenStorage.waitForToken(token, 2000)) {
            // Extract face feature using C API
            HFFaceFeature feature;
            ret = HFCreateFaceFeature(&feature);
            if (ret == HSUCCEED) {
                ret = HFFaceFeatureExtractTo(session, stream, token, feature);
                if (ret == HSUCCEED) {
                    extractedCount++;
                    std::cout << "Extracted feature " << extractedCount
                              << " (feature size: " << feature.size << ")" << std::endl;
                    // Print first few feature values as example
                    std::cout << "Feature values: ";
                    for (int i = 0; i < std::min(5, feature.size); i++) {
                        std::cout << feature.data[i] << " ";
                    }
                    std::cout << "..." << std::endl;
                } else {
                    std::cerr << "Feature extraction failed with error: " << ret << std::endl;
                }
                HFReleaseFaceFeature(&feature);
            }
            // Clean up token memory (ownership was transferred by waitForToken)
            if (token.data != nullptr) {
                delete[] static_cast<char*>(token.data);
            }
        } else {
            std::cout << "No tokens available, waiting..." << std::endl;
        }
    }
    // Cleanup
    HFReleaseImageStream(stream);
    HFReleaseImageBitmap(imageBitmap);
    std::cout << "Feature extraction thread finished, extracted "
              << extractedCount << " features" << std::endl;
}
// Entry point: launches the SDK, creates one recognition session, then runs a
// producer (tracking) thread and a consumer (feature extraction) thread that
// communicate through a ThreadSafeTokenStorage, while the main thread prints
// queue statistics for ~15 seconds before shutting everything down.
int main(int argc, char** argv) {
    if (argc != 3) {
        std::cout << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
        return -1;
    }
    std::string modelPath = argv[1];
    std::string imagePath = argv[2];
    // Initialize InspireFace using C API
    HResult ret = HFLaunchInspireFace(modelPath.c_str());
    if (ret != HSUCCEED) {
        std::cerr << "Failed to launch InspireFace: " << ret << std::endl;
        return -1;
    }
    // Create session using C API (light-track mode, up to 10 faces)
    HOption option = HF_ENABLE_FACE_RECOGNITION;
    HFSession session;
    ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_LIGHT_TRACK, 10, -1, -1, &session);
    if (ret != HSUCCEED) {
        std::cerr << "Failed to create session: " << ret << std::endl;
        return -1;
    }
    // Set session parameters using C API
    HFSessionSetTrackPreviewSize(session, 640);
    // Create thread-safe token storage shared by both worker threads
    ThreadSafeTokenStorage tokenStorage;
    // Start face tracking thread (produces at most 50 "frames")
    std::thread trackingThread(faceTrackingThread, std::ref(tokenStorage), session, imagePath, 50);
    // Start feature extraction thread (consumes tokens as they arrive)
    std::thread extractionThread(featureExtractionThread, std::ref(tokenStorage), session, imagePath);
    // Main thread: monitor and print statistics (30 x 500ms ≈ 15s)
    std::cout << "Main thread: Monitoring token storage..." << std::endl;
    for (int i = 0; i < 30; i++) {
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
        size_t tokenCount = tokenStorage.getTokenCount();
        std::cout << "Token storage status: " << tokenCount << " tokens available" << std::endl;
    }
    // Stop threads (wakes any blocked waitForToken)
    std::cout << "Stopping threads..." << std::endl;
    tokenStorage.stop();
    // Wait for threads to finish before releasing the shared session
    trackingThread.join();
    extractionThread.join();
    // Cleanup using C API
    HFReleaseInspireFaceSession(session);
    std::cout << "All threads finished successfully" << std::endl;
    return 0;
}

View File

@@ -0,0 +1,47 @@
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <stdio.h>
#include <stdlib.h>
#include <inspireface.h>
#include <unistd.h>
/**
 * Entry point: loads the SDK resource pack given on the command line, creates
 * a session with several pipeline features enabled, then sleeps so CPU usage
 * can be observed externally before releasing the session.
 */
int main(int argc, char* argv[]) {
    /* Fix: argv[1] was dereferenced unconditionally; with no argument it is
     * NULL and HFReloadInspireFace would crash. */
    if (argc < 2) {
        HFLogPrint(HF_LOG_ERROR, "Usage: %s <resource_path>", argv[0]);
        return 1;
    }
    const char* resourcePath = argv[1];
    HResult ret = HFReloadInspireFace(resourcePath);
    if (ret != HSUCCEED)
    {
        HFLogPrint(HF_LOG_ERROR, "Failed to launch InspireFace: %d", ret);
        return 1;
    }
    // Enable the functions in the pipeline: mask detection, live detection, and face quality
    // detection
    HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_IR_LIVENESS | HF_ENABLE_FACE_RECOGNITION;
    // Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
    // tracking
    HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
    // Maximum number of faces detected
    HInt32 maxDetectNum = 20;
    // Face detection image input level
    HInt32 detectPixelLevel = 160;
    // Handle of the current face SDK algorithm context
    HFSession session = {0};
    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create FaceContext error: %d", ret);
        return ret;
    }
    // Sleep for 2000 seconds while the CPU usage is watched externally
    sleep(2000);
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release InspireFaceSession error: %d", ret);
        return ret;
    }
    return 0;
}

View File

@@ -0,0 +1,131 @@
/**
* Created by InspireFace
* @date 2025-06-22
* Sample code for creating InspireFace session with all features enabled
*/
#include <stdio.h>
#include <stdlib.h>
#include <inspireface.h>
/**
 * Entry point: demonstrates creating an InspireFace session with every
 * pipeline feature enabled, configuring it, then releasing it and printing
 * resource statistics.
 */
int main(int argc, char* argv[]) {
    HResult ret;
    const char* packPath;
    HOption option;
    HFDetectMode detMode;
    HInt32 maxDetectNum;
    HInt32 detectPixelLevel;
    HFSession session;
    HFFaceDetectPixelList pixelLevels;

    /* Fix: argv[1] was read unconditionally; with no argument it is NULL and
     * the log call / resource load below would crash. */
    if (argc < 2) {
        HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path>", argv[0]);
        return 1;
    }
    packPath = argv[1];
    HFLogPrint(HF_LOG_INFO, "Pack file Path: %s", packPath);

    /* Set log level to debug for detailed output */
    HFSetLogLevel(HF_LOG_DEBUG);

    /* The resource file must be loaded before it can be used */
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
        return ret;
    }
    HFLogPrint(HF_LOG_INFO, "Resource loaded successfully");

    /* Query supported pixel levels for face detection */
    ret = HFQuerySupportedPixelLevelsForFaceDetection(&pixelLevels);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "HFQuerySupportedPixelLevelsForFaceDetection error: %d", ret);
        return ret;
    }
    HFLogPrint(HF_LOG_INFO, "Supported pixel levels for face detection: %d", pixelLevels.size);
    for (int i = 0; i < pixelLevels.size; i++) {
        HFLogPrint(HF_LOG_INFO, "Supported pixel level %d: %d", i + 1, pixelLevels.pixel_level[i]);
    }

    /* Enable ALL available functions in the pipeline */
    option = HF_ENABLE_FACE_RECOGNITION | // Face recognition
             HF_ENABLE_LIVENESS | // RGB liveness detection
             HF_ENABLE_IR_LIVENESS | // IR liveness detection
             HF_ENABLE_MASK_DETECT | // Mask detection
             HF_ENABLE_FACE_ATTRIBUTE | // Face attribute prediction
             HF_ENABLE_QUALITY | // Face quality assessment
             HF_ENABLE_INTERACTION | // Interaction feature
             HF_ENABLE_FACE_POSE | // Face pose estimation
             HF_ENABLE_FACE_EMOTION; // Face emotion recognition
    HFLogPrint(HF_LOG_INFO, "Enabled features:");
    HFLogPrint(HF_LOG_INFO, "- Face Recognition: YES");
    HFLogPrint(HF_LOG_INFO, "- RGB Liveness Detection: YES");
    HFLogPrint(HF_LOG_INFO, "- IR Liveness Detection: YES");
    HFLogPrint(HF_LOG_INFO, "- Mask Detection: YES");
    HFLogPrint(HF_LOG_INFO, "- Face Attributes: YES");
    HFLogPrint(HF_LOG_INFO, "- Face Quality: YES");
    HFLogPrint(HF_LOG_INFO, "- Interaction: YES");
    HFLogPrint(HF_LOG_INFO, "- Face Pose: YES");
    HFLogPrint(HF_LOG_INFO, "- Face Emotion: YES");

    /* Set detection mode - use light track for general purpose */
    detMode = HF_DETECT_MODE_LIGHT_TRACK;
    HFLogPrint(HF_LOG_INFO, "Detection mode: HF_DETECT_MODE_LIGHT_TRACK");

    /* Maximum number of faces to detect */
    maxDetectNum = 20;
    HFLogPrint(HF_LOG_INFO, "Maximum faces to detect: %d", maxDetectNum);

    /* Face detection image input level */
    detectPixelLevel = 320;
    HFLogPrint(HF_LOG_INFO, "Detection pixel level: %d", detectPixelLevel);

    /* Create InspireFace session with all features enabled */
    session = NULL;
    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create InspireFace session error: %d", ret);
        return ret;
    }
    HFLogPrint(HF_LOG_INFO, "InspireFace session created successfully");

    /* Configure session parameters (failures are logged but non-fatal) */
    ret = HFSessionSetTrackPreviewSize(session, detectPixelLevel);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Set track preview size error: %d", ret);
    } else {
        HFLogPrint(HF_LOG_INFO, "Track preview size set to: %d", detectPixelLevel);
    }
    ret = HFSessionSetFilterMinimumFacePixelSize(session, 4);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Set minimum face pixel size error: %d", ret);
    } else {
        HFLogPrint(HF_LOG_INFO, "Minimum face pixel size set to: 4");
    }

    /* Session is now ready for use */
    HFLogPrint(HF_LOG_INFO, "========================================");
    HFLogPrint(HF_LOG_INFO, "Session created successfully with ALL features enabled!");
    HFLogPrint(HF_LOG_INFO, "You can now use this session for:");
    HFLogPrint(HF_LOG_INFO, "- Face detection and tracking");
    HFLogPrint(HF_LOG_INFO, "- Face recognition");
    HFLogPrint(HF_LOG_INFO, "- Liveness detection (RGB & IR)");
    HFLogPrint(HF_LOG_INFO, "- Mask detection");
    HFLogPrint(HF_LOG_INFO, "- Face attribute analysis");
    HFLogPrint(HF_LOG_INFO, "- Face quality assessment");
    HFLogPrint(HF_LOG_INFO, "- Interaction detection");
    HFLogPrint(HF_LOG_INFO, "- Face pose estimation");
    HFLogPrint(HF_LOG_INFO, "- Emotion recognition");
    HFLogPrint(HF_LOG_INFO, "========================================");

    /* Clean up resources */
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
        return ret;
    }
    HFLogPrint(HF_LOG_INFO, "Session released successfully");

    /* Show resource statistics */
    HFLogPrint(HF_LOG_INFO, "");
    HFDeBugShowResourceStatistics();
    return 0;
}

View File

@@ -0,0 +1,113 @@
/**
* Created by Jingyu Yan
* @date 2025-06-29
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inspireface.h>
/* Read an entire binary file into a heap buffer.
 *
 * filename:  path of the file to read.
 * file_size: out-parameter receiving the number of bytes read.
 *
 * Returns a malloc'ed buffer (free with free_binary_data/free), or NULL on
 * any error. Unlike the original, fseek/ftell failures are detected and an
 * empty file yields a valid zero-length buffer instead of a spurious
 * allocation-failure report. */
uint8_t* read_binary_file(const char* filename, size_t* file_size) {
    FILE* file = fopen(filename, "rb");
    if (!file) {
        printf("Error: Cannot open file %s\n", filename);
        return NULL;
    }
    /* Determine the file length by seeking to the end. */
    if (fseek(file, 0, SEEK_END) != 0) {
        printf("Error: Seek failed on file %s\n", filename);
        fclose(file);
        return NULL;
    }
    long length = ftell(file);
    if (length < 0) {
        /* ftell returns -1L on failure (e.g. non-seekable stream). */
        printf("Error: Cannot determine size of file %s\n", filename);
        fclose(file);
        return NULL;
    }
    if (fseek(file, 0, SEEK_SET) != 0) {
        printf("Error: Seek failed on file %s\n", filename);
        fclose(file);
        return NULL;
    }
    *file_size = (size_t)length;
    /* Allocate at least one byte so malloc(0) on an empty file cannot be
     * mistaken for an allocation failure. */
    uint8_t* buffer = (uint8_t*)malloc(*file_size > 0 ? *file_size : 1);
    if (!buffer) {
        printf("Error: Memory allocation failed\n");
        fclose(file);
        return NULL;
    }
    size_t bytes_read = fread(buffer, 1, *file_size, file);
    fclose(file);
    if (bytes_read != *file_size) {
        printf("Error: File read incomplete. Expected %zu bytes, got %zu bytes\n",
               *file_size, bytes_read);
        free(buffer);
        return NULL;
    }
    return buffer;
}
/* Release a buffer returned by read_binary_file.
 * Passing NULL is harmless: free(NULL) is defined as a no-op, so no
 * explicit guard is needed. */
void free_binary_data(uint8_t* data) {
    free(data);
}
/* Example: run face tracking on a raw 640x480 grayscale frame loaded from a
 * byte file, using HFCreateImageStream with a caller-owned pixel buffer.
 * Returns 0 on success, an InspireFace error code (or -1) on failure.
 * Fixes over the original: every exit path now releases the session, the
 * image stream, and the pixel buffer (all were leaked), and the dead
 * commented-out bitmap code has been removed. */
int main() {
    HResult ret;
    /* Load the resource pack; required before creating any session. */
    ret = HFLaunchInspireFace("test_res/pack/Pikachu");
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
        return ret;
    }
    HOption option = HF_ENABLE_FACE_RECOGNITION | HF_ENABLE_MASK_DETECT | HF_ENABLE_QUALITY;
    HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
    HInt32 maxDetectNum = 1;
    HInt32 detectPixelLevel = 320;
    HFSession session = {0};
    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create FaceContext error: %d", ret);
        return ret;
    }
    /* Read the raw single-channel frame from disk. */
    size_t file_size;
    uint8_t* buffer = read_binary_file("/Users/tunm/Downloads/Desktop/outputbw.byte", &file_size);
    if (buffer == NULL) {
        HFLogPrint(HF_LOG_ERROR, "Read file error.");
        HFReleaseInspireFaceSession(session);
        return -1;
    }
    /* Wrap the raw buffer in an image stream. The buffer is kept alive until
     * the stream is released (NOTE: assumes the stream does not copy the
     * pixels — freeing only after release is safe either way). */
    HFImageData imageData;
    imageData.data = buffer;
    imageData.width = 640;
    imageData.height = 480;
    imageData.format = HF_STREAM_GRAY;
    imageData.rotation = HF_CAMERA_ROTATION_0;
    HFImageStream imageHandle;
    ret = HFCreateImageStream(&imageData, &imageHandle);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create ImageStream error: %d", ret);
        free_binary_data(buffer);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    /* Dump the decoded stream for visual inspection. */
    HFDeBugImageStreamDecodeSave(imageHandle, "2.jpg");
    HFMultipleFaceData multipleFaceData;
    ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Detect Face error: %d", ret);
        HFReleaseImageStream(imageHandle);
        free_binary_data(buffer);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    HFLogPrint(HF_LOG_INFO, "Face num: %d", multipleFaceData.detectedNum);
    /* Release all resources (the original example leaked these). */
    HFReleaseImageStream(imageHandle);
    free_binary_data(buffer);
    HFReleaseInspireFaceSession(session);
    return 0;
}

View File

@@ -0,0 +1,130 @@
#include <inspireface.h>
#include <unistd.h>
#include <stdio.h>
/* Example: extract a face feature from an image and search it against a
 * persistent FeatureHub database.
 *
 * Usage: <pack_path> <feature_path> <image_path>
 *   pack_path    - InspireFace resource pack to load
 *   feature_path - path of the persistent feature database file
 *   image_path   - image containing the face to search for
 *
 * Exit status: 0 on success; an InspireFace error code or -1 otherwise.
 * Note the final `return ret` propagates the HFFeatureHubFaceSearch result. */
int main(int argc, char* argv[]) {
    if (argc != 4) {
        HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <feature_path> <image_path>", argv[0]);
        return -1;
    }
    char* packPath = argv[1];
    char* featurePath = argv[2];
    char* imagePath = argv[3];
    HResult ret;
    /* Load the resource pack; required before any other API call. */
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
        return ret;
    }
    // 1. Enable feature hub (persistent storage backed by featurePath)
    HFFeatureHubConfiguration featureHubConfiguration;
    featureHubConfiguration.primaryKeyMode = HF_PK_AUTO_INCREMENT;  /* IDs assigned by the hub */
    featureHubConfiguration.enablePersistence = 1;
    featureHubConfiguration.persistenceDbPath = featurePath;
    featureHubConfiguration.searchThreshold = 0.32f;  /* NOTE(review): presumably tuned for the default pack — confirm */
    featureHubConfiguration.searchMode = HF_SEARCH_MODE_EXHAUSTIVE;
    ret = HFFeatureHubDataEnable(featureHubConfiguration);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Enable feature hub error: %d", ret);
        return ret;
    }
    // 2. Create session (recognition only, single face, 320px detect level)
    HFSession session;
    ret = HFCreateInspireFaceSessionOptional(HF_ENABLE_FACE_RECOGNITION, HF_DETECT_MODE_ALWAYS_DETECT, 1, 320, -1, &session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create session error: %d", ret);
        return ret;
    }
    // 3. Load image (3 channels)
    HFImageBitmap image;
    ret = HFCreateImageBitmapFromFilePath(imagePath, 3, &image);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create image bitmap error: %d", ret);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    // 4. Create image stream from the bitmap (no rotation)
    HFImageStream imageHandle;
    ret = HFCreateImageStreamFromImageBitmap(image, HF_CAMERA_ROTATION_0, &imageHandle);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create image stream error: %d", ret);
        HFReleaseImageBitmap(image);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    // 5. Detect face (each failure path releases everything created so far)
    HFMultipleFaceData multipleFaceData;
    ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Execute face track error: %d", ret);
        HFReleaseImageStream(imageHandle);
        HFReleaseImageBitmap(image);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    if (multipleFaceData.detectedNum == 0) {
        HFLogPrint(HF_LOG_WARN, "No face detected in image: %s", imagePath);
        HFReleaseImageStream(imageHandle);
        HFReleaseImageBitmap(image);
        HFReleaseInspireFaceSession(session);
        return -1;
    }
    HFLogPrint(HF_LOG_INFO, "Face detected: %d", multipleFaceData.detectedNum);
    // 6. Extract feature (from the first detected face's token)
    HFFaceFeature feature;
    ret = HFCreateFaceFeature(&feature);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create face feature error: %d", ret);
        HFReleaseImageStream(imageHandle);
        HFReleaseImageBitmap(image);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    ret = HFFaceFeatureExtractTo(session, imageHandle, multipleFaceData.tokens[0], feature);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Extract feature error: %d", ret);
        HFReleaseFaceFeature(&feature);
        HFReleaseImageStream(imageHandle);
        HFReleaseImageBitmap(image);
        HFReleaseInspireFaceSession(session);
        return ret;
    }
    // 7. Search face in the hub; a search failure is logged but not fatal,
    //    so cleanup below still runs.
    HFloat confidence;
    HFFaceFeatureIdentity searchResult = {0};
    ret = HFFeatureHubFaceSearch(feature, &confidence, &searchResult);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Search face feature error: %d", ret);
    } else {
        /* id == -1 means no entry passed the search threshold (see else branch). */
        if (searchResult.id != -1) {
            HFLogPrint(HF_LOG_INFO, "Search face feature success!");
            HFLogPrint(HF_LOG_INFO, "Found matching face ID: %d", searchResult.id);
            HFLogPrint(HF_LOG_INFO, "Confidence score: %.4f", confidence);
        } else {
            HFLogPrint(HF_LOG_INFO, "No matching face found in database");
        }
    }
    // 8. Clean up (release in reverse order of creation)
    HFReleaseFaceFeature(&feature);
    HFReleaseImageStream(imageHandle);
    HFReleaseImageBitmap(image);
    HFReleaseInspireFaceSession(session);
    HFDeBugShowResourceStatistics();
    return ret;
}

View File

@@ -0,0 +1,38 @@
#include <iostream>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
/**
 * Example: detect faces on a grayscale frame and draw the results on the
 * original color image. Fixed over the original: the return code of
 * FaceDetectAndTrack was assigned but never checked.
 */
int main() {
    INSPIREFACE_CONTEXT->Reload("test_res/pack/Pikachu");

    // Create a session with the full pipeline enabled
    inspire::CustomPipelineParameter param;
    param.enable_recognition = true;
    param.enable_liveness = true;
    param.enable_mask_detect = true;
    param.enable_face_attribute = true;
    param.enable_face_quality = true;
    std::shared_ptr<inspire::Session> session(inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320));

    // Prepare a grayscale copy of the input image
    inspirecv::Image img = inspirecv::Image::Create("data.jpg", 3);
    inspirecv::Image gray_img = img.ToGray();

    // Wrap the raw gray buffer in a frame process (GRAY format, no rotation)
    inspirecv::FrameProcess process =
      inspirecv::FrameProcess::Create(gray_img.Data(), gray_img.Height(), gray_img.Width(), inspirecv::GRAY, inspirecv::ROTATION_0);
    auto decode = process.ExecutePreviewImageProcessing(true);
    decode.Write("decode.jpg");

    // Detect faces; bail out on failure instead of silently ignoring ret
    std::vector<inspire::FaceTrackWrap> results;
    int32_t ret = session->FaceDetectAndTrack(process, results);
    if (ret != 0) {
        std::cout << "FaceDetectAndTrack failed: " << ret << std::endl;
        return ret;
    }
    std::cout << "Face size: " << results.size() << std::endl;
    for (const auto& result : results) {
        // Draw the detected face rectangle on the original color image
        inspirecv::Rect2i rect = inspirecv::Rect2i::Create(result.rect.x, result.rect.y, result.rect.width, result.rect.height);
        img.DrawRect(rect, inspirecv::Color::Red);
    }
    img.Write("result.jpg");
    return 0;
}

View File

@@ -0,0 +1,29 @@
#include <iostream>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
#include <inspireface/image_process/nexus_processor/image_processor.h>
// Resize "kun.jpg" to 112x112 via the given processor (with the requested
// row alignment) and write the result to dst_w<aligned_width>.jpg.
// NOTE(review): ownership of dst_data is unclear from here — Resize appears
// to allocate the output buffer, and the Image is constructed with a `false`
// flag (presumably "do not copy"); confirm who is responsible for freeing it.
void test_resize(std::unique_ptr<inspire::nexus::ImageProcessor>& processor, int aligned_width) {
    processor->SetAlignedWidth(aligned_width);
    // Create image (3 channels)
    inspirecv::Image img = inspirecv::Image::Create("kun.jpg", 3);
    // Resize image to a fixed 112x112 output
    uint8_t* dst_data = nullptr;
    int dst_width = 112;
    int dst_height = 112;
    processor->Resize(img.Data(), img.Width(), img.Height(), 3, &dst_data, dst_width, dst_height);
    // Wrap the resized buffer; `false` presumably disables copying — TODO confirm
    inspirecv::Image dst_img(dst_width, dst_height, 3, dst_data, false);
    dst_img.Write("dst_w" + std::to_string(aligned_width) + ".jpg");
    std::cout << "Save dst image to dst_w" << aligned_width << ".jpg" << std::endl;
}
// Exercise the RGA image processor with several aligned widths:
// two valid values (4, 16) followed by an invalid one (7) to observe
// how a wrong alignment is handled.
int main() {
    auto processor = inspire::nexus::ImageProcessor::Create(inspire::Launch::IMAGE_PROCESSING_RGA);
    const int aligned_widths[] = {4, 16, 7};
    for (int width : aligned_widths) {
        test_resize(processor, width);
    }
    return 0;
}

View File

@@ -19,7 +19,8 @@ int main() {
db_config.search_mode = inspire::SEARCH_MODE_EXHAUSTIVE;
db_config.recognition_threshold = 0.48f;
db_config.primary_key_mode = inspire::AUTO_INCREMENT;
INSPIREFACE_FEATURE_HUB->EnableHub(db_config);
auto ret = INSPIREFACE_FEATURE_HUB->EnableHub(db_config);
INSPIREFACE_CHECK_MSG(ret == HSUCCEED, "EnableHub failed");
// Create a session
auto param = inspire::CustomPipelineParameter();
@@ -43,6 +44,9 @@ int main() {
// Insert face feature into the hub, because the id is INSPIRE_INVALID_ID, so input id is ignored
int64_t result_id;
INSPIREFACE_FEATURE_HUB->FaceFeatureInsert(feature.embedding, INSPIRE_INVALID_ID, result_id);
inspire::FaceEmbedding face_feature;
INSPIREFACE_FEATURE_HUB->GetFaceFeature(result_id, face_feature);
// Prepare a photo of the same person for the query
auto query_image = inspirecv::Image::Create("test_res/data/bulk/jntm.jpg");
@@ -65,9 +69,13 @@ int main() {
INSPIREFACE_CHECK_MSG(search_result.id == result_id, "Search face feature result id is not equal to the inserted id");
// Update the face feature
INSPIREFACE_FEATURE_HUB->FaceFeatureUpdate(query_feature.embedding, result_id);
// Remove the face feature
INSPIREFACE_FEATURE_HUB->FaceFeatureRemove(result_id);
INSPIREFACE_CHECK_MSG(INSPIREFACE_FEATURE_HUB->GetFaceFeatureCount() == 0, "Face feature is not removed");
std::cout << "Remove face feature successfully" << std::endl;
@@ -76,5 +84,11 @@ int main() {
INSPIREFACE_CHECK_MSG(search_result.id == INSPIRE_INVALID_ID, "Search face feature result id is not equal to the inserted id");
std::cout << "Query again, search face feature result: " << search_result.id << std::endl;
// Top-k query
std::vector<inspire::FaceSearchResult> top_k_results;
INSPIREFACE_FEATURE_HUB->SearchFaceFeatureTopK(query_feature.embedding, top_k_results, 10, true);
std::cout << "Top-k query result: " << top_k_results.size() << std::endl;
return 0;
}

View File

@@ -40,6 +40,10 @@ int main(int argc, char** argv) {
ret = session->FaceDetectAndTrack(process, results);
INSPIREFACE_CHECK_MSG(ret == 0, "FaceDetectAndTrack failed");
// Run pipeline for each face
ret = session->MultipleFacePipelineProcess(process, param, results);
INSPIREFACE_CHECK_MSG(ret == 0, "MultipleFacePipelineProcess failed");
for (auto& result : results) {
std::cout << "result: " << result.trackId << std::endl;
std::cout << "quality: " << result.quality[0] << ", " << result.quality[1] << ", " << result.quality[2] << ", " << result.quality[3] << ", "
@@ -49,6 +53,11 @@ int main(int argc, char** argv) {
image.DrawRect(rect, inspirecv::Color::Red);
inspirecv::TransformMatrix trans = inspirecv::TransformMatrix::Create(result.trans.m00, result.trans.m01, result.trans.tx, result.trans.m10, result.trans.m11, result.trans.ty);
std::cout << "trans: " << trans.GetInverse() << std::endl;
std::vector<inspirecv::Point2f> landmark = session->GetFaceDenseLandmark(result);
for (auto& point : landmark) {
image.DrawCircle(point.As<int>(), 2, inspirecv::Color::Green);
}
}
image.Write("result.jpg");

View File

@@ -0,0 +1,58 @@
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <chrono>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
/**
 * Benchmark: run FaceDetectAndTrack 100 times on one image and report the
 * per-iteration latency in milliseconds.
 * Usage: <model_path> <image_path>
 * Fixes over the original: the session null-check ran AFTER the first two
 * method calls (a null session would have been dereferenced first), and the
 * timing log was mislabeled "MultipleFacePipelineProcess" although the loop
 * times FaceDetectAndTrack.
 */
int main(int argc, char** argv) {
    if (argc != 3) {
        std::cout << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
        return -1;
    }
    std::string model_path = argv[1];
    std::string image_path = argv[2];

    // Global init (only once per process)
    INSPIREFACE_CONTEXT->Reload(model_path);

    // Create image and frame process (BGR input, no rotation)
    inspirecv::Image image = inspirecv::Image::Create(image_path);
    inspirecv::FrameProcess process =
      inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);

    // Create a light-track session with the full pipeline enabled
    inspire::CustomPipelineParameter param;
    param.enable_recognition = true;
    param.enable_liveness = true;
    param.enable_mask_detect = true;
    param.enable_face_attribute = true;
    param.enable_face_quality = true;
    std::shared_ptr<inspire::Session> session(inspire::Session::CreatePtr(inspire::DETECT_MODE_LIGHT_TRACK, 100, param, 640));
    // Validate the session BEFORE dereferencing it
    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");
    session->SetTrackPreviewSize(640);
    session->SetTrackModeDetectInterval(10);

    for (int i = 0; i < 100; i++) {
        // Detect and track, timing each call
        std::vector<inspire::FaceTrackWrap> results;
        auto start = std::chrono::high_resolution_clock::now();
        int32_t ret = session->FaceDetectAndTrack(process, results);
        INSPIREFACE_CHECK_MSG(ret == 0, "FaceDetectAndTrack failed");
        auto end = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
        std::cout << i << " FaceDetectAndTrack: " << duration.count() / 1000.0 << " ms" << std::endl;
    }
    return 0;
}

View File

@@ -52,10 +52,7 @@ int main(int argc, char* argv[]) {
init_test_evaluation_record();
TEST_PRINT_OUTPUT(true);
TEST_PRINT("InspireFace Version: v{}.{}.{}",
INSPIRE_FACE_VERSION_MAJOR_STR,
INSPIRE_FACE_VERSION_MINOR_STR,
INSPIRE_FACE_VERSION_PATCH_STR);
TEST_PRINT("InspireFace Version: v{}.{}.{}", INSPIRE_FACE_VERSION_MAJOR_STR, INSPIRE_FACE_VERSION_MINOR_STR, INSPIRE_FACE_VERSION_PATCH_STR);
TEST_PRINT("Extended Information: {}", INSPIRE_FACE_EXTENDED_INFORMATION);
Catch::Session session;
@@ -89,7 +86,7 @@ int main(int argc, char* argv[]) {
#if defined(ISF_ENABLE_TENSORRT)
int32_t cuda_device_count;
ret = inspire::GetCudaDeviceCount(&cuda_device_count);
if (ret != InspireFace::HSUCCEED) {
if (ret != HSUCCEED) {
TEST_ERROR_PRINT("An error occurred while checking CUDA device support: {}", ret);
return ret;
}

View File

@@ -2,63 +2,50 @@
During the use of InspireFace, some error feedback codes may be generated. Here is a table of error feedback codes.
- As of **June 15, 2025**, the error code definitions have been restructured: legacy codes from historical versions have been removed, and the remaining codes have been reorganized and consolidated into a more streamlined set.
| Index | Name | Code | Comment |
| --- | --- | --- | --- |
| 1 | HSUCCEED | 0 | Success |
| 2 | HERR_BASIC_BASE | 1 | Basic error types |
| 3 | HERR_UNKNOWN | 1 | Unknown error |
| 4 | HERR_INVALID_PARAM | 2 | Invalid parameter |
| 5 | HERR_INVALID_IMAGE_STREAM_HANDLE | 25 | Invalid image stream handle |
| 6 | HERR_INVALID_CONTEXT_HANDLE | 26 | Invalid context handle |
| 7 | HERR_INVALID_FACE_TOKEN | 31 | Invalid face token |
| 8 | HERR_INVALID_FACE_FEATURE | 32 | Invalid face feature |
| 9 | HERR_INVALID_FACE_LIST | 33 | Invalid face feature list |
| 10 | HERR_INVALID_BUFFER_SIZE | 34 | Invalid copy token |
| 11 | HERR_INVALID_IMAGE_STREAM_PARAM | 35 | Invalid image param |
| 12 | HERR_INVALID_SERIALIZATION_FAILED | 36 | Invalid face serialization failed |
| 13 | HERR_INVALID_DETECTION_INPUT | 37 | Failed to modify detector input size |
| 14 | HERR_INVALID_IMAGE_BITMAP_HANDLE | 38 | Invalid image bitmap handle |
| 15 | HERR_SESS_BASE | 1280 | Session error types |
| 16 | HERR_SESS_FUNCTION_UNUSABLE | 1282 | Function not usable |
| 17 | HERR_SESS_TRACKER_FAILURE | 1283 | Tracker module not initialized |
| 18 | HERR_SESS_INVALID_RESOURCE | 1290 | Invalid static resource |
| 19 | HERR_SESS_NUM_OF_MODELS_NOT_MATCH | 1291 | Number of models does not match |
| 20 | HERR_SESS_LANDMARK_NUM_NOT_MATCH | 1300 | The number of input landmark points does not match |
| 21 | HERR_SESS_LANDMARK_NOT_ENABLE | 1301 | The landmark model is not enabled |
| 22 | HERR_SESS_KEY_POINT_NUM_NOT_MATCH | 1302 | The number of input key points does not match |
| 23 | HERR_SESS_PIPELINE_FAILURE | 1288 | Pipeline module not initialized |
| 24 | HERR_SESS_REC_EXTRACT_FAILURE | 1295 | Face feature extraction not registered |
| 25 | HERR_SESS_REC_DEL_FAILURE | 1296 | Face feature deletion failed due to out of range index |
| 26 | HERR_SESS_REC_UPDATE_FAILURE | 1297 | Face feature update failed due to out of range index |
| 27 | HERR_SESS_REC_ADD_FEAT_EMPTY | 1298 | Feature vector for registration cannot be empty |
| 28 | HERR_SESS_REC_FEAT_SIZE_ERR | 1299 | Incorrect length of feature vector for registration |
| 29 | HERR_SESS_REC_INVALID_INDEX | 1300 | Invalid index number |
| 30 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 1303 | Incorrect length of feature vector for comparison |
| 31 | HERR_SESS_REC_BLOCK_FULL | 1304 | Feature vector block full |
| 32 | HERR_SESS_REC_BLOCK_DEL_FAILURE | 1305 | Deletion failed |
| 33 | HERR_SESS_REC_BLOCK_UPDATE_FAILURE | 1306 | Update failed |
| 34 | HERR_SESS_REC_ID_ALREADY_EXIST | 1307 | ID already exists |
| 35 | HERR_SESS_FACE_DATA_ERROR | 1310 | Face data parsing |
| 36 | HERR_SESS_FACE_REC_OPTION_ERROR | 1320 | An optional parameter is incorrect |
| 37 | HERR_FT_HUB_DISABLE | 1329 | FeatureHub is disabled |
| 38 | HERR_FT_HUB_OPEN_ERROR | 1330 | Database open error |
| 39 | HERR_FT_HUB_NOT_OPENED | 1331 | Database not opened |
| 40 | HERR_FT_HUB_NO_RECORD_FOUND | 1332 | No record found |
| 41 | HERR_FT_HUB_CHECK_TABLE_ERROR | 1333 | Data table check error |
| 42 | HERR_FT_HUB_INSERT_FAILURE | 1334 | Data insertion error |
| 43 | HERR_FT_HUB_PREPARING_FAILURE | 1335 | Data preparation error |
| 44 | HERR_FT_HUB_EXECUTING_FAILURE | 1336 | SQL execution error |
| 45 | HERR_FT_HUB_NOT_VALID_FOLDER_PATH | 1337 | Invalid folder path |
| 46 | HERR_FT_HUB_ENABLE_REPETITION | 1338 | Enable db function repeatedly |
| 47 | HERR_FT_HUB_DISABLE_REPETITION | 1339 | Disable db function repeatedly |
| 48 | HERR_FT_HUB_NOT_FOUND_FEATURE | 1340 | Get face feature error |
| 49 | HERR_ARCHIVE_LOAD_FAILURE | 1360 | Archive load failure |
| 50 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 1361 | Model load failure |
| 51 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 1362 | The archive format is incorrect |
| 52 | HERR_ARCHIVE_REPETITION_LOAD | 1363 | Do not reload the model |
| 53 | HERR_ARCHIVE_NOT_LOAD | 1364 | Model not loaded |
| 54 | HERR_DEVICE_BASE | 2304 | hardware error |
| 55 | HERR_DEVICE_CUDA_NOT_SUPPORT | 2305 | CUDA not supported |
| 56 | HERR_DEVICE_CUDA_TENSORRT_NOT_SUPPORT | 2306 | CUDA TensorRT not supported |
| 57 | HERR_DEVICE_CUDA_UNKNOWN_ERROR | 2324 | CUDA unknown error |
| 58 | HERR_DEVICE_CUDA_DISABLE | 2325 | CUDA support is disabled |
| 2 | HERR_UNKNOWN | 1 | Unknown error (1) |
| 3 | HERR_INVALID_PARAM | 2 | Invalid parameter (2) |
| 4 | HERR_INVALID_IMAGE_STREAM_HANDLE | 3 | Invalid image stream handle (3) |
| 5 | HERR_INVALID_CONTEXT_HANDLE | 4 | Invalid context handle (4) |
| 6 | HERR_INVALID_FACE_TOKEN | 5 | Invalid face token (5) |
| 7 | HERR_INVALID_FACE_FEATURE | 6 | Invalid face feature (6) |
| 8 | HERR_INVALID_FACE_LIST | 7 | Invalid face feature list (7) |
| 9 | HERR_INVALID_BUFFER_SIZE | 8 | Invalid copy token (8) |
| 10 | HERR_INVALID_IMAGE_STREAM_PARAM | 9 | Invalid image param (9) |
| 11 | HERR_INVALID_SERIALIZATION_FAILED | 10 | Invalid face serialization failed (10) |
| 12 | HERR_INVALID_DETECTION_INPUT | 11 | Failed to modify detector input size (11) |
| 13 | HERR_INVALID_IMAGE_BITMAP_HANDLE | 12 | Invalid image bitmap handle (12) |
| 14 | HERR_IMAGE_STREAM_DECODE_FAILED | 13 | ImageStream failed to decode the image (13) |
| 15 | HERR_SESS_FUNCTION_UNUSABLE | 101 | Function not usable (101) |
| 16 | HERR_SESS_TRACKER_FAILURE | 102 | Tracker module not initialized (102) |
| 17 | HERR_SESS_PIPELINE_FAILURE | 103 | Pipeline module not initialized (103) |
| 18 | HERR_SESS_INVALID_RESOURCE | 104 | Invalid static resource (104) |
| 19 | HERR_SESS_LANDMARK_NUM_NOT_MATCH | 105 | The number of input landmark points does not match (105) |
| 20 | HERR_SESS_LANDMARK_NOT_ENABLE | 106 | The landmark model is not enabled (106) |
| 21 | HERR_SESS_KEY_POINT_NUM_NOT_MATCH | 107 | The number of input key points does not match (107) |
| 22 | HERR_SESS_REC_EXTRACT_FAILURE | 108 | Face feature extraction not registered (108) |
| 23 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 109 | Incorrect length of feature vector for comparison (109) |
| 24 | HERR_SESS_FACE_DATA_ERROR | 110 | Face data parsing (110) |
| 25 | HERR_SESS_FACE_REC_OPTION_ERROR | 111 | An optional parameter is incorrect (111) |
| 26 | HERR_FT_HUB_DISABLE | 201 | FeatureHub is disabled (201) |
| 27 | HERR_FT_HUB_INSERT_FAILURE | 202 | Data insertion error (202) |
| 28 | HERR_FT_HUB_NOT_FOUND_FEATURE | 203 | Get face feature error (203) |
| 29 | HERR_ARCHIVE_LOAD_FAILURE | 251 | Archive load failure (251) |
| 30 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 252 | Model load failure (252) |
| 31 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 253 | The archive format is incorrect (253) |
| 32 | HERR_ARCHIVE_REPETITION_LOAD | 254 | Do not reload the model (254) |
| 33 | HERR_ARCHIVE_NOT_LOAD | 255 | Model not loaded (255) |
| 34 | HERR_DEVICE_CUDA_NOT_SUPPORT | 301 | CUDA not supported (301) |
| 35 | HERR_DEVICE_CUDA_TENSORRT_NOT_SUPPORT | 302 | CUDA TensorRT not supported (302) |
| 36 | HERR_DEVICE_CUDA_UNKNOWN_ERROR | 303 | CUDA unknown error (303) |
| 37 | HERR_DEVICE_CUDA_DISABLE | 304 | CUDA support is disabled (304) |
| 38 | HERR_EXTENSION_ERROR | 351 | Extension module error (351) |
| 39 | HERR_EXTENSION_MLMODEL_LOAD_FAILED | 352 | MLModel load failed (352) |
| 40 | HERR_EXTENSION_HETERO_MODEL_TAG_ERROR | 353 | Incorrect heterogeneous model tag (353) |
| 41 | HERR_EXTENSION_HETERO_REC_HEAD_CONFIG_ERROR | 354 | Rec head config error (354) |
| 42 | HERR_EXTENSION_HETERO_MODEL_NOT_MATCH | 355 | Heterogeneous model dimensions do not match (355) |
| 43 | HERR_EXTENSION_HETERO_MODEL_NOT_LOADED | 356 | Heterogeneous model dimensions not loaded (356) |

View File

@@ -0,0 +1,56 @@
@startuml InspireFace SDK Usage Sequence
!theme plain
title Execution Sequence
footer : 20250615 create by Jingyu Yan
header : @InspireFace
participant "Application" as App
participant "InspireFace SDK" as SDK
participant "FaceSession" as Session
note over SDK, Session
Multiple sessions are supported and may be allocated across threads,
but cached data must not be shared between sessions
end note
== Initialization Phase ==
App -> SDK: HFLaunchInspireFace
SDK --> App: Return initialization result
== Session Creation & Configuration ==
App -> SDK: HFCreateInspireFaceSessionOptional
SDK -> Session: Create session instance
SDK --> App: Return session handle
== Image Preparation ==
App -> SDK: HFCreateImageStream
SDK --> App: Return image handle
== Face Detection & Processing ==
App -> Session: HFExecuteFaceTrack
activate Session
Session --> App: Return MultipleFaceData
App -> Session: HFMultipleFacePipelineProcessOptional
Session --> App: Pipeline processing completed
== Result Retrieval Phase ==
App -> Session: HFGetFaceMaskConfidence
Session --> App: Mask detection result
App -> Session: HFGetFaceQualityConfidence
Session --> App: Quality assessment result
App -> Session: Other APIs
Session --> App: ....
== Resource Release ==
App -> SDK: HFReleaseImageStream
== Program Termination ==
App -> Session: HFReleaseInspireFaceSession
App -> SDK: HFTerminateInspireFace
@enduml

View File

@@ -100,6 +100,26 @@ services:
volumes:
- .:/workspace # Mount the project root directory to the container
command: bash command/build_wheel_manylinux2014_x86.sh
build-manylinux2014-aarch64:
build:
context: .
dockerfile: docker/Dockerfile.manylinux2014_aarch64
environment:
- VERSION=${VERSION}
working_dir: /workspace
volumes:
- .:/workspace # Mount the project root directory to the container
command: bash command/build_wheel_manylinux2014_aarch64.sh
build-manylinux_2_36-aarch64:
build:
context: .
dockerfile: docker/Dockerfile.manylinux_2_36_aarch64
environment:
- VERSION=${VERSION}
working_dir: /workspace
volumes:
- .:/workspace # Mount the project root directory to the container
command: bash command/build_wheel_manylinux_2_36_aarch64.sh
build-tensorrt-cuda12-ubuntu22:
build:
context: .

View File

@@ -10,15 +10,27 @@ RUN apt-get update && \
curl \
git
# Install CMake
RUN apt-get install -y --no-install-recommends
# Install CMake with architecture detection
RUN apt-get update && \
apt-get install -y --no-install-recommends cmake
# Install CMake 3.20.6
RUN cd /opt && \
wget https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-x86_64.tar.gz && \
tar -zxvf cmake-3.20.6-linux-x86_64.tar.gz && \
ln -s /opt/cmake-3.20.6-linux-x86_64/bin/* /usr/local/bin/ && \
rm cmake-3.20.6-linux-x86_64.tar.gz
# Alternative: Install specific CMake version with architecture detection
# RUN apt-get update && \
# apt-get install -y --no-install-recommends wget && \
# cd /opt && \
# if [ "$(uname -m)" = "x86_64" ]; then \
# wget https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-x86_64.tar.gz && \
# tar -zxvf cmake-3.20.6-linux-x86_64.tar.gz && \
# ln -s /opt/cmake-3.20.6-linux-x86_64/bin/* /usr/local/bin/ && \
# rm cmake-3.20.6-linux-x86_64.tar.gz; \
# elif [ "$(uname -m)" = "aarch64" ]; then \
# wget https://github.com/Kitware/CMake/releases/download/v3.20.6/cmake-3.20.6-linux-aarch64.tar.gz && \
# tar -zxvf cmake-3.20.6-linux-aarch64.tar.gz && \
# ln -s /opt/cmake-3.20.6-linux-aarch64/bin/* /usr/local/bin/ && \
# rm cmake-3.20.6-linux-aarch64.tar.gz; \
# else \
# apt-get install -y cmake; \
# fi
# Set the URL and installation path for the Linaro toolchain
ARG LINARO_URL="https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/aarch64-linux-gnu/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu.tar.xz"

View File

@@ -0,0 +1,12 @@
# Use manylinux2014_aarch64 as the base image
# (provides the toolchain for building manylinux2014-compliant aarch64 wheels)
FROM quay.io/pypa/manylinux2014_aarch64

# Optional build-time proxy settings, forwarded via `docker build --build-arg`
ARG https_proxy
ARG http_proxy
ARG all_proxy

# Set the working directory (the project root is mounted here by the
# corresponding docker-compose service)
WORKDIR /workspace

# Default to running Bash
CMD ["/bin/bash"]

View File

@@ -0,0 +1,12 @@
# Use manylinux_2_36_aarch64 as the base image
# (provides the toolchain for building manylinux_2_36-compliant aarch64 wheels)
FROM quay.io/pypa/manylinux_2_36_aarch64

# Optional build-time proxy settings, forwarded via `docker build --build-arg`
ARG https_proxy
ARG http_proxy
ARG all_proxy

# Set the working directory (the project root is mounted here by the
# corresponding docker-compose service)
WORKDIR /workspace

# Default to running Bash
CMD ["/bin/bash"]

View File

@@ -26,4 +26,4 @@ share/python-wheels/
.pytest_cache/
.idea/
*.db
version.txt
version.txt

View File

@@ -100,3 +100,4 @@ python -m unittest discover -s test
1. Ensure that OpenCV and other necessary dependencies are installed on your system
2. Make sure the dynamic library is correctly installed before use
3. Python 3.7 or higher is recommended
4. The default build is CPU-only; to use the GPU, CoreML, or NPU backend, refer to the [documentation](https://doc.inspireface.online/guides/python-rockchip-device.html) to replace the shared library (.so) and build a Python installation package

View File

@@ -6,4 +6,6 @@ from .inspireface import ImageStream, FaceExtended, FaceInformation, SessionCust
set_logging_level, disable_logging, show_system_resource_statistics, get_recommended_cosine_threshold, cosine_similarity_convert_to_percentage, \
get_similarity_converter_config, set_similarity_converter_config, pull_latest_model, switch_landmark_engine, \
HF_PK_AUTO_INCREMENT, HF_PK_MANUAL_INPUT, HF_SEARCH_MODE_EAGER, HF_SEARCH_MODE_EXHAUSTIVE, \
ignore_check_latest_model, set_cuda_device_id, get_cuda_device_id, print_cuda_device_info, get_num_cuda_devices, check_cuda_device_support, terminate
ignore_check_latest_model, set_cuda_device_id, get_cuda_device_id, print_cuda_device_info, get_num_cuda_devices, check_cuda_device_support, terminate, \
InspireFaceError, InvalidInputError, SystemNotReadyError, ProcessingError, ResourceError, HardwareError, FeatureHubError, \
switch_image_processing_backend, HF_IMAGE_PROCESSING_CPU, HF_IMAGE_PROCESSING_RGA, set_image_process_aligned_width, use_oss_download

View File

@@ -0,0 +1,242 @@
from . import herror as errcode
from typing import Optional, Dict, Any
class InspireFaceError(Exception):
    """Base class for all InspireFace exceptions.

    Attributes:
        error_code: Optional numeric error code returned by the C library.
        context: Arbitrary keyword context captured at raise time.
    """

    def __init__(self, message: str, error_code: Optional[int] = None, **context):
        super().__init__(message)
        self.error_code = error_code
        self.context = context
        # Explicit None check: the original truthiness test (`if error_code`)
        # skipped name resolution for code 0 (HSUCCEED).
        self._error_name = self._get_error_name(error_code) if error_code is not None else None

    def _get_error_name(self, error_code: int) -> str:
        """Return the symbolic herror name for an error code, or "UNKNOWN_ERROR"."""
        for name, value in errcode.__dict__.items():
            if isinstance(value, int) and value == error_code:
                return name
        return "UNKNOWN_ERROR"

    def __str__(self):
        base_msg = super().__str__()
        if self.error_code is not None:
            return f"[{self._error_name}({self.error_code})] {base_msg}"
        return base_msg

    @property
    def error_name(self) -> Optional[str]:
        """Symbolic name of the error code, or None when no code was given."""
        return self._error_name
class InvalidInputError(InspireFaceError, ValueError):
    """Raised when an input parameter or data format is invalid."""


class SystemNotReadyError(InspireFaceError, RuntimeError):
    """Raised when the system is not initialized or resources are not ready."""


class ProcessingError(InspireFaceError):
    """Raised for business-logic failures during processing."""


class ResourceError(InspireFaceError, OSError):
    """Raised for resource-related failures (handles, memory, files, etc.)."""


class HardwareError(InspireFaceError):
    """Raised for hardware-related failures (CUDA, devices, etc.)."""


class FeatureHubError(InspireFaceError):
    """Raised for feature-hub related failures."""
# === Error code mapping table ===
# Maps a category label (consumed by check_error below) to the list of
# C-library error codes belonging to that category. Codes not listed in any
# category fall back to the base InspireFaceError.
ERROR_CODE_MAPPING = {
    # Input parameter errors -> InvalidInputError
    'invalid_input': [
        errcode.HERR_INVALID_PARAM,
        errcode.HERR_INVALID_IMAGE_STREAM_PARAM,
        errcode.HERR_INVALID_BUFFER_SIZE,
        errcode.HERR_INVALID_DETECTION_INPUT,
    ],
    # System not ready (models/resources not loaded) -> SystemNotReadyError
    'system_not_ready': [
        errcode.HERR_ARCHIVE_NOT_LOAD,
        errcode.HERR_SESS_INVALID_RESOURCE,
        errcode.HERR_ARCHIVE_LOAD_MODEL_FAILURE,
    ],
    # Processing errors -> ProcessingError
    'processing': [
        errcode.HERR_SESS_FUNCTION_UNUSABLE,
        errcode.HERR_SESS_TRACKER_FAILURE,
        errcode.HERR_SESS_PIPELINE_FAILURE,
        errcode.HERR_SESS_REC_EXTRACT_FAILURE,
        errcode.HERR_SESS_LANDMARK_NOT_ENABLE,
        errcode.HERR_IMAGE_STREAM_DECODE_FAILED,
    ],
    # Resource errors (invalid handles/tokens) -> ResourceError
    'resource': [
        errcode.HERR_INVALID_IMAGE_STREAM_HANDLE,
        errcode.HERR_INVALID_CONTEXT_HANDLE,
        errcode.HERR_INVALID_FACE_TOKEN,
        errcode.HERR_INVALID_FACE_FEATURE,
        errcode.HERR_INVALID_FACE_LIST,
        errcode.HERR_INVALID_IMAGE_BITMAP_HANDLE,
    ],
    # Hardware errors -> HardwareError
    'hardware': [
        errcode.HERR_DEVICE_CUDA_NOT_SUPPORT,
        errcode.HERR_DEVICE_CUDA_UNKNOWN_ERROR,
    ],
    # Feature hub errors -> FeatureHubError
    'feature_hub': [
        errcode.HERR_FT_HUB_DISABLE,
        errcode.HERR_FT_HUB_INSERT_FAILURE,
        errcode.HERR_FT_HUB_NOT_FOUND_FEATURE,
    ],
}
def check_error(error_code: int, operation: str = "", **context):
    """
    Check error code and raise corresponding exception.

    Args:
        error_code: Error code returned by C library
        operation: Operation description for building the error message
        **context: Additional context information attached to the exception

    Raises:
        The InspireFaceError subclass matching the code's category in
        ERROR_CODE_MAPPING, or InspireFaceError itself for unmapped codes.
    """
    if error_code == errcode.HSUCCEED:
        return
    # Resolve the symbolic name for the code, if defined in herror
    error_name = None
    for name, value in errcode.__dict__.items():
        if isinstance(value, int) and value == error_code:
            error_name = name
            break
    # Build the error message
    if operation:
        message = f"{operation} failed"
        if error_name:
            message += f": {error_name}"
    else:
        message = error_name or f"Unknown error (code: {error_code})"
    # Dispatch table replaces the original if/elif chain; unknown categories
    # (and unmapped codes) fall back to the base exception type.
    category_to_exception = {
        'invalid_input': InvalidInputError,
        'system_not_ready': SystemNotReadyError,
        'processing': ProcessingError,
        'resource': ResourceError,
        'hardware': HardwareError,
        'feature_hub': FeatureHubError,
    }
    exception_class = InspireFaceError
    for category, codes in ERROR_CODE_MAPPING.items():
        if error_code in codes:
            exception_class = category_to_exception.get(category, InspireFaceError)
            break
    # Raise the selected exception with the original code and context attached
    raise exception_class(message, error_code, **context)
# === Convenient validation functions ===
def validate_image_format(image, operation: str = "Image validation"):
    """Ensure the image is a 3-D numpy array with 3 or 4 channels.

    Args:
        image: Candidate image buffer.
        operation: Label used to prefix error messages.

    Raises:
        InvalidInputError: If the input is not an ndarray, not (H, W, C),
            or its channel count is not 3 or 4.
    """
    import numpy as np
    if not isinstance(image, np.ndarray):
        raise InvalidInputError(
            f"{operation}: Input must be a numpy array",
            errcode.HERR_INVALID_PARAM,
            input_type=type(image).__name__
        )
    if image.ndim != 3:
        raise InvalidInputError(
            f"{operation}: Image must be 3-dimensional (H, W, C)",
            errcode.HERR_INVALID_IMAGE_STREAM_PARAM,
            actual_shape=image.shape
        )
    channels = image.shape[2]
    if channels not in (3, 4):
        raise InvalidInputError(
            f"{operation}: Image must have 3 or 4 channels",
            errcode.HERR_INVALID_IMAGE_STREAM_PARAM,
            actual_channels=channels
        )
def validate_feature_data(data, operation: str = "Feature validation"):
    """Ensure the feature vector is a float32 numpy array.

    Args:
        data: Candidate feature vector.
        operation: Label used to prefix error messages.

    Raises:
        InvalidInputError: If the input is not an ndarray or not float32.
    """
    import numpy as np
    if not isinstance(data, np.ndarray):
        type_label = type(data).__name__
        raise InvalidInputError(
            f"{operation}: Feature data must be a numpy array",
            errcode.HERR_INVALID_FACE_FEATURE,
            input_type=type_label,
        )
    if data.dtype != np.float32:
        raise InvalidInputError(
            f"{operation}: Feature data must be in float32 format",
            errcode.HERR_INVALID_FACE_FEATURE,
            actual_dtype=str(data.dtype),
        )
def validate_session_initialized(session, operation: str = "Session operation"):
    """Ensure the session object exists and still holds a native handle.

    Raises:
        ResourceError: When the session is None or its handle has been released.
    """
    handle_missing = session is None or session._sess is None
    if handle_missing:
        raise ResourceError(
            f"{operation}: Session not initialized",
            errcode.HERR_INVALID_CONTEXT_HANDLE
        )
# === Exception handling decorators for special scenarios ===
def handle_c_api_errors(operation_name: str):
    """Decorator factory that wraps C API calls with uniform error handling.

    Any exception raised by the wrapped callable that is not already an
    InspireFaceError is re-raised as a ProcessingError carrying the operation
    name and the original exception's type; InspireFaceError instances pass
    through unchanged.

    Args:
        operation_name: Description used as the error-message prefix.

    Returns:
        A decorator that wraps the target function.
    """
    def decorator(func):
        # Local import keeps the module's import section untouched.
        from functools import wraps

        @wraps(func)  # preserve the wrapped function's name/docstring for debugging
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if not isinstance(e, InspireFaceError):
                    # Wrap non-InspireFace exceptions as ProcessingError
                    raise ProcessingError(
                        f"{operation_name}: {str(e)}",
                        context={'original_exception': type(e).__name__}
                    ) from e
                raise
        return wrapper
    return decorator

View File

@@ -0,0 +1,50 @@
"""
InspireFace Error Codes
Auto-generated error code definitions from cpp/inspireface/include/inspireface/herror.h
Generated on: 2025-07-01
"""
# --- General / parameter errors (0-13) ---
HSUCCEED = 0 # Success
HERR_UNKNOWN = 1 # Unknown error (1)
HERR_INVALID_PARAM = 2 # Invalid parameter (2)
HERR_INVALID_IMAGE_STREAM_HANDLE = 3 # Invalid image stream handle (3)
HERR_INVALID_CONTEXT_HANDLE = 4 # Invalid context handle (4)
HERR_INVALID_FACE_TOKEN = 5 # Invalid face token (5)
HERR_INVALID_FACE_FEATURE = 6 # Invalid face feature (6)
HERR_INVALID_FACE_LIST = 7 # Invalid face feature list (7)
HERR_INVALID_BUFFER_SIZE = 8 # Invalid copy token (8)
HERR_INVALID_IMAGE_STREAM_PARAM = 9 # Invalid image param (9)
HERR_INVALID_SERIALIZATION_FAILED = 10 # Invalid face serialization failed (10)
HERR_INVALID_DETECTION_INPUT = 11 # Failed to modify detector input size (11)
HERR_INVALID_IMAGE_BITMAP_HANDLE = 12 # Invalid image bitmap handle (12)
HERR_IMAGE_STREAM_DECODE_FAILED = 13 # ImageStream failed to decode the image (13)
# --- Session errors (101-111) ---
HERR_SESS_FUNCTION_UNUSABLE = 101 # Function not usable (101)
HERR_SESS_TRACKER_FAILURE = 102 # Tracker module not initialized (102)
HERR_SESS_PIPELINE_FAILURE = 103 # Pipeline module not initialized (103)
HERR_SESS_INVALID_RESOURCE = 104 # Invalid static resource (104)
HERR_SESS_LANDMARK_NUM_NOT_MATCH = 105 # The number of input landmark points does not match (105)
HERR_SESS_LANDMARK_NOT_ENABLE = 106 # The landmark model is not enabled (106)
HERR_SESS_KEY_POINT_NUM_NOT_MATCH = 107 # The number of input key points does not match (107)
HERR_SESS_REC_EXTRACT_FAILURE = 108 # Face feature extraction not registered (108)
HERR_SESS_REC_CONTRAST_FEAT_ERR = 109 # Incorrect length of feature vector for comparison (109)
HERR_SESS_FACE_DATA_ERROR = 110 # Face data parsing (110)
HERR_SESS_FACE_REC_OPTION_ERROR = 111 # An optional parameter is incorrect (111)
# --- FeatureHub errors (201-203) ---
HERR_FT_HUB_DISABLE = 201 # FeatureHub is disabled (201)
HERR_FT_HUB_INSERT_FAILURE = 202 # Data insertion error (202)
HERR_FT_HUB_NOT_FOUND_FEATURE = 203 # Get face feature error (203)
# --- Archive / model loading errors (251-255) ---
HERR_ARCHIVE_LOAD_FAILURE = 251 # Archive load failure (251)
HERR_ARCHIVE_LOAD_MODEL_FAILURE = 252 # Model load failure (252)
HERR_ARCHIVE_FILE_FORMAT_ERROR = 253 # The archive format is incorrect (253)
HERR_ARCHIVE_REPETITION_LOAD = 254 # Do not reload the model (254)
HERR_ARCHIVE_NOT_LOAD = 255 # Model not loaded (255)
# --- Device / CUDA errors (301-304) ---
HERR_DEVICE_CUDA_NOT_SUPPORT = 301 # CUDA not supported (301)
HERR_DEVICE_CUDA_TENSORRT_NOT_SUPPORT = 302 # CUDA TensorRT not supported (302)
HERR_DEVICE_CUDA_UNKNOWN_ERROR = 303 # CUDA unknown error (303)
HERR_DEVICE_CUDA_DISABLE = 304 # CUDA support is disabled (304)
# --- Extension module errors (351-356) ---
HERR_EXTENSION_ERROR = 351 # Extension module error (351)
HERR_EXTENSION_MLMODEL_LOAD_FAILED = 352 # MLModel load failed (352)
HERR_EXTENSION_HETERO_MODEL_TAG_ERROR = 353 # Incorrect heterogeneous model tag (353)
HERR_EXTENSION_HETERO_REC_HEAD_CONFIG_ERROR = 354 # Rec head config error (354)
HERR_EXTENSION_HETERO_MODEL_NOT_MATCH = 355 # Heterogeneous model dimensions do not match (355)
HERR_EXTENSION_HETERO_MODEL_NOT_LOADED = 356 # Heterogeneous model dimensions not loaded (356)

View File

@@ -6,6 +6,15 @@ from typing import Tuple, List
from dataclasses import dataclass
from loguru import logger
from .utils import ResourceManager
from .utils.resource import set_use_oss_download
from . import herror as errcode
# Exception system
from .exception import (
check_error, validate_image_format, validate_feature_data,
validate_session_initialized, handle_c_api_errors,
InspireFaceError, InvalidInputError, SystemNotReadyError,
ProcessingError, ResourceError, HardwareError, FeatureHubError
)
# If True, the latest model will not be verified
IGNORE_VERIFICATION_OF_THE_LATEST_MODEL = False
@@ -14,6 +23,14 @@ def ignore_check_latest_model(ignore: bool):
global IGNORE_VERIFICATION_OF_THE_LATEST_MODEL
IGNORE_VERIFICATION_OF_THE_LATEST_MODEL = ignore
def use_oss_download(use_oss: bool = True):
    """Enable OSS download instead of ModelScope (for backward compatibility)

    Args:
        use_oss (bool): If True, use OSS download; if False, use ModelScope (default)
    """
    # Delegates to the resource module's global toggle. Call this before
    # launch() so the first model download uses the selected source.
    set_use_oss_download(use_oss)
class ImageStream(object):
"""
ImageStream class handles the conversion of image data from various sources into a format compatible with the InspireFace library.
@@ -34,11 +51,10 @@ class ImageStream(object):
ImageStream: An instance of the ImageStream class initialized with the provided image data.
Raises:
Exception: If the image does not have 3 or 4 channels.
InvalidInputError: If the image does not have 3 or 4 channels.
"""
validate_image_format(image, "Load from CV image")
h, w, c = image.shape
if c != 3 and c != 4:
raise Exception("The channel must be 3 or 4.")
return ImageStream(image, w, h, stream_format, rotation)
@staticmethod
@@ -87,7 +103,7 @@ class ImageStream(object):
rotation (int): The rotation applied to the image.
Raises:
Exception: If there is an error in creating the image stream.
ResourceError: If there is an error in creating the image stream.
"""
self.rotate = rotation
self.data_format = stream_format
@@ -103,16 +119,14 @@ class ImageStream(object):
image_struct.rotation = self.rotate
self._handle = HFImageStream()
ret = HFCreateImageStream(PHFImageData(image_struct), self._handle)
if ret != 0:
raise Exception("Error in creating ImageStream")
check_error(ret, "Create ImageStream", width=width, height=height, format=stream_format)
def write_to_file(self, file_path: str):
"""
Write the image stream to a file. Like PATH/image.jpg
"""
ret = HFDeBugImageStreamDecodeSave(self._handle, file_path)
if ret != 0:
logger.error(f"Write ImageStream to file error: {ret}")
check_error(ret, "Write ImageStream to file", file_path=file_path)
def release(self):
"""
@@ -123,7 +137,7 @@ class ImageStream(object):
if self._handle is not None:
ret = HFReleaseImageStream(self._handle)
if ret != 0:
logger.error(f"Release ImageStream error: {ret}")
logger.warning(f"Failed to release ImageStream: error code {ret}")
def __del__(self):
"""
@@ -219,8 +233,7 @@ class FaceInformation:
buffer_size = token_size.value
self.buffer = create_string_buffer(buffer_size)
ret = HFCopyFaceBasicToken(_token, self.buffer, token_size)
if ret != 0:
logger.error("Failed to copy face basic token")
check_error(ret, "Copy face basic token", track_id=track_id)
# Store the copied token.
self._token = HFFaceBasicToken()
@@ -289,22 +302,29 @@ class InspireFaceSession(object):
max_detect_num: int = 10, detect_pixel_level=-1, track_by_detect_mode_fps=-1):
"""
Initializes a new session with the provided configuration parameters.
Args:
param (int or SessionCustomParameter): Configuration parameters or flags.
detect_mode (int): Detection mode to be used (e.g., image-based detection).
max_detect_num (int): Maximum number of faces to detect.
Raises:
Exception: If session creation fails.
SystemNotReadyError: If InspireFace is not launched.
ProcessingError: If session creation fails.
"""
# Initialize _sess to None first to prevent AttributeError in __del__
self._sess = None
self.multiple_faces = None
self.param = param
# If InspireFace is not initialized, run launch() use Pikachu model
if not query_launch_status():
ret = launch()
if not ret:
raise Exception("Launch InspireFace failure")
raise SystemNotReadyError("Failed to launch InspireFace automatically")
self.multiple_faces = None
self._sess = HFSession()
self.param = param
if isinstance(self.param, SessionCustomParameter):
ret = HFCreateInspireFaceSession(self.param._c_struct(), detect_mode, max_detect_num, detect_pixel_level,
track_by_detect_mode_fps, self._sess)
@@ -312,26 +332,33 @@ class InspireFaceSession(object):
ret = HFCreateInspireFaceSessionOptional(self.param, detect_mode, max_detect_num, detect_pixel_level,
track_by_detect_mode_fps, self._sess)
else:
raise NotImplemented("")
if ret != 0:
st = f"Create session error: {ret}"
raise Exception(st)
raise InvalidInputError("Session parameter must be SessionCustomParameter or int",
context={'param_type': type(self.param).__name__})
check_error(ret, "Create InspireFace session",
detect_mode=detect_mode, max_detect_num=max_detect_num)
@handle_c_api_errors("Face detection")
def face_detection(self, image) -> List[FaceInformation]:
"""
Detects faces in the given image and returns a list of FaceInformation objects containing detailed face data.
Args:
image (np.ndarray or ImageStream): The image in which to detect faces.
Returns:
List[FaceInformation]: A list of detected face information.
Raises:
ResourceError: If session is not initialized.
ProcessingError: If face detection fails.
"""
validate_session_initialized(self, "Face detection")
stream = self._get_image_stream(image)
self.multiple_faces = HFMultipleFaceData()
ret = HFExecuteFaceTrack(self._sess, stream.handle,
PHFMultipleFaceData(self.multiple_faces))
if ret != 0:
logger.error(f"Face detection error: ", {ret})
return []
PHFMultipleFaceData(self.multiple_faces))
check_error(ret, "Execute face tracking")
if self.multiple_faces.detectedNum > 0:
boxes = self._get_faces_boundary_boxes()
@@ -369,11 +396,12 @@ class InspireFaceSession(object):
return []
def get_face_five_key_points(self, single_face: FaceInformation):
"""Get five key points for a detected face"""
validate_session_initialized(self, "Get face five key points")
num_landmarks = 5
landmarks_array = (HPoint2f * num_landmarks)()
ret = HFGetFaceFiveKeyPointsFromFaceToken(single_face._token, landmarks_array, num_landmarks)
if ret != 0:
logger.error(f"An error occurred obtaining a dense landmark for a single face: {ret}")
check_error(ret, "Get face five key points", track_id=single_face.track_id)
landmark = []
for point in landmarks_array:
@@ -383,12 +411,13 @@ class InspireFaceSession(object):
return np.asarray(landmark).reshape(-1, 2)
def get_face_dense_landmark(self, single_face: FaceInformation):
"""Get dense landmarks for a detected face"""
validate_session_initialized(self, "Get face dense landmark")
num_landmarks = HInt32()
HFGetNumOfFaceDenseLandmark(byref(num_landmarks))
landmarks_array = (HPoint2f * num_landmarks.value)()
ret = HFGetFaceDenseLandmarkFromFaceToken(single_face._token, landmarks_array, num_landmarks)
if ret != 0:
logger.error(f"An error occurred obtaining a dense landmark for a single face: {ret}")
check_error(ret, "Get face dense landmark", track_id=single_face.track_id)
landmark = []
for point in landmarks_array:
@@ -398,14 +427,16 @@ class InspireFaceSession(object):
return np.asarray(landmark).reshape(-1, 2)
def print_track_cost_spend(self):
"""Print tracking cost statistics"""
validate_session_initialized(self, "Print track cost spend")
ret = HFSessionPrintTrackCostSpend(self._sess)
if ret != 0:
logger.error(f"Print track cost spend error: {ret}")
check_error(ret, "Print track cost spend")
def set_enable_track_cost_spend(self, enable: bool):
"""Enable or disable track cost spend monitoring"""
validate_session_initialized(self, "Set enable track cost spend")
ret = HFSessionSetEnableTrackCostSpend(self._sess, enable)
if ret != 0:
logger.error(f"Set enable track cost spend error: {ret}")
check_error(ret, "Set enable track cost spend", enable=enable)
def set_detection_confidence_threshold(self, threshold: float):
"""
@@ -413,13 +444,10 @@ class InspireFaceSession(object):
Args:
threshold (float): The confidence threshold for face detection.
Notes:
If setting the detection confidence threshold fails, an error is logged with the returned status code.
"""
validate_session_initialized(self, "Set detection confidence threshold")
ret = HFSessionSetFaceDetectThreshold(self._sess, threshold)
if ret != 0:
logger.error(f"Set detection confidence threshold error: {ret}")
check_error(ret, "Set detection confidence threshold", threshold=threshold)
def set_track_preview_size(self, size=192):
"""
@@ -427,39 +455,48 @@ class InspireFaceSession(object):
Args:
size (int, optional): The size of the preview area for face tracking. Default is 192.
Notes:
If setting the preview size fails, an error is logged with the returned status code.
"""
validate_session_initialized(self, "Set track preview size")
ret = HFSessionSetTrackPreviewSize(self._sess, size)
if ret != 0:
logger.error(f"Set track preview size error: {ret}")
check_error(ret, "Set track preview size", size=size)
def set_filter_minimum_face_pixel_size(self, min_size=32):
"""Set minimum face pixel size filter"""
validate_session_initialized(self, "Set filter minimum face pixel size")
ret = HFSessionSetFilterMinimumFacePixelSize(self._sess, min_size)
if ret != 0:
logger.error(f"Set filter minimum face pixel size error: {ret}")
check_error(ret, "Set filter minimum face pixel size", min_size=min_size)
def set_track_mode_smooth_ratio(self, ratio=0.025):
"""Set track mode smooth ratio"""
validate_session_initialized(self, "Set track mode smooth ratio")
ret = HFSessionSetTrackModeSmoothRatio(self._sess, ratio)
if ret != 0:
logger.error(f"Set track mode smooth ratio error: {ret}")
check_error(ret, "Set track mode smooth ratio", ratio=ratio)
def set_track_mode_num_smooth_cache_frame(self, num=15):
"""Set track mode number of smooth cache frames"""
validate_session_initialized(self, "Set track mode num smooth cache frame")
ret = HFSessionSetTrackModeNumSmoothCacheFrame(self._sess, num)
if ret != 0:
logger.error(f"Set track mode num smooth cache frame error: {ret}")
check_error(ret, "Set track mode num smooth cache frame", num=num)
def set_track_model_detect_interval(self, num=20):
"""Set track model detect interval"""
validate_session_initialized(self, "Set track model detect interval")
ret = HFSessionSetTrackModeDetectInterval(self._sess, num)
if ret != 0:
logger.error(f"Set track model detect interval error: {ret}")
check_error(ret, "Set track model detect interval", num=num)
def set_landmark_augmentation_num(self, num=1):
"""Set landmark augmentation number"""
validate_session_initialized(self, "Set landmark augmentation num")
ret = HFSessionSetLandmarkAugmentationNum(self._sess, num)
if ret != 0:
logger.error(f"Set landmark augmentation num error: {ret}")
check_error(ret, "Set landmark augmentation num", num=num)
def set_track_lost_recovery_mode(self, value=False):
"""Set track lost recovery mode"""
validate_session_initialized(self, "Set track lost recovery mode")
ret = HFSessionSetTrackLostRecoveryMode(self._sess, value)
check_error(ret, "Set track lost recovery mode", value=value)
@handle_c_api_errors("Face pipeline processing")
def face_pipeline(self, image, faces: List[FaceInformation], exec_param) -> List[FaceExtended]:
"""
Processes detected faces to extract additional attributes based on the provided execution parameters.
@@ -471,10 +508,8 @@ class InspireFaceSession(object):
Returns:
List[FaceExtended]: A list of FaceExtended objects with updated attributes like mask confidence, liveness, etc.
Notes:
If the face pipeline processing fails, an error is logged and an empty list is returned.
"""
validate_session_initialized(self, "Face pipeline processing")
stream = self._get_image_stream(image)
fn, pm, flag = self._get_processing_function_and_param(exec_param)
tokens = [face._token for face in faces]
@@ -486,9 +521,7 @@ class InspireFaceSession(object):
multi_faces.tokens = tokens_ptr
ret = fn(self._sess, stream.handle, PHFMultipleFaceData(multi_faces), pm)
if ret != 0:
logger.error(f"Face pipeline error: {ret}")
return []
check_error(ret, "Face pipeline processing", num_faces=len(faces))
extends = [FaceExtended(-1.0, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, 0, 0, -1, -1, -1, -1) for _ in range(len(faces))]
self._update_mask_confidence(exec_param, flag, extends)
@@ -500,6 +533,7 @@ class InspireFaceSession(object):
return extends
@handle_c_api_errors("Face feature extraction")
def face_feature_extract(self, image, face_information: FaceInformation):
"""
Extracts facial features from a specified face within an image for recognition or comparison purposes.
@@ -510,10 +544,8 @@ class InspireFaceSession(object):
Returns:
np.ndarray: A numpy array containing the extracted facial features, or None if the extraction fails.
Notes:
If the feature extraction process fails, an error is logged and None is returned.
"""
validate_session_initialized(self, "Face feature extraction")
stream = self._get_image_stream(image)
feature_length = HInt32()
HFGetFeatureLength(byref(feature_length))
@@ -522,55 +554,59 @@ class InspireFaceSession(object):
ret = HFFaceFeatureExtractCpy(self._sess, stream.handle, face_information._token,
feature.ctypes.data_as(ctypes.POINTER(HFloat)))
if ret != 0:
logger.error(f"Face feature extract error: {ret}")
return None
check_error(ret, "Face feature extraction", track_id=face_information.track_id)
return feature
@staticmethod
def _get_image_stream(image):
"""Convert image to ImageStream if needed"""
if isinstance(image, np.ndarray):
return ImageStream.load_from_cv_image(image)
elif isinstance(image, ImageStream):
return image
else:
raise NotImplemented("Place check input type.")
raise InvalidInputError("Image must be numpy.ndarray or ImageStream",
context={'input_type': type(image).__name__})
@staticmethod
def _get_processing_function_and_param(exec_param):
"""Get processing function and parameters"""
if isinstance(exec_param, SessionCustomParameter):
return HFMultipleFacePipelineProcess, exec_param._c_struct(), "object"
elif isinstance(exec_param, int):
return HFMultipleFacePipelineProcessOptional, exec_param, "bitmask"
else:
raise NotImplemented("Unsupported parameter type")
raise InvalidInputError("exec_param must be SessionCustomParameter or int",
context={'param_type': type(exec_param).__name__})
def _update_mask_confidence(self, exec_param, flag, extends):
"""Update mask confidence in extends list"""
if (flag == "object" and exec_param.enable_mask_detect) or (
flag == "bitmask" and exec_param & HF_ENABLE_MASK_DETECT):
mask_results = HFFaceMaskConfidence()
ret = HFGetFaceMaskConfidence(self._sess, PHFFaceMaskConfidence(mask_results))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(mask_results.num):
extends[i].mask_confidence = mask_results.confidence[i]
else:
logger.error(f"Get mask result error: {ret}")
logger.warning(f"Failed to get mask confidence: error code {ret}")
def _update_face_interact_confidence(self, exec_param, flag, extends):
"""Update face interaction confidence in extends list"""
if (flag == "object" and exec_param.enable_interaction_liveness) or (
flag == "bitmask" and exec_param & HF_ENABLE_INTERACTION):
results = HFFaceInteractionState()
ret = HFGetFaceInteractionStateResult(self._sess, PHFFaceInteractionState(results))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(results.num):
extends[i].left_eye_status_confidence = results.leftEyeStatusConfidence[i]
extends[i].right_eye_status_confidence = results.rightEyeStatusConfidence[i]
else:
logger.error(f"Get face interact result error: {ret}")
logger.warning(f"Failed to get face interaction state: error code {ret}")
actions = HFFaceInteractionsActions()
ret = HFGetFaceInteractionActionsResult(self._sess, PHFFaceInteractionsActions(actions))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(results.num):
extends[i].action_normal = actions.normal[i]
extends[i].action_shake = actions.shake[i]
@@ -578,89 +614,95 @@ class InspireFaceSession(object):
extends[i].action_head_raise = actions.headRaise[i]
extends[i].action_blink = actions.blink[i]
else:
logger.error(f"Get face action result error: {ret}")
logger.warning(f"Failed to get face interaction actions: error code {ret}")
def _update_face_emotion_confidence(self, exec_param, flag, extends):
"""Update face emotion confidence in extends list"""
if (flag == "object" and exec_param.enable_face_emotion) or (
flag == "bitmask" and exec_param & HF_ENABLE_FACE_EMOTION):
emotion_results = HFFaceEmotionResult()
ret = HFGetFaceEmotionResult(self._sess, PHFFaceEmotionResult(emotion_results))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(emotion_results.num):
extends[i].emotion = emotion_results.emotion[i]
else:
logger.error(f"Get face emotion result error: {ret}")
logger.warning(f"Failed to get face emotion result: error code {ret}")
def _update_rgb_liveness_confidence(self, exec_param, flag, extends: List[FaceExtended]):
"""Update RGB liveness confidence in extends list"""
if (flag == "object" and exec_param.enable_liveness) or (
flag == "bitmask" and exec_param & HF_ENABLE_LIVENESS):
liveness_results = HFRGBLivenessConfidence()
ret = HFGetRGBLivenessConfidence(self._sess, PHFRGBLivenessConfidence(liveness_results))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(liveness_results.num):
extends[i].rgb_liveness_confidence = liveness_results.confidence[i]
else:
logger.error(f"Get rgb liveness result error: {ret}")
logger.warning(f"Failed to get RGB liveness confidence: error code {ret}")
def _update_face_attribute_confidence(self, exec_param, flag, extends: List[FaceExtended]):
"""Update face attribute confidence in extends list"""
if (flag == "object" and exec_param.enable_face_attribute) or (
flag == "bitmask" and exec_param & HF_ENABLE_FACE_ATTRIBUTE):
attribute_results = HFFaceAttributeResult()
ret = HFGetFaceAttributeResult(self._sess, PHFFaceAttributeResult(attribute_results))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(attribute_results.num):
extends[i].gender = attribute_results.gender[i]
extends[i].age_bracket = attribute_results.ageBracket[i]
extends[i].race = attribute_results.race[i]
else:
logger.error(f"Get face attribute result error: {ret}")
logger.warning(f"Failed to get face attribute result: error code {ret}")
def _update_face_quality_confidence(self, exec_param, flag, extends: List[FaceExtended]):
"""Update face quality confidence in extends list"""
if (flag == "object" and exec_param.enable_face_quality) or (
flag == "bitmask" and exec_param & HF_ENABLE_QUALITY):
quality_results = HFFaceQualityConfidence()
ret = HFGetFaceQualityConfidence(self._sess, PHFFaceQualityConfidence(quality_results))
if ret == 0:
if ret == errcode.HSUCCEED:
for i in range(quality_results.num):
extends[i].quality_confidence = quality_results.confidence[i]
else:
logger.error(f"Get quality result error: {ret}")
logger.warning(f"Failed to get face quality confidence: error code {ret}")
def _get_faces_boundary_boxes(self) -> List:
"""Get face boundary boxes from detection results"""
num_of_faces = self.multiple_faces.detectedNum
rects_ptr = self.multiple_faces.rects
rects = [(rects_ptr[i].x, rects_ptr[i].y, rects_ptr[i].width, rects_ptr[i].height) for i in range(num_of_faces)]
return rects
def _get_faces_track_ids(self) -> List:
"""Get face track IDs from detection results"""
num_of_faces = self.multiple_faces.detectedNum
track_ids_ptr = self.multiple_faces.trackIds
track_ids = [track_ids_ptr[i] for i in range(num_of_faces)]
return track_ids
def _get_faces_euler_angle(self) -> List:
"""Get face euler angles from detection results"""
num_of_faces = self.multiple_faces.detectedNum
euler_angle = self.multiple_faces.angles
angles = [(euler_angle.roll[i], euler_angle.yaw[i], euler_angle.pitch[i]) for i in range(num_of_faces)]
return angles
def _get_faces_track_counts(self) -> List:
"""Get face track counts from detection results"""
num_of_faces = self.multiple_faces.detectedNum
track_counts_ptr = self.multiple_faces.trackCounts
track_counts = [track_counts_ptr[i] for i in range(num_of_faces)]
return track_counts
def _get_faces_tokens(self) -> List[HFFaceBasicToken]:
    """Collect the raw face token struct for every detected face."""
    detected = self.multiple_faces
    return [detected.tokens[idx] for idx in range(detected.detectedNum)]
def release(self):
    """Free the native session handle, if one is still held.

    Safe to call more than once: the handle is cleared after release so a
    second call becomes a no-op.
    """
    if self._sess is None:
        return
    HFReleaseInspireFaceSession(self._sess)
    self._sess = None
@@ -670,6 +712,38 @@ class InspireFaceSession(object):
# == Global API ==
def _check_modelscope_availability():
    """
    Check if ModelScope is available when needed and provide helpful error message if not.
    Exits the program if ModelScope is needed but not available and OSS is not enabled.
    """
    import sys
    from .utils.resource import USE_OSS_DOWNLOAD
    # Dynamic check for ModelScope availability (don't rely on cached MODELSCOPE_AVAILABLE)
    modelscope_available = True
    try:
        # Probe the import at call time; a broken/partial install (e.g. missing
        # filelock) surfaces here rather than at package import time.
        from modelscope.hub.snapshot_download import snapshot_download
        print("ModelScope import successful")
    except Exception as e:
        modelscope_available = False
        print(f"ModelScope import failed: {e}")
    # Only fatal when ModelScope is the active download source: with OSS
    # enabled, ModelScope is not needed and the failure is ignored.
    if not USE_OSS_DOWNLOAD and not modelscope_available:
        print("ModelScope is not available, cannot download models!")
        print("\nPlease choose one of the following solutions:")
        print("1. Reinstall ModelScope with all dependencies:")
        print("   pip install --upgrade modelscope")
        print("\n2. Install missing dependencies manually:")
        print("   pip install filelock")
        print("\n3. Switch to OSS download mode:")
        print("   import inspireface as isf")
        print("   isf.use_oss_download(True)  # Execute before calling launch()")
        print("\nNote: OSS download requires stable international network connection")
        # Hard exit: without a working download source there is nothing to fall back to.
        sys.exit(1)
def launch(model_name: str = "Pikachu", resource_path: str = None) -> bool:
"""
Launches the InspireFace system with the specified resource directory.
@@ -681,21 +755,26 @@ def launch(model_name: str = "Pikachu", resource_path: str = None) -> bool:
Returns:
bool: True if the system was successfully launched, False otherwise.
Notes:
A specific error is logged if duplicate loading is detected or if there is any other launch failure.
Raises:
SystemNotReadyError: If launch fails due to resource issues.
"""
if resource_path is None:
sm = ResourceManager()
from .utils.resource import USE_OSS_DOWNLOAD
# Check if ModelScope is available when needed
_check_modelscope_availability()
# Use ModelScope by default unless OSS is forced
sm = ResourceManager(use_modelscope=not USE_OSS_DOWNLOAD)
resource_path = sm.get_model(model_name, ignore_verification=IGNORE_VERIFICATION_OF_THE_LATEST_MODEL)
path_c = String(bytes(resource_path, encoding="utf8"))
ret = HFLaunchInspireFace(path_c)
if ret != 0:
if ret == 1363:
if ret == errcode.HERR_ARCHIVE_REPETITION_LOAD:
logger.warning("Duplicate loading was found")
return True
else:
logger.error(f"Launch InspireFace failure: {ret}")
return False
check_error(ret, "Launch InspireFace", model_name=model_name, resource_path=resource_path)
return True
def pull_latest_model(model_name: str = "Pikachu") -> str:
@@ -706,8 +785,14 @@ def pull_latest_model(model_name: str = "Pikachu") -> str:
model_name (str): the name of the model to use.
Returns:
str: Path to the downloaded model.
"""
sm = ResourceManager()
from .utils.resource import USE_OSS_DOWNLOAD
# Check if ModelScope is available when needed
_check_modelscope_availability()
sm = ResourceManager(use_modelscope=not USE_OSS_DOWNLOAD)
resource_path = sm.get_model(model_name, re_download=True)
return resource_path
@@ -720,18 +805,24 @@ def reload(model_name: str = "Pikachu", resource_path: str = None) -> bool:
resource_path (str): if None, use the default model path.
Returns:
bool: True if reload was successful.
"""
if resource_path is None:
sm = ResourceManager()
from .utils.resource import USE_OSS_DOWNLOAD
# Check if ModelScope is available when needed
_check_modelscope_availability()
sm = ResourceManager(use_modelscope=not USE_OSS_DOWNLOAD)
resource_path = sm.get_model(model_name, ignore_verification=IGNORE_VERIFICATION_OF_THE_LATEST_MODEL)
path_c = String(bytes(resource_path, encoding="utf8"))
ret = HFReloadInspireFace(path_c)
if ret != 0:
if ret == 1363:
if ret == errcode.HERR_ARCHIVE_REPETITION_LOAD:
logger.warning("Duplicate loading was found")
return True
else:
logger.error(f"Launch InspireFace failure: {ret}")
return False
check_error(ret, "Reload InspireFace", model_name=model_name, resource_path=resource_path)
return True
def terminate() -> bool:
@@ -740,13 +831,9 @@ def terminate() -> bool:
Returns:
bool: True if the system was successfully terminated, False otherwise.
Notes:
"""
ret = HFTerminateInspireFace()
if ret != 0:
logger.error(f"Terminate InspireFace failure: {ret}")
return False
check_error(ret, "Terminate InspireFace")
return True
def query_launch_status() -> bool:
@@ -758,16 +845,25 @@ def query_launch_status() -> bool:
"""
status = HInt32()
ret = HFQueryInspireFaceLaunchStatus(byref(status))
if ret != 0:
logger.error(f"Query launch status error: {ret}")
return False
check_error(ret, "Query launch status")
return status.value == 1
def switch_landmark_engine(engine: int):
"""Switch landmark engine"""
ret = HFSwitchLandmarkEngine(engine)
if ret != 0:
logger.error(f"Switch landmark engine error: {ret}")
return False
check_error(ret, "Switch landmark engine", engine=engine)
return True
def switch_image_processing_backend(backend: int):
    """Select the image processing backend used by the library.

    Args:
        backend: Backend identifier constant.

    Returns:
        bool: True when the switch succeeded (check_error raises otherwise).
    """
    status = HFSwitchImageProcessingBackend(backend)
    check_error(status, "Switch image processing backend", backend=backend)
    return True
def set_image_process_aligned_width(width: int):
    """Set the aligned row width used by image processing.

    Args:
        width: Alignment width in pixels.

    Returns:
        bool: True when the setting was applied (check_error raises otherwise).
    """
    status = HFSetImageProcessAlignedWidth(width)
    check_error(status, "Set image process aligned width", width=width)
    return True
@dataclass
@@ -776,9 +872,9 @@ class FeatureHubConfiguration:
Configuration settings for managing the feature hub, including database and search settings.
Attributes:
feature_block_num (int): Number of features per block in the database.
enable_use_db (bool): Flag to indicate if the database should be used.
db_path (str): Path to the database file.
primary_key_mode (int): Primary key mode for the database.
enable_persistence (bool): Flag to indicate if the database should be used.
persistence_db_path (str): Path to the database file.
search_threshold (float): The threshold value for considering a match.
search_mode (int): The mode of searching in the database.
"""
@@ -813,14 +909,9 @@ def feature_hub_enable(config: FeatureHubConfiguration) -> bool:
Returns:
bool: True if successfully enabled, False otherwise.
Notes:
Logs an error if enabling the feature hub fails.
"""
ret = HFFeatureHubDataEnable(config._c_struct())
if ret != 0:
logger.error(f"FeatureHub enable failure: {ret}")
return False
check_error(ret, "Enable FeatureHub")
return True
@@ -830,14 +921,9 @@ def feature_hub_disable() -> bool:
Returns:
bool: True if successfully disabled, False otherwise.
Notes:
Logs an error if disabling the feature hub fails.
"""
ret = HFFeatureHubDataDisable()
if ret != 0:
logger.error(f"FeatureHub disable failure: {ret}")
return False
check_error(ret, "Disable FeatureHub")
return True
@@ -851,10 +937,9 @@ def feature_comparison(feature1: np.ndarray, feature2: np.ndarray) -> float:
Returns:
float: A similarity score, where -1.0 indicates an error during comparison.
Notes:
Logs an error if the comparison process fails.
"""
validate_feature_data(feature1, "Feature comparison")
validate_feature_data(feature2, "Feature comparison")
faces = [feature1, feature2]
feats = []
for face in faces:
@@ -866,10 +951,7 @@ def feature_comparison(feature1: np.ndarray, feature2: np.ndarray) -> float:
comparison_result = HFloat()
ret = HFFaceComparison(feats[0], feats[1], HPFloat(comparison_result))
if ret != 0:
logger.error(f"Comparison error: {ret}")
return -1.0
check_error(ret, "Face feature comparison")
return float(comparison_result.value)
@@ -879,8 +961,7 @@ class FaceIdentity(object):
Attributes:
feature (np.ndarray): The facial features as a numpy array.
custom_id (int): A custom identifier for the face identity.
tag (str): A tag or label associated with the face identity.
id (int): A custom identifier for the face identity.
Methods:
__init__: Initializes a new instance of FaceIdentity.
@@ -894,13 +975,9 @@ class FaceIdentity(object):
Args:
data (np.ndarray): The facial feature data.
custom_id (int): A custom identifier for tracking or referencing the face identity.
tag (str): A descriptive tag or label for the face identity.
id (int): A custom identifier for tracking or referencing the face identity.
"""
if data.dtype != np.float32:
logger.error("The input data must be in float32 format")
raise ValueError("The input data must be in float32 format")
validate_feature_data(data, "FaceIdentity initialization")
self.feature = data
self.id = id
@@ -960,16 +1037,11 @@ def feature_hub_face_insert(face_identity: FaceIdentity) -> Tuple[bool, int]:
face_identity (FaceIdentity): The face identity to insert.
Returns:
bool: True if the face identity was successfully inserted, False otherwise.
Notes:
Logs an error if the insertion process fails.
Tuple[bool, int]: (True, allocated_id) if the face identity was successfully inserted.
"""
alloc_id = HFaceId()
ret = HFFeatureHubInsertFeature(face_identity._c_struct(), HPFaceId(alloc_id))
if ret != 0:
logger.error(f"Failed to insert face feature data into FeatureHub: {ret}")
return False, -1
check_error(ret, "Insert face feature into FeatureHub", identity_id=face_identity.id)
return True, int(alloc_id.value)
@@ -997,20 +1069,14 @@ def feature_hub_face_search(data: np.ndarray) -> SearchResult:
Returns:
SearchResult: The search result containing the confidence and the most similar identity found.
Notes:
If the search operation fails, logs an error and returns a SearchResult with a confidence of -1.
"""
if data.dtype != np.float32:
logger.error("The input data must be in float32 format")
raise ValueError("The input data must be in float32 format")
validate_feature_data(data, "FeatureHub face search")
feature = HFFaceFeature(size=HInt32(data.size), data=data.ctypes.data_as(HPFloat))
confidence = HFloat()
most_similar = HFFaceFeatureIdentity()
ret = HFFeatureHubFaceSearch(feature, HPFloat(confidence), PHFFaceFeatureIdentity(most_similar))
if ret != 0:
logger.error(f"Failed to search face: {ret}")
return SearchResult(confidence=-1, similar_identity=FaceIdentity(np.zeros(0), most_similar.id))
check_error(ret, "Search face in FeatureHub")
if most_similar.id != -1:
search_identity = FaceIdentity.from_ctypes(most_similar)
return SearchResult(confidence=confidence.value, similar_identity=search_identity)
@@ -1029,19 +1095,13 @@ def feature_hub_face_search_top_k(data: np.ndarray, top_k: int) -> List[Tuple]:
Returns:
List[Tuple]: A list of tuples, each containing the confidence and custom ID of the top results.
Notes:
If the search operation fails, an empty list is returned.
"""
if data.dtype != np.float32:
logger.error("The input data must be in float32 format")
raise ValueError("The input data must be in float32 format")
validate_feature_data(data, "FeatureHub face search top k")
feature = HFFaceFeature(size=HInt32(data.size), data=data.ctypes.data_as(HPFloat))
results = HFSearchTopKResults()
ret = HFFeatureHubFaceSearchTopK(feature, top_k, PHFSearchTopKResults(results))
outputs = []
if ret == 0:
if ret == errcode.HSUCCEED:
for idx in range(results.size):
confidence = results.confidence[idx]
id_ = results.ids[idx]
@@ -1104,9 +1164,7 @@ def feature_hub_get_face_identity(custom_id: int):
"""
identify = HFFaceFeatureIdentity()
ret = HFFeatureHubGetFaceIdentity(HFaceId(custom_id), PHFFaceFeatureIdentity(identify))
if ret != 0:
logger.error("Get face identity errors from FeatureHub")
return None
check_error(ret, "Get face identity from FeatureHub", custom_id=custom_id)
return FaceIdentity.from_ctypes(identify)
@@ -1123,8 +1181,7 @@ def feature_hub_get_face_count() -> int:
"""
count = HInt32()
ret = HFFeatureHubGetFaceCount(HPInt32(count))
if ret != 0:
logger.error(f"Failed to get count: {ret}")
check_error(ret, "Get face count")
return int(count.value)
@@ -1151,8 +1208,7 @@ def view_table_in_terminal():
Logs an error if the operation to view the table fails.
"""
ret = HFFeatureHubViewDBTable()
if ret != 0:
logger.error(f"Failed to view DB: {ret}")
check_error(ret, "View DB table")
def get_recommended_cosine_threshold() -> float:
"""
@@ -1249,10 +1305,7 @@ def set_expansive_hardware_rockchip_dma_heap_path(path: str):
Sets the path to the expansive hardware Rockchip DMA heap.
"""
ret = HFSetExpansiveHardwareRockchipDmaHeapPath(path)
if ret != 0:
logger.error(f"Failed to set expansive hardware Rockchip DMA heap path: {ret}")
return False
return True
check_error(ret, "Set expansive hardware Rockchip DMA heap path", path=path)
def query_expansive_hardware_rockchip_dma_heap_path() -> str:
"""
@@ -1260,9 +1313,7 @@ def query_expansive_hardware_rockchip_dma_heap_path() -> str:
"""
path = HString()
ret = HFQueryExpansiveHardwareRockchipDmaHeapPath(path)
if ret != 0:
logger.error(f"Failed to query expansive hardware Rockchip DMA heap path: {ret}")
return None
check_error(ret, "Query expansive hardware Rockchip DMA heap path")
return str(path.value)
@@ -1271,8 +1322,7 @@ def set_cuda_device_id(device_id: int):
Sets the CUDA device ID.
"""
ret = HFSetCudaDeviceId(device_id)
if ret != 0:
logger.error(f"Failed to set CUDA device ID: {ret}")
check_error(ret, "Set CUDA device ID", device_id=device_id)
def get_cuda_device_id() -> int:
"""
@@ -1280,8 +1330,7 @@ def get_cuda_device_id() -> int:
"""
id = HInt32()
ret = HFGetCudaDeviceId(id)
if ret != 0:
logger.error(f"Failed to get CUDA device ID: {ret}")
check_error(ret, "Get CUDA device ID")
return int(id.value)
def print_cuda_device_info():
@@ -1296,8 +1345,7 @@ def get_num_cuda_devices() -> int:
"""
num = HInt32()
ret = HFGetNumCudaDevices(num)
if ret != 0:
logger.error(f"Failed to get number of CUDA devices: {ret}")
check_error(ret, "Get number of CUDA devices")
return int(num.value)
def check_cuda_device_support() -> bool:
@@ -1306,6 +1354,5 @@ def check_cuda_device_support() -> bool:
"""
is_support = HInt32()
ret = HFCheckCudaDeviceSupport(is_support)
if ret != 0:
logger.error(f"Failed to check CUDA device support: {ret}")
check_error(ret, "Check CUDA device support")
return bool(is_support.value)

View File

@@ -1,3 +1,18 @@
"""
InspireFace Resource Manager
This module provides model downloading functionality with two modes:
1. Original mode: Download models from COS (Tencent Cloud Object Storage)
2. ModelScope mode: Download models from ModelScope platform
ModelScope mode usage:
rm = ResourceManager(use_modelscope=True, modelscope_model_id="tunmxy/InspireFace")
model_path = rm.get_model("Gundam_RV1106")
Requirements for ModelScope mode:
pip install modelscope
"""
import os
import sys
from pathlib import Path
@@ -5,6 +20,24 @@ import urllib.request
import ssl
import hashlib
try:
from modelscope.hub.snapshot_download import snapshot_download
MODELSCOPE_AVAILABLE = True
except ImportError:
MODELSCOPE_AVAILABLE = False
# Global configuration for resource downloading
USE_OSS_DOWNLOAD = False # If True, force use OSS download instead of ModelScope
def set_use_oss_download(use_oss: bool):
    """Select the model-download source for subsequent ResourceManager calls.

    Args:
        use_oss (bool): True forces OSS (Tencent COS) downloads; False restores
            the default ModelScope source.
    """
    global USE_OSS_DOWNLOAD
    USE_OSS_DOWNLOAD = use_oss
def get_file_hash_sha256(file_path):
sha256 = hashlib.sha256()
with open(file_path, 'rb') as f:
@@ -13,45 +46,98 @@ def get_file_hash_sha256(file_path):
return sha256.hexdigest()
class ResourceManager:
def __init__(self):
"""Initialize resource manager and create necessary directories"""
def __init__(self, use_modelscope: bool = True, modelscope_model_id: str = "tunmxy/InspireFace"):
"""Initialize resource manager and create necessary directories
Args:
use_modelscope: Whether to download models from ModelScope platform
modelscope_model_id: ModelScope model ID (default: tunmxy/InspireFace)
"""
self.user_home = Path.home()
self.base_dir = self.user_home / '.inspireface'
self.models_dir = self.base_dir / 'models'
# ModelScope configuration
self.use_modelscope = use_modelscope
self.modelscope_model_id = modelscope_model_id
self.modelscope_cache_dir = self.base_dir / 'ms'
# Create directories
self.base_dir.mkdir(exist_ok=True)
self.models_dir.mkdir(exist_ok=True)
if self.use_modelscope:
self.modelscope_cache_dir.mkdir(exist_ok=True)
# Check ModelScope availability
if self.use_modelscope and not MODELSCOPE_AVAILABLE:
raise ImportError(
"ModelScope is not available. You have two options:\n"
"1. Install ModelScope: pip install modelscope\n"
"2. Switch to OSS download mode by calling: inspireface.use_oss_download(True) before using InspireFace"
)
# Model URLs
self._MODEL_LIST = {
"Pikachu": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu",
"url": "https://inspireface-1259028827.cos.ap-singapore.myqcloud.com/inspireface_modelzoo/t4/Pikachu",
"filename": "Pikachu",
"md5": "a7ca2d8de26fb1adc1114b437971d841e14afc894fa9869618139da10e0d4357"
"md5": "5037ba1f49905b783a1c973d5d58b834a645922cc2814c8e3ca630a38dc24431"
},
"Megatron": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron",
"url": "https://inspireface-1259028827.cos.ap-singapore.myqcloud.com/inspireface_modelzoo/t4/Megatron",
"filename": "Megatron",
"md5": "709fddf024d9f34ec034d8ef79a4779e1543b867b05e428c1d4b766f69287050"
},
"Megatron_TRT": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron_TRT",
"url": "https://inspireface-1259028827.cos.ap-singapore.myqcloud.com/inspireface_modelzoo/t4/Megatron_TRT",
"filename": "Megatron_TRT",
"md5": "bc9123bdc510954b28d703b8ffe6023f469fb81123fd0b0b27fd452dfa369bab"
},
"Gundam_RK356X": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK356X",
"url": "https://inspireface-1259028827.cos.ap-singapore.myqcloud.com/inspireface_modelzoo/t4/Gundam_RK356X",
"filename": "Gundam_RK356X",
"md5": "0fa12a425337ed98bd82610768a50de71cf93ef42a0929ba06cc94c86f4bd415"
},
"Gundam_RK3588": {
"url": "https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK3588",
"url": "https://inspireface-1259028827.cos.ap-singapore.myqcloud.com/inspireface_modelzoo/t4/Gundam_RK3588",
"filename": "Gundam_RK3588",
"md5": "66070e8d654408b666a2210bd498a976bbad8b33aef138c623e652f8d956641e"
}
}
def _download_from_modelscope(self, model_name: str) -> str:
    """Download a single model file from the ModelScope hub.

    Args:
        model_name: Name of the model file to download (also used as the
            file-pattern filter so only that file is fetched).

    Returns:
        str: Filesystem path to the downloaded model file.

    Raises:
        ImportError: If the modelscope package is not installed.
        RuntimeError: If the download fails or the file is missing from the
            snapshot (note: the FileNotFoundError raised below is caught by
            the blanket `except Exception` and re-wrapped as RuntimeError).
    """
    if not MODELSCOPE_AVAILABLE:
        raise ImportError("ModelScope is not available. Please install it with: pip install modelscope")
    print(f"Downloading model '{model_name}' from ModelScope...")
    try:
        # Download only the requested model file from the repository snapshot;
        # snapshot_download returns the local snapshot directory.
        cache_dir = snapshot_download(
            model_id=self.modelscope_model_id,
            cache_dir=str(self.modelscope_cache_dir),
            allow_file_pattern=[model_name]  # Only download the specific model file
        )
        model_file_path = Path(cache_dir) / model_name
        if not model_file_path.exists():
            raise FileNotFoundError(f"Model file '{model_name}' not found in downloaded repository")
        print(f"ModelScope download completed: {model_file_path}")
        return str(model_file_path)
    except Exception as e:
        # Normalize every failure mode into a single exception type for callers.
        raise RuntimeError(f"Failed to download model from ModelScope: {e}")
def get_model(self, name: str, re_download: bool = False, ignore_verification: bool = False) -> str:
"""
Get model path. Download if not exists or re_download is True.
@@ -64,6 +150,15 @@ class ResourceManager:
Returns:
str: Full path to model file
"""
# Check global OSS setting first, then instance setting
use_oss = USE_OSS_DOWNLOAD
use_modelscope_actual = self.use_modelscope and not use_oss
# Use ModelScope download if enabled and OSS is not forced
if use_modelscope_actual:
return self._download_from_modelscope_with_cache(name, re_download)
# Original download logic for backwards compatibility
if name not in self._MODEL_LIST:
raise ValueError(f"Model '{name}' not found. Available models: {list(self._MODEL_LIST.keys())}")
@@ -124,13 +219,51 @@ class ResourceManager:
if downloading_flag.exists():
downloading_flag.unlink()
raise RuntimeError(f"Failed to download model: {e}")
def _download_from_modelscope_with_cache(self, name: str, re_download: bool = False) -> str:
    """Return a locally cached ModelScope model, downloading it when absent.

    Args:
        name: Model name.
        re_download: Force re-download if True, bypassing the cache check.

    Returns:
        str: Path to the model file.
    """
    # Check if the model already exists directly under the ModelScope cache dir.
    # NOTE(review): _download_from_modelscope returns a path under the snapshot
    # directory returned by snapshot_download, which may be a subdirectory of
    # self.modelscope_cache_dir — if so this cache probe never hits; verify.
    model_file_path = self.modelscope_cache_dir / name
    if model_file_path.exists() and not re_download:
        print(f"Using cached model '{name}' from ModelScope")
        return str(model_file_path)
    # Cache miss (or forced refresh): fetch from ModelScope.
    return self._download_from_modelscope(name)
# Usage examples
if __name__ == "__main__":
try:
# Example 1: Default mode (ModelScope)
print("=== Default mode (ModelScope) ===")
rm = ResourceManager()
model_path = rm.get_model("Pikachu")
print(f"Model path: {model_path}")
model_path = rm.get_model("Gundam_RV1106")
print(f"ModelScope model path: {model_path}")
# Example 2: Force OSS mode using global setting
print("\n=== OSS mode (global setting) ===")
set_use_oss_download(True)
rm_oss = ResourceManager()
model_path_oss = rm_oss.get_model("Pikachu")
print(f"OSS model path: {model_path_oss}")
# Reset to default
set_use_oss_download(False)
# Example 3: Explicit ModelScope mode
print("\n=== Explicit ModelScope mode ===")
rm_ms = ResourceManager(use_modelscope=True, modelscope_model_id="tunmxy/InspireFace")
model_path_ms = rm_ms.get_model("Gundam_RV1106")
print(f"Explicit ModelScope model path: {model_path_ms}")
except Exception as e:
print(f"Error: {e}")

View File

View File

@@ -2,7 +2,6 @@ import os
import cv2
import inspireface as isf
import click
import numpy as np
race_tags = ["Black", "Asian", "Latino/Hispanic", "Middle Eastern", "White"]
gender_tags = ["Female", "Male"]
@@ -20,6 +19,7 @@ def case_face_detection_image(image_path, show):
This is a sample application for face detection and tracking using an image.
It also includes pipeline extensions such as RGB liveness, mask detection, and face quality evaluation.
"""
isf.switch_image_processing_backend(isf.HF_IMAGE_PROCESSING_CPU)
opt = isf.HF_ENABLE_FACE_RECOGNITION | isf.HF_ENABLE_QUALITY | isf.HF_ENABLE_MASK_DETECT | \
isf.HF_ENABLE_LIVENESS | isf.HF_ENABLE_INTERACTION | isf.HF_ENABLE_FACE_ATTRIBUTE | isf.HF_ENABLE_FACE_EMOTION
session = isf.InspireFaceSession(opt, isf.HF_DETECT_MODE_ALWAYS_DETECT)

View File

@@ -55,6 +55,7 @@ def case_face_tracker_from_video(source, show, out):
session.set_track_mode_num_smooth_cache_frame(15)
session.set_filter_minimum_face_pixel_size(0)
session.set_track_model_detect_interval(0)
session.set_track_lost_recovery_mode(True)
session.set_enable_track_cost_spend(True)
# Determine if the source is a digital webcam index or a video file path.
try:

View File

@@ -1,5 +1,4 @@
import os
import cv2
import inspireface as isf
import numpy as np
import os

View File

@@ -0,0 +1,123 @@
import os
import inspireface as isf
import numpy as np
import os
import random
random.seed(43)
def gen_feature():
    """Draw a uniform random 512-d float32 vector and scale it to unit length."""
    raw = np.random.uniform(-1, 1, 512).astype(np.float32)
    return raw / np.linalg.norm(raw)
def gen_similar_feature(feature):
    """Perturb `feature` with unit-norm uniform noise (weight 0.3) and renormalize.

    Args:
        feature: 1-D float vector to perturb.

    Returns:
        A unit-length float32 vector close to `feature`.
    """
    perturbation = np.random.uniform(-1, 1, len(feature)).astype(np.float32)
    perturbation = perturbation / np.linalg.norm(perturbation)
    blended = feature + 0.3 * perturbation
    return blended / np.linalg.norm(blended)
def case_feature_hub():
    """End-to-end FeatureHub exercise: enable with a fresh SQLite-backed store,
    then insert, search, update, remove, and top-k search random embeddings,
    asserting the expected outcome at each step."""
    # Set db path; start from a clean database so auto-increment ids are predictable.
    db_path = "test.db"
    if os.path.exists(db_path):
        os.remove(db_path)
    # Configure the feature management system.
    feature_hub_config = isf.FeatureHubConfiguration(
        primary_key_mode=isf.HF_PK_AUTO_INCREMENT,
        enable_persistence=True,
        persistence_db_path=db_path,
        search_threshold=0.48,
        search_mode=isf.HF_SEARCH_MODE_EAGER,
    )
    ret = isf.feature_hub_enable(feature_hub_config)
    assert ret, "Failed to enable FeatureHub."
    embedding_list = []
    for i in range(10):
        # Generate a random embedding; id -1 lets the hub auto-assign a primary key.
        embedding = gen_feature()
        face_identity = isf.FaceIdentity(embedding, -1)
        # Insert the face identity into the feature hub
        ret, id = isf.feature_hub_face_insert(face_identity)
        assert ret, "Failed to insert face identity."
        print(f"Inserted face id: {id}")
        embedding_list.append(embedding)
    print(f"face count: {isf.feature_hub_get_face_count()}")
    assert isf.feature_hub_get_face_count() == 10, "Failed to insert face identity."
    # Search for the face identity using the 4th inserted embedding.
    query_embedding = embedding_list[3]
    search_result = isf.feature_hub_face_search(query_embedding)
    assert search_result, "Failed to search for face identity."
    # The auto-increment id is calculated starting from 1,
    # so list index 3 corresponds to stored id 4.
    assert search_result.similar_identity.id == 3 + 1, "Failed to search for face identity."
    print(f"search confidence: {search_result.confidence}")
    # Update the face identity: replace id 4's embedding with a new random one.
    update_embedding = gen_feature()
    update_face_identity = isf.FaceIdentity(update_embedding, search_result.similar_identity.id)
    ret = isf.feature_hub_face_update(update_face_identity)
    assert ret, "Failed to update face identity."
    print(f"Updated face id: {search_result.similar_identity.id}")
    # Search for the face identity again; the old embedding must no longer match.
    search_result = isf.feature_hub_face_search(query_embedding)
    assert search_result, "Failed to search for face identity."
    assert search_result.similar_identity.id == -1, "Failed to update face identity."
    print(f"search confidence: {search_result.confidence}, id: {search_result.similar_identity.id}")
    # Delete the face identity with id 4.
    ret = isf.feature_hub_face_remove(4)
    assert ret, "Failed to delete face identity."
    print(f"Deleted face id: {4}")
    # Search for the face identity again; the removed id must not be found.
    search_result = isf.feature_hub_face_search(embedding_list[3])
    assert search_result.similar_identity.id == -1, "Failed to delete face identity."
    print(f"search confidence: {search_result.confidence}, id: {search_result.similar_identity.id}")
    print(f"face count: {isf.feature_hub_get_face_count()}")
    assert isf.feature_hub_get_face_count() == 9, "Failed to delete face identity."
    # Top-k sample
    # Set target face id = 9
    target_face_id = 9
    # Number of extra similar features to insert (k = 4).
    k = 4
    # Generate k features similar to the target's embedding.
    similar_features = []
    for i in range(k):
        similar_features.append(gen_similar_feature(embedding_list[target_face_id - 1]))
    # Insert the similar features into the feature hub, collecting their new ids.
    expect_ids = []
    for similar_feature in similar_features:
        similar_face_identity = isf.FaceIdentity(similar_feature, -1)
        ret, id = isf.feature_hub_face_insert(similar_face_identity)
        assert ret, "Failed to insert similar face identity."
        print(f"Inserted similar face id: {id}")
        expect_ids.append(id)
    # The target face id itself should also appear in the top-k results.
    expect_ids.append(target_face_id)
    print(f"expect ids: {expect_ids}")
    # Search the top-k similar features with a generous limit; only the
    # k inserted neighbors plus the target should clear the threshold.
    top_k_search_result = isf.feature_hub_face_search_top_k(embedding_list[target_face_id - 1], 1000)
    assert len(top_k_search_result) == k + 1, "Failed to search for top-k similar features."
    for result in top_k_search_result:
        confidence, _id = result
        print(f"search confidence: {confidence}, id: {_id}")
        assert _id in expect_ids, "Failed to search for top-k similar features."
        # NOTE(review): this print duplicates the one above — possibly a
        # leftover; confirm whether both are intended.
        print(f"search confidence: {confidence}, id: {_id}")
if __name__ == "__main__":
    case_feature_hub()

View File

@@ -13,6 +13,15 @@ def get_version():
except FileNotFoundError:
return "0.0.0"
def get_post_version():
    """Read the post-release version suffix from the sibling `post` file.

    Returns:
        str: The stripped file contents, or "" when the file does not exist.
    """
    post_file = os.path.join(os.path.dirname(__file__), 'post')
    try:
        with open(post_file, 'r') as fh:
            return fh.read().strip()
    except FileNotFoundError:
        return ""
def get_wheel_platform_tag():
"""Get wheel package platform tag"""
system = platform.system().lower()
@@ -105,7 +114,7 @@ lib_path = os.path.join('modules', 'core', 'libs', system, arch, '*')
setup(
name='inspireface',
version=get_version(),
version=get_version() + get_post_version(),
packages=find_packages(),
# package_data path should be relative to package directory
package_data={
@@ -113,7 +122,9 @@ setup(
},
install_requires=[
'numpy',
'loguru'
'loguru',
'filelock',
'modelscope'
],
author='Jingyu Yan',
author_email='tunmxy@163.com',

View File

@@ -1,57 +0,0 @@
# Error Feedback Codes
During the use of InspireFace, some error feedback codes may be generated. Here is a table of error feedback codes.
| Index | Name | Code | Comment |
| --- | --- | --- | --- |
| 1 | HSUCCEED | 0 | Success |
| 2 | HERR_BASIC_BASE | 1 | Basic error types |
| 3 | HERR_UNKNOWN | 1 | Unknown error |
| 4 | HERR_INVALID_PARAM | 2 | Invalid parameter |
| 5 | HERR_INVALID_IMAGE_STREAM_HANDLE | 25 | Invalid image stream handle |
| 6 | HERR_INVALID_CONTEXT_HANDLE | 26 | Invalid context handle |
| 7 | HERR_INVALID_FACE_TOKEN | 31 | Invalid face token |
| 8 | HERR_INVALID_FACE_FEATURE | 32 | Invalid face feature |
| 9 | HERR_INVALID_FACE_LIST | 33 | Invalid face feature list |
| 10 | HERR_INVALID_BUFFER_SIZE | 34 | Invalid copy token |
| 11 | HERR_INVALID_IMAGE_STREAM_PARAM | 35 | Invalid image param |
| 12 | HERR_INVALID_SERIALIZATION_FAILED | 36 | Invalid face serialization failed |
| 13 | HERR_INVALID_DETECTION_INPUT | 37 | Failed to modify detector input size |
| 14 | HERR_INVALID_IMAGE_BITMAP_HANDLE | 38 | Invalid image bitmap handle |
| 15 | HERR_SESS_BASE | 1280 | Session error types |
| 16 | HERR_SESS_FUNCTION_UNUSABLE | 1282 | Function not usable |
| 17 | HERR_SESS_TRACKER_FAILURE | 1283 | Tracker module not initialized |
| 18 | HERR_SESS_INVALID_RESOURCE | 1290 | Invalid static resource |
| 19 | HERR_SESS_NUM_OF_MODELS_NOT_MATCH | 1291 | Number of models does not match |
| 20 | HERR_SESS_LANDMARK_NUM_NOT_MATCH | 1300 | The number of input landmark points does not match |
| 21 | HERR_SESS_LANDMARK_NOT_ENABLE | 1301 | The number of input landmark points does not match |
| 22 | HERR_SESS_KEY_POINT_NUM_NOT_MATCH | 1302 | The number of input key points does not match |
| 23 | HERR_SESS_PIPELINE_FAILURE | 1288 | Pipeline module not initialized |
| 24 | HERR_SESS_REC_EXTRACT_FAILURE | 1295 | Face feature extraction not registered |
| 25 | HERR_SESS_REC_DEL_FAILURE | 1296 | Face feature deletion failed due to out of range index |
| 26 | HERR_SESS_REC_UPDATE_FAILURE | 1297 | Face feature update failed due to out of range index |
| 27 | HERR_SESS_REC_ADD_FEAT_EMPTY | 1298 | Feature vector for registration cannot be empty |
| 28 | HERR_SESS_REC_FEAT_SIZE_ERR | 1299 | Incorrect length of feature vector for registration |
| 29 | HERR_SESS_REC_INVALID_INDEX | 1300 | Invalid index number |
| 30 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 1303 | Incorrect length of feature vector for comparison |
| 31 | HERR_SESS_REC_BLOCK_FULL | 1304 | Feature vector block full |
| 32 | HERR_SESS_REC_BLOCK_DEL_FAILURE | 1305 | Deletion failed |
| 33 | HERR_SESS_REC_BLOCK_UPDATE_FAILURE | 1306 | Update failed |
| 34 | HERR_SESS_REC_ID_ALREADY_EXIST | 1307 | ID already exists |
| 35 | HERR_SESS_FACE_DATA_ERROR | 1310 | Face data parsing |
| 36 | HERR_SESS_FACE_REC_OPTION_ERROR | 1320 | An optional parameter is incorrect |
| 37 | HERR_FT_HUB_DISABLE | 1329 | FeatureHub is disabled |
| 38 | HERR_FT_HUB_OPEN_ERROR | 1330 | Database open error |
| 39 | HERR_FT_HUB_NOT_OPENED | 1331 | Database not opened |
| 40 | HERR_FT_HUB_NO_RECORD_FOUND | 1332 | No record found |
| 41 | HERR_FT_HUB_CHECK_TABLE_ERROR | 1333 | Data table check error |
| 42 | HERR_FT_HUB_INSERT_FAILURE | 1334 | Data insertion error |
| 43 | HERR_FT_HUB_PREPARING_FAILURE | 1335 | Data preparation error |
| 44 | HERR_FT_HUB_EXECUTING_FAILURE | 1336 | SQL execution error |
| 45 | HERR_FT_HUB_NOT_VALID_FOLDER_PATH | 1337 | Invalid folder path |
| 46 | HERR_FT_HUB_ENABLE_REPETITION | 1338 | Enable db function repeatedly |
| 47 | HERR_FT_HUB_DISABLE_REPETITION | 1339 | Disable db function repeatedly |
| 48 | HERR_FT_HUB_NOT_FOUND_FEATURE | 1340 | Get face feature error |
| 49 | HERR_ARCHIVE_LOAD_FAILURE | 1360 | Archive load failure |
| 50 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 1361 | Model load failure |
| 51 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 1362 | The archive format is incorrect |
| 52 | HERR_ARCHIVE_REPETITION_LOAD | 1363 | Do not reload the model |
| 53 | HERR_ARCHIVE_NOT_LOAD | 1364 | Model not loaded |

View File

@@ -0,0 +1,109 @@
import click
import re
from datetime import datetime
# Function to calculate the actual error code value based on the expressions
def calculate_error_code_value(error_code_str, error_definitions):
    """Resolve a C-style error-code expression to its integer value.

    Hex literals are rewritten to decimal (both `0x` and `0X`, any digit case)
    and previously resolved error names are substituted with their values
    before the arithmetic expression is evaluated.

    Args:
        error_code_str: Raw expression text from a #define, e.g. "(HERR_SESS_BASE + 3)".
        error_definitions: Mapping of already-resolved error names to values.

    Returns:
        The evaluated integer value, or the (possibly partially substituted)
        expression string when evaluation fails, e.g. on an undefined reference.
    """
    try:
        # Replace hex literals with decimal; case-insensitive on both the
        # 0x prefix and the hex digits (the previous pattern only matched
        # uppercase 0X with uppercase digits).
        error_code_str = re.sub(r'0[xX]([0-9A-Fa-f]+)', lambda m: str(int(m.group(1), 16)), error_code_str)
        for error_name, error_value in error_definitions.items():
            error_code_str = error_code_str.replace(error_name, str(error_value))
        # Evaluate the arithmetic with builtins stripped; the input is
        # build-time header text, but a bare namespace keeps eval() from
        # reaching anything beyond plain arithmetic.
        return eval(error_code_str, {"__builtins__": {}}, {})
    except Exception:
        # Undefined reference or malformed expression: return the text as-is
        # so the caller can still record/display it.
        return error_code_str
# Parses the error-code #defines located between the anchor markers of a C header.
def parse_and_calculate_error_codes(header_content):
    """Extract and resolve error-code definitions from a C header string.

    Only the region between '// [Anchor-Begin]' and '// [Anchor-End]' is
    scanned. Each `#define NAME EXPR // comment` line is resolved via
    calculate_error_code_value; names ending in '_BASE' are kept for
    expression resolution but excluded from the output.

    Args:
        header_content: Full text of the C header file.

    Returns:
        list[tuple]: (name, value, comment) triples in definition order.
    """
    begin_tag = '// [Anchor-Begin]'
    end_tag = '// [Anchor-End]'
    begin_at = header_content.find(begin_tag)
    end_at = header_content.find(end_tag, begin_at)
    region = header_content[begin_at + len(begin_tag):end_at].strip()
    resolved = {}  # name -> computed value, used when expanding later expressions
    table = []
    for raw_line in region.split('\n'):
        entry = raw_line.strip()
        # Only #define lines matter; blanks and other text are skipped.
        if not entry.startswith('#define'):
            continue
        # Split off the trailing comment (text between the first and second '//').
        segments = entry.split('//')
        code_text = segments[0].strip()
        comment = segments[1].strip() if len(segments) > 1 else ''
        # "#define NAME EXPR" -> at most three whitespace-separated fields.
        fields = code_text.split(None, 2)
        if len(fields) < 2:
            continue
        name = fields[1]
        expression = fields[2].strip('()') if len(fields) > 2 else ''
        value = calculate_error_code_value(expression, resolved)
        # Remember every definition so later expressions can reference it.
        resolved[name] = value
        # Base offsets are bookkeeping only; keep them out of the table.
        if name.endswith('_BASE'):
            continue
        table.append((name, value, comment))
    return table
# Click command for processing the header file and outputting Python error table
@click.command()
@click.argument('header_path', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True))
@click.argument('output_path', type=click.Path(file_okay=True, dir_okay=False, writable=True))
def process_header(header_path, output_path):
    """Generate a Python error-code module from a C header.

    Reads HEADER_PATH, parses the #define error codes between the
    [Anchor-Begin]/[Anchor-End] markers, and writes flat ``NAME = value``
    definitions to OUTPUT_PATH.
    """
    # Read the header file content
    with open(header_path, 'r', encoding='utf-8') as file:
        header_content = file.read()
    # Parse and calculate the error codes from the header content
    parsed_error_codes = parse_and_calculate_error_codes(header_content)
    # Generate Python file content: a module docstring recording provenance.
    python_content = f'''"""
InspireFace Error Codes
Auto-generated error code definitions from {header_path}
Generated on: {datetime.now().strftime("%Y-%m-%d")}
"""
'''
    # One flat assignment per code, carrying over the header comment if any.
    for name, code, comment in parsed_error_codes:
        if comment:
            python_content += f"{name} = {code} # {comment}\n"
        else:
            python_content += f"{name} = {code}\n"
    # Write the Python file
    with open(output_path, 'w', encoding='utf-8') as py_file:
        py_file.write(python_content)
    click.echo(f"Python error table has been written to {output_path}")
    click.echo(f"Generated {len(parsed_error_codes)} error code definitions")

View File

@@ -32,7 +32,7 @@ def main(model_dir, models):
if os.path.exists(file_path):
md5 = get_file_hash_sha256(file_path)
model_info[file] = {
"url": f"https://github.com/HyperInspire/InspireFace/releases/download/v1.x/{file}",
"url": f"https://inspireface-1259028827.cos.ap-singapore.myqcloud.com/inspireface_modelzoo/t4/{file}",
"filename": file,
"md5": md5
}

View File

@@ -56,9 +56,13 @@ def parse_and_calculate_error_codes(header_content):
# Calculate the actual error code value
error_code_value = calculate_error_code_value(error_code_str, error_definitions)
# Store the calculated error code value
# Store the calculated error code value for later reference
error_definitions[error_name] = error_code_value
# Skip base offset definitions (ending with _BASE)
if error_name.endswith('_BASE'):
continue
# Append the extracted information to the error_codes list
error_codes.append((error_name, error_code_value, comment_part))
@@ -80,6 +84,8 @@ def process_header(header_path, output_path):
During the use of InspireFace, some error feedback codes may be generated. Here is a table of error feedback codes.
- As of **June 15, 2025**, the error code definitions have been restructured. Some legacy codes from historical versions have been removed, and a more streamlined version has been reorganized and consolidated.
"""
# Prepare the Markdown table header

View File

@@ -0,0 +1,53 @@
import os
import click
from modelscope.hub.api import HubApi
@click.command()
@click.option(
    '--model-id',
    default='tunmxy/InspireFace',
    help='ModelScope model ID'
)
@click.option(
    '--model-dir',
    required=True,
    type=click.Path(exists=True, file_okay=False, dir_okay=True),
    help='Local model directory path'
)
@click.option(
    '--token',
    help='ModelScope access token, if not provided will get from MODELSCOPE_TOKEN environment variable'
)
def upload_model(model_id, model_dir, token):
    """Upload model to ModelScope"""
    # Resolve the access token: explicit --token wins, otherwise fall back
    # to the MODELSCOPE_TOKEN environment variable; abort if neither is set.
    if not token:
        token = os.getenv('MODELSCOPE_TOKEN')
        if not token:
            click.echo("Error: No token provided and MODELSCOPE_TOKEN environment variable not set", err=True)
            raise click.Abort()
    try:
        # Login and upload the whole directory as one model push.
        api = HubApi()
        api.login(token)
        click.echo(f"Starting to upload model to {model_id}...")
        click.echo(f"Local directory: {model_dir}")
        api.push_model(
            model_id=model_id,
            model_dir=model_dir
        )
        click.echo("✅ Model uploaded successfully!")
    except Exception as e:
        # Any failure (auth, network, API) is reported and aborts the CLI.
        click.echo(f"❌ Upload failed: {e}", err=True)
        raise click.Abort()
if __name__ == '__main__':
upload_model()