Merge pull request #2597 from tunmx/dev/inspireface110

Update InspireFace to 1.1.0
Jia Guo
2024-06-20 10:19:48 +08:00
committed by GitHub
89 changed files with 4097 additions and 511 deletions

View File

@@ -7,4 +7,9 @@ test_res/*
resource/*
pack/*
*.zip
3rdparty
3rdparty/
.macos_cache/
*.framework
.cache/*
.vscode/*
build_local/*

View File

@@ -1,13 +1,23 @@
#cmake_minimum_required(VERSION 3.19)
cmake_minimum_required(VERSION 3.10)
project(InspireFace)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --std=c++14")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
# Current version
set(INSPIRE_FACE_VERSION_MAJOR 1)
set(INSPIRE_FACE_VERSION_MINOR 1)
set(INSPIRE_FACE_VERSION_PATCH 0)
# Converts the version number to a string
string(CONCAT INSPIRE_FACE_VERSION_MAJOR_STR ${INSPIRE_FACE_VERSION_MAJOR})
string(CONCAT INSPIRE_FACE_VERSION_MINOR_STR ${INSPIRE_FACE_VERSION_MINOR})
string(CONCAT INSPIRE_FACE_VERSION_PATCH_STR ${INSPIRE_FACE_VERSION_PATCH})
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/information.h.in ${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/information.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/version.txt.in ${CMAKE_CURRENT_SOURCE_DIR}/cpp/inspireface/version.txt)
# Set the ISF_THIRD_PARTY_DIR variable to allow it to be set externally from the command line, or use the default path if it is not set
set(ISF_THIRD_PARTY_DIR "${CMAKE_CURRENT_SOURCE_DIR}/3rdparty" CACHE PATH "Path to the third-party libraries directory")
@@ -78,8 +88,24 @@ set(SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/cpp/)
# OpenCV dependency configuration
if (APPLE)
set(PLAT darwin)
find_package(OpenCV REQUIRED)
if (IOS)
message(IOS_3RDPARTY=${IOS_3RDPARTY})
set(CMAKE_XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH NO)
set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE NO)
link_directories(${IOS_3RDPARTY})
include_directories(${IOS_3RDPARTY}/opencv2.framework)
set(MNN_FRAMEWORK_PATH "${IOS_3RDPARTY}/MNN.framework")
include_directories("${MNN_FRAMEWORK_PATH}/")
set(OpenCV_DIR "${IOS_3RDPARTY}/opencv2.framework")
# find_package(OpenCV REQUIRED)
else()
message("Use apple device")
set(PLAT darwin)
find_package(OpenCV REQUIRED)
endif ()
else()
if (ISF_BUILD_LINUX_ARM7 OR ISF_BUILD_LINUX_AARCH64)
add_definitions("-DDISABLE_GUI")
@@ -115,35 +141,43 @@ else()
find_package(OpenCV REQUIRED)
endif ()
set(ISF_LINUX_MNN_CUDA "" CACHE STRING "Path to CUDA directory")
# MNN Config
if (NOT ANDROID)
if (ISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA)
# Use MNN Cuda
message("Global MNN CUDA device inference")
add_definitions("-DISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA")
set(MNN_INCLUDE_DIRS ${ISF_LINUX_MNN_CUDA}/include)
link_directories(${ISF_LINUX_MNN_CUDA}/lib)
set(MNN_LIBS MNN)
elseif(DEFINED MNN_STATIC_PATH)
message("Using static MNN from specified path: ${MNN_STATIC_PATH}")
set(MNN_INCLUDE_DIRS "${MNN_STATIC_PATH}/include")
set(MNN_LIBS "${MNN_STATIC_PATH}/lib/libMNN.a")
else ()
# Default or fallback case for MNN setup
message("Default or fallback case for MNN setup")
if (ISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA)
message("Global MNN CUDA device inference")
add_definitions("-DISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA")
# Use MNN Cuda
if (NOT DEFINED ISF_LINUX_MNN_CUDA)
# MNN Options
set(MNN_BUILD_SHARED_LIBS OFF CACHE BOOL "Build MNN as a shared library")
set(MNN_CUDA ON)
add_subdirectory(${ISF_THIRD_PARTY_DIR}/MNN EXCLUDE_FROM_ALL)
set(MNN_INCLUDE_DIRS "${ISF_THIRD_PARTY_DIR}/MNN/include")
set(MNN_LIBS MNN)
else()
message(ISF_LINUX_MNN_CUDA=${ISF_LINUX_MNN_CUDA})
set(MNN_INCLUDE_DIRS ${ISF_LINUX_MNN_CUDA}/include)
link_directories(${ISF_LINUX_MNN_CUDA}/)
set(MNN_LIBS MNN)
endif()
endif ()
elseif(DEFINED MNN_STATIC_PATH)
message("Using static MNN from specified path: ${MNN_STATIC_PATH}")
set(MNN_INCLUDE_DIRS "${MNN_STATIC_PATH}/include")
set(MNN_LIBS "${MNN_STATIC_PATH}/lib/libMNN.a")
elseif(IOS)
message("Build iOS")
else ()
# Default or fallback case for MNN setup
message("Default or fallback case for MNN setup")
# MNN Options
set(MNN_BUILD_SHARED_LIBS OFF CACHE BOOL "Build MNN as a shared library")
add_subdirectory(${ISF_THIRD_PARTY_DIR}/MNN EXCLUDE_FROM_ALL)
set(MNN_INCLUDE_DIRS "${ISF_THIRD_PARTY_DIR}/MNN/include")
set(MNN_LIBS MNN)
endif ()
endif()
# Set install path
set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install")

View File

@@ -8,6 +8,12 @@ If you require further information on tracking development branches, CI/CD proce
Please contact [contact@insightface.ai](mailto:contact@insightface.ai?subject=InspireFace) for commercial support, including obtaining and integrating higher accuracy models, as well as custom development.
## Top News
**`2024-06-18`** Added face detection feature with tracking-by-detection mode.
**`2024-06-01`** Adapted for accelerated inference on CUDA-enabled devices.
## 1. Preparation
### 1.1. Clone 3rdparty
@@ -105,12 +111,12 @@ We have completed the adaptation and testing of the software across various oper
| 2 | | ARMv8 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) |
| 3 | | x86/x86_64 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | [![test](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/test_ubuntu_x86_Pikachu.yaml?style=for-the-badge&label=Test&color=blue)](https://github.com/HyperInspire/InspireFace/actions/workflows/test_ubuntu_x86_Pikachu.yaml) |
| 4 | | ARMv7 | RV1109RV1126 | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) |
| 5 | | x86/x86_64 | CUDA | ![build](https://img.shields.io/badge/OFFLINE-PASSING-green?style=for-the-badge) | |
| 5 | | x86/x86_64 | CUDA | ![build](https://img.shields.io/badge/OFFLINE-PASSING-green?style=for-the-badge) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) |
| 6 | **macOS** | Intel x86 | - | ![build](https://img.shields.io/badge/OFFLINE-PASSING-green?style=for-the-badge) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) |
| 7 | | Apple Silicon | - | ![build](https://img.shields.io/badge/OFFLINE-PASSING-green?style=for-the-badge) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) |
| 8 | **iOS** | ARM | - | | |
| 9 | **Android** | ARMv7 | - | | |
| 10 | | ARMv8 | - | | |
| 8 | **iOS** | ARM | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | |
| 9 | **Android** | ARMv7 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | |
| 10 | | ARMv8 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | |
- Complete compilation scripts and successful compilation.
- Pass unit tests on physical devices.
@@ -129,6 +135,9 @@ build-cross-armv7-armhf
# Build armv7 with RV1109RV1126 device NPU support (cross-compile)
docker-compose up build-cross-rv1109rv1126-armhf
# Build Android with arm64-v8a and armeabi-v7a support
docker-compose up build-cross-android
# Build all
docker-compose up
```
@@ -154,7 +163,7 @@ HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HInt32 maxDetectNum = 5;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
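// A hedged sketch (not from the diff) of the two new parameters: create a
// session in track-by-detection mode for a 25 FPS surveillance stream with
// the detector input raised to 320 (a multiple of 160); this mode requires
// the ISF_ENABLE_TRACKING_BY_DETECTION compile-time option.
//
//   HFSession tbdSession = {0};
//   ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_TRACK_BY_DETECTION,
//                                            maxDetectNum, 320, 25, &tbdSession);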
@@ -296,6 +305,22 @@ During the process of building the test program using CMake, it will involve sel
**Note**: If you want to view the benchmark test report, you can click on the [link](doc/Benchmark-Remark(Updating).md).
### Quick Test
If you need to perform a quick test, you can use the script we provide. It automatically downloads the test data `test_res`, builds the test program, and runs the tests.
*Note: If you need to enable more comprehensive tests, you can adjust the options in the script as needed.*
```bash
# If you are using Ubuntu, you can execute this.
bash ci/quick_test_linux_x86_usual.sh
# If you are using another system (including Ubuntu), you can execute this.
bash ci/quick_test_local.sh
```
Every time code is committed, tests are run on GitHub Actions.
## 5. Function Support
The following functionalities and technologies are currently supported.

View File

@@ -0,0 +1,68 @@
#!/bin/bash
# Exit immediately if any command exits with a non-zero status
set -e
TARGET_DIR="test_res"
DOWNLOAD_URL="https://github.com/tunmx/inspireface-store/raw/main/resource/test_res-lite.zip"
ZIP_FILE="test_res-lite.zip"
BUILD_DIRNAME="quick_test_build"
TEST_DIR="./build/${BUILD_DIRNAME}/test"
TEST_EXECUTABLE="./test/Test"
# Check if the target directory already exists
if [ ! -d "$TARGET_DIR" ]; then
echo "Directory '$TARGET_DIR' does not exist. Downloading..."
# Download the dataset zip file
wget -q "$DOWNLOAD_URL" -O "$ZIP_FILE"
echo "Extracting '$ZIP_FILE' to '$TARGET_DIR'..."
# Unzip the downloaded file
unzip "$ZIP_FILE"
# Remove the downloaded zip file and unnecessary folders
rm "$ZIP_FILE"
rm -rf "__MACOSX"
echo "Download and extraction complete."
else
echo "Directory '$TARGET_DIR' already exists. Skipping download."
fi
# Get the absolute path of the target directory
FULL_TEST_DIR="$(realpath ${TARGET_DIR})"
# Create the build directory if it doesn't exist
mkdir -p build/${BUILD_DIRNAME}/
# Change directory to the build directory
# Disable the shellcheck warning for potential directory changes
# shellcheck disable=SC2164
cd build/${BUILD_DIRNAME}/
# Configure the CMake build system
cmake -DCMAKE_BUILD_TYPE=Release \
-DISF_BUILD_WITH_SAMPLE=OFF \
-DISF_BUILD_WITH_TEST=ON \
-DISF_ENABLE_BENCHMARK=ON \
-DISF_ENABLE_USE_LFW_DATA=OFF \
-DISF_ENABLE_TEST_EVALUATION=OFF \
-DISF_BUILD_SHARED_LIBS=OFF ../../
# Compile the project using 4 parallel jobs
make -j4
# Link the extracted test data directory into the build directory, replacing any stale link
ln -sfn "${FULL_TEST_DIR}" .
# Check if the test executable file exists
if [ ! -f "$TEST_EXECUTABLE" ]; then
# If not, print an error message and exit with a non-zero status code
echo "Error: Test executable '$TEST_EXECUTABLE' not found. Please ensure it is built correctly."
exit 1
else
# If it exists, print a message and run the test executable
echo "Test executable found. Running tests..."
"$TEST_EXECUTABLE"
fi

View File

@@ -0,0 +1,133 @@
#!/bin/bash
reorganize_structure() {
local base_path=$1
# Define the new main directories
local main_dirs=("lib" "sample" "test")
# Check if the base path exists
if [[ ! -d "$base_path" ]]; then
echo "Error: The path '$base_path' does not exist."
return 1
fi
# Create new main directories at the base path
for dir in "${main_dirs[@]}"; do
mkdir -p "$base_path/$dir"
done
# Find all architecture directories (e.g., arm64-v8a, armeabi-v7a)
local arch_dirs=($(find "$base_path" -maxdepth 1 -type d -name "arm*"))
for arch_dir in "${arch_dirs[@]}"; do
# Get the architecture name (e.g., arm64-v8a)
local arch=$(basename "$arch_dir")
# Operate on each main directory
for main_dir in "${main_dirs[@]}"; do
# Create a specific directory for each architecture under the main directory
mkdir -p "$base_path/$main_dir/$arch"
# Selectively copy content based on the directory type
case "$main_dir" in
lib)
# Copy the lib directory
if [ -d "$arch_dir/InspireFace/lib" ]; then
cp -r "$arch_dir/InspireFace/lib/"* "$base_path/$main_dir/$arch/"
fi
;;
sample)
# Copy the sample directory
if [ -d "$arch_dir/sample" ]; then
cp -r "$arch_dir/sample/"* "$base_path/$main_dir/$arch/"
fi
;;
test)
# Copy the test directory
if [ -d "$arch_dir/test" ]; then
cp -r "$arch_dir/test/"* "$base_path/$main_dir/$arch/"
fi
;;
esac
done
# Copy version.txt file to the base path, ignoring duplicates
if [ -f "$arch_dir/version.txt" ]; then
cp -f "$arch_dir/version.txt" "$base_path/version.txt"
fi
done
# Delete the original architecture directories
for arch_dir in "${arch_dirs[@]}"; do
rm -rf "$arch_dir"
done
echo "Reorganization complete."
}
# Reusable function to handle 'install' directory operations
move_install_files() {
local root_dir="$1"
local install_dir="$root_dir/install"
# Step 1: Check if the 'install' directory exists
if [ ! -d "$install_dir" ]; then
echo "Error: 'install' directory does not exist in $root_dir"
exit 1
fi
# Step 2: Delete all other files/folders except 'install'
find "$root_dir" -mindepth 1 -maxdepth 1 -not -name "install" -exec rm -rf {} +
# Step 3: Move all files from 'install' to the root directory
mv "$install_dir"/* "$root_dir" 2>/dev/null
# Step 4: Remove the empty 'install' directory
rmdir "$install_dir"
echo "Files from 'install' moved to $root_dir, and 'install' directory deleted."
}
build() {
arch=$1
NDK_API_LEVEL=$2
mkdir -p ${BUILD_FOLDER_PATH}/${arch}
pushd ${BUILD_FOLDER_PATH}/${arch}
cmake ${SCRIPT_DIR} \
-G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
-DANDROID_TOOLCHAIN=clang \
-DANDROID_ABI=${arch} \
-DANDROID_NATIVE_API_LEVEL=${NDK_API_LEVEL} \
-DANDROID_STL=c++_static \
-DMNN_BUILD_FOR_ANDROID_COMMAND=true \
-DISF_BUILD_WITH_SAMPLE=ON \
-DISF_BUILD_WITH_TEST=ON \
-DISF_ENABLE_BENCHMARK=ON \
-DISF_ENABLE_USE_LFW_DATA=OFF \
-DISF_ENABLE_TEST_EVALUATION=OFF \
-DISF_BUILD_SHARED_LIBS=ON \
-DOpenCV_DIR=${OPENCV_DIR}
make -j4
make install
popd
move_install_files "${BUILD_FOLDER_PATH}/${arch}"
}
if [ -n "$VERSION" ]; then
TAG="-$VERSION"
else
TAG=""
fi
SCRIPT_DIR=$(pwd) # Project dir
BUILD_FOLDER_PATH="build/inspireface-android${TAG}"
build arm64-v8a 21
build armeabi-v7a 21
reorganize_structure "${BUILD_FOLDER_PATH}"

View File

@@ -0,0 +1,156 @@
#!/bin/bash
# Reusable function to handle 'install' directory operations
move_install_files() {
local root_dir="$1"
local install_dir="$root_dir/install"
# Step 1: Check if the 'install' directory exists
if [ ! -d "$install_dir" ]; then
echo "Error: 'install' directory does not exist in $root_dir"
exit 1
fi
# Step 2: Delete all other files/folders except 'install'
find "$root_dir" -mindepth 1 -maxdepth 1 -not -name "install" -exec rm -rf {} +
# Step 3: Move all files from 'install' to the root directory
mv "$install_dir"/* "$root_dir" 2>/dev/null
# Step 4: Remove the empty 'install' directory
rmdir "$install_dir"
echo "Files from 'install' moved to $root_dir, and 'install' directory deleted."
}
# Define download URLs
MNN_IOS_URL="https://github.com/alibaba/MNN/releases/download/2.8.1/mnn_2.8.1_ios_armv82_cpu_metal_coreml.zip"
OPENCV_IOS_URL="https://github.com/opencv/opencv/releases/download/4.5.1/opencv-4.5.1-ios-framework.zip"
# Set the cache directory
MACOS_CACHE="$PWD/.macos_cache/"
# Create the directory if it does not exist
mkdir -p "${MACOS_CACHE}"
# Function to download and unzip a file if the required framework does not exist
download_and_unzip() {
local url=$1
local dir=$2
local framework_name=$3 # Name of the framework directory to check
# Check if the framework already exists
if [ ! -d "${dir}${framework_name}" ]; then
local file_name=$(basename "$url")
local full_path="${dir}${file_name}"
# Check if the zip file already exists
if [ ! -f "$full_path" ]; then
echo "Downloading ${file_name}..."
# Download the file
curl -sL "$url" -o "$full_path"
else
echo "${file_name} already downloaded. Proceeding to unzip."
fi
# Unzip the file to a temporary directory
echo "Unzipping ${file_name}..."
unzip -q "$full_path" -d "${dir}"
rm "$full_path"
# Move the framework if it's in a subdirectory specific to the iOS build
if [ "${framework_name}" == "MNN.framework" ]; then
mv "${dir}ios_build/Release-iphoneos/${framework_name}" "${dir}"
rm -rf "${dir}ios_build" # Clean up the subdirectory
fi
echo "${framework_name} has been set up."
else
echo "${framework_name} already exists in ${dir}. Skipping download and unzip."
fi
}
# Download and unzip MNN iOS package
download_and_unzip "$MNN_IOS_URL" "$MACOS_CACHE" "MNN.framework"
# Download and unzip OpenCV iOS package
download_and_unzip "$OPENCV_IOS_URL" "$MACOS_CACHE" "opencv2.framework"
if [ -n "$VERSION" ]; then
TAG="-$VERSION"
else
TAG=""
fi
TOOLCHAIN="$PWD/toolchain/ios.toolchain.cmake"
BUILD_DIR="build/inspireface-ios$TAG"
mkdir -p "$BUILD_DIR"
cd "$BUILD_DIR"
cmake \
-DIOS_3RDPARTY="${MACOS_CACHE}" \
-DCMAKE_TOOLCHAIN_FILE=${TOOLCHAIN} \
-DCMAKE_OSX_ARCHITECTURES=arm64 \
-DENABLE_BITCODE=0 \
-DIOS_DEPLOYMENT_TARGET=11.0 \
-DISF_BUILD_WITH_SAMPLE=OFF \
-DISF_BUILD_WITH_TEST=OFF \
-DISF_BUILD_SHARED_LIBS=OFF \
../..
make -j8
make install
move_install_files "$(pwd)"
# Set the framework name
FRAMEWORK_NAME=InspireFace
# Specify the version of the framework
FRAMEWORK_VERSION=1.0.0
# Root build directory
BUILD_DIR="$(pwd)"
BUILD_LIB_DIR="$BUILD_DIR/InspireFace"
# Create the framework structure
FRAMEWORK_DIR=$BUILD_DIR/$FRAMEWORK_NAME.framework
mkdir -p $FRAMEWORK_DIR
mkdir -p $FRAMEWORK_DIR/Headers
mkdir -p $FRAMEWORK_DIR/Resources
# Copy the static library to the framework directory
cp $BUILD_LIB_DIR/lib/libInspireFace.a $FRAMEWORK_DIR/$FRAMEWORK_NAME
# Copy header files to the framework's Headers directory
cp $BUILD_LIB_DIR/include/*.h $FRAMEWORK_DIR/Headers/
# Create Info.plist
cat <<EOF >$FRAMEWORK_DIR/Resources/Info.plist
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleExecutable</key>
<string>$FRAMEWORK_NAME</string>
<key>CFBundleIdentifier</key>
<string>com.example.$FRAMEWORK_NAME</string>
<key>CFBundleName</key>
<string>$FRAMEWORK_NAME</string>
<key>CFBundleVersion</key>
<string>$FRAMEWORK_VERSION</string>
<key>CFBundleShortVersionString</key>
<string>$FRAMEWORK_VERSION</string>
<key>CFBundlePackageType</key>
<string>FMWK</string>
</dict>
</plist>
EOF
echo "Framework $FRAMEWORK_NAME.framework has been created at $FRAMEWORK_DIR"

View File

@@ -38,14 +38,14 @@ cd ${BUILD_FOLDER_PATH}
cmake -DCMAKE_SYSTEM_NAME=Linux \
-DCMAKE_BUILD_TYPE=Release \
-DISF_BUILD_WITH_SAMPLE=OFF \
-DISF_BUILD_WITH_TEST=OFF \
-DISF_ENABLE_BENCHMARK=OFF \
-DISF_ENABLE_USE_LFW_DATA=OFF \
-DISF_ENABLE_TEST_EVALUATION=OFF \
-DISF_BUILD_WITH_SAMPLE=ON \
-DISF_BUILD_WITH_TEST=ON \
-DISF_ENABLE_BENCHMARK=ON \
-DISF_ENABLE_USE_LFW_DATA=ON \
-DISF_ENABLE_TEST_EVALUATION=ON \
-DMNN_CUDA=ON \
-DISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA=ON \
-DISF_LINUX_MNN_CUDA=/home/tunm/software/MNN-2.7.0/build_cuda/install \
-DISF_BUILD_SHARED_LIBS=ON ${SCRIPT_DIR}
-DISF_LINUX_MNN_CUDA=/home/tunm/softwate/MNN-2.7.2/build_cuda ${SCRIPT_DIR}
make -j4

View File

@@ -0,0 +1,53 @@
#!/bin/bash
# Reusable function to handle 'install' directory operations
move_install_files() {
local root_dir="$1"
local install_dir="$root_dir/install"
# Step 1: Check if the 'install' directory exists
if [ ! -d "$install_dir" ]; then
echo "Error: 'install' directory does not exist in $root_dir"
exit 1
fi
# Step 2: Delete all other files/folders except 'install'
find "$root_dir" -mindepth 1 -maxdepth 1 -not -name "install" -exec rm -rf {} +
# Step 3: Move all files from 'install' to the root directory
mv "$install_dir"/* "$root_dir" 2>/dev/null
# Step 4: Remove the empty 'install' directory
rmdir "$install_dir"
echo "Files from 'install' moved to $root_dir, and 'install' directory deleted."
}
if [ -n "$VERSION" ]; then
TAG="-$VERSION"
else
TAG=""
fi
BUILD_FOLDER_PATH="build/inspireface-linux-cuda${TAG}"
SCRIPT_DIR=$(pwd) # Project dir
mkdir -p ${BUILD_FOLDER_PATH}
# shellcheck disable=SC2164
cd ${BUILD_FOLDER_PATH}
cmake -DCMAKE_SYSTEM_NAME=Linux \
-DCMAKE_BUILD_TYPE=Release \
-DISF_BUILD_WITH_SAMPLE=ON \
-DISF_BUILD_WITH_TEST=ON \
-DISF_ENABLE_BENCHMARK=ON \
-DISF_ENABLE_USE_LFW_DATA=ON \
-DISF_ENABLE_TEST_EVALUATION=ON \
-DMNN_CUDA=ON \
-DISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA=ON \
-DISF_LINUX_MNN_CUDA=/host/softwate/MNN-2.7.2/build_cuda \
-DOpenCV_DIR=3rdparty/inspireface-precompile/opencv/4.5.1/opencv-ubuntu18-x86/lib/cmake/opencv4 ${SCRIPT_DIR}
make -j4
move_install_files "$(pwd)"

View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Set the framework name
FRAMEWORK_NAME=InspireFace
# Specify the version of the framework
FRAMEWORK_VERSION=1.0.0
# Root build directory
BUILD_DIR=build/inspireface-ios/install/InspireFace
# Create the framework structure
FRAMEWORK_DIR=$BUILD_DIR/$FRAMEWORK_NAME.framework
mkdir -p $FRAMEWORK_DIR
mkdir -p $FRAMEWORK_DIR/Headers
mkdir -p $FRAMEWORK_DIR/Resources
# Copy the static library to the framework directory
cp $BUILD_DIR/lib/libInspireFace.a $FRAMEWORK_DIR/$FRAMEWORK_NAME
# Copy header files to the framework's Headers directory
cp $BUILD_DIR/include/*.h $FRAMEWORK_DIR/Headers/
# Create Info.plist
cat <<EOF >$FRAMEWORK_DIR/Resources/Info.plist
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleExecutable</key>
<string>$FRAMEWORK_NAME</string>
<key>CFBundleIdentifier</key>
<string>com.example.$FRAMEWORK_NAME</string>
<key>CFBundleName</key>
<string>$FRAMEWORK_NAME</string>
<key>CFBundleVersion</key>
<string>$FRAMEWORK_VERSION</string>
<key>CFBundleShortVersionString</key>
<string>$FRAMEWORK_VERSION</string>
<key>CFBundlePackageType</key>
<string>FMWK</string>
</dict>
</plist>
EOF
echo "Framework $FRAMEWORK_NAME.framework has been created at $FRAMEWORK_DIR"

View File

@@ -1,24 +1,19 @@
cmake_minimum_required(VERSION 3.10)
project(InspireFaceSDK)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --std=c++14")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
# Current version
set(INSPIRE_FACE_VERSION_MAJOR 1)
set(INSPIRE_FACE_VERSION_MINOR 0)
set(INSPIRE_FACE_VERSION_PATCH 1)
# Converts the version number to a string
string(CONCAT INSPIRE_FACE_VERSION_MAJOR_STR ${INSPIRE_FACE_VERSION_MAJOR})
string(CONCAT INSPIRE_FACE_VERSION_MINOR_STR ${INSPIRE_FACE_VERSION_MINOR})
string(CONCAT INSPIRE_FACE_VERSION_PATCH_STR ${INSPIRE_FACE_VERSION_PATCH})
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/information.h.in ${CMAKE_CURRENT_SOURCE_DIR}/information.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.txt.in ${CMAKE_CURRENT_SOURCE_DIR}/version.txt)
option(ISF_BUILD_SHARED_LIBS "Build shared libraries (DLLs)." ON)
option(ISF_ENABLE_TRACKING_BY_DETECTION "Use the tracking-by-detection mode." OFF)
if(ISF_ENABLE_TRACKING_BY_DETECTION)
add_definitions("-DISF_ENABLE_TRACKING_BY_DETECTION")
find_package(Eigen3 REQUIRED)
include_directories(${EIGEN3_INCLUDE_DIRS})
endif()
file(GLOB_RECURSE SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
set(SOURCE_FILES ${SOURCE_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/c_api/inspireface.cc) # Add C_API file
@@ -32,6 +27,11 @@ endif()
# OpenCV
set(LINK_THIRD_LIBS ${OpenCV_LIBS} ${MNN_LIBS})
if(ISF_ENABLE_TRACKING_BY_DETECTION)
set(LINK_THIRD_LIBS ${LINK_THIRD_LIBS} Eigen3::Eigen)
endif()
# SQLite3
set(SOURCE_FILES ${SOURCE_FILES} ${ISF_THIRD_PARTY_DIR}/inspireface-precompile/sqlite/sqlite3.c) # Add SQLite3 C_API file
set(SQLITE_INCLUDE ${ISF_THIRD_PARTY_DIR}/inspireface-precompile/sqlite/)
@@ -69,7 +69,7 @@ if (ISF_ENABLE_RKNN)
set(NEED_INCLUDE ${NEED_INCLUDE} ${ISF_RKNN_API_INCLUDE_DIRS})
endif ()
if (ISF_BUILD_LINUX_ARM7)
if (ISF_BUILD_LINUX_ARM7 OR ANDROID)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon")
endif()
@@ -89,15 +89,33 @@ target_include_directories(InspireFace PUBLIC
${NEED_INCLUDE}
)
if (NOT ANDROID)
if (ANDROID)
target_link_libraries(InspireFace PUBLIC ${LINK_THIRD_LIBS} jnigraphics log)
set_target_properties(InspireFace PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/
ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/
)
elseif(IOS)
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ObjC")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ObjC")
target_link_libraries(InspireFace
"-fobjc-arc"
"-framework opencv2"
"-framework Metal"
"-framework CoreML"
"-framework Foundation"
"-framework CoreVideo"
"-framework CoreMedia"
${MNN_FRAMEWORK_PATH}
)
else()
target_link_libraries(InspireFace PUBLIC ${LINK_THIRD_LIBS})
set_target_properties(InspireFace PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/
ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/
)
else()
)
endif()
# Print Message
@@ -105,6 +123,10 @@ message(STATUS "InspireFace Core:")
message(STATUS "\t Version: ${INSPIRE_FACE_VERSION_MAJOR}.${INSPIRE_FACE_VERSION_MINOR}.${INSPIRE_FACE_VERSION_PATCH}")
message(STATUS "\t MNN_INCLUDE_DIRS: ${MNN_INCLUDE_DIRS}")
message(STATUS "\t MNN_LIBS: ${MNN_LIBS}")
message(STATUS "\t ENABLE_TRACKING_BY_DETECTION: ${ISF_ENABLE_TRACKING_BY_DETECTION}")
if(ISF_ENABLE_TRACKING_BY_DETECTION)
message(STATUS "\t EIGEN3_PATH: ${EIGEN3_INCLUDE_DIRS}")
endif()
message(STATUS "\t ISF_BUILD_SHARED_LIBS: ${ISF_BUILD_SHARED_LIBS}")
message(STATUS "\t ISF_ENABLE_RKNN: ${ISF_ENABLE_RKNN}")
if (ISF_ENABLE_RKNN)
@@ -125,6 +147,8 @@ install(TARGETS InspireFace
# Install header file
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/c_api/inspireface.h DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/include)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/c_api/intypedef.h DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/include)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/herror.h DESTINATION ${CMAKE_INSTALL_PREFIX}/InspireFace/include)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/version.txt DESTINATION ${CMAKE_INSTALL_PREFIX}/)

View File

@@ -76,7 +76,7 @@ HResult HFReleaseInspireFaceSession(HFSession handle) {
}
HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectMode detectMode, HInt32 maxDetectFaceNum, HFSession *handle) {
HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectMode detectMode, HInt32 maxDetectFaceNum, HInt32 detectPixelLevel, HInt32 trackByDetectModeFPS, HFSession *handle) {
inspire::ContextCustomParameter param;
param.enable_mask_detect = parameter.enable_mask_detect;
param.enable_age = parameter.enable_age;
@@ -86,13 +86,15 @@ HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectM
param.enable_interaction_liveness = parameter.enable_interaction_liveness;
param.enable_ir_liveness = parameter.enable_ir_liveness;
param.enable_recognition = parameter.enable_recognition;
inspire::DetectMode detMode = inspire::DETECT_MODE_IMAGE;
if (detectMode == HF_DETECT_MODE_VIDEO) {
detMode = inspire::DETECT_MODE_VIDEO;
inspire::DetectMode detMode = inspire::DETECT_MODE_ALWAYS_DETECT;
if (detectMode == HF_DETECT_MODE_LIGHT_TRACK) {
detMode = inspire::DETECT_MODE_LIGHT_TRACK;
} else if (detectMode == HF_DETECT_MODE_TRACK_BY_DETECTION) {
detMode = inspire::DETECT_MODE_TRACK_BY_DETECT;
}
HF_FaceAlgorithmSession *ctx = new HF_FaceAlgorithmSession();
auto ret = ctx->impl.Configuration(detMode, maxDetectFaceNum, param);
auto ret = ctx->impl.Configuration(detMode, maxDetectFaceNum, param, detectPixelLevel, trackByDetectModeFPS);
if (ret != HSUCCEED) {
delete ctx;
*handle = nullptr;
@@ -104,7 +106,7 @@ HResult HFCreateInspireFaceSession(HFSessionCustomParameter parameter, HFDetectM
return ret;
}
HResult HFCreateInspireFaceSessionOptional(HOption customOption, HFDetectMode detectMode, HInt32 maxDetectFaceNum, HFSession *handle) {
HResult HFCreateInspireFaceSessionOptional(HOption customOption, HFDetectMode detectMode, HInt32 maxDetectFaceNum, HInt32 detectPixelLevel, HInt32 trackByDetectModeFPS, HFSession *handle) {
inspire::ContextCustomParameter param;
if (customOption & HF_ENABLE_FACE_RECOGNITION) {
param.enable_recognition = true;
@@ -130,12 +132,15 @@ HResult HFCreateInspireFaceSessionOptional(HOption customOption, HFDetectMode de
if (customOption & HF_ENABLE_INTERACTION) {
param.enable_interaction_liveness = true;
}
inspire::DetectMode detMode = inspire::DETECT_MODE_IMAGE;
if (detectMode == HF_DETECT_MODE_VIDEO) {
detMode = inspire::DETECT_MODE_VIDEO;
inspire::DetectMode detMode = inspire::DETECT_MODE_ALWAYS_DETECT;
if (detectMode == HF_DETECT_MODE_LIGHT_TRACK) {
detMode = inspire::DETECT_MODE_LIGHT_TRACK;
} else if (detectMode == HF_DETECT_MODE_TRACK_BY_DETECTION) {
detMode = inspire::DETECT_MODE_TRACK_BY_DETECT;
}
HF_FaceAlgorithmSession *ctx = new HF_FaceAlgorithmSession();
auto ret = ctx->impl.Configuration(detMode, maxDetectFaceNum, param);
auto ret = ctx->impl.Configuration(detMode, maxDetectFaceNum, param, detectPixelLevel, trackByDetectModeFPS);
if (ret != HSUCCEED) {
delete ctx;
*handle = nullptr;
@@ -178,6 +183,17 @@ HResult HFSessionSetTrackPreviewSize(HFSession session, HInt32 previewSize) {
return ctx->impl.SetTrackPreviewSize(previewSize);
}
HResult HFSessionSetFilterMinimumFacePixelSize(HFSession session, HInt32 minSize) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
HF_FaceAlgorithmSession *ctx = (HF_FaceAlgorithmSession* ) session;
if (ctx == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
return ctx->impl.SetTrackFaceMinimumSize(minSize);
}
HResult HFSessionSetFaceTrackMode(HFSession session, HFDetectMode detectMode) {
if (session == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
@@ -186,9 +202,9 @@ HResult HFSessionSetFaceTrackMode(HFSession session, HFDetectMode detectMode) {
if (ctx == nullptr) {
return HERR_INVALID_CONTEXT_HANDLE;
}
inspire::DetectMode detMode = inspire::DETECT_MODE_IMAGE;
if (detectMode == HF_DETECT_MODE_VIDEO) {
detMode = inspire::DETECT_MODE_VIDEO;
inspire::DetectMode detMode = inspire::DETECT_MODE_ALWAYS_DETECT;
if (detectMode == HF_DETECT_MODE_LIGHT_TRACK) {
detMode = inspire::DETECT_MODE_LIGHT_TRACK;
}
return ctx->impl.SetDetectMode(detMode);
}
@@ -620,6 +636,6 @@ HResult HFSetLogLevel(HFLogLevel level) {
}
HResult HFLogDisable() {
INSPIRE_SET_LOG_LEVEL(inspire::LOG_NONE);
INSPIRE_SET_LOG_LEVEL(inspire::ISF_LOG_NONE);
return HSUCCEED;
}
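// Usage sketch (illustrative, not part of the diff): silence all SDK logging.
// HFLogDisable() maps to INSPIRE_SET_LOG_LEVEL(inspire::ISF_LOG_NONE); calling
// HFSetLogLevel() later with an HFLogLevel value restores or raises verbosity
// (the HFLogLevel enumerator names live in the C header and are not shown here).
//
//   HFLogDisable();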

View File

@@ -136,8 +136,10 @@ typedef struct HFSessionCustomParameter {
* @brief Enumeration for face detection modes.
*/
typedef enum HFDetectMode {
HF_DETECT_MODE_IMAGE, ///< Image detection mode, always detect.
HF_DETECT_MODE_VIDEO, ///< Video detection mode, face tracking.
HF_DETECT_MODE_ALWAYS_DETECT, ///< Always-detect mode, suitable for still images.
HF_DETECT_MODE_LIGHT_TRACK, ///< Light face-tracking mode, suitable for video streams such as a front camera.
HF_DETECT_MODE_TRACK_BY_DETECTION, ///< Tracking-by-detection mode, suitable for high-resolution video, monitoring, and capture
// (requires a compile-time option to be enabled).
} HFDetectMode;
/**
@@ -146,6 +148,10 @@ typedef enum HFDetectMode {
* @param parameter Custom parameters for session.
* @param detectMode Detection mode to be used.
* @param maxDetectFaceNum Maximum number of faces to detect.
* @param detectPixelLevel Input resolution level of the detector; larger values improve detection.
* Must be a multiple of 160 (e.g. 160, 320, 640); the default value of -1 means 160.
* @param trackByDetectModeFPS Frame rate of the incoming video stream when using the
* TRACK_BY_DETECTION mode; the default value of -1 means 30 FPS.
* @param handle Pointer to the context handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
@@ -153,6 +159,8 @@ HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSession(
HFSessionCustomParameter parameter,
HFDetectMode detectMode,
HInt32 maxDetectFaceNum,
HInt32 detectPixelLevel,
HInt32 trackByDetectModeFPS,
HFSession *handle
);
@@ -162,6 +170,10 @@ HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSession(
* @param customOption Custom option for additional configuration.
* @param detectMode Detection mode to be used.
* @param maxDetectFaceNum Maximum number of faces to detect.
* @param detectPixelLevel Input resolution level of the detector; larger values improve detection.
* Must be a multiple of 160 (e.g. 160, 320, 640); the default value of -1 means 160.
* @param trackByDetectModeFPS Frame rate of the incoming video stream when using the
* TRACK_BY_DETECTION mode; the default value of -1 means 30 FPS.
* @param handle Pointer to the context handle that will be returned.
* @return HResult indicating the success or failure of the operation.
*/
@@ -169,6 +181,8 @@ HYPER_CAPI_EXPORT extern HResult HFCreateInspireFaceSessionOptional(
HOption customOption,
HFDetectMode detectMode,
HInt32 maxDetectFaceNum,
HInt32 detectPixelLevel,
HInt32 trackByDetectModeFPS,
HFSession *handle
);
@@ -211,8 +225,8 @@ typedef struct HFMultipleFaceData {
HInt32 detectedNum; ///< Number of faces detected.
HFaceRect *rects; ///< Array of bounding rectangles for each face.
HInt32 *trackIds; ///< Array of track IDs for each face.
HFFaceEulerAngle angles; ///< Euler angles for each face.
PHFFaceBasicToken tokens; ///< Tokens associated with each face.
HFFaceEulerAngle angles; ///< Euler angles for each face.
PHFFaceBasicToken tokens; ///< Tokens associated with each face.
} HFMultipleFaceData, *PHFMultipleFaceData;
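/**
 * Example (illustrative sketch; how the struct is filled by a detect/track
 * call is outside this hunk):
 *
 *   for (HInt32 i = 0; i < faces.detectedNum; ++i) {
 *       printf("face %d trackId=%d\n", i, faces.trackIds[i]);
 *       // faces.rects[i] is the bounding box; faces.tokens[i] can be passed
 *       // to the downstream pipeline functions.
 *   }
 */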
/**
@@ -226,13 +240,13 @@ typedef struct HFMultipleFaceData {
HYPER_CAPI_EXPORT extern HResult HFSessionSetTrackPreviewSize(HFSession session, HInt32 previewSize);
/**
* @brief Set the face track mode in the session.
* @brief Set the minimum face size in pixels that the detector will report; faces smaller than this are filtered out.
*
* @param session Handle to the session.
* @param detectMode The mode of the detection mode for tracking.
* @param minSize The minimum face size in pixels; the default value is 24.
* @return HResult indicating the success or failure of the operation.
*/
HYPER_CAPI_EXPORT extern HResult HFSessionSetFaceTrackMode(HFSession session, HFDetectMode detectMode);
HYPER_CAPI_EXPORT extern HResult HFSessionSetFilterMinimumFacePixelSize(HFSession session, HInt32 minSize);
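/**
 * Example (illustrative sketch; the values are assumptions): tuning a
 * session's tracker after creation.
 *
 *   HFSessionSetTrackPreviewSize(session, 192);          // tracking preview size
 *   HFSessionSetFilterMinimumFacePixelSize(session, 48); // ignore faces under 48 px
 */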
/**
* @brief Set the face detect threshold in the session.

View File

@@ -308,6 +308,10 @@ public:
std::vector<Point2f> keyPointFive;
void setId(int id) {
face_id_ = id;
}
private:
TRACK_STATE tracking_state_;
// std::shared_ptr<FaceAction> face_action_;

View File

@@ -14,8 +14,11 @@ namespace inspire {
FaceContext::FaceContext() = default;
int32_t FaceContext::Configuration(DetectMode detect_mode, int32_t max_detect_face,
CustomPipelineParameter param) {
int32_t FaceContext::Configuration(DetectMode detect_mode,
int32_t max_detect_face,
CustomPipelineParameter param,
int32_t detect_level_px,
int32_t track_by_detect_mode_fps) {
m_detect_mode_ = detect_mode;
m_max_detect_face_ = max_detect_face;
m_parameter_ = param;
@@ -26,9 +29,9 @@ int32_t FaceContext::Configuration(DetectMode detect_mode, int32_t max_detect_fa
return HERR_ARCHIVE_LOAD_FAILURE;
}
m_face_track_ = std::make_shared<FaceTrack>(m_max_detect_face_);
m_face_track_ = std::make_shared<FaceTrack>(m_detect_mode_, m_max_detect_face_, 20, 192, detect_level_px, track_by_detect_mode_fps);
m_face_track_->Configuration(INSPIRE_LAUNCH->getMArchive());
SetDetectMode(m_detect_mode_);
// SetDetectMode(m_detect_mode_);
m_face_recognition_ = std::make_shared<FeatureExtraction>(INSPIRE_LAUNCH->getMArchive(), m_parameter_.enable_recognition);
if (m_face_recognition_->QueryStatus() != HSUCCEED) {
@@ -62,7 +65,7 @@ int32_t FaceContext::FaceDetectAndTrack(CameraStream &image) {
if (m_face_track_ == nullptr) {
return HERR_SESS_TRACKER_FAILURE;
}
m_face_track_->UpdateStream(image, m_always_detect_);
m_face_track_->UpdateStream(image);
for (int i = 0; i < m_face_track_->trackingFace.size(); ++i) {
auto &face = m_face_track_->trackingFace[i];
HyperFaceData data = FaceObjectToHyperFaceData(face, i);
@@ -255,7 +258,7 @@ int32_t FaceContext::FaceQualityDetect(FaceBasicData& data, float &result) {
int32_t FaceContext::SetDetectMode(DetectMode mode) {
m_detect_mode_ = mode;
if (m_detect_mode_ == DetectMode::DETECT_MODE_IMAGE) {
if (m_detect_mode_ == DetectMode::DETECT_MODE_ALWAYS_DETECT) {
m_always_detect_ = true;
} else {
m_always_detect_ = false;
@@ -263,9 +266,14 @@ int32_t FaceContext::SetDetectMode(DetectMode mode) {
return HSUCCEED;
}
int32_t FaceContext::SetTrackPreviewSize(const int32_t preview_size) {
int32_t FaceContext::SetTrackPreviewSize(const int32_t preview_size) {
m_face_track_->SetTrackPreviewSize(preview_size);
return HSUCCEED;
}
int32_t FaceContext::SetTrackFaceMinimumSize(int32_t minSize) {
m_face_track_->SetMinimumFacePxSize(minSize);
return HSUCCEED;
}
} // namespace inspire

View File

@@ -26,16 +26,6 @@
namespace inspire {
/**
* @enum DetectMode
* @brief Enumeration for different detection modes.
*/
enum DetectMode {
DETECT_MODE_IMAGE = 0, ///< Image detection mode: Always detect
DETECT_MODE_VIDEO, ///< Image detection mode: Face track
};
/**
* @struct CustomPipelineParameter
* @brief Structure to hold custom parameters for the face detection and processing pipeline.
@@ -77,7 +67,11 @@ public:
* @param param Custom parameters for the face pipeline.
* @return int32_t Returns 0 on success, non-zero for any error.
*/
int32_t Configuration(DetectMode detect_mode, int32_t max_detect_face, CustomPipelineParameter param);
int32_t Configuration(DetectMode detect_mode,
int32_t max_detect_face,
CustomPipelineParameter param,
int32_t detect_level_px = -1,
int32_t track_by_detect_mode_fps = -1);
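/**
 * Example (illustrative sketch; parameter values and the enable_recognition
 * field mirror what the C API sets before calling in):
 *
 *   inspire::CustomPipelineParameter param;
 *   param.enable_recognition = true;
 *   inspire::FaceContext ctx;
 *   int32_t ret = ctx.Configuration(inspire::DETECT_MODE_TRACK_BY_DETECT,
 *                                   5, param, 320, 25);
 */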
/**
* @brief Performs face detection and tracking on a given image stream.
@@ -155,6 +149,13 @@ public:
*/
int32_t SetTrackPreviewSize(int32_t preview_size);
/**
* @brief Set the minimum face size in pixels; smaller faces are filtered out.
* @param minSize The minimum face size in pixels.
* @return int32_t Status code of the operation.
*/
int32_t SetTrackFaceMinimumSize(int32_t minSize);
/**
* @brief Sets the mode for face detection.
* @param mode The detection mode to use: tracking or always-detect.

View File

@@ -20,6 +20,7 @@
#define HERR_INVALID_BUFFER_SIZE (HERR_BASIC_BASE+33) // Invalid copy token
#define HERR_INVALID_IMAGE_STREAM_PARAM (HERR_BASIC_BASE+34) // Invalid image param
#define HERR_INVALID_SERIALIZATION_FAILED (HERR_BASIC_BASE+35) // Invalid face serialization failed
#define HERR_INVALID_DETECTION_INPUT (HERR_BASIC_BASE+36) // Failed to modify detector input size
#define HERR_SESS_BASE 0X500 // Session error types
#define HERR_SESS_FUNCTION_UNUSABLE (HERR_SESS_BASE+2) // Function not usable

View File

@@ -6,7 +6,7 @@
#define HYPERFACEREPO_INFORMATION_H
#define INSPIRE_FACE_VERSION_MAJOR_STR "1"
#define INSPIRE_FACE_VERSION_MINOR_STR "0"
#define INSPIRE_FACE_VERSION_PATCH_STR "1"
#define INSPIRE_FACE_VERSION_MINOR_STR "1"
#define INSPIRE_FACE_VERSION_PATCH_STR "0"
#endif //HYPERFACEREPO_INFORMATION_H

View File

@@ -16,19 +16,20 @@
#ifdef ANDROID
// Android platform log macros
const std::string TAG = "InspireFace";
#define INSPIRE_LOGD(...) inspire::LogManager::getInstance()->logAndroid(inspire::LOG_DEBUG, TAG, __VA_ARGS__)
#define INSPIRE_LOGI(...) inspire::LogManager::getInstance()->logAndroid(inspire::LOG_INFO, TAG, __VA_ARGS__)
#define INSPIRE_LOGW(...) inspire::LogManager::getInstance()->logAndroid(inspire::LOG_WARN, TAG, __VA_ARGS__)
#define INSPIRE_LOGE(...) inspire::LogManager::getInstance()->logAndroid(inspire::LOG_ERROR, TAG, __VA_ARGS__)
#define INSPIRE_LOGF(...) inspire::LogManager::getInstance()->logAndroid(inspire::LOG_FATAL, TAG, __VA_ARGS__)
#include <android/log.h>
#define INSPIRE_ANDROID_LOG_TAG "InspireFace"
#define INSPIRE_LOGD(...) inspire::LogManager::getInstance()->logAndroid(inspire::ISF_LOG_DEBUG, INSPIRE_ANDROID_LOG_TAG, __VA_ARGS__)
#define INSPIRE_LOGI(...) inspire::LogManager::getInstance()->logAndroid(inspire::ISF_LOG_INFO, INSPIRE_ANDROID_LOG_TAG, __VA_ARGS__)
#define INSPIRE_LOGW(...) inspire::LogManager::getInstance()->logAndroid(inspire::ISF_LOG_WARN, INSPIRE_ANDROID_LOG_TAG, __VA_ARGS__)
#define INSPIRE_LOGE(...) inspire::LogManager::getInstance()->logAndroid(inspire::ISF_LOG_ERROR, INSPIRE_ANDROID_LOG_TAG, __VA_ARGS__)
#define INSPIRE_LOGF(...) inspire::LogManager::getInstance()->logAndroid(inspire::ISF_LOG_FATAL, INSPIRE_ANDROID_LOG_TAG, __VA_ARGS__)
#else
// Standard platform log macros
#define INSPIRE_LOGD(...) inspire::LogManager::getInstance()->logStandard(inspire::LOG_DEBUG, __FILENAME__, __FUNCTION__, __LINE__, __VA_ARGS__)
#define INSPIRE_LOGI(...) inspire::LogManager::getInstance()->logStandard(inspire::LOG_INFO, "", "", -1, __VA_ARGS__)
#define INSPIRE_LOGW(...) inspire::LogManager::getInstance()->logStandard(inspire::LOG_WARN, __FILENAME__, "", __LINE__, __VA_ARGS__)
#define INSPIRE_LOGE(...) inspire::LogManager::getInstance()->logStandard(inspire::LOG_ERROR, __FILENAME__, "", __LINE__, __VA_ARGS__)
#define INSPIRE_LOGF(...) inspire::LogManager::getInstance()->logStandard(inspire::LOG_FATAL, __FILENAME__, __FUNCTION__, __LINE__, __VA_ARGS__)
#define INSPIRE_LOGD(...) inspire::LogManager::getInstance()->logStandard(inspire::ISF_LOG_DEBUG, __FILENAME__, __FUNCTION__, __LINE__, __VA_ARGS__)
#define INSPIRE_LOGI(...) inspire::LogManager::getInstance()->logStandard(inspire::ISF_LOG_INFO, "", "", -1, __VA_ARGS__)
#define INSPIRE_LOGW(...) inspire::LogManager::getInstance()->logStandard(inspire::ISF_LOG_WARN, __FILENAME__, "", __LINE__, __VA_ARGS__)
#define INSPIRE_LOGE(...) inspire::LogManager::getInstance()->logStandard(inspire::ISF_LOG_ERROR, __FILENAME__, "", __LINE__, __VA_ARGS__)
#define INSPIRE_LOGF(...) inspire::LogManager::getInstance()->logStandard(inspire::ISF_LOG_FATAL, __FILENAME__, __FUNCTION__, __LINE__, __VA_ARGS__)
#endif
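// Usage sketch (illustrative, not part of the diff); on both platforms the
// macros accept printf-style format strings:
//
//   INSPIRE_LOGI("model loaded: %s", "Pikachu");
//   INSPIRE_LOGW("input resized to %dx%d", 320, 320);
//   INSPIRE_SET_LOG_LEVEL(inspire::ISF_LOG_WARN);  // filters out LOGD/LOGI from now on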
@@ -39,12 +40,12 @@ namespace inspire {
// Log levels
enum LogLevel {
LOG_NONE = 0,
LOG_DEBUG,
LOG_INFO,
LOG_WARN,
LOG_ERROR,
LOG_FATAL
ISF_LOG_NONE = 0,
ISF_LOG_DEBUG,
ISF_LOG_INFO,
ISF_LOG_WARN,
ISF_LOG_ERROR,
ISF_LOG_FATAL
};
class INSPIRE_API LogManager {
@@ -54,7 +55,7 @@ private:
static std::mutex mutex;
// Private constructor
LogManager() : currentLevel(LOG_DEBUG) {} // Default log level is DEBUG
LogManager() : currentLevel(ISF_LOG_DEBUG) {} // Default log level is DEBUG
public:
// Disable copy construction and assignment
@@ -83,15 +84,15 @@ public:
#ifdef ANDROID
// Method for logging on the Android platform
void logAndroid(LogLevel level, const char* tag, const char* format, ...) const {
if (level < currentLevel) return;
if (currentLevel == ISF_LOG_NONE || level < currentLevel) return;
int androidLevel;
switch (level) {
case LOG_DEBUG: androidLevel = ANDROID_LOG_DEBUG; break;
case LOG_INFO: androidLevel = ANDROID_LOG_INFO; break;
case LOG_WARN: androidLevel = ANDROID_LOG_WARN; break;
case LOG_ERROR: androidLevel = ANDROID_LOG_ERROR; break;
case LOG_FATAL: androidLevel = ANDROID_LOG_FATAL; break;
case ISF_LOG_DEBUG: androidLevel = ANDROID_LOG_DEBUG; break;
case ISF_LOG_INFO: androidLevel = ANDROID_LOG_INFO; break;
case ISF_LOG_WARN: androidLevel = ANDROID_LOG_WARN; break;
case ISF_LOG_ERROR: androidLevel = ANDROID_LOG_ERROR; break;
case ISF_LOG_FATAL: androidLevel = ANDROID_LOG_FATAL; break;
default: androidLevel = ANDROID_LOG_DEFAULT;
}
@@ -104,7 +105,7 @@ public:
// Method for standard platform logging
void logStandard(LogLevel level, const char* filename, const char* function, int line, const char* format, ...) const {
// Check whether the current level is LOG NONE or the log level is not enough to log
if (currentLevel == LOG_NONE || level < currentLevel) return;
if (currentLevel == ISF_LOG_NONE || level < currentLevel) return;
// Build log prefix dynamically based on available data
bool hasPrintedPrefix = false;
@@ -127,9 +128,9 @@ public:
}
// Set text color for different log levels
if (level == LOG_ERROR || level == LOG_FATAL) {
if (level == ISF_LOG_ERROR || level == ISF_LOG_FATAL) {
printf("\033[1;31m"); // Red color for errors and fatal issues
} else if (level == LOG_WARN) {
} else if (level == ISF_LOG_WARN) {
printf("\033[1;33m"); // Yellow color for warnings
}
@@ -140,7 +141,7 @@ public:
va_end(args);
// Reset text color if needed
if (level == LOG_ERROR || level == LOG_WARN || level == LOG_FATAL) {
if (level == ISF_LOG_ERROR || level == ISF_LOG_WARN || level == ISF_LOG_FATAL) {
printf("\033[0m"); // Reset color
}

View File

@@ -4,6 +4,7 @@
#pragma once
#ifndef BIGGUYSMAIN_ANYNET_H
#define BIGGUYSMAIN_ANYNET_H
#include <utility>
#include "../data_type.h"
@@ -18,12 +19,12 @@ namespace inspire {
using AnyTensorOutputs = std::vector<std::pair<std::string, std::vector<float>>>;
/**
* @class AnyNet
* @brief Generic neural network class for various inference tasks.
*
* This class provides a general interface for different types of neural networks,
* facilitating loading parameters, initializing models, and executing forward passes.
*/
* @class AnyNet
* @brief Generic neural network class for various inference tasks.
*
* This class provides a general interface for different types of neural networks,
* facilitating loading parameters, initializing models, and executing forward passes.
*/
class INSPIRE_API AnyNet {
CONFIGURABLE_SUPPORT
@@ -32,25 +33,26 @@ public:
* @brief Constructor for AnyNet.
* @param name Name of the neural network.
*/
explicit AnyNet(std::string name):m_name_(std::move(name)) {}
explicit AnyNet(std::string name) : m_name_(std::move(name)) {}
~AnyNet() {
m_nn_inference_->Finalize();
}
/**
* @brief Loads parameters and initializes the model for inference.
* @param param Parameters for network configuration.
* @param model Pointer to the model.
* @param type Type of the inference helper (default: kMnn).
* @return int32_t Status of the loading and initialization process.
*/
int32_t loadData(InspireModel &model, InferenceHelper::HelperType type = InferenceHelper::kMnn) {
/**
* @brief Loads parameters and initializes the model for inference.
* @param param Parameters for network configuration.
* @param model Pointer to the model.
* @param type Type of the inference helper (default: kMnn).
* @return int32_t Status of the loading and initialization process.
*/
int32_t
loadData(InspireModel &model, InferenceHelper::HelperType type = InferenceHelper::kMnn, bool dynamic = false) {
m_infer_type_ = type;
// must
pushData<int>(model.Config(), "model_index", 0);
pushData<std::string>(model.Config(), "input_layer", "");
pushData<std::vector<std::string>>(model.Config(), "outputs_layers", {"", });
pushData<std::vector<std::string>>(model.Config(), "outputs_layers", {"",});
pushData<std::vector<int>>(model.Config(), "input_size", {320, 320});
pushData<std::vector<float>>(model.Config(), "mean", {127.5f, 127.5f, 127.5f});
pushData<std::vector<float>>(model.Config(), "norm", {0.0078125f, 0.0078125f, 0.0078125f});
@@ -67,7 +69,7 @@ public:
m_nn_inference_.reset(InferenceHelper::Create(m_infer_type_));
m_nn_inference_->SetNumThreads(getData<int>("threads"));
#if defined(ISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA) && !defined(ISF_ENABLE_RKNN)
LOGW("You have forced the global use of MNN_CUDA as the neural network inference backend");
INSPIRE_LOGW("You have forced the global use of MNN_CUDA as the neural network inference backend");
m_nn_inference_->SetSpecialBackend(InferenceHelper::kMnnCuda);
#endif
m_output_tensor_info_list_.clear();
@@ -77,7 +79,8 @@ public:
for (auto &name: outputs_layers) {
m_output_tensor_info_list_.push_back(OutputTensorInfo(name, out_tensor_type));
}
auto ret = m_nn_inference_->Initialize(model.buffer, model.bufferSize, m_input_tensor_info_list_, m_output_tensor_info_list_);
auto ret = m_nn_inference_->Initialize(model.buffer, model.bufferSize, m_input_tensor_info_list_,
m_output_tensor_info_list_);
if (ret != InferenceHelper::kRetOk) {
INSPIRE_LOGE("NN Initialize fail");
return ret;
@@ -91,9 +94,9 @@ public:
m_input_image_size_ = {width, height};
int channel = getData<int>("input_channel");
if (getData<bool>("nchw")) {
input_tensor_info.tensor_dims = { 1, channel, m_input_image_size_.height, m_input_image_size_.width };
input_tensor_info.tensor_dims = {1, channel, m_input_image_size_.height, m_input_image_size_.width};
} else {
input_tensor_info.tensor_dims = { 1, m_input_image_size_.height, m_input_image_size_.width, channel };
input_tensor_info.tensor_dims = {1, m_input_image_size_.height, m_input_image_size_.width, channel};
}
input_tensor_info.data_type = getData<int>("data_type");
@@ -121,6 +124,12 @@ public:
m_input_tensor_info_list_.push_back(input_tensor_info);
if (dynamic) {
m_nn_inference_->ResizeInput(m_input_tensor_info_list_);
}
return 0;
}
@@ -129,8 +138,8 @@ public:
* @param data The input matrix (image) to process.
* @param outputs Outputs of the network (tensor outputs).
*/
void Forward(const Matrix &data, AnyTensorOutputs& outputs) {
InputTensorInfo& input_tensor_info = getMInputTensorInfoList()[0];
void Forward(const Matrix &data, AnyTensorOutputs &outputs) {
InputTensorInfo &input_tensor_info = getMInputTensorInfoList()[0];
if (m_infer_type_ == InferenceHelper::kRknn) {
// Start by simply implementing a temporary color shift on the outside
if (getData<bool>("swap_color")) {
@@ -149,7 +158,7 @@ public:
* @brief Performs a forward pass of the network.
* @param outputs Outputs of the network (tensor outputs).
*/
void Forward(AnyTensorOutputs& outputs) {
void Forward(AnyTensorOutputs &outputs) {
// LOGD("ppPreProcess");
if (m_nn_inference_->PreProcess(m_input_tensor_info_list_) != InferenceHelper::kRetOk) {
@@ -210,12 +219,12 @@ private:
};
template <typename ImageT, typename TensorT>
template<typename ImageT, typename TensorT>
AnyTensorOutputs ForwardService(
std::shared_ptr<AnyNet> net,
const ImageT &input,
std::function<void(const ImageT&, TensorT&)> transform) {
InputTensorInfo& input_tensor_info = net->getMInputTensorInfoList()[0];
std::function<void(const ImageT &, TensorT &)> transform) {
InputTensorInfo &input_tensor_info = net->getMInputTensorInfoList()[0];
TensorT transform_tensor;
transform(input, transform_tensor);
input_tensor_info.data = transform_tensor.data; // the input tensor currently only supports cv::Mat data
@@ -228,27 +237,27 @@ AnyTensorOutputs ForwardService(
/**
* @brief Executes a forward pass through the neural network for a given input, with preprocessing.
* @tparam ImageT Type of the input image.
* @tparam TensorT Type of the transformed tensor.
* @tparam PreprocessCallbackT Type of the preprocessing callback function.
* @param net Shared pointer to the AnyNet neural network object.
* @param input The input image to be processed.
* @param callback Preprocessing callback function to be applied to the input.
* @param transform Transformation function to convert the input image to a tensor.
* @return AnyTensorOutputs Outputs of the network (tensor outputs).
*
* This template function handles the preprocessing of the input image, transformation to tensor,
* and then passes it through the neural network to get the output. The function is generic and
* can work with different types of images and tensors, as specified by the template parameters.
*/
template <typename ImageT, typename TensorT, typename PreprocessCallbackT>
* @brief Executes a forward pass through the neural network for a given input, with preprocessing.
* @tparam ImageT Type of the input image.
* @tparam TensorT Type of the transformed tensor.
* @tparam PreprocessCallbackT Type of the preprocessing callback function.
* @param net Shared pointer to the AnyNet neural network object.
* @param input The input image to be processed.
* @param callback Preprocessing callback function to be applied to the input.
* @param transform Transformation function to convert the input image to a tensor.
* @return AnyTensorOutputs Outputs of the network (tensor outputs).
*
* This template function handles the preprocessing of the input image, transformation to tensor,
* and then passes it through the neural network to get the output. The function is generic and
* can work with different types of images and tensors, as specified by the template parameters.
*/
template<typename ImageT, typename TensorT, typename PreprocessCallbackT>
AnyTensorOutputs ForwardService(
std::shared_ptr<AnyNet> net,
const ImageT &input,
PreprocessCallbackT &callback,
std::function<void(const ImageT&, TensorT&, PreprocessCallbackT&)> transform) {
InputTensorInfo& input_tensor_info = net->getMInputTensorInfoList()[0];
std::function<void(const ImageT &, TensorT &, PreprocessCallbackT &)> transform) {
InputTensorInfo &input_tensor_info = net->getMInputTensorInfoList()[0];
TensorT transform_tensor;
transform(input, transform_tensor, callback);
input_tensor_info.data = transform_tensor.data; // the input tensor currently only supports cv::Mat data
@@ -260,7 +269,6 @@ AnyTensorOutputs ForwardService(
}
} // namespace
#endif //BIGGUYSMAIN_ANYNET_H
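// A usage sketch of ForwardService (illustrative; "net" and the file path are
// assumptions). The transform callback converts the input image into the
// tensor the model expects; shown here as a plain resize to the 320x320
// default that loadData() pushes:
//
//   cv::Mat image = cv::imread("face.jpg");
//   auto outputs = inspire::ForwardService<cv::Mat, cv::Mat>(
//       net, image,
//       [](const cv::Mat &in, cv::Mat &tensor) {
//           cv::resize(in, tensor, cv::Size(320, 320));
//       });
//   // outputs: AnyTensorOutputs, i.e. (output layer name, float vector) pairs.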

View File

@@ -0,0 +1 @@
Planned; not yet implemented.

View File

@@ -0,0 +1,33 @@
#ifndef MNN_ADAPTER_IMPL__
#define MNN_ADAPTER_IMPL__
#include "../inference_adapter.h"
#include "opencv2/opencv.hpp"
#include <MNN/ImageProcess.hpp>
#include <MNN/Interpreter.hpp>
#include <MNN/MNNDefine.h>
#include <MNN/Tensor.hpp>
#include <MNN/MNNForwardType.h>
class MNNCVAdapter : public InferenceAdapter {
public:
MNNCVAdapter() {};
~MNNCVAdapter() override {};
private:
std::shared_ptr<MNN::Interpreter> detect_model_;
MNN::Tensor *input_{};
std::vector<MNN::Tensor*> output_tensors_;
MNN::Session *sess{};
std::vector<int> tensor_shape_;
MNN::ScheduleConfig _config;
MNNForwardType backend_;
int width_{};
int height_{};
float mean[3]{};
float normal[3]{};
};
#endif // MNN_ADAPTER_IMPL__

View File

@@ -0,0 +1,105 @@
#ifndef INSPIREFACE_OMNI_INFERENACE__
#define INSPIREFACE_OMNI_INFERENACE__
#include <cstdint>
#include <cmath>
#include <cstring>
#include <string>
#include <vector>
#include <array>
#include <memory>
class XOutputData {
public:
XOutputData() : size(0), data(nullptr) {}
std::vector<float> CopyToFloatArray() {
if (!buffer.empty()) {
return buffer;
}
std::vector<float> floatArray;
floatArray.resize(size);
std::memcpy(floatArray.data(), data, size * sizeof(float));
return floatArray;
}
public:
size_t size;
float *data; // Borrowed pointer into the backend's output buffer
std::vector<float> buffer; // Owned copy of the output data
};
typedef std::vector<XOutputData> XOutputDataList;
class XTransform {
public:
XTransform() : swap_color(false) {
std::fill(std::begin(normalize.mean), std::end(normalize.mean), 0.0f);
std::fill(std::begin(normalize.norm), std::end(normalize.norm), 1.0f);
}
bool swap_color;
struct {
float mean[3];
float norm[3];
} normalize;
};
class XInputData {
public:
XInputData() : nchw(false), bgr(false), height(0), width(0), channel(0), data(nullptr) {}
public:
bool nchw;
bool bgr;
int32_t height;
int32_t width;
int32_t channel;
uint8_t *data;
};
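// Filling sketch (illustrative; the pixel buffer is an assumption). The mean
// and norm values mirror the 127.5f / 0.0078125f defaults used elsewhere in
// the SDK:
//
//   XTransform t;
//   t.swap_color = true;                 // request a BGR<->RGB swap in preprocessing
//   for (int i = 0; i < 3; ++i) {
//       t.normalize.mean[i] = 127.5f;
//       t.normalize.norm[i] = 0.0078125f;
//   }
//
//   XInputData frame;
//   frame.nchw = false;                  // HWC layout
//   frame.bgr = true;
//   frame.width = 320;
//   frame.height = 320;
//   frame.channel = 3;
//   frame.data = pixel_buffer;           // assumed pre-decoded uint8 image data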
typedef enum {
xEngineMNN,
xEngineRKNN,
} EngineType;
class InferenceAdapter {
public:
enum {
xRetOk = 0,
xRetErr = -1,
};
typedef enum {
xDefaultCPU,
xMNNCuda,
} SpecialBackend;
public:
virtual ~InferenceAdapter() {};
virtual int32_t SetNumThreads(const int32_t num_threads) = 0;
virtual int32_t Initialize(const std::string& model_filename, const XTransform& transform, const std::string& input_name, const std::vector<std::string> &outputs_name) = 0;
virtual int32_t Initialize(char* model_buffer, int model_size, const std::string& input_name, const XTransform& transform, const std::vector<std::string> &outputs_name) = 0;
virtual int32_t Finalize(void) = 0;
virtual int32_t SetInputsData(const std::vector<XInputData>& batch) = 0;
virtual int32_t Forward(std::vector<XOutputDataList> &batch_outputs) = 0;
virtual int32_t ResizeInputs() = 0;
virtual int32_t SetSpecialBackend(SpecialBackend backend) {
special_backend_ = backend;
return xRetOk;
};
protected:
EngineType engine_type_;
SpecialBackend special_backend_ = xDefaultCPU;
};
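// A minimal no-op backend sketch (hypothetical, not part of the diff) showing
// what a new adapter must implement to satisfy the interface:
class NullAdapter : public InferenceAdapter {
public:
    NullAdapter() { engine_type_ = xEngineMNN; } // placeholder engine tag
    int32_t SetNumThreads(const int32_t) override { return xRetOk; }
    int32_t Initialize(const std::string &, const XTransform &, const std::string &,
                       const std::vector<std::string> &) override { return xRetOk; }
    int32_t Initialize(char *, int, const std::string &, const XTransform &,
                       const std::vector<std::string> &) override { return xRetOk; }
    int32_t Finalize(void) override { return xRetOk; }
    int32_t SetInputsData(const std::vector<XInputData> &batch) override { return xRetOk; }
    int32_t Forward(std::vector<XOutputDataList> &batch_outputs) override { return xRetOk; }
    int32_t ResizeInputs() override { return xRetOk; }
};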
#endif // INSPIREFACE_OMNI_INFERENACE__

View File

@@ -0,0 +1,185 @@
#ifndef MNN_SIMPLE_INFER_H
#define MNN_SIMPLE_INFER_H
#include "opencv2/opencv.hpp"
#include <MNN/ImageProcess.hpp>
#include <MNN/Interpreter.hpp>
#include <MNN/MNNDefine.h>
#include <MNN/Tensor.hpp>
#include <MNN/MNNForwardType.h>
class MNNCVAdapterInference {
public:
MNNCVAdapterInference(const std::string &model, int thread, const float mean[], const float normal[],
bool use_model_bin = false) {
#ifdef FORWARD_CUDA
backend_ = MNN_FORWARD_CUDA;
#else
backend_ = MNN_FORWARD_CPU;
#endif
if (use_model_bin) {
detect_model_ = std::shared_ptr<MNN::Interpreter>(
MNN::Interpreter::createFromBuffer(model.c_str(), model.size()));
} else {
detect_model_ = std::shared_ptr<MNN::Interpreter>(
MNN::Interpreter::createFromFile(model.c_str()));
}
_config.type = backend_;
_config.numThread = thread;
// Keep the backend config in a member: _config stores only a pointer to it,
// and createSession() reads it later in Init(), after the constructor returns
backend_config_.precision = MNN::BackendConfig::Precision_High;
backend_config_.power = MNN::BackendConfig::Power_High;
_config.backendConfig = &backend_config_;
for (int i = 0; i < 3; i++) {
this->mean[i] = mean[i];
this->normal[i] = normal[i];
}
}
~MNNCVAdapterInference() {
detect_model_->releaseModel();
detect_model_->releaseSession(sess);
}
void Init(const std::string &input, const std::vector<std::string> &outputs, int width,
int height) {
sess = detect_model_->createSession(_config);
tensor_shape_ = {1, 3, height, width};
input_ = detect_model_->getSessionInput(sess, input.c_str());
// Resize input tensor
detect_model_->resizeTensor(input_, tensor_shape_);
// Store output tensors and resize them
for (const auto& output_name : outputs) {
auto output_tensor = detect_model_->getSessionOutput(sess, output_name.c_str());
output_tensors_.emplace_back(output_tensor);
}
detect_model_->resizeSession(sess);
width_ = width;
height_ = height;
}
std::vector<std::vector<float>> Infer(const cv::Mat &mat) {
assert(mat.rows == height_);
assert(mat.cols == width_);
MNN::CV::ImageProcess::Config config;
config.destFormat = MNN::CV::ImageFormat::BGR;
config.sourceFormat = MNN::CV::BGR;
for (int i = 0; i < 3; i++) {
config.mean[i] = mean[i];
config.normal[i] = normal[i];
}
std::unique_ptr<MNN::CV::ImageProcess> process(
MNN::CV::ImageProcess::create(config));
process->convert(mat.data, mat.cols, mat.rows, (int) mat.step1(), input_);
detect_model_->runSession(sess);
std::vector<std::vector<float>> results;
for (auto output : output_tensors_) {
auto dimType = input_->getDimensionType();
if (output->getType().code != halide_type_float) {
dimType = MNN::Tensor::TENSORFLOW;
}
std::shared_ptr<MNN::Tensor> outputUser(new MNN::Tensor(output, dimType));
output->copyToHostTensor(outputUser.get());
auto type = outputUser->getType();
auto size = outputUser->elementSize();
std::vector<float> tempValues(size);
if (type.code == halide_type_float) {
auto values = outputUser->host<float>();
for (int i = 0; i < size; ++i) {
tempValues[i] = values[i];
}
}
results.push_back(tempValues);
}
return results;
}
std::vector<std::vector<std::vector<float>>> BatchInfer(const std::vector<cv::Mat> &images) {
int batch_size = images.size();
tensor_shape_[0] = batch_size; // Update batch size
detect_model_->resizeTensor(input_, tensor_shape_);
detect_model_->resizeSession(sess);
MNN::CV::ImageProcess::Config config;
config.destFormat = MNN::CV::ImageFormat::BGR;
config.sourceFormat = MNN::CV::BGR;
for (int i = 0; i < 3; i++) {
config.mean[i] = mean[i];
config.normal[i] = normal[i];
}
std::unique_ptr<MNN::CV::ImageProcess> process(
MNN::CV::ImageProcess::create(config));
std::shared_ptr<MNN::Tensor> inputUser(new MNN::Tensor(input_, MNN::Tensor::TENSORFLOW));
auto size_h = inputUser->height();
auto size_w = inputUser->width();
auto bpp = inputUser->channel();
for (int batch = 0; batch < batch_size; ++batch) {
const auto& mat = images[batch];
assert(mat.rows == height_);
assert(mat.cols == width_);
// No need to setScale since the images are already resized
process->convert(mat.data, mat.cols, mat.rows, (int)mat.step1(), inputUser->host<uint8_t>() + inputUser->stride(0) * batch * inputUser->getType().bytes(), size_w, size_h, bpp, 0, inputUser->getType());
}
input_->copyFromHostTensor(inputUser.get());
detect_model_->runSession(sess);
std::vector<std::vector<std::vector<float>>> all_results(batch_size);
for (auto output : output_tensors_) {
auto dimType = input_->getDimensionType();
if (output->getType().code != halide_type_float) {
dimType = MNN::Tensor::TENSORFLOW;
}
std::shared_ptr<MNN::Tensor> outputUser(new MNN::Tensor(output, dimType));
output->copyToHostTensor(outputUser.get());
auto type = outputUser->getType();
auto size = outputUser->elementSize() / batch_size;
for (int batch = 0; batch < batch_size; ++batch) {
std::vector<float> tempValues(size);
if (type.code == halide_type_float) {
auto values = outputUser->host<float>() + batch * outputUser->stride(0);
for (int i = 0; i < size; ++i) {
tempValues[i] = values[i];
}
}
all_results[batch].push_back(tempValues);
}
}
return all_results;
}
float mean[3]{};
float normal[3]{};
private:
std::shared_ptr<MNN::Interpreter> detect_model_;
MNN::Tensor *input_{};
std::vector<MNN::Tensor*> output_tensors_;
MNN::Session *sess{};
std::vector<int> tensor_shape_;
MNN::ScheduleConfig _config;
MNN::BackendConfig backend_config_; // must outlive _config and the session (see constructor)
MNNForwardType backend_;
int width_{};
int height_{};
};
#endif // MNN_SIMPLE_INFER_H
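A minimal usage sketch of the class above (model path, tensor names and normalization constants are illustrative assumptions):
const float mean[3] = {127.5f, 127.5f, 127.5f};
const float normal[3] = {1.0f / 127.5f, 1.0f / 127.5f, 1.0f / 127.5f};
MNNCVAdapterInference net("det.mnn", /*thread=*/2, mean, normal);
net.Init("input", {"score", "bbox"}, /*width=*/160, /*height=*/160);
cv::Mat img = cv::imread("face.jpg");
cv::resize(img, img, cv::Size(160, 160)); // Infer() asserts the input matches the Init size
std::vector<std::vector<float>> outputs = net.Infer(img); // one float vector per output tensor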

View File

@@ -273,6 +273,8 @@ public:
return kRetOk;
};
virtual int32_t ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) = 0;
virtual std::vector<std::string> GetInputNames() = 0;
protected:

View File

@@ -84,11 +84,13 @@ int32_t InferenceHelperMnn::ParameterInitialization(std::vector<InputTensorInfo>
if ((input_tensor->channel() == input_tensor_info.GetChannel()) && (input_tensor->height() == input_tensor_info.GetHeight()) && (input_tensor->width() == input_tensor_info.GetWidth())) {
/* OK */
} else {
PRINT_E("W: %d != %d\n", input_tensor->width() , input_tensor_info.GetWidth());
PRINT_E("H: %d != %d\n", input_tensor->height() , input_tensor_info.GetHeight());
PRINT_E("C: %d != %d\n", input_tensor->channel() , input_tensor_info.GetChannel());
PRINT_E("Incorrect input tensor size\n");
return kRetErr;
INSPIRE_LOGW("W: %d != %d", input_tensor->width() , input_tensor_info.GetWidth());
INSPIRE_LOGW("H: %d != %d", input_tensor->height() , input_tensor_info.GetHeight());
INSPIRE_LOGW("C: %d != %d", input_tensor->channel() , input_tensor_info.GetChannel());
INSPIRE_LOGW("There may be some risk of input that is not used by model default");
net_->resizeTensor(input_tensor, { 1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth() });
net_->resizeSession(session_);
return kRetOk;
}
} else {
PRINT("Input tensor size is set from the model\n");
@@ -103,6 +105,7 @@ int32_t InferenceHelperMnn::ParameterInitialization(std::vector<InputTensorInfo>
/* In case the input size is not fixed */
net_->resizeTensor(input_tensor, { 1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth() });
net_->resizeSession(session_);
INSPIRE_LOGE("GO RESIZE");
} else {
PRINT_E("Model input size is not set\n");
return kRetErr;
@@ -156,21 +159,22 @@ int32_t InferenceHelperMnn::Initialize(char* model_buffer, int model_size, std::
PRINT_E("Failed to load model model buffer\n");
return kRetErr;
}
MNN::ScheduleConfig scheduleConfig;
scheduleConfig.numThread = num_threads_; // setting numThread to 1 seems to perform better on Android
MNN::BackendConfig bnconfig;
bnconfig.power = MNN::BackendConfig::Power_High;
bnconfig.precision = MNN::BackendConfig::Precision_High;
if (special_backend_ == kMnnCuda) {
INSPIRE_LOGD("Enable CUDA");
scheduleConfig.type = MNN_FORWARD_CUDA;
bnconfig.power = MNN::BackendConfig::Power_Normal;
bnconfig.precision = MNN::BackendConfig::Precision_Normal;
} else {
scheduleConfig.type = MNN_FORWARD_CPU;
}
scheduleConfig.backendConfig = &bnconfig;
session_ = net_->createSession(scheduleConfig);
@@ -348,3 +352,12 @@ int32_t InferenceHelperMnn::Process(std::vector<OutputTensorInfo>& output_tensor
std::vector<std::string> InferenceHelperMnn::GetInputNames() {
return input_names_;
}
int32_t InferenceHelperMnn::ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) {
for (const auto& input_tensor_info : input_tensor_info_list) {
auto input_tensor = net_->getSessionInput(session_, input_tensor_info.name.c_str());
net_->resizeTensor(input_tensor, { 1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth() });
net_->resizeSession(session_);
}
return 0;
}

View File

@@ -44,6 +44,8 @@ public:
int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
int32_t ParameterInitialization(std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
int32_t ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
std::vector<std::string> GetInputNames() override;
private:

View File

@@ -259,5 +259,10 @@ std::vector<std::string> InferenceHelperRKNN::GetInputNames() {
return input_names_;
}
int32_t InferenceHelperRKNN::ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) {
// Input resizing is not supported by the RKNN backend; this is intentionally a no-op
return 0;
}
#endif // INFERENCE_HELPER_ENABLE_RKNN

View File

@@ -4,6 +4,7 @@
#ifndef RKNPU_PTOCR_INFERENCE_HELPER_RKNN_H
#define RKNPU_PTOCR_INFERENCE_HELPER_RKNN_H
#ifdef INFERENCE_HELPER_ENABLE_RKNN
/* for general */
#include <cstdint>
@@ -32,6 +33,7 @@ public:
int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) override;
int32_t ParameterInitialization(std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
std::vector<std::string> GetInputNames() override;
int32_t ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
private:
rknn_context net_;
int32_t num_threads_;
@@ -44,5 +46,5 @@ private:
};
#endif // INFERENCE_HELPER_ENABLE_RKNN
#endif //RKNPU_PTOCR_INFERENCE_HELPER_RKNN_H

View File

@@ -153,4 +153,9 @@ std::vector<std::string> InferenceHelperRknnAdapter::GetInputNames() {
return std::vector<std::string>();
}
int32_t InferenceHelperRknnAdapter::ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) {
// Input resizing is not supported by the RKNN backend; this is intentionally a no-op
return 0;
}
#endif // INFERENCE_HELPER_ENABLE_RKNN

View File

@@ -5,6 +5,8 @@
#ifndef HYPERFACEREPO_INFERENCE_HELPER_RKNN_ADAPTER_H
#define HYPERFACEREPO_INFERENCE_HELPER_RKNN_ADAPTER_H
#ifdef INFERENCE_HELPER_ENABLE_RKNN
/* for general */
#include <cstdint>
#include <cmath>
@@ -31,11 +33,14 @@ public:
int32_t ParameterInitialization(std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) override;
std::vector<std::string> GetInputNames() override;
int32_t ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) override;
private:
std::shared_ptr<RKNNAdapter> net_;
int32_t num_threads_;
};
#endif // INFERENCE_HELPER_ENABLE_RKNN
#endif //HYPERFACEREPO_INFERENCE_HELPER_RKNN_ADAPTER_H

View File

@@ -641,6 +641,16 @@ inline cv::Rect GetNewBox(int src_w, int src_h, cv::Rect bbox, float scale) {
return new_bbox;
}
template<typename T>
inline bool isShortestSideGreaterThan(const cv::Rect_<T>& rect, T value) {
// Find the shortest edge
T shortestSide = std::min(rect.width, rect.height);
// Determines whether the shortest edge is greater than the given value
return shortestSide > value;
}
} // namespace inspire
#endif
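A quick worked example of the helper above (values are illustrative):
cv::Rect_<float> r(0.f, 0.f, 30.f, 20.f);
bool kept = inspire::isShortestSideGreaterThan<float>(r, 24.f); // false: the shortest side is min(30, 20) = 20, and 20 > 24 fails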

View File

@@ -19,23 +19,27 @@ FaceLocList FaceDetect::operator()(const Matrix &bgr) {
int ori_h = bgr.rows;
int w, h;
float scale;
cv::Mat pad;
if (ori_w == m_input_size_ && ori_h == m_input_size_) {
// If the input image already matches the desired size, no need to resize, just pad
cv::copyMakeBorder(bgr, pad, 0, 0, 0, 0, cv::BORDER_CONSTANT, 0.0f);
}
if (ori_w > ori_h) {
scale = static_cast<float>(m_input_size_) / ori_w;
w = m_input_size_;
h = ori_h * scale;
} else {
scale = static_cast<float>(m_input_size_) / ori_h;
h = m_input_size_;
w = ori_w * scale;
}
int wpad = m_input_size_ - w;
int hpad = m_input_size_ - h;
cv::Mat resized_img;
cv::resize(bgr, resized_img, cv::Size(w, h));
cv::copyMakeBorder(resized_img, pad, 0, hpad, 0, wpad, cv::BORDER_CONSTANT, 0.0f);
// LOGD("Prepare");
AnyTensorOutputs outputs;

View File

@@ -24,7 +24,7 @@ public:
* @param nms_threshold The threshold for non-maximum suppression.
* @param cls_threshold The threshold for classification score.
*/
explicit FaceDetect(int input_size = 160, float nms_threshold = 0.5f, float cls_threshold = 0.5f);
explicit FaceDetect(int input_size = 160, float nms_threshold = 0.4f, float cls_threshold = 0.5f);
/**
* @brief Detects faces in a given image.

View File

@@ -5,19 +5,42 @@
#include "face_track.h"
#include "log.h"
#include "landmark/mean_shape.h"
#include <algorithm>
#include <cstddef>
#include <opencv2/opencv.hpp>
#include "middleware/costman.h"
#include "middleware/model_archive/inspire_archive.h"
#include "middleware/utils.h"
#include "herror.h"
namespace inspire {
FaceTrack::FaceTrack(int max_detected_faces, int detection_interval, int track_preview_size) :
max_detected_faces_(max_detected_faces),
detection_interval_(detection_interval),
track_preview_size_(track_preview_size){
FaceTrack::FaceTrack(DetectMode mode,
int max_detected_faces,
int detection_interval,
int track_preview_size,
int dynamic_detection_input_level,
int TbD_mode_fps) :
m_mode_(mode),
max_detected_faces_(max_detected_faces),
detection_interval_(detection_interval),
track_preview_size_(track_preview_size),
m_dynamic_detection_input_level_(dynamic_detection_input_level){
detection_index_ = -1;
tracking_idx_ = 0;
if (TbD_mode_fps < 0) {
TbD_mode_fps = 30;
}
if (m_mode_ == DETECT_MODE_TRACK_BY_DETECT) {
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
m_TbD_tracker_ = std::make_shared<BYTETracker>(TbD_mode_fps, 30);
#else
m_mode_ = DETECT_MODE_ALWAYS_DETECT;
INSPIRE_LOGW("If you want to use tracking-by-detection in this release, you must turn on the option symbol ISF_ENABLE_TRACKING_BY_DETECTION at compile time");
#endif
}
}
@@ -77,7 +100,8 @@ bool FaceTrack::TrackFace(CameraStream &image, FaceObject &face) {
// LOGD("get affine crop ok");
double time1 = (double) cv::getTickCount();
crop = image.GetAffineRGBImage(affine, 112, 112);
cv::Mat affine_inv;
cv::invertAffineTransform(affine, affine_inv);
double _diff =
@@ -139,7 +163,6 @@ bool FaceTrack::TrackFace(CameraStream &image, FaceObject &face) {
face.setTransMatrix(trans_m);
face.EnableTracking();
// LOGD("ready face TrackFace state %d ", face.TrackingState());
}
}
@@ -211,16 +234,17 @@ bool FaceTrack::TrackFace(CameraStream &image, FaceObject &face) {
return true;
}
void FaceTrack::UpdateStream(CameraStream &image, bool is_detect) {
void FaceTrack::UpdateStream(CameraStream &image) {
auto timeStart = (double) cv::getTickCount();
detection_index_ += 1;
if (is_detect)
if (m_mode_ == DETECT_MODE_ALWAYS_DETECT || m_mode_ == DETECT_MODE_TRACK_BY_DETECT)
trackingFace.clear();
// LOGD("%d, %d", detection_index_, detection_interval_);
if (detection_index_ % detection_interval_ == 0 || is_detect) {
if (detection_index_ % detection_interval_ == 0 || m_mode_ == DETECT_MODE_ALWAYS_DETECT || m_mode_ == DETECT_MODE_TRACK_BY_DETECT) {
// Timer t_blacking;
image.SetPreviewSize(track_preview_size_);
cv::Mat image_detect = image.GetPreviewImage(true);
nms();
for (auto const &face: trackingFace) {
cv::Rect m_mask_rect = face.GetRectSquare();
@@ -240,26 +264,24 @@ void FaceTrack::UpdateStream(CameraStream &image, bool is_detect) {
auto timeStart = (double) cv::getTickCount();
DetectFace(image_detect, image.GetPreviewScale());
det_use_time_ = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
// LOGD("detect track");
}
if (!candidate_faces_.empty()) {
// LOGD("push track face");
for (int i = 0; i < candidate_faces_.size(); i++) {
trackingFace.push_back(candidate_faces_[i]);
}
candidate_faces_.clear();
}
// Timer t_track;
for (std::vector<FaceObject>::iterator iter = trackingFace.begin();
iter != trackingFace.end();) {
iter != trackingFace.end();) {
if (!TrackFace(image, *iter)) {
iter = trackingFace.erase(iter);
} else {
iter++;
}
}
// LOGD("Track Cost %f", t_track.GetCostTimeUpdate());
track_total_use_time_ = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
@@ -305,25 +327,70 @@ void FaceTrack::BlackingTrackingRegion(cv::Mat &image, cv::Rect &rect_mask) {
void FaceTrack::DetectFace(const cv::Mat &input, float scale) {
std::vector<FaceLoc> boxes = (*m_face_detector_)(input);
if (m_mode_ == DETECT_MODE_TRACK_BY_DETECT) {
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
// Feed detections to ByteTrack, which assigns stable track ids across frames
std::vector<Object> objects;
// Keep the number of detected faces within the maximum limit
auto num_of_effective = std::min(boxes.size(), (size_t) max_detected_faces_);
for (size_t i = 0; i < num_of_effective; i++) {
Object obj;
const auto box = boxes[i];
obj.rect = Rect_<float>(box.x1, box.y1, box.x2 - box.x1, box.y2 - box.y1);
if (!isShortestSideGreaterThan<float>(obj.rect, filter_minimum_face_px_size)) {
// Filter out face detection boxes that are too small
continue;
}
obj.label = 0; // assuming all detections are faces
obj.prob = box.score;
objects.push_back(obj);
}
vector<STrack> output_stracks = m_TbD_tracker_->update(objects);
for (const auto &st_track : output_stracks) {
cv::Rect rect = cv::Rect_<float>(st_track.tlwh[0], st_track.tlwh[1], st_track.tlwh[2], st_track.tlwh[3]);
FaceObject faceinfo(st_track.track_id, rect, FaceLandmark::NUM_OF_LANDMARK);
faceinfo.detect_bbox_ = rect;
candidate_faces_.push_back(faceinfo);
}
#endif
} else {
std::vector<cv::Rect> bbox;
bbox.resize(boxes.size());
for (size_t i = 0; i < boxes.size(); i++) {
bbox[i] = cv::Rect(cv::Point(static_cast<int>(boxes[i].x1), static_cast<int>(boxes[i].y1)),
cv::Point(static_cast<int>(boxes[i].x2), static_cast<int>(boxes[i].y2)));
if (!isShortestSideGreaterThan<float>(bbox[i], filter_minimum_face_px_size)) {
// Filter out face detection boxes that are too small
continue;
}
if (m_mode_ == DETECT_MODE_ALWAYS_DETECT) {
// Always-detect mode does not assign an id
tracking_idx_ = -1;
} else {
tracking_idx_ = tracking_idx_ + 1;
}
FaceObject faceinfo(tracking_idx_, bbox[i], FaceLandmark::NUM_OF_LANDMARK);
faceinfo.detect_bbox_ = bbox[i];
// Keep the number of detected faces within the maximum limit
if (candidate_faces_.size() < max_detected_faces_) {
candidate_faces_.push_back(faceinfo);
} else {
// When the limit is exceeded, a policy could decide which face to drop
// (e.g. compare confidences and drop the least confident one);
// here the most recently detected face is simply discarded
candidate_faces_.pop_back();
}
}
}
}
int FaceTrack::Configuration(inspire::InspireArchive &archive) {
@@ -376,9 +443,22 @@ int FaceTrack::InitLandmarkModel(InspireModel &model) {
}
int FaceTrack::InitDetectModel(InspireModel &model) {
auto input_size = model.Config().get<std::vector<int>>("input_size");
std::vector<int> input_size;
if (m_dynamic_detection_input_level_ != -1) {
if (m_dynamic_detection_input_level_ % 160 != 0 || m_dynamic_detection_input_level_ < 160) {
INSPIRE_LOGE("The input size '%d' for the custom detector is not valid. \
Please use a multiple of 160 (minimum 160) for the input dimensions, such as 320 or 640.", m_dynamic_detection_input_level_);
return HERR_INVALID_DETECTION_INPUT;
}
// Wide-Range mode temporary value
input_size = {m_dynamic_detection_input_level_, m_dynamic_detection_input_level_};
model.Config().set<std::vector<int>>("input_size", input_size);
} else {
input_size = model.Config().get<std::vector<int>>("input_size");
}
bool dym = true;
m_face_detector_ = std::make_shared<FaceDetect>(input_size[0]);
auto ret = m_face_detector_->loadData(model, model.modelType);
auto ret = m_face_detector_->loadData(model, model.modelType, dym);
if (ret != InferenceHelper::kRetOk) {
return HERR_ARCHIVE_LOAD_FAILURE;
}
@@ -407,12 +487,16 @@ void FaceTrack::SetDetectThreshold(float value) {
m_face_detector_->SetClsThreshold(value);
}
void FaceTrack::SetMinimumFacePxSize(float value) {
filter_minimum_face_px_size = value;
}
double FaceTrack::GetTrackTotalUseTime() const {
return track_total_use_time_;
}
void FaceTrack::SetTrackPreviewSize(int preview_size) {
track_preview_size_ = preview_size;
}

View File

@@ -11,9 +11,24 @@
#include "middleware/camera_stream/camera_stream.h"
#include "quality/face_pose_quality.h"
#include "middleware/model_archive/inspire_archive.h"
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
#include "tracker_optional/bytetrack/BYTETracker.h"
#endif
namespace inspire {
/**
* @enum DetectMode
* @brief Enumeration for different detection modes.
*/
enum DetectMode {
DETECT_MODE_ALWAYS_DETECT = 0, ///< Detection mode: Always detect
DETECT_MODE_LIGHT_TRACK, ///< Detection mode: Light face track
DETECT_MODE_TRACK_BY_DETECT, ///< Detection mode: Tracking by detection
};
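For illustration, two ways the tracker might be constructed with the constructor declared further below (parameter values are illustrative):
inspire::FaceTrack video_tracker(inspire::DETECT_MODE_LIGHT_TRACK); // detect every detection_interval frames, track in between
inspire::FaceTrack image_runner(inspire::DETECT_MODE_ALWAYS_DETECT, /*max_detected_faces=*/5); // re-detect on every frame, ids stay -1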
/**
* @class FaceTrack
* @brief Class for tracking faces in video streams.
@@ -29,8 +44,9 @@ public:
* @param max_detected_faces Maximum number of faces to be detected.
* @param detection_interval Interval between detections to track faces.
* @param track_preview_size Size of the preview for tracking.
* @param dynamic_detection_input_level Change the detector input size.
*/
explicit FaceTrack(int max_detected_faces = 1, int detection_interval = 20, int track_preview_size = 192);
explicit FaceTrack(DetectMode mode, int max_detected_faces = 1, int detection_interval = 20, int track_preview_size = 192, int dynamic_detection_input_level = -1, int TbD_mode_fps=30);
/**
* @brief Configures the face tracking with models.
@@ -44,7 +60,7 @@ public:
* @param image Camera stream to process.
*/
void UpdateStream(CameraStream &image, bool is_detect);
void UpdateStream(CameraStream &image);
/**
* @brief Sets the preview size for tracking.
@@ -122,7 +138,6 @@ private:
*/
int InitFacePoseModel(InspireModel& model);
public:
/**
@@ -136,19 +151,25 @@ public:
* */
void SetDetectThreshold(float value);
/**
* @brief Set the minimum face pixel size to retain.
* */
void SetMinimumFacePxSize(float value);
public:
std::vector<FaceObject> trackingFace; ///< Vector of FaceObjects currently being tracked.
private:
const int max_detected_faces_; ///< Maximum number of faces to detect.
std::vector<FaceObject> candidate_faces_; ///< Vector of candidate FaceObjects for tracking.
int detection_index_; ///< Current detection index.
int detection_interval_; ///< Interval between detections.
int tracking_idx_; ///< Current tracking index.
double det_use_time_; ///< Time used for detection.
double track_total_use_time_; ///< Total time used for tracking.
int track_preview_size_; ///< Size of the tracking preview.
int filter_minimum_face_px_size = 24; ///< Minimum face pixel allowed to be retained (take the edge with the smallest Rect).
private:
@@ -156,7 +177,14 @@ private:
std::shared_ptr<FaceLandmark> m_landmark_predictor_; ///< Shared pointer to the landmark predictor.
std::shared_ptr<RNet> m_refine_net_; ///< Shared pointer to the RNet model.
std::shared_ptr<FacePoseQuality> m_face_quality_; ///< Shared pointer to the face pose quality assessor.
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
std::shared_ptr<BYTETracker> m_TbD_tracker_; ///< Shared pointer to the Bytetrack.
#endif
int m_dynamic_detection_input_level_ = -1; ///< Detector size class for dynamic input.
DetectMode m_mode_; ///< Detect mode
};
} // namespace inspire

View File

@@ -0,0 +1,245 @@
#include "BYTETracker.h"
#include <fstream>
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
BYTETracker::BYTETracker(int frame_rate, int track_buffer)
{
track_thresh = 0.5;
high_thresh = 0.6;
match_thresh = 0.8;
frame_id = 0;
max_time_lost = int(frame_rate / 30.0 * track_buffer);
cout << "Init ByteTrack!" << endl;
}
BYTETracker::~BYTETracker()
{
}
vector<STrack> BYTETracker::update(const vector<Object>& objects)
{
////////////////// Step 1: Get detections //////////////////
this->frame_id++;
vector<STrack> activated_stracks;
vector<STrack> refind_stracks;
vector<STrack> removed_stracks;
vector<STrack> lost_stracks;
vector<STrack> detections;
vector<STrack> detections_low;
vector<STrack> detections_cp;
vector<STrack> tracked_stracks_swap;
vector<STrack> resa, resb;
vector<STrack> output_stracks;
vector<STrack*> unconfirmed;
vector<STrack*> tracked_stracks;
vector<STrack*> strack_pool;
vector<STrack*> r_tracked_stracks;
if (objects.size() > 0)
{
for (int i = 0; i < objects.size(); i++)
{
vector<float> tlbr_;
tlbr_.resize(4);
tlbr_[0] = objects[i].rect.x;
tlbr_[1] = objects[i].rect.y;
tlbr_[2] = objects[i].rect.x + objects[i].rect.width;
tlbr_[3] = objects[i].rect.y + objects[i].rect.height;
float score = objects[i].prob;
STrack strack(STrack::tlbr_to_tlwh(tlbr_), score);
if (score >= track_thresh)
{
detections.push_back(strack);
}
else
{
detections_low.push_back(strack);
}
}
}
// Add newly detected tracklets to tracked_stracks
for (int i = 0; i < this->tracked_stracks.size(); i++)
{
if (!this->tracked_stracks[i].is_activated)
unconfirmed.push_back(&this->tracked_stracks[i]);
else
tracked_stracks.push_back(&this->tracked_stracks[i]);
}
////////////////// Step 2: First association, with IoU //////////////////
strack_pool = joint_stracks(tracked_stracks, this->lost_stracks);
STrack::multi_predict(strack_pool, this->kalman_filter);
vector<vector<float> > dists;
int dist_size = 0, dist_size_size = 0;
dists = iou_distance(strack_pool, detections, dist_size, dist_size_size);
vector<vector<int> > matches;
vector<int> u_track, u_detection;
linear_assignment(dists, dist_size, dist_size_size, match_thresh, matches, u_track, u_detection);
for (int i = 0; i < matches.size(); i++)
{
STrack *track = strack_pool[matches[i][0]];
STrack *det = &detections[matches[i][1]];
if (track->state == TrackState::Tracked)
{
track->update(*det, this->frame_id);
activated_stracks.push_back(*track);
}
else
{
track->re_activate(*det, this->frame_id, false);
refind_stracks.push_back(*track);
}
}
////////////////// Step 3: Second association, using low score dets //////////////////
for (int i = 0; i < u_detection.size(); i++)
{
detections_cp.push_back(detections[u_detection[i]]);
}
detections.clear();
detections.assign(detections_low.begin(), detections_low.end());
for (int i = 0; i < u_track.size(); i++)
{
if (strack_pool[u_track[i]]->state == TrackState::Tracked)
{
r_tracked_stracks.push_back(strack_pool[u_track[i]]);
}
}
dists.clear();
dists = iou_distance(r_tracked_stracks, detections, dist_size, dist_size_size);
matches.clear();
u_track.clear();
u_detection.clear();
linear_assignment(dists, dist_size, dist_size_size, 0.5, matches, u_track, u_detection);
for (int i = 0; i < matches.size(); i++)
{
STrack *track = r_tracked_stracks[matches[i][0]];
STrack *det = &detections[matches[i][1]];
if (track->state == TrackState::Tracked)
{
track->update(*det, this->frame_id);
activated_stracks.push_back(*track);
}
else
{
track->re_activate(*det, this->frame_id, false);
refind_stracks.push_back(*track);
}
}
for (int i = 0; i < u_track.size(); i++)
{
STrack *track = r_tracked_stracks[u_track[i]];
if (track->state != TrackState::Lost)
{
track->mark_lost();
lost_stracks.push_back(*track);
}
}
// Deal with unconfirmed tracks, usually tracks with only one beginning frame
detections.clear();
detections.assign(detections_cp.begin(), detections_cp.end());
dists.clear();
dists = iou_distance(unconfirmed, detections, dist_size, dist_size_size);
matches.clear();
vector<int> u_unconfirmed;
u_detection.clear();
linear_assignment(dists, dist_size, dist_size_size, 0.7, matches, u_unconfirmed, u_detection);
for (int i = 0; i < matches.size(); i++)
{
unconfirmed[matches[i][0]]->update(detections[matches[i][1]], this->frame_id);
activated_stracks.push_back(*unconfirmed[matches[i][0]]);
}
for (int i = 0; i < u_unconfirmed.size(); i++)
{
STrack *track = unconfirmed[u_unconfirmed[i]];
track->mark_removed();
removed_stracks.push_back(*track);
}
////////////////// Step 4: Init new stracks //////////////////
for (int i = 0; i < u_detection.size(); i++)
{
STrack *track = &detections[u_detection[i]];
if (track->score < this->high_thresh)
continue;
track->activate(this->kalman_filter, this->frame_id);
activated_stracks.push_back(*track);
}
////////////////// Step 5: Update state //////////////////
for (int i = 0; i < this->lost_stracks.size(); i++)
{
if (this->frame_id - this->lost_stracks[i].end_frame() > this->max_time_lost)
{
this->lost_stracks[i].mark_removed();
removed_stracks.push_back(this->lost_stracks[i]);
}
}
for (int i = 0; i < this->tracked_stracks.size(); i++)
{
if (this->tracked_stracks[i].state == TrackState::Tracked)
{
tracked_stracks_swap.push_back(this->tracked_stracks[i]);
}
}
this->tracked_stracks.clear();
this->tracked_stracks.assign(tracked_stracks_swap.begin(), tracked_stracks_swap.end());
this->tracked_stracks = joint_stracks(this->tracked_stracks, activated_stracks);
this->tracked_stracks = joint_stracks(this->tracked_stracks, refind_stracks);
//std::cout << activated_stracks.size() << std::endl;
this->lost_stracks = sub_stracks(this->lost_stracks, this->tracked_stracks);
for (int i = 0; i < lost_stracks.size(); i++)
{
this->lost_stracks.push_back(lost_stracks[i]);
}
this->lost_stracks = sub_stracks(this->lost_stracks, this->removed_stracks);
for (int i = 0; i < removed_stracks.size(); i++)
{
this->removed_stracks.push_back(removed_stracks[i]);
}
remove_duplicate_stracks(resa, resb, this->tracked_stracks, this->lost_stracks);
this->tracked_stracks.clear();
this->tracked_stracks.assign(resa.begin(), resa.end());
this->lost_stracks.clear();
this->lost_stracks.assign(resb.begin(), resb.end());
for (int i = 0; i < this->tracked_stracks.size(); i++)
{
if (this->tracked_stracks[i].is_activated)
{
output_stracks.push_back(this->tracked_stracks[i]);
}
}
return output_stracks;
}
#endif

View File

@@ -0,0 +1,53 @@
#pragma once
#include "STrack.h"
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
struct Object
{
cv::Rect_<float> rect;
int label;
float prob;
};
class BYTETracker
{
public:
BYTETracker(int frame_rate = 30, int track_buffer = 30);
~BYTETracker();
vector<STrack> update(const vector<Object>& objects);
Scalar get_color(int idx);
private:
vector<STrack*> joint_stracks(vector<STrack*> &tlista, vector<STrack> &tlistb);
vector<STrack> joint_stracks(vector<STrack> &tlista, vector<STrack> &tlistb);
vector<STrack> sub_stracks(vector<STrack> &tlista, vector<STrack> &tlistb);
void remove_duplicate_stracks(vector<STrack> &resa, vector<STrack> &resb, vector<STrack> &stracksa, vector<STrack> &stracksb);
void linear_assignment(vector<vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
vector<vector<int> > &matches, vector<int> &unmatched_a, vector<int> &unmatched_b);
vector<vector<float> > iou_distance(vector<STrack*> &atracks, vector<STrack> &btracks, int &dist_size, int &dist_size_size);
vector<vector<float> > iou_distance(vector<STrack> &atracks, vector<STrack> &btracks);
vector<vector<float> > ious(vector<vector<float> > &atlbrs, vector<vector<float> > &btlbrs);
double lapjv(const vector<vector<float> > &cost, vector<int> &rowsol, vector<int> &colsol,
bool extend_cost = false, float cost_limit = LONG_MAX, bool return_cost = true);
private:
float track_thresh;
float high_thresh;
float match_thresh;
int frame_id;
int max_time_lost;
vector<STrack> tracked_stracks;
vector<STrack> lost_stracks;
vector<STrack> removed_stracks;
byte_kalman::KalmanFilter kalman_filter;
};
#endif
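A per-frame usage sketch of the tracker above (grab_frame and detect_faces are hypothetical stand-ins; with the constructor defaults, a lost track is dropped after max_time_lost = frame_rate / 30.0 * track_buffer frames, i.e. 30 frames at 30 fps):
BYTETracker tracker(30, 30); // 30 fps input, 30-frame track buffer
cv::Mat frame;
while (grab_frame(frame)) { // hypothetical frame source
std::vector<Object> dets = detect_faces(frame); // hypothetical detector filling rect/label/prob
std::vector<STrack> tracks = tracker.update(dets);
for (const auto &t : tracks) {
// t.track_id is stable across frames; t.tlwh is the Kalman-smoothed box
cv::Rect box((int) t.tlwh[0], (int) t.tlwh[1], (int) t.tlwh[2], (int) t.tlwh[3]);
cv::rectangle(frame, box, tracker.get_color(t.track_id), 2);
}
}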

View File

@@ -0,0 +1,196 @@
#include "STrack.h"
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
STrack::STrack(vector<float> tlwh_, float score)
{
_tlwh.resize(4);
_tlwh.assign(tlwh_.begin(), tlwh_.end());
is_activated = false;
track_id = 0;
state = TrackState::New;
tlwh.resize(4);
tlbr.resize(4);
static_tlwh();
static_tlbr();
frame_id = 0;
tracklet_len = 0;
this->score = score;
start_frame = 0;
}
STrack::~STrack()
{
}
void STrack::activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id)
{
this->kalman_filter = kalman_filter;
this->track_id = this->next_id();
vector<float> _tlwh_tmp(4);
_tlwh_tmp[0] = this->_tlwh[0];
_tlwh_tmp[1] = this->_tlwh[1];
_tlwh_tmp[2] = this->_tlwh[2];
_tlwh_tmp[3] = this->_tlwh[3];
vector<float> xyah = tlwh_to_xyah(_tlwh_tmp);
DETECTBOX xyah_box;
xyah_box[0] = xyah[0];
xyah_box[1] = xyah[1];
xyah_box[2] = xyah[2];
xyah_box[3] = xyah[3];
auto mc = this->kalman_filter.initiate(xyah_box);
this->mean = mc.first;
this->covariance = mc.second;
static_tlwh();
static_tlbr();
this->tracklet_len = 0;
this->state = TrackState::Tracked;
if (frame_id == 1)
{
this->is_activated = true;
}
//this->is_activated = true;
this->frame_id = frame_id;
this->start_frame = frame_id;
}
void STrack::re_activate(STrack &new_track, int frame_id, bool new_id)
{
vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
DETECTBOX xyah_box;
xyah_box[0] = xyah[0];
xyah_box[1] = xyah[1];
xyah_box[2] = xyah[2];
xyah_box[3] = xyah[3];
auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
this->mean = mc.first;
this->covariance = mc.second;
static_tlwh();
static_tlbr();
this->tracklet_len = 0;
this->state = TrackState::Tracked;
this->is_activated = true;
this->frame_id = frame_id;
this->score = new_track.score;
if (new_id)
this->track_id = next_id();
}
void STrack::update(STrack &new_track, int frame_id)
{
this->frame_id = frame_id;
this->tracklet_len++;
vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
DETECTBOX xyah_box;
xyah_box[0] = xyah[0];
xyah_box[1] = xyah[1];
xyah_box[2] = xyah[2];
xyah_box[3] = xyah[3];
auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
this->mean = mc.first;
this->covariance = mc.second;
static_tlwh();
static_tlbr();
this->state = TrackState::Tracked;
this->is_activated = true;
this->score = new_track.score;
}
void STrack::static_tlwh()
{
if (this->state == TrackState::New)
{
tlwh[0] = _tlwh[0];
tlwh[1] = _tlwh[1];
tlwh[2] = _tlwh[2];
tlwh[3] = _tlwh[3];
return;
}
tlwh[0] = mean[0];
tlwh[1] = mean[1];
tlwh[2] = mean[2];
tlwh[3] = mean[3];
tlwh[2] *= tlwh[3];
tlwh[0] -= tlwh[2] / 2;
tlwh[1] -= tlwh[3] / 2;
}
void STrack::static_tlbr()
{
tlbr.clear();
tlbr.assign(tlwh.begin(), tlwh.end());
tlbr[2] += tlbr[0];
tlbr[3] += tlbr[1];
}
vector<float> STrack::tlwh_to_xyah(vector<float> tlwh_tmp)
{
vector<float> tlwh_output = tlwh_tmp;
tlwh_output[0] += tlwh_output[2] / 2;
tlwh_output[1] += tlwh_output[3] / 2;
tlwh_output[2] /= tlwh_output[3];
return tlwh_output;
}
vector<float> STrack::to_xyah()
{
return tlwh_to_xyah(tlwh);
}
vector<float> STrack::tlbr_to_tlwh(vector<float> &tlbr)
{
tlbr[2] -= tlbr[0];
tlbr[3] -= tlbr[1];
return tlbr;
}
void STrack::mark_lost()
{
state = TrackState::Lost;
}
void STrack::mark_removed()
{
state = TrackState::Removed;
}
int STrack::next_id()
{
static int _count = 0;
_count++;
return _count;
}
int STrack::end_frame()
{
return this->frame_id;
}
void STrack::multi_predict(vector<STrack*> &stracks, byte_kalman::KalmanFilter &kalman_filter)
{
for (int i = 0; i < stracks.size(); i++)
{
if (stracks[i]->state != TrackState::Tracked)
{
stracks[i]->mean[7] = 0;
}
kalman_filter.predict(stracks[i]->mean, stracks[i]->covariance);
}
}
#endif
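A worked example of the box conversions above (numbers chosen so the arithmetic is easy to check):
std::vector<float> tlbr = {10.f, 20.f, 50.f, 100.f}; // x1, y1, x2, y2
std::vector<float> tlwh = STrack::tlbr_to_tlwh(tlbr); // {10, 20, 40, 80}; note the input is modified in place
STrack track(tlwh, 0.9f);
std::vector<float> xyah = track.to_xyah(); // {30, 60, 0.5, 80}: center x/y, aspect ratio w/h, height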

View File

@@ -0,0 +1,52 @@
#pragma once
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
#include <opencv2/opencv.hpp>
#include "kalmanFilter.h"
using namespace cv;
using namespace std;
enum TrackState { New = 0, Tracked, Lost, Removed };
class STrack
{
public:
STrack(vector<float> tlwh_, float score);
~STrack();
vector<float> static tlbr_to_tlwh(vector<float> &tlbr);
void static multi_predict(vector<STrack*> &stracks, byte_kalman::KalmanFilter &kalman_filter);
void static_tlwh();
void static_tlbr();
vector<float> tlwh_to_xyah(vector<float> tlwh_tmp);
vector<float> to_xyah();
void mark_lost();
void mark_removed();
int next_id();
int end_frame();
void activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id);
void re_activate(STrack &new_track, int frame_id, bool new_id = false);
void update(STrack &new_track, int frame_id);
public:
bool is_activated;
int track_id;
int state;
vector<float> _tlwh;
vector<float> tlwh;
vector<float> tlbr;
int frame_id;
int tracklet_len;
int start_frame;
KAL_MEAN mean;
KAL_COVA covariance;
float score;
private:
byte_kalman::KalmanFilter kalman_filter;
};
#endif

View File

@@ -0,0 +1,40 @@
#pragma once
#include <cstddef>
#include <vector>
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
#include <Eigen/Core>
#include <Eigen/Dense>
typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> DETECTBOX;
typedef Eigen::Matrix<float, -1, 4, Eigen::RowMajor> DETECTBOXSS;
typedef Eigen::Matrix<float, 1, 128, Eigen::RowMajor> FEATURE;
typedef Eigen::Matrix<float, Eigen::Dynamic, 128, Eigen::RowMajor> FEATURESS;
//typedef std::vector<FEATURE> FEATURESS;
//Kalmanfilter
//typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_FILTER;
typedef Eigen::Matrix<float, 1, 8, Eigen::RowMajor> KAL_MEAN;
typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_COVA;
typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> KAL_HMEAN;
typedef Eigen::Matrix<float, 4, 4, Eigen::RowMajor> KAL_HCOVA;
using KAL_DATA = std::pair<KAL_MEAN, KAL_COVA>;
using KAL_HDATA = std::pair<KAL_HMEAN, KAL_HCOVA>;
//main
using RESULT_DATA = std::pair<int, DETECTBOX>;
//tracker:
using TRACKER_DATA = std::pair<int, FEATURESS>;
using MATCH_DATA = std::pair<int, int>;
typedef struct t {
std::vector<MATCH_DATA> matches;
std::vector<int> unmatched_tracks;
std::vector<int> unmatched_detections;
}TRACHER_MATCHD;
//linear_assignment:
typedef Eigen::Matrix<float, -1, -1, Eigen::RowMajor> DYNAMICM;
#endif

View File

@@ -0,0 +1,155 @@
#include "kalmanFilter.h"
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
#include <Eigen/Cholesky>
namespace byte_kalman
{
const double KalmanFilter::chi2inv95[10] = {
0,
3.8415,
5.9915,
7.8147,
9.4877,
11.070,
12.592,
14.067,
15.507,
16.919
};
KalmanFilter::KalmanFilter()
{
int ndim = 4;
double dt = 1.;
_motion_mat = Eigen::MatrixXf::Identity(8, 8);
for (int i = 0; i < ndim; i++) {
_motion_mat(i, ndim + i) = dt;
}
_update_mat = Eigen::MatrixXf::Identity(4, 8);
this->_std_weight_position = 1. / 20;
this->_std_weight_velocity = 1. / 160;
}
KAL_DATA KalmanFilter::initiate(const DETECTBOX &measurement)
{
DETECTBOX mean_pos = measurement;
DETECTBOX mean_vel;
for (int i = 0; i < 4; i++) mean_vel(i) = 0;
KAL_MEAN mean;
for (int i = 0; i < 8; i++) {
if (i < 4) mean(i) = mean_pos(i);
else mean(i) = mean_vel(i - 4);
}
KAL_MEAN std;
std(0) = 2 * _std_weight_position * measurement[3];
std(1) = 2 * _std_weight_position * measurement[3];
std(2) = 1e-2;
std(3) = 2 * _std_weight_position * measurement[3];
std(4) = 10 * _std_weight_velocity * measurement[3];
std(5) = 10 * _std_weight_velocity * measurement[3];
std(6) = 1e-5;
std(7) = 10 * _std_weight_velocity * measurement[3];
KAL_MEAN tmp = std.array().square();
KAL_COVA var = tmp.asDiagonal();
return std::make_pair(mean, var);
}
void KalmanFilter::predict(KAL_MEAN &mean, KAL_COVA &covariance)
{
//revise the data;
DETECTBOX std_pos;
std_pos << _std_weight_position * mean(3),
_std_weight_position * mean(3),
1e-2,
_std_weight_position * mean(3);
DETECTBOX std_vel;
std_vel << _std_weight_velocity * mean(3),
_std_weight_velocity * mean(3),
1e-5,
_std_weight_velocity * mean(3);
KAL_MEAN tmp;
tmp.block<1, 4>(0, 0) = std_pos;
tmp.block<1, 4>(0, 4) = std_vel;
tmp = tmp.array().square();
KAL_COVA motion_cov = tmp.asDiagonal();
KAL_MEAN mean1 = this->_motion_mat * mean.transpose();
KAL_COVA covariance1 = this->_motion_mat * covariance *(_motion_mat.transpose());
covariance1 += motion_cov;
mean = mean1;
covariance = covariance1;
}
KAL_HDATA KalmanFilter::project(const KAL_MEAN &mean, const KAL_COVA &covariance)
{
DETECTBOX std;
std << _std_weight_position * mean(3), _std_weight_position * mean(3),
1e-1, _std_weight_position * mean(3);
KAL_HMEAN mean1 = _update_mat * mean.transpose();
KAL_HCOVA covariance1 = _update_mat * covariance * (_update_mat.transpose());
Eigen::Matrix<float, 4, 4> diag = std.asDiagonal();
diag = diag.array().square().matrix();
covariance1 += diag;
// covariance1.diagonal() << diag;
return std::make_pair(mean1, covariance1);
}
KAL_DATA
KalmanFilter::update(
const KAL_MEAN &mean,
const KAL_COVA &covariance,
const DETECTBOX &measurement)
{
KAL_HDATA pa = project(mean, covariance);
KAL_HMEAN projected_mean = pa.first;
KAL_HCOVA projected_cov = pa.second;
//chol_factor, lower =
//scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
//kalmain_gain =
//scipy.linalg.cho_solve((cho_factor, lower),
//np.dot(covariance, self._upadte_mat.T).T,
//check_finite=False).T
Eigen::Matrix<float, 4, 8> B = (covariance * (_update_mat.transpose())).transpose();
Eigen::Matrix<float, 8, 4> kalman_gain = (projected_cov.llt().solve(B)).transpose(); // eg.8x4
Eigen::Matrix<float, 1, 4> innovation = measurement - projected_mean; //eg.1x4
auto tmp = innovation * (kalman_gain.transpose());
KAL_MEAN new_mean = (mean.array() + tmp.array()).matrix();
KAL_COVA new_covariance = covariance - kalman_gain * projected_cov*(kalman_gain.transpose());
return std::make_pair(new_mean, new_covariance);
}
Eigen::Matrix<float, 1, -1>
KalmanFilter::gating_distance(
const KAL_MEAN &mean,
const KAL_COVA &covariance,
const std::vector<DETECTBOX> &measurements,
bool only_position)
{
KAL_HDATA pa = this->project(mean, covariance);
if (only_position) {
printf("not implement!");
exit(0);
}
KAL_HMEAN mean1 = pa.first;
KAL_HCOVA covariance1 = pa.second;
// Eigen::Matrix<float, -1, 4, Eigen::RowMajor> d(size, 4);
DETECTBOXSS d(measurements.size(), 4);
int pos = 0;
for (DETECTBOX box : measurements) {
d.row(pos++) = box - mean1;
}
Eigen::Matrix<float, -1, -1, Eigen::RowMajor> factor = covariance1.llt().matrixL();
Eigen::Matrix<float, -1, -1> z = factor.triangularView<Eigen::Lower>().solve<Eigen::OnTheRight>(d).transpose();
auto zz = ((z.array())*(z.array())).matrix();
auto square_maha = zz.colwise().sum();
return square_maha;
}
}
#endif
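For reference, predict/project/update above follow the standard Kalman filter recursions; as a sketch, with $x$ the 8-d state, $z$ the 4-d xyah measurement, $F$ = _motion_mat, $H$ = _update_mat, and $Q$, $R$ the noise covariances built from the std-weight terms:
\begin{aligned}
\text{predict:}\quad & x' = F x, & P' &= F P F^{\top} + Q \\
\text{project:}\quad & \hat{z} = H x', & S &= H P' H^{\top} + R \\
\text{update:}\quad & K = P' H^{\top} S^{-1}, & x &= x' + K (z - \hat{z}), \; P = P' - K S K^{\top}
\end{aligned}
Rather than forming $S^{-1}$ explicitly, update() obtains the gain by solving $S K^{\top} = (P' H^{\top})^{\top}$ through the Cholesky factorization (llt).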

View File

@@ -0,0 +1,34 @@
#pragma once
#include "dataType.h"
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
namespace byte_kalman
{
class KalmanFilter
{
public:
static const double chi2inv95[10];
KalmanFilter();
KAL_DATA initiate(const DETECTBOX& measurement);
void predict(KAL_MEAN& mean, KAL_COVA& covariance);
KAL_HDATA project(const KAL_MEAN& mean, const KAL_COVA& covariance);
KAL_DATA update(const KAL_MEAN& mean,
const KAL_COVA& covariance,
const DETECTBOX& measurement);
Eigen::Matrix<float, 1, -1> gating_distance(
const KAL_MEAN& mean,
const KAL_COVA& covariance,
const std::vector<DETECTBOX>& measurements,
bool only_position = false);
private:
Eigen::Matrix<float, 8, 8, Eigen::RowMajor> _motion_mat;
Eigen::Matrix<float, 4, 8, Eigen::RowMajor> _update_mat;
float _std_weight_position;
float _std_weight_velocity;
};
}
#endif

View File

@@ -0,0 +1,345 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
#include "lapjv.h"
/** Column-reduction and reduction transfer for a dense cost matrix.
*/
int_t _ccrrt_dense(const uint_t n, cost_t *cost[],
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
int_t n_free_rows;
boolean *unique;
for (uint_t i = 0; i < n; i++) {
x[i] = -1;
v[i] = LARGE;
y[i] = 0;
}
for (uint_t i = 0; i < n; i++) {
for (uint_t j = 0; j < n; j++) {
const cost_t c = cost[i][j];
if (c < v[j]) {
v[j] = c;
y[j] = i;
}
PRINTF("i=%d, j=%d, c[i,j]=%f, v[j]=%f y[j]=%d\n", i, j, c, v[j], y[j]);
}
}
PRINT_COST_ARRAY(v, n);
PRINT_INDEX_ARRAY(y, n);
NEW(unique, boolean, n);
memset(unique, TRUE, n);
{
int_t j = n;
do {
j--;
const int_t i = y[j];
if (x[i] < 0) {
x[i] = j;
}
else {
unique[i] = FALSE;
y[j] = -1;
}
} while (j > 0);
}
n_free_rows = 0;
for (uint_t i = 0; i < n; i++) {
if (x[i] < 0) {
free_rows[n_free_rows++] = i;
}
else if (unique[i]) {
const int_t j = x[i];
cost_t min = LARGE;
for (uint_t j2 = 0; j2 < n; j2++) {
if (j2 == (uint_t)j) {
continue;
}
const cost_t c = cost[i][j2] - v[j2];
if (c < min) {
min = c;
}
}
PRINTF("v[%d] = %f - %f\n", j, v[j], min);
v[j] -= min;
}
}
FREE(unique);
return n_free_rows;
}
/** Augmenting row reduction for a dense cost matrix.
*/
int_t _carr_dense(
const uint_t n, cost_t *cost[],
const uint_t n_free_rows,
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
uint_t current = 0;
int_t new_free_rows = 0;
uint_t rr_cnt = 0;
PRINT_INDEX_ARRAY(x, n);
PRINT_INDEX_ARRAY(y, n);
PRINT_COST_ARRAY(v, n);
PRINT_INDEX_ARRAY(free_rows, n_free_rows);
while (current < n_free_rows) {
int_t i0;
int_t j1, j2;
cost_t v1, v2, v1_new;
boolean v1_lowers;
rr_cnt++;
PRINTF("current = %d rr_cnt = %d\n", current, rr_cnt);
const int_t free_i = free_rows[current++];
j1 = 0;
v1 = cost[free_i][0] - v[0];
j2 = -1;
v2 = LARGE;
for (uint_t j = 1; j < n; j++) {
PRINTF("%d = %f %d = %f\n", j1, v1, j2, v2);
const cost_t c = cost[free_i][j] - v[j];
if (c < v2) {
if (c >= v1) {
v2 = c;
j2 = j;
}
else {
v2 = v1;
v1 = c;
j2 = j1;
j1 = j;
}
}
}
i0 = y[j1];
v1_new = v[j1] - (v2 - v1);
v1_lowers = v1_new < v[j1];
PRINTF("%d %d 1=%d,%f 2=%d,%f v1'=%f(%d,%g) \n", free_i, i0, j1, v1, j2, v2, v1_new, v1_lowers, v[j1] - v1_new);
if (rr_cnt < current * n) {
if (v1_lowers) {
v[j1] = v1_new;
}
else if (i0 >= 0 && j2 >= 0) {
j1 = j2;
i0 = y[j2];
}
if (i0 >= 0) {
if (v1_lowers) {
free_rows[--current] = i0;
}
else {
free_rows[new_free_rows++] = i0;
}
}
}
else {
PRINTF("rr_cnt=%d >= %d (current=%d * n=%d)\n", rr_cnt, current * n, current, n);
if (i0 >= 0) {
free_rows[new_free_rows++] = i0;
}
}
x[free_i] = j1;
y[j1] = free_i;
}
return new_free_rows;
}
/** Find columns with minimum d[j] and put them on the SCAN list.
*/
uint_t _find_dense(const uint_t n, uint_t lo, cost_t *d, int_t *cols, int_t *y)
{
uint_t hi = lo + 1;
cost_t mind = d[cols[lo]];
for (uint_t k = hi; k < n; k++) {
int_t j = cols[k];
if (d[j] <= mind) {
if (d[j] < mind) {
hi = lo;
mind = d[j];
}
cols[k] = cols[hi];
cols[hi++] = j;
}
}
return hi;
}
// Scan all columns in TODO starting from arbitrary column in SCAN
// and try to decrease d of the TODO columns using the SCAN column.
int_t _scan_dense(const uint_t n, cost_t *cost[],
uint_t *plo, uint_t*phi,
cost_t *d, int_t *cols, int_t *pred,
int_t *y, cost_t *v)
{
uint_t lo = *plo;
uint_t hi = *phi;
cost_t h, cred_ij;
while (lo != hi) {
int_t j = cols[lo++];
const int_t i = y[j];
const cost_t mind = d[j];
h = cost[i][j] - v[j] - mind;
PRINTF("i=%d j=%d h=%f\n", i, j, h);
// For all columns in TODO
for (uint_t k = hi; k < n; k++) {
j = cols[k];
cred_ij = cost[i][j] - v[j] - h;
if (cred_ij < d[j]) {
d[j] = cred_ij;
pred[j] = i;
if (cred_ij == mind) {
if (y[j] < 0) {
return j;
}
cols[k] = cols[hi];
cols[hi++] = j;
}
}
}
}
*plo = lo;
*phi = hi;
return -1;
}
/** Single iteration of modified Dijkstra shortest path algorithm as explained in the JV paper.
*
* This is a dense matrix version.
*
* \return The closest free column index.
*/
int_t find_path_dense(
const uint_t n, cost_t *cost[],
const int_t start_i,
int_t *y, cost_t *v,
int_t *pred)
{
uint_t lo = 0, hi = 0;
int_t final_j = -1;
uint_t n_ready = 0;
int_t *cols;
cost_t *d;
NEW(cols, int_t, n);
NEW(d, cost_t, n);
for (uint_t i = 0; i < n; i++) {
cols[i] = i;
pred[i] = start_i;
d[i] = cost[start_i][i] - v[i];
}
PRINT_COST_ARRAY(d, n);
while (final_j == -1) {
// No columns left on the SCAN list.
if (lo == hi) {
PRINTF("%d..%d -> find\n", lo, hi);
n_ready = lo;
hi = _find_dense(n, lo, d, cols, y);
PRINTF("check %d..%d\n", lo, hi);
PRINT_INDEX_ARRAY(cols, n);
for (uint_t k = lo; k < hi; k++) {
const int_t j = cols[k];
if (y[j] < 0) {
final_j = j;
}
}
}
if (final_j == -1) {
PRINTF("%d..%d -> scan\n", lo, hi);
final_j = _scan_dense(
n, cost, &lo, &hi, d, cols, pred, y, v);
PRINT_COST_ARRAY(d, n);
PRINT_INDEX_ARRAY(cols, n);
PRINT_INDEX_ARRAY(pred, n);
}
}
PRINTF("found final_j=%d\n", final_j);
PRINT_INDEX_ARRAY(cols, n);
{
const cost_t mind = d[cols[lo]];
for (uint_t k = 0; k < n_ready; k++) {
const int_t j = cols[k];
v[j] += d[j] - mind;
}
}
FREE(cols);
FREE(d);
return final_j;
}
/** Augment for a dense cost matrix.
*/
int_t _ca_dense(
const uint_t n, cost_t *cost[],
const uint_t n_free_rows,
int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
int_t *pred;
NEW(pred, int_t, n);
for (int_t *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
int_t i = -1, j;
uint_t k = 0;
PRINTF("looking at free_i=%d\n", *pfree_i);
j = find_path_dense(n, cost, *pfree_i, y, v, pred);
ASSERT(j >= 0);
ASSERT(j < n);
while (i != *pfree_i) {
PRINTF("augment %d\n", j);
PRINT_INDEX_ARRAY(pred, n);
i = pred[j];
PRINTF("y[%d]=%d -> %d\n", j, y[j], i);
y[j] = i;
PRINT_INDEX_ARRAY(x, n);
SWAP_INDICES(j, x[i]);
k++;
if (k >= n) {
ASSERT(FALSE);
}
}
}
FREE(pred);
return 0;
}
/** Solve dense sparse LAP.
*/
int lapjv_internal(
const uint_t n, cost_t *cost[],
int_t *x, int_t *y)
{
int ret;
int_t *free_rows;
cost_t *v;
NEW(free_rows, int_t, n);
NEW(v, cost_t, n);
ret = _ccrrt_dense(n, cost, free_rows, x, y, v);
int i = 0;
while (ret > 0 && i < 2) {
ret = _carr_dense(n, cost, ret, free_rows, x, y, v);
i++;
}
if (ret > 0) {
ret = _ca_dense(n, cost, ret, free_rows, x, y, v);
}
FREE(v);
FREE(free_rows);
return ret;
}
#endif
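A hedged standalone driver for lapjv_internal above (the 3x3 cost matrix is illustrative; types follow lapjv.h, where cost_t = double and int_t = int):
#include <cstdio>
int main() {
const unsigned int n = 3;
double row0[] = {4, 1, 3}, row1[] = {2, 0, 5}, row2[] = {3, 2, 2};
double *cost[] = {row0, row1, row2};
int x[3], y[3]; // x[i]: column assigned to row i; y[j]: row assigned to column j
if (lapjv_internal(n, cost, x, y) == 0) {
for (unsigned int i = 0; i < n; i++)
printf("row %u -> col %d\n", i, x[i]); // expected: 0 -> 1, 1 -> 0, 2 -> 2 (total cost 5)
}
return 0;
}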

View File

@@ -0,0 +1,65 @@
#ifndef LAPJV_H
#define LAPJV_H
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
#define LARGE 1000000
#if !defined TRUE
#define TRUE 1
#endif
#if !defined FALSE
#define FALSE 0
#endif
#define NEW(x, t, n) if ((x = (t *)malloc(sizeof(t) * (n))) == 0) { return -1; }
#define FREE(x) if (x != 0) { free(x); x = 0; }
#define SWAP_INDICES(a, b) { int_t _temp_index = a; a = b; b = _temp_index; }
#if 0
#include <assert.h>
#define ASSERT(cond) assert(cond)
#define PRINTF(fmt, ...) printf(fmt, ##__VA_ARGS__)
#define PRINT_COST_ARRAY(a, n) \
while (1) { \
printf(#a" = ["); \
if ((n) > 0) { \
printf("%f", (a)[0]); \
for (uint_t j = 1; j < n; j++) { \
printf(", %f", (a)[j]); \
} \
} \
printf("]\n"); \
break; \
}
#define PRINT_INDEX_ARRAY(a, n) \
while (1) { \
printf(#a" = ["); \
if ((n) > 0) { \
printf("%d", (a)[0]); \
for (uint_t j = 1; j < n; j++) { \
printf(", %d", (a)[j]); \
} \
} \
printf("]\n"); \
break; \
}
#else
#define ASSERT(cond)
#define PRINTF(fmt, ...)
#define PRINT_COST_ARRAY(a, n)
#define PRINT_INDEX_ARRAY(a, n)
#endif
typedef signed int int_t;
typedef unsigned int uint_t;
typedef double cost_t;
typedef char boolean;
typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;
extern int_t lapjv_internal(
const uint_t n, cost_t *cost[],
int_t *x, int_t *y);
#endif
#endif // LAPJV_H

View File

@@ -0,0 +1,432 @@
#include "BYTETracker.h"
#include "lapjv.h"
#ifdef ISF_ENABLE_TRACKING_BY_DETECTION
vector<STrack*> BYTETracker::joint_stracks(vector<STrack*> &tlista, vector<STrack> &tlistb)
{
map<int, int> exists;
vector<STrack*> res;
for (int i = 0; i < tlista.size(); i++)
{
exists.insert(pair<int, int>(tlista[i]->track_id, 1));
res.push_back(tlista[i]);
}
for (int i = 0; i < tlistb.size(); i++)
{
int tid = tlistb[i].track_id;
if (!exists[tid] || exists.count(tid) == 0)
{
exists[tid] = 1;
res.push_back(&tlistb[i]);
}
}
return res;
}
vector<STrack> BYTETracker::joint_stracks(vector<STrack> &tlista, vector<STrack> &tlistb)
{
map<int, int> exists;
vector<STrack> res;
for (int i = 0; i < tlista.size(); i++)
{
exists.insert(pair<int, int>(tlista[i].track_id, 1));
res.push_back(tlista[i]);
}
for (int i = 0; i < tlistb.size(); i++)
{
int tid = tlistb[i].track_id;
if (!exists[tid] || exists.count(tid) == 0)
{
exists[tid] = 1;
res.push_back(tlistb[i]);
}
}
return res;
}
vector<STrack> BYTETracker::sub_stracks(vector<STrack> &tlista, vector<STrack> &tlistb)
{
map<int, STrack> stracks;
for (int i = 0; i < tlista.size(); i++)
{
stracks.insert(pair<int, STrack>(tlista[i].track_id, tlista[i]));
}
for (int i = 0; i < tlistb.size(); i++)
{
int tid = tlistb[i].track_id;
if (stracks.count(tid) != 0)
{
stracks.erase(tid);
}
}
vector<STrack> res;
std::map<int, STrack>::iterator it;
for (it = stracks.begin(); it != stracks.end(); ++it)
{
res.push_back(it->second);
}
return res;
}
void BYTETracker::remove_duplicate_stracks(vector<STrack> &resa, vector<STrack> &resb, vector<STrack> &stracksa, vector<STrack> &stracksb)
{
vector<vector<float> > pdist = iou_distance(stracksa, stracksb);
vector<pair<int, int> > pairs;
for (int i = 0; i < pdist.size(); i++)
{
for (int j = 0; j < pdist[i].size(); j++)
{
if (pdist[i][j] < 0.15)
{
pairs.push_back(pair<int, int>(i, j));
}
}
}
vector<int> dupa, dupb;
for (int i = 0; i < pairs.size(); i++)
{
int timep = stracksa[pairs[i].first].frame_id - stracksa[pairs[i].first].start_frame;
int timeq = stracksb[pairs[i].second].frame_id - stracksb[pairs[i].second].start_frame;
if (timep > timeq)
dupb.push_back(pairs[i].second);
else
dupa.push_back(pairs[i].first);
}
for (int i = 0; i < stracksa.size(); i++)
{
vector<int>::iterator iter = find(dupa.begin(), dupa.end(), i);
if (iter == dupa.end())
{
resa.push_back(stracksa[i]);
}
}
for (int i = 0; i < stracksb.size(); i++)
{
vector<int>::iterator iter = find(dupb.begin(), dupb.end(), i);
if (iter == dupb.end())
{
resb.push_back(stracksb[i]);
}
}
}
void BYTETracker::linear_assignment(vector<vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
vector<vector<int> > &matches, vector<int> &unmatched_a, vector<int> &unmatched_b)
{
if (cost_matrix.size() == 0)
{
for (int i = 0; i < cost_matrix_size; i++)
{
unmatched_a.push_back(i);
}
for (int i = 0; i < cost_matrix_size_size; i++)
{
unmatched_b.push_back(i);
}
return;
}
vector<int> rowsol; vector<int> colsol;
float c = lapjv(cost_matrix, rowsol, colsol, true, thresh);
for (int i = 0; i < rowsol.size(); i++)
{
if (rowsol[i] >= 0)
{
vector<int> match;
match.push_back(i);
match.push_back(rowsol[i]);
matches.push_back(match);
}
else
{
unmatched_a.push_back(i);
}
}
for (int i = 0; i < colsol.size(); i++)
{
if (colsol[i] < 0)
{
unmatched_b.push_back(i);
}
}
}
vector<vector<float> > BYTETracker::ious(vector<vector<float> > &atlbrs, vector<vector<float> > &btlbrs)
{
vector<vector<float> > ious;
if (atlbrs.size()*btlbrs.size() == 0)
return ious;
ious.resize(atlbrs.size());
for (int i = 0; i < ious.size(); i++)
{
ious[i].resize(btlbrs.size());
}
//bbox_ious
for (int k = 0; k < btlbrs.size(); k++)
{
vector<float> ious_tmp;
float box_area = (btlbrs[k][2] - btlbrs[k][0] + 1)*(btlbrs[k][3] - btlbrs[k][1] + 1);
for (int n = 0; n < atlbrs.size(); n++)
{
float iw = min(atlbrs[n][2], btlbrs[k][2]) - max(atlbrs[n][0], btlbrs[k][0]) + 1;
if (iw > 0)
{
float ih = min(atlbrs[n][3], btlbrs[k][3]) - max(atlbrs[n][1], btlbrs[k][1]) + 1;
if(ih > 0)
{
float ua = (atlbrs[n][2] - atlbrs[n][0] + 1)*(atlbrs[n][3] - atlbrs[n][1] + 1) + box_area - iw * ih;
ious[n][k] = iw * ih / ua;
}
else
{
ious[n][k] = 0.0;
}
}
else
{
ious[n][k] = 0.0;
}
}
}
return ious;
}
vector<vector<float> > BYTETracker::iou_distance(vector<STrack*> &atracks, vector<STrack> &btracks, int &dist_size, int &dist_size_size)
{
vector<vector<float> > cost_matrix;
if (atracks.size() * btracks.size() == 0)
{
dist_size = atracks.size();
dist_size_size = btracks.size();
return cost_matrix;
}
vector<vector<float> > atlbrs, btlbrs;
for (int i = 0; i < atracks.size(); i++)
{
atlbrs.push_back(atracks[i]->tlbr);
}
for (int i = 0; i < btracks.size(); i++)
{
btlbrs.push_back(btracks[i].tlbr);
}
dist_size = atracks.size();
dist_size_size = btracks.size();
vector<vector<float> > _ious = ious(atlbrs, btlbrs);
for (int i = 0; i < _ious.size();i++)
{
vector<float> _iou;
for (int j = 0; j < _ious[i].size(); j++)
{
_iou.push_back(1 - _ious[i][j]);
}
cost_matrix.push_back(_iou);
}
return cost_matrix;
}
vector<vector<float> > BYTETracker::iou_distance(vector<STrack> &atracks, vector<STrack> &btracks)
{
vector<vector<float> > atlbrs, btlbrs;
for (int i = 0; i < atracks.size(); i++)
{
atlbrs.push_back(atracks[i].tlbr);
}
for (int i = 0; i < btracks.size(); i++)
{
btlbrs.push_back(btracks[i].tlbr);
}
vector<vector<float> > _ious = ious(atlbrs, btlbrs);
vector<vector<float> > cost_matrix;
for (int i = 0; i < _ious.size(); i++)
{
vector<float> _iou;
for (int j = 0; j < _ious[i].size(); j++)
{
_iou.push_back(1 - _ious[i][j]);
}
cost_matrix.push_back(_iou);
}
return cost_matrix;
}
double BYTETracker::lapjv(const vector<vector<float> > &cost, vector<int> &rowsol, vector<int> &colsol,
bool extend_cost, float cost_limit, bool return_cost)
{
vector<vector<float> > cost_c;
cost_c.assign(cost.begin(), cost.end());
vector<vector<float> > cost_c_extended;
int n_rows = cost.size();
int n_cols = cost[0].size();
rowsol.resize(n_rows);
colsol.resize(n_cols);
int n = 0;
if (n_rows == n_cols)
{
n = n_rows;
}
else
{
if (!extend_cost)
{
cout << "cost matrix is not square; call lapjv with extend_cost=true" << endl;
exit(1);
}
}
if (extend_cost || cost_limit < LONG_MAX)
{
n = n_rows + n_cols;
cost_c_extended.resize(n);
for (int i = 0; i < cost_c_extended.size(); i++)
cost_c_extended[i].resize(n);
if (cost_limit < LONG_MAX)
{
for (int i = 0; i < cost_c_extended.size(); i++)
{
for (int j = 0; j < cost_c_extended[i].size(); j++)
{
cost_c_extended[i][j] = cost_limit / 2.0;
}
}
}
else
{
float cost_max = -1;
for (int i = 0; i < cost_c.size(); i++)
{
for (int j = 0; j < cost_c[i].size(); j++)
{
if (cost_c[i][j] > cost_max)
cost_max = cost_c[i][j];
}
}
for (int i = 0; i < cost_c_extended.size(); i++)
{
for (int j = 0; j < cost_c_extended[i].size(); j++)
{
cost_c_extended[i][j] = cost_max + 1;
}
}
}
for (int i = n_rows; i < cost_c_extended.size(); i++)
{
for (int j = n_cols; j < cost_c_extended[i].size(); j++)
{
cost_c_extended[i][j] = 0;
}
}
for (int i = 0; i < n_rows; i++)
{
for (int j = 0; j < n_cols; j++)
{
cost_c_extended[i][j] = cost_c[i][j];
}
}
cost_c.clear();
cost_c.assign(cost_c_extended.begin(), cost_c_extended.end());
}
double **cost_ptr = new double *[n];
for (int i = 0; i < n; i++)
cost_ptr[i] = new double[n];
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
cost_ptr[i][j] = cost_c[i][j];
}
}
int *x_c = new int[n];
int *y_c = new int[n];
int ret = lapjv_internal(n, cost_ptr, x_c, y_c);
if (ret != 0)
{
cout << "lapjv_internal failed to solve the assignment problem" << endl;
exit(1);
}
double opt = 0.0;
if (n != n_rows)
{
for (int i = 0; i < n; i++)
{
if (x_c[i] >= n_cols)
x_c[i] = -1;
if (y_c[i] >= n_rows)
y_c[i] = -1;
}
for (int i = 0; i < n_rows; i++)
{
rowsol[i] = x_c[i];
}
for (int i = 0; i < n_cols; i++)
{
colsol[i] = y_c[i];
}
if (return_cost)
{
for (int i = 0; i < rowsol.size(); i++)
{
if (rowsol[i] != -1)
{
//cout << i << "\t" << rowsol[i] << "\t" << cost_ptr[i][rowsol[i]] << endl;
opt += cost_ptr[i][rowsol[i]];
}
}
}
}
else if (return_cost)
{
for (int i = 0; i < rowsol.size(); i++)
{
opt += cost_ptr[i][rowsol[i]];
}
}
for (int i = 0; i < n; i++)
{
delete[]cost_ptr[i];
}
delete[]cost_ptr;
delete[]x_c;
delete[]y_c;
return opt;
}
Scalar BYTETracker::get_color(int idx)
{
idx += 3;
return Scalar(37 * idx % 255, 17 * idx % 255, 29 * idx % 255);
}
#endif
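
For orientation, the functions above fit together as follows: iou_distance converts track/detection overlap into a cost matrix of (1 - IoU) values, linear_assignment feeds that matrix to the LAPJV solver, and remove_duplicate_stracks resolves tracks that two sub-lists claim at once. A condensed sketch of the matching step as it would appear inside BYTETracker::update (abridged; strack_pool and detections stand in for the pooled tracks and the current frame's detections):

int dist_size = 0, dist_size_size = 0;
vector<vector<float> > dists = iou_distance(strack_pool, detections, dist_size, dist_size_size);
vector<vector<int> > matches;
vector<int> unmatched_tracks, unmatched_dets;
// Pairs whose cost (1 - IoU) exceeds the threshold stay unmatched.
linear_assignment(dists, dist_size, dist_size_size, 0.8, matches, unmatched_tracks, unmatched_dets);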

View File

@@ -1 +1 @@
InspireFace Version: 1.0.1
InspireFace Version: 1.1.0

View File

@@ -38,6 +38,13 @@ set_target_properties(MTFaceTrackSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face detection and tracking
add_executable(FaceTrackVideoSample cpp/sample_face_track_video.cpp)
target_link_libraries(FaceTrackVideoSample InspireFace ${ext})
set_target_properties(FaceTrackVideoSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face recognition
add_executable(FaceRecognitionSample cpp/sample_face_recognition.cpp)
target_link_libraries(FaceRecognitionSample InspireFace ${ext})
@@ -60,6 +67,14 @@ set_target_properties(FaceComparisonSample PROPERTIES
)
# Examples of face recognition
add_executable(FaceDetect cpp/face_detect.cpp)
target_link_libraries(FaceDetect InspireFace ${ext})
set_target_properties(FaceDetect PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Platform watershed
if (ISF_BUILD_LINUX_ARM7 OR ISF_BUILD_LINUX_AARCH64)
# Typically this is an embedded system or development board scenario where some GUI-related functions are not supported

View File

@@ -0,0 +1,47 @@
//
// Created by tunm on 2024/5/26.
//
#include <cstddef>
#include <iostream>
#include <opencv2/core/types.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>
#include "data_type.h"
#include "opencv2/opencv.hpp"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/Initialization_module/launch.h"
using namespace inspire;
int main() {
INSPIRE_LAUNCH->Load("test_res/pack/Megatron");
auto archive = INSPIRE_LAUNCH->getMArchive();
InspireModel detModel;
auto ret = archive.LoadModel("face_detect", detModel);
std::vector<int> input_size = {640, 640};
detModel.Config().set<std::vector<int>>("input_size", input_size);
FaceDetect detect(input_size[0]);
detect.loadData(detModel, detModel.modelType, true);
auto img = cv::imread("/Users/tunm/Downloads/xtl.png");
double time;
time = (double) cv::getTickCount();
std::vector<FaceLoc> results = detect(img);
time = ((double) cv::getTickCount() - time) / cv::getTickFrequency();
std::cout << "use time" << time << "\n";
for (size_t i = 0; i < results.size(); i++) {
auto &item = results[i];
cv::rectangle(img, cv::Point2f(item.x1, item.y1), cv::Point2f(item.x2, item.y2), cv::Scalar(0, 0, 255), 4);
}
cv::imshow("w", img);
cv::waitKey(0);
return 0;
}
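
The sample reads a developer-local image path; a more portable variant (a suggestion, not part of this commit) would take the path from the command line:

// Hypothetical variant: pass the image path as an argument.
int main(int argc, char** argv) {
    if (argc != 2) {
        std::cerr << "Usage: " << argv[0] << " <image_path>\n";
        return 1;
    }
    auto img = cv::imread(argv[1]);
    // ... detection and drawing as above ...
}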

View File

@@ -31,7 +31,7 @@ int main(int argc, char* argv[]) {
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_IMAGE, 1, &session);
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;
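
The two extra HInt32 arguments threaded through this and the following call sites are the face-detection pixel level and the tracking-by-detection fps introduced in 1.1.0; judging from the samples later in this diff, -1 selects the SDK defaults. A minimal sketch of the updated call (values illustrative):

HFSession session = {0};
// option, detect mode, max faces, detect pixel level (-1 = default), track-by-detection fps (-1 = default)
ret = HFCreateInspireFaceSessionOptional(HF_ENABLE_FACE_RECOGNITION, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);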

View File

@@ -55,7 +55,7 @@ int main(int argc, char* argv[]) {
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_IMAGE, 1, &session);
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;

View File

@@ -52,7 +52,7 @@ int main(int argc, char* argv[]) {
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_IMAGE, 1, &session);
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;

View File

@@ -29,17 +29,22 @@ int main(int argc, char* argv[]) {
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Face detection image input level
HInt32 detectPixelLevel = 640;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
HFSessionSetTrackPreviewSize(session, 640);
HFSessionSetFilterMinimumFacePixelSize(session, 32);
// Load a image
cv::Mat image = cv::imread(sourcePath);
if (image.empty()) {
@@ -81,7 +86,7 @@ int main(int argc, char* argv[]) {
// Use OpenCV's Rect to receive face bounding boxes
auto rect = cv::Rect(multipleFaceData.rects[index].x, multipleFaceData.rects[index].y,
multipleFaceData.rects[index].width, multipleFaceData.rects[index].height);
cv::rectangle(draw, rect, cv::Scalar(0, 100, 255), 1);
cv::rectangle(draw, rect, cv::Scalar(0, 100, 255), 4);
// Print FaceID, In IMAGE-MODE it is changing, in VIDEO-MODE it is fixed, but it may be lost
std::cout << "FaceID: " << multipleFaceData.trackIds[index] << std::endl;

View File

@@ -29,12 +29,12 @@ int main(int argc, char* argv[]) {
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_LIGHT_TRACK;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
HInt32 maxDetectNum = 50;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
@@ -61,21 +61,23 @@ int main(int argc, char* argv[]) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
for (int i = 0; i < 100; i++) {
auto current_time = (double) cv::getTickCount();
// Execute HFExecuteFaceTrack to capture face information in an image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
return ret;
}
auto cost = ((double) cv::getTickCount() - current_time) / cv::getTickFrequency() * 1000;
std::cout << "cost: " << cost << std::endl;
}
ret = HFReleaseImageStream(imageHandle);
if (ret != HSUCCEED) {

View File

@@ -40,12 +40,12 @@ int main(int argc, char* argv[]) {
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Handle of the current face SDK algorithm session
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;

View File

@@ -0,0 +1,172 @@
#include <iostream>
#include "c_api/intypedef.h"
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
void drawMode(cv::Mat& frame, HFDetectMode mode) {
std::string modeText;
switch (mode) {
case HF_DETECT_MODE_ALWAYS_DETECT:
modeText = "Mode: Image Detection";
break;
case HF_DETECT_MODE_LIGHT_TRACK:
modeText = "Mode: Video Detection";
break;
case HF_DETECT_MODE_TRACK_BY_DETECTION:
modeText = "Mode: Track by Detection";
break;
default:
modeText = "Mode: Unknown";
break;
}
cv::putText(frame, modeText, cv::Point(10, 30), cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(255, 255, 255), 2);
}
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <video_path>\n";
return 1;
}
auto packPath = argv[1];
auto videoPath = argv[2];
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Video file Path: " << videoPath << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// Video or frame sequence mode uses VIDEO-MODE, which is face detection with tracking
HFDetectMode detMode = HF_DETECT_MODE_TRACK_BY_DETECTION;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Face detection image input level
HInt32 detectPixelLevel = 640;
// fps in tracking-by-detection mode
HInt32 trackByDetectFps = 20;
HFSession session = {0};
// Handle of the current face SDK algorithm context
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, trackByDetectFps, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
HFSessionSetTrackPreviewSize(session, 640);
HFSessionSetFilterMinimumFacePixelSize(session, 32);
// Open the video file
cv::VideoCapture cap(videoPath);
if (!cap.isOpened()) {
std::cout << "The source entered is not a video or read error." << std::endl;
return 1;
}
// Get the video properties
int frame_width = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
int frame_height = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
int fps = static_cast<int>(cap.get(cv::CAP_PROP_FPS));
cv::Size frame_size(frame_width, frame_height);
// Define the codec and create VideoWriter object
cv::VideoWriter outputVideo("output_video.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), fps, frame_size, true);
if (!outputVideo.isOpened()) {
std::cerr << "Could not open the output video for write: output_video.avi\n";
return -1;
}
cv::Mat frame;
while (cap.read(frame)) {
// Prepare an image parameter structure for configuration
HFImageData imageParam = {0};
imageParam.data = frame.data; // Data buffer
imageParam.width = frame.cols; // Target view width
imageParam.height = frame.rows; // Target view height
imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
imageParam.format = HF_STREAM_BGR; // Data source format
// Create an image data stream
HFImageStream imageHandle = {0};
ret = HFCreateImageStream(&imageParam, &imageHandle);
if (ret != HSUCCEED) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
// Execute HF_FaceContextRunFaceTrack captures face information in an image
double time = (double) cv::getTickCount();
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
time = ((double) cv::getTickCount() - time) / cv::getTickFrequency();
std::cout << "use time" << time << "\n";
if (ret != HSUCCEED) {
std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
return ret;
}
// Print the number of faces detected
auto faceNum = multipleFaceData.detectedNum;
std::cout << "Num of face: " << faceNum << std::endl;
// Copy a new image to draw
cv::Mat draw = frame.clone();
// Draw detection mode on the frame
drawMode(draw, detMode);
for (int index = 0; index < faceNum; ++index) {
// std::cout << "========================================" << std::endl;
// std::cout << "Process face index: " << index << std::endl;
// Use OpenCV's Rect to receive face bounding boxes
auto rect = cv::Rect(multipleFaceData.rects[index].x, multipleFaceData.rects[index].y,
multipleFaceData.rects[index].width, multipleFaceData.rects[index].height);
cv::rectangle(draw, rect, cv::Scalar(0, 100, 255), 5);
// Print FaceID, In VIDEO-MODE it is fixed, but it may be lost
auto trackId = multipleFaceData.trackIds[index];
// std::cout << "FaceID: " << trackId << std::endl;
// Print Head euler angle, It can often be used to judge the quality of a face by the Angle of the head
// std::cout << "Roll: " << multipleFaceData.angles.roll[index]
// << ", Yaw: " << multipleFaceData.angles.yaw[index]
// << ", Pitch: " << multipleFaceData.angles.pitch[index] << std::endl;
// Add TrackID to the drawing
cv::putText(draw, "ID: " + std::to_string(trackId), cv::Point(rect.x, rect.y - 10),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 2);
}
cv::imshow("w", draw);
cv::waitKey(1);
// Write the frame into the file
outputVideo.write(draw);
ret = HFReleaseImageStream(imageHandle);
if (ret != HSUCCEED) {
printf("Release image stream error: %lu\n", ret);
}
}
// Release the VideoCapture and VideoWriter objects
cap.release();
outputVideo.release();
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release session error: %lu\n", ret);
return ret;
}
return 0;
}
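
One caveat for the writer setup in this sample: cv::CAP_PROP_FPS can report 0 for some cameras and containers, which would leave the cv::VideoWriter unopened; a small defensive tweak (a suggestion, not part of this commit):

int fps = static_cast<int>(cap.get(cv::CAP_PROP_FPS));
if (fps <= 0) {
    fps = 30; // fall back to a sane default when the source reports no FPS
}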

View File

@@ -18,9 +18,9 @@ TEST_CASE("test_Evaluation", "[face_evaluation") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 5, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 5, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
float mostSim = -1.0f;
@@ -56,9 +56,9 @@ TEST_CASE("test_Evaluation", "[face_evaluation") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 5, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 5, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
std::vector<int> labels;
std::vector<float> confidences;

View File

@@ -14,9 +14,9 @@ TEST_CASE("test_FeatureContext", "[face_context]") {
SECTION("Test the new context positive process") {
HResult ret;
HFSessionCustomParameter parameter = {0};
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
ret = HFReleaseInspireFaceSession(session);
REQUIRE(ret == HSUCCEED);

View File

@@ -15,14 +15,14 @@ TEST_CASE("test_FacePipeline", "[face_pipeline]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_liveness = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Get a face picture
HFImageStream img1Handle;
auto img1 = cv::imread(GET_DATA("images/image_T1.jpeg"));
auto img1 = cv::imread(GET_DATA("data/bulk/image_T1.jpeg"));
ret = CVImageToImageStream(img1, img1Handle);
REQUIRE(ret == HSUCCEED);
@@ -47,7 +47,7 @@ TEST_CASE("test_FacePipeline", "[face_pipeline]") {
// fake face
HFImageStream img2Handle;
auto img2 = cv::imread(GET_DATA("images/rgb_fake.jpg"));
auto img2 = cv::imread(GET_DATA("data/bulk/rgb_fake.jpg"));
ret = CVImageToImageStream(img2, img2Handle);
REQUIRE(ret == HSUCCEED);
ret = HFExecuteFaceTrack(session, img2Handle, &multipleFaceData);
@@ -75,14 +75,14 @@ TEST_CASE("test_FacePipeline", "[face_pipeline]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_mask_detect = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Get a face picture
HFImageStream img1Handle;
auto img1 = cv::imread(GET_DATA("images/mask2.jpg"));
auto img1 = cv::imread(GET_DATA("data/bulk/mask2.jpg"));
ret = CVImageToImageStream(img1, img1Handle);
REQUIRE(ret == HSUCCEED);
@@ -107,7 +107,7 @@ TEST_CASE("test_FacePipeline", "[face_pipeline]") {
// no mask face
HFImageStream img2Handle;
auto img2 = cv::imread(GET_DATA("images/face_sample.png"));
auto img2 = cv::imread(GET_DATA("data/bulk/face_sample.png"));
ret = CVImageToImageStream(img2, img2Handle);
REQUIRE(ret == HSUCCEED);
ret = HFExecuteFaceTrack(session, img2Handle, &multipleFaceData);
@@ -132,15 +132,15 @@ TEST_CASE("test_FacePipeline", "[face_pipeline]") {
SECTION("face quality") {
HResult ret;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HInt32 option = HF_ENABLE_QUALITY;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, detMode, 3, &session);
ret = HFCreateInspireFaceSessionOptional(option, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Get a face picture
HFImageStream superiorHandle;
auto superior = cv::imread(GET_DATA("images/yifei.jpg"));
auto superior = cv::imread(GET_DATA("data/bulk/yifei.jpg"));
ret = CVImageToImageStream(superior, superiorHandle);
REQUIRE(ret == HSUCCEED);
@@ -160,7 +160,7 @@ TEST_CASE("test_FacePipeline", "[face_pipeline]") {
// blur image
HFImageStream blurHandle;
auto blur = cv::imread(GET_DATA("images/blur.jpg"));
auto blur = cv::imread(GET_DATA("data/bulk/blur.jpg"));
ret = CVImageToImageStream(blur, blurHandle);
REQUIRE(ret == HSUCCEED);

View File

@@ -18,9 +18,9 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
SECTION("Face detection from image") {
HResult ret;
HFSessionCustomParameter parameter = {0};
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
spdlog::error("error ret :{}", ret);
REQUIRE(ret == HSUCCEED);
@@ -74,9 +74,9 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
SECTION("Face tracking stability from frames") {
HResult ret;
HFSessionCustomParameter parameter = {0};
HFDetectMode detMode = HF_DETECT_MODE_VIDEO;
HFDetectMode detMode = HF_DETECT_MODE_LIGHT_TRACK;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
auto expectedId = 1;
@@ -86,7 +86,7 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
for (int i = 0; i < filenames.size(); ++i) {
auto filename = filenames[i];
HFImageStream imgHandle;
auto image = cv::imread(GET_DATA("video_frames/" + filename));
auto image = cv::imread(GET_DATA("data/video_frames/" + filename));
ret = CVImageToImageStream(image, imgHandle);
REQUIRE(ret == HSUCCEED);
@@ -101,7 +101,7 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
auto rect = multipleFaceData.rects[0];
cv::Rect cvRect(rect.x, rect.y, rect.width, rect.height);
cv::rectangle(image, cvRect, cv::Scalar(255, 0, 124), 2);
std::string save = GET_SAVE_DATA("video_frames") + "/" + std::to_string(i) + ".jpg";
std::string save = GET_SAVE_DATA("data/video_frames") + "/" + std::to_string(i) + ".jpg";
cv::imwrite(save, image);
auto id = multipleFaceData.trackIds[0];
// TEST_PRINT("{}", id);
@@ -123,9 +123,9 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
SECTION("Head pose estimation") {
HResult ret;
HFSessionCustomParameter parameter = {0};
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Extract basic face information from photos
@@ -229,9 +229,9 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
int loop = 1000;
HResult ret;
HFSessionCustomParameter parameter = {0};
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Prepare an image
@@ -241,8 +241,6 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
REQUIRE(ret == HSUCCEED);
BenchmarkRecord record(getBenchmarkRecordFile());
// Case: Execute the benchmark using the IMAGE mode
ret = HFSessionSetFaceTrackMode(session, HF_DETECT_MODE_IMAGE);
REQUIRE(ret == HSUCCEED);
HFMultipleFaceData multipleFaceData = {0};
auto start = (double) cv::getTickCount();
@@ -255,15 +253,41 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
TEST_PRINT("<Benchmark> Face Detect -> Loop: {}, Total Time: {:.5f}ms, Average Time: {:.5f}ms", loop, cost, cost / loop);
record.insertBenchmarkData("Face Detect", loop, cost, cost / loop);
ret = HFReleaseImageStream(imgHandle);
REQUIRE(ret == HSUCCEED);
ret = HFReleaseInspireFaceSession(session);
REQUIRE(ret == HSUCCEED);
#else
TEST_PRINT("Skip the face detection benchmark test. To run it, you need to turn on the benchmark test.");
#endif
}
SECTION("Face light track benchmark") {
#ifdef ISF_ENABLE_BENCHMARK
int loop = 1000;
HResult ret;
HFSessionCustomParameter parameter = {0};
HFDetectMode detMode = HF_DETECT_MODE_LIGHT_TRACK;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Prepare an image
HFImageStream imgHandle;
auto image = cv::imread(GET_DATA("data/bulk/kun.jpg"));
ret = CVImageToImageStream(image, imgHandle);
REQUIRE(ret == HSUCCEED);
BenchmarkRecord record(getBenchmarkRecordFile());
// Case: Execute the benchmark using the VIDEO mode(Track)
REQUIRE(ret == HSUCCEED);
HFMultipleFaceData multipleFaceData = {0};
auto start = (double) cv::getTickCount();
for (int i = 0; i < loop; ++i) {
ret = HFExecuteFaceTrack(session, imgHandle, &multipleFaceData);
}
cost = ((double) cv::getTickCount() - start) / cv::getTickFrequency() * 1000;
auto cost = ((double) cv::getTickCount() - start) / cv::getTickFrequency() * 1000;
REQUIRE(ret == HSUCCEED);
REQUIRE(multipleFaceData.detectedNum == 1);
TEST_PRINT("<Benchmark> Face Track -> Loop: {}, Total Time: {:.5f}ms, Average Time: {:.5f}ms", loop, cost, cost / loop);
@@ -275,8 +299,9 @@ TEST_CASE("test_FaceTrack", "[face_track]") {
ret = HFReleaseInspireFaceSession(session);
REQUIRE(ret == HSUCCEED);
#else
TEST_PRINT("Skip the face detection benchmark test. To run it, you need to turn on the benchmark test.");
TEST_PRINT("Skip the face light track benchmark test. To run it, you need to turn on the benchmark test.");
#endif
}
}

View File

@@ -17,9 +17,9 @@ TEST_CASE("test_FeatureManage", "[feature_manage]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -38,7 +38,7 @@ TEST_CASE("test_FeatureManage", "[feature_manage]") {
REQUIRE(ret == HSUCCEED);
// Get a face picture
cv::Mat kunImage = cv::imread(GET_DATA("images/kun.jpg"));
cv::Mat kunImage = cv::imread(GET_DATA("data/bulk/kun.jpg"));
HFImageData imageData = {0};
imageData.data = kunImage.data;
imageData.height = kunImage.rows;
@@ -121,9 +121,9 @@ TEST_CASE("test_FeatureManage", "[feature_manage]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -169,9 +169,9 @@ TEST_CASE("test_FeatureManage", "[feature_manage]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
// Face track
@@ -323,9 +323,9 @@ TEST_CASE("test_SearchTopK", "[feature_search_top_k]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -470,9 +470,9 @@ TEST_CASE("test_FeatureBenchmark", "[feature_benchmark]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -565,9 +565,9 @@ TEST_CASE("test_FeatureBenchmark", "[feature_benchmark]") {
HPath path = modelPath.c_str();
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -658,9 +658,9 @@ TEST_CASE("test_FeatureBenchmark", "[feature_benchmark]") {
HPath path = modelPath.c_str();
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -787,9 +787,9 @@ TEST_CASE("test_FeatureBenchmark", "[feature_benchmark]") {
HPath path = modelPath.c_str();
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");
@@ -894,9 +894,9 @@ TEST_CASE("test_FeatureBenchmark", "[feature_benchmark]") {
HPath path = modelPath.c_str();
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3,-1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");

View File

@@ -15,9 +15,9 @@ TEST_CASE("test_HelpTools", "[help_tools]") {
HResult ret;
HFSessionCustomParameter parameter = {0};
parameter.enable_recognition = 1;
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
HFSession session;
ret = HFCreateInspireFaceSession(parameter, detMode, 3, &session);
ret = HFCreateInspireFaceSession(parameter, detMode, 3, -1, -1, &session);
REQUIRE(ret == HSUCCEED);
HFFeatureHubConfiguration configuration = {0};
auto dbPath = GET_SAVE_DATA(".test");

View File

@@ -17,7 +17,7 @@ TEST_CASE("test_CameraStream", "[camera_stream") {
SECTION("DecodingRotatedImages") {
FaceContext ctx;
CustomPipelineParameter param;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
std::vector<std::string> rotated_filename_list = {
@@ -51,7 +51,7 @@ TEST_CASE("test_CameraStream", "[camera_stream") {
SECTION("DecodingNV21Image") {
FaceContext ctx;
CustomPipelineParameter param;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
int32_t width = 402;

View File

@@ -17,7 +17,7 @@ TEST_CASE("test_FaceDetectTrack", "[face_track]") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_face_quality = true;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_VIDEO, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_LIGHT_TRACK, 1, param);
REQUIRE(ret == HSUCCEED);
// Prepare a picture of a face

View File

@@ -18,7 +18,7 @@ TEST_CASE("test_FaceData", "[face_data]") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_face_quality = true;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_VIDEO, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
// Prepare a picture of a face

View File

@@ -16,7 +16,7 @@ TEST_CASE("test_FacePipeline", "[face_pipe") {
SECTION("FaceContextInit") {
FaceContext ctx;
CustomPipelineParameter param;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
}
@@ -24,7 +24,7 @@ TEST_CASE("test_FacePipeline", "[face_pipe") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_mask_detect = true;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
{
@@ -67,7 +67,7 @@ TEST_CASE("test_FacePipeline", "[face_pipe") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_liveness = true;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
{

View File

@@ -19,7 +19,7 @@ TEST_CASE("test_FaceRecognition", "[face_rec]") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_recognition = true;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
}
@@ -27,7 +27,7 @@ TEST_CASE("test_FaceRecognition", "[face_rec]") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_recognition = false; // Disable the face recognition function
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
auto image = cv::imread(GET_DATA("images/cxk.jpg"));
@@ -49,7 +49,7 @@ TEST_CASE("test_FaceRecognition", "[face_rec]") {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_recognition = true;
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_IMAGE, 1, param);
auto ret = ctx.Configuration(DetectMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
REQUIRE(ret == HSUCCEED);
std::vector<std::string> list = {

View File

@@ -19,3 +19,4 @@ Here are the translation details for the compilation parameters as per your requ
| ISF_ENABLE_TEST_EVALUATION | OFF | Enable evaluation functionality for test cases, must be used together with ISF_ENABLE_USE_LFW_DATA |
| ISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA | OFF | Enable global MNN_CUDA inference mode, requires device support for CUDA |
| ISF_LINUX_MNN_CUDA | "" | Specific MNN library path, requires pre-compiled MNN library supporting MNN_CUDA, only effective when ISF_GLOBAL_INFERENCE_BACKEND_USE_MNN_CUDA is enabled |
| ISF_ENABLE_TRACKING_BY_DETECTION | OFF | Enable tracking-by-detection face detection mode, which references the Eigen library |
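
For example, a tracking-by-detection build could be configured at CMake time with (illustrative):

cmake -DISF_ENABLE_TRACKING_BY_DETECTION=ON ..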

View File

@@ -40,3 +40,23 @@ services:
    volumes:
      - .:/workspace # Mount the project root directory to the container
    command: bash command/build_cross_aarch64.sh
  build-cross-android:
    build:
      context: .
      dockerfile: docker/Dockerfile.android # Use the Android NDK toolchain
    environment:
      - VERSION=${VERSION}
    working_dir: /workspace
    volumes:
      - .:/workspace # Mount the project root directory to the container
    command: bash command/build_android.sh
  build-cuda-ubuntu20:
    build:
      context: .
      dockerfile: docker/Dockerfile.cuda.ubuntu20
    environment:
      - VERSION=${VERSION}
    working_dir: /workspace
    volumes:
      - .:/workspace # Mount the project root directory to the container
    command: bash command/build_linux_cuda.sh
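
Assuming the service names above, these builds would then be launched with, e.g., VERSION=1.1.0 docker-compose run build-cuda-ubuntu20.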

View File

@@ -0,0 +1,56 @@
# Use Ubuntu 18.04 as the base image
FROM ubuntu:18.04
# Update the package list and install basic development tools
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
software-properties-common \
wget \
curl \
git \
unzip
# Download and install CMake
ARG CMAKE_URL="https://github.com/Kitware/CMake/releases/download/v3.17.0-rc3/cmake-3.17.0-rc3-Linux-x86_64.sh"
RUN mkdir /opt/cmake && \
wget -qO /opt/cmake/install-cmake.sh ${CMAKE_URL} && \
chmod +x /opt/cmake/install-cmake.sh && \
/opt/cmake/install-cmake.sh --skip-license --prefix=/usr/local && \
rm /opt/cmake/install-cmake.sh
# Set the URL for the Android NDK and OpenCV Android SDK
ARG ANDROID_NDK_URL="https://dl.google.com/android/repository/android-ndk-r18b-linux-x86_64.zip"
ARG OPENCV_URL="https://github.com/opencv/opencv/releases/download/4.5.1/opencv-4.5.1-android-sdk.zip"
# Set the installation path for the Android NDK and OpenCV Android SDK
ARG ANDROID_NDK_PATH="/opt/android-ndk-r18b"
ARG OPENCV_PATH="/opt/opencv-android-sdk"
# Download and extract the Android NDK
RUN mkdir -p ${ANDROID_NDK_PATH} && \
wget -qO /tmp/android-ndk.zip ${ANDROID_NDK_URL} && \
unzip /tmp/android-ndk.zip -d /opt && \
rm /tmp/android-ndk.zip
# Set environment variable to point to the NDK directory
ENV ANDROID_NDK=${ANDROID_NDK_PATH}
# Download and extract the OpenCV Android SDK
RUN mkdir -p ${OPENCV_PATH} && \
wget -qO /tmp/opencv-android-sdk.zip ${OPENCV_URL} && \
unzip /tmp/opencv-android-sdk.zip -d ${OPENCV_PATH} && \
rm /tmp/opencv-android-sdk.zip
# Set environment variable to point to the OpenCV SDK directory
ENV OPENCV_DIR=${OPENCV_PATH}/OpenCV-android-sdk/sdk/native/jni
# Clean temporary files to reduce image size
RUN apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Set the working directory
WORKDIR /workspace
# Default to running Bash
CMD ["/bin/bash"]

View File

@@ -0,0 +1,38 @@
# Use the NVIDIA CUDA 11.1.1 + cuDNN 8 (Ubuntu 20.04) image as the base
FROM nvidia/cuda:11.1.1-cudnn8-devel-ubuntu20.04
ARG https_proxy
ARG http_proxy
ARG all_proxy
RUN apt-get update
# Install OpenCV dependency
RUN apt-get install -y --no-install-recommends libgtk-3-dev
RUN apt-get install -y --no-install-recommends libavcodec-dev
RUN apt-get install -y --no-install-recommends libavformat-dev
RUN apt-get install -y --no-install-recommends libswscale-dev
RUN apt-get install -y --no-install-recommends libjpeg-dev
RUN apt-get install -y --no-install-recommends libpng-dev
# Update the package list and install basic development tools
RUN apt-get install -y --no-install-recommends build-essential
RUN apt-get install -y --no-install-recommends software-properties-common
RUN apt-get install -y --no-install-recommends wget
RUN apt-get install -y --no-install-recommends curl
RUN apt-get install -y --no-install-recommends git
RUN apt-get install -y --no-install-recommends vim
# Install CMake
RUN apt-get install -y --no-install-recommends cmake
# Clean temporary files to reduce image size
RUN apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Set the working directory
WORKDIR /workspace
# Default to running Bash
CMD ["/bin/bash"]

View File

@@ -0,0 +1,38 @@
# Use the NVIDIA CUDA 11.2.2 (Ubuntu 20.04) image as the base
FROM nvidia/cuda:11.2.2-devel-ubuntu20.04
ARG https_proxy
ARG http_proxy
ARG all_proxy
RUN apt-get update
# Install OpenCV dependency
# RUN apt-get install -y --no-install-recommends libgtk-3-dev
# RUN apt-get install -y --no-install-recommends libavcodec-dev
# RUN apt-get install -y --no-install-recommends libavformat-dev
# RUN apt-get install -y --no-install-recommends libswscale-dev
# RUN apt-get install -y --no-install-recommends libjpeg-dev
# RUN apt-get install -y --no-install-recommends libpng-dev
# Update the package list and install basic development tools
RUN apt-get install -y --no-install-recommends build-essential
RUN apt-get install -y --no-install-recommends software-properties-common
RUN apt-get install -y --no-install-recommends wget
RUN apt-get install -y --no-install-recommends curl
RUN apt-get install -y --no-install-recommends git
RUN apt-get install -y --no-install-recommends vim
# Install CMake
RUN apt-get install -y --no-install-recommends cmake
# Clean temporary files to reduce image size
RUN apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Set the working directory
WORKDIR /workspace
# Default to running Bash
CMD ["/bin/bash"]

View File

@@ -0,0 +1,30 @@
tmp/
*.pyc
*.pyo
*.pyd
__pycache__/
*.so
*.dylib
venv/
env/
.build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
.pytest_cache/
.idea/

View File

@@ -1016,33 +1016,35 @@ HFSessionCustomParameter = struct_HFSessionCustomParameter# /Users/tunm/work/Ins
PHFSessionCustomParameter = POINTER(struct_HFSessionCustomParameter)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 132
enum_HFDetectMode = c_int# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 141
enum_HFDetectMode = c_int# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 142
HF_DETECT_MODE_IMAGE = 0# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 141
HF_DETECT_MODE_ALWAYS_DETECT = 0# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 142
HF_DETECT_MODE_VIDEO = (HF_DETECT_MODE_IMAGE + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 141
HF_DETECT_MODE_LIGHT_TRACK = (HF_DETECT_MODE_ALWAYS_DETECT + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 142
HFDetectMode = enum_HFDetectMode# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 141
HF_DETECT_MODE_TRACK_BY_DETECTION = (HF_DETECT_MODE_LIGHT_TRACK + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 142
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 152
HFDetectMode = enum_HFDetectMode# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 142
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 157
if _libs[_LIBRARY_FILENAME].has("HFCreateInspireFaceSession", "cdecl"):
HFCreateInspireFaceSession = _libs[_LIBRARY_FILENAME].get("HFCreateInspireFaceSession", "cdecl")
HFCreateInspireFaceSession.argtypes = [HFSessionCustomParameter, HFDetectMode, HInt32, POINTER(HFSession)]
HFCreateInspireFaceSession.argtypes = [HFSessionCustomParameter, HFDetectMode, HInt32, HInt32, HInt32, POINTER(HFSession)]
HFCreateInspireFaceSession.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 168
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 179
if _libs[_LIBRARY_FILENAME].has("HFCreateInspireFaceSessionOptional", "cdecl"):
HFCreateInspireFaceSessionOptional = _libs[_LIBRARY_FILENAME].get("HFCreateInspireFaceSessionOptional", "cdecl")
HFCreateInspireFaceSessionOptional.argtypes = [HOption, HFDetectMode, HInt32, POINTER(HFSession)]
HFCreateInspireFaceSessionOptional.argtypes = [HOption, HFDetectMode, HInt32, HInt32, HInt32, POINTER(HFSession)]
HFCreateInspireFaceSessionOptional.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 181
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 194
if _libs[_LIBRARY_FILENAME].has("HFReleaseInspireFaceSession", "cdecl"):
HFReleaseInspireFaceSession = _libs[_LIBRARY_FILENAME].get("HFReleaseInspireFaceSession", "cdecl")
HFReleaseInspireFaceSession.argtypes = [HFSession]
HFReleaseInspireFaceSession.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 191
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 204
class struct_HFFaceBasicToken(Structure):
pass
@@ -1055,11 +1057,11 @@ struct_HFFaceBasicToken._fields_ = [
('data', HPVoid),
]
HFFaceBasicToken = struct_HFFaceBasicToken# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 191
HFFaceBasicToken = struct_HFFaceBasicToken# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 204
PHFFaceBasicToken = POINTER(struct_HFFaceBasicToken)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 191
PHFFaceBasicToken = POINTER(struct_HFFaceBasicToken)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 204
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 202
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 215
class struct_HFFaceEulerAngle(Structure):
pass
@@ -1074,9 +1076,9 @@ struct_HFFaceEulerAngle._fields_ = [
('pitch', POINTER(HFloat)),
]
HFFaceEulerAngle = struct_HFFaceEulerAngle# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 202
HFFaceEulerAngle = struct_HFFaceEulerAngle# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 215
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 216
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 229
class struct_HFMultipleFaceData(Structure):
pass
@@ -1095,47 +1097,47 @@ struct_HFMultipleFaceData._fields_ = [
('tokens', PHFFaceBasicToken),
]
HFMultipleFaceData = struct_HFMultipleFaceData# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 216
HFMultipleFaceData = struct_HFMultipleFaceData# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 229
PHFMultipleFaceData = POINTER(struct_HFMultipleFaceData)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 216
PHFMultipleFaceData = POINTER(struct_HFMultipleFaceData)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 229
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 226
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 239
if _libs[_LIBRARY_FILENAME].has("HFSessionSetTrackPreviewSize", "cdecl"):
HFSessionSetTrackPreviewSize = _libs[_LIBRARY_FILENAME].get("HFSessionSetTrackPreviewSize", "cdecl")
HFSessionSetTrackPreviewSize.argtypes = [HFSession, HInt32]
HFSessionSetTrackPreviewSize.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 235
if _libs[_LIBRARY_FILENAME].has("HFSessionSetFaceTrackMode", "cdecl"):
HFSessionSetFaceTrackMode = _libs[_LIBRARY_FILENAME].get("HFSessionSetFaceTrackMode", "cdecl")
HFSessionSetFaceTrackMode.argtypes = [HFSession, HFDetectMode]
HFSessionSetFaceTrackMode.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 248
if _libs[_LIBRARY_FILENAME].has("HFSessionSetFilterMinimumFacePixelSize", "cdecl"):
HFSessionSetFilterMinimumFacePixelSize = _libs[_LIBRARY_FILENAME].get("HFSessionSetFilterMinimumFacePixelSize", "cdecl")
HFSessionSetFilterMinimumFacePixelSize.argtypes = [HFSession, HInt32]
HFSessionSetFilterMinimumFacePixelSize.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 244
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 257
if _libs[_LIBRARY_FILENAME].has("HFSessionSetFaceDetectThreshold", "cdecl"):
HFSessionSetFaceDetectThreshold = _libs[_LIBRARY_FILENAME].get("HFSessionSetFaceDetectThreshold", "cdecl")
HFSessionSetFaceDetectThreshold.argtypes = [HFSession, HFloat]
HFSessionSetFaceDetectThreshold.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 254
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 267
if _libs[_LIBRARY_FILENAME].has("HFExecuteFaceTrack", "cdecl"):
HFExecuteFaceTrack = _libs[_LIBRARY_FILENAME].get("HFExecuteFaceTrack", "cdecl")
HFExecuteFaceTrack.argtypes = [HFSession, HFImageStream, PHFMultipleFaceData]
HFExecuteFaceTrack.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 271
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 284
if _libs[_LIBRARY_FILENAME].has("HFCopyFaceBasicToken", "cdecl"):
HFCopyFaceBasicToken = _libs[_LIBRARY_FILENAME].get("HFCopyFaceBasicToken", "cdecl")
HFCopyFaceBasicToken.argtypes = [HFFaceBasicToken, HPBuffer, HInt32]
HFCopyFaceBasicToken.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 285
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 298
if _libs[_LIBRARY_FILENAME].has("HFGetFaceBasicTokenSize", "cdecl"):
HFGetFaceBasicTokenSize = _libs[_LIBRARY_FILENAME].get("HFGetFaceBasicTokenSize", "cdecl")
HFGetFaceBasicTokenSize.argtypes = [HPInt32]
HFGetFaceBasicTokenSize.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 299
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 312
class struct_HFFaceFeature(Structure):
pass
@@ -1148,31 +1150,31 @@ struct_HFFaceFeature._fields_ = [
('data', HPFloat),
]
HFFaceFeature = struct_HFFaceFeature# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 299
HFFaceFeature = struct_HFFaceFeature# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 312
PHFFaceFeature = POINTER(struct_HFFaceFeature)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 299
PHFFaceFeature = POINTER(struct_HFFaceFeature)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 312
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 311
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 324
if _libs[_LIBRARY_FILENAME].has("HFFaceFeatureExtract", "cdecl"):
HFFaceFeatureExtract = _libs[_LIBRARY_FILENAME].get("HFFaceFeatureExtract", "cdecl")
HFFaceFeatureExtract.argtypes = [HFSession, HFImageStream, HFFaceBasicToken, PHFFaceFeature]
HFFaceFeatureExtract.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 323
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 336
if _libs[_LIBRARY_FILENAME].has("HFFaceFeatureExtractCpy", "cdecl"):
HFFaceFeatureExtractCpy = _libs[_LIBRARY_FILENAME].get("HFFaceFeatureExtractCpy", "cdecl")
HFFaceFeatureExtractCpy.argtypes = [HFSession, HFImageStream, HFFaceBasicToken, HPFloat]
HFFaceFeatureExtractCpy.restype = HResult
enum_HFSearchMode = c_int# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 336
enum_HFSearchMode = c_int# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
HF_SEARCH_MODE_EAGER = 0# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 336
HF_SEARCH_MODE_EAGER = 0# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
HF_SEARCH_MODE_EXHAUSTIVE = (HF_SEARCH_MODE_EAGER + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 336
HF_SEARCH_MODE_EXHAUSTIVE = (HF_SEARCH_MODE_EAGER + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
HFSearchMode = enum_HFSearchMode# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 336
HFSearchMode = enum_HFSearchMode# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 362
class struct_HFFeatureHubConfiguration(Structure):
pass
@@ -1191,21 +1193,21 @@ struct_HFFeatureHubConfiguration._fields_ = [
('searchMode', HFSearchMode),
]
HFFeatureHubConfiguration = struct_HFFeatureHubConfiguration# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
HFFeatureHubConfiguration = struct_HFFeatureHubConfiguration# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 362
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 361
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 374
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubDataEnable", "cdecl"):
HFFeatureHubDataEnable = _libs[_LIBRARY_FILENAME].get("HFFeatureHubDataEnable", "cdecl")
HFFeatureHubDataEnable.argtypes = [HFFeatureHubConfiguration]
HFFeatureHubDataEnable.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 367
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 380
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubDataDisable", "cdecl"):
HFFeatureHubDataDisable = _libs[_LIBRARY_FILENAME].get("HFFeatureHubDataDisable", "cdecl")
HFFeatureHubDataDisable.argtypes = []
HFFeatureHubDataDisable.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 379
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 392
class struct_HFFaceFeatureIdentity(Structure):
pass
@@ -1220,11 +1222,11 @@ struct_HFFaceFeatureIdentity._fields_ = [
('feature', PHFFaceFeature),
]
HFFaceFeatureIdentity = struct_HFFaceFeatureIdentity# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 379
HFFaceFeatureIdentity = struct_HFFaceFeatureIdentity# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 392
PHFFaceFeatureIdentity = POINTER(struct_HFFaceFeatureIdentity)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 379
PHFFaceFeatureIdentity = POINTER(struct_HFFaceFeatureIdentity)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 392
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 388
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 401
class struct_HFSearchTopKResults(Structure):
pass
@@ -1239,92 +1241,89 @@ struct_HFSearchTopKResults._fields_ = [
('customIds', HPInt32),
]
HFSearchTopKResults = struct_HFSearchTopKResults# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 388
HFSearchTopKResults = struct_HFSearchTopKResults# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 401
PHFSearchTopKResults = POINTER(struct_HFSearchTopKResults)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 388
PHFSearchTopKResults = POINTER(struct_HFSearchTopKResults)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 401
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 399
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 412
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubFaceSearchThresholdSetting", "cdecl"):
HFFeatureHubFaceSearchThresholdSetting = _libs[_LIBRARY_FILENAME].get("HFFeatureHubFaceSearchThresholdSetting", "cdecl")
HFFeatureHubFaceSearchThresholdSetting.argtypes = [c_float]
HFFeatureHubFaceSearchThresholdSetting.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 410
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 423
if _libs[_LIBRARY_FILENAME].has("HFFaceComparison", "cdecl"):
HFFaceComparison = _libs[_LIBRARY_FILENAME].get("HFFaceComparison", "cdecl")
HFFaceComparison.argtypes = [HFFaceFeature, HFFaceFeature, HPFloat]
HFFaceComparison.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 418
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 431
if _libs[_LIBRARY_FILENAME].has("HFGetFeatureLength", "cdecl"):
HFGetFeatureLength = _libs[_LIBRARY_FILENAME].get("HFGetFeatureLength", "cdecl")
HFGetFeatureLength.argtypes = [HPInt32]
HFGetFeatureLength.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 427
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 440
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubInsertFeature", "cdecl"):
HFFeatureHubInsertFeature = _libs[_LIBRARY_FILENAME].get("HFFeatureHubInsertFeature", "cdecl")
HFFeatureHubInsertFeature.argtypes = [HFFaceFeatureIdentity]
HFFeatureHubInsertFeature.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 437
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 450
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubFaceSearch", "cdecl"):
HFFeatureHubFaceSearch = _libs[_LIBRARY_FILENAME].get("HFFeatureHubFaceSearch", "cdecl")
HFFeatureHubFaceSearch.argtypes = [HFFaceFeature, HPFloat, PHFFaceFeatureIdentity]
HFFeatureHubFaceSearch.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 447
for _lib in _libs.values():
if not _lib.has("HFFeatureHubFaceSearchTopK", "cdecl"):
continue
HFFeatureHubFaceSearchTopK = _lib.get("HFFeatureHubFaceSearchTopK", "cdecl")
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 460
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubFaceSearchTopK", "cdecl"):
HFFeatureHubFaceSearchTopK = _libs[_LIBRARY_FILENAME].get("HFFeatureHubFaceSearchTopK", "cdecl")
HFFeatureHubFaceSearchTopK.argtypes = [HFFaceFeature, HInt32, PHFSearchTopKResults]
HFFeatureHubFaceSearchTopK.restype = HResult
break
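This hunk replaces the old scan over every loaded library with a direct lookup in _LIBRARY_FILENAME, matching the style of the surrounding bindings. A hedged sketch of the top-K call itself, assuming `feature` is an HFFaceFeature already filled by extraction:
topk = struct_HFSearchTopKResults()
ret = HFFeatureHubFaceSearchTopK(feature, HInt32(5), pointer(topk))   # top-5 search
# on success, topk.customIds (see the _fields_ above) points at the matched custom IDs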
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 455
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 468
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubFaceRemove", "cdecl"):
HFFeatureHubFaceRemove = _libs[_LIBRARY_FILENAME].get("HFFeatureHubFaceRemove", "cdecl")
HFFeatureHubFaceRemove.argtypes = [HInt32]
HFFeatureHubFaceRemove.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 463
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 476
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubFaceUpdate", "cdecl"):
HFFeatureHubFaceUpdate = _libs[_LIBRARY_FILENAME].get("HFFeatureHubFaceUpdate", "cdecl")
HFFeatureHubFaceUpdate.argtypes = [HFFaceFeatureIdentity]
HFFeatureHubFaceUpdate.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 472
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 485
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubGetFaceIdentity", "cdecl"):
HFFeatureHubGetFaceIdentity = _libs[_LIBRARY_FILENAME].get("HFFeatureHubGetFaceIdentity", "cdecl")
HFFeatureHubGetFaceIdentity.argtypes = [HInt32, PHFFaceFeatureIdentity]
HFFeatureHubGetFaceIdentity.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 480
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 493
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubGetFaceCount", "cdecl"):
HFFeatureHubGetFaceCount = _libs[_LIBRARY_FILENAME].get("HFFeatureHubGetFaceCount", "cdecl")
HFFeatureHubGetFaceCount.argtypes = [POINTER(HInt32)]
HFFeatureHubGetFaceCount.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 487
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 500
if _libs[_LIBRARY_FILENAME].has("HFFeatureHubViewDBTable", "cdecl"):
HFFeatureHubViewDBTable = _libs[_LIBRARY_FILENAME].get("HFFeatureHubViewDBTable", "cdecl")
HFFeatureHubViewDBTable.argtypes = []
HFFeatureHubViewDBTable.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 506
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 519
if _libs[_LIBRARY_FILENAME].has("HFMultipleFacePipelineProcess", "cdecl"):
HFMultipleFacePipelineProcess = _libs[_LIBRARY_FILENAME].get("HFMultipleFacePipelineProcess", "cdecl")
HFMultipleFacePipelineProcess.argtypes = [HFSession, HFImageStream, PHFMultipleFaceData, HFSessionCustomParameter]
HFMultipleFacePipelineProcess.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 522
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 535
if _libs[_LIBRARY_FILENAME].has("HFMultipleFacePipelineProcessOptional", "cdecl"):
HFMultipleFacePipelineProcessOptional = _libs[_LIBRARY_FILENAME].get("HFMultipleFacePipelineProcessOptional", "cdecl")
HFMultipleFacePipelineProcessOptional.argtypes = [HFSession, HFImageStream, PHFMultipleFaceData, HInt32]
HFMultipleFacePipelineProcessOptional.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 534
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 547
class struct_HFRGBLivenessConfidence(Structure):
pass
@@ -1337,17 +1336,17 @@ struct_HFRGBLivenessConfidence._fields_ = [
('confidence', HPFloat),
]
HFRGBLivenessConfidence = struct_HFRGBLivenessConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 534
HFRGBLivenessConfidence = struct_HFRGBLivenessConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 547
PHFRGBLivenessConfidence = POINTER(struct_HFRGBLivenessConfidence)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 534
PHFRGBLivenessConfidence = POINTER(struct_HFRGBLivenessConfidence)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 547
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 547
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 560
if _libs[_LIBRARY_FILENAME].has("HFGetRGBLivenessConfidence", "cdecl"):
HFGetRGBLivenessConfidence = _libs[_LIBRARY_FILENAME].get("HFGetRGBLivenessConfidence", "cdecl")
HFGetRGBLivenessConfidence.argtypes = [HFSession, PHFRGBLivenessConfidence]
HFGetRGBLivenessConfidence.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 558
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 571
class struct_HFFaceMaskConfidence(Structure):
pass
@@ -1360,17 +1359,17 @@ struct_HFFaceMaskConfidence._fields_ = [
('confidence', HPFloat),
]
HFFaceMaskConfidence = struct_HFFaceMaskConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 558
HFFaceMaskConfidence = struct_HFFaceMaskConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 571
PHFFaceMaskConfidence = POINTER(struct_HFFaceMaskConfidence)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 558
PHFFaceMaskConfidence = POINTER(struct_HFFaceMaskConfidence)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 571
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 570
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 583
if _libs[_LIBRARY_FILENAME].has("HFGetFaceMaskConfidence", "cdecl"):
HFGetFaceMaskConfidence = _libs[_LIBRARY_FILENAME].get("HFGetFaceMaskConfidence", "cdecl")
HFGetFaceMaskConfidence.argtypes = [HFSession, PHFFaceMaskConfidence]
HFGetFaceMaskConfidence.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 581
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 594
class struct_HFFaceQualityConfidence(Structure):
pass
@@ -1383,23 +1382,23 @@ struct_HFFaceQualityConfidence._fields_ = [
('confidence', HPFloat),
]
HFFaceQualityConfidence = struct_HFFaceQualityConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 581
HFFaceQualityConfidence = struct_HFFaceQualityConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 594
PHFFaceQualityConfidence = POINTER(struct_HFFaceQualityConfidence)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 581
PHFFaceQualityConfidence = POINTER(struct_HFFaceQualityConfidence)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 594
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 593
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 606
if _libs[_LIBRARY_FILENAME].has("HFGetFaceQualityConfidence", "cdecl"):
HFGetFaceQualityConfidence = _libs[_LIBRARY_FILENAME].get("HFGetFaceQualityConfidence", "cdecl")
HFGetFaceQualityConfidence.argtypes = [HFSession, PHFFaceQualityConfidence]
HFGetFaceQualityConfidence.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 605
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 618
if _libs[_LIBRARY_FILENAME].has("HFFaceQualityDetect", "cdecl"):
HFFaceQualityDetect = _libs[_LIBRARY_FILENAME].get("HFFaceQualityDetect", "cdecl")
HFFaceQualityDetect.argtypes = [HFSession, HFFaceBasicToken, POINTER(HFloat)]
HFFaceQualityDetect.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 618
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 631
class struct_HFInspireFaceVersion(Structure):
pass
@@ -1414,45 +1413,45 @@ struct_HFInspireFaceVersion._fields_ = [
('patch', c_int),
]
HFInspireFaceVersion = struct_HFInspireFaceVersion# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 618
HFInspireFaceVersion = struct_HFInspireFaceVersion# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 631
PHFInspireFaceVersion = POINTER(struct_HFInspireFaceVersion)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 618
PHFInspireFaceVersion = POINTER(struct_HFInspireFaceVersion)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 631
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 628
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 641
if _libs[_LIBRARY_FILENAME].has("HFQueryInspireFaceVersion", "cdecl"):
HFQueryInspireFaceVersion = _libs[_LIBRARY_FILENAME].get("HFQueryInspireFaceVersion", "cdecl")
HFQueryInspireFaceVersion.argtypes = [PHFInspireFaceVersion]
HFQueryInspireFaceVersion.restype = HResult
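The version query mirrors the Python wrapper shown later in this diff: allocate the struct, pass a pointer, read major/minor/patch. A minimal sketch:
ver = HFInspireFaceVersion()
HFQueryInspireFaceVersion(PHFInspireFaceVersion(ver))
print(f"InspireFace {ver.major}.{ver.minor}.{ver.patch}")   # 1.1.0 for this release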
enum_HFLogLevel = c_int# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
enum_HFLogLevel = c_int# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HF_LOG_NONE = 0# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HF_LOG_NONE = 0# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HF_LOG_DEBUG = (HF_LOG_NONE + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HF_LOG_DEBUG = (HF_LOG_NONE + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HF_LOG_INFO = (HF_LOG_DEBUG + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HF_LOG_INFO = (HF_LOG_DEBUG + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HF_LOG_WARN = (HF_LOG_INFO + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HF_LOG_WARN = (HF_LOG_INFO + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HF_LOG_ERROR = (HF_LOG_WARN + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HF_LOG_ERROR = (HF_LOG_WARN + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HF_LOG_FATAL = (HF_LOG_ERROR + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HF_LOG_FATAL = (HF_LOG_ERROR + 1)# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
HFLogLevel = enum_HFLogLevel# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 640
HFLogLevel = enum_HFLogLevel# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 653
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 645
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 658
if _libs[_LIBRARY_FILENAME].has("HFSetLogLevel", "cdecl"):
HFSetLogLevel = _libs[_LIBRARY_FILENAME].get("HFSetLogLevel", "cdecl")
HFSetLogLevel.argtypes = [HFLogLevel]
HFSetLogLevel.restype = HResult
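A one-line sketch of the logging controls bound above, using the HF_LOG_* values from the enum:
HFSetLogLevel(HF_LOG_WARN)   # keep WARN and above
# HFLogDisable() silences the library entirely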
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 650
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 663
if _libs[_LIBRARY_FILENAME].has("HFLogDisable", "cdecl"):
HFLogDisable = _libs[_LIBRARY_FILENAME].get("HFLogDisable", "cdecl")
HFLogDisable.argtypes = []
HFLogDisable.restype = HResult
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 663
# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 676
if _libs[_LIBRARY_FILENAME].has("HFDeBugImageStreamImShow", "cdecl"):
HFDeBugImageStreamImShow = _libs[_LIBRARY_FILENAME].get("HFDeBugImageStreamImShow", "cdecl")
HFDeBugImageStreamImShow.argtypes = [HFImageStream]
@@ -1516,27 +1515,27 @@ HFImageData = struct_HFImageData# /Users/tunm/work/InspireFace/cpp/inspireface/c
HFSessionCustomParameter = struct_HFSessionCustomParameter# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 132
HFFaceBasicToken = struct_HFFaceBasicToken# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 191
HFFaceBasicToken = struct_HFFaceBasicToken# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 204
HFFaceEulerAngle = struct_HFFaceEulerAngle# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 202
HFFaceEulerAngle = struct_HFFaceEulerAngle# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 215
HFMultipleFaceData = struct_HFMultipleFaceData# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 216
HFMultipleFaceData = struct_HFMultipleFaceData# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 229
HFFaceFeature = struct_HFFaceFeature# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 299
HFFaceFeature = struct_HFFaceFeature# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 312
HFFeatureHubConfiguration = struct_HFFeatureHubConfiguration# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 349
HFFeatureHubConfiguration = struct_HFFeatureHubConfiguration# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 362
HFFaceFeatureIdentity = struct_HFFaceFeatureIdentity# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 379
HFFaceFeatureIdentity = struct_HFFaceFeatureIdentity# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 392
HFSearchTopKResults = struct_HFSearchTopKResults# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 388
HFSearchTopKResults = struct_HFSearchTopKResults# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 401
HFRGBLivenessConfidence = struct_HFRGBLivenessConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 534
HFRGBLivenessConfidence = struct_HFRGBLivenessConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 547
HFFaceMaskConfidence = struct_HFFaceMaskConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 558
HFFaceMaskConfidence = struct_HFFaceMaskConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 571
HFFaceQualityConfidence = struct_HFFaceQualityConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 581
HFFaceQualityConfidence = struct_HFFaceQualityConfidence# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 594
HFInspireFaceVersion = struct_HFInspireFaceVersion# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 618
HFInspireFaceVersion = struct_HFInspireFaceVersion# /Users/tunm/work/InspireFace/cpp/inspireface/c_api/inspireface.h: 631
# No inserted files

View File

@@ -131,7 +131,6 @@ class ImageStream(object):
return self._handle
# == Session API ==
@dataclass
@@ -234,6 +233,7 @@ class SessionCustomParameter:
return custom_param
class InspireFaceSession(object):
"""
Manages a session for face detection and recognition processes using the InspireFace library.
@@ -244,8 +244,9 @@ class InspireFaceSession(object):
param (int or SessionCustomParameter): Configuration parameters or flags for the session.
"""
def __init__(self, param, detect_mode: int = HF_DETECT_MODE_IMAGE,
max_detect_num: int = 10):
def __init__(self, param, detect_mode: int = HF_DETECT_MODE_ALWAYS_DETECT,
max_detect_num: int = 10, detect_pixel_level=-1, track_by_detect_mode_fps=-1):
"""
Initializes a new session with the provided configuration parameters.
Args:
@@ -259,9 +260,11 @@ class InspireFaceSession(object):
self._sess = HFSession()
self.param = param
if isinstance(self.param, SessionCustomParameter):
ret = HFCreateInspireFaceSession(self.param._c_struct(), detect_mode, max_detect_num, self._sess)
ret = HFCreateInspireFaceSession(self.param._c_struct(), detect_mode, max_detect_num, detect_pixel_level,
track_by_detect_mode_fps, self._sess)
elif isinstance(self.param, int):
ret = HFCreateInspireFaceSessionOptional(self.param, detect_mode, max_detect_num, self._sess)
ret = HFCreateInspireFaceSessionOptional(self.param, detect_mode, max_detect_num, detect_pixel_level,
track_by_detect_mode_fps, self._sess)
else:
raise NotImplementedError("param must be a SessionCustomParameter or an int")
if ret != 0:
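The constructor now threads two new tuning knobs through to the C API: detect_pixel_level (the detector's input resolution) and track_by_detect_mode_fps (the detection cadence used by track-by-detection mode), both defaulting to -1 to keep the library defaults. A hedged sketch of the 1.1.0 call shape (the flag choice and numeric values are illustrative only):
import inspireface as ifac
session = ifac.InspireFaceSession(
    HF_ENABLE_FACE_RECOGNITION,               # or a SessionCustomParameter instance
    detect_mode=HF_DETECT_MODE_TRACK_BY_DETECTION,
    max_detect_num=10,
    detect_pixel_level=160,                   # illustrative value; -1 keeps the default
    track_by_detect_mode_fps=30,              # illustrative value; -1 keeps the default
)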
@@ -314,20 +317,6 @@ class InspireFaceSession(object):
else:
return []
def set_track_mode(self, mode: int):
"""
Sets the tracking mode for the face detection session.
Args:
mode (int): An integer representing the tracking mode to be used.
Notes:
If setting the mode fails, an error is logged with the returned status code.
"""
ret = HFSessionSetFaceTrackMode(self._sess, mode)
if ret != 0:
logger.error(f"Set track mode error: {ret}")
def set_track_preview_size(self, size=192):
"""
Sets the preview size for the face tracking session.
@@ -342,6 +331,12 @@ class InspireFaceSession(object):
if ret != 0:
logger.error(f"Set track preview size error: {ret}")
def set_filter_minimum_face_pixel_size(self, min_size=32):
ret = HFSessionSetFilterMinimumFacePixelSize(self._sess, min_size)
if ret != 0:
logger.error(f"Set filter minimum face pixel size error: {ret}")
def face_pipeline(self, image, faces: List[FaceInformation], exec_param) -> List[FaceExtended]:
"""
Processes detected faces to extract additional attributes based on the provided execution parameters.
@@ -494,6 +489,7 @@ class InspireFaceSession(object):
def __del__(self):
self.release()
# == Global API ==
def launch(resource_path: str) -> bool:
"""
@@ -623,7 +619,6 @@ def feature_comparison(feature1: np.ndarray, feature2: np.ndarray) -> float:
return float(comparison_result.value)
class FaceIdentity(object):
"""
Represents an identity based on facial features, associating the features with a custom ID and a tag.
@@ -688,6 +683,7 @@ class FaceIdentity(object):
feature=PHFFaceFeature(feature)
)
def feature_hub_set_search_threshold(threshold: float):
"""
Sets the search threshold for face matching in the FeatureHub.
@@ -697,6 +693,7 @@ def feature_hub_set_search_threshold(threshold: float):
"""
HFFeatureHubFaceSearchThresholdSetting(threshold)
def feature_hub_face_insert(face_identity: FaceIdentity) -> bool:
"""
Inserts a face identity into the FeatureHub database.
@@ -716,6 +713,7 @@ def feature_hub_face_insert(face_identity: FaceIdentity) -> bool:
return False
return True
@dataclass
class SearchResult:
"""
@@ -728,6 +726,7 @@ class SearchResult:
confidence: float
similar_identity: FaceIdentity
def feature_hub_face_search(data: np.ndarray) -> SearchResult:
"""
Searches for the most similar face identity in the feature hub based on provided facial features.
@@ -755,6 +754,7 @@ def feature_hub_face_search(data: np.ndarray) -> SearchResult:
none = FaceIdentity(np.zeros(0), most_similar.customId, "None")
return SearchResult(confidence=confidence.value, similar_identity=none)
def feature_hub_face_search_top_k(data: np.ndarray, top_k: int) -> List[Tuple]:
"""
Searches for the top 'k' most similar face identities in the feature hub based on provided facial features.
@@ -780,6 +780,7 @@ def feature_hub_face_search_top_k(data: np.ndarray, top_k: int) -> List[Tuple]:
outputs.append((confidence, customId))
return outputs
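Per the append above, feature_hub_face_search_top_k() returns a list of (confidence, custom_id) tuples. A minimal sketch of consuming it, assuming `feature` is a feature vector obtained from face_feature_extract():
results = ifac.feature_hub_face_search_top_k(feature, top_k=10)
for confidence, custom_id in results:
    print(f"id={custom_id}  confidence={confidence:.3f}")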
def feature_hub_face_update(face_identity: FaceIdentity) -> bool:
"""
Updates an existing face identity in the feature hub.
@@ -799,6 +800,7 @@ def feature_hub_face_update(face_identity: FaceIdentity) -> bool:
return False
return True
def feature_hub_face_remove(custom_id: int) -> bool:
"""
Removes a face identity from the feature hub using its custom ID.
@@ -818,6 +820,7 @@ def feature_hub_face_remove(custom_id: int) -> bool:
return False
return True
def feature_hub_get_face_identity(custom_id: int):
"""
Retrieves a face identity from the feature hub using its custom ID.
@@ -839,6 +842,7 @@ def feature_hub_get_face_identity(custom_id: int):
return FaceIdentity.from_ctypes(identify)
def feature_hub_get_face_count() -> int:
"""
Retrieves the total count of face identities stored in the feature hub.
@@ -856,6 +860,7 @@ def feature_hub_get_face_count() -> int:
return int(count.value)
def view_table_in_terminal():
"""
Displays the database table of face identities in the terminal.
@@ -867,6 +872,7 @@ def view_table_in_terminal():
if ret != 0:
logger.error(f"Failed to view DB: {ret}")
def version() -> str:
"""
Retrieves the version of the InspireFace library.
@@ -878,6 +884,7 @@ def version() -> str:
HFQueryInspireFaceVersion(PHFInspireFaceVersion(ver))
return f"{ver.major}.{ver.minor}.{ver.patch}"
def set_logging_level(level: int) -> None:
"""
Sets the logging level of the InspireFace library.
@@ -887,6 +894,7 @@ def set_logging_level(level: int) -> None:
"""
HFSetLogLevel(level)
def disable_logging() -> None:
"""
Disables all logging from the InspireFace library.

View File

@@ -5,7 +5,7 @@ from inspireface.modules.core.native import HF_ENABLE_NONE, HF_ENABLE_FACE_RECOG
HF_ENABLE_MASK_DETECT, HF_ENABLE_AGE_PREDICT, HF_ENABLE_GENDER_PREDICT, HF_ENABLE_QUALITY, HF_ENABLE_INTERACTION
# Face track mode
from inspireface.modules.core.native import HF_DETECT_MODE_IMAGE, HF_DETECT_MODE_VIDEO
from inspireface.modules.core.native import HF_DETECT_MODE_ALWAYS_DETECT, HF_DETECT_MODE_LIGHT_TRACK, HF_DETECT_MODE_TRACK_BY_DETECTION
# Image format
from inspireface.modules.core.native import HF_STREAM_RGB, HF_STREAM_BGR, HF_STREAM_RGBA, HF_STREAM_BGRA, HF_STREAM_YUV_NV12, HF_STREAM_YUV_NV21
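The detect-mode constants were renamed in 1.1.0, and the test diffs below apply the mapping mechanically. A sketch of the old-to-new correspondence:
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_ALWAYS_DETECT)  # was HF_DETECT_MODE_IMAGE
tracker = ifac.InspireFaceSession(opt, HF_DETECT_MODE_LIGHT_TRACK)    # was HF_DETECT_MODE_VIDEO
# HF_DETECT_MODE_TRACK_BY_DETECTION is new and has no 1.0.x counterpart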

View File

@@ -18,7 +18,7 @@ def case_face_detection_image(resource_path, image_path):
# Optional features, loaded during session creation based on the modules specified.
opt = HF_ENABLE_FACE_RECOGNITION | HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_IMAGE)
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_ALWAYS_DETECT)
# Load the image using OpenCV.
image = cv2.imread(image_path)

View File

@@ -20,7 +20,7 @@ def case_face_recognition(resource_path, test_data_folder):
# Enable face recognition features.
opt = HF_ENABLE_FACE_RECOGNITION
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_IMAGE)
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_ALWAYS_DETECT)
# Configure the feature management system.
feature_hub_config = ifac.FeatureHubConfiguration(

View File

@@ -26,7 +26,7 @@ def case_face_tracker_from_video(resource_path, source, show):
# Optional features, loaded during session creation based on the modules specified.
opt = HF_ENABLE_NONE
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_VIDEO) # Use video mode
session = ifac.InspireFaceSession(opt, HF_DETECT_MODE_LIGHT_TRACK)  # Use light-track mode for video streams
# Determine if the source is a digital webcam index or a video file path.
try:

View File

@@ -33,7 +33,7 @@ TEST_FACE_COMPARISON_IMAGE_THRESHOLD = 0.45
TEST_PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# Current project path
CURRENT_PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
CURRENT_PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Main project path
MAIN_PROJECT_PATH = os.path.dirname(CURRENT_PROJECT_PATH)

View File

@@ -181,7 +181,6 @@ def lfw_generator(directory_path):
def batch_import_lfw_faces(lfw_path, engine: ifac.InspireFaceSession, num_of_faces: int):
engine.set_track_mode(HF_DETECT_MODE_IMAGE)
generator = lfw_generator(lfw_path)
registered_faces = 0

View File

@@ -16,7 +16,7 @@ class CameraStreamCase(unittest.TestCase):
def test_stream_rotation(self) -> None:
# Prepare material
engine = ifac.InspireFaceSession(HF_ENABLE_NONE, HF_DETECT_MODE_IMAGE)
engine = ifac.InspireFaceSession(HF_ENABLE_NONE, HF_DETECT_MODE_ALWAYS_DETECT)
# Prepare rotation images
rotation_images_filenames = ["rotate/rot_0.jpg", "rotate/rot_90.jpg", "rotate/rot_180.jpg","rotate/rot_270.jpg"]
rotation_images = [cv2.imread(get_test_data(path)) for path in rotation_images_filenames]

View File

@@ -12,13 +12,12 @@ class FaceRecognitionBaseCase(unittest.TestCase):
def setUp(self) -> None:
# Prepare material
track_mode = HF_DETECT_MODE_IMAGE
track_mode = HF_DETECT_MODE_ALWAYS_DETECT
param = ifac.SessionCustomParameter()
param.enable_recognition = True
self.engine = ifac.InspireFaceSession(param, track_mode, 10)
def test_face_feature_extraction(self):
self.engine.set_track_mode(mode=HF_DETECT_MODE_IMAGE)
# Prepare an image
image = cv2.imread(get_test_data("bulk/kun.jpg"))
self.assertIsNotNone(image)
@@ -38,7 +37,6 @@ class FaceRecognitionBaseCase(unittest.TestCase):
self.assertIsNotNone(feature)
#
def test_face_comparison(self):
self.engine.set_track_mode(mode=HF_DETECT_MODE_IMAGE)
# Prepare two pictures of someone
images_path_list = [get_test_data("bulk/kun.jpg"), get_test_data("bulk/jntm.jpg")]
self.assertEqual(len(images_path_list), 2, "Only 2 photos can be used for the 1v1 scene.")
@@ -89,7 +87,7 @@ class FaceRecognitionCRUDMemoryCase(unittest.TestCase):
search_threshold=TEST_FACE_COMPARISON_IMAGE_THRESHOLD,
)
ifac.feature_hub_enable(config)
track_mode = HF_DETECT_MODE_IMAGE
track_mode = HF_DETECT_MODE_ALWAYS_DETECT
param = ifac.SessionCustomParameter()
param.enable_recognition = True
cls.engine = ifac.InspireFaceSession(param, track_mode)
@@ -188,7 +186,7 @@ class FaceRecognitionFeatureExtractCase(unittest.TestCase):
self.stream = ifac.ImageStream.load_from_cv_image(image)
self.assertIsNotNone(self.stream)
# Prepare material
track_mode = HF_DETECT_MODE_IMAGE
track_mode = HF_DETECT_MODE_ALWAYS_DETECT
param = ifac.SessionCustomParameter()
param.enable_recognition = True
self.engine = ifac.InspireFaceSession(param, track_mode)
@@ -207,7 +205,6 @@ class FaceRecognitionFeatureExtractCase(unittest.TestCase):
@benchmark(test_name="Feature Extract", loop=1000)
def test_benchmark_feature_extract(self):
self.engine.set_track_mode(HF_DETECT_MODE_IMAGE)
for _ in range(self.loop):
feature = self.engine.face_feature_extract(self.stream, self.face)
self.assertEqual(TEST_MODEL_FACE_FEATURE_LENGTH, feature.size)

View File

@@ -9,10 +9,13 @@ class FaceTrackerCase(unittest.TestCase):
def setUp(self) -> None:
# Prepare material
track_mode = HF_DETECT_MODE_IMAGE # Use video mode
track_mode = HF_DETECT_MODE_ALWAYS_DETECT
self.engine = ifac.InspireFaceSession(param=ifac.SessionCustomParameter(),
detect_mode=track_mode)
self.engine_tk = ifac.InspireFaceSession(param=ifac.SessionCustomParameter(),
detect_mode=HF_DETECT_MODE_LIGHT_TRACK)
def test_face_detection_from_image(self):
image = cv2.imread(get_test_data("bulk/kun.jpg"))
self.assertIsNotNone(image)
@@ -33,8 +36,6 @@ class FaceTrackerCase(unittest.TestCase):
self.assertEqual(len(self.engine.face_detection(any_image)), 0)
def test_face_pose(self):
self.engine.set_track_mode(HF_DETECT_MODE_IMAGE)
# Test yaw (shake one's head)
left_face = cv2.imread(get_test_data("pose/left_face.jpeg"))
self.assertIsNotNone(left_face)
@@ -84,11 +85,9 @@ class FaceTrackerCase(unittest.TestCase):
self.assertEqual(True, right_face_roll > 30)
def test_face_track_from_video(self):
self.engine.set_track_mode(HF_DETECT_MODE_VIDEO)
# Read a video file
video_gen = read_video_generator(get_test_data("video/810_1684206192.mp4"))
results = [self.engine.face_detection(frame) for frame in video_gen]
results = [self.engine_tk.face_detection(frame) for frame in video_gen]
num_of_frame = len(results)
num_of_track_loss = len([faces for faces in results if not faces])
total_track_ids = [faces[0].track_id for faces in results if faces]
@@ -117,30 +116,27 @@ class FaceTrackerBenchmarkCase(unittest.TestCase):
self.image = cv2.imread(get_test_data("bulk/kun.jpg"))
self.assertIsNotNone(self.image)
# Prepare material
track_mode = HF_DETECT_MODE_VIDEO # Use video mode
self.engine = ifac.InspireFaceSession(HF_ENABLE_NONE, track_mode, )
self.engine = ifac.InspireFaceSession(HF_ENABLE_NONE, HF_DETECT_MODE_ALWAYS_DETECT, )
self.engine_tk = ifac.InspireFaceSession(HF_ENABLE_NONE, HF_DETECT_MODE_LIGHT_TRACK, )
# Prepare video data
self.video_gen = read_video_generator(get_test_data("video/810_1684206192.mp4"))
@benchmark(test_name="Face Detect", loop=1000)
def test_benchmark_face_detect(self):
self.engine.set_track_mode(HF_DETECT_MODE_IMAGE)
for _ in range(self.loop):
faces = self.engine.face_detection(self.image)
self.assertEqual(len(faces), 1, "No face detected; this may indicate an error, please check.")
@benchmark(test_name="Face Track", loop=1000)
def test_benchmark_face_track(self):
self.engine.set_track_mode(HF_DETECT_MODE_VIDEO)
for _ in range(self.loop):
faces = self.engine.face_detection(self.image)
faces = self.engine_tk.face_detection(self.image)
self.assertEqual(len(faces), 1, "No face detected; this may indicate an error, please check.")
@benchmark(test_name="Face Track(Video)", loop=345)
def test_benchmark_face_track_video(self):
self.engine.set_track_mode(HF_DETECT_MODE_VIDEO)
for frame in self.video_gen:
faces = self.engine.face_detection(frame)
faces = self.engine_tk.face_detection(frame)
self.assertEqual(len(faces), 1, "No face detected; this may indicate an error, please check.")
@classmethod

Binary file not shown (image asset updated: 202 KiB before → 39 KiB after).

View File

@@ -0,0 +1,486 @@
# This file is part of the ios-cmake project. It was retrieved from
# https://github.com/cristeab/ios-cmake.git, which is a fork of
# https://code.google.com/p/ios-cmake/. Which in turn is based off of
# the Platform/Darwin.cmake and Platform/UnixPaths.cmake files which
# are included with CMake 2.8.4
#
# The ios-cmake project is licensed under the new BSD license.
#
# Copyright (c) 2014, Bogdan Cristea and LTE Engineering Software,
# Kitware, Inc., Insight Software Consortium. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is based off of the Platform/Darwin.cmake and
# Platform/UnixPaths.cmake files which are included with CMake 2.8.4
# It has been altered for iOS development.
#
# Updated by Alex Stewart (alexs.mac@gmail.com)
#
# *****************************************************************************
# Now maintained by Alexander Widerberg (widerbergaren [at] gmail.com)
# under the BSD-3-Clause license
# https://github.com/leetal/ios-cmake
# *****************************************************************************
#
# INFORMATION / HELP
#
# The following variables control the behaviour of this toolchain:
#
# IOS_PLATFORM: OS (default) or SIMULATOR or SIMULATOR64 or TVOS or SIMULATOR_TVOS or WATCHOS or SIMULATOR_WATCHOS
# OS = Build for iPhoneOS.
# OS64 = Build for arm64 arm64e iPhoneOS.
# SIMULATOR = Build for x86 i386 iPhone Simulator.
# SIMULATOR64 = Build for x86_64 iPhone Simulator.
# TVOS = Build for AppleTVOS.
# SIMULATOR_TVOS = Build for x86_64 AppleTV Simulator.
# WATCHOS = Build for armv7k arm64_32 for WatchOS.
# SIMULATOR_WATCHOS = Build for x86_64 for Watch Simulator.
# CMAKE_OSX_SYSROOT: Path to the iOS SDK to use. By default this is
# automatically determined from IOS_PLATFORM and xcodebuild, but
# can also be manually specified (although this should not be required).
# CMAKE_IOS_DEVELOPER_ROOT: Path to the Developer directory for the iOS platform
# being compiled for. By default this is automatically determined from
# CMAKE_OSX_SYSROOT, but can also be manually specified (although this should
# not be required).
# ENABLE_BITCODE: (1|0) Enables or disables bitcode support. Default 1 (true)
# ENABLE_ARC: (1|0) Enables or disables ARC support. Default 1 (true, ARC enabled by default)
# ENABLE_VISIBILITY: (1|0) Enables or disables symbol visibility support. Default 0 (false, visibility hidden by default)
# IOS_ARCH: (armv7 armv7s armv7k arm64 arm64e arm64_32 i386 x86_64) If specified, will override the default architectures for the given IOS_PLATFORM
# OS = armv7 armv7s arm64 arm64e (if applicable)
# OS64 = arm64 arm64e (if applicable)
# SIMULATOR = i386 x86_64
# SIMULATOR64 = x86_64
# TVOS = arm64
# SIMULATOR_TVOS = x86_64 (i386 has since long been deprecated)
# WATCHOS = armv7k arm64_32 (if applicable)
# SIMULATOR_WATCHOS = x86_64 (i386 has since long been deprecated)
#
# This toolchain defines the following variables for use externally:
#
# XCODE_VERSION: Version number (not including Build version) of Xcode detected.
# IOS_SDK_VERSION: Version of iOS SDK being used.
# CMAKE_OSX_ARCHITECTURES: Architectures being compiled for (generated from
# IOS_PLATFORM).
#
# This toolchain defines the following macros for use externally:
#
# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE XCODE_VARIANT)
# A convenience macro for setting xcode specific properties on targets.
# Available variants are: All, Release, RelWithDebInfo, Debug, MinSizeRel
# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1" "all").
#
# find_host_package (PROGRAM ARGS)
# A macro used to find executable programs on the host system, not within the
# iOS environment. Thanks to the android-cmake project for providing the
# command.
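#
# Example invocation (a hedged sketch; the build directory and values are
# illustrative, and the variables are those documented above):
#
#   cmake .. -DCMAKE_TOOLCHAIN_FILE=/path/to/ios.toolchain.cmake \
#            -DIOS_PLATFORM=OS64 -DENABLE_BITCODE=0 -DIOS_DEPLOYMENT_TARGET=11.0
#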
# Fix for PThread library not in path
set(CMAKE_THREAD_LIBS_INIT "-lpthread")
set(CMAKE_HAVE_THREADS_LIBRARY 1)
set(CMAKE_USE_WIN32_THREADS_INIT 0)
set(CMAKE_USE_PTHREADS_INIT 1)
# Get the Xcode version being used.
execute_process(COMMAND xcodebuild -version
OUTPUT_VARIABLE XCODE_VERSION
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION "${XCODE_VERSION}")
string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION "${XCODE_VERSION}")
message(STATUS "Building with Xcode version: ${XCODE_VERSION}")
# Default to building for iPhoneOS if not specified otherwise, and we cannot
# determine the platform from the CMAKE_OSX_ARCHITECTURES variable. The use
# of CMAKE_OSX_ARCHITECTURES is such that try_compile() projects can correctly
# determine the value of IOS_PLATFORM from the root project, as
# CMAKE_OSX_ARCHITECTURES is propagated to them by CMake.
if (NOT DEFINED IOS_PLATFORM)
if (CMAKE_OSX_ARCHITECTURES)
if (CMAKE_OSX_ARCHITECTURES MATCHES ".*arm.*")
set(IOS_PLATFORM "OS")
elseif (CMAKE_OSX_ARCHITECTURES MATCHES "i386")
set(IOS_PLATFORM "SIMULATOR")
elseif (CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
set(IOS_PLATFORM "SIMULATOR64")
elseif (CMAKE_OSX_ARCHITECTURES MATCHES "armv7k")
set(IOS_PLATFORM "WATCHOS")
endif()
endif()
if (NOT IOS_PLATFORM)
set(IOS_PLATFORM "OS")
endif()
endif()
set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING
"Type of iOS platform for which to build.")
# Determine the platform name and architectures for use in xcodebuild commands
# from the specified IOS_PLATFORM name.
if (IOS_PLATFORM STREQUAL "OS")
set(XCODE_IOS_PLATFORM iphoneos)
if(NOT IOS_ARCH)
if (XCODE_VERSION VERSION_GREATER 10.0)
set(IOS_ARCH armv7 armv7s arm64 arm64e)
else()
set(IOS_ARCH armv7 armv7s arm64)
endif()
endif()
elseif (IOS_PLATFORM STREQUAL "OS64")
set(XCODE_IOS_PLATFORM iphoneos)
if(NOT IOS_ARCH)
if (XCODE_VERSION VERSION_GREATER 10.0)
set(IOS_ARCH arm64 arm64e)
else()
set(IOS_ARCH arm64)
endif()
endif()
elseif (IOS_PLATFORM STREQUAL "SIMULATOR")
set(XCODE_IOS_PLATFORM iphonesimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH i386 x86_64)
endif()
elseif(IOS_PLATFORM STREQUAL "SIMULATOR64")
set(XCODE_IOS_PLATFORM iphonesimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH x86_64)
endif()
elseif (IOS_PLATFORM STREQUAL "TVOS")
set(XCODE_IOS_PLATFORM appletvos)
if(NOT IOS_ARCH)
set(IOS_ARCH arm64)
endif()
elseif (IOS_PLATFORM STREQUAL "SIMULATOR_TVOS")
set(XCODE_IOS_PLATFORM appletvsimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH x86_64)
endif()
elseif (IOS_PLATFORM STREQUAL "WATCHOS")
set(XCODE_IOS_PLATFORM watchos)
if(NOT IOS_ARCH)
if (XCODE_VERSION VERSION_GREATER 10.0)
set(IOS_ARCH armv7k arm64_32)
else()
set(IOS_ARCH armv7k)
endif()
endif()
elseif (IOS_PLATFORM STREQUAL "SIMULATOR_WATCHOS")
set(XCODE_IOS_PLATFORM watchsimulator)
if(NOT IOS_ARCH)
set(IOS_ARCH x86_64)
endif()
else()
message(FATAL_ERROR "Invalid IOS_PLATFORM: ${IOS_PLATFORM}")
endif()
message(STATUS "Configuring iOS build for platform: ${IOS_PLATFORM}, "
"architecture(s): ${IOS_ARCH}")
# If user did not specify the SDK root to use, then query xcodebuild for it.
execute_process(COMMAND xcodebuild -version -sdk ${XCODE_IOS_PLATFORM} Path
OUTPUT_VARIABLE CMAKE_OSX_SYSROOT_INT
OUTPUT_QUIET ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
# If user did not specify the SDK root to use, then query xcodebuild for it.
if (NOT DEFINED CMAKE_OSX_SYSROOT OR (NOT CMAKE_OSX_SYSROOT STREQUAL CMAKE_OSX_SYSROOT_INT))
execute_process(COMMAND xcodebuild -version -sdk ${XCODE_IOS_PLATFORM} Path
OUTPUT_VARIABLE CMAKE_OSX_SYSROOT
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
if (NOT EXISTS ${CMAKE_OSX_SYSROOT})
message(SEND_ERROR "Please make sure that Xcode is installed and that the toolchain "
"is pointing to the correct path. Please run: "
"sudo xcode-select -s /Applications/Xcode.app/Contents/Developer "
"and see if that fixes the problem for you.")
message(FATAL_ERROR "Invalid CMAKE_OSX_SYSROOT: ${CMAKE_OSX_SYSROOT} "
"does not exist.")
elseif(DEFINED CMAKE_OSX_SYSROOT)
message(STATUS "Using manually set SDK path: ${CMAKE_OSX_SYSROOT} for platform: ${IOS_PLATFORM}")
else()
message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT} for platform: ${IOS_PLATFORM}")
endif()
# Specify minimum version of deployment target.
if (NOT DEFINED IOS_DEPLOYMENT_TARGET)
if (IOS_PLATFORM STREQUAL "WATCHOS" OR IOS_PLATFORM STREQUAL "SIMULATOR_WATCHOS")
# Unless specified, SDK version 2.0 is used by default as minimum target version (watchOS).
set(IOS_DEPLOYMENT_TARGET "2.0"
CACHE STRING "Minimum iOS version to build for." )
else()
# Unless specified, SDK version 8.0 is used by default as minimum target version (iOS, tvOS).
set(IOS_DEPLOYMENT_TARGET "8.0"
CACHE STRING "Minimum iOS version to build for." )
endif()
message(STATUS "Using the default min-version since IOS_DEPLOYMENT_TARGET not provided!")
endif()
# Use bitcode or not
if (NOT DEFINED ENABLE_BITCODE AND NOT IOS_ARCH MATCHES "((^|, )(i386|x86_64))+")
# Unless specified, enable bitcode support by default
set(ENABLE_BITCODE TRUE CACHE BOOL "Whether or not to enable bitcode")
message(STATUS "Enabling bitcode support by default. ENABLE_BITCODE not provided!")
endif()
if (NOT DEFINED ENABLE_BITCODE)
message(STATUS "Disabling bitcode support by default on simulators. ENABLE_BITCODE not provided for override!")
endif()
# Use ARC or not
if (NOT DEFINED ENABLE_ARC)
# Unless specified, enable ARC support by default
set(ENABLE_ARC TRUE CACHE BOOL "Whether or not to enable ARC")
message(STATUS "Enabling ARC support by default. ENABLE_ARC not provided!")
endif()
# Use hidden visibility or not
if (NOT DEFINED ENABLE_VISIBILITY)
# Unless specified, disable symbols visibility by default
set(ENABLE_VISIBILITY FALSE CACHE BOOL "Whether or not to hide symbols (-fvisibility=hidden)")
message(STATUS "Hiding symbols visibility by default. ENABLE_VISIBILITY not provided!")
endif()
# Get the SDK version information.
execute_process(COMMAND xcodebuild -sdk ${CMAKE_OSX_SYSROOT} -version SDKVersion
OUTPUT_VARIABLE IOS_SDK_VERSION
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Find the Developer root for the specific iOS platform being compiled for
# from CMAKE_OSX_SYSROOT. Should be ../../ from SDK specified in
# CMAKE_OSX_SYSROOT. There does not appear to be a direct way to obtain
# this information from xcrun or xcodebuild.
if (NOT CMAKE_IOS_DEVELOPER_ROOT)
get_filename_component(IOS_PLATFORM_SDK_DIR ${CMAKE_OSX_SYSROOT} PATH)
get_filename_component(CMAKE_IOS_DEVELOPER_ROOT ${IOS_PLATFORM_SDK_DIR} PATH)
endif()
if (NOT EXISTS ${CMAKE_IOS_DEVELOPER_ROOT})
message(FATAL_ERROR "Invalid CMAKE_IOS_DEVELOPER_ROOT: "
"${CMAKE_IOS_DEVELOPER_ROOT} does not exist.")
endif()
# Find the C & C++ compilers for the specified SDK.
if (NOT CMAKE_C_COMPILER)
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang
OUTPUT_VARIABLE CMAKE_C_COMPILER
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using C compiler: ${CMAKE_C_COMPILER}")
endif()
if (NOT CMAKE_CXX_COMPILER)
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang++
OUTPUT_VARIABLE CMAKE_CXX_COMPILER
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using CXX compiler: ${CMAKE_CXX_COMPILER}")
endif()
# Find (Apple's) libtool.
execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find libtool
OUTPUT_VARIABLE IOS_LIBTOOL
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Using libtool: ${IOS_LIBTOOL}")
# Configure libtool to be used instead of ar + ranlib to build static libraries.
# This is required on Xcode 7+, but should also work on previous versions of
# Xcode.
set(CMAKE_C_CREATE_STATIC_LIBRARY
"${IOS_LIBTOOL} -static -o <TARGET> <LINK_FLAGS> <OBJECTS> ")
set(CMAKE_CXX_CREATE_STATIC_LIBRARY
"${IOS_LIBTOOL} -static -o <TARGET> <LINK_FLAGS> <OBJECTS> ")
# Get the version of Darwin (OS X) of the host.
execute_process(COMMAND uname -r
OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Standard settings.
set(CMAKE_SYSTEM_NAME Darwin CACHE INTERNAL "")
set(CMAKE_SYSTEM_VERSION ${IOS_SDK_VERSION} CACHE INTERNAL "")
set(UNIX TRUE CACHE BOOL "")
set(APPLE TRUE CACHE BOOL "")
set(IOS TRUE CACHE BOOL "")
set(CMAKE_AR ar CACHE FILEPATH "" FORCE)
set(CMAKE_RANLIB ranlib CACHE FILEPATH "" FORCE)
# Force unset of OS X-specific deployment target (otherwise autopopulated),
# required as of cmake 2.8.10.
set(CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING
"Must be empty for iOS builds." FORCE)
# Set the architectures for which to build.
set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE STRING "Build architecture for iOS")
# Change the type of target generated for try_compile() so it'll work when cross-compiling
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
# All iOS/Darwin specific settings - some may be redundant.
set(CMAKE_SHARED_LIBRARY_PREFIX "lib")
set(CMAKE_SHARED_LIBRARY_SUFFIX ".dylib")
set(CMAKE_SHARED_MODULE_PREFIX "lib")
set(CMAKE_SHARED_MODULE_SUFFIX ".so")
set(CMAKE_C_COMPILER_ABI ELF)
set(CMAKE_CXX_COMPILER_ABI ELF)
set(CMAKE_C_HAS_ISYSROOT 1)
set(CMAKE_CXX_HAS_ISYSROOT 1)
set(CMAKE_MODULE_EXISTS 1)
set(CMAKE_DL_LIBS "")
set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ")
set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ")
set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}")
set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
if(IOS_ARCH MATCHES "((^|, )(arm64|arm64e|x86_64))+")
set(CMAKE_C_SIZEOF_DATA_PTR 8)
set(CMAKE_CXX_SIZEOF_DATA_PTR 8)
message(STATUS "Using a data_ptr size of 8")
else()
set(CMAKE_C_SIZEOF_DATA_PTR 4)
set(CMAKE_CXX_SIZEOF_DATA_PTR 4)
message(STATUS "Using a data_ptr size of 4")
endif()
message(STATUS "Building for minimum iOS version: ${IOS_DEPLOYMENT_TARGET}"
" (SDK version: ${IOS_SDK_VERSION})")
# Note that only Xcode 7+ supports the newer more specific:
# -m${XCODE_IOS_PLATFORM}-version-min flags, older versions of Xcode use:
# -m(ios/ios-simulator)-version-min instead.
if (IOS_PLATFORM STREQUAL "OS" OR IOS_PLATFORM STREQUAL "OS64")
if (XCODE_VERSION VERSION_LESS 7.0)
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mios-version-min=${IOS_DEPLOYMENT_TARGET}")
else()
# Xcode 7.0+ uses flags we can build directly from XCODE_IOS_PLATFORM.
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
elseif (IOS_PLATFORM STREQUAL "TVOS")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mtvos-version-min=${IOS_DEPLOYMENT_TARGET}")
elseif (IOS_PLATFORM STREQUAL "SIMULATOR_TVOS")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mtvos-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
elseif (IOS_PLATFORM STREQUAL "WATCHOS")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mwatchos-version-min=${IOS_DEPLOYMENT_TARGET}")
elseif (IOS_PLATFORM STREQUAL "SIMULATOR_WATCHOS")
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mwatchos-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
else()
# SIMULATOR or SIMULATOR64 both use -mios-simulator-version-min.
set(XCODE_IOS_PLATFORM_VERSION_FLAGS
"-mios-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
message(STATUS "Version flags set to: ${XCODE_IOS_PLATFORM_VERSION_FLAGS}")
if (ENABLE_BITCODE)
set(BITCODE "-fembed-bitcode")
set(HEADER_PAD "")
message(STATUS "Enabling bitcode support.")
else()
set(BITCODE "")
set(HEADER_PAD "-headerpad_max_install_names")
message(STATUS "Disabling bitcode support.")
endif()
if (ENABLE_ARC)
set(FOBJC_ARC "-fobjc-arc")
message(STATUS "Enabling ARC support.")
else()
set(FOBJC_ARC "-fno-objc-arc")
message(STATUS "Disabling ARC support.")
endif()
if (NOT ENABLE_VISIBILITY)
set(VISIBILITY "-fvisibility=hidden")
message(STATUS "Hiding symbols (-fvisibility=hidden).")
else()
set(VISIBILITY "")
endif()
set(CMAKE_C_FLAGS
"${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${BITCODE} -fobjc-abi-version=2 ${FOBJC_ARC} ${CMAKE_C_FLAGS}")
# Hidden visibilty is required for C++ on iOS.
set(CMAKE_CXX_FLAGS
"${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${BITCODE} ${VISIBILITY} -fvisibility-inlines-hidden -fobjc-abi-version=2 ${FOBJC_ARC} ${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS} -DNDEBUG -Os -ffast-math ${BITCODE} ${CMAKE_CXX_FLAGS_MINSIZEREL}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS} -DNDEBUG -O2 -g -ffast-math ${BITCODE} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -DNDEBUG -O3 -ffast-math ${BITCODE} ${CMAKE_CXX_FLAGS_RELEASE}")
set(CMAKE_C_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}")
set(CMAKE_CXX_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}")
# In order to ensure that the updated compiler flags are used in try_compile()
# tests, we have to forcibly set them in the CMake cache, not merely set them
# in the local scope.
list(APPEND VARS_TO_FORCE_IN_CACHE
CMAKE_C_FLAGS
CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
CMAKE_C_LINK_FLAGS
CMAKE_CXX_LINK_FLAGS)
foreach(VAR_TO_FORCE ${VARS_TO_FORCE_IN_CACHE})
set(${VAR_TO_FORCE} "${${VAR_TO_FORCE}}" CACHE STRING "")
endforeach()
set(CMAKE_PLATFORM_HAS_INSTALLNAME 1)
set (CMAKE_SHARED_LINKER_FLAGS "-rpath @executable_path/Frameworks -rpath @loader_path/Frameworks")
set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib ${HEADER_PAD}")
set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle ${HEADER_PAD}")
set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,")
set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,")
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a")
# Hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old
# build tree (where install_name_tool was hardcoded) and where
# CMAKE_INSTALL_NAME_TOOL isn't in the cache and still cmake didn't fail in
# CMakeFindBinUtils.cmake (because it isn't rerun) hardcode
# CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did
# before, Alex.
if (NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool)
endif (NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
# Set the find root to the iOS developer roots and to user defined paths.
set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_OSX_SYSROOT}
${CMAKE_PREFIX_PATH} CACHE string "iOS find search path root" FORCE)
# Default to searching for frameworks first.
set(CMAKE_FIND_FRAMEWORK FIRST)
# Set up the default search directories for frameworks.
set(CMAKE_SYSTEM_FRAMEWORK_PATH
${CMAKE_OSX_SYSROOT}/System/Library/Frameworks
${CMAKE_OSX_SYSROOT}/System/Library/PrivateFrameworks
${CMAKE_OSX_SYSROOT}/Developer/Library/Frameworks)
# Only search the specified iOS SDK, not the remainder of the host filesystem.
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
# This little macro lets you set any XCode specific property.
macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE XCODE_RELVERSION)
set(XCODE_RELVERSION_I "${XCODE_RELVERSION}")
if (XCODE_RELVERSION_I STREQUAL "All")
set_property(TARGET ${TARGET} PROPERTY
XCODE_ATTRIBUTE_${XCODE_PROPERTY} "${XCODE_VALUE}")
else()
set_property(TARGET ${TARGET} PROPERTY
XCODE_ATTRIBUTE_${XCODE_PROPERTY}[variant=${XCODE_RELVERSION_I}] "${XCODE_VALUE}")
endif()
endmacro(set_xcode_property)
# This macro lets you find executable programs on the host system.
macro(find_host_package)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
set(IOS FALSE)
find_package(${ARGN})
set(IOS TRUE)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
endmacro(find_host_package)

View File

@@ -1,46 +0,0 @@
#!/bin/bash
set -ex
RELEASE_HOME=$(cd $(dirname $0)/../..; pwd)
BUILD_DIR=${RELEASE_HOME}/build/release_android
[[ -d ${BUILD_DIR} ]] && rm -r ${BUILD_DIR}
build() {
arch=$1
NDK_API_LEVEL=$2
mkdir -p ${BUILD_DIR}/${arch}
pushd ${BUILD_DIR}/${arch}
cmake ${RELEASE_HOME} \
-G "Unix Makefiles" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
-DANDROID_TOOLCHAIN=clang \
-DANDROID_ABI=${arch} \
-DANDROID_NATIVE_API_LEVEL=${NDK_API_LEVEL} \
-DANDROID_STL=c++_static \
-DBUILD_CUDA=OFF \
-DBUILD_3RDPARTY_PATH=${BUILD_3RDPARTY_PATH} \
-DOpenCV_DIR=${OPENCV_DIR} \
-DMNN_LIBS=${BUILD_3RDPARTY_PATH}/MNN-2.2.0/android-static/${arch} \
-DMNN_INCLUDE_DIRS=${BUILD_3RDPARTY_PATH}/MNN-2.2.0/android-static/include \
-DYAML_CPP_LIBS=${BUILD_3RDPARTY_PATH}/yaml-cpp/android-static/${arch} \
-DYAML_CPP_INCLUDE_DIRS=${BUILD_3RDPARTY_PATH}/yaml-cpp/android-static/include
# -DNCNN_DIR=${RELEASE_HOME}/3rdparty/ncnn/android/${arch} \
make -j$(nproc)
# ls ${BUILD_DIR}/${arch}| grep -v so| xargs rm -r
#make -j$(nproc) track_tool
popd
}
build arm64-v8a 21
build armeabi-v7a 21
date -R > ${BUILD_DIR}/release_note.txt
cd ${BUILD_DIR}
find . -type f |xargs md5sum >>release_note.txt
cd -
#cp -r ${RELEASE_HOME}/samples/c_api_demo.cpp ${RELEASE_HOME}/release_android
#cp -r ${RELEASE_HOME}/samples/utils ${RELEASE_HOME}/release_android