Update inspireface to 1.2.0

This commit is contained in:
Jingyu
2025-03-25 00:51:26 +08:00
parent 977ea6795b
commit ca64996b84
388 changed files with 28584 additions and 13036 deletions

View File

@@ -4,77 +4,92 @@ project(InspireFaceSample)
option(ISF_BUILD_SAMPLE_CLUTTERED "Whether to compile the cluttered sample program (debug code during development)" OFF)
include_directories(${SRC_DIR})
include_directories(${SRC_DIR}/inspireface/c_api)
if (ISF_ENABLE_RKNN)
set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile/rknn/${ISF_RKNPU_MAJOR}/runtime/${ISF_RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
if (ISF_ENABLE_RKNN AND ISF_RKNPU_MAJOR STREQUAL "rknpu1")
set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${ISF_RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
link_directories(${ISF_RKNN_API_LIB})
set(ext rknn_api dl)
endif ()
add_executable(Leak cpp/leak.cpp)
if (ISF_ENABLE_RKNN AND ISF_RKNPU_MAJOR STREQUAL "rknpu2" AND ISF_RK_COMPILER_TYPE STREQUAL "aarch64")
if(ANDROID)
set(RK_PLATFORM "Android")
else()
set(RK_PLATFORM "Linux")
endif()
set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${RK_PLATFORM}/librknn_api/${ISF_RK_COMPILER_TYPE}/)
message("ISF_RKNN_API_LIB: ${ISF_RKNN_API_LIB}")
link_directories(${ISF_RKNN_API_LIB})
set(ext rknnrt dl)
endif ()
add_executable(Leak api/leak.cpp)
target_link_libraries(Leak InspireFace ${ext})
set_target_properties(Leak PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face detection and tracking
add_executable(FaceTrackSample cpp/sample_face_track.cpp)
# Examples of face detection and tracking
add_executable(FaceTrackSample api/sample_face_track.cpp)
target_link_libraries(FaceTrackSample InspireFace ${ext})
set_target_properties(FaceTrackSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceTrackSampleCost cpp/sample_face_track_cost.cpp)
target_link_libraries(FaceTrackSampleCost InspireFace ${ext})
set_target_properties(FaceTrackSampleCost PROPERTIES
add_executable(FaceTrackBenchmarkSample api/sample_face_track_benchmark.cpp)
target_link_libraries(FaceTrackBenchmarkSample InspireFace ${ext})
set_target_properties(FaceTrackBenchmarkSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face detection and tracking
add_executable(MTFaceTrackSample cpp/sample_face_track_mt.cpp)
target_link_libraries(MTFaceTrackSample InspireFace ${ext})
set_target_properties(MTFaceTrackSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
if(NOT DISABLE_GUI)
# Examples of face detection and tracking
add_executable(FaceTrackVideoSample cpp/sample_face_track_video.cpp)
target_link_libraries(FaceTrackVideoSample InspireFace ${ext})
set_target_properties(FaceTrackVideoSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
endif ()
# Examples of face recognition
add_executable(FaceRecognitionSample cpp/sample_face_recognition.cpp)
target_link_libraries(FaceRecognitionSample InspireFace ${ext})
set_target_properties(FaceRecognitionSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceSearchSample cpp/sample_face_search.cpp)
target_link_libraries(FaceSearchSample InspireFace ${ext})
set_target_properties(FaceSearchSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face recognition
add_executable(FaceComparisonSample cpp/sample_face_comparison.cpp)
add_executable(FaceComparisonSample api/sample_face_comparison.cpp)
target_link_libraries(FaceComparisonSample InspireFace ${ext})
set_target_properties(FaceComparisonSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceFeatureHubSample api/sample_feature_hub.cpp)
target_link_libraries(FaceFeatureHubSample InspireFace ${ext})
set_target_properties(FaceFeatureHubSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face recognition
add_executable(FaceDetect cpp/face_detect.cpp)
target_link_libraries(FaceDetect InspireFace ${ext})
set_target_properties(FaceDetect PROPERTIES
add_executable(FaceLoadReloadSample api/sample_load_reload.cpp)
target_link_libraries(FaceLoadReloadSample InspireFace ${ext})
set_target_properties(FaceLoadReloadSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceTrackerSample source/tracker_sample.cpp)
target_link_libraries(FaceTrackerSample InspireFace ${ext})
set_target_properties(FaceTrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(ExpansionLoadSample source/expansion_load.cpp)
target_link_libraries(ExpansionLoadSample InspireFace ${ext})
set_target_properties(ExpansionLoadSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceTrackPipelineSample source/tracker_pipeline.cpp)
target_link_libraries(FaceTrackPipelineSample InspireFace ${ext})
set_target_properties(FaceTrackPipelineSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FeatureHubSample source/feature_hub_sample.cpp)
target_link_libraries(FeatureHubSample InspireFace ${ext})
set_target_properties(FeatureHubSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(LandmarkSample source/landmark_sample.cpp)
target_link_libraries(LandmarkSample InspireFace ${ext})
set_target_properties(LandmarkSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
@@ -89,6 +104,28 @@ else()
endif ()
if(ISF_RK_DEVICE_TYPE STREQUAL "RV1106")
add_executable(FaceTrackSampleRV1106 rv1106/face_detect.cpp)
target_link_libraries(FaceTrackSampleRV1106 InspireFace ${ext})
set_target_properties(FaceTrackSampleRV1106 PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceAttributeSampleRV1106 rv1106/face_attribute.cpp)
target_link_libraries(FaceAttributeSampleRV1106 InspireFace ${ext})
set_target_properties(FaceAttributeSampleRV1106 PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
endif()
add_executable(NexusImageSample rv1106/rga_image.cpp)
target_link_libraries(NexusImageSample InspireFace ${ext})
set_target_properties(NexusImageSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# These sample programs are debugging and testing code left behind by developers during the development process.
# They are cluttered and have not been organized, or similar functionalities have already been organized in the standard samples.
# You can ignore them.
@@ -225,8 +262,6 @@ if (ISF_BUILD_SAMPLE_CLUTTERED)
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# C_API Demo
add_executable(LoopTracker cluttered/standard/loop_tracker.cpp)
target_link_libraries(LoopTracker InspireFace ${DEPEND})
@@ -235,8 +270,6 @@ if (ISF_BUILD_SAMPLE_CLUTTERED)
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ArchTracker cluttered/standard/archive_tracker.cpp)
target_link_libraries(ArchTracker InspireFace)
@@ -252,17 +285,17 @@ if (ISF_BUILD_SAMPLE_CLUTTERED)
)
endif ()
# Print Message
message(STATUS ">>>>>>>>>>>>>")
message(STATUS "InspireFace Sample:")
message(STATUS "\t ISF_BUILD_SAMPLE_CLUTTERED: ${ISF_BUILD_SAMPLE_CLUTTERED}")
# Install bin
install(TARGETS Leak RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceTrackSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceTrackSampleCost RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS MTFaceTrackSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceRecognitionSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceSearchSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
# install(TARGETS FaceTrackSampleCost RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
# install(TARGETS MTFaceTrackSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
# install(TARGETS FaceRecognitionSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
# install(TARGETS FaceSearchSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceComparisonSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceTrackVideoSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)

View File

@@ -1,6 +1,7 @@
//
// Created by Tunm-Air13 on 2024/4/28.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
int main() {
char *n = new char[1024];

View File

@@ -0,0 +1,137 @@
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include <vector>
#include <inspireface.h>
/**
 * Face comparison sample.
 *
 * Extracts a 512-d feature vector from the first detected face in each of two
 * images, then compares the two vectors with cosine similarity against the
 * SDK-recommended threshold and also reports a percentage conversion.
 *
 * Usage: <pack_path> <img1_path> <img2_path>
 * Exit code: 1 on bad arguments, the SDK error code on any API failure,
 * otherwise 0 (implicit return from main).
 */
int main(int argc, char* argv[]) {
    // Check whether the number of parameters is correct
    if (argc != 4) {
        HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <img1_path> <img2_path>", argv[0]);
        return 1;
    }

    auto packPath = argv[1];
    auto imgPath1 = argv[2];
    auto imgPath2 = argv[3];
    HFLogPrint(HF_LOG_INFO, "Pack file Path: %s", packPath);
    HFLogPrint(HF_LOG_INFO, "Source file Path 1: %s", imgPath1);
    HFLogPrint(HF_LOG_INFO, "Source file Path 2: %s", imgPath2);

    HResult ret;
    // The resource file must be loaded before it can be used
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
        return ret;
    }

    // Create a session with only the recognition module enabled; max 1 face,
    // default (-1) detect pixel level and thread/device parameter
    HOption option = HF_ENABLE_FACE_RECOGNITION;
    HFSession session;
    ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create session error: %d", ret);
        return ret;
    }

    // One 512-float buffer per image; HFFaceFeatureExtractCpy copies into these
    std::vector<char*> twoImg = {imgPath1, imgPath2};
    std::vector<std::vector<float>> vec(2, std::vector<float>(512));
    for (int i = 0; i < twoImg.size(); ++i) {
        HFImageBitmap imageBitmap = {0};
        ret = HFCreateImageBitmapFromFilePath(twoImg[i], 3, &imageBitmap);
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "Create image bitmap error: %d", ret);
            return ret;
        }
        // Prepare image data for processing
        HFImageStream stream;
        ret = HFCreateImageStreamFromImageBitmap(imageBitmap, HF_CAMERA_ROTATION_0, &stream);  // Create an image stream for processing
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "Create stream error: %d", ret);
            return ret;
        }
        // Execute face tracking on the image
        HFMultipleFaceData multipleFaceData = {0};
        ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);  // Track faces in the image
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "Run face track error: %d", ret);
            return ret;
        }
        if (multipleFaceData.detectedNum == 0) {  // Check if any faces were detected
            HFLogPrint(HF_LOG_ERROR, "No face was detected: %s", twoImg[i]);
            // NOTE(review): ret is HSUCCEED here, so this exits with code 0 — confirm intended
            return ret;
        }
        // Extract facial features from the first detected face, an interface that uses copy features in a comparison scenario
        ret = HFFaceFeatureExtractCpy(session, stream, multipleFaceData.tokens[0], vec[i].data());  // Extract features
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "Extract feature error: %d", ret);
            return ret;
        }
        // Release per-image resources before the next iteration
        ret = HFReleaseImageStream(stream);
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
        }
        ret = HFReleaseImageBitmap(imageBitmap);
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "Release image bitmap error: %d", ret);
            return ret;
        }
    }

    // Wrap the first extracted vector as an SDK feature struct
    HFFaceFeature feature1 = {0};
    feature1.data = vec[0].data();
    feature1.size = vec[0].size();
    // Wrap the second extracted vector
    HFFaceFeature feature2 = {0};
    feature2.data = vec[1].data();
    feature2.size = vec[1].size();

    // Run comparison (cosine similarity)
    HFloat similarity;
    ret = HFFaceComparison(feature1, feature2, &similarity);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Feature comparison error: %d", ret);
        return ret;
    }
    // The SDK supplies a model-specific decision threshold
    HFloat recommended_cosine_threshold;
    ret = HFGetRecommendedCosineThreshold(&recommended_cosine_threshold);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Get recommended cosine threshold error: %d", ret);
        return ret;
    }
    if (similarity > recommended_cosine_threshold) {
        HFLogPrint(HF_LOG_INFO, "%.3f > %.3f ✓ Same face", similarity, recommended_cosine_threshold);
    } else {
        HFLogPrint(HF_LOG_WARN, "%.3f < %.3f ✗ Different face", similarity, recommended_cosine_threshold);
    }
    HFLogPrint(HF_LOG_INFO, "Similarity score: %.3f", similarity);

    // Convert cosine similarity to percentage similarity.
    // Note: conversion parameters are not optimal and should be adjusted based on your specific use case.
    HFloat percentage;
    ret = HFCosineSimilarityConvertToPercentage(similarity, &percentage);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Convert similarity to percentage error: %d", ret);
        return ret;
    }
    HFLogPrint(HF_LOG_INFO, "Percentage similarity: %f", percentage);

    // The memory must be freed at the end of the program
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
        return ret;
    }
}

View File

@@ -0,0 +1,228 @@
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <cstdlib>
#include <iostream>
#include <vector>

#include <inspireface.h>
/**
 * Face detection/tracking sample.
 *
 * Detects faces in a single image, draws boxes, dense landmarks and five key
 * points into a copy of the image ("draw_detected.jpg"), then runs the
 * quality / mask / liveness pipeline and prints the per-face results.
 *
 * Usage: <pack_path> <source_path> [rotation]   (rotation in {0,90,180,270})
 * Exit code: 1 on bad arguments, -1 or the SDK error code on API failure,
 * otherwise 0.
 */
int main(int argc, char* argv[]) {
    // Check whether the number of parameters is correct
    if (argc < 3 || argc > 4) {
        HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <source_path> [rotation]", argv[0]);
        return 1;
    }
    auto packPath = argv[1];
    auto sourcePath = argv[2];
    int rotation = 0;
    // If rotation is provided, check and set the value
    if (argc == 4) {
        rotation = std::atoi(argv[3]);
        if (rotation != 0 && rotation != 90 && rotation != 180 && rotation != 270) {
            HFLogPrint(HF_LOG_ERROR, "Invalid rotation value. Allowed values are 0, 90, 180, 270.");
            return 1;
        }
    }
    // Map the numeric rotation onto the SDK enum
    HFRotation rotation_enum;
    switch (rotation) {
        case 90:
            rotation_enum = HF_CAMERA_ROTATION_90;
            break;
        case 180:
            rotation_enum = HF_CAMERA_ROTATION_180;
            break;
        case 270:
            rotation_enum = HF_CAMERA_ROTATION_270;
            break;
        case 0:
        default:
            rotation_enum = HF_CAMERA_ROTATION_0;
            break;
    }
    HFLogPrint(HF_LOG_INFO, "Pack file Path: %s", packPath);
    HFLogPrint(HF_LOG_INFO, "Source file Path: %s", sourcePath);
    HFLogPrint(HF_LOG_INFO, "Rotation: %d", rotation);
    HFSetLogLevel(HF_LOG_INFO);

    HResult ret;
    // The resource file must be loaded before it can be used
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
        return ret;
    }

    // Enable the functions in the pipeline: mask detection, live detection,
    // face quality detection, plus landmark-enabled detection mode
    HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK;
    // Non-video or frame sequence mode uses IMAGE-MODE, which is always face
    // detection without tracking
    HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
    // Maximum number of faces detected
    HInt32 maxDetectNum = 20;
    // Face detection image input level
    HInt32 detectPixelLevel = 160;
    // Handle of the current face SDK algorithm context
    HFSession session = {0};
    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create FaceContext error: %d", ret);
        return ret;
    }
    HFSessionSetTrackPreviewSize(session, detectPixelLevel);
    HFSessionSetFilterMinimumFacePixelSize(session, 4);

    // Load an image from disk (3 channels)
    HFImageBitmap image;
    ret = HFCreateImageBitmapFromFilePath(sourcePath, 3, &image);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "The source entered is not a picture or read error.");
        return ret;
    }
    // Prepare an image parameter structure for configuration
    HFImageStream imageHandle = {0};
    ret = HFCreateImageStreamFromImageBitmap(image, rotation_enum, &imageHandle);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Create ImageStream error: %d", ret);
        return ret;
    }

    // Execute HF_FaceContextRunFaceTrack captures face information in an image
    HFMultipleFaceData multipleFaceData = {0};
    ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Execute HFExecuteFaceTrack error: %d", ret);
        return ret;
    }
    // Print the number of faces detected
    auto faceNum = multipleFaceData.detectedNum;
    HFLogPrint(HF_LOG_INFO, "Num of face: %d", faceNum);

    // Copy a new image to draw on, leaving the original untouched
    HFImageBitmap drawImage = {0};
    ret = HFImageBitmapCopy(image, &drawImage);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Copy ImageBitmap error: %d", ret);
        return ret;
    }
    HFImageBitmapData data;
    ret = HFImageBitmapGetData(drawImage, &data);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Get ImageBitmap data error: %d", ret);
        return ret;
    }

    for (int index = 0; index < faceNum; ++index) {
        HFLogPrint(HF_LOG_INFO, "========================================");
        HFLogPrint(HF_LOG_INFO, "Token size: %d", multipleFaceData.tokens[index].size);
        HFLogPrint(HF_LOG_INFO, "Process face index: %d", index);
        HFLogPrint(HF_LOG_INFO, "DetConfidence: %f", multipleFaceData.detConfidence[index]);
        HFImageBitmapDrawRect(drawImage, multipleFaceData.rects[index], {0, 100, 255}, 4);
        // Print FaceID, In IMAGE-MODE it is changing, in VIDEO-MODE it is fixed, but it may be lost
        HFLogPrint(HF_LOG_INFO, "FaceID: %d", multipleFaceData.trackIds[index]);
        // Print Head euler angle, It can often be used to judge the quality of a face by the Angle
        // of the head
        HFLogPrint(HF_LOG_INFO, "Roll: %f, Yaw: %f, Pitch: %f", multipleFaceData.angles.roll[index], multipleFaceData.angles.yaw[index],
                   multipleFaceData.angles.pitch[index]);
        HInt32 numOfLmk;
        HFGetNumOfFaceDenseLandmark(&numOfLmk);
        // Standard C++ forbids variable-length arrays; use a vector sized at runtime instead
        std::vector<HPoint2f> denseLandmarkPoints(numOfLmk);
        ret = HFGetFaceDenseLandmarkFromFaceToken(multipleFaceData.tokens[index], denseLandmarkPoints.data(), numOfLmk);
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "HFGetFaceDenseLandmarkFromFaceToken error!!");
            return -1;
        }
        for (HInt32 i = 0; i < numOfLmk; i++) {
            HFImageBitmapDrawCircleF(drawImage, {denseLandmarkPoints[i].x, denseLandmarkPoints[i].y}, 0, {100, 100, 0}, 2);
        }
        // Relative face area in the image (bounding box over image area)
        auto& rt = multipleFaceData.rects[index];
        float area = ((float)(rt.height * rt.width)) / (data.width * data.height);
        HFLogPrint(HF_LOG_INFO, "area: %f", area);
        HPoint2f fiveKeyPoints[5];
        ret = HFGetFaceFiveKeyPointsFromFaceToken(multipleFaceData.tokens[index], fiveKeyPoints, 5);
        if (ret != HSUCCEED) {
            HFLogPrint(HF_LOG_ERROR, "HFGetFaceFiveKeyPointsFromFaceToken error!!");
            return -1;
        }
        for (int i = 0; i < 5; i++) {
            HFImageBitmapDrawCircleF(drawImage, {fiveKeyPoints[i].x, fiveKeyPoints[i].y}, 0, {0, 0, 232}, 2);
        }
    }
    HFImageBitmapWriteToFile(drawImage, "draw_detected.jpg");
    HFLogPrint(HF_LOG_WARN, "Write to file success: %s", "draw_detected.jpg");

    // Run pipeline function
    // Select the pipeline function that you want to execute, provided that it is already enabled
    // when FaceContext is created!
    auto pipelineOption = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
    // In this call, all detected faces are processed
    ret = HFMultipleFacePipelineProcessOptional(session, imageHandle, &multipleFaceData, pipelineOption);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Execute Pipeline error: %d", ret);
        return ret;
    }
    // Get mask detection results from the pipeline cache
    HFFaceMaskConfidence maskConfidence = {0};
    ret = HFGetFaceMaskConfidence(session, &maskConfidence);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Get mask detect result error: %d", ret);
        return -1;
    }
    // Get face quality results from the pipeline cache
    HFFaceQualityConfidence qualityConfidence = {0};
    ret = HFGetFaceQualityConfidence(session, &qualityConfidence);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Get face quality result error: %d", ret);
        return -1;
    }
    for (int index = 0; index < faceNum; ++index) {
        HFLogPrint(HF_LOG_INFO, "========================================");
        HFLogPrint(HF_LOG_INFO, "Process face index from pipeline: %d", index);
        HFLogPrint(HF_LOG_INFO, "Mask detect result: %f", maskConfidence.confidence[index]);
        HFLogPrint(HF_LOG_INFO, "Quality predict result: %f", qualityConfidence.confidence[index]);
        // We set the threshold of wearing a mask as 0.85. If it exceeds the threshold, it will be
        // judged as wearing a mask. The threshold can be adjusted according to the scene
        if (maskConfidence.confidence[index] > 0.85) {
            HFLogPrint(HF_LOG_INFO, "Mask");
        } else {
            HFLogPrint(HF_LOG_INFO, "Non Mask");
        }
    }

    ret = HFReleaseImageStream(imageHandle);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
    }
    // The memory must be freed at the end of the program
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
        return ret;
    }
    ret = HFReleaseImageBitmap(image);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release image bitmap error: %d", ret);
        return ret;
    }
    ret = HFReleaseImageBitmap(drawImage);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Release draw image bitmap error: %d", ret);
        return ret;
    }
    return 0;
}

View File

@@ -0,0 +1,130 @@
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include <inspireface.h>
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc < 3 || argc > 4) {
HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <source_path> [rotation]", argv[0]);
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
int rotation = 0;
// If rotation is provided, check and set the value
if (argc == 4) {
rotation = std::atoi(argv[3]);
if (rotation != 0 && rotation != 90 && rotation != 180 && rotation != 270) {
HFLogPrint(HF_LOG_ERROR, "Invalid rotation value. Allowed values are 0, 90, 180, 270.");
return 1;
}
}
HFRotation rotation_enum;
// Set rotation based on input parameter
switch (rotation) {
case 90:
rotation_enum = HF_CAMERA_ROTATION_90;
break;
case 180:
rotation_enum = HF_CAMERA_ROTATION_180;
break;
case 270:
rotation_enum = HF_CAMERA_ROTATION_270;
break;
case 0:
default:
rotation_enum = HF_CAMERA_ROTATION_0;
break;
}
HFLogPrint(HF_LOG_INFO, "Pack file Path: %s", packPath);
HFLogPrint(HF_LOG_INFO, "Source file Path: %s", sourcePath);
HFLogPrint(HF_LOG_INFO, "Rotation: %d", rotation);
HFSetLogLevel(HF_LOG_INFO);
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality
// detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
// tracking
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
HInt32 maxDetectNum = 20;
// Face detection image input level
HInt32 detectPixelLevel = 160;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create FaceContext error: %d", ret);
return ret;
}
HFSessionSetTrackPreviewSize(session, detectPixelLevel);
HFSessionSetFilterMinimumFacePixelSize(session, 4);
// Load a image
HFImageBitmap image;
ret = HFCreateImageBitmapFromFilePath(sourcePath, 3, &image);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "The source entered is not a picture or read error.");
return ret;
}
// Prepare an image parameter structure for configuration
HFImageStream imageHandle = {0};
ret = HFCreateImageStreamFromImageBitmap(image, rotation_enum, &imageHandle);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create ImageStream error: %d", ret);
return ret;
}
int loop = 100;
// Enable the cost spend
HFSessionSetEnableTrackCostSpend(session, 1);
// Execute HF_FaceContextRunFaceTrack captures face information in an image
HFMultipleFaceData multipleFaceData = {0};
for (int i = 0; i < loop; i++) {
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Execute HFExecuteFaceTrack error: %d", ret);
return ret;
}
}
HFLogPrint(HF_LOG_INFO, "Number of Detection: %d", multipleFaceData.detectedNum);
HFSessionPrintTrackCostSpend(session);
ret = HFReleaseImageStream(imageHandle);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
}
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
return ret;
}
ret = HFReleaseImageBitmap(image);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release image bitmap error: %d", ret);
return ret;
}
return 0;
}

View File

@@ -0,0 +1,104 @@
#include <iostream>
#include <inspireface.h>
#include <vector>
static std::vector<float> FT = {
0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963,
-0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179,
0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961,
-0.0675362, -0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942,
-0.00625798, 0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462,
0.0465634, 0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792,
-0.102007, 0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103,
-0.00694243, 0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638,
0.00682446, 0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823,
0.0511028, -0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569,
-0.0225608, -0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047,
0.056139, -0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856, 0.0706566, 0.00640248, 0.0418103, -0.00597861,
0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963, -0.00683422, -0.0338589, 0.0533989, -0.0371725,
0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179, 0.0164787, -0.00732871, -0.0458209, -0.0100137,
-0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961, -0.0675362, -0.0117453, 0.0658745, -0.0694139,
-0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942, -0.00625798, 0.00748682, 0.0533557, 0.0314002,
-0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462, 0.0465634, 0.0627244, 0.0449248, -0.037054,
-0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792, -0.102007, 0.0649219, 0.0630833, 0.0421069,
0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103, -0.00694243, 0.0366775, 0.0672882, 0.0122419,
-0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638, 0.00682446, 0.0262906, -0.0407654, -0.0144264,
-0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823, 0.0511028, -0.0310699, -0.0322141, 0.00996936,
0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569, -0.0225608, -0.0332198, 0.00102291, 0.108814,
-0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047, 0.056139, -0.0166783, -0.0803042, -0.014245,
-0.0476374, 0.048495, 0.0378856, 0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162,
-0.0080779, -0.0550556, 0.0229963, -0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743,
-0.0128782, 0.0935529, 0.0588179, 0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471,
-0.00481095, 0.0266868, 0.0712961, -0.0675362, -0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902,
0.00192449, -0.0593105, 0.0191942, -0.00625798, 0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575,
-0.0180252, 0.0150318, -0.0686462, 0.0465634, 0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842,
-0.0161973, 0.0319588, 0.0112792, -0.102007, 0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046,
0.071994, -0.0272229, 0.0167103, -0.00694243, 0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025,
0.000983093, -0.00776073, -0.0268638, 0.00682446, 0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019,
0.000502882, 0.0496892, 0.0126823, 0.0511028, -0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467,
0.0419618, -0.00358901, -0.0309569, -0.0225608, -0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584,
0.0721224, -0.0795082, 0.0340047, 0.056139, -0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856, 0.0706566,
0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963, -0.00683422,
-0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179, 0.0164787,
-0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961, -0.0675362,
-0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942, -0.00625798,
0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462, 0.0465634,
0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792, -0.102007,
0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103, -0.00694243,
0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638, 0.00682446,
0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823, 0.0511028,
-0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569, -0.0225608,
-0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047, 0.056139,
-0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856,
};
/**
 * FeatureHub sample: enables the feature hub with a persistent SQLite store,
 * inserts one pre-computed 512-d embedding (FT), then searches for the same
 * vector and reports the matched id.
 *
 * Exit code: the SDK error code on launch/enable/insert failure, otherwise 0
 * (a failed search is only logged, matching the original best-effort intent).
 */
int main() {
    HResult ret;
    // The resource file must be loaded before it can be used
    ret = HFLaunchInspireFace("test_res/pack/Pikachu");
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
        return ret;
    }
    // Zero-initialize so any configuration fields not explicitly set below are
    // deterministic (matches the `= {0}` convention used for other SDK structs)
    HFFeatureHubConfiguration configuration = {0};
    configuration.primaryKeyMode = HF_PK_AUTO_INCREMENT;
    configuration.enablePersistence = 1;
    configuration.persistenceDbPath = "test.db";
    configuration.searchMode = HF_SEARCH_MODE_EXHAUSTIVE;
    configuration.searchThreshold = 0.48f;
    ret = HFFeatureHubDataEnable(configuration);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Enable feature hub error: %d", ret);
        return ret;
    }
    // Insert the reference embedding; the hub assigns an auto-increment id
    int64_t result_id = 0;
    HFFaceFeature feature = {0};
    feature.data = FT.data();
    feature.size = FT.size();
    HFFaceFeatureIdentity identity = {0};
    identity.feature = &feature;
    ret = HFFeatureHubInsertFeature(identity, &result_id);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Insert feature error: %d", ret);
        return ret;
    }
    // Query with the identical vector; it should match the inserted entry
    HFFaceFeature query_feature = {0};
    query_feature.data = FT.data();
    query_feature.size = FT.size();
    HFloat confidence;
    HFFaceFeatureIdentity search_result = {0};
    ret = HFFeatureHubFaceSearch(query_feature, &confidence, &search_result);
    if (ret != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Search feature error: %d", ret);
    } else {
        // Ids are 64-bit (see result_id above); cast to a fixed width so the
        // format specifier is correct regardless of the platform's int size
        HFLogPrint(HF_LOG_INFO, "Search face feature success, result_id: %lld", (long long)search_result.id);
    }
    return 0;
}

View File

@@ -0,0 +1,20 @@
#include <iostream>
#include <inspireface.h>
int main() {
std::string resourcePath = "test_res/pack/Pikachu";
HResult ret = HFReloadInspireFace(resourcePath.c_str());
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Failed to launch InspireFace: %d", ret);
return 1;
}
// Switch to another resource
ret = HFReloadInspireFace("test_res/pack/Megatron");
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Failed to reload InspireFace: %d", ret);
return 1;
}
return 0;
}

View File

@@ -1,11 +1,12 @@
//
// Created by tunm on 2023/9/23.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "middleware/inference_helper/customized/rknn_adapter.h"
#include "middleware/inference_wrapper/customized/rknn_adapter.h"
#include "inspireface/feature_hub/simd.h"
#include <memory>
#include "inspireface/recognition_module/extract/extract.h"
@@ -15,16 +16,16 @@ using namespace inspire;
int main() {
std::vector<std::string> names = {
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
};
InspireArchive loader("test_res/pack/test_zip_rec");
{
InspireModel model;
loader.LoadModel("feature", model);
auto net = std::make_shared<RKNNAdapter>();
net->Initialize((unsigned char* )model.buffer, model.bufferSize);
net->Initialize((unsigned char *)model.buffer, model.bufferSize);
net->setOutputsWantFloat(1);
EmbeddedList list;
@@ -37,27 +38,26 @@ int main() {
auto out = net->GetOutputData(0);
auto dims = net->GetOutputTensorSize(0);
// for (int i = 0; i < dims.size(); ++i) {
// LOGD("%lu", dims[i]);
// }
//
for (int i = 0; i < 512; ++i) {
std::cout << out[i] << ", ";
}
std::cout << std::endl;
// for (int i = 0; i < dims.size(); ++i) {
// LOGD("%lu", dims[i]);
// }
//
for (int i = 0; i < 512; ++i) {
std::cout << out[i] << ", ";
}
std::cout << std::endl;
Embedded emb;
for (int j = 0; j < 512; ++j) {
emb.push_back(out[j]);
}
list.push_back(emb);
}
for (int i = 0; i < list.size(); ++i) {
auto &embedded = list[i];
float mse = 0.0f;
for (const auto &one: embedded) {
for (const auto &one : embedded) {
mse += one * one;
}
mse = sqrt(mse);
@@ -76,24 +76,26 @@ int main() {
Configurable param;
param.set<int>("model_index", 0);
param.set<std::string>("input_layer", "input");
param.set<std::vector<std::string>>("outputs_layers", {"267", });
param.set<std::vector<std::string>>("outputs_layers", {
"267",
});
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
param.set<bool>("swap_color", true); // RK requires rgb input
param.set<bool>("swap_color", true); // RK requires rgb input
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader.LoadModel("feature", model);
m_extract_->loadData(model, InferenceHelper::kRknn);
m_extract_->loadData(model, InferenceWrapper::INFER_RKNN);
cv::Mat image = cv::imread(names[0]);
// cv::Mat rgb;
// cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
// cv::Mat rgb;
// cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
auto feat = m_extract_->GetFaceFeature(image);
for (int i = 0; i < 512; ++i) {
std::cout << feat[i] << ", ";
@@ -101,7 +103,6 @@ int main() {
std::cout << std::endl;
}
LOGD("End");
return 0;

View File

@@ -1,8 +1,9 @@
//
// Created by Tunm-Air13 on 2023/9/20.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include "opencv2/opencv.hpp"
//#include "inspireface/middleware/model_loader/model_loader.h"
// #include "inspireface/middleware/model_loader/model_loader.h"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/middleware/costman.h"
@@ -30,8 +31,7 @@ int main() {
std::shared_ptr<FaceDetect> m_face_detector_;
m_face_detector_ = std::make_shared<FaceDetect>(320);
m_face_detector_->loadData(model, InferenceHelper::kRknn);
m_face_detector_->loadData(model, InferenceWrapper::INFER_RKNN);
// Load a image
cv::Mat image = cv::imread("test_res/images/face_sample.png");
@@ -42,11 +42,10 @@ int main() {
LOGD("Faces: %ld", locs.size());
for (auto &loc: locs) {
for (auto &loc : locs) {
cv::rectangle(image, cv::Point2f(loc.x1, loc.y1), cv::Point2f(loc.x2, loc.y2), cv::Scalar(0, 0, 255), 3);
}
cv::imwrite("det.jpg", image);
return 0;
}

View File

@@ -1,7 +1,7 @@
//
// Created by tunm on 2023/9/21.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include "opencv2/opencv.hpp"
@@ -15,40 +15,41 @@ using namespace inspire;
std::shared_ptr<InspireArchive> loader;
void rec_function() {
std::shared_ptr<Extract> m_extract_;
Configurable param;
// param.set<int>("model_index", ModelIndex::_03_extract);
// param.set<int>("model_index", ModelIndex::_03_extract);
param.set<std::string>("input_layer", "input");
param.set<std::vector<std::string>>("outputs_layers", {"267", });
param.set<std::vector<std::string>>("outputs_layers", {
"267",
});
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
param.set<bool>("swap_color", true); // RK requires rgb input
param.set<bool>("swap_color", true); // RK requires rgb input
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader->LoadModel("feature", model);
m_extract_->loadData(model, InferenceHelper::kRknn);
m_extract_->loadData(model, InferenceWrapper::INFER_RKNN);
loader.reset();
std::vector<std::string> files = {
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
};
EmbeddedList embedded_list;
for (int i = 0; i < files.size(); ++i) {
auto warped = cv::imread(files[i]);
Timer timer;
auto emb = (*m_extract_)(warped);
LOGD("耗时: %f", timer.GetCostTimeUpdate());
LOGD("cost: %f", timer.GetCostTimeUpdate());
embedded_list.push_back(emb);
LOGD("%lu", emb.size());
}
@@ -63,26 +64,21 @@ void rec_function() {
LOGD("0 vs 2 : %f", _0v2);
LOGD("1 vs 2 : %f", _1v2);
// LOGD("size: %lu", embedded_list.size());
// LOGD("num of vector: %lu", embedded_list[2].size());
//
// float _0v1 = simd_dot(embedded_list[0].data(), embedded_list[1].data(), 512);
// float _0v2 = simd_dot(embedded_list[0].data(), embedded_list[2].data(), 512);
// float _1v2 = simd_dot(embedded_list[1].data(), embedded_list[2].data(), 512);
// LOGD("0 vs 1 : %f", _0v1);
// LOGD("0 vs 2 : %f", _0v2);
// LOGD("1 vs 2 : %f", _1v2);
// LOGD("size: %lu", embedded_list.size());
// LOGD("num of vector: %lu", embedded_list[2].size());
//
// float _0v1 = simd_dot(embedded_list[0].data(), embedded_list[1].data(), 512);
// float _0v2 = simd_dot(embedded_list[0].data(), embedded_list[2].data(), 512);
// float _1v2 = simd_dot(embedded_list[1].data(), embedded_list[2].data(), 512);
// LOGD("0 vs 1 : %f", _0v1);
// LOGD("0 vs 2 : %f", _0v2);
// LOGD("1 vs 2 : %f", _1v2);
}
int main() {
loader = std::make_shared<InspireArchive>();
loader->ReLoad("test_res/pack/Gundam_RV1109");
rec_function();
return 0;

View File

@@ -1,6 +1,7 @@
//
// Created by Tunm-Air13 on 2023/9/21.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include "opencv2/opencv.hpp"
#include "inspireface/track_module/face_detect/all.h"
@@ -16,26 +17,25 @@ using namespace inspire;
InspireArchive loader;
void test_rnet() {
std::shared_ptr<RNet> m_rnet_;
Configurable param;
// param.set<int>("model_index", ModelIndex::_04_refine_net);
// param.set<int>("model_index", ModelIndex::_04_refine_net);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"conv5-1/Softmax", "conv5-2/BiasAdd"});
param.set<std::vector<int>>("input_size", {24, 24});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
InspireModel model;
loader.LoadModel("refine_net", model);
m_rnet_ = std::make_shared<RNet>();
m_rnet_->loadData(model, InferenceHelper::kRknn);
m_rnet_->loadData(model, InferenceWrapper::INFER_RKNN);
{
// Load a image
@@ -56,28 +56,29 @@ void test_rnet() {
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("non face: %f", score);
}
}
void test_mask() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_05_mask);
// param.set<int>("model_index", ModelIndex::_05_mask);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"activation_1/Softmax",});
param.set<std::vector<std::string>>("outputs_layers", {
"activation_1/Softmax",
});
param.set<std::vector<int>>("input_size", {96, 96});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<MaskPredict> m_mask_predict_;
m_mask_predict_ = std::make_shared<MaskPredict>();
InspireModel model;
loader.LoadModel("mask_detect", model);
m_mask_predict_->loadData(model, InferenceHelper::kRknn);
m_mask_predict_->loadData(model, InferenceWrapper::INFER_RKNN);
{
// Load a image
@@ -98,32 +99,33 @@ void test_mask() {
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("maskless: %f", score);
}
}
void test_quality() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_07_pose_q_fp16);
// param.set<int>("model_index", ModelIndex::_07_pose_q_fp16);
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"fc1", });
param.set<std::vector<std::string>>("outputs_layers", {
"fc1",
});
param.set<std::vector<int>>("input_size", {96, 96});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<FacePoseQuality> m_face_quality_;
m_face_quality_ = std::make_shared<FacePoseQuality>();
InspireModel model;
loader.LoadModel("pose_quality", model);
m_face_quality_->loadData(model, InferenceHelper::kRknn);
m_face_quality_->loadData(model, InferenceWrapper::INFER_RKNN);
{
std::vector<std::string> names = {
"test_res/images/test_data/p3.jpg",
// "test_res/images/test_data/p1.jpg",
"test_res/images/test_data/p3.jpg",
// "test_res/images/test_data/p1.jpg",
};
for (int i = 0; i < names.size(); ++i) {
LOGD("Image: %s", names[i].c_str());
@@ -131,9 +133,9 @@ void test_quality() {
Timer timer;
auto pose_res = (*m_face_quality_)(image);
LOGD("质量cost: %f", timer.GetCostTimeUpdate());
LOGD("quality cost: %f", timer.GetCostTimeUpdate());
for (auto &p: pose_res.lmk) {
for (auto &p : pose_res.lmk) {
cv::circle(image, p, 0, cv::Scalar(0, 0, 255), 2);
}
cv::imwrite("pose.jpg", image);
@@ -141,22 +143,21 @@ void test_quality() {
LOGD("yam: %f", pose_res.yaw);
LOGD("roll: %f", pose_res.roll);
for (auto q: pose_res.lmk_quality) {
for (auto q : pose_res.lmk_quality) {
std::cout << q << ", ";
}
std::cout << std::endl;
}
}
}
void test_landmark_mnn() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_01_lmk);
// param.set<int>("model_index", ModelIndex::_01_lmk);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"prelu1/add", });
param.set<std::vector<std::string>>("outputs_layers", {
"prelu1/add",
});
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {127.5f, 127.5f, 127.5f});
param.set<std::vector<float>>("norm", {0.0078125f, 0.0078125f, 0.0078125f});
@@ -184,30 +185,28 @@ void test_landmark_mnn() {
}
cv::imwrite("lmk.jpg", image);
}
void test_landmark() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_01_lmk);
// param.set<int>("model_index", ModelIndex::_01_lmk);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"prelu1/add", });
param.set<std::vector<std::string>>("outputs_layers", {
"prelu1/add",
});
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<FaceLandmark> m_landmark_predictor_;
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model, InferenceHelper::kRknn);
m_landmark_predictor_->loadData(model, InferenceWrapper::INFER_RKNN);
cv::Mat image = cv::imread("test_res/images/test_data/0.jpg");
cv::resize(image, image, cv::Size(112, 112));
@@ -226,24 +225,22 @@ void test_landmark() {
}
cv::imwrite("lmk.jpg", image);
}
void test_liveness() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_06_msafa27);
// param.set<int>("model_index", ModelIndex::_06_msafa27);
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"556",});
param.set<std::vector<std::string>>("outputs_layers", {
"556",
});
param.set<std::vector<int>>("input_size", {80, 80});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", false); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("swap_color", false); // RGB mode
param.set<int>("data_type", InputTensorInfo::DataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::TensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::TensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<RBGAntiSpoofing> m_rgb_anti_spoofing_;
@@ -251,15 +248,11 @@ void test_liveness() {
InspireModel model;
loader.LoadModel("rgb_anti_spoofing", model);
m_rgb_anti_spoofing_ = std::make_shared<RBGAntiSpoofing>(80, true);
m_rgb_anti_spoofing_->loadData(model, InferenceHelper::kRknn);
m_rgb_anti_spoofing_->loadData(model, InferenceWrapper::INFER_RKNN);
std::vector<std::string> names = {
"test_res/images/test_data/real.jpg",
"test_res/images/test_data/fake.jpg",
"test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg",
"test_res/images/test_data/w.jpg",
"test_res/images/test_data/w2.jpg",
"test_res/images/test_data/real.jpg", "test_res/images/test_data/fake.jpg", "test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg", "test_res/images/test_data/w.jpg", "test_res/images/test_data/w2.jpg",
};
for (int i = 0; i < names.size(); ++i) {
@@ -269,7 +262,6 @@ void test_liveness() {
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("%s : %f", names[i].c_str(), score);
}
}
int test_liveness_ctx() {
@@ -278,13 +270,9 @@ int test_liveness_ctx() {
FaceContext ctx;
ctx.Configuration("test_res/pack/Gundam_RV1109", inspire::DETECT_MODE_IMAGE, 3, parameter);
std::vector<std::string> names = {
"test_res/images/test_data/real.jpg",
"test_res/images/test_data/fake.jpg",
"test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg",
"test_res/images/test_data/w.jpg",
"test_res/images/test_data/w2.jpg",
"test_res/images/test_data/bb.png",
"test_res/images/test_data/real.jpg", "test_res/images/test_data/fake.jpg", "test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg", "test_res/images/test_data/w.jpg", "test_res/images/test_data/w2.jpg",
"test_res/images/test_data/bb.png",
};
for (int i = 0; i < names.size(); ++i) {
@@ -293,22 +281,21 @@ int test_liveness_ctx() {
LOGD("%s : %f", names[i].c_str(), score);
}
return 0;
}
int main() {
loader.ReLoad("test_res/pack/Gundam_RV1109");
// test_rnet();
// test_rnet();
// test_mask();
// test_mask();
// test_quality();
// test_quality();
// test_landmark_mnn();
// test_landmark_mnn();
// test_landmark();
// test_landmark();
test_liveness();
test_liveness_ctx();

View File

@@ -1,6 +1,7 @@
//
// Created by Tunm-Air13 on 2023/9/22.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include "opencv2/opencv.hpp"
@@ -12,18 +13,14 @@ using namespace inspire;
int main() {
FaceContext ctx;
CustomPipelineParameter param;
int32_t ret = ctx.Configuration(
"test_res/pack/Gundam_RV1109",
DetectMode::DETECT_MODE_VIDEO,
3,
param);
int32_t ret = ctx.Configuration("test_res/pack/Gundam_RV1109", DetectMode::DETECT_MODE_VIDEO, 3, param);
if (ret != HSUCCEED) {
LOGE("Initiate error");
}
cv::Mat frame;
std::string imageFolder = "test_res/video_frames/";
// auto video_frame_num = 10;
// auto video_frame_num = 10;
auto video_frame_num = 288;
for (int i = 0; i < video_frame_num; ++i) {
auto index = i + 1;
@@ -45,7 +42,7 @@ int main() {
LOGD("track id: %d", ctx.GetTrackingFaceList()[0].GetTrackingId());
auto &face = ctx.GetTrackingFaceList()[0];
for (auto &p: face.landmark_) {
for (auto &p : face.landmark_) {
cv::circle(frame, p, 0, cv::Scalar(0, 0, 255), 3);
}
@@ -55,7 +52,8 @@ int main() {
cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
std::string text = "ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count) + " Cf: " + std::to_string(face.GetConfidence());
std::string text =
"ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count) + " Cf: " + std::to_string(face.GetConfidence());
cv::Point text_position(rect.x, rect.y - 10);
int font_face = cv::FONT_HERSHEY_SIMPLEX;
@@ -70,7 +68,5 @@ int main() {
cv::imwrite(saveFile.str(), frame);
}
return 0;
}

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2024/4/6.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "track_module/face_track.h"
#include "inspireface/feature_hub/face_recognition.h"
@@ -8,7 +9,7 @@
#include "track_module/face_track.h"
#include "pipeline_module/face_pipeline.h"
#include "inspireface/feature_hub/face_recognition.h"
#include "middleware/inference_helper/customized/rknn_adapter.h"
#include "middleware/inference_wrapper/customized/rknn_adapter.h"
using namespace inspire;
@@ -16,9 +17,9 @@ int main() {
InspireArchive archive;
auto ret = archive.ReLoad("test_res/pack/Gundam_RV1109");
LOGD("ReLoad %d", ret);
// InspireModel model;
// ret = archive.LoadModel("mask_detect", model);
// LOGD("LoadModel %d", ret);
// InspireModel model;
// ret = archive.LoadModel("mask_detect", model);
// LOGD("LoadModel %d", ret);
FaceTrack track;
ret = track.Configuration(archive);
@@ -28,11 +29,10 @@ int main() {
FaceRecognition recognition(archive, true);
// std::shared_ptr<RKNNAdapter> rknet = std::make_shared<RKNNAdapter>();
// ret = rknet->Initialize((unsigned char* )model.buffer, model.bufferSize);
//
// LOGD("LoadModel %d", ret);
// std::shared_ptr<RKNNAdapter> rknet = std::make_shared<RKNNAdapter>();
// ret = rknet->Initialize((unsigned char* )model.buffer, model.bufferSize);
//
// LOGD("LoadModel %d", ret);
return 0;
}

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2024/4/6.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "track_module/face_track.h"
#include "inspireface/recognition_module/face_feature_extraction.h"
@@ -13,7 +14,7 @@ int main() {
archive.ReLoad("test_res/pack/Gundam_RV1109");
FaceTrack track;
// FaceRecognition recognition(archive, true);
// FaceRecognition recognition(archive, true);
auto ret = track.Configuration(archive);
INSPIRE_LOGD("ret=%d", ret);
@@ -29,13 +30,13 @@ int main() {
track.UpdateStream(stream, true);
// if (!track.trackingFace.empty()) {
// auto const &face = track.trackingFace[0];
// cv::rectangle(image, face.GetRectSquare(), cv::Scalar(200, 0, 20), 2);
// }
//
// cv::imshow("w", image);
// cv::waitKey(0);
// if (!track.trackingFace.empty()) {
// auto const &face = track.trackingFace[0];
// cv::rectangle(image, face.GetRectSquare(), cv::Scalar(200, 0, 20), 2);
// }
//
// cv::imshow("w", image);
// cv::waitKey(0);
InspireModel model;
ret = archive.LoadModel("mask_detect", model);

View File

@@ -1,19 +1,19 @@
//
// Created by Tunm-Air13 on 2023/9/11.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "opencv2/opencv.hpp"
#include "log.h"
#include "inspireface/feature_hub/simd.h"
//#include <Eigen/Dense>
// #include <Eigen/Dense>
using namespace inspire;
int main() {
int N = 512;
int vectorSize = 512; // Vector length
int vectorSize = 512; // Vector length
{
// Create an Nx512 matrix of type CV_32F and fill it with random numbers
cv::Mat mat(N, vectorSize, CV_32F);
@@ -26,14 +26,13 @@ int main() {
std::cout << mat.size << std::endl;
std::cout << one.size << std::endl;
auto timeStart = (double) cv::getTickCount();
auto timeStart = (double)cv::getTickCount();
cv::Mat cosineSimilarities;
cv::gemm(mat, one, 1, cv::Mat(), 0, cosineSimilarities);
double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
double cost = ((double)cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
INSPIRE_LOGD("Matrix COST: %f", cost);
}
{
@@ -51,33 +50,33 @@ int main() {
vectorOne[i] = static_cast<float>(std::rand()) / RAND_MAX;
}
auto timeStart = (double) cv::getTickCount();
auto timeStart = (double)cv::getTickCount();
// dot
for (const auto &v: matrix) {
for (const auto &v : matrix) {
simd_dot(v.data(), vectorOne.data(), vectorSize);
}
double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
double cost = ((double)cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
INSPIRE_LOGD("Vector COST: %f", cost);
}
// {
// Eigen::initParallel();
// Eigen::MatrixXf mat(N, vectorSize);
// mat = Eigen::MatrixXf::Random(N, vectorSize);
//
// std::cout << mat.rows() << " x " << mat.cols() << std::endl;
//
//
// Eigen::VectorXf one(vectorSize);
// one = Eigen::VectorXf::Random(vectorSize);
//
// auto timeStart = (double) cv::getTickCount();
// Eigen::VectorXf result = mat * one;
//
// double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
// LOGD("Eigen COST: %f", cost);
// }
// {
// Eigen::initParallel();
// Eigen::MatrixXf mat(N, vectorSize);
// mat = Eigen::MatrixXf::Random(N, vectorSize);
//
// std::cout << mat.rows() << " x " << mat.cols() << std::endl;
//
//
// Eigen::VectorXf one(vectorSize);
// one = Eigen::VectorXf::Random(vectorSize);
//
// auto timeStart = (double) cv::getTickCount();
// Eigen::VectorXf result = mat * one;
//
// double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
// LOGD("Eigen COST: %f", cost);
// }
return 0;
}

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2023/10/3.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "inspireface/c_api/inspireface.h"
#include "opencv2/opencv.hpp"
@@ -8,7 +9,7 @@
using namespace inspire;
std::string basename(const std::string& path) {
std::string basename(const std::string &path) {
size_t lastSlash = path.find_last_of("/\\"); // Take into account the cross-platform separator
if (lastSlash == std::string::npos) {
return path; // Without the slash, the entire path is the base name
@@ -30,7 +31,7 @@ int compare() {
parameter.enable_mask_detect = 1;
parameter.enable_recognition = 1;
parameter.enable_face_quality = 1;
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE; // Selecting the image mode is always detection
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE; // Selecting the image mode is always detection
HContextHandle session;
ret = HF_CreateFaceContextFromResourceFile(path, parameter, detMode, 3, &session);
if (ret != HSUCCEED) {
@@ -38,13 +39,13 @@ int compare() {
}
std::vector<std::string> names = {
"/Users/tunm/datasets/lfw_funneled/Abel_Pacheco/Abel_Pacheco_0001.jpg",
"/Users/tunm/datasets/lfw_funneled/Abel_Pacheco/Abel_Pacheco_0004.jpg",
"/Users/tunm/datasets/lfw_funneled/Abel_Pacheco/Abel_Pacheco_0001.jpg",
"/Users/tunm/datasets/lfw_funneled/Abel_Pacheco/Abel_Pacheco_0004.jpg",
};
HInt32 featureNum;
HF_GetFeatureLength(&featureNum);
INSPIRE_LOGD("Feature length: %d", featureNum);
HFloat featuresCache[names.size()][featureNum]; // Store the cached vector
HFloat featuresCache[names.size()][featureNum]; // Store the cached vector
for (int i = 0; i < names.size(); ++i) {
auto &name = names[i];
@@ -63,7 +64,7 @@ int compare() {
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret == HSUCCEED) {
INSPIRE_LOGD("image handle: %ld", (long )imageSteamHandle);
INSPIRE_LOGD("image handle: %ld", (long)imageSteamHandle);
}
HF_MultipleFaceData multipleFaceData = {0};
@@ -71,9 +72,11 @@ int compare() {
INSPIRE_LOGD("Number of faces detected: %d", multipleFaceData.detectedNum);
for (int i = 0; i < multipleFaceData.detectedNum; ++i) {
cv::Rect rect = cv::Rect(multipleFaceData.rects[i].x, multipleFaceData.rects[i].y, multipleFaceData.rects[i].width, multipleFaceData.rects[i].height);
cv::Rect rect =
cv::Rect(multipleFaceData.rects[i].x, multipleFaceData.rects[i].y, multipleFaceData.rects[i].width, multipleFaceData.rects[i].height);
cv::rectangle(image, rect, cv::Scalar(0, 255, 200), 2);
INSPIRE_LOGD("%d, track_id: %d, pitch: %f, yaw: %f, roll: %f", i, multipleFaceData.trackIds[i], multipleFaceData.angles.pitch[i], multipleFaceData.angles.yaw[i], multipleFaceData.angles.roll[i]);
INSPIRE_LOGD("%d, track_id: %d, pitch: %f, yaw: %f, roll: %f", i, multipleFaceData.trackIds[i], multipleFaceData.angles.pitch[i],
multipleFaceData.angles.yaw[i], multipleFaceData.angles.roll[i]);
INSPIRE_LOGD("token size: %d", multipleFaceData.tokens->size);
}
#ifndef DISABLE_GUI
@@ -89,16 +92,16 @@ int compare() {
return -1;
}
// for (int j = 0; j < 512; ++j) {
// std::cout << featuresCache[0][j] << ", ";
// }
// std::cout << std::endl;
// for (int j = 0; j < 512; ++j) {
// std::cout << featuresCache[0][j] << ", ";
// }
// std::cout << std::endl;
// HSize size;
// HF_GetFaceBasicTokenSize(&size);
// LOGD("in size: %ld", size);
//
// LOGD("o size %d", multipleFaceData.tokens[0].size);
// HSize size;
// HF_GetFaceBasicTokenSize(&size);
// LOGD("in size: %ld", size);
//
// LOGD("o size %d", multipleFaceData.tokens[0].size);
HBuffer buffer[multipleFaceData.tokens[0].size];
HF_CopyFaceBasicToken(multipleFaceData.tokens[0], buffer, multipleFaceData.tokens[0].size);
@@ -108,7 +111,7 @@ int compare() {
token.data = buffer;
HFloat quality;
// ret = HF_FaceQualityDetect(session, multipleFaceData.tokens[0], &quality);
// ret = HF_FaceQualityDetect(session, multipleFaceData.tokens[0], &quality);
ret = HF_FaceQualityDetect(session, token, &quality);
INSPIRE_LOGD("RET : %d", ret);
INSPIRE_LOGD("Q: %f", quality);
@@ -120,7 +123,6 @@ int compare() {
} else {
INSPIRE_LOGE("image release error: %ld", ret);
}
}
HFloat compResult;
@@ -147,7 +149,6 @@ int compare() {
int search() {
HResult ret;
// 初始化context
HString path = "test_res/pack/Pikachu";
HF_ContextCustomParameter parameter = {0};
parameter.enable_liveness = 1;
@@ -185,7 +186,7 @@ int search() {
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) {
INSPIRE_LOGE("image handle error: %ld", (long )imageSteamHandle);
INSPIRE_LOGE("image handle error: %ld", (long)imageSteamHandle);
return -1;
}
@@ -214,16 +215,15 @@ int search() {
ret = HF_FeatureHubInsertFeature(identity);
if (ret != HSUCCEED) {
INSPIRE_LOGE("插入失败: %ld", ret);
INSPIRE_LOGE("Insert failed: %ld", ret);
return -1;
}
// // 在插入一次测试一下重复操作问题
// ret = HF_FeaturesGroupInsertFeature(session, identity);
// if (ret != HSUCCEED) {
// LOGE("不能重复id插入: %ld", ret);
// }
// // Test duplicate insertion operation
// ret = HF_FeaturesGroupInsertFeature(session, identity);
// if (ret != HSUCCEED) {
// INSPIRE_LOGE("Cannot insert duplicate ID: %ld", ret);
// }
delete[] tagName;
@@ -247,7 +247,7 @@ int search() {
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) {
INSPIRE_LOGE("image handle error: %ld", (long )imageSteamHandle);
INSPIRE_LOGE("image handle error: %ld", (long)imageSteamHandle);
return -1;
}
HF_MultipleFaceData multipleFaceData = {0};
@@ -265,10 +265,10 @@ int search() {
return -1;
}
// ret = HF_FaceContextFeatureRemove(session, 3);
// if (ret != HSUCCEED) {
// LOGE("delete failed: %ld", ret);
// }
// ret = HF_FaceContextFeatureRemove(session, 3);
// if (ret != HSUCCEED) {
// LOGE("delete failed: %ld", ret);
// }
std::string newName = "Six";
char *newTagName = new char[newName.size() + 1];
@@ -283,10 +283,9 @@ int search() {
}
delete[] newTagName;
HF_FaceFeatureIdentity searchIdentity = {0};
// HF_FaceFeature featureSearched = {0};
// searchIdentity.feature = &featureSearched;
// HF_FaceFeature featureSearched = {0};
// searchIdentity.feature = &featureSearched;
HFloat confidence;
ret = HF_FeatureHubFaceSearch(feature, &confidence, &searchIdentity);
if (ret != HSUCCEED) {
@@ -298,7 +297,6 @@ int search() {
INSPIRE_LOGD("The matched tag: %s", searchIdentity.tag);
INSPIRE_LOGD("The matched customId: %d", searchIdentity.customId);
// Face Pipeline
ret = HF_MultipleFacePipelineProcess(session, imageSteamHandle, &multipleFaceData, parameter);
if (ret != HSUCCEED) {
@@ -331,7 +329,6 @@ int search() {
HF_FeatureHubViewDBTable();
HF_FaceFeatureIdentity identity;
ret = HF_FeatureHubGetFaceIdentity(100, &identity);
if (ret != HSUCCEED) {
@@ -350,47 +347,44 @@ int search() {
}
int opiton() {
    // Placeholder kept for experimenting with session option masks, e.g.
    // HInt32 mask = HF_ENABLE_FACE_RECOGNITION | HF_ENABLE_LIVENESS;
    return 0;
}
// Entry point: runs the FeatureHub search demo. The commented-out block
// below is scratch code for manually exercising the ImageStream API and is
// intentionally left disabled.
int main() {
    HResult ret;  // NOTE(review): unused while the scratch block below stays commented out
    // {
    //     // Test ImageStream
    //     cv::Mat image = cv::imread("test_res/images/kun.jpg");
    //     HF_ImageData imageData = {0};
    //     imageData.data = image.data;
    //     imageData.height = image.rows;
    //     imageData.width = image.cols;
    //     imageData.rotation = CAMERA_ROTATION_0;
    //     imageData.format = STREAM_BGR;
    //
    //     HImageHandle imageSteamHandle;
    //     ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
    //     if (ret == HSUCCEED) {
    //         LOGD("image handle: %ld", (long )imageSteamHandle);
    //     }
    //     HF_DeBugImageStreamImShow(imageSteamHandle);
    //
    //     ret = HF_ReleaseImageStream(imageSteamHandle);
    //     if (ret == HSUCCEED) {
    //         imageSteamHandle = nullptr;
    //         LOGD("image released");
    //     } else {
    //         LOGE("image release error: %ld", ret);
    //     }
    //
    // }
    // compare();
    search();
    opiton();
}

View File

@@ -1,7 +1,7 @@
//
// Created by tunm on 2023/9/15.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "face_context.h"
@@ -46,10 +46,10 @@ int main() {
ctx.FaceDetectAndTrack(stream);
// LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
// LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
auto &faces = ctx.GetTrackingFaceList();
for (auto &face: faces) {
for (auto &face : faces) {
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
@@ -60,19 +60,18 @@ int main() {
cv::Point text_position(rect.x, rect.y - 10);
const auto& pose_and_quality = face.high_result;
const auto &pose_and_quality = face.high_result;
float mean_quality = 0.0f;
for (int i = 0; i < pose_and_quality.lmk_quality.size(); ++i) {
mean_quality += pose_and_quality.lmk_quality[i];
}
mean_quality /= pose_and_quality.lmk_quality.size();
mean_quality = 1 - mean_quality;
std::string pose_text = "pitch: " + std::to_string(pose_and_quality.pitch) + ",Yaw: " + std::to_string(pose_and_quality.yaw) + ",roll:" +std::to_string(pose_and_quality.roll) + ", q: " +
std::to_string(mean_quality);
std::string pose_text = "pitch: " + std::to_string(pose_and_quality.pitch) + ",Yaw: " + std::to_string(pose_and_quality.yaw) +
",roll:" + std::to_string(pose_and_quality.roll) + ", q: " + std::to_string(mean_quality);
cv::Point pose_position(rect.x, rect.y + rect.height + 20);
int font_face = cv::FONT_HERSHEY_SIMPLEX;
double font_scale = 0.5;
int font_thickness = 1;
@@ -82,7 +81,6 @@ int main() {
cv::putText(frame, pose_text, pose_position, font_face, font_scale, font_color, font_thickness);
}
cv::imshow("Webcam", frame);
if (cv::waitKey(1) == 27) {

View File

@@ -1,15 +1,15 @@
//
// Created by Tunm-Air13 on 2024/4/10.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "inspireface/c_api/inspireface.h"
#include "inspireface/middleware/camera_stream/camera_stream.h"
void non_file_test() {
HResult ret;
HPath path = "test_res/pack/abc"; // Use error path
HPath path = "test_res/pack/abc"; // Use error path
HF_ContextCustomParameter parameter = {0};
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE;
HContextHandle session;
@@ -28,7 +28,6 @@ void camera_test() {
stream.SetDataFormat(inspire::NV12);
stream.SetDataBuffer(image.data, image.rows, image.cols);
auto decode = stream.GetScaledImage(1.0f, true);
}
int main() {

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2024/4/6.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "track_module/face_track.h"
#include "inspireface/recognition_module/face_feature_extraction.h"
@@ -12,7 +13,7 @@ int main() {
InspireArchive archive("test_res/pack/Pikachu");
FaceTrack track;
// FaceRecognition recognition(archive, true);
// FaceRecognition recognition(archive, true);
auto ret = track.Configuration(archive);
INSPIRE_LOGD("ret=%d", ret);
@@ -27,12 +28,11 @@ int main() {
track.UpdateStream(stream, true);
}
// InspireModel model;
// ret = archive.LoadModel("mask_detect", model);
// std::cout << ret << std::endl;
//
// archive.PublicPrintSubFiles();
// InspireModel model;
// ret = archive.LoadModel("mask_detect", model);
// std::cout << ret << std::endl;
//
// archive.PublicPrintSubFiles();
return 0;
}

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2023/9/8.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "track_module/face_detect/face_pose.h"
@@ -14,12 +15,14 @@ int main(int argc, char** argv) {
Configurable param;
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"ip3_pose", });
param.set<std::vector<std::string>>("outputs_layers", {
"ip3_pose",
});
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("input_channel", 1); // Input Gray
param.set<int>("input_image_channel", 1); // BGR 2 Gray
param.set<int>("input_image_channel", 1); // BGR 2 Gray
auto m_pose_net_ = std::make_shared<FacePose>();
InspireModel model;

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2023/9/10.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "face_context.h"
@@ -11,7 +12,7 @@
using namespace inspire;
std::string GetFileNameWithoutExtension(const std::string& filePath) {
std::string GetFileNameWithoutExtension(const std::string &filePath) {
size_t slashPos = filePath.find_last_of("/\\");
if (slashPos != std::string::npos) {
std::string fileName = filePath.substr(slashPos + 1);
@@ -52,7 +53,6 @@ int comparison1v1(FaceContext &ctx) {
return -1;
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature_1);
}
{
@@ -68,7 +68,6 @@ int comparison1v1(FaceContext &ctx) {
return -1;
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature_2);
}
float rec;
@@ -78,14 +77,11 @@ int comparison1v1(FaceContext &ctx) {
return 0;
}
int search(FaceContext &ctx) {
// std::shared_ptr<FeatureBlock> block;
// block.reset(FeatureBlock::Create(hyper::MC_OPENCV));
// std::shared_ptr<FeatureBlock> block;
// block.reset(FeatureBlock::Create(hyper::MC_OPENCV));
std::vector<String> files_list = {
};
std::vector<String> files_list = {};
for (int i = 0; i < files_list.size(); ++i) {
auto image = cv::imread(files_list[i]);
CameraStream stream;
@@ -103,11 +99,11 @@ int search(FaceContext &ctx) {
FEATURE_HUB->RegisterFaceFeature(feature, i, GetFileNameWithoutExtension(files_list[i]), 1000 + i);
}
// ctx.FaceRecognitionModule()->PrintMatrix();
// ctx.FaceRecognitionModule()->PrintMatrix();
// auto ret = block->DeleteFeature(3);
// LOGD("DEL: %d", ret);
// block->PrintMatrix();
// auto ret = block->DeleteFeature(3);
// LOGD("DEL: %d", ret);
// block->PrintMatrix();
FEATURE_HUB->DeleteFaceFeature(2);
@@ -129,8 +125,8 @@ int search(FaceContext &ctx) {
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature);
// block->UpdateFeature(4, feature);
// block->AddFeature(feature);
// block->UpdateFeature(4, feature);
// block->AddFeature(feature);
}
// Prepare an image to search
@@ -150,18 +146,17 @@ int search(FaceContext &ctx) {
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature);
SearchResult result;
auto timeStart = (double) cv::getTickCount();
auto timeStart = (double)cv::getTickCount();
FEATURE_HUB->SearchFaceFeature(feature, result);
double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
double cost = ((double)cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
INSPIRE_LOGD("Search time: %f", cost);
INSPIRE_LOGD("Top1: %d, %f, %s %d", result.index, result.score, result.tag.c_str(), result.customId);
}
return 0;
}
int main(int argc, char** argv) {
int main(int argc, char **argv) {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_recognition = true;
@@ -170,11 +165,10 @@ int main(int argc, char** argv) {
INSPIRE_LOGE("Initialization error");
return -1;
}
comparison1v1(ctx);
// search(ctx);
// search(ctx);
return 0;
}

View File

@@ -1,6 +1,7 @@
//
// Created by tunm on 2023/9/7.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "face_context.h"
@@ -31,12 +32,12 @@ int main(int argc, char** argv) {
std::vector<HyperFaceData> faces;
for (int i = 0; i < ctx.GetNumberOfFacesCurrentlyDetected(); ++i) {
// const ByteArray &byteArray = ctx.GetDetectCache()[i];
// const ByteArray &byteArray = ctx.GetDetectCache()[i];
HyperFaceData face = {0};
// ret = DeserializeHyperFaceData(byteArray, face);
// ret = DeserializeHyperFaceData(byteArray, face);
const FaceBasicData &faceBasic = ctx.GetFaceBasicDataCache()[i];
ret = DeserializeHyperFaceData((char* )faceBasic.data, faceBasic.dataSize, face);
const FaceBasicData& faceBasic = ctx.GetFaceBasicDataCache()[i];
ret = DeserializeHyperFaceData((char*)faceBasic.data, faceBasic.dataSize, face);
INSPIRE_LOGD("OK!");
if (ret != HSUCCEED) {
@@ -48,17 +49,16 @@ int main(int argc, char** argv) {
std::cout << rect << std::endl;
cv::rectangle(rot90, rect, cv::Scalar(0, 0, 233), 2);
for (auto &p: face.keyPoints) {
for (auto& p : face.keyPoints) {
cv::Point2f point(p.x, p.y);
cv::circle(rot90, point, 0, cv::Scalar(0, 0, 255), 5);
}
}
// cv::imshow("wq", rot90);
// cv::waitKey(0);
// cv::imshow("wq", rot90);
// cv::waitKey(0);
cv::imwrite("wq.png", rot90);
ret = ctx.FacesProcess(stream, faces, param);
if (ret != HSUCCEED) {
return -1;

View File

@@ -1,6 +1,7 @@
//
// Created by Tunm-Air13 on 2023/10/11.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "inspireface/feature_hub/persistence/sqlite_faces_manage.h"

View File

@@ -1,13 +1,14 @@
//
// Created by tunm on 2023/8/29.
//
/**
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include "inspireface/track_module/face_track.h"
#include "opencv2/opencv.hpp"
using namespace inspire;
int video_test(FaceTrack &ctx, int cam_id) {
int video_test(FaceTrack& ctx, int cam_id) {
#ifndef ISF_USE_MOBILE_OPENCV_IN_LOCAL
cv::VideoCapture cap(cam_id);
@@ -37,8 +38,8 @@ int video_test(FaceTrack &ctx, int cam_id) {
INSPIRE_LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
auto const &faces = ctx.trackingFace;
for (auto const &face: faces) {
auto const& faces = ctx.trackingFace;
for (auto const& face : faces) {
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
@@ -51,7 +52,7 @@ int video_test(FaceTrack &ctx, int cam_id) {
const auto& pose_and_quality = face.high_result;
std::vector<float> euler = {pose_and_quality.yaw, pose_and_quality.roll, pose_and_quality.pitch};
std::string pose_text = "P: " + std::to_string(euler[0]) + ",Yaw: " + std::to_string(euler[1]) + ",roll:" +std::to_string(euler[2]);
std::string pose_text = "P: " + std::to_string(euler[0]) + ",Yaw: " + std::to_string(euler[1]) + ",roll:" + std::to_string(euler[2]);
cv::Point pose_position(rect.x, rect.y + rect.height + 20);
@@ -64,7 +65,6 @@ int video_test(FaceTrack &ctx, int cam_id) {
cv::putText(frame, pose_text, pose_position, font_face, font_scale, font_color, font_thickness);
}
cv::imshow("Webcam", frame);
if (cv::waitKey(1) == 27) {
@@ -107,8 +107,8 @@ void video_file_test(FaceTrack& ctx, const std::string& video_filename) {
ctx.UpdateStream(stream, false);
INSPIRE_LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
auto const &faces = ctx.trackingFace;
for (auto const &face: faces) {
auto const& faces = ctx.trackingFace;
for (auto const& face : faces) {
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
@@ -116,7 +116,7 @@ void video_file_test(FaceTrack& ctx, const std::string& video_filename) {
cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
auto lmk = face.GetLanmdark();
for (auto & p : lmk) {
for (auto& p : lmk) {
cv::circle(frame, p, 0, cv::Scalar(0, 0, 242), 2);
}
@@ -125,7 +125,8 @@ void video_file_test(FaceTrack& ctx, const std::string& video_filename) {
cv::Point text_position(rect.x, rect.y - 10);
const auto& euler = face.high_result;
std::string pose_text = "pitch: " + std::to_string(euler.pitch) + ",Yaw: " + std::to_string(euler.yaw) + ",roll:" +std::to_string(euler.roll);
std::string pose_text =
"pitch: " + std::to_string(euler.pitch) + ",Yaw: " + std::to_string(euler.yaw) + ",roll:" + std::to_string(euler.roll);
cv::Point pose_position(rect.x, rect.y + rect.height + 20);
@@ -163,8 +164,8 @@ int main(int argc, char** argv) {
const std::string folder = "test_res/pack/Pikachu";
INSPIRE_LOGD("%s", folder.c_str());
// ModelLoader loader;
// loader.Reset(folder);
// ModelLoader loader;
// loader.Reset(folder);
InspireArchive archive;
archive.ReLoad(folder);
@@ -183,7 +184,7 @@ int main(int argc, char** argv) {
} else if (source == "image") {
cv::Mat image = cv::imread(input);
if (!image.empty()) {
// image_test(ctx, image);
// image_test(ctx, image);
} else {
std::cerr << "Unable to open the image file." << std::endl;
}

View File

@@ -1,49 +0,0 @@
//
// Created by tunm on 2024/5/26.
//
#include <cstddef>
#include <iostream>
#include <opencv2/core/types.hpp>
#ifndef DISABLE_GUI
#include <opencv2/highgui.hpp>
#endif
#include <opencv2/imgproc.hpp>
#include <vector>
#include "data_type.h"
#include "opencv2/opencv.hpp"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/Initialization_module/launch.h"
using namespace inspire;
int main() {
INSPIRE_LAUNCH->Load("test_res/pack/Megatron");
auto archive = INSPIRE_LAUNCH->getMArchive();
InspireModel detModel;
auto ret = archive.LoadModel("face_detect", detModel);
std::vector<int> input_size = {640, 640};
detModel.Config().set<std::vector<int>>("input_size", input_size);
FaceDetect detect(input_size[0]);
detect.loadData(detModel, detModel.modelType, true);
auto img = cv::imread("/Users/tunm/Downloads/xtl.png");
double time;
time = (double) cv::getTickCount();
std::vector<FaceLoc> results = detect(img);
time = ((double) cv::getTickCount() - time) / cv::getTickFrequency();
std::cout << "use time" << time << "\n";
for (size_t i = 0; i < results.size(); i++) {
auto &item = results[i];
cv::rectangle(img, cv::Point2f(item.x1, item.y1), cv::Point2f(item.x2, item.y2), cv::Scalar(0, 0, 255), 4);
}
#ifndef DISABLE_GUI
cv::imshow("w", img);
cv::waitKey(0);
#endif
return 0;
}

View File

@@ -1,116 +0,0 @@
//
// Created by tunm on 2024/4/20.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
// 1:1 face comparison sample: extracts a 512-d feature from each of two
// images and prints their similarity score.
// Usage: <pack_path> <img1_path> <img2_path>
int main(int argc, char* argv[]) {
    // Check whether the number of parameters is correct
    if (argc != 4) {
        std::cerr << "Usage: " << argv[0] << " <pack_path> <img1_path> <img2_path>\n";
        return 1;
    }
    auto packPath = argv[1];  // Resource pack file
    auto imgPath1 = argv[2];  // First face image
    auto imgPath2 = argv[3];  // Second face image
    std::cout << "Pack file Path: " << packPath << std::endl;
    std::cout << "Source file Path 1: " << imgPath1 << std::endl;
    std::cout << "Source file Path 2: " << imgPath2 << std::endl;
    HResult ret;
    // The resource file must be loaded before it can be used
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        std::cout << "Load Resource error: " << ret << std::endl;
        return ret;
    }
    // Create a session for face recognition
    HOption option = HF_ENABLE_FACE_RECOGNITION;
    HFSession session;
    ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
    if (ret != HSUCCEED) {
        std::cout << "Create session error: " << ret << std::endl;
        return ret;
    }
    // Extract one 512-d feature vector from each of the two images
    std::vector<char*> twoImg = {imgPath1, imgPath2};
    std::vector<std::vector<float>> vec(2, std::vector<float>(512));
    for (size_t i = 0; i < twoImg.size(); ++i) {
        auto image = cv::imread(twoImg[i]);
        if (image.empty()) {
            std::cout << "Image is empty: " << twoImg[i] << std::endl;
            return 0;
        }
        // Prepare image data for processing
        HFImageData imageData = {0};
        imageData.data = image.data;                // Pointer to the image data
        imageData.format = HF_STREAM_BGR;           // Image format (BGR in this case)
        imageData.height = image.rows;              // Image height
        imageData.width = image.cols;               // Image width
        imageData.rotation = HF_CAMERA_ROTATION_0;  // Image rotation
        HFImageStream stream;
        ret = HFCreateImageStream(&imageData, &stream);  // Create an image stream for processing
        if (ret != HSUCCEED) {
            std::cout << "Create stream error: " << ret << std::endl;
            return ret;
        }
        // Execute face tracking on the image
        HFMultipleFaceData multipleFaceData = {0};
        ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);  // Track faces in the image
        if (ret != HSUCCEED) {
            std::cout << "Run face track error: " << ret << std::endl;
            return ret;
        }
        if (multipleFaceData.detectedNum == 0) {  // Check if any faces were detected
            std::cout << "No face was detected: " << twoImg[i] << ret << std::endl;
            return ret;
        }
        // Extract facial features from the first detected face, an interface that uses copy features in a comparison scenario
        ret = HFFaceFeatureExtractCpy(session, stream, multipleFaceData.tokens[0], vec[i].data());  // Extract features
        if (ret != HSUCCEED) {
            std::cout << "Extract feature error: " << ret << std::endl;
            return ret;
        }
        ret = HFReleaseImageStream(stream);
        if (ret != HSUCCEED) {
            // HResult is logged as a signed value elsewhere in this project;
            // the previous "%lu" format mismatched a signed error code.
            printf("Release image stream error: %ld\n", (long)ret);
        }
    }
    // Make feature1
    HFFaceFeature feature1 = {0};
    feature1.data = vec[0].data();
    feature1.size = vec[0].size();
    // Make feature2
    HFFaceFeature feature2 = {0};
    feature2.data = vec[1].data();
    feature2.size = vec[1].size();
    // Run comparison
    HFloat similarity;
    ret = HFFaceComparison(feature1, feature2, &similarity);
    if (ret != HSUCCEED) {
        std::cout << "Feature comparison error: " << ret << std::endl;
        return ret;
    }
    std::cout << "Similarity: " << similarity << std::endl;
    // The memory must be freed at the end of the program
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        printf("Release session error: %ld\n", (long)ret);
        return ret;
    }
    return 0;  // Explicit success exit (previously fell off the end of main)
}

View File

@@ -1,227 +0,0 @@
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
// FeatureHub sample: registers four faces in an in-memory feature database,
// searches it with a query image, removes the matched identity, and verifies
// the identity can no longer be found.
// Usage: <pack_path>
int main(int argc, char* argv[]) {
    // Check if the correct number of parameters was provided
    if (argc != 2) {
        std::cerr << "Usage: " << argv[0] << " <pack_path>\n";
        return 1;
    }
    auto packPath = argv[1];            // Path to the resource pack
    std::string testDir = "test_res/";  // Directory containing test resources
    HResult ret;
    // Load resource file, necessary before using any functionality
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        std::cout << "Load Resource error: " << ret << std::endl;
        return ret;
    }
    // Configuration for the feature database
    HFFeatureHubConfiguration featureHubConfiguration;
    featureHubConfiguration.featureBlockNum = 10;               // Number of feature blocks
    featureHubConfiguration.enablePersistence = 0;              // Persistence not enabled, use in-memory database
    featureHubConfiguration.dbPath = "";                        // Database path (not used here)
    featureHubConfiguration.searchMode = HF_SEARCH_MODE_EAGER;  // Search mode configuration
    featureHubConfiguration.searchThreshold = 0.48f;            // Threshold for search operations
    // Enable the global feature database
    ret = HFFeatureHubDataEnable(featureHubConfiguration);
    if (ret != HSUCCEED) {
        std::cout << "An exception occurred while starting FeatureHub: " << ret << std::endl;
        return ret;
    }
    // Prepare a list of face photos for testing
    std::vector<std::string> photos = {
        testDir + "data/bulk/Nathalie_Baye_0002.jpg",
        testDir + "data/bulk/jntm.jpg",
        testDir + "data/bulk/woman.png",
        testDir + "data/bulk/Rob_Lowe_0001.jpg",
    };
    std::vector<std::string> names = {
        "Nathalie Baye",
        "JNTM",
        "Woman",
        "Rob Lowe",
    };
    assert(photos.size() == names.size());  // Ensure each photo has a corresponding name
    // Create a session for face recognition
    HOption option = HF_ENABLE_FACE_RECOGNITION;
    HFSession session;
    ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
    if (ret != HSUCCEED) {
        std::cout << "Create session error: " << ret << std::endl;
        return ret;
    }
    // Process each photo, extract features, and add them to the database
    for (int i = 0; i < photos.size(); ++i) {
        std::cout << "===============================" << std::endl;
        // Load the image from the specified file path
        const auto& path = photos[i];
        const auto& name = names[i];
        auto image = cv::imread(path);
        if (image.empty()) {
            std::cout << "The image is empty: " << path << ret << std::endl;
            return ret;
        }
        // Prepare image data for processing
        HFImageData imageData = {0};
        imageData.data = image.data;                // Pointer to the image data
        imageData.format = HF_STREAM_BGR;           // Image format (BGR in this case)
        imageData.height = image.rows;              // Image height
        imageData.width = image.cols;               // Image width
        imageData.rotation = HF_CAMERA_ROTATION_0;  // Image rotation
        HFImageStream stream;
        ret = HFCreateImageStream(&imageData, &stream);  // Create an image stream for processing
        if (ret != HSUCCEED) {
            std::cout << "Create stream error: " << ret << std::endl;
            return ret;
        }
        // Execute face tracking on the image
        HFMultipleFaceData multipleFaceData = {0};
        ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);  // Track faces in the image
        if (ret != HSUCCEED) {
            std::cout << "Run face track error: " << ret << std::endl;
            return ret;
        }
        if (multipleFaceData.detectedNum == 0) {  // Check if any faces were detected
            std::cout << "No face was detected: " << path << ret << std::endl;
            return ret;
        }
        // Extract facial features from the first detected face
        HFFaceFeature feature = {0};
        ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature);  // Extract features
        if (ret != HSUCCEED) {
            std::cout << "Extract feature error: " << ret << std::endl;
            return ret;
        }
        // Assign a name to the detected face and insert it into the feature hub
        char* cstr = new char[name.size() + 1];  // Dynamically allocate memory for the name
        strcpy(cstr, name.c_str());              // Copy the name into the allocated memory
        HFFaceFeatureIdentity identity = {0};
        identity.feature = &feature;  // Assign the extracted feature
        identity.customId = i;        // Custom identifier for the face
        identity.tag = cstr;          // Tag the feature with the name
        ret = HFFeatureHubInsertFeature(identity);  // Insert the feature into the hub
        if (ret != HSUCCEED) {
            std::cout << "Feature insertion into FeatureHub failed: " << ret << std::endl;
            return ret;
        }
        delete[] cstr;  // Clean up the dynamically allocated memory
        std::cout << "Insert feature to FeatureHub: " << name << std::endl;
        ret = HFReleaseImageStream(stream);  // Release the image stream
        if (ret != HSUCCEED) {
            std::cout << "Release stream failed: " << ret << std::endl;
            return ret;
        }
    }
    HInt32 count;
    ret = HFFeatureHubGetFaceCount(&count);
    assert(count == photos.size());  // All registered faces should be counted
    std::cout << "\nInserted data: " << count << std::endl;
    // Process a query image and search for similar faces in the database
    auto query = cv::imread(testDir + "data/bulk/kun.jpg");
    if (query.empty()) {
        std::cout << "The query image is empty: " << ret << std::endl;
        return ret;
    }
    HFImageData imageData = {0};
    imageData.data = query.data;
    imageData.format = HF_STREAM_BGR;
    imageData.height = query.rows;
    imageData.width = query.cols;
    imageData.rotation = HF_CAMERA_ROTATION_0;
    HFImageStream stream;
    ret = HFCreateImageStream(&imageData, &stream);
    if (ret != HSUCCEED) {
        std::cout << "Create stream error: " << ret << std::endl;
        return ret;
    }
    HFMultipleFaceData multipleFaceData = {0};
    ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);
    if (ret != HSUCCEED) {
        std::cout << "Run face track error: " << ret << std::endl;
        return ret;
    }
    if (multipleFaceData.detectedNum == 0) {
        std::cout << "No face was detected from target image: " << ret << std::endl;
        return ret;
    }
    // Initialize the feature structure to store extracted face features
    HFFaceFeature feature = {0};
    // Extract facial features from the detected face using the first token
    ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature);
    if (ret != HSUCCEED) {
        std::cout << "Extract feature error: " << ret << std::endl;  // Print error if extraction fails
        return ret;
    }
    // Initialize the structure to store the results of the face search
    HFFaceFeatureIdentity searched = {0};
    HFloat confidence;  // Variable to store the confidence level of the search result
    // Search the feature hub for a matching face feature
    ret = HFFeatureHubFaceSearch(feature, &confidence, &searched);
    if (ret != HSUCCEED) {
        std::cout << "Search face feature error: " << ret << std::endl;  // Print error if search fails
        return ret;
    }
    if (searched.customId == -1) {
        std::cout << "No similar faces were found: " << std::endl;  // Notify if no matching face is found
        return ret;
    }
    // Output the details of the found face, including custom ID, associated tag, and confidence level
    std::cout << "\nFound similar face: id=" << searched.customId << ", tag=" << searched.tag << ", confidence=" << confidence << std::endl;
    std::string name(searched.tag);
    // Remove feature
    ret = HFFeatureHubFaceRemove(searched.customId);
    if (ret != HSUCCEED) {
        std::cout << "Remove failed: " << ret << std::endl;  // Print error if removal fails
        return ret;
    }
    // Remove feature and search again
    ret = HFFeatureHubFaceSearch(feature, &confidence, &searched);
    if (ret != HSUCCEED) {
        std::cout << "Search face feature error: " << ret << std::endl;  // Print error if search fails
        return ret;
    }
    if (searched.customId != -1) {
        std::cout << "Remove an exception: " << std::endl;  // Identity unexpectedly still found after removal
        return ret;
    }
    std::cout << "\nSearch again confidence=" << confidence << std::endl;
    std::cout << name << " has been removed." << std::endl;
    // Clean up and close the session
    ret = HFReleaseImageStream(stream);
    if (ret != HSUCCEED) {
        std::cout << "Release stream error: " << ret << std::endl;
        return ret;
    }
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        std::cout << "Release session error: " << ret << std::endl;
        return ret;
    }
    return ret;  // Return the final result code
}

View File

@@ -1,249 +0,0 @@
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check if the correct number of parameters was provided
if (argc != 2) {
std::cerr << "Usage: " << argv[0] << " <pack_path>\n";
return 1;
}
auto packPath = argv[1]; // Path to the resource pack
std::string testDir = "test_res/"; // Directory containing test resources
HResult ret;
// Load resource file, necessary before using any functionality
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Configuration for the feature database
HFFeatureHubConfiguration featureHubConfiguration;
featureHubConfiguration.featureBlockNum = 10; // Number of feature blocks
featureHubConfiguration.enablePersistence = 0; // Persistence not enabled, use in-memory database
featureHubConfiguration.dbPath = ""; // Database path (not used here)
featureHubConfiguration.searchMode = HF_SEARCH_MODE_EAGER; // Search mode configuration
featureHubConfiguration.searchThreshold = 0.48f; // Threshold for search operations
// Enable the global feature database
ret = HFFeatureHubDataEnable(featureHubConfiguration);
if (ret != HSUCCEED) {
std::cout << "An exception occurred while starting FeatureHub: " << ret << std::endl;
return ret;
}
// Prepare a list of face photos for testing
std::vector<std::string> photos = {
testDir + "data/RD/d1.jpeg",
testDir + "data/RD/d2.jpeg",
testDir + "data/RD/d3.jpeg",
testDir + "data/RD/d4.jpeg",
};
std::vector<std::string> names = {
"d1", "d2", "d3", "d4",
};
assert(photos.size() == names.size()); // Ensure each photo has a corresponding name
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;
}
// Process each photo, extract features, and add them to the database
for (int i = 0; i < photos.size(); ++i) {
std::cout << "===============================" << std::endl;
// Load the image from the specified file path
const auto& path = photos[i];
const auto& name = names[i];
auto image = cv::imread(path);
if (image.empty()) {
std::cout << "The image is empty: " << path << ret << std::endl;
return ret;
}
// Prepare image data for processing
HFImageData imageData = {0};
imageData.data = image.data; // Pointer to the image data
imageData.format = HF_STREAM_BGR; // Image format (BGR in this case)
imageData.height = image.rows; // Image height
imageData.width = image.cols; // Image width
imageData.rotation = HF_CAMERA_ROTATION_0; // Image rotation
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream); // Create an image stream for processing
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
// Execute face tracking on the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData); // Track faces in the image
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) { // Check if any faces were detected
std::cout << "No face was detected: " << path << ret << std::endl;
return ret;
}
// Extract facial features from the first detected face
HFFaceFeature feature = {0};
ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature); // Extract features
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl;
return ret;
}
// Assign a name to the detected face and insert it into the feature hub
char* cstr = new char[name.size() + 1]; // Dynamically allocate memory for the name
strcpy(cstr, name.c_str()); // Copy the name into the allocated memory
HFFaceFeatureIdentity identity = {0};
identity.feature = &feature; // Assign the extracted feature
identity.customId = i; // Custom identifier for the face
identity.tag = cstr; // Tag the feature with the name
ret = HFFeatureHubInsertFeature(identity); // Insert the feature into the hub
if (ret != HSUCCEED) {
std::cout << "Feature insertion into FeatureHub failed: " << ret << std::endl;
return ret;
}
delete[] cstr; // Clean up the dynamically allocated memory
std::cout << "Insert feature to FeatureHub: " << name << std::endl;
ret = HFReleaseImageStream(stream); // Release the image stream
if (ret != HSUCCEED) {
std::cout << "Release stream failed: " << ret << std::endl;
return ret;
}
}
HInt32 count;
ret = HFFeatureHubGetFaceCount(&count);
assert(count == photos.size());
std::cout << "\nInserted data: " << count << std::endl;
// Process a query image and search for similar faces in the database
auto query = cv::imread(testDir + "data/RD/d5.jpeg");
if (query.empty()) {
std::cout << "The query image is empty: " << ret << std::endl;
return ret;
}
HFImageData imageData = {0};
imageData.data = query.data;
imageData.format = HF_STREAM_BGR;
imageData.height = query.rows;
imageData.width = query.cols;
imageData.rotation = HF_CAMERA_ROTATION_0;
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream);
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) {
std::cout << "No face was detected from target image: " << ret << std::endl;
return ret;
}
// Initialize the feature structure to store extracted face features
HFFaceFeature feature = {0};
// Extract facial features from the detected face using the first token
ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl; // Print error if extraction fails
return ret;
}
HFSearchTopKResults searched = {0};
// Search the feature hub for a matching face feature
ret = HFFeatureHubFaceSearchTopK(feature, 10, &searched);
if (ret != HSUCCEED) {
std::cout << "Search face feature error: " << ret << std::endl; // Print error if search fails
return ret;
}
if (searched.size == 0) {
std::cout << "No similar faces were found: " << std::endl; // Notify if no matching face is found
return ret;
}
// Output the details of the found face, including custom ID, associated tag, and confidence level
for (int i = 0; i < searched.size; ++i) {
auto id = searched.customIds[i];
auto score = searched.confidence[i];
HFFaceFeatureIdentity identity = {0};
ret = HFFeatureHubGetFaceIdentity(id, &identity);
if (ret != HSUCCEED) {
std::cout << "Get face identity error: " << ret << std::endl; // Print error if search fails
return ret;
}
std::cout << "\nFound similar face: id=" << id << ", tag=" << identity.tag << ", confidence=" << score << std::endl;
}
// std::string name(searched.tag);
//
// Remove feature
if (searched.size > 2) {
ret = HFFeatureHubFaceRemove(searched.customIds[2]);
if (ret != HSUCCEED) {
std::cout << "Remove failed: " << ret << std::endl; // Print error if search fails
return ret;
}
std::cout << "============= Remove id: " << searched.customIds[2] << " ==============" << std::endl;
ret = HFFeatureHubFaceSearchTopK(feature, 10, &searched);
if (ret != HSUCCEED) {
std::cout << "Search face feature error: " << ret << std::endl; // Print error if search fails
return ret;
}
if (searched.size == 0) {
std::cout << "No similar faces were found: " << std::endl; // Notify if no matching face is found
return ret;
}
// Output the details of the found face, including custom ID, associated tag, and confidence level
for (int i = 0; i < searched.size; ++i) {
auto id = searched.customIds[i];
auto score = searched.confidence[i];
HFFaceFeatureIdentity identity = {0};
ret = HFFeatureHubGetFaceIdentity(id, &identity);
if (ret != HSUCCEED) {
std::cout << "Get face identity error: " << ret << std::endl; // Print error if search fails
return ret;
}
std::cout << "\nFound similar face: id=" << id << ", tag=" << identity.tag << ", confidence=" << score << std::endl;
}
}
// Clean up and close the session
ret = HFReleaseImageStream(stream);
if (ret != HSUCCEED) {
std::cout << "Release stream error: " << ret << std::endl;
return ret;
}
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
std::cout << "Release session error: " << ret << std::endl;
return ret;
}
return ret; // Return the final result code
}

View File

@@ -1,202 +0,0 @@
//
// Created by Tunm-Air13 on 2024/4/17.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc < 3 || argc > 4) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <source_path> [rotation]\n";
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
int rotation = 0;
// If rotation is provided, check and set the value
if (argc == 4) {
rotation = std::atoi(argv[3]);
if (rotation != 0 && rotation != 90 && rotation != 180 && rotation != 270) {
std::cerr << "Invalid rotation value. Allowed values are 0, 90, 180, 270.\n";
return 1;
}
}
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Source file Path: " << sourcePath << std::endl;
std::cout << "Rotation: " << rotation << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality
// detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
// tracking
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
HInt32 maxDetectNum = 20;
// Face detection image input level
HInt32 detectPixelLevel = 160;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
HFSessionSetTrackPreviewSize(session, detectPixelLevel);
HFSessionSetFilterMinimumFacePixelSize(session, 32);
// Load a image
cv::Mat image = cv::imread(sourcePath);
if (image.empty()) {
std::cout << "The source entered is not a picture or read error." << std::endl;
return 1;
}
// Prepare an image parameter structure for configuration
HFImageData imageParam = {0};
imageParam.data = image.data; // Data buffer
imageParam.width = image.cols; // Target view width
imageParam.height = image.rows; // Target view width
// Set rotation based on input parameter
switch (rotation) {
case 90:
imageParam.rotation = HF_CAMERA_ROTATION_90;
break;
case 180:
imageParam.rotation = HF_CAMERA_ROTATION_180;
break;
case 270:
imageParam.rotation = HF_CAMERA_ROTATION_270;
break;
case 0:
default:
imageParam.rotation = HF_CAMERA_ROTATION_0;
break;
}
imageParam.format = HF_STREAM_BGR; // Data source format
// Create an image data stream
HFImageStream imageHandle = {0};
ret = HFCreateImageStream(&imageParam, &imageHandle);
if (ret != HSUCCEED) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
// Execute HF_FaceContextRunFaceTrack captures face information in an image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
return ret;
}
// Print the number of faces detected
auto faceNum = multipleFaceData.detectedNum;
std::cout << "Num of face: " << faceNum << std::endl;
// Copy a new image to draw
cv::Mat draw = image.clone();
for (int index = 0; index < faceNum; ++index) {
std::cout << "========================================" << std::endl;
std::cout << "Token size: " << multipleFaceData.tokens[index].size << std::endl;
std::cout << "Process face index: " << index << std::endl;
std::cout << "DetConfidence: " << multipleFaceData.detConfidence[index] << std::endl;
// Use OpenCV's Rect to receive face bounding boxes
auto rect = cv::Rect(multipleFaceData.rects[index].x, multipleFaceData.rects[index].y,
multipleFaceData.rects[index].width, multipleFaceData.rects[index].height);
cv::rectangle(draw, rect, cv::Scalar(0, 100, 255), 4);
// Print FaceID, In IMAGE-MODE it is changing, in VIDEO-MODE it is fixed, but it may be lost
std::cout << "FaceID: " << multipleFaceData.trackIds[index] << std::endl;
// Print Head euler angle, It can often be used to judge the quality of a face by the Angle
// of the head
std::cout << "Roll: " << multipleFaceData.angles.roll[index] << ", Yaw: " << multipleFaceData.angles.yaw[index]
<< ", Pitch: " << multipleFaceData.angles.pitch[index] << std::endl;
HInt32 numOfLmk;
HFGetNumOfFaceDenseLandmark(&numOfLmk);
HPoint2f denseLandmarkPoints[numOfLmk];
ret = HFGetFaceDenseLandmarkFromFaceToken(multipleFaceData.tokens[index], denseLandmarkPoints, numOfLmk);
if (ret != HSUCCEED) {
std::cerr << "HFGetFaceDenseLandmarkFromFaceToken error!!" << std::endl;
return -1;
}
for (size_t i = 0; i < numOfLmk; i++) {
cv::Point2f p(denseLandmarkPoints[i].x, denseLandmarkPoints[i].y);
cv::circle(draw, p, 0, (0, 0, 255), 2);
}
auto& rt = multipleFaceData.rects[index];
float area = ((float)(rt.height * rt.width)) / (imageParam.width * imageParam.height);
std::cout << "area: " << area << std::endl;
}
cv::imwrite("draw_detected.jpg", draw);
// Run pipeline function
// Select the pipeline function that you want to execute, provided that it is already enabled
// when FaceContext is created!
auto pipelineOption = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// In this loop, all faces are processed
ret = HFMultipleFacePipelineProcessOptional(session, imageHandle, &multipleFaceData, pipelineOption);
if (ret != HSUCCEED) {
std::cout << "Execute Pipeline error: " << ret << std::endl;
return ret;
}
// Get mask detection results from the pipeline cache
HFFaceMaskConfidence maskConfidence = {0};
ret = HFGetFaceMaskConfidence(session, &maskConfidence);
if (ret != HSUCCEED) {
std::cout << "Get mask detect result error: " << ret << std::endl;
return -1;
}
// Get face quality results from the pipeline cache
HFFaceQualityConfidence qualityConfidence = {0};
ret = HFGetFaceQualityConfidence(session, &qualityConfidence);
if (ret != HSUCCEED) {
std::cout << "Get face quality result error: " << ret << std::endl;
return -1;
}
for (int index = 0; index < faceNum; ++index) {
std::cout << "========================================" << std::endl;
std::cout << "Process face index from pipeline: " << index << std::endl;
std::cout << "Mask detect result: " << maskConfidence.confidence[index] << std::endl;
std::cout << "Quality predict result: " << qualityConfidence.confidence[index] << std::endl;
// We set the threshold of wearing a mask as 0.85. If it exceeds the threshold, it will be
// judged as wearing a mask. The threshold can be adjusted according to the scene
if (maskConfidence.confidence[index] > 0.85) {
std::cout << "Mask" << std::endl;
} else {
std::cout << "Non Mask" << std::endl;
}
}
ret = HFReleaseImageStream(imageHandle);
if (ret != HSUCCEED) {
printf("Release image stream error: %lu\n", ret);
}
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release session error: %lu\n", ret);
return ret;
}
return 0;
}

View File

@@ -1,95 +0,0 @@
//
// Created by Tunm-Air13 on 2024/4/17.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
// Sample: measure per-call cost of HFExecuteFaceTrack over 100 runs on one image.
// Usage: <pack_path> <source_path>
int main(int argc, char* argv[]) {
    // Check whether the number of parameters is correct
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " <pack_path> <source_path>\n";
        return 1;
    }
    auto packPath = argv[1];
    auto sourcePath = argv[2];
    std::cout << "Pack file Path: " << packPath << std::endl;
    std::cout << "Source file Path: " << sourcePath << std::endl;
    HResult ret;
    // The resource file must be loaded before it can be used
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        std::cout << "Load Resource error: " << ret << std::endl;
        return ret;
    }
    // Enable the functions in the pipeline: mask detection, live detection, and face quality detection
    HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
    // Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without tracking
    HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
    // Maximum number of faces detected
    HInt32 maxDetectNum = 50;
    // Handle of the current face SDK algorithm context
    HFSession session = {0};
    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, 160, -1, &session);
    if (ret != HSUCCEED) {
        std::cout << "Create FaceContext error: " << ret << std::endl;
        return ret;
    }
    // Load a image
    cv::Mat image = cv::imread(sourcePath);
    if (image.empty()) {
        std::cout << "The source entered is not a picture or read error." << std::endl;
        return 1;
    }
    // Prepare an image parameter structure for configuration
    HFImageData imageParam = {0};
    imageParam.data = image.data;               // Data buffer
    imageParam.width = image.cols;              // Target view width
    imageParam.height = image.rows;             // Target view height
    imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
    imageParam.format = HF_STREAM_BGR;          // Data source format
    // Create an image data stream
    HFImageStream imageHandle = {0};
    ret = HFCreateImageStream(&imageParam, &imageHandle);
    if (ret != HSUCCEED) {
        std::cout << "Create ImageStream error: " << ret << std::endl;
        return ret;
    }
    // Run the tracker repeatedly and print the wall-clock cost of each call
    for (int i = 0; i < 100; i++) {
        auto current_time = (double) cv::getTickCount();
        // Execute HF_FaceContextRunFaceTrack captures face information in an image
        HFMultipleFaceData multipleFaceData = {0};
        ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
        if (ret != HSUCCEED) {
            std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
            return ret;
        }
        auto cost = ((double) cv::getTickCount() - current_time) / cv::getTickFrequency() * 1000;
        // Fix: label was misspelled "coes" in the original output
        std::cout << "cost: " << cost << std::endl;
    }
    ret = HFReleaseImageStream(imageHandle);
    if (ret != HSUCCEED) {
        // Cast explicitly: HResult is not guaranteed to match %lu on all platforms
        printf("Release image stream error: %lu\n", (unsigned long)ret);
    }
    // The memory must be freed at the end of the program
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        printf("Release session error: %lu\n", (unsigned long)ret);
        return ret;
    }
    return 0;
}

View File

@@ -1,98 +0,0 @@
//
// Created by Tunm-Air13 on 2024/4/17.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
#include <thread>
// Worker routine: run a single face-track pass on the shared session/stream
// and report the per-thread outcome to stdout.
void runFaceTrack(HFSession session, HFImageStream imageHandle) {
    HFMultipleFaceData faces = {0};
    const auto status = HFExecuteFaceTrack(session, imageHandle, &faces);
    if (status == HSUCCEED) {
        std::cout << "Thread " << std::this_thread::get_id() << " successfully executed HFExecuteFaceTrack.\n";
    } else {
        std::cout << "Thread " << std::this_thread::get_id() << " Execute HFExecuteFaceTrack error: " << status << std::endl;
    }
}
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <source_path>\n";
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Source file Path: " << sourcePath << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Handle of the current face SDK algorithm session
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, -1, -1, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
// Load a image
cv::Mat image = cv::imread(sourcePath);
if (image.empty()) {
std::cout << "The source entered is not a picture or read error." << std::endl;
return 1;
}
// Prepare an image parameter structure for configuration
HFImageData imageParam = {0};
imageParam.data = image.data; // Data buffer
imageParam.width = image.cols; // Target view width
imageParam.height = image.rows; // Target view width
imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
imageParam.format = HF_STREAM_BGR; // Data source format
// Create an image data stream
HFImageStream imageHandle = {0};
ret = HFCreateImageStream(&imageParam, &imageHandle);
if (ret != HSUCCEED) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
// Create and start multiple threads
const size_t numThreads = 10;
std::vector<std::thread> threads;
for (size_t i = 0; i < numThreads; ++i) {
threads.emplace_back(runFaceTrack, session, imageHandle);
}
// Wait for all threads to complete
for (auto& thread : threads) {
if (thread.joinable()) {
thread.join();
}
}
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release FaceContext error: %lu\n", ret);
return ret;
}
return 0;
}

View File

@@ -1,223 +0,0 @@
#include <iostream>
#include "c_api/intypedef.h"
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
#include <unordered_map>
#include <functional>
// Overlay a human-readable label for the active detection mode in the
// top-left corner of the frame.
void drawMode(cv::Mat& frame, HFDetectMode mode) {
    std::string label = "Mode: Unknown";
    if (mode == HF_DETECT_MODE_ALWAYS_DETECT) {
        label = "Mode: Image Detection";
    } else if (mode == HF_DETECT_MODE_LIGHT_TRACK) {
        label = "Mode: Video Detection";
    } else if (mode == HF_DETECT_MODE_TRACK_BY_DETECTION) {
        label = "Mode: Track by Detection";
    }
    cv::putText(frame, label, cv::Point(10, 30), cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(90, 100, 255), 2);
}
// Produce a stable, visually distinct BGR color for a track id by mapping the
// id onto a fixed palette of 100 hues spread around the HSV wheel.
cv::Scalar generateColor(int id) {
    const int paletteSize = 100;
    const int slot = id % paletteSize;
    const int hue = (slot * 360 / paletteSize) % 360;
    // Fully saturated, slightly dimmed single pixel in HSV, converted to BGR.
    cv::Mat hsvPixel(1, 1, CV_8UC3, cv::Scalar(hue, 255, 200));
    cv::Mat bgrPixel;
    cv::cvtColor(hsvPixel, bgrPixel, cv::COLOR_HSV2BGR);
    const cv::Vec3b& c = bgrPixel.at<cv::Vec3b>(0, 0);
    return cv::Scalar(c[0], c[1], c[2]);
}
// Sample: track-by-detection over a video file, drawing boxes, track ids and
// dense landmarks per frame, and writing the result to output_video.avi.
// Usage: <pack_path> <video_path>
int main(int argc, char* argv[]) {
    // Check whether the number of parameters is correct
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " <pack_path> <video_path>\n";
        return 1;
    }
    auto packPath = argv[1];
    auto videoPath = argv[2];
    std::cout << "Pack file Path: " << packPath << std::endl;
    std::cout << "Video file Path: " << videoPath << std::endl;
    HResult ret;
    // The resource file must be loaded before it can be used
    ret = HFLaunchInspireFace(packPath);
    if (ret != HSUCCEED) {
        std::cout << "Load Resource error: " << ret << std::endl;
        return ret;
    }
    // Enable the functions in the pipeline: mask detection, live detection, and face quality detection
    HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_INTERACTION;
    // Video or frame sequence mode uses VIDEO-MODE, which is face detection with tracking
    HFDetectMode detMode = HF_DETECT_MODE_TRACK_BY_DETECTION;
    // Maximum number of faces detected
    HInt32 maxDetectNum = 20;
    // Face detection image input level
    HInt32 detectPixelLevel = 640;
    // fps in tracking-by-detection mode
    HInt32 trackByDetectFps = 20;
    HFSession session = {0};
    // Handle of the current face SDK algorithm context
    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, trackByDetectFps, &session);
    if (ret != HSUCCEED) {
        std::cout << "Create FaceContext error: " << ret << std::endl;
        return ret;
    }
    HFSessionSetTrackPreviewSize(session, detectPixelLevel);
    HFSessionSetFilterMinimumFacePixelSize(session, 0);
    // Open the video file
    cv::VideoCapture cap(videoPath);
    if (!cap.isOpened()) {
        std::cout << "The source entered is not a video or read error." << std::endl;
        return 1;
    }
    // Get the video properties
    int frame_width = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
    int frame_height = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
    int fps = static_cast<int>(cap.get(cv::CAP_PROP_FPS));
    cv::Size frame_size(frame_width, frame_height);
    // Define the codec and create VideoWriter object
    cv::VideoWriter outputVideo("output_video.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), fps, frame_size, true);
    if (!outputVideo.isOpened()) {
        std::cerr << "Could not open the output video for write: output_video.avi\n";
        return -1;
    }
    cv::Mat frame;
    while (cap.read(frame)) {
        // Prepare an image parameter structure for configuration
        HFImageData imageParam = {0};
        imageParam.data = frame.data;               // Data buffer
        imageParam.width = frame.cols;              // Target view width
        imageParam.height = frame.rows;             // Target view height
        imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
        imageParam.format = HF_STREAM_BGR;          // Data source format
        // Create an image data stream
        HFImageStream imageHandle = {0};
        ret = HFCreateImageStream(&imageParam, &imageHandle);
        if (ret != HSUCCEED) {
            std::cout << "Create ImageStream error: " << ret << std::endl;
            return ret;
        }
        // Execute HF_FaceContextRunFaceTrack captures face information in an image
        // NOTE(review): early returns below exit without releasing imageHandle —
        // acceptable for a sample, but confirm whether cleanup is wanted on error.
        double time = (double) cv::getTickCount();
        HFMultipleFaceData multipleFaceData = {0};
        ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
        time = ((double) cv::getTickCount() - time) / cv::getTickFrequency();
        std::cout << "use time" << time << "\n";
        if (ret != HSUCCEED) {
            std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
            return ret;
        }
        // Print the number of faces detected
        auto faceNum = multipleFaceData.detectedNum;
        std::cout << "Num of face: " << faceNum << std::endl;
        // Copy a new image to draw
        cv::Mat draw = frame.clone();
        // Draw detection mode on the frame
        drawMode(draw, detMode);
        if (faceNum > 0) {
            // Run the enabled pipeline functions and query the interaction state
            ret = HFMultipleFacePipelineProcessOptional(session, imageHandle, &multipleFaceData, option);
            if (ret != HSUCCEED)
            {
                std::cout << "HFMultipleFacePipelineProcessOptional error: " << ret << std::endl;
                return ret;
            }
            HFFaceIntereactionState result;
            ret = HFGetFaceIntereactionStateResult(session, &result);
            if (ret != HSUCCEED)
            {
                std::cout << "HFGetFaceIntereactionStateResult error: " << ret << std::endl;
                return ret;
            }
            // Only the first face's eye states are printed in this sample
            std::cout << "Left eye status: " << result.leftEyeStatusConfidence[0] << std::endl;
            std::cout << "Righ eye status: " << result.rightEyeStatusConfidence[0] << std::endl;
        }
        for (int index = 0; index < faceNum; ++index) {
            // Print FaceID, In VIDEO-MODE it is fixed, but it may be lost
            auto trackId = multipleFaceData.trackIds[index];
            // Use OpenCV's Rect to receive face bounding boxes
            auto rect = cv::Rect(multipleFaceData.rects[index].x, multipleFaceData.rects[index].y,
                                 multipleFaceData.rects[index].width, multipleFaceData.rects[index].height);
            cv::rectangle(draw, rect, generateColor(trackId), 3);
            // Add TrackID to the drawing
            cv::putText(draw, "ID: " + std::to_string(trackId), cv::Point(rect.x, rect.y - 10),
                        cv::FONT_HERSHEY_SIMPLEX, 0.5, generateColor(trackId), 2);
            HInt32 numOfLmk;
            HFGetNumOfFaceDenseLandmark(&numOfLmk);
            // Fix: use std::vector instead of a variable-length array; VLAs are a
            // compiler extension and not part of standard C++.
            std::vector<HPoint2f> denseLandmarkPoints(numOfLmk);
            ret = HFGetFaceDenseLandmarkFromFaceToken(multipleFaceData.tokens[index], denseLandmarkPoints.data(), numOfLmk);
            if (ret != HSUCCEED) {
                std::cerr << "HFGetFaceDenseLandmarkFromFaceToken error!!" << std::endl;
                return -1;
            }
            // Draw each landmark as a dot in the track's color
            for (HInt32 i = 0; i < numOfLmk; i++) {
                cv::Point2f p(denseLandmarkPoints[i].x, denseLandmarkPoints[i].y);
                cv::circle(draw, p, 0, generateColor(trackId), 2);
            }
        }
        cv::imshow("w", draw);
        cv::waitKey(1);
        // Write the frame into the file
        outputVideo.write(draw);
        ret = HFReleaseImageStream(imageHandle);
        if (ret != HSUCCEED) {
            // Cast explicitly: HResult is not guaranteed to match %lu on all platforms
            printf("Release image stream error: %lu\n", (unsigned long)ret);
        }
    }
    // Release the VideoCapture and VideoWriter objects
    cap.release();
    outputVideo.release();
    // The memory must be freed at the end of the program
    ret = HFReleaseInspireFaceSession(session);
    if (ret != HSUCCEED) {
        printf("Release session error: %lu\n", (unsigned long)ret);
        return ret;
    }
    return 0;
}

View File

@@ -0,0 +1,30 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/pipeline_module/attribute/face_attribute_adapt.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspirecv/time_spend.h>
#include <log.h>
using namespace inspire;
int main() {
INSPIRE_SET_LOG_LEVEL(ISF_LOG_DEBUG);
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Gundam_RV1106");
auto archive = INSPIRE_LAUNCH->getMArchive();
InspireModel detModel;
auto ret = archive.LoadModel("face_attribute", detModel);
if (ret != SARC_SUCCESS) {
INSPIRE_LOGE("Load %s error: %d", "face_detect_160", ret);
return HERR_ARCHIVE_LOAD_MODEL_FAILURE;
}
FaceAttributePredictAdapt face_attribute;
face_attribute.loadData(detModel, detModel.modelType, false);
auto img = inspirecv::Image::Create("test_res/data/crop/crop.png");
auto result = face_attribute(img);
std::cout << "result: " << result[0] << ", " << result[1] << ", " << result[2] << std::endl;
return 0;
}

View File

@@ -0,0 +1,49 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspirecv/time_spend.h>
using namespace inspire;
// Sample: load the 160px face detector from the archive and benchmark 10
// detection rounds on one image, printing the resulting boxes.
int main() {
    INSPIRE_SET_LOG_LEVEL(ISF_LOG_DEBUG);
    // The resource pack must be loaded before models can be fetched from the archive
    INSPIRE_LAUNCH->Load("test_res/pack/Gundam_RV1106");
    auto archive = INSPIRE_LAUNCH->getMArchive();
    InspireModel detModel;
    auto ret = archive.LoadModel("face_detect_160", detModel);
    if (ret != SARC_SUCCESS) {
        INSPIRE_LOGE("Load %s error: %d", "face_detect_160", ret);
        return HERR_ARCHIVE_LOAD_MODEL_FAILURE;
    }
    // Detector configured for a 160px input level
    FaceDetectAdapt face_detect(160);
    // NOTE(review): "input_size" is read from the model config but never used
    // afterwards; kept because get() may validate the key — confirm before removing.
    std::vector<int> input_size;
    input_size = detModel.Config().get<std::vector<int>>("input_size");
    ret = face_detect.loadData(detModel, detModel.modelType, false);
    if (ret != 0) {
        INSPIRE_LOGE("Load %s error: %d", "face_detect_160", ret);
        return HERR_ARCHIVE_LOAD_MODEL_FAILURE;
    }
    std::cout << "Load model success" << std::endl;
    auto img = inspirecv::Image::Create("data/bulk/kun.jpg");
    // Time 10 detection rounds; Start/Stop bracket each call individually
    inspirecv::TimeSpend time_spend("Detect");
    FaceLocList results;
    for (int i = 0; i < 10; i++) {
        time_spend.Start();
        results = face_detect(img);
        time_spend.Stop();
        std::cout << "================" << std::endl;
    }
    std::cout << time_spend << std::endl;
    std::cout << "Face detect success:" << results.size() << std::endl;
    // Print each detected box as (x1, y1, x2, y2)
    for (auto &face : results) {
        std::cout << "Face detect success:" << face.x1 << " " << face.y1 << " " << face.x2 << " " << face.y2 << std::endl;
    }
    return 0;
}

View File

@@ -0,0 +1,129 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/middleware/nexus_processor/image_processor.h>
#include "log.h"
#include <inspirecv/time_spend.h>
using namespace inspire;
// Sample: exercise the nexus::ImageProcessor (RGA-accelerated when
// ISF_ENABLE_RGA is defined) — resize, color swap, padding, resize+padding —
// timing 10 iterations of each and writing the intermediate images to save_dir/.
int main() {
#if defined(ISF_ENABLE_RGA)
    std::cout << "ISF_ENABLE_RGA is open" << std::endl;
#else
    std::cout << "ISF_ENABLE_RGA is close" << std::endl;
#endif
    auto img = inspirecv::Image::Create("data/bulk/r0.jpg");
    auto processor = nexus::ImageProcessor::Create();
    // --- Resize ---
    uint8_t* resized_data = nullptr;
    int resized_width = 100;
    int resized_height = 100;
    inspirecv::TimeSpend time_spend("RGA resize");
    for (int i = 0; i < 10; i++) {
        time_spend.Start();
        auto ret = processor->Resize(img.Data(), img.Width(), img.Height(), img.Channels(), &resized_data, resized_width, resized_height);
        time_spend.Stop();
        if (ret != 0) {
            INSPIRE_LOGE("RGA resize failed: %d", ret);
            return -1;
        }
    }
    processor->DumpCacheStatus();
    std::cout << time_spend << std::endl;
    auto resized_img = inspirecv::Image::Create(resized_width, resized_height, img.Channels(), resized_data);
    resized_img.Write("save_dir/kun_resized.jpg");
    processor->MarkDone();
    // --- Swap color channels ---
    uint8_t* swapped_data = nullptr;
    inspirecv::TimeSpend swap_time_spend("RGA swap color");
    for (int i = 0; i < 10; i++) {
        swap_time_spend.Start();
        auto ret = processor->SwapColor(resized_img.Data(), resized_img.Width(), resized_img.Height(), resized_img.Channels(), &swapped_data);
        swap_time_spend.Stop();
        // Fix: the result was assigned but silently ignored; check it like the
        // resize loop above does.
        if (ret != 0) {
            INSPIRE_LOGE("RGA swap color failed: %d", ret);
            return -1;
        }
    }
    std::cout << swap_time_spend << std::endl;
    processor->DumpCacheStatus();
    auto swapped_img = inspirecv::Image::Create(resized_img.Width(), resized_img.Height(), resized_img.Channels(), swapped_data);
    swapped_img.Write("save_dir/kun_swapped.jpg");
    processor->MarkDone();
    // --- Padding ---
    uint8_t* padded_data = nullptr;
    int top = 10;
    int bottom = 10;
    int left = 10;
    int right = 10;
    inspirecv::TimeSpend padding_time_spend("RGA padding");
    int padded_width = 0;
    int padded_height = 0;
    for (int i = 0; i < 10; i++) {
        padding_time_spend.Start();
        auto ret = processor->Padding(swapped_img.Data(), swapped_img.Width(), swapped_img.Height(), swapped_img.Channels(), top, bottom, left, right,
                                      &padded_data, padded_width, padded_height);
        padding_time_spend.Stop();
        // Fix: check the previously-ignored result
        if (ret != 0) {
            INSPIRE_LOGE("RGA padding failed: %d", ret);
            return -1;
        }
    }
    processor->DumpCacheStatus();
    std::cout << padding_time_spend << std::endl;
    auto padded_img = inspirecv::Image::Create(padded_width, padded_height, swapped_img.Channels(), padded_data);
    padded_img.Write("save_dir/kun_padded.jpg");
    processor->MarkDone();
    // --- InspireCV (CPU) crop, for comparison ---
    inspirecv::Rect2i rect(30, 30, 70, 70);
    inspirecv::TimeSpend inspirecv_crop_time_spend("InspireCV crop");
    inspirecv::Image inspirecv_cropped_img;
    for (int i = 0; i < 10; i++) {
        inspirecv_crop_time_spend.Start();
        inspirecv_cropped_img = padded_img.Crop(rect);
        inspirecv_crop_time_spend.Stop();
    }
    std::cout << inspirecv_crop_time_spend << std::endl;
    inspirecv_cropped_img.Write("save_dir/kun_cropped_inspirecv.jpg");
    // --- Letterbox: resize with aspect-preserving padding ---
    inspirecv::Image image = inspirecv::Image::Create("data/bulk/r90.jpg");
    uint8_t* padded_cropped_data = nullptr;
    int dst_width = 320;
    int dst_height = 320;
    float scale = 0.0f;
    inspirecv::TimeSpend padded_crop_time_spend("RGA padded and cropped");
    for (int i = 0; i < 10; i++) {
        padded_crop_time_spend.Start();
        auto ret = processor->ResizeAndPadding(image.Data(), image.Width(), image.Height(), image.Channels(), dst_width, dst_height,
                                               &padded_cropped_data, scale);
        padded_crop_time_spend.Stop();
        // Fix: check the previously-ignored result
        if (ret != 0) {
            INSPIRE_LOGE("RGA resize-and-padding failed: %d", ret);
            return -1;
        }
    }
    processor->DumpCacheStatus();
    std::cout << padded_crop_time_spend << std::endl;
    // NOTE(review): uses img.Channels() (first image) rather than image.Channels();
    // both are presumably 3-channel JPEGs — confirm intent.
    auto padded_cropped_img = inspirecv::Image::Create(dst_width, dst_height, img.Channels(), padded_cropped_data);
    padded_cropped_img.Write("save_dir/image_padded_cropped.jpg");
    processor->MarkDone();
    // --- Second resize, upscaling the letterboxed result ---
    uint8_t* resized_data_2 = nullptr;
    int resized_width_2 = 512;
    int resized_height_2 = 512;
    inspirecv::TimeSpend time_spend_2("RGA resize 2");
    for (int i = 0; i < 10; i++) {
        time_spend_2.Start();
        auto ret = processor->Resize(padded_cropped_img.Data(), padded_cropped_img.Width(), padded_cropped_img.Height(),
                                     padded_cropped_img.Channels(), &resized_data_2, resized_width_2, resized_height_2);
        time_spend_2.Stop();
        // Fix: check the previously-ignored result
        if (ret != 0) {
            INSPIRE_LOGE("RGA resize 2 failed: %d", ret);
            return -1;
        }
    }
    std::cout << time_spend_2 << std::endl;
    processor->DumpCacheStatus();
    auto resized_img_2 = inspirecv::Image::Create(resized_width_2, resized_height_2, padded_cropped_img.Channels(), resized_data_2);
    resized_img_2.Write("save_dir/image_padded_cropped_resized_2.jpg");
    processor->MarkDone();
    return 0;
}

View File

@@ -0,0 +1,116 @@
#include <inspirecv/inspirecv.h>
#include "inspireface/initialization_module/launch.h"
#include "inspireface/middleware/model_archive/inspire_archive.h"
#include "inspireface/track_module/face_detect/face_detect_adapt.h"
#include "inspireface/track_module/landmark/face_landmark_adapt.h"
#include "inspireface/track_module/quality/face_pose_quality_adapt.h"
#include "inspireface/recognition_module/extract/extract_adapt.h"
// Benchmark and smoke-test the 160px face detector: loads the model from the
// global archive, times 1000 detection rounds on one image, then draws the
// detected boxes and writes the annotated image to im.jpg.
void test_face_detect() {
    inspire::InspireModel model;
    // Fetch the detector weights from the already-loaded resource archive
    INSPIRE_LAUNCH->getMArchive().LoadModel("face_detect_160", model);
    auto input_size = 160;
    inspire::FaceDetectAdapt faceDetectAdapt(input_size);
    faceDetectAdapt.loadData(model, model.modelType);
    inspirecv::Image image = inspirecv::Image::Create("test_res/data/bulk/kun.jpg");
    inspire::FaceLocList faces;
    inspirecv::TimeSpend timeSpend("Face Detect@" + std::to_string(input_size));
    // Start/Stop bracket each call, so the accumulated time is per-invocation
    for (int i = 0; i < 1000; i++) {
        timeSpend.Start();
        faces = faceDetectAdapt(image);
        timeSpend.Stop();
    }
    std::cout << timeSpend << std::endl;
    std::cout << "faces size: " << faces.size() << std::endl;
    // Convert each (x1, y1, x2, y2) detection into a rect and draw it in red (BGR)
    for (auto &face : faces) {
        inspirecv::Rect2i rect = inspirecv::Rect2i::Create(face.x1, face.y1, face.x2 - face.x1, face.y2 - face.y1);
        image.DrawRect(rect, {0, 0, 255});
    }
    image.Write("im.jpg");
}
// Benchmark and smoke-test the dense-landmark model: loads it from the global
// archive, runs it 10 times on a 112px crop, then plots the landmarks and
// writes the annotated image to lm.jpg.
void test_landmark() {
    inspire::InspireModel model;
    INSPIRE_LAUNCH->getMArchive().LoadModel("landmark", model);
    auto input_size = 112;
    inspire::FaceLandmarkAdapt landmarkAdapt(input_size);
    landmarkAdapt.loadData(model, model.modelType);
    inspirecv::Image image = inspirecv::Image::Create("test_res/data/crop/crop.png");
    // The landmark model expects a square input of input_size pixels
    image = image.Resize(input_size, input_size);
    std::vector<float> lmk;
    inspirecv::TimeSpend timeSpend("Landmark@" + std::to_string(input_size));
    // NOTE(review): Start/Stop wrap all 10 runs here (total time), unlike
    // test_face_detect which times each call — presumably intentional; confirm.
    timeSpend.Start();
    for (int i = 0; i < 10; i++) {
        lmk = landmarkAdapt(image);
    }
    timeSpend.Stop();
    std::cout << timeSpend << std::endl;
    // Landmarks come back as normalized (x, y) pairs; scale by input_size to plot
    for (int i = 0; i < inspire::FaceLandmarkAdapt::NUM_OF_LANDMARK; i++) {
        auto p = inspirecv::Point2i::Create(lmk[i * 2] * input_size, lmk[i * 2 + 1] * input_size);
        image.DrawCircle(p, 5, {0, 0, 255});
    }
    image.Write("lm.jpg");
}
void test_quality() {
inspire::InspireModel model;
INSPIRE_LAUNCH->getMArchive().LoadModel("pose_quality", model);
auto input_size = 96;
inspire::FacePoseQualityAdapt poseQualityAdapt;
poseQualityAdapt.loadData(model, model.modelType);
inspirecv::Image image = inspirecv::Image::Create("test_res/data/crop/crop.png");
image = image.Resize(input_size, input_size);
inspire::FacePoseQualityAdaptResult quality;
inspirecv::TimeSpend timeSpend("Pose Quality@" + std::to_string(input_size));
timeSpend.Start();
for (int i = 0; i < 10; i++) {
quality = poseQualityAdapt(image);
}
timeSpend.Stop();
std::cout << timeSpend << std::endl;
std::cout << "quality: " << quality.pitch << ", " << quality.yaw << ", " << quality.roll << std::endl;
for (int i = 0; i < quality.lmk.size(); i++) {
std::cout << "lmk: " << quality.lmk[i].GetX() << ", " << quality.lmk[i].GetY() << std::endl;
auto p = inspirecv::Point2i::Create(quality.lmk[i].GetX(), quality.lmk[i].GetY());
image.DrawCircle(p, 3, {0, 0, 255});
}
image.Write("qu.jpg");
}
void test_feature() {
inspire::InspireModel model;
INSPIRE_LAUNCH->getMArchive().LoadModel("feature", model);
auto input_size = 112;
inspire::ExtractAdapt extractAdapt;
extractAdapt.loadData(model, model.modelType);
inspirecv::Image image = inspirecv::Image::Create("test_res/data/crop/crop.png");
image = image.Resize(input_size, input_size);
float norm;
bool normalize = true;
inspirecv::TimeSpend timeSpend("Extract@" + std::to_string(input_size));
timeSpend.Start();
inspire::Embedded feature;
for (int i = 0; i < 10; i++) {
feature = extractAdapt(image, norm, normalize);
}
timeSpend.Stop();
std::cout << timeSpend << std::endl;
std::cout << "feature: " << feature.size() << std::endl;
}
// Entry point: load the model archive once, then run the face-detect
// benchmark. The other module benchmarks are left disabled; enable as needed.
int main() {
    const std::string archivePath = "test_res/pack/Pikachu_Apple";
    INSPIRE_LAUNCH->Load(archivePath);
    test_face_detect();
    // test_landmark();
    // test_quality();
    // test_feature();
    return 0;
}

View File

@@ -0,0 +1,93 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspireface/pipeline_module/face_pipeline_module.h>
#include <inspireface/common/face_data/face_serialize_tools.h>
#include <inspireface/feature_hub/feature_hub_db.h>
using namespace inspire;
// Canned face-feature vector used below as both the enrolled feature and the
// search query. 512 float values; appears to be one 128-value pattern tiled
// four times (starts repeating at the second "0.0706566") — TODO confirm
// against the embedding size of the packed model.
static std::vector<float> FT = {
0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963,
-0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179,
0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961,
-0.0675362, -0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942,
-0.00625798, 0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462,
0.0465634, 0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792,
-0.102007, 0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103,
-0.00694243, 0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638,
0.00682446, 0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823,
0.0511028, -0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569,
-0.0225608, -0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047,
0.056139, -0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856, 0.0706566, 0.00640248, 0.0418103, -0.00597861,
0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963, -0.00683422, -0.0338589, 0.0533989, -0.0371725,
0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179, 0.0164787, -0.00732871, -0.0458209, -0.0100137,
-0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961, -0.0675362, -0.0117453, 0.0658745, -0.0694139,
-0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942, -0.00625798, 0.00748682, 0.0533557, 0.0314002,
-0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462, 0.0465634, 0.0627244, 0.0449248, -0.037054,
-0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792, -0.102007, 0.0649219, 0.0630833, 0.0421069,
0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103, -0.00694243, 0.0366775, 0.0672882, 0.0122419,
-0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638, 0.00682446, 0.0262906, -0.0407654, -0.0144264,
-0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823, 0.0511028, -0.0310699, -0.0322141, 0.00996936,
0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569, -0.0225608, -0.0332198, 0.00102291, 0.108814,
-0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047, 0.056139, -0.0166783, -0.0803042, -0.014245,
-0.0476374, 0.048495, 0.0378856, 0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162,
-0.0080779, -0.0550556, 0.0229963, -0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743,
-0.0128782, 0.0935529, 0.0588179, 0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471,
-0.00481095, 0.0266868, 0.0712961, -0.0675362, -0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902,
0.00192449, -0.0593105, 0.0191942, -0.00625798, 0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575,
-0.0180252, 0.0150318, -0.0686462, 0.0465634, 0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842,
-0.0161973, 0.0319588, 0.0112792, -0.102007, 0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046,
0.071994, -0.0272229, 0.0167103, -0.00694243, 0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025,
0.000983093, -0.00776073, -0.0268638, 0.00682446, 0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019,
0.000502882, 0.0496892, 0.0126823, 0.0511028, -0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467,
0.0419618, -0.00358901, -0.0309569, -0.0225608, -0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584,
0.0721224, -0.0795082, 0.0340047, 0.056139, -0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856, 0.0706566,
0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963, -0.00683422,
-0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179, 0.0164787,
-0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961, -0.0675362,
-0.0117453, 0.0658745, -0.0694139, -0.00704822, -0.0237313, 0.0209365, 0.0131902, 0.00192449, -0.0593105, 0.0191942, -0.00625798,
0.00748682, 0.0533557, 0.0314002, -0.0627113, 0.0827862, 0.00336722, -0.0191575, -0.0180252, 0.0150318, -0.0686462, 0.0465634,
0.0627244, 0.0449248, -0.037054, -0.0486668, 0.040752, 0.0143315, -0.0763842, -0.0161973, 0.0319588, 0.0112792, -0.102007,
0.0649219, 0.0630833, 0.0421069, 0.0519043, -0.084082, 0.0249516, 0.023046, 0.071994, -0.0272229, 0.0167103, -0.00694243,
0.0366775, 0.0672882, 0.0122419, -0.0233413, -0.0144258, -0.012853, -0.0202025, 0.000983093, -0.00776073, -0.0268638, 0.00682446,
0.0262906, -0.0407654, -0.0144264, -0.0310807, 0.0596711, 0.0238081, -0.0138019, 0.000502882, 0.0496892, 0.0126823, 0.0511028,
-0.0310699, -0.0322141, 0.00996936, 0.0675392, -0.0164277, 0.0930009, -0.037467, 0.0419618, -0.00358901, -0.0309569, -0.0225608,
-0.0332198, 0.00102291, 0.108814, -0.0831313, 0.048208, -0.0277542, -0.061584, 0.0721224, -0.0795082, 0.0340047, 0.056139,
-0.0166783, -0.0803042, -0.014245, -0.0476374, 0.048495, 0.0378856,
};
// Exercises FeatureHubDB in pure in-memory mode: enroll the canned feature
// vector FT under a manual primary key, then search with the identical vector.
// Fixes: removed the unused `expansion_path` local; cast int64_t values passed
// to the printf-style loggers to `long long` so "%lld" is portable (on LP64
// platforms int64_t is `long`, and "%lld" with a `long` argument is UB).
int main() {
    INSPIRE_LAUNCH->Load("test_res/pack/Pikachu");

    DatabaseConfiguration configuration;
    configuration.primary_key_mode = PrimaryKeyMode::MANUAL_INPUT;
    configuration.enable_persistence = false;  // keep everything in memory
    configuration.recognition_threshold = 0.48f;
    FEATURE_HUB_DB->EnableHub(configuration);

    // Enroll FT under the manually chosen id 10086.
    int64_t result_id = 0;
    auto ret = FEATURE_HUB_DB->FaceFeatureInsert(FT, 10086, result_id);
    if (ret != HSUCCEED) {
        INSPIRE_LOGE("Failed to insert face feature");
        INSPIRE_LOGI("result id: %lld", static_cast<long long>(result_id));
    } else {
        INSPIRE_LOGI("Insert face feature success, result_id: %lld", static_cast<long long>(result_id));
    }

    // Search with the same vector; an exact copy should clear the 0.48
    // recognition threshold and return the enrolled id.
    // NOTE(review): the third argument presumably selects most-similar
    // search — confirm against SearchFaceFeature's declaration.
    FaceSearchResult search_result;
    ret = FEATURE_HUB_DB->SearchFaceFeature(FT, search_result, true);
    if (ret != HSUCCEED) {
        INSPIRE_LOGE("Failed to search face feature");
    } else {
        INSPIRE_LOGI("Search face feature success, result_id: %lld", static_cast<long long>(search_result.id));
    }
    return 0;
}

View File

@@ -0,0 +1,35 @@
#include <iostream>
#include <inspirecv/inspirecv.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include "inspireface/track_module/landmark/face_landmark_adapt.h"
int main() {
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Pikachu-t4");
auto archive = INSPIRE_LAUNCH->getMArchive();
inspire::InspireModel lmkModel;
auto ret = archive.LoadModel("landmark", lmkModel);
if (ret != 0) {
INSPIRE_LOGE("Load %s error: %d", "landmark", ret);
return -1;
}
inspire::FaceLandmarkAdapt lmk;
lmk.loadData(lmkModel, lmkModel.modelType);
auto image = inspirecv::Image::Create("test_res/data/crop/crop.png");
auto data = image.Resize(112, 112);
auto lmk_out = lmk(data);
std::vector<inspirecv::Point2i> landmarks_output(inspire::FaceLandmarkAdapt::NUM_OF_LANDMARK);
for (int i = 0; i < inspire::FaceLandmarkAdapt::NUM_OF_LANDMARK; ++i) {
float x = lmk_out[i * 2 + 0] * image.Width();
float y = lmk_out[i * 2 + 1] * image.Height();
landmarks_output[i] = inspirecv::Point<int>(x, y);
}
for (int i = 0; i < landmarks_output.size(); ++i) {
image.DrawCircle(landmarks_output[i], 5, {0, 0, 255});
}
image.Write("crop_lmk.png");
}

View File

@@ -0,0 +1,48 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspireface/pipeline_module/face_pipeline_module.h>
#include <inspireface/common/face_data/face_serialize_tools.h>
using namespace inspire;
// Runs the light-track detector once on a rotated image, then pushes the
// first detected face through the pipeline modules (mask, RGB liveness,
// attribute, interaction) and prints the cached eye-state results.
// Fixes: removed the dead per-iteration `auto show = image.Clone();` — its
// only consumer was a broken commented-out Show() call — so the loop no
// longer clones the full image for nothing.
int main() {
    std::string expansion_path = "";
    INSPIRE_LAUNCH->Load("test_res/pack/Pikachu");
    auto archive = INSPIRE_LAUNCH->getMArchive();

    auto mode = inspire::DetectModuleMode::DETECT_MODE_LIGHT_TRACK;
    FaceTrackModule tracker(mode, 10, 20, 320, -1);
    tracker.Configuration(archive, expansion_path);
    FacePipelineModule pipe(archive, true, true, true, true);

    // NOTE(review): "r90.jpg" is presumably stored rotated by 90 degrees,
    // matching the ROTATION_90 mode set below — confirm with the test asset.
    auto image = inspirecv::Image::Create("test_res/data/bulk/r90.jpg");
    inspirecv::InspireImageProcess processor;
    processor.SetDataBuffer(image.Data(), image.Height(), image.Width());
    processor.SetDataFormat(inspirecv::DATA_FORMAT::BGR);
    processor.SetRotationMode(inspirecv::ROTATION_MODE::ROTATION_90);

    std::vector<FaceProcessFunctionOption> methods = {PROCESS_MASK, PROCESS_RGB_LIVENESS, PROCESS_ATTRIBUTE, PROCESS_INTERACTION};
    for (int i = 0; i < 1; i++) {
        tracker.UpdateStream(processor);
        auto faces = tracker.trackingFace;
        int index = 0;
        if (faces.size() > 0) {
            auto &face = faces[index];
            auto hyper_face = FaceObjectInternalToHyperFaceData(face);
            PrintHyperFaceDataDetail(hyper_face);
            std::cout << face.getTransMatrix() << std::endl;
            for (auto method : methods) {
                pipe.Process(processor, hyper_face, method);
            }
            std::cout << "eyes status: " << pipe.eyesStatusCache[0] << " " << pipe.eyesStatusCache[1] << std::endl;
        }
    }
    return 0;
}

View File

@@ -0,0 +1,39 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
using namespace inspire;
int main() {
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Pikachu");
auto archive = INSPIRE_LAUNCH->getMArchive();
auto mode = inspire::DetectModuleMode::DETECT_MODE_ALWAYS_DETECT;
FaceTrackModule tracker(mode, 10, 20, 320, -1);
tracker.Configuration(archive, expansion_path);
auto image = inspirecv::Image::Create("test_res/data/bulk/r0.jpg");
inspirecv::InspireImageProcess processor;
processor.SetDataBuffer(image.Data(), image.Height(), image.Width());
processor.SetDataFormat(inspirecv::DATA_FORMAT::BGR);
processor.SetRotationMode(inspirecv::ROTATION_MODE::ROTATION_0);
for (int i = 0; i < 100; i++) {
auto show = image.Clone();
tracker.UpdateStream(processor);
auto faces = tracker.trackingFace;
int index = 0;
if (faces.size() > 0) {
auto &face = faces[index];
for (auto &p : face.high_result.lmk) {
show.DrawCircle(p.As<int>(), 5, {0, 255, 0});
}
for (auto &p : face.landmark_) {
show.DrawCircle(p.As<int>(), 5, {0, 0, 255});
}
}
show.Show("faces", 0);
}
return 0;
}