This commit is contained in:
tunm
2025-05-22 16:07:26 +08:00
parent efb5639ec6
commit 7e25e4e482
168 changed files with 6434 additions and 2527 deletions

View File

@@ -1,10 +1,12 @@
# Minimum CMake version for the sample project.
# NOTE(review): the diff rendering left the pre-change requirement
# `cmake_minimum_required(VERSION 3.10)` directly above the updated one;
# only the post-change 3.20 requirement is kept here.
cmake_minimum_required(VERSION 3.20)
project(InspireFaceSample)

# Optional sample groups, both OFF by default.
option(ISF_BUILD_SAMPLE_CLUTTERED "Whether to compile the cluttered sample program (debug code during development)" OFF)
option(ISF_BUILD_SAMPLE_INTERNAL "Whether to compile the internal sample program (debug code during development)" OFF)

# NOTE(review): directory-scoped include_directories leaks these paths to every
# target defined below; prefer target_include_directories per target when this
# file is next refactored.
include_directories(${SRC_DIR})
include_directories(${SRC_DIR}/inspireface/c_api)
include_directories(${SRC_DIR}/inspireface/include)
if (ISF_ENABLE_RKNN AND ISF_RKNPU_MAJOR STREQUAL "rknpu1")
set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${ISF_RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
@@ -24,75 +26,129 @@ if (ISF_ENABLE_RKNN AND ISF_RKNPU_MAJOR STREQUAL "rknpu2" AND ISF_RK_COMPILER_TY
set(ext rknnrt dl)
endif ()
add_executable(Leak api/leak.cpp)
add_executable(Leak api/leak.c)
target_link_libraries(Leak InspireFace ${ext})
set_target_properties(Leak PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
# # Examples of face detection and tracking
add_executable(FaceTrackSample api/sample_face_track.cpp)
add_executable(FaceTrackSample api/sample_face_track.c)
target_link_libraries(FaceTrackSample InspireFace ${ext})
set_target_properties(FaceTrackSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(FaceTrackBenchmarkSample api/sample_face_track_benchmark.cpp)
add_executable(FaceTrackBenchmarkSample api/sample_face_track_benchmark.c)
target_link_libraries(FaceTrackBenchmarkSample InspireFace ${ext})
set_target_properties(FaceTrackBenchmarkSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
# Examples of face recognition
add_executable(FaceComparisonSample api/sample_face_comparison.cpp)
add_executable(FaceComparisonSample api/sample_face_comparison.c)
target_link_libraries(FaceComparisonSample InspireFace ${ext})
set_target_properties(FaceComparisonSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(FaceFeatureHubSample api/sample_feature_hub.cpp)
add_executable(FaceFeatureHubSample api/sample_feature_hub.c)
target_link_libraries(FaceFeatureHubSample InspireFace ${ext})
set_target_properties(FaceFeatureHubSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(FaceLoadReloadSample api/sample_load_reload.cpp)
add_executable(FaceLoadReloadSample api/sample_load_reload.c)
target_link_libraries(FaceLoadReloadSample InspireFace ${ext})
set_target_properties(FaceLoadReloadSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(FaceTrackerSample source/tracker_sample.cpp)
target_link_libraries(FaceTrackerSample InspireFace ${ext})
set_target_properties(FaceTrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
add_executable(CppResourcePoolSample api/sample_cpp_resource_pool.cpp)
target_link_libraries(CppResourcePoolSample InspireFace ${ext})
set_target_properties(CppResourcePoolSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(ExpansionLoadSample source/expansion_load.cpp)
target_link_libraries(ExpansionLoadSample InspireFace ${ext})
set_target_properties(ExpansionLoadSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
add_executable(FaceCrudSample api/sample_face_crud.c)
target_link_libraries(FaceCrudSample InspireFace ${ext})
set_target_properties(FaceCrudSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(FaceTrackPipelineSample source/tracker_pipeline.cpp)
target_link_libraries(FaceTrackPipelineSample InspireFace ${ext})
set_target_properties(FaceTrackPipelineSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
add_executable(FeatureHubPersistenceSample api/sample_feature_hub_persistence.c)
target_link_libraries(FeatureHubPersistenceSample InspireFace ${ext})
set_target_properties(FeatureHubPersistenceSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/api/"
)
add_executable(FeatureHubSample source/feature_hub_sample.cpp)
target_link_libraries(FeatureHubSample InspireFace ${ext})
set_target_properties(FeatureHubSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
# --- C++ API ---
# C++-API demo targets: each links the InspireFace library plus the
# platform-specific extras collected in ${ext}, and its binary is emitted
# under <build>/sample/cpp_api/.
# Session-based face tracking demo.
add_executable(CppSessionSample cpp_api/cpp_sample_face_track.cpp)
target_link_libraries(CppSessionSample InspireFace ${ext})
set_target_properties(CppSessionSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
# Face comparison demo.
add_executable(CppFaceComparisonSample cpp_api/cpp_sample_face_comparison.cpp)
target_link_libraries(CppFaceComparisonSample InspireFace ${ext})
set_target_properties(CppFaceComparisonSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
# Face CRUD demo.
add_executable(CppFaceCrudSample cpp_api/cpp_sample_face_crud.cpp)
target_link_libraries(CppFaceCrudSample InspireFace ${ext})
set_target_properties(CppFaceCrudSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
# Affine-transform demo.
add_executable(CppSampleAffine cpp_api/cpp_sample_affine.cpp)
target_link_libraries(CppSampleAffine InspireFace ${ext})
set_target_properties(CppSampleAffine PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
# InspireCV image-handling demo.
add_executable(CppSampleInspireCV cpp_api/cpp_sample_inspirecv.cpp)
target_link_libraries(CppSampleInspireCV InspireFace ${ext})
set_target_properties(CppSampleInspireCV PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cpp_api/"
)
# Internal/diagnostic samples, compiled only when ISF_BUILD_SAMPLE_INTERNAL=ON
# (see the option() near the top of this file). All binaries land directly in
# <build>/sample/.
if(ISF_BUILD_SAMPLE_INTERNAL)
# Low-level tracker demo.
add_executable(FaceTrackerSample source/tracker_sample.cpp)
target_link_libraries(FaceTrackerSample InspireFace ${ext})
set_target_properties(FaceTrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Resource-pack expansion loading demo.
add_executable(ExpansionLoadSample source/expansion_load.cpp)
target_link_libraries(ExpansionLoadSample InspireFace ${ext})
set_target_properties(ExpansionLoadSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Tracker + pipeline demo.
add_executable(FaceTrackPipelineSample source/tracker_pipeline.cpp)
target_link_libraries(FaceTrackPipelineSample InspireFace ${ext})
set_target_properties(FaceTrackPipelineSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# FeatureHub demo.
add_executable(FeatureHubSample source/feature_hub_sample.cpp)
target_link_libraries(FeatureHubSample InspireFace ${ext})
set_target_properties(FeatureHubSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Landmark demo.
add_executable(LandmarkSample source/landmark_sample.cpp)
target_link_libraries(LandmarkSample InspireFace ${ext})
set_target_properties(LandmarkSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
endif()
# Platform watershed
if (ISF_BUILD_LINUX_ARM7 OR ISF_BUILD_LINUX_AARCH64)
@@ -108,194 +164,194 @@ if(ISF_RK_DEVICE_TYPE STREQUAL "RV1106")
add_executable(FaceTrackSampleRV1106 rv1106/face_detect.cpp)
target_link_libraries(FaceTrackSampleRV1106 InspireFace ${ext})
set_target_properties(FaceTrackSampleRV1106 PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/internal/"
)
add_executable(FaceAttributeSampleRV1106 rv1106/face_attribute.cpp)
target_link_libraries(FaceAttributeSampleRV1106 InspireFace ${ext})
set_target_properties(FaceAttributeSampleRV1106 PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/internal/"
)
endif()
add_executable(NexusImageSample rv1106/rga_image.cpp)
target_link_libraries(NexusImageSample InspireFace ${ext})
set_target_properties(NexusImageSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
set_target_properties(NexusImageSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/internal/"
)
# These sample programs are debugging and testing code left behind by developers during the development process.
# They are cluttered and have not been organized, or similar functionalities have already been organized in the standard samples.
# You can ignore them.
if (ISF_BUILD_SAMPLE_CLUTTERED)
if (NOT ISF_BUILD_LINUX_ARM7 AND NOT ISF_BUILD_LINUX_AARCH64)
# =======================InspireFace Sample===========================
add_executable(TrackerSample cluttered/standard/tracker_sample.cpp)
target_link_libraries(TrackerSample InspireFace)
set_target_properties(TrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ContextSample cluttered/standard/context_sample.cpp)
target_link_libraries(ContextSample InspireFace)
# These sample programs are debugging and testing code left behind by developers during the development process.
# They are cluttered and have not been organized, or similar functionalities have already been organized in the standard samples.
# You can ignore them.
if (ISF_BUILD_SAMPLE_CLUTTERED)
if (NOT ISF_BUILD_LINUX_ARM7 AND NOT ISF_BUILD_LINUX_AARCH64)
# =======================InspireFace Sample===========================
add_executable(TrackerSample cluttered/standard/tracker_sample.cpp)
target_link_libraries(TrackerSample InspireFace)
set_target_properties(TrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(ContextSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ContextSample cluttered/standard/context_sample.cpp)
target_link_libraries(ContextSample InspireFace)
add_executable(TestSample cluttered/standard/test_sample.cpp)
target_link_libraries(TestSample InspireFace)
set_target_properties(ContextSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(TestSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(NetSample cluttered/standard/net_sample.cpp)
target_link_libraries(NetSample InspireFace)
set_target_properties(NetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(TestSample cluttered/standard/test_sample.cpp)
target_link_libraries(TestSample InspireFace)
add_executable(RecSample cluttered/standard/rec_sample.cpp)
target_link_libraries(RecSample InspireFace)
set_target_properties(TestSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(NetSample cluttered/standard/net_sample.cpp)
target_link_libraries(NetSample InspireFace)
set_target_properties(NetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(RecSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(RecSample cluttered/standard/rec_sample.cpp)
target_link_libraries(RecSample InspireFace)
add_executable(BMSample cluttered/standard/bm_sample.cpp)
target_link_libraries(BMSample InspireFace)
set_target_properties(RecSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(BMSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(BMSample cluttered/standard/bm_sample.cpp)
target_link_libraries(BMSample InspireFace)
else()
set_target_properties(BMSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# =======================RK Temporary test category===========================
if (ISF_ENABLE_RKNN)
set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/${ISF_RKNPU_MAJOR}/runtime/${ISF_RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
message("Enable RKNN Inference")
link_directories(${ISF_RKNN_API_LIB})
# Face detection
add_executable(RKFaceDetSample cluttered/rk_sample/rk_face_det_sample.cpp)
target_link_libraries(RKFaceDetSample InspireFace rknn_api dl)
set_target_properties(RKFaceDetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Simple network test
add_executable(RKSimpleNetSample cluttered/rk_sample/rk_simple_net_sample.cpp)
target_link_libraries(RKSimpleNetSample InspireFace rknn_api dl)
set_target_properties(RKSimpleNetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Face recognize
add_executable(RKFaceRecSample cluttered/rk_sample/rk_face_recognize_sample.cpp)
target_link_libraries(RKFaceRecSample InspireFace rknn_api dl)
set_target_properties(RKFaceRecSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Tracking module
add_executable(RKTrackerSample cluttered/rk_sample/rk_tracker_sample.cpp)
target_link_libraries(RKTrackerSample InspireFace rknn_api dl)
set_target_properties(RKTrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Debug
add_executable(DebugRKRec cluttered/rk_sample/debug_rk_rec.cpp)
target_link_libraries(DebugRKRec InspireFace rknn_api dl)
set_target_properties(DebugRKRec PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
else()
add_executable(ArchTest cluttered/standard/archive_test.cpp)
target_link_libraries(ArchTest InspireFace)
# =======================RK Temporary test category===========================
set_target_properties(ArchTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
if (ISF_ENABLE_RKNN)
set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/${ISF_RKNPU_MAJOR}/runtime/${ISF_RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
message("Enable RKNN Inference")
link_directories(${ISF_RKNN_API_LIB})
# Face detection
add_executable(RKFaceDetSample cluttered/rk_sample/rk_face_det_sample.cpp)
target_link_libraries(RKFaceDetSample InspireFace rknn_api dl)
set_target_properties(RKFaceDetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Simple network test
add_executable(RKSimpleNetSample cluttered/rk_sample/rk_simple_net_sample.cpp)
target_link_libraries(RKSimpleNetSample InspireFace rknn_api dl)
set_target_properties(RKSimpleNetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Face recognize
add_executable(RKFaceRecSample cluttered/rk_sample/rk_face_recognize_sample.cpp)
target_link_libraries(RKFaceRecSample InspireFace rknn_api dl)
set_target_properties(RKFaceRecSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Tracking module
add_executable(RKTrackerSample cluttered/rk_sample/rk_tracker_sample.cpp)
target_link_libraries(RKTrackerSample InspireFace rknn_api dl)
set_target_properties(RKTrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Debug
add_executable(DebugRKRec cluttered/rk_sample/debug_rk_rec.cpp)
target_link_libraries(DebugRKRec InspireFace rknn_api dl)
set_target_properties(DebugRKRec PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ArchTest cluttered/standard/archive_test.cpp)
target_link_libraries(ArchTest InspireFace)
set_target_properties(ArchTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
endif()
endif()
endif()
# Tracking module
add_executable(SQLiteTest cluttered/standard/test_sqlite_sample.cpp)
target_link_libraries(SQLiteTest InspireFace)
# Tracking module
add_executable(SQLiteTest cluttered/standard/test_sqlite_sample.cpp)
target_link_libraries(SQLiteTest InspireFace)
set_target_properties(SQLiteTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(SQLiteTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
if (ISF_ENABLE_RKNN)
set(DEPEND rknn_api dl)
endif ()
if (ISF_ENABLE_RKNN)
set(DEPEND rknn_api dl)
endif ()
# C_API Demo
add_executable(CAPISample cluttered/standard/c_api_sample.cpp)
target_link_libraries(CAPISample InspireFace ${DEPEND})
# C_API Demo
add_executable(CAPISample cluttered/standard/c_api_sample.cpp)
target_link_libraries(CAPISample InspireFace ${DEPEND})
set_target_properties(CAPISample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(CAPISample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# C_API Demo
add_executable(LoopTracker cluttered/standard/loop_tracker.cpp)
target_link_libraries(LoopTracker InspireFace ${DEPEND})
# C_API Demo
add_executable(LoopTracker cluttered/standard/loop_tracker.cpp)
target_link_libraries(LoopTracker InspireFace ${DEPEND})
set_target_properties(LoopTracker PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(LoopTracker PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ArchTracker cluttered/standard/archive_tracker.cpp)
target_link_libraries(ArchTracker InspireFace)
add_executable(ArchTracker cluttered/standard/archive_tracker.cpp)
target_link_libraries(ArchTracker InspireFace)
set_target_properties(ArchTracker PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
set_target_properties(ArchTracker PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ErrorTest cluttered/standard/error_test.cpp)
target_link_libraries(ErrorTest InspireFace)
add_executable(ErrorTest cluttered/standard/error_test.cpp)
target_link_libraries(ErrorTest InspireFace)
set_target_properties(ErrorTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
endif ()
set_target_properties(ErrorTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
endif ()
endif()
# Configure-time summary of the sample build options.
message(STATUS ">>>>>>>>>>>>>")
message(STATUS "InspireFace Sample:")
message(STATUS "\t ISF_BUILD_SAMPLE_CLUTTERED: ${ISF_BUILD_SAMPLE_CLUTTERED}")

# Install c bin
# NOTE(review): removed the stale "# Install bin" heading (diff residue
# superseded by "# Install c bin") and the four commented-out install() entries
# for targets that no longer exist in this file.
# NOTE(review): install(DESTINATION) is already interpreted relative to
# CMAKE_INSTALL_PREFIX; prefixing it explicitly makes the path absolute and
# defeats DESTDIR staging — consider plain `DESTINATION sample`.
install(TARGETS Leak RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceTrackSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceComparisonSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceCrudSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
# Install cpp bin
install(TARGETS CppSessionSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS CppFaceComparisonSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS CppFaceCrudSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)

View File

@@ -2,9 +2,10 @@
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <stdlib.h>

/*
 * Minimal allocation sample for the "Leak" target.
 * NOTE(review): the diff rendering kept BOTH the pre-change C++ line
 * (`char *n = new char[1024];`) and the post-change C line; only the
 * post-change malloc() line is valid C and is kept here.
 */
int main() {
    /* The 1 KiB allocation is never freed — presumably intentional, since the
     * target is named "Leak" and appears aimed at leak-detection tooling;
     * confirm before "fixing" by adding free(n). */
    char *n = malloc(1024);
    (void)n; /* suppress unused-variable warnings */
    return 0;
}

View File

@@ -0,0 +1,100 @@
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/include/inspireface/session.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/middleware/thread/resource_pool.h>
#include <inspireface/include/inspireface/spend_timer.h>
#include <thread>
int main(int argc, char** argv) {
if (argc != 5) {
std::cerr << "Usage: " << argv[0] << " <model_path> <image_path> <loop_count> <thread_num>" << std::endl;
return -1;
}
std::string model_path = argv[1];
std::string image_path = argv[2];
int loop = std::stoi(argv[3]);
int thread_num = std::stoi(argv[4]);
if (thread_num > 10) {
std::cerr << "Error: thread_num cannot be greater than 10" << std::endl;
return -1;
}
if (loop < 1000) {
std::cerr << "Error: loop count must be at least 1000" << std::endl;
return -1;
}
INSPIREFACE_CONTEXT->Load(model_path);
inspirecv::Image image = inspirecv::Image::Create(image_path);
inspirecv::FrameProcess process =
inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
inspire::parallel::ResourcePool<inspire::Session> sessionPool(thread_num, [](inspire::Session& session) {
});
for (int i = 0; i < thread_num; ++i) {
inspire::CustomPipelineParameter param;
param.enable_recognition = true;
param.enable_liveness = true;
param.enable_mask_detect = true;
param.enable_face_attribute = true;
param.enable_face_quality = true;
inspire::Session session = inspire::Session::Create(inspire::DetectModuleMode::DETECT_MODE_ALWAYS_DETECT, 1, param);
sessionPool.AddResource(std::move(session));
}
std::vector<std::thread> threads;
int tasksPerThread = loop / thread_num;
int remainingTasks = loop % thread_num;
// Run the task in parallel
for (int i = 0; i < thread_num; ++i) {
int taskCount = tasksPerThread + (i < remainingTasks ? 1 : 0);
threads.emplace_back([&, taskCount]() {
for (int j = 0; j < taskCount; ++j) {
auto sessionGuard = sessionPool.AcquireResource();
std::vector<inspire::FaceTrackWrap> results;
int32_t ret;
ret = sessionGuard->FaceDetectAndTrack(process, results);
if (ret != 0) {
std::cerr << "FaceDetectAndTrack failed" << std::endl;
break;
}
if (results.size() == 0) {
std::cerr << "Not found face" << std::endl;
break;
}
}
});
}
// Print basic information before starting
std::cout << "\n=== Configuration Information ===" << std::endl;
std::cout << "Model Path: " << model_path << std::endl;
std::cout << "Image Path: " << image_path << std::endl;
std::cout << "Total Loop Count: " << loop << std::endl;
std::cout << "Number of Threads: " << thread_num << std::endl;
std::cout << "Tasks per Thread: " << tasksPerThread << std::endl;
std::cout << "Remaining Tasks: " << remainingTasks << std::endl;
std::cout << "==============================\n" << std::endl;
inspire::SpendTimer timer("Number of threads: " + std::to_string(thread_num) + ", Number of tasks: " + std::to_string(loop));
timer.Start();
for (auto& thread : threads) {
thread.join();
}
timer.Stop();
std::cout << timer << std::endl;
// Convert microseconds to milliseconds and print
double milliseconds = timer.Total() / 1000.0;
std::cout << "Total execution time: " << milliseconds << " ms" << std::endl;
return 0;
}

View File

@@ -2,113 +2,134 @@
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inspireface.h>
#define NUM_IMAGES 2
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
HResult ret;
const char* packPath;
const char* imgPath1;
const char* imgPath2;
HOption option;
HFSession session;
HFFaceFeature features[NUM_IMAGES];
const char* imgPaths[NUM_IMAGES];
int i;
HFloat similarity;
HFloat recommended_cosine_threshold;
HFloat percentage;
/* Check whether the number of parameters is correct */
if (argc != 4) {
HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <img1_path> <img2_path>", argv[0]);
return 1;
}
auto packPath = argv[1];
auto imgPath1 = argv[2];
auto imgPath2 = argv[3];
packPath = argv[1];
imgPath1 = argv[2];
imgPath2 = argv[3];
/* Initialize features array to NULL */
memset(features, 0, sizeof(features));
/* Allocate memory for feature vectors */
for (i = 0; i < NUM_IMAGES; i++) {
ret = HFCreateFaceFeature(&features[i]);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create face feature error: %d", ret);
goto cleanup;
}
}
/* Set the image path array */
imgPaths[0] = imgPath1;
imgPaths[1] = imgPath2;
HFLogPrint(HF_LOG_INFO, "Pack file Path: %s", packPath);
HFLogPrint(HF_LOG_INFO, "Source file Path 1: %s", imgPath1);
HFLogPrint(HF_LOG_INFO, "Source file Path 2: %s", imgPath2);
HResult ret;
// The resource file must be loaded before it can be used
/* The resource file must be loaded before it can be used */
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
return ret;
goto cleanup;
}
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
/* Create a session for face recognition */
option = HF_ENABLE_FACE_RECOGNITION;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_ALWAYS_DETECT, 1, -1, -1, &session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create session error: %d", ret);
return ret;
goto cleanup;
}
std::vector<char*> twoImg = {imgPath1, imgPath2};
std::vector<std::vector<float>> vec(2, std::vector<float>(512));
for (int i = 0; i < twoImg.size(); ++i) {
/* Process two images */
for (i = 0; i < NUM_IMAGES; i++) {
HFImageBitmap imageBitmap = {0};
ret = HFCreateImageBitmapFromFilePath(twoImg[i], 3, &imageBitmap);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create image bitmap error: %d", ret);
return ret;
}
// Prepare image data for processing
HFImageStream stream;
ret = HFCreateImageStreamFromImageBitmap(imageBitmap, HF_CAMERA_ROTATION_0, &stream); // Create an image stream for processing
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create stream error: %d", ret);
return ret;
}
// Execute face tracking on the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData); // Track faces in the image
ret = HFCreateImageBitmapFromFilePath(imgPaths[i], 3, &imageBitmap);
if (ret != HSUCCEED) {
HFReleaseImageBitmap(imageBitmap);
HFLogPrint(HF_LOG_ERROR, "Create image bitmap error: %d", ret);
goto cleanup;
}
ret = HFCreateImageStreamFromImageBitmap(imageBitmap, HF_CAMERA_ROTATION_0, &stream);
if (ret != HSUCCEED) {
HFReleaseImageStream(stream);
HFReleaseImageBitmap(imageBitmap);
HFLogPrint(HF_LOG_ERROR, "Create stream error: %d", ret);
goto cleanup;
}
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);
if (ret != HSUCCEED) {
HFReleaseImageStream(stream);
HFReleaseImageBitmap(imageBitmap);
HFLogPrint(HF_LOG_ERROR, "Run face track error: %d", ret);
return ret;
}
if (multipleFaceData.detectedNum == 0) { // Check if any faces were detected
HFLogPrint(HF_LOG_ERROR, "No face was detected: %s", twoImg[i]);
return ret;
goto cleanup;
}
// Extract facial features from the first detected face, an interface that uses copy features in a comparison scenario
ret = HFFaceFeatureExtractCpy(session, stream, multipleFaceData.tokens[0], vec[i].data()); // Extract features
if (multipleFaceData.detectedNum == 0) {
HFReleaseImageStream(stream);
HFReleaseImageBitmap(imageBitmap);
HFLogPrint(HF_LOG_ERROR, "No face was detected: %s", imgPaths[i]);
goto cleanup;
}
ret = HFFaceFeatureExtractTo(session, stream, multipleFaceData.tokens[0], features[i]);
if (ret != HSUCCEED) {
HFReleaseImageStream(stream);
HFReleaseImageBitmap(imageBitmap);
HFLogPrint(HF_LOG_ERROR, "Extract feature error: %d", ret);
return ret;
goto cleanup;
}
ret = HFReleaseImageStream(stream);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
}
ret = HFReleaseImageBitmap(imageBitmap);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release image bitmap error: %d", ret);
return ret;
}
HFReleaseImageStream(stream);
HFReleaseImageBitmap(imageBitmap);
}
// Make feature1
HFFaceFeature feature1 = {0};
feature1.data = vec[0].data();
feature1.size = vec[0].size();
HFFaceFeature feature1 = features[0];
HFFaceFeature feature2 = features[1];
// Make feature2
HFFaceFeature feature2 = {0};
feature2.data = vec[1].data();
feature2.size = vec[1].size();
// Run comparison
HFloat similarity;
/* Run comparison */
ret = HFFaceComparison(feature1, feature2, &similarity);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Feature comparison error: %d", ret);
return ret;
goto cleanup;
}
HFloat recommended_cosine_threshold;
ret = HFGetRecommendedCosineThreshold(&recommended_cosine_threshold);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Get recommended cosine threshold error: %d", ret);
return ret;
goto cleanup;
}
if (similarity > recommended_cosine_threshold) {
@@ -118,20 +139,28 @@ int main(int argc, char* argv[]) {
}
HFLogPrint(HF_LOG_INFO, "Similarity score: %.3f", similarity);
// Convert cosine similarity to percentage similarity.
// Note: conversion parameters are not optimal and should be adjusted based on your specific use case.
HFloat percentage;
ret = HFCosineSimilarityConvertToPercentage(similarity, &percentage);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Convert similarity to percentage error: %d", ret);
return ret;
goto cleanup;
}
HFLogPrint(HF_LOG_INFO, "Percentage similarity: %f", percentage);
// The memory must be freed at the end of the program
/* Clean up resources */
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
return ret;
}
cleanup:
/* Release the feature vector memory */
for (i = 0; i < NUM_IMAGES; i++) {
if (features[i].data != NULL) { // Only release features that were successfully created
HFReleaseFaceFeature(&features[i]);
}
}
HFDeBugShowResourceStatistics();
return ret;
}

View File

@@ -0,0 +1,178 @@
#include <inspireface.h>
#include <stdio.h>
int main() {
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace("test_res/pack/Pikachu");
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
return ret;
}
char *db_path = "case_crud.db";
if (remove(db_path) != 0) {
HFLogPrint(HF_LOG_ERROR, "Remove database file error: %d", ret);
return ret;
}
HFFeatureHubConfiguration configuration;
configuration.primaryKeyMode = HF_PK_AUTO_INCREMENT;
configuration.enablePersistence = 1;
configuration.persistenceDbPath = db_path;
configuration.searchMode = HF_SEARCH_MODE_EXHAUSTIVE;
configuration.searchThreshold = 0.48f;
ret = HFFeatureHubDataEnable(configuration);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Enable feature hub error: %d", ret);
return ret;
}
// Create a session
HFSession session;
ret = HFCreateInspireFaceSessionOptional(HF_ENABLE_FACE_RECOGNITION, HF_DETECT_MODE_ALWAYS_DETECT, 1, 320, -1, &session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create session error: %d", ret);
return ret;
}
// Prepare an image for insertion into the hub
HFImageBitmap image;
ret = HFCreateImageBitmapFromFilePath("test_res/data/bulk/kun.jpg", 3, &image);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create image bitmap error: %d", ret);
return ret;
}
// Create an image stream
HFImageStream imageHandle;
ret = HFCreateImageStreamFromImageBitmap(image, HF_CAMERA_ROTATION_0, &imageHandle);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create image stream error: %d", ret);
return ret;
}
// Detect and track
HFMultipleFaceData multipleFaceData;
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Execute face track error: %d", ret);
return ret;
}
if (multipleFaceData.detectedNum > 0) {
HFLogPrint(HF_LOG_INFO, "Face detected: %d", multipleFaceData.detectedNum);
}
HFFaceFeature feature;
ret = HFCreateFaceFeature(&feature);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create face feature error: %d", ret);
return ret;
}
ret = HFFaceFeatureExtractCpy(session, imageHandle, multipleFaceData.tokens[0], feature.data);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Extract feature error: %d", ret);
return ret;
}
// Insert face feature into the hub
HFFaceFeatureIdentity featureIdentity;
featureIdentity.feature = &feature;
featureIdentity.id = -1;
HFaceId result_id;
ret = HFFeatureHubInsertFeature(featureIdentity, &result_id);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Insert feature error: %d", ret);
return ret;
}
// Prepare a photo of the same person for the query
HFImageBitmap query_image;
ret = HFCreateImageBitmapFromFilePath("test_res/data/bulk/jntm.jpg", 3, &query_image);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create image bitmap error: %d", ret);
return ret;
}
// Create an image stream
HFImageStream query_imageHandle;
ret = HFCreateImageStreamFromImageBitmap(query_image, HF_CAMERA_ROTATION_0, &query_imageHandle);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create image stream error: %d", ret);
return ret;
}
// Detect and track
ret = HFExecuteFaceTrack(session, query_imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Execute face track error: %d", ret);
return ret;
}
if (multipleFaceData.detectedNum > 0) {
HFLogPrint(HF_LOG_INFO, "Face detected: %d", multipleFaceData.detectedNum);
}
HFFaceFeature query_feature;
ret = HFCreateFaceFeature(&query_feature);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create face feature error: %d", ret);
return ret;
}
// Extract face feature
ret = HFFaceFeatureExtractTo(session, query_imageHandle, multipleFaceData.tokens[0], query_feature);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Extract feature error: %d", ret);
return ret;
}
// Search face feature
HFFaceFeatureIdentity query_featureIdentity;
query_featureIdentity.feature = &query_feature;
query_featureIdentity.id = -1;
HFloat confidence;
ret = HFFeatureHubFaceSearch(query_feature, &confidence, &query_featureIdentity);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Search feature error: %d", ret);
return ret;
}
HFLogPrint(HF_LOG_INFO, "Search feature result: %d", query_featureIdentity.id);
HFLogPrint(HF_LOG_INFO, "Search feature confidence: %f", confidence);
// Remove face feature
ret = HFFeatureHubFaceRemove(result_id);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Remove feature error: %d", ret);
return ret;
}
HFLogPrint(HF_LOG_INFO, "Remove feature result: %d", result_id);
// Query again
ret = HFFeatureHubFaceSearch(query_feature, &confidence, &query_featureIdentity);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Search feature error: %d", ret);
return ret;
}
HFLogPrint(HF_LOG_INFO, "Query again, search feature result: %d", query_featureIdentity.id);
if (query_featureIdentity.id != -1) {
HFLogPrint(HF_LOG_INFO, "Remove feature failed");
}
// Release resources
HFReleaseFaceFeature(&feature);
HFReleaseFaceFeature(&query_feature);
HFReleaseImageStream(imageHandle);
HFReleaseImageStream(query_imageHandle);
HFReleaseImageBitmap(image);
HFReleaseImageBitmap(query_image);
HFReleaseInspireFaceSession(session);
HFDeBugShowResourceStatistics();
return 0;
}

View File

@@ -2,30 +2,53 @@
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <inspireface.h>
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
HResult ret;
const char* packPath;
const char* sourcePath;
int rotation;
HFRotation rotation_enum;
HOption option;
HFDetectMode detMode;
HInt32 maxDetectNum;
HInt32 detectPixelLevel;
HFSession session;
HFImageBitmap image;
HFImageStream imageHandle;
HFMultipleFaceData multipleFaceData;
int faceNum;
HFImageBitmap drawImage;
HFImageBitmapData data;
int index;
HFFaceMaskConfidence maskConfidence;
HFFaceQualityConfidence qualityConfidence;
HOption pipelineOption;
HFFaceDetectPixelList pixelLevels;
/* Check whether the number of parameters is correct */
if (argc < 3 || argc > 4) {
HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <source_path> [rotation]", argv[0]);
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
int rotation = 0;
packPath = argv[1];
sourcePath = argv[2];
rotation = 0;
// If rotation is provided, check and set the value
/* If rotation is provided, check and set the value */
if (argc == 4) {
rotation = std::atoi(argv[3]);
rotation = atoi(argv[3]);
if (rotation != 0 && rotation != 90 && rotation != 180 && rotation != 270) {
HFLogPrint(HF_LOG_ERROR, "Invalid rotation value. Allowed values are 0, 90, 180, 270.");
return 1;
}
}
HFRotation rotation_enum;
// Set rotation based on input parameter
/* Set rotation based on input parameter */
switch (rotation) {
case 90:
rotation_enum = HF_CAMERA_ROTATION_90;
@@ -46,28 +69,37 @@ int main(int argc, char* argv[]) {
HFLogPrint(HF_LOG_INFO, "Source file Path: %s", sourcePath);
HFLogPrint(HF_LOG_INFO, "Rotation: %d", rotation);
HFSetLogLevel(HF_LOG_INFO);
HFSetLogLevel(HF_LOG_DEBUG);
HResult ret;
// The resource file must be loaded before it can be used
/* The resource file must be loaded before it can be used */
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality
// detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
// tracking
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
HInt32 maxDetectNum = 20;
// Face detection image input level
HInt32 detectPixelLevel = 160;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFQuerySupportedPixelLevelsForFaceDetection(&pixelLevels);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "HFQuerySupportedPixelLevelsForFaceDetection error: %d", ret);
return ret;
}
HFLogPrint(HF_LOG_INFO, "Supported pixel levels for face detection: %d", pixelLevels.size);
for (int i = 0; i < pixelLevels.size; i++) {
HFLogPrint(HF_LOG_INFO, "Supported pixel level %d: %d", i + 1, pixelLevels.pixel_level[i]);
}
/* Enable the functions in the pipeline: mask detection, live detection, and face quality
* detection */
option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
/* Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
* tracking */
detMode = HF_DETECT_MODE_LIGHT_TRACK;
/* Maximum number of faces detected */
maxDetectNum = 20;
/* Face detection image input level */
detectPixelLevel = 160;
/* Handle of the current face SDK algorithm context */
session = NULL;
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create FaceContext error: %d", ret);
@@ -77,123 +109,128 @@ int main(int argc, char* argv[]) {
HFSessionSetTrackPreviewSize(session, detectPixelLevel);
HFSessionSetFilterMinimumFacePixelSize(session, 4);
// Load a image
HFImageBitmap image;
/* Load a image */
ret = HFCreateImageBitmapFromFilePath(sourcePath, 3, &image);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "The source entered is not a picture or read error.");
return ret;
}
// Prepare an image parameter structure for configuration
HFImageStream imageHandle = {0};
/* Prepare an image parameter structure for configuration */
ret = HFCreateImageStreamFromImageBitmap(image, rotation_enum, &imageHandle);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Create ImageStream error: %d", ret);
return ret;
}
// Execute HF_FaceContextRunFaceTrack captures face information in an image
HFMultipleFaceData multipleFaceData = {0};
/* Execute HF_FaceContextRunFaceTrack captures face information in an image */
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Execute HFExecuteFaceTrack error: %d", ret);
return ret;
}
// Print the number of faces detected
auto faceNum = multipleFaceData.detectedNum;
/* Print the number of faces detected */
faceNum = multipleFaceData.detectedNum;
HFLogPrint(HF_LOG_INFO, "Num of face: %d", faceNum);
// Copy a new image to draw
HFImageBitmap drawImage = {0};
/* Copy a new image to draw */
ret = HFImageBitmapCopy(image, &drawImage);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Copy ImageBitmap error: %d", ret);
return ret;
}
HFImageBitmapData data;
ret = HFImageBitmapGetData(drawImage, &data);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Get ImageBitmap data error: %d", ret);
return ret;
}
for (int index = 0; index < faceNum; ++index) {
for (index = 0; index < faceNum; ++index) {
HInt32 numOfLmk;
HPoint2f* denseLandmarkPoints;
HPoint2f fiveKeyPoints[5];
float area;
size_t i;
HFLogPrint(HF_LOG_INFO, "========================================");
HFLogPrint(HF_LOG_INFO, "Token size: %d", multipleFaceData.tokens[index].size);
HFLogPrint(HF_LOG_INFO, "Process face index: %d", index);
HFLogPrint(HF_LOG_INFO, "DetConfidence: %f", multipleFaceData.detConfidence[index]);
HFImageBitmapDrawRect(drawImage, multipleFaceData.rects[index], {0, 100, 255}, 4);
HFImageBitmapDrawRect(drawImage, multipleFaceData.rects[index], (HColor){0, 100, 255}, 4);
// Print FaceID, In IMAGE-MODE it is changing, in VIDEO-MODE it is fixed, but it may be lost
HFLogPrint(HF_LOG_INFO, "FaceID: %d", multipleFaceData.trackIds[index]);
// Print Head euler angle, It can often be used to judge the quality of a face by the Angle
// of the head
HFLogPrint(HF_LOG_INFO, "Roll: %f, Yaw: %f, Pitch: %f", multipleFaceData.angles.roll[index], multipleFaceData.angles.yaw[index],
multipleFaceData.angles.pitch[index]);
HInt32 numOfLmk;
/* Get the number of dense landmark points */
HFGetNumOfFaceDenseLandmark(&numOfLmk);
HPoint2f denseLandmarkPoints[numOfLmk];
denseLandmarkPoints = (HPoint2f*)malloc(sizeof(HPoint2f) * numOfLmk);
if (denseLandmarkPoints == NULL) {
HFLogPrint(HF_LOG_ERROR, "Memory allocation failed!");
return -1;
}
ret = HFGetFaceDenseLandmarkFromFaceToken(multipleFaceData.tokens[index], denseLandmarkPoints, numOfLmk);
if (ret != HSUCCEED) {
free(denseLandmarkPoints);
HFLogPrint(HF_LOG_ERROR, "HFGetFaceDenseLandmarkFromFaceToken error!!");
return -1;
}
for (size_t i = 0; i < numOfLmk; i++) {
HFImageBitmapDrawCircleF(drawImage, {denseLandmarkPoints[i].x, denseLandmarkPoints[i].y}, 0, {100, 100, 0}, 2);
/* Draw dense landmark points */
for (i = 0; i < numOfLmk; i++) {
HFImageBitmapDrawCircleF(drawImage,
(HPoint2f){denseLandmarkPoints[i].x, denseLandmarkPoints[i].y},
0,
(HColor){100, 100, 0},
2);
}
auto& rt = multipleFaceData.rects[index];
float area = ((float)(rt.height * rt.width)) / (data.width * data.height);
free(denseLandmarkPoints);
HFaceRect rt = multipleFaceData.rects[index];
area = ((float)(rt.height * rt.width)) / (data.width * data.height);
HFLogPrint(HF_LOG_INFO, "area: %f", area);
HPoint2f fiveKeyPoints[5];
ret = HFGetFaceFiveKeyPointsFromFaceToken(multipleFaceData.tokens[index], fiveKeyPoints, 5);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "HFGetFaceFiveKeyPointsFromFaceToken error!!");
return -1;
}
for (size_t i = 0; i < 5; i++) {
HFImageBitmapDrawCircleF(drawImage, {fiveKeyPoints[i].x, fiveKeyPoints[i].y}, 0, {0, 0, 232}, 2);
for (i = 0; i < 5; i++) {
HFImageBitmapDrawCircleF(drawImage, (HPoint2f){fiveKeyPoints[i].x, fiveKeyPoints[i].y}, 0, (HColor){0, 0, 232}, 2);
}
}
HFImageBitmapWriteToFile(drawImage, "draw_detected.jpg");
HFLogPrint(HF_LOG_WARN, "Write to file success: %s", "draw_detected.jpg");
// Run pipeline function
// Select the pipeline function that you want to execute, provided that it is already enabled
// when FaceContext is created!
auto pipelineOption = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// In this loop, all faces are processed
/* Run pipeline function */
/* Select the pipeline function that you want to execute, provided that it is already enabled
* when FaceContext is created! */
pipelineOption = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
/* In this loop, all faces are processed */
ret = HFMultipleFacePipelineProcessOptional(session, imageHandle, &multipleFaceData, pipelineOption);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Execute Pipeline error: %d", ret);
return ret;
}
// Get mask detection results from the pipeline cache
HFFaceMaskConfidence maskConfidence = {0};
/* Get mask detection results from the pipeline cache */
ret = HFGetFaceMaskConfidence(session, &maskConfidence);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Get mask detect result error: %d", ret);
return -1;
}
// Get face quality results from the pipeline cache
HFFaceQualityConfidence qualityConfidence = {0};
/* Get face quality results from the pipeline cache */
ret = HFGetFaceQualityConfidence(session, &qualityConfidence);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Get face quality result error: %d", ret);
return -1;
}
for (int index = 0; index < faceNum; ++index) {
for (index = 0; index < faceNum; ++index) {
HFLogPrint(HF_LOG_INFO, "========================================");
HFLogPrint(HF_LOG_INFO, "Process face index from pipeline: %d", index);
HFLogPrint(HF_LOG_INFO, "Mask detect result: %f", maskConfidence.confidence[index]);
HFLogPrint(HF_LOG_INFO, "Quality predict result: %f", qualityConfidence.confidence[index]);
// We set the threshold of wearing a mask as 0.85. If it exceeds the threshold, it will be
// judged as wearing a mask. The threshold can be adjusted according to the scene
/* We set the threshold of wearing a mask as 0.85. If it exceeds the threshold, it will be
* judged as wearing a mask. The threshold can be adjusted according to the scene */
if (maskConfidence.confidence[index] > 0.85) {
HFLogPrint(HF_LOG_INFO, "Mask");
} else {
@@ -205,7 +242,7 @@ int main(int argc, char* argv[]) {
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
}
// The memory must be freed at the end of the program
/* The memory must be freed at the end of the program */
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
@@ -224,5 +261,8 @@ int main(int argc, char* argv[]) {
return ret;
}
HFLogPrint(HF_LOG_INFO, "");
HFDeBugShowResourceStatistics();
return 0;
}

View File

@@ -1,31 +1,33 @@
/**
/*
* Created by Jingyu Yan
* @date 2024-10-01
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <inspireface.h>
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
/* Check whether the number of parameters is correct */
if (argc < 3 || argc > 4) {
HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <source_path> [rotation]", argv[0]);
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
const char* packPath = argv[1];
const char* sourcePath = argv[2];
int rotation = 0;
// If rotation is provided, check and set the value
/* If rotation is provided, check and set the value */
if (argc == 4) {
rotation = std::atoi(argv[3]);
rotation = atoi(argv[3]);
if (rotation != 0 && rotation != 90 && rotation != 180 && rotation != 270) {
HFLogPrint(HF_LOG_ERROR, "Invalid rotation value. Allowed values are 0, 90, 180, 270.");
return 1;
}
}
HFRotation rotation_enum;
// Set rotation based on input parameter
/* Set rotation based on input parameter */
switch (rotation) {
case 90:
rotation_enum = HF_CAMERA_ROTATION_90;
@@ -49,24 +51,24 @@ int main(int argc, char* argv[]) {
HFSetLogLevel(HF_LOG_INFO);
HResult ret;
// The resource file must be loaded before it can be used
/* The resource file must be loaded before it can be used */
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality
// detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK;
// Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
// tracking
/* Enable the functions in the pipeline: mask detection, live detection, and face quality
* detection */
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
/* Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without
* tracking */
HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
// Maximum number of faces detected
/* Maximum number of faces detected */
HInt32 maxDetectNum = 20;
// Face detection image input level
/* Face detection image input level */
HInt32 detectPixelLevel = 160;
// Handle of the current face SDK algorithm context
/* Handle of the current face SDK algorithm context */
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
if (ret != HSUCCEED) {
@@ -77,14 +79,14 @@ int main(int argc, char* argv[]) {
HFSessionSetTrackPreviewSize(session, detectPixelLevel);
HFSessionSetFilterMinimumFacePixelSize(session, 4);
// Load a image
/* Load a image */
HFImageBitmap image;
ret = HFCreateImageBitmapFromFilePath(sourcePath, 3, &image);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "The source entered is not a picture or read error.");
return ret;
}
// Prepare an image parameter structure for configuration
/* Prepare an image parameter structure for configuration */
HFImageStream imageHandle = {0};
ret = HFCreateImageStreamFromImageBitmap(image, rotation_enum, &imageHandle);
if (ret != HSUCCEED) {
@@ -94,12 +96,13 @@ int main(int argc, char* argv[]) {
int loop = 100;
// Enable the cost spend
/* Enable the cost spend */
HFSessionSetEnableTrackCostSpend(session, 1);
// Execute HF_FaceContextRunFaceTrack captures face information in an image
int i;
/* Execute HF_FaceContextRunFaceTrack captures face information in an image */
HFMultipleFaceData multipleFaceData = {0};
for (int i = 0; i < loop; i++) {
for (i = 0; i < loop; i++) {
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Execute HFExecuteFaceTrack error: %d", ret);
@@ -113,7 +116,7 @@ int main(int argc, char* argv[]) {
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
}
// The memory must be freed at the end of the program
/* The memory must be freed at the end of the program */
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);

View File

@@ -1,8 +1,6 @@
#include <iostream>
#include <inspireface.h>
#include <vector>
static std::vector<float> FT = {
static float FT[] = {
0.0706566, 0.00640248, 0.0418103, -0.00597861, 0.0269879, 0.0187478, 0.0486305, 0.0349162, -0.0080779, -0.0550556, 0.0229963,
-0.00683422, -0.0338589, 0.0533989, -0.0371725, 0.000972469, 0.0612415, 0.0389846, -0.00126743, -0.0128782, 0.0935529, 0.0588179,
0.0164787, -0.00732871, -0.0458209, -0.0100137, -0.0372892, 0.000871123, 0.0245121, -0.0811471, -0.00481095, 0.0266868, 0.0712961,
@@ -73,12 +71,10 @@ int main() {
return ret;
}
// std::vector<float> feature(512, 0.0f);
int64_t result_id = 0;
HFFaceFeature feature = {0};
feature.data = FT.data();
feature.size = FT.size();
feature.data = FT;
feature.size = sizeof(FT) / sizeof(FT[0]);
HFFaceFeatureIdentity identity = {0};
identity.feature = &feature;
ret = HFFeatureHubInsertFeature(identity, &result_id);
@@ -87,10 +83,9 @@ int main() {
return ret;
}
// std::vector<float> query_feature(512, 20.0f);
HFFaceFeature query_feature = {0};
query_feature.data = FT.data();
query_feature.size = FT.size();
query_feature.data = FT;
query_feature.size = sizeof(FT) / sizeof(FT[0]);
HFloat confidence;
HFFaceFeatureIdentity search_result = {0};
ret = HFFeatureHubFaceSearch(query_feature, &confidence, &search_result);

View File

@@ -0,0 +1,52 @@
#include <inspireface.h>
#include <unistd.h>
#include <stdio.h>
int main(int argc, char* argv[]) {
    /* Sample: verify that enabling FeatureHub persistence materializes its db file on disk. */
    if (argc != 2) {
        HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path>", argv[0]);
        return -1;
    }
    const char* pack_path = argv[1];

    /* The resource pack must be loaded before any other SDK call. */
    HResult status = HFLaunchInspireFace(pack_path);
    if (status != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", status);
        return status;
    }

    const char* db_file = "feature.db";
    /* Start from a clean slate: drop any db file left over from a previous run. */
    if (access(db_file, F_OK) == 0) {
        if (remove(db_file) != 0) {
            HFLogPrint(HF_LOG_ERROR, "Failed to remove old db file: %s", db_file);
            return -1;
        }
        HFLogPrint(HF_LOG_INFO, "Remove old db file: %s", db_file);
    }

    /* Configure FeatureHub with persistence enabled so it creates the db file. */
    HFFeatureHubConfiguration hub_cfg;
    hub_cfg.primaryKeyMode = HF_PK_AUTO_INCREMENT;
    hub_cfg.enablePersistence = 1;
    hub_cfg.persistenceDbPath = db_file;
    hub_cfg.searchMode = HF_SEARCH_MODE_EAGER;
    hub_cfg.searchThreshold = 0.48f;
    status = HFFeatureHubDataEnable(hub_cfg);
    if (status != HSUCCEED) {
        HFLogPrint(HF_LOG_ERROR, "Enable FeatureHub failed: %d\n", status);
        return status;
    }

    /* After enabling persistence the db file is expected to exist. */
    if (access(db_file, F_OK) != 0) {
        HFLogPrint(HF_LOG_ERROR, "DB file not found: %s", db_file);
        return -1;
    }
    HFLogPrint(HF_LOG_INFO, "DB file found: %s", db_file);

    HFTerminateInspireFace();
    return 0;
}

View File

@@ -1,9 +1,8 @@
#include <iostream>
#include <inspireface.h>
int main() {
std::string resourcePath = "test_res/pack/Pikachu";
HResult ret = HFReloadInspireFace(resourcePath.c_str());
const char* resourcePath = "test_res/pack/Pikachu";
HResult ret = HFReloadInspireFace(resourcePath);
if (ret != HSUCCEED) {
HFLogPrint(HF_LOG_ERROR, "Failed to launch InspireFace: %d", ret);
return 1;

View File

@@ -91,7 +91,7 @@ int main() {
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader.LoadModel("feature", model);
m_extract_->loadData(model, InferenceWrapper::INFER_RKNN);
m_extract_->LoadData(model, InferenceWrapper::INFER_RKNN);
cv::Mat image = cv::imread(names[0]);
// cv::Mat rgb;

View File

@@ -31,7 +31,7 @@ int main() {
std::shared_ptr<FaceDetect> m_face_detector_;
m_face_detector_ = std::make_shared<FaceDetect>(320);
m_face_detector_->loadData(model, InferenceWrapper::INFER_RKNN);
m_face_detector_->LoadData(model, InferenceWrapper::INFER_RKNN);
// Load a image
cv::Mat image = cv::imread("test_res/images/face_sample.png");

View File

@@ -35,7 +35,7 @@ void rec_function() {
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader->LoadModel("feature", model);
m_extract_->loadData(model, InferenceWrapper::INFER_RKNN);
m_extract_->LoadData(model, InferenceWrapper::INFER_RKNN);
loader.reset();

View File

@@ -35,7 +35,7 @@ void test_rnet() {
InspireModel model;
loader.LoadModel("refine_net", model);
m_rnet_ = std::make_shared<RNet>();
m_rnet_->loadData(model, InferenceWrapper::INFER_RKNN);
m_rnet_->LoadData(model, InferenceWrapper::INFER_RKNN);
{
// Load a image
@@ -78,7 +78,7 @@ void test_mask() {
m_mask_predict_ = std::make_shared<MaskPredict>();
InspireModel model;
loader.LoadModel("mask_detect", model);
m_mask_predict_->loadData(model, InferenceWrapper::INFER_RKNN);
m_mask_predict_->LoadData(model, InferenceWrapper::INFER_RKNN);
{
// Load a image
@@ -120,7 +120,7 @@ void test_quality() {
m_face_quality_ = std::make_shared<FacePoseQuality>();
InspireModel model;
loader.LoadModel("pose_quality", model);
m_face_quality_->loadData(model, InferenceWrapper::INFER_RKNN);
m_face_quality_->LoadData(model, InferenceWrapper::INFER_RKNN);
{
std::vector<std::string> names = {
@@ -166,7 +166,7 @@ void test_landmark_mnn() {
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model);
m_landmark_predictor_->LoadData(model);
cv::Mat image = cv::imread("test_res/images/test_data/crop.png");
cv::resize(image, image, cv::Size(112, 112));
@@ -206,7 +206,7 @@ void test_landmark() {
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model, InferenceWrapper::INFER_RKNN);
m_landmark_predictor_->LoadData(model, InferenceWrapper::INFER_RKNN);
cv::Mat image = cv::imread("test_res/images/test_data/0.jpg");
cv::resize(image, image, cv::Size(112, 112));
@@ -248,7 +248,7 @@ void test_liveness() {
InspireModel model;
loader.LoadModel("rgb_anti_spoofing", model);
m_rgb_anti_spoofing_ = std::make_shared<RBGAntiSpoofing>(80, true);
m_rgb_anti_spoofing_->loadData(model, InferenceWrapper::INFER_RKNN);
m_rgb_anti_spoofing_->LoadData(model, InferenceWrapper::INFER_RKNN);
std::vector<std::string> names = {
"test_res/images/test_data/real.jpg", "test_res/images/test_data/fake.jpg", "test_res/images/test_data/live.jpg",

View File

@@ -27,7 +27,7 @@ int main(int argc, char** argv) {
auto m_pose_net_ = std::make_shared<FacePose>();
InspireModel model;
loader.LoadModel("", model);
m_pose_net_->loadData(model);
m_pose_net_->LoadData(model);
auto image = cv::imread("resource/images/crop.png");

View File

@@ -30,10 +30,10 @@ int main(int argc, char** argv) {
stream.SetDataBuffer(rot90.data, rot90.rows, rot90.cols);
ctx.FaceDetectAndTrack(stream);
std::vector<HyperFaceData> faces;
std::vector<FaceTrackWrap> faces;
for (int i = 0; i < ctx.GetNumberOfFacesCurrentlyDetected(); ++i) {
// const ByteArray &byteArray = ctx.GetDetectCache()[i];
HyperFaceData face = {0};
FaceTrackWrap face = {0};
// ret = DeserializeHyperFaceData(byteArray, face);
const FaceBasicData& faceBasic = ctx.GetFaceBasicDataCache()[i];

View File

@@ -0,0 +1,84 @@
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
#include "inspireface/track_module/landmark/order_of_hyper_landmark.h"
int main(int argc, char** argv) {
if (argc != 3) {
std::cout << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
return -1;
}
std::string model_path = argv[1];
std::string image_path = argv[2];
// Global init(only once)
INSPIREFACE_CONTEXT->Reload(model_path);
// Create image and frame process
inspirecv::Image image = inspirecv::Image::Create(image_path);
inspirecv::FrameProcess process =
inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
// Create session
inspire::CustomPipelineParameter param;
param.enable_recognition = true;
param.enable_liveness = true;
param.enable_mask_detect = true;
param.enable_face_attribute = true;
param.enable_face_quality = true;
param.enable_interaction_liveness = true;
std::shared_ptr<inspire::Session> session(inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320));
INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");
// Detect and track
std::vector<inspire::FaceTrackWrap> results;
int32_t ret;
ret = session->FaceDetectAndTrack(process, results);
INSPIREFACE_CHECK_MSG(ret == 0, "FaceDetectAndTrack failed");
auto first = results[0];
auto lmk = session->GetFaceDenseLandmark(first);
std::cout << "lmk: " << lmk.size() << std::endl;
for (size_t i = 0; i < lmk.size(); i++) {
image.DrawCircle(lmk[i].As<int>(), 5, inspirecv::Color::Red);
}
inspirecv::TransformMatrix rotation_mode_affine = process.GetAffineMatrix();
std::vector<inspirecv::Point2f> stand_lmk = ApplyTransformToPoints(lmk, rotation_mode_affine.GetInverse());
// Use total lmk
auto rect = inspirecv::MinBoundingRect(stand_lmk);
auto rect_pts = rect.As<float>().ToFourVertices();
std::vector<inspirecv::Point2f> dst_pts = {{0, 0}, {112, 0}, {112, 112}, {0, 112}};
std::vector<inspirecv::Point2f> camera_pts = ApplyTransformToPoints(rect_pts, rotation_mode_affine);
auto affine = inspirecv::SimilarityTransformEstimate(camera_pts, dst_pts);
auto image_affine = process.ExecuteImageAffineProcessing(affine, 112, 112);
image_affine.Write("affine.jpg");
// image.DrawRect(rect.As<int>(), inspirecv::Color::Red);
// image.Write("lmk.jpg");
std::vector<inspirecv::Point2i> points;
for (const auto& idx : inspire::HLMK_LEFT_EYE_POINTS_INDEX) {
points.emplace_back(stand_lmk[idx].GetX(), stand_lmk[idx].GetY());
}
std::cout << "points: " << points.size() << std::endl;
auto rect_eye = inspirecv::MinBoundingRect(points).Square(1.4f);
// draw debug
image.DrawRect(rect_eye.As<int>(), inspirecv::Color::Red);
auto rect_pts_eye = rect_eye.As<float>().ToFourVertices();
std::vector<inspirecv::Point2f> dst_pts_eye = {{0, 0}, {64, 0}, {64, 64}, {0, 64}};
std::vector<inspirecv::Point2f> camera_pts_eye = ApplyTransformToPoints(rect_pts_eye, rotation_mode_affine);
auto affine_eye = inspirecv::SimilarityTransformEstimate(camera_pts_eye, dst_pts_eye);
auto eye_affine = process.ExecuteImageAffineProcessing(affine_eye, 64, 64);
eye_affine.Write("eye.jpg");
return 0;
}

View File

@@ -0,0 +1,78 @@
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
int main(int argc, char** argv) {
if (argc != 4) {
std::cout << "Usage: " << argv[0] << " <model_path> <image_path1> <image_path2>" << std::endl;
return -1;
}
std::string model_path = argv[1];
std::string image_path1 = argv[2];
std::string image_path2 = argv[3];
// Global init(only once)
INSPIREFACE_CONTEXT->Reload(model_path);
// Create image and frame process
inspirecv::Image image1 = inspirecv::Image::Create(image_path1);
inspirecv::Image image2 = inspirecv::Image::Create(image_path2);
inspirecv::FrameProcess process1 = inspirecv::FrameProcess::Create(image1.Data(), image1.Height(), image1.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
inspirecv::FrameProcess process2 = inspirecv::FrameProcess::Create(image2.Data(), image2.Height(), image2.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
// Create session
inspire::CustomPipelineParameter param;
param.enable_recognition = true;
// Create session
std::shared_ptr<inspire::Session> session(
inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320));
INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");
// Detect and track
std::vector<inspire::FaceTrackWrap> results1;
std::vector<inspire::FaceTrackWrap> results2;
// Detect and track
session->FaceDetectAndTrack(process1, results1);
session->FaceDetectAndTrack(process2, results2);
INSPIREFACE_CHECK_MSG(!results1.empty() && !results2.empty(), "No face detected");
// Get feature
inspire::FaceEmbedding feature1;
inspire::FaceEmbedding feature2;
session->FaceFeatureExtract(process1, results1[0], feature1);
session->FaceFeatureExtract(process2, results2[0], feature2);
// Compare
float similarity;
INSPIREFACE_FEATURE_HUB->CosineSimilarity(feature1.embedding, feature2.embedding, similarity);
std::cout << "cosine of similarity: " << similarity << std::endl;
std::cout << "percentage of similarity: " << SIMILARITY_CONVERTER_RUN(similarity) << std::endl;
std::cout << "== using alignment image ==" << std::endl;
// Get face alignment image
inspirecv::Image wrapped1;
inspirecv::Image wrapped2;
session->GetFaceAlignmentImage(process1, results1[0], wrapped1);
session->GetFaceAlignmentImage(process2, results2[0], wrapped2);
wrapped1.Write("wrapped1.jpg");
wrapped2.Write("wrapped2.jpg");
inspire::FaceEmbedding feature1_alignment;
inspire::FaceEmbedding feature2_alignment;
session->FaceFeatureExtractWithAlignmentImage(wrapped1, feature1_alignment);
session->FaceFeatureExtractWithAlignmentImage(wrapped2, feature2_alignment);
INSPIREFACE_FEATURE_HUB->CosineSimilarity(feature1_alignment.embedding, feature2_alignment.embedding, similarity);
std::cout << "cosine of similarity: " << similarity << std::endl;
std::cout << "percentage of similarity: " << SIMILARITY_CONVERTER_RUN(similarity) << std::endl;
return 0;
}

View File

@@ -0,0 +1,80 @@
#include <iostream>
#include <cstdio>
#include <cerrno>
#include <inspireface/inspireface.hpp>

// Sample: FeatureHub CRUD round-trip.
// Inserts one face feature, searches for it with a second photo of the same
// person, removes it, then verifies the search no longer returns it.
int main() {
    // Launch InspireFace (global model load; once per process)
    std::string model_path = "test_res/pack/Pikachu";
    INSPIREFACE_CONTEXT->Reload(model_path);
    INSPIREFACE_CHECK_MSG(INSPIREFACE_CONTEXT->isMLoad(), "InspireFace is not loaded");
    // Enable feature hub backed by a fresh persistent database
    std::string db_path = "case_crud.db";
    // Remove any stale database file; a missing file is expected and not an error
    if (std::remove(db_path.c_str()) != 0 && errno != ENOENT) {
        std::cerr << "Error removing database file: " << db_path << std::endl;
    }
    inspire::DatabaseConfiguration db_config;
    db_config.enable_persistence = true;
    db_config.persistence_db_path = db_path;
    db_config.search_mode = inspire::SEARCH_MODE_EXHAUSTIVE;
    db_config.recognition_threshold = 0.48f;
    db_config.primary_key_mode = inspire::AUTO_INCREMENT;
    INSPIREFACE_FEATURE_HUB->EnableHub(db_config);
    // Create a session with recognition enabled
    auto param = inspire::CustomPipelineParameter();
    param.enable_recognition = true;
    auto session = inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320);
    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not created");
    // Prepare an image for insertion into the hub
    auto image = inspirecv::Image::Create("test_res/data/bulk/kun.jpg");
    auto image_process = inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
    // Detect and track
    std::vector<inspire::FaceTrackWrap> results;
    session->FaceDetectAndTrack(image_process, results);
    INSPIREFACE_CHECK_MSG(results.size() > 0, "No face detected");
    // Extract face feature from the first detected face
    inspire::FaceEmbedding feature;
    session->FaceFeatureExtract(image_process, results[0], feature);
    // Insert the feature; with AUTO_INCREMENT the supplied id (INSPIRE_INVALID_ID) is ignored
    // and the assigned id is returned in result_id
    int64_t result_id;
    INSPIREFACE_FEATURE_HUB->FaceFeatureInsert(feature.embedding, INSPIRE_INVALID_ID, result_id);
    // Prepare a photo of the same person for the query
    auto query_image = inspirecv::Image::Create("test_res/data/bulk/jntm.jpg");
    auto query_image_process = inspirecv::FrameProcess::Create(query_image.Data(), query_image.Height(), query_image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
    // Detect and track on the query image
    std::vector<inspire::FaceTrackWrap> query_results;
    session->FaceDetectAndTrack(query_image_process, query_results);
    INSPIREFACE_CHECK_MSG(query_results.size() > 0, "No face detected");
    // Extract the query face feature
    inspire::FaceEmbedding query_feature;
    session->FaceFeatureExtract(query_image_process, query_results[0], query_feature);
    // Search: the inserted identity should be found
    inspire::FaceSearchResult search_result;
    INSPIREFACE_FEATURE_HUB->SearchFaceFeature(query_feature.embedding, search_result, true);
    std::cout << "Search face feature result: " << search_result.id << std::endl;
    std::cout << "Search face feature similarity: " << search_result.similarity << std::endl;
    INSPIREFACE_CHECK_MSG(search_result.id == result_id, "Search face feature result id is not equal to the inserted id");
    // Remove the face feature and verify the hub is empty
    INSPIREFACE_FEATURE_HUB->FaceFeatureRemove(result_id);
    INSPIREFACE_CHECK_MSG(INSPIREFACE_FEATURE_HUB->GetFaceFeatureCount() == 0, "Face feature is not removed");
    std::cout << "Remove face feature successfully" << std::endl;
    // Query again: after removal the search must report INSPIRE_INVALID_ID
    INSPIREFACE_FEATURE_HUB->SearchFaceFeature(query_feature.embedding, search_result, true);
    INSPIREFACE_CHECK_MSG(search_result.id == INSPIRE_INVALID_ID, "Search result id should be INSPIRE_INVALID_ID after removal");
    std::cout << "Query again, search face feature result: " << search_result.id << std::endl;
    return 0;
}

View File

@@ -0,0 +1,56 @@
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>

// Sample: face detection + tracking on a single image supplied on the command
// line, with recognition/liveness/mask/attribute/quality modules enabled.
// Draws the detected rectangles and writes the annotated image to result.jpg.
int main(int argc, char** argv) {
    if (argc != 3) {
        std::cout << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
        return -1;
    }
    std::string model_path = argv[1];
    std::string image_path = argv[2];
    // Global init (only once per process)
    INSPIREFACE_CONTEXT->Reload(model_path);
    // Create image and frame process; the buffer is fed as BGR rotated 90 degrees
    inspirecv::Image image = inspirecv::Image::Create(image_path);
    inspirecv::FrameProcess process =
      inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_90);
    // Create session with the full pipeline enabled
    inspire::CustomPipelineParameter param;
    param.enable_recognition = true;
    param.enable_liveness = true;
    param.enable_mask_detect = true;
    param.enable_face_attribute = true;
    param.enable_face_quality = true;
    std::shared_ptr<inspire::Session> session(inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 100, param, 640));
    // Validate the session BEFORE using it (the original called SetTrackPreviewSize
    // on a potentially null session)
    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");
    session->SetTrackPreviewSize(640);
    // Detect and track
    std::vector<inspire::FaceTrackWrap> results;
    int32_t ret = session->FaceDetectAndTrack(process, results);
    INSPIREFACE_CHECK_MSG(ret == 0, "FaceDetectAndTrack failed");
    for (auto& result : results) {
        std::cout << "result: " << result.trackId << std::endl;
        // Five-element quality vector reported per face
        std::cout << "quality: " << result.quality[0] << ", " << result.quality[1] << ", " << result.quality[2] << ", " << result.quality[3] << ", "
                  << result.quality[4] << std::endl;
        // Draw the detection rectangle on the original (unrotated) image
        inspirecv::Rect2i rect = inspirecv::Rect2i::Create(result.rect.x, result.rect.y, result.rect.width, result.rect.height);
        std::cout << rect << std::endl;
        image.DrawRect(rect, inspirecv::Color::Red);
        // Affine transform used for the face crop; print its inverse
        inspirecv::TransformMatrix trans = inspirecv::TransformMatrix::Create(result.trans.m00, result.trans.m01, result.trans.tx, result.trans.m10, result.trans.m11, result.trans.ty);
        std::cout << "trans: " << trans.GetInverse() << std::endl;
    }
    image.Write("result.jpg");
    return 0;
}

View File

@@ -0,0 +1,200 @@
#include <iostream>
#include <cerrno>
#include <vector>
#include <string>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>

// Portable directory creation: _mkdir on Windows, mkdir(2) elsewhere.
#ifdef _WIN32
#include <direct.h>
#define CREATE_DIR(dir) _mkdir(dir)
#else
#include <sys/stat.h>
#define CREATE_DIR(dir) mkdir(dir, 0777)
#endif

// Sample: tour of the InspireCV image API — I/O, processing, geometric
// transforms, drawing, and FrameProcess. All outputs are written under cv/.
int main() {
    // Make the output directory; an already-existing directory is fine
    if (CREATE_DIR("cv") == -1) {
        if (errno != EEXIST) {
            std::cerr << "Error creating directory" << std::endl;
            return 1;
        }
    }

    /* Image I/O */
    // Load image from file with 3 channels (BGR, like OpenCV)
    inspirecv::Image img = inspirecv::Image::Create("test_res/data/bulk/kun_cartoon_crop.jpg", 3);
    // Load image from buffer:
    // uint8_t* buffer = ...; // buffer is a pointer to the image data
    // bool is_alloc_mem = false; // if true, will allocate memory for the image data,
    //                            // false is recommended to point to the original data to avoid copying
    // inspirecv::Image img_buffer = inspirecv::Image::Create(width, height, channel, buffer, is_alloc_mem);
    // Save image to file
    img.Write("cv/output.jpg");
    // Show image, warning: it must depend on opencv
    // img.Show("input");
    // Get pointer to image data
    const uint8_t* ptr = img.Data();

    /* Image Processing */
    // Convert to grayscale
    inspirecv::Image gray = img.ToGray();
    gray.Write("cv/gray.jpg");
    // Apply Gaussian blur
    inspirecv::Image blurred = img.GaussianBlur(3, 1.0);
    blurred.Write("cv/blurred.jpg");
    // Geometric transformations
    auto scale = 0.35;
    bool use_bilinear = true;
    inspirecv::Image resized = img.Resize(img.Width() * scale, img.Height() * scale, use_bilinear);  // Resize image
    resized.Write("cv/resized.jpg");
    // Rotate 90 degrees clockwise
    inspirecv::Image rotated = img.Rotate90();
    rotated.Write("cv/rotated.jpg");
    // Flip vertically
    inspirecv::Image flipped_vertical = img.FlipVertical();
    flipped_vertical.Write("cv/flipped_vertical.jpg");
    // Flip horizontally
    inspirecv::Image flipped_horizontal = img.FlipHorizontal();
    flipped_horizontal.Write("cv/flipped_horizontal.jpg");
    // Crop for rectangle
    inspirecv::Rect<int> rect = inspirecv::Rect<int>::Create(78, 41, 171, 171);
    inspirecv::Image cropped = img.Crop(rect);
    cropped.Write("cv/cropped.jpg");
    // Image padding
    int top = 50, bottom = 50, left = 50, right = 50;
    inspirecv::Image padded = img.Pad(top, bottom, left, right, inspirecv::Color::Black);
    padded.Write("cv/padded.jpg");
    // Swap red and blue channels
    inspirecv::Image swapped = img.SwapRB();
    swapped.Write("cv/swapped.jpg");
    // Multiply image by scale factor
    double scale_factor = 0.5;
    inspirecv::Image scaled = img.Mul(scale_factor);
    scaled.Write("cv/scaled.jpg");
    // Add value to image
    double value = -175;
    inspirecv::Image added = img.Add(value);
    added.Write("cv/added.jpg");
    // Rotate 90 degrees clockwise (also support 270 and 180)
    inspirecv::Image rotated_90 = img.Rotate90();
    rotated_90.Write("cv/rotated_90.jpg");

    // Affine transform
    /**
     * Create a transform matrix from the following matrix
     * [[a11, a12, tx],
     *  [a21, a22, ty]]
     *
     * Face crop transform matrix
     * [[0.0, -1.37626, 261.127],
     *  [1.37626, 0.0, 85.1831]]
     */
    float a11 = 0.0f;
    float a12 = -1.37626f;
    float a21 = 1.37626f;
    float a22 = 0.0f;
    float b1 = 261.127f;
    float b2 = 85.1831f;
    inspirecv::TransformMatrix trans = inspirecv::TransformMatrix::Create(a11, a12, b1, a21, a22, b2);
    int dst_width = 112;
    int dst_height = 112;
    inspirecv::Image affine = rotated_90.WarpAffine(trans, dst_width, dst_height);
    affine.Write("cv/affine.jpg");

    /* Image Draw */
    inspirecv::Image draw_img = img.Clone();
    // Draw a rectangle
    inspirecv::Rect<int> new_rect = rect.Square(1.1f);  // Square and expand the rect
    int thickness = 3;
    draw_img.DrawRect(new_rect, inspirecv::Color::Green, thickness);
    draw_img.Write("cv/draw_rect.jpg");
    // Draw a circle at each rect vertex
    draw_img = img.Clone();
    std::vector<inspirecv::Point<int>> points = new_rect.As<int>().ToFourVertices();
    for (auto& point : points) {
        draw_img.DrawCircle(point, 1, inspirecv::Color::Red, 5);
    }
    draw_img.Write("cv/draw_circle.jpg");
    // Draw a line
    draw_img = img.Clone();
    draw_img.DrawLine(points[0], points[1], inspirecv::Color::Cyan, 2);
    draw_img.DrawLine(points[1], points[2], inspirecv::Color::Magenta, 2);
    draw_img.DrawLine(points[2], points[3], inspirecv::Color::Pink, 2);
    draw_img.DrawLine(points[3], points[0], inspirecv::Color::Yellow, 2);
    draw_img.Write("cv/draw_line.jpg");
    // Fill a rectangle
    draw_img = img.Clone();
    draw_img.Fill(new_rect, inspirecv::Color::Purple);
    draw_img.Write("cv/fill_rect.jpg");
    // Reset the image to a uniform mid-gray buffer
    std::vector<uint8_t> gray_color(img.Width() * img.Height() * 3, 128);
    img.Reset(img.Width(), img.Height(), 3, gray_color.data());
    img.Write("cv/reset.jpg");

    /** FrameProcess */
    // BGR888 as raw data
    inspirecv::Image raw = inspirecv::Image::Create("test_res/data/bulk/kun_cartoon_crop_r90.jpg", 3);
    const uint8_t* buffer = raw.Data();
    // You can also use other image format, like NV21, NV12, RGBA, RGB, BGR, BGRA
    // const uint8_t* buffer = ...;
    // Create frame process
    auto width = raw.Width();
    auto height = raw.Height();
    auto rotation_mode = inspirecv::ROTATION_90;
    auto data_format = inspirecv::BGR;
    inspirecv::FrameProcess frame_process = inspirecv::FrameProcess::Create(buffer, height, width, data_format, rotation_mode);
    // Set preview size
    frame_process.SetPreviewSize(160);
    // Set preview scale
    // frame_process.SetPreviewScale(0.5f);
    // Get transform image
    inspirecv::Image transform_img = frame_process.ExecutePreviewImageProcessing(true);
    transform_img.Write("cv/transform_img.jpg");
    // ExecuteImageAffineProcessing
    // Face crop transform matrix
    // [[0.0, 0.726607, -61.8946],
    //  [-0.726607, 0.0, 189.737]]
    a11 = 0.0f;
    a12 = 0.726607f;
    a21 = -0.726607f;  // was -0.726607 (double literal); use f suffix like the siblings
    a22 = 0.0f;
    b1 = -61.8946f;
    b2 = 189.737f;
    inspirecv::TransformMatrix affine_matrix = inspirecv::TransformMatrix::Create(a11, a12, b1, a21, a22, b2);
    dst_width = 112;
    dst_height = 112;
    inspirecv::Image affine_img = frame_process.ExecuteImageAffineProcessing(affine_matrix, dst_width, dst_height);
    affine_img.Write("cv/affine_img.jpg");
    return 0;
}

View File

@@ -1,8 +1,9 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/pipeline_module/attribute/face_attribute_adapt.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspirecv/time_spend.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/include/inspireface/frame_process.h>
#include <inspireface/include/inspireface/spend_timer.h>
#include <inspireface/include/inspireface/herror.h>
#include <log.h>
using namespace inspire;
@@ -10,8 +11,8 @@ using namespace inspire;
int main() {
INSPIRE_SET_LOG_LEVEL(ISF_LOG_DEBUG);
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Gundam_RV1106");
auto archive = INSPIRE_LAUNCH->getMArchive();
INSPIREFACE_CONTEXT->Load("test_res/pack/Gundam_RV1106");
auto archive = INSPIREFACE_CONTEXT->getMArchive();
InspireModel detModel;
auto ret = archive.LoadModel("face_attribute", detModel);
if (ret != SARC_SUCCESS) {
@@ -20,7 +21,7 @@ int main() {
}
FaceAttributePredictAdapt face_attribute;
face_attribute.loadData(detModel, detModel.modelType, false);
face_attribute.LoadData(detModel, detModel.modelType, false);
auto img = inspirecv::Image::Create("test_res/data/crop/crop.png");
auto result = face_attribute(img);

View File

@@ -1,16 +1,17 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspirecv/time_spend.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/include/inspireface/frame_process.h>
#include <inspireface/include/inspireface/spend_timer.h>
#include <inspireface/include/inspireface/herror.h>
using namespace inspire;
int main() {
INSPIRE_SET_LOG_LEVEL(ISF_LOG_DEBUG);
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Gundam_RV1106");
auto archive = INSPIRE_LAUNCH->getMArchive();
INSPIREFACE_CONTEXT->Load("test_res/pack/Gundam_RV1106");
auto archive = INSPIREFACE_CONTEXT->getMArchive();
InspireModel detModel;
auto ret = archive.LoadModel("face_detect_160", detModel);
if (ret != SARC_SUCCESS) {
@@ -21,7 +22,7 @@ int main() {
std::vector<int> input_size;
input_size = detModel.Config().get<std::vector<int>>("input_size");
ret = face_detect.loadData(detModel, detModel.modelType, false);
ret = face_detect.LoadData(detModel, detModel.modelType, false);
if (ret != 0) {
INSPIRE_LOGE("Load %s error: %d", "face_detect_160", ret);
return HERR_ARCHIVE_LOAD_MODEL_FAILURE;
@@ -31,7 +32,7 @@ int main() {
auto img = inspirecv::Image::Create("data/bulk/kun.jpg");
inspirecv::TimeSpend time_spend("Detect");
inspire::SpendTimer time_spend("Detect");
FaceLocList results;
for (int i = 0; i < 10; i++) {
time_spend.Start();

View File

@@ -1,7 +1,8 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/middleware/nexus_processor/image_processor.h>
#include <inspireface/image_process/nexus_processor/image_processor.h>
#include "log.h"
#include <inspirecv/time_spend.h>
#include <inspireface/include/inspireface/spend_timer.h>
#include <inspireface/include/inspireface/herror.h>
using namespace inspire;
@@ -18,7 +19,7 @@ int main() {
uint8_t* resized_data = nullptr;
int resized_width = 100;
int resized_height = 100;
inspirecv::TimeSpend time_spend("RGA resize");
inspire::SpendTimer time_spend("RGA resize");
for (int i = 0; i < 10; i++) {
time_spend.Start();
auto ret = processor->Resize(img.Data(), img.Width(), img.Height(), img.Channels(), &resized_data, resized_width, resized_height);
@@ -37,7 +38,7 @@ int main() {
processor->MarkDone();
uint8_t* swapped_data = nullptr;
inspirecv::TimeSpend swap_time_spend("RGA swap color");
inspire::SpendTimer swap_time_spend("RGA swap color");
for (int i = 0; i < 10; i++) {
swap_time_spend.Start();
auto ret = processor->SwapColor(resized_img.Data(), resized_img.Width(), resized_img.Height(), resized_img.Channels(), &swapped_data);
@@ -57,7 +58,7 @@ int main() {
int bottom = 10;
int left = 10;
int right = 10;
inspirecv::TimeSpend padding_time_spend("RGA padding");
inspire::SpendTimer padding_time_spend("RGA padding");
int padded_width = 0;
int padded_height = 0;
for (int i = 0; i < 10; i++) {
@@ -76,7 +77,7 @@ int main() {
// inspirecv crop
inspirecv::Rect2i rect(30, 30, 70, 70);
inspirecv::TimeSpend inspirecv_crop_time_spend("InspireCV crop");
inspire::SpendTimer inspirecv_crop_time_spend("InspireCV crop");
inspirecv::Image inspirecv_cropped_img;
for (int i = 0; i < 10; i++) {
inspirecv_crop_time_spend.Start();
@@ -91,7 +92,7 @@ int main() {
int dst_width = 320;
int dst_height = 320;
float scale = 0.0f;
inspirecv::TimeSpend padded_crop_time_spend("RGA padded and cropped");
inspire::SpendTimer padded_crop_time_spend("RGA padded and cropped");
for (int i = 0; i < 10; i++) {
padded_crop_time_spend.Start();
auto ret = processor->ResizeAndPadding(image.Data(), image.Width(), image.Height(), image.Channels(), dst_width, dst_height,
@@ -110,7 +111,7 @@ int main() {
uint8_t* resized_data_2 = nullptr;
int resized_width_2 = 512;
int resized_height_2 = 512;
inspirecv::TimeSpend time_spend_2("RGA resize 2");
inspire::SpendTimer time_spend_2("RGA resize 2");
for (int i = 0; i < 10; i++) {
time_spend_2.Start();
auto ret = processor->Resize(padded_cropped_img.Data(), padded_cropped_img.Width(), padded_cropped_img.Height(),

View File

@@ -1,26 +1,28 @@
#include <inspirecv/inspirecv.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/include/inspireface/launch.h>
#include "inspireface/middleware/model_archive/inspire_archive.h"
#include "inspireface/track_module/face_detect/face_detect_adapt.h"
#include "inspireface/track_module/landmark/face_landmark_adapt.h"
#include "inspireface/track_module/quality/face_pose_quality_adapt.h"
#include "inspireface/recognition_module/extract/extract_adapt.h"
#include "inspireface/include/inspireface/spend_timer.h"
void test_face_detect() {
inspire::InspireModel model;
INSPIRE_LAUNCH->getMArchive().LoadModel("face_detect_160", model);
INSPIREFACE_CONTEXT->getMArchive().LoadModel("face_detect_160", model);
auto input_size = 160;
inspire::FaceDetectAdapt faceDetectAdapt(input_size);
faceDetectAdapt.loadData(model, model.modelType);
faceDetectAdapt.LoadData(model, model.modelType);
inspirecv::Image image = inspirecv::Image::Create("test_res/data/bulk/kun.jpg");
inspire::FaceLocList faces;
inspirecv::TimeSpend timeSpend("Face Detect@" + std::to_string(input_size));
inspire::SpendTimer timeSpend("Face Detect@" + std::to_string(input_size));
for (int i = 0; i < 1000; i++) {
timeSpend.Start();
faces = faceDetectAdapt(image);
timeSpend.Stop();
}
std::cout << timeSpend << std::endl;
;
std::cout << "faces size: " << faces.size() << std::endl;
for (auto &face : faces) {
inspirecv::Rect2i rect = inspirecv::Rect2i::Create(face.x1, face.y1, face.x2 - face.x1, face.y2 - face.y1);
@@ -31,20 +33,21 @@ void test_face_detect() {
void test_landmark() {
inspire::InspireModel model;
INSPIRE_LAUNCH->getMArchive().LoadModel("landmark", model);
INSPIREFACE_CONTEXT->getMArchive().LoadModel("landmark", model);
auto input_size = 112;
inspire::FaceLandmarkAdapt landmarkAdapt(input_size);
landmarkAdapt.loadData(model, model.modelType);
landmarkAdapt.LoadData(model, model.modelType);
inspirecv::Image image = inspirecv::Image::Create("test_res/data/crop/crop.png");
image = image.Resize(input_size, input_size);
std::vector<float> lmk;
inspirecv::TimeSpend timeSpend("Landmark@" + std::to_string(input_size));
inspire::SpendTimer timeSpend("Landmark@" + std::to_string(input_size));
timeSpend.Start();
for (int i = 0; i < 10; i++) {
lmk = landmarkAdapt(image);
}
timeSpend.Stop();
std::cout << timeSpend << std::endl;
;
for (int i = 0; i < inspire::FaceLandmarkAdapt::NUM_OF_LANDMARK; i++) {
auto p = inspirecv::Point2i::Create(lmk[i * 2] * input_size, lmk[i * 2 + 1] * input_size);
image.DrawCircle(p, 5, {0, 0, 255});
@@ -54,20 +57,21 @@ void test_landmark() {
void test_quality() {
inspire::InspireModel model;
INSPIRE_LAUNCH->getMArchive().LoadModel("pose_quality", model);
INSPIREFACE_CONTEXT->getMArchive().LoadModel("pose_quality", model);
auto input_size = 96;
inspire::FacePoseQualityAdapt poseQualityAdapt;
poseQualityAdapt.loadData(model, model.modelType);
poseQualityAdapt.LoadData(model, model.modelType);
inspirecv::Image image = inspirecv::Image::Create("test_res/data/crop/crop.png");
image = image.Resize(input_size, input_size);
inspire::FacePoseQualityAdaptResult quality;
inspirecv::TimeSpend timeSpend("Pose Quality@" + std::to_string(input_size));
inspire::SpendTimer timeSpend("Pose Quality@" + std::to_string(input_size));
timeSpend.Start();
for (int i = 0; i < 10; i++) {
quality = poseQualityAdapt(image);
}
timeSpend.Stop();
std::cout << timeSpend << std::endl;
;
std::cout << "quality: " << quality.pitch << ", " << quality.yaw << ", " << quality.roll << std::endl;
for (int i = 0; i < quality.lmk.size(); i++) {
std::cout << "lmk: " << quality.lmk[i].GetX() << ", " << quality.lmk[i].GetY() << std::endl;
@@ -79,15 +83,15 @@ void test_quality() {
void test_feature() {
inspire::InspireModel model;
INSPIRE_LAUNCH->getMArchive().LoadModel("feature", model);
INSPIREFACE_CONTEXT->getMArchive().LoadModel("feature", model);
auto input_size = 112;
inspire::ExtractAdapt extractAdapt;
extractAdapt.loadData(model, model.modelType);
extractAdapt.LoadData(model, model.modelType);
inspirecv::Image image = inspirecv::Image::Create("test_res/data/crop/crop.png");
image = image.Resize(input_size, input_size);
float norm;
bool normalize = true;
inspirecv::TimeSpend timeSpend("Extract@" + std::to_string(input_size));
inspire::SpendTimer timeSpend("Extract@" + std::to_string(input_size));
timeSpend.Start();
inspire::Embedded feature;
for (int i = 0; i < 10; i++) {
@@ -95,12 +99,13 @@ void test_feature() {
}
timeSpend.Stop();
std::cout << timeSpend << std::endl;
;
std::cout << "feature: " << feature.size() << std::endl;
}
int main() {
std::string archivePath = "test_res/pack/Pikachu_Apple";
INSPIRE_LAUNCH->Load(archivePath);
INSPIREFACE_CONTEXT->Load(archivePath);
// Test face detect
test_face_detect();

View File

@@ -1,10 +1,10 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/include/inspireface/frame_process.h>
#include <inspireface/pipeline_module/face_pipeline_module.h>
#include <inspireface/common/face_data/face_serialize_tools.h>
#include <inspireface/feature_hub/feature_hub_db.h>
#include <inspireface/include/inspireface/feature_hub_db.h>
using namespace inspire;
@@ -60,19 +60,19 @@ static std::vector<float> FT = {
int main() {
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Pikachu");
INSPIREFACE_CONTEXT->Load("test_res/pack/Pikachu");
DatabaseConfiguration configuration;
configuration.primary_key_mode = PrimaryKeyMode::MANUAL_INPUT;
configuration.enable_persistence = false;
configuration.recognition_threshold = 0.48f;
FEATURE_HUB_DB->EnableHub(configuration);
INSPIREFACE_FEATURE_HUB->EnableHub(configuration);
// std::vector<float> feature(512, 0.0f);
int64_t result_id = 0;
auto ret = FEATURE_HUB_DB->FaceFeatureInsert(FT, 10086, result_id);
auto ret = INSPIREFACE_FEATURE_HUB->FaceFeatureInsert(FT, 10086, result_id);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Failed to insert face feature");
INSPIRE_LOGI("result id: %lld", result_id);
@@ -82,7 +82,7 @@ int main() {
// std::vector<float> query_feature(512, 20.0f);
FaceSearchResult search_result;
ret = FEATURE_HUB_DB->SearchFaceFeature(FT, search_result, true);
ret = INSPIREFACE_FEATURE_HUB->SearchFaceFeature(FT, search_result, true);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Failed to search face feature");
} else {

View File

@@ -1,12 +1,12 @@
#include <iostream>
#include <inspirecv/inspirecv.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/include/inspireface/frame_process.h>
#include "inspireface/track_module/landmark/face_landmark_adapt.h"
int main() {
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Pikachu-t4");
auto archive = INSPIRE_LAUNCH->getMArchive();
INSPIREFACE_CONTEXT->Load("test_res/pack/Pikachu-t4");
auto archive = INSPIREFACE_CONTEXT->getMArchive();
inspire::InspireModel lmkModel;
auto ret = archive.LoadModel("landmark", lmkModel);
@@ -16,7 +16,7 @@ int main() {
}
inspire::FaceLandmarkAdapt lmk;
lmk.loadData(lmkModel, lmkModel.modelType);
lmk.LoadData(lmkModel, lmkModel.modelType);
auto image = inspirecv::Image::Create("test_res/data/crop/crop.png");
auto data = image.Resize(112, 112);

View File

@@ -1,7 +1,7 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/include/inspireface/frame_process.h>
#include <inspireface/pipeline_module/face_pipeline_module.h>
#include <inspireface/common/face_data/face_serialize_tools.h>
@@ -9,8 +9,8 @@ using namespace inspire;
int main() {
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Pikachu");
auto archive = INSPIRE_LAUNCH->getMArchive();
INSPIREFACE_CONTEXT->Load("test_res/pack/Pikachu");
auto archive = INSPIREFACE_CONTEXT->getMArchive();
auto mode = inspire::DetectModuleMode::DETECT_MODE_LIGHT_TRACK;
FaceTrackModule tracker(mode, 10, 20, 320, -1);
tracker.Configuration(archive, expansion_path);
@@ -18,7 +18,7 @@ int main() {
FacePipelineModule pipe(archive, true, true, true, true);
auto image = inspirecv::Image::Create("test_res/data/bulk/r90.jpg");
inspirecv::InspireImageProcess processor;
inspirecv::FrameProcess processor;
processor.SetDataBuffer(image.Data(), image.Height(), image.Width());
processor.SetDataFormat(inspirecv::DATA_FORMAT::BGR);
processor.SetRotationMode(inspirecv::ROTATION_MODE::ROTATION_90);

View File

@@ -1,20 +1,20 @@
#include <inspirecv/inspirecv.h>
#include <inspireface/track_module/face_track_module.h>
#include "inspireface/initialization_module/launch.h"
#include <inspireface/middleware/inspirecv_image_process.h>
#include <inspireface/include/inspireface/launch.h>
#include <inspireface/include/inspireface/frame_process.h>
using namespace inspire;
int main() {
std::string expansion_path = "";
INSPIRE_LAUNCH->Load("test_res/pack/Pikachu");
auto archive = INSPIRE_LAUNCH->getMArchive();
INSPIREFACE_CONTEXT->Load("test_res/pack/Pikachu");
auto archive = INSPIREFACE_CONTEXT->getMArchive();
auto mode = inspire::DetectModuleMode::DETECT_MODE_ALWAYS_DETECT;
FaceTrackModule tracker(mode, 10, 20, 320, -1);
tracker.Configuration(archive, expansion_path);
auto image = inspirecv::Image::Create("test_res/data/bulk/r0.jpg");
inspirecv::InspireImageProcess processor;
inspirecv::FrameProcess processor;
processor.SetDataBuffer(image.Data(), image.Height(), image.Width());
processor.SetDataFormat(inspirecv::DATA_FORMAT::BGR);
processor.SetRotationMode(inspirecv::ROTATION_MODE::ROTATION_0);