Add the inspireface project to cpp-package.

JingyuYan
2024-05-02 01:27:29 +08:00
parent e90dacb3cf
commit 08d7e96f79
431 changed files with 370534 additions and 0 deletions


@@ -0,0 +1,255 @@
cmake_minimum_required(VERSION 3.10)
project(InspireFaceSample)
option(BUILD_SAMPLE_CLUTTERED "Whether to build the cluttered sample programs (developer debug code)" OFF)
# OpenCV Configuration
if(NOT APPLE)
# message(STATUS OpenCV_DIR=${OpenCV_DIR})
find_package(OpenCV REQUIRED)
endif()
include_directories(${SRC_DIR})
if (ENABLE_RKNN)
set(RKNN_API_LIB ${THIRD_PARTY_DIR}/${RKNPU_MAJOR}/runtime/${RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
link_directories(${RKNN_API_LIB})
set(ext rknn_api dl)
endif ()
add_executable(Leak cpp/leak.cpp)
target_link_libraries(Leak InspireFace ${ext})
set_target_properties(Leak PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face detection and tracking
add_executable(FaceTrackSample cpp/sample_face_track.cpp)
target_link_libraries(FaceTrackSample InspireFace ${ext})
set_target_properties(FaceTrackSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceTrackSampleCost cpp/sample_face_track_cost.cpp)
target_link_libraries(FaceTrackSampleCost InspireFace ${ext})
set_target_properties(FaceTrackSampleCost PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Example of multi-threaded face detection and tracking
add_executable(MTFaceTrackSample cpp/sample_face_track_mt.cpp)
target_link_libraries(MTFaceTrackSample InspireFace ${ext})
set_target_properties(MTFaceTrackSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Examples of face recognition
add_executable(FaceRecognitionSample cpp/sample_face_recognition.cpp)
target_link_libraries(FaceRecognitionSample InspireFace ${ext})
set_target_properties(FaceRecognitionSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
add_executable(FaceSearchSample cpp/sample_face_search.cpp)
target_link_libraries(FaceSearchSample InspireFace ${ext})
set_target_properties(FaceSearchSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Example of face comparison (1:1)
add_executable(FaceComparisonSample cpp/sample_face_comparison.cpp)
target_link_libraries(FaceComparisonSample InspireFace ${ext})
set_target_properties(FaceComparisonSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
)
# Platform-specific branching
if (BUILD_LINUX_ARM7 OR BUILD_LINUX_AARCH64)
# Typically an embedded-system or development-board scenario; some GUI-related functions are unsupported
elseif(ANDROID)
# On Android, executables are generally run from the shell
else()
# Usually x86 Linux or macOS
endif ()
# These sample programs are debugging and testing code left over from development.
# They are cluttered and unorganized, or their functionality is already covered by the standard samples.
# You can ignore them.
if (BUILD_SAMPLE_CLUTTERED)
if (NOT BUILD_LINUX_ARM7 AND NOT BUILD_LINUX_AARCH64)
# =======================InspireFace Sample===========================
add_executable(TrackerSample cluttered/standard/tracker_sample.cpp)
target_link_libraries(TrackerSample InspireFace)
set_target_properties(TrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ContextSample cluttered/standard/context_sample.cpp)
target_link_libraries(ContextSample InspireFace)
set_target_properties(ContextSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(TestSample cluttered/standard/test_sample.cpp)
target_link_libraries(TestSample InspireFace)
set_target_properties(TestSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(NetSample cluttered/standard/net_sample.cpp)
target_link_libraries(NetSample InspireFace)
set_target_properties(NetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(RecSample cluttered/standard/rec_sample.cpp)
target_link_libraries(RecSample InspireFace)
set_target_properties(RecSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(BMSample cluttered/standard/bm_sample.cpp)
target_link_libraries(BMSample InspireFace)
set_target_properties(BMSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
else()
# =======================RK Temporary test category===========================
if (ENABLE_RKNN)
set(RKNN_API_LIB ${THIRD_PARTY_DIR}/${RKNPU_MAJOR}/runtime/${RK_DEVICE_TYPE}/Linux/librknn_api/${CPU_ARCH}/)
message("Enable RKNN Inference")
link_directories(${RKNN_API_LIB})
# Face detection
add_executable(RKFaceDetSample cluttered/rk_sample/rk_face_det_sample.cpp)
target_link_libraries(RKFaceDetSample InspireFace rknn_api dl)
set_target_properties(RKFaceDetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Simple network test
add_executable(RKSimpleNetSample cluttered/rk_sample/rk_simple_net_sample.cpp)
target_link_libraries(RKSimpleNetSample InspireFace rknn_api dl)
set_target_properties(RKSimpleNetSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Face recognition
add_executable(RKFaceRecSample cluttered/rk_sample/rk_face_recognize_sample.cpp)
target_link_libraries(RKFaceRecSample InspireFace rknn_api dl)
set_target_properties(RKFaceRecSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Tracking module
add_executable(RKTrackerSample cluttered/rk_sample/rk_tracker_sample.cpp)
target_link_libraries(RKTrackerSample InspireFace rknn_api dl)
set_target_properties(RKTrackerSample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Debug
add_executable(DebugRKRec cluttered/rk_sample/debug_rk_rec.cpp)
target_link_libraries(DebugRKRec InspireFace rknn_api dl)
set_target_properties(DebugRKRec PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ArchTest cluttered/standard/archive_test.cpp)
target_link_libraries(ArchTest InspireFace)
set_target_properties(ArchTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
endif()
endif()
# SQLite storage test
add_executable(SQLiteTest cluttered/standard/test_sqlite_sample.cpp)
target_link_libraries(SQLiteTest InspireFace)
set_target_properties(SQLiteTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
if (ENABLE_RKNN)
set(DEPEND rknn_api dl)
endif ()
# C_API Demo
add_executable(CAPISample cluttered/standard/c_api_sample.cpp)
target_link_libraries(CAPISample InspireFace ${DEPEND})
set_target_properties(CAPISample PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
# Loop tracker demo
add_executable(LoopTracker cluttered/standard/loop_tracker.cpp)
target_link_libraries(LoopTracker InspireFace ${DEPEND})
set_target_properties(LoopTracker PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ArchTracker cluttered/standard/archive_tracker.cpp)
target_link_libraries(ArchTracker InspireFace)
set_target_properties(ArchTracker PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
add_executable(ErrorTest cluttered/standard/error_test.cpp)
target_link_libraries(ErrorTest InspireFace)
set_target_properties(ErrorTest PROPERTIES
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/cluttered/"
)
endif ()
# Print Message
message(STATUS "InspireFace Sample:")
message(STATUS "\t BUILD_SAMPLE_CLUTTERED: ${BUILD_SAMPLE_CLUTTERED}")
# Install bin
install(TARGETS Leak RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceTrackSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceTrackSampleCost RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS MTFaceTrackSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceRecognitionSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceSearchSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)
install(TARGETS FaceComparisonSample RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/sample)


@@ -0,0 +1,108 @@
//
// Created by tunm on 2023/9/23.
//
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "middleware/inference_helper/customized/rknn_adapter.h"
#include "inspireface/feature_hub/simd.h"
#include <memory>
#include "inspireface/recognition_module/extract/extract.h"
#include "middleware/model_archive/inspire_archive.h"
using namespace inspire;
int main() {
std::vector<std::string> names = {
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
};
InspireArchive loader("test_res/pack/test_zip_rec");
{
InspireModel model;
loader.LoadModel("feature", model);
auto net = std::make_shared<RKNNAdapter>();
net->Initialize((unsigned char* )model.buffer, model.bufferSize);
net->setOutputsWantFloat(1);
EmbeddedList list;
for (int i = 0; i < names.size(); ++i) {
cv::Mat image = cv::imread(names[i]);
cv::Mat rgb;
cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
net->SetInputData(0, rgb);
net->RunModel();
auto out = net->GetOutputData(0);
auto dims = net->GetOutputTensorSize(0);
// for (int i = 0; i < dims.size(); ++i) {
// LOGD("%lu", dims[i]);
// }
//
Embedded emb;
for (int j = 0; j < 512; ++j) {
std::cout << out[j] << ", ";
emb.push_back(out[j]);
}
std::cout << std::endl;
list.push_back(emb);
}
for (int i = 0; i < list.size(); ++i) {
auto &embedded = list[i];
float norm = 0.0f; // accumulate the squared L2 norm
for (const auto &one: embedded) {
norm += one * one;
}
norm = sqrt(norm);
for (float &one : embedded) {
one /= norm; // L2-normalize so the dot product below is cosine similarity
}
}
auto cos = simd_dot(list[0].data(), list[1].data(), 512);
LOGD("COS: %f", cos);
}
{
std::shared_ptr<Extract> m_extract_;
Configurable param;
param.set<int>("model_index", 0);
param.set<std::string>("input_layer", "input");
param.set<std::vector<std::string>>("outputs_layers", {"267", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
param.set<bool>("swap_color", true); // RK requires rgb input
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader.LoadModel("feature", model);
m_extract_->loadData(model, InferenceHelper::kRknn);
cv::Mat image = cv::imread(names[0]);
// cv::Mat rgb;
// cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
auto feat = m_extract_->GetFaceFeature(image);
for (int i = 0; i < 512; ++i) {
std::cout << feat[i] << ", ";
}
std::cout << std::endl;
}
LOGD("End");
return 0;
}
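The block above L2-normalizes each embedding and then calls simd_dot, which for unit-length vectors equals cosine similarity. A minimal standalone sketch of that math (plain scalar C++; l2_normalize and cosine_similarity are illustrative helpers, not part of InspireFace):

#include <cmath>
#include <vector>

// Scale v to unit length in place.
static void l2_normalize(std::vector<float> &v) {
    float norm = 0.0f;
    for (float x : v) norm += x * x;
    norm = std::sqrt(norm);
    for (float &x : v) x /= norm;
}

// Dot product of two equal-length vectors; equals cosine similarity
// when both inputs have unit length.
static float cosine_similarity(const std::vector<float> &a, const std::vector<float> &b) {
    float dot = 0.0f;
    for (size_t i = 0; i < a.size(); ++i) dot += a[i] * b[i];
    return dot;
}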


@@ -0,0 +1,52 @@
//
// Created by Tunm-Air13 on 2023/9/20.
//
#include "opencv2/opencv.hpp"
//#include "inspireface/middleware/model_loader/model_loader.h"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/middleware/costman.h"
#include "middleware/model_archive/inspire_archive.h"
#include "log.h"
using namespace inspire;
int main() {
auto detModel = "test_res/pack/Gundam_RV1109";
InspireArchive inspireArchive;
auto ret = inspireArchive.ReLoad(detModel);
if (ret != SARC_SUCCESS) {
LOGE("Error load");
return ret;
}
InspireModel model;
ret = inspireArchive.LoadModel("face_detect", model);
if (ret != SARC_SUCCESS) {
LOGE("Error model");
return ret;
}
std::cout << model.Config().toString() << std::endl;
std::shared_ptr<FaceDetect> m_face_detector_;
m_face_detector_ = std::make_shared<FaceDetect>(320);
m_face_detector_->loadData(model, InferenceHelper::kRknn);
// Load an image
cv::Mat image = cv::imread("test_res/images/face_sample.png");
Timer timer;
FaceLocList locs = (*m_face_detector_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("Faces: %ld", locs.size());
for (auto &loc: locs) {
cv::rectangle(image, cv::Point2f(loc.x1, loc.y1), cv::Point2f(loc.x2, loc.y2), cv::Scalar(0, 0, 255), 3);
}
cv::imwrite("det.jpg", image);
return 0;
}


@@ -0,0 +1,89 @@
//
// Created by tunm on 2023/9/21.
//
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "inspireface/feature_hub/face_recognition.h"
#include "inspireface/feature_hub/simd.h"
#include "inspireface/middleware/model_archive/inspire_archive.h"
using namespace inspire;
std::shared_ptr<InspireArchive> loader;
void rec_function() {
std::shared_ptr<Extract> m_extract_;
Configurable param;
// param.set<int>("model_index", ModelIndex::_03_extract);
param.set<std::string>("input_layer", "input");
param.set<std::vector<std::string>>("outputs_layers", {"267", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
param.set<bool>("swap_color", true); // RK requires rgb input
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader->LoadModel("feature", model);
m_extract_->loadData(model, InferenceHelper::kRknn);
loader.reset();
std::vector<std::string> files = {
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
};
EmbeddedList embedded_list;
for (int i = 0; i < files.size(); ++i) {
auto warped = cv::imread(files[i]);
Timer timer;
auto emb = (*m_extract_)(warped);
LOGD("耗时: %f", timer.GetCostTimeUpdate());
embedded_list.push_back(emb);
LOGD("%lu", emb.size());
}
float _0v1;
float _0v2;
float _1v2;
FaceRecognition::CosineSimilarity(embedded_list[0], embedded_list[1], _0v1);
FaceRecognition::CosineSimilarity(embedded_list[0], embedded_list[2], _0v2);
FaceRecognition::CosineSimilarity(embedded_list[1], embedded_list[2], _1v2);
LOGD("0 vs 1 : %f", _0v1);
LOGD("0 vs 2 : %f", _0v2);
LOGD("1 vs 2 : %f", _1v2);
// LOGD("size: %lu", embedded_list.size());
// LOGD("num of vector: %lu", embedded_list[2].size());
//
// float _0v1 = simd_dot(embedded_list[0].data(), embedded_list[1].data(), 512);
// float _0v2 = simd_dot(embedded_list[0].data(), embedded_list[2].data(), 512);
// float _1v2 = simd_dot(embedded_list[1].data(), embedded_list[2].data(), 512);
// LOGD("0 vs 1 : %f", _0v1);
// LOGD("0 vs 2 : %f", _0v2);
// LOGD("1 vs 2 : %f", _1v2);
}
int main() {
loader = std::make_shared<InspireArchive>();
loader->ReLoad("test_res/pack/Gundam_RV1109");
rec_function();
return 0;
}


@@ -0,0 +1,316 @@
//
// Created by Tunm-Air13 on 2023/9/21.
//
#include "opencv2/opencv.hpp"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/pipeline_module/attribute/mask_predict.h"
#include "inspireface/middleware/costman.h"
#include "inspireface/track_module/quality/face_pose_quality.h"
#include "inspireface/track_module/landmark/face_landmark.h"
#include "inspireface/pipeline_module/liveness/rgb_anti_spoofing.h"
#include "inspireface/face_context.h"
using namespace inspire;
InspireArchive loader;
void test_rnet() {
std::shared_ptr<RNet> m_rnet_;
Configurable param;
// param.set<int>("model_index", ModelIndex::_04_refine_net);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"conv5-1/Softmax", "conv5-2/BiasAdd"});
param.set<std::vector<int>>("input_size", {24, 24});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
InspireModel model;
loader.LoadModel("refine_net", model);
m_rnet_ = std::make_shared<RNet>();
m_rnet_->loadData(model, InferenceHelper::kRknn);
{
// Load an image
cv::Mat image = cv::imread("test_res/images/test_data/hasface.jpg");
Timer timer;
auto score = (*m_rnet_)(image);
LOGD("RNETcost: %f", timer.GetCostTimeUpdate());
LOGD("has face: %f", score);
}
{
// Load an image
cv::Mat image = cv::imread("test_res/images/test_data/noface.jpg");
Timer timer;
auto score = (*m_rnet_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("non face: %f", score);
}
}
void test_mask() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_05_mask);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"activation_1/Softmax",});
param.set<std::vector<int>>("input_size", {96, 96});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<MaskPredict> m_mask_predict_;
m_mask_predict_ = std::make_shared<MaskPredict>();
InspireModel model;
loader.LoadModel("mask_detect", model);
m_mask_predict_->loadData(model, InferenceHelper::kRknn);
{
// Load an image
cv::Mat image = cv::imread("test_res/images/test_data/mask.jpg");
Timer timer;
auto score = (*m_mask_predict_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("mask: %f", score);
}
{
// Load an image
cv::Mat image = cv::imread("test_res/images/test_data/nomask.jpg");
Timer timer;
auto score = (*m_mask_predict_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("maskless: %f", score);
}
}
void test_quality() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_07_pose_q_fp16);
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"fc1", });
param.set<std::vector<int>>("input_size", {96, 96});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<FacePoseQuality> m_face_quality_;
m_face_quality_ = std::make_shared<FacePoseQuality>();
InspireModel model;
loader.LoadModel("pose_quality", model);
m_face_quality_->loadData(model, InferenceHelper::kRknn);
{
std::vector<std::string> names = {
"test_res/images/test_data/p3.jpg",
// "test_res/images/test_data/p1.jpg",
};
for (int i = 0; i < names.size(); ++i) {
LOGD("Image: %s", names[i].c_str());
cv::Mat image = cv::imread(names[i]);
Timer timer;
auto pose_res = (*m_face_quality_)(image);
LOGD("质量cost: %f", timer.GetCostTimeUpdate());
for (auto &p: pose_res.lmk) {
cv::circle(image, p, 0, cv::Scalar(0, 0, 255), 2);
}
cv::imwrite("pose.jpg", image);
LOGD("pitch: %f", pose_res.pitch);
LOGD("yam: %f", pose_res.yaw);
LOGD("roll: %f", pose_res.roll);
for (auto q: pose_res.lmk_quality) {
std::cout << q << ", ";
}
std::cout << std::endl;
}
}
}
void test_landmark_mnn() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_01_lmk);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"prelu1/add", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {127.5f, 127.5f, 127.5f});
param.set<std::vector<float>>("norm", {0.0078125f, 0.0078125f, 0.0078125f});
std::shared_ptr<FaceLandmark> m_landmark_predictor_;
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model);
cv::Mat image = cv::imread("test_res/images/test_data/crop.png");
cv::resize(image, image, cv::Size(112, 112));
std::vector<float> lmk;
Timer timer;
for (int i = 0; i < 50; ++i) {
lmk = (*m_landmark_predictor_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
}
for (int i = 0; i < FaceLandmark::NUM_OF_LANDMARK; ++i) {
float x = lmk[i * 2 + 0] * 112;
float y = lmk[i * 2 + 1] * 112;
cv::circle(image, cv::Point2f(x, y), 0, cv::Scalar(0, 0, 255), 1);
}
cv::imwrite("lmk.jpg", image);
}
void test_landmark() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_01_lmk);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"prelu1/add", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<FaceLandmark> m_landmark_predictor_;
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model, InferenceHelper::kRknn);
cv::Mat image = cv::imread("test_res/images/test_data/0.jpg");
cv::resize(image, image, cv::Size(112, 112));
std::vector<float> lmk;
Timer timer;
for (int i = 0; i < 50; ++i) {
lmk = (*m_landmark_predictor_)(image);
LOGD("LMKcost: %f", timer.GetCostTimeUpdate());
}
for (int i = 0; i < FaceLandmark::NUM_OF_LANDMARK; ++i) {
float x = lmk[i * 2 + 0] * 112;
float y = lmk[i * 2 + 1] * 112;
cv::circle(image, cv::Point2f(x, y), 0, cv::Scalar(0, 0, 255), 1);
}
cv::imwrite("lmk.jpg", image);
}
void test_liveness() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_06_msafa27);
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"556",});
param.set<std::vector<int>>("input_size", {80, 80});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", false); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<RBGAntiSpoofing> m_rgb_anti_spoofing_;
InspireModel model;
loader.LoadModel("rgb_anti_spoofing", model);
m_rgb_anti_spoofing_ = std::make_shared<RBGAntiSpoofing>(80, true);
m_rgb_anti_spoofing_->loadData(model, InferenceHelper::kRknn);
std::vector<std::string> names = {
"test_res/images/test_data/real.jpg",
"test_res/images/test_data/fake.jpg",
"test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg",
"test_res/images/test_data/w.jpg",
"test_res/images/test_data/w2.jpg",
};
for (int i = 0; i < names.size(); ++i) {
auto image = cv::imread(names[i]);
Timer timer;
auto score = (*m_rgb_anti_spoofing_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("%s : %f", names[i].c_str(), score);
}
}
int test_liveness_ctx() {
CustomPipelineParameter parameter;
parameter.enable_liveness = true;
FaceContext ctx;
ctx.Configuration("test_res/pack/Gundam_RV1109", inspire::DETECT_MODE_IMAGE, 3, parameter);
std::vector<std::string> names = {
"test_res/images/test_data/real.jpg",
"test_res/images/test_data/fake.jpg",
"test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg",
"test_res/images/test_data/w.jpg",
"test_res/images/test_data/w2.jpg",
"test_res/images/test_data/bb.png",
};
for (int i = 0; i < names.size(); ++i) {
auto image = cv::imread(names[i]);
auto score = (*ctx.FacePipelineModule()->getMRgbAntiSpoofing())(image);
LOGD("%s : %f", names[i].c_str(), score);
}
return 0;
}
int main() {
loader.ReLoad("test_res/pack/Gundam_RV1109");
// test_rnet();
// test_mask();
// test_quality();
// test_landmark_mnn();
// test_landmark();
test_liveness();
test_liveness_ctx();
return 0;
}
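Every test function above rebuilds nearly the same Configurable block, differing only in layer names, input size, and color swapping. A hedged sketch of a helper that could factor out the shared RKNN-style tensor settings (make_rknn_image_param is hypothetical, not part of this commit):

static Configurable make_rknn_image_param(const std::string &input_layer,
                                          const std::vector<std::string> &output_layers,
                                          const std::vector<int> &input_size,
                                          bool swap_color) {
    Configurable param;
    param.set<std::string>("input_layer", input_layer);
    param.set<std::vector<std::string>>("outputs_layers", output_layers);
    param.set<std::vector<int>>("input_size", input_size);
    param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
    param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
    param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
    param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
    param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
    param.set<bool>("nchw", false);
    param.set<bool>("swap_color", swap_color);
    return param;
}

// e.g. test_mask() could then begin with:
// auto param = make_rknn_image_param("input_1", {"activation_1/Softmax"}, {96, 96}, true);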


@@ -0,0 +1,76 @@
//
// Created by Tunm-Air13 on 2023/9/22.
//
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "inspireface/face_context.h"
using namespace inspire;
int main() {
FaceContext ctx;
CustomPipelineParameter param;
int32_t ret = ctx.Configuration(
"test_res/pack/Gundam_RV1109",
DetectMode::DETECT_MODE_VIDEO,
3,
param);
if (ret != HSUCCEED) {
LOGE("Initiate error");
}
cv::Mat frame;
std::string imageFolder = "test_res/video_frames/";
// auto video_frame_num = 10;
auto video_frame_num = 288;
for (int i = 0; i < video_frame_num; ++i) {
auto index = i + 1;
std::stringstream frameFileName;
frameFileName << imageFolder << "frame-" << std::setw(4) << std::setfill('0') << index << ".jpg";
frame = cv::imread(frameFileName.str());
CameraStream stream;
stream.SetRotationMode(ROTATION_0);
stream.SetDataFormat(BGR);
stream.SetDataBuffer(frame.data, frame.rows, frame.cols);
Timer timer;
ctx.FaceDetectAndTrack(stream);
LOGD("Cost: %f", timer.GetCostTimeUpdate());
LOGD("faces: %d", ctx.GetNumberOfFacesCurrentlyDetected());
LOGD("track id: %d", ctx.GetTrackingFaceList()[0].GetTrackingId());
auto &face = ctx.GetTrackingFaceList()[0];
for (auto &p: face.landmark_) {
cv::circle(frame, p, 0, cv::Scalar(0, 0, 255), 3);
}
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
std::string text = "ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count) + " Cf: " + std::to_string(face.GetConfidence());
cv::Point text_position(rect.x, rect.y - 10);
int font_face = cv::FONT_HERSHEY_SIMPLEX;
double font_scale = 0.5;
int font_thickness = 1;
cv::Scalar font_color(255, 255, 255);
cv::putText(frame, text, text_position, font_face, font_scale, font_color, font_thickness);
std::stringstream saveFile;
saveFile << "track_frames/" << "result-" << std::setw(4) << std::setfill('0') << index << ".jpg";
cv::imwrite(saveFile.str(), frame);
}
return 0;
}
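The loop above expects pre-extracted frames named frame-0001.jpg, frame-0002.jpg, and so on under test_res/video_frames/. Frames in exactly that pattern could be produced with, for example, ffmpeg (illustrative command, not part of this commit):

ffmpeg -i input.mp4 test_res/video_frames/frame-%04d.jpg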


@@ -0,0 +1,38 @@
//
// Created by tunm on 2024/4/6.
//
#include <iostream>
#include "track_module/face_track.h"
#include "inspireface/feature_hub/face_recognition.h"
#include "log.h"
#include "track_module/face_track.h"
#include "pipeline_module/face_pipeline.h"
#include "inspireface/feature_hub/face_recognition.h"
#include "middleware/inference_helper/customized/rknn_adapter.h"
using namespace inspire;
int main() {
InspireArchive archive;
auto ret = archive.ReLoad("test_res/pack/Gundam_RV1109");
LOGD("ReLoad %d", ret);
// InspireModel model;
// ret = archive.LoadModel("mask_detect", model);
// LOGD("LoadModel %d", ret);
FaceTrack track;
ret = track.Configuration(archive);
LOGD("Configuration %d", ret);
FacePipeline pipeline(archive, true, true, true, true, true);
FaceRecognition recognition(archive, true);
// std::shared_ptr<RKNNAdapter> rknet = std::make_shared<RKNNAdapter>();
// ret = rknet->Initialize((unsigned char* )model.buffer, model.bufferSize);
//
// LOGD("LoadModel %d", ret);
return 0;
}


@@ -0,0 +1,47 @@
//
// Created by tunm on 2024/4/6.
//
#include <iostream>
#include "track_module/face_track.h"
#include "inspireface/recognition_module/face_feature_extraction.h"
#include "log.h"
using namespace inspire;
int main() {
InspireArchive archive;
archive.ReLoad("test_res/pack/Gundam_RV1109");
FaceTrack track;
// FaceRecognition recognition(archive, true);
auto ret = track.Configuration(archive);
INSPIRE_LOGD("ret=%d", ret);
if (ret != 0) {
return -1;
}
auto image = cv::imread("test_res/data/bulk/kun.jpg");
CameraStream stream;
stream.SetDataBuffer(image.data, image.rows, image.cols);
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
track.UpdateStream(stream, true);
// if (!track.trackingFace.empty()) {
// auto const &face = track.trackingFace[0];
// cv::rectangle(image, face.GetRectSquare(), cv::Scalar(200, 0, 20), 2);
// }
//
// cv::imshow("w", image);
// cv::waitKey(0);
InspireModel model;
ret = archive.LoadModel("mask_detect", model);
std::cout << ret << std::endl;
archive.PublicPrintSubFiles();
return 0;
}


@@ -0,0 +1,83 @@
//
// Created by Tunm-Air13 on 2023/9/11.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "log.h"
#include "inspireface/feature_hub/simd.h"
//#include <Eigen/Dense>
using namespace inspire;
int main() {
int N = 512;
int vectorSize = 512; // Vector length
{
// Create an Nx512 matrix of type CV_32F and fill it with random numbers
cv::Mat mat(N, vectorSize, CV_32F);
cv::randu(mat, cv::Scalar(0), cv::Scalar(1));
// Create a 512x1 CV_32F matrix and fill it with random numbers
cv::Mat one(vectorSize, 1, CV_32F);
cv::randu(one, cv::Scalar(0), cv::Scalar(1));
std::cout << mat.size << std::endl;
std::cout << one.size << std::endl;
auto timeStart = (double) cv::getTickCount();
cv::Mat cosineSimilarities;
cv::gemm(mat, one, 1, cv::Mat(), 0, cosineSimilarities);
double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
INSPIRE_LOGD("Matrix COST: %f", cost);
}
{
std::srand(static_cast<unsigned int>(std::time(nullptr)));
std::vector<std::vector<float>> matrix(N, std::vector<float>(vectorSize));
for (int i = 0; i < N; ++i) {
for (int j = 0; j < vectorSize; ++j) {
matrix[i][j] = static_cast<float>(std::rand()) / RAND_MAX;
}
}
std::vector<float> vectorOne(vectorSize);
for (int i = 0; i < vectorSize; ++i) {
vectorOne[i] = static_cast<float>(std::rand()) / RAND_MAX;
}
auto timeStart = (double) cv::getTickCount();
// dot
for (const auto &v: matrix) {
simd_dot(v.data(), vectorOne.data(), vectorSize);
}
double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
INSPIRE_LOGD("Vector COST: %f", cost);
}
// {
// Eigen::initParallel();
// Eigen::MatrixXf mat(N, vectorSize);
// mat = Eigen::MatrixXf::Random(N, vectorSize);
//
// std::cout << mat.rows() << " x " << mat.cols() << std::endl;
//
//
// Eigen::VectorXf one(vectorSize);
// one = Eigen::VectorXf::Random(vectorSize);
//
// auto timeStart = (double) cv::getTickCount();
// Eigen::VectorXf result = mat * one;
//
// double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
// LOGD("Eigen COST: %f", cost);
// }
return 0;
}
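For reference, a plain scalar version of the dot product that simd_dot accelerates (a sketch only; the real implementation in inspireface/feature_hub/simd.h is SIMD-optimized):

// One multiply-add per element; simd_dot computes the same sum with vector instructions.
static float scalar_dot(const float *a, const float *b, int n) {
    float acc = 0.0f;
    for (int i = 0; i < n; ++i) acc += a[i] * b[i];
    return acc;
}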


@@ -0,0 +1,396 @@
//
// Created by tunm on 2023/10/3.
//
#include <iostream>
#include "inspireface/c_api/inspireface.h"
#include "opencv2/opencv.hpp"
#include "inspireface/log.h"
using namespace inspire;
std::string basename(const std::string& path) {
size_t lastSlash = path.find_last_of("/\\"); // Handle both '/' and '\' separators
if (lastSlash == std::string::npos) {
return path; // No separator: the whole path is the base name
} else {
return path.substr(lastSlash + 1); // The part after the last separator
}
}
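// Usage sketch: basename("/data/imgs/face.jpg") and basename("C:\\imgs\\face.jpg")
// both return "face.jpg"; a path with no separator is returned unchanged.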
int compare() {
HResult ret;
// Initialize context
#ifdef ENABLE_RKNN
HPath path = "test_res/pack/Gundam_RV1109";
#else
HPath path = "test_res/pack/Pikachu";
#endif
HF_ContextCustomParameter parameter = {0};
parameter.enable_liveness = 1;
parameter.enable_mask_detect = 1;
parameter.enable_recognition = 1;
parameter.enable_face_quality = 1;
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE; // Image mode: every call runs full detection
HContextHandle session;
ret = HF_CreateFaceContextFromResourceFile(path, parameter, detMode, 3, &session);
if (ret != HSUCCEED) {
INSPIRE_LOGD("An error occurred while creating ctx: %ld", ret);
}
std::vector<std::string> names = {
"/Users/tunm/datasets/lfw_funneled/Abel_Pacheco/Abel_Pacheco_0001.jpg",
"/Users/tunm/datasets/lfw_funneled/Abel_Pacheco/Abel_Pacheco_0004.jpg",
};
HInt32 featureNum;
HF_GetFeatureLength(&featureNum);
INSPIRE_LOGD("Feature length: %d", featureNum);
std::vector<std::vector<HFloat>> featuresCache(names.size(), std::vector<HFloat>(featureNum)); // Store the cached vectors
for (int i = 0; i < names.size(); ++i) {
auto &name = names[i];
cv::Mat image = cv::imread(name);
if (image.empty()) {
INSPIRE_LOGD("%s is empty!", name.c_str());
return -1;
}
HF_ImageData imageData = {0};
imageData.data = image.data;
imageData.height = image.rows;
imageData.width = image.cols;
imageData.rotation = CAMERA_ROTATION_0;
imageData.format = STREAM_BGR;
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret == HSUCCEED) {
INSPIRE_LOGD("image handle: %ld", (long )imageSteamHandle);
}
HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(session, imageSteamHandle, &multipleFaceData);
INSPIRE_LOGD("Number of faces detected: %d", multipleFaceData.detectedNum);
for (int j = 0; j < multipleFaceData.detectedNum; ++j) {
cv::Rect rect = cv::Rect(multipleFaceData.rects[j].x, multipleFaceData.rects[j].y, multipleFaceData.rects[j].width, multipleFaceData.rects[j].height);
cv::rectangle(image, rect, cv::Scalar(0, 255, 200), 2);
INSPIRE_LOGD("%d, track_id: %d, pitch: %f, yaw: %f, roll: %f", j, multipleFaceData.trackIds[j], multipleFaceData.angles.pitch[j], multipleFaceData.angles.yaw[j], multipleFaceData.angles.roll[j]);
INSPIRE_LOGD("token size: %d", multipleFaceData.tokens->size);
}
#ifndef DISABLE_GUI
// cv::imshow("wq", image);
// cv::waitKey(0);
#endif
ret = HF_FaceFeatureExtractCpy(session, imageSteamHandle, multipleFaceData.tokens[0], featuresCache[i].data());
std::cout << "wtg" << std::endl;
if (ret != HSUCCEED) {
INSPIRE_LOGE("Abnormal feature extraction: %d", ret);
return -1;
}
// for (int j = 0; j < 512; ++j) {
// std::cout << featuresCache[0][j] << ", ";
// }
// std::cout << std::endl;
// HSize size;
// HF_GetFaceBasicTokenSize(&size);
// LOGD("in size: %ld", size);
//
// LOGD("o size %d", multipleFaceData.tokens[0].size);
HBuffer buffer[multipleFaceData.tokens[0].size];
HF_CopyFaceBasicToken(multipleFaceData.tokens[0], buffer, multipleFaceData.tokens[0].size);
HF_FaceBasicToken token = {0};
token.size = multipleFaceData.tokens[0].size;
token.data = buffer;
HFloat quality;
// ret = HF_FaceQualityDetect(session, multipleFaceData.tokens[0], &quality);
ret = HF_FaceQualityDetect(session, token, &quality);
INSPIRE_LOGD("RET : %d", ret);
INSPIRE_LOGD("Q: %f", quality);
ret = HF_ReleaseImageStream(imageSteamHandle);
if (ret == HSUCCEED) {
imageSteamHandle = nullptr;
INSPIRE_LOGD("image released");
} else {
INSPIRE_LOGE("image release error: %ld", ret);
}
}
HFloat compResult;
HF_FaceFeature compFeature1 = {0};
HF_FaceFeature compFeature2 = {0};
compFeature1.size = featureNum;
compFeature1.data = featuresCache[0].data();
compFeature2.size = featureNum;
compFeature2.data = featuresCache[1].data();
ret = HF_FaceComparison1v1(compFeature1, compFeature2, &compResult);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Contrast failure: %d", ret);
return -1;
}
INSPIRE_LOGD("similarity: %f", compResult);
ret = HF_ReleaseFaceContext(session);
if (ret != HSUCCEED) {
INSPIRE_LOGD("Release error");
}
return 0;
}
int search() {
HResult ret;
// Initialize the context
HString path = "test_res/pack/Pikachu";
HF_ContextCustomParameter parameter = {0};
parameter.enable_liveness = 1;
parameter.enable_mask_detect = 1;
parameter.enable_recognition = 1;
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE;
HContextHandle session;
ret = HF_CreateFaceContextFromResourceFile(path, parameter, detMode, 3, &session);
if (ret != HSUCCEED) {
INSPIRE_LOGD("An error occurred while creating ctx: %ld", ret);
}
HF_FeatureHubConfiguration databaseConfiguration = {0};
databaseConfiguration.enablePersistence = 1;
databaseConfiguration.dbPath = "./";
ret = HF_FeatureHubDataEnable(databaseConfiguration);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Database configuration failure: %ld", ret);
return -1;
}
std::vector<std::string> files_list = {
};
for (int i = 0; i < files_list.size(); ++i) {
auto &name = files_list[i];
cv::Mat image = cv::imread(name);
HF_ImageData imageData = {0};
imageData.data = image.data;
imageData.height = image.rows;
imageData.width = image.cols;
imageData.rotation = CAMERA_ROTATION_0;
imageData.format = STREAM_BGR;
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) {
INSPIRE_LOGE("image handle error: %ld", (long )imageSteamHandle);
return -1;
}
HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(session, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) {
INSPIRE_LOGE("%s No face detected", name.c_str());
return -1;
}
HF_FaceFeature feature = {0};
ret = HF_FaceFeatureExtract(session, imageSteamHandle, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Feature extraction error: %ld", ret);
return -1;
}
auto tag = basename(name);
char *tagName = new char[tag.size() + 1];
std::strcpy(tagName, tag.c_str());
HF_FaceFeatureIdentity identity = {0};
identity.feature = &feature;
identity.customId = i;
identity.tag = tagName;
ret = HF_FeatureHubInsertFeature(identity);
if (ret != HSUCCEED) {
INSPIRE_LOGE("插入失败: %ld", ret);
return -1;
}
// // Insert once more to test duplicate-insert handling
// ret = HF_FeaturesGroupInsertFeature(session, identity);
// if (ret != HSUCCEED) {
// LOGE("Duplicate id cannot be inserted: %ld", ret);
// }
delete[] tagName;
ret = HF_ReleaseImageStream(imageSteamHandle);
if (ret == HSUCCEED) {
imageSteamHandle = nullptr;
INSPIRE_LOGD("image released");
} else {
INSPIRE_LOGE("image release error: %ld", ret);
}
}
cv::Mat image = cv::imread("test_res/images/kun.jpg");
HF_ImageData imageData = {0};
imageData.data = image.data;
imageData.height = image.rows;
imageData.width = image.cols;
imageData.rotation = CAMERA_ROTATION_0;
imageData.format = STREAM_BGR;
HImageHandle imageSteamHandle;
ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
if (ret != HSUCCEED) {
INSPIRE_LOGE("image handle error: %ld", (long )imageSteamHandle);
return -1;
}
HF_MultipleFaceData multipleFaceData = {0};
HF_FaceContextRunFaceTrack(session, imageSteamHandle, &multipleFaceData);
if (multipleFaceData.detectedNum <= 0) {
INSPIRE_LOGE("No face detected");
return -1;
}
HF_FaceFeature feature = {0};
ret = HF_FaceFeatureExtract(session, imageSteamHandle, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Feature extraction error: %ld", ret);
return -1;
}
// ret = HF_FaceContextFeatureRemove(session, 3);
// if (ret != HSUCCEED) {
// LOGE("delete failed: %ld", ret);
// }
std::string newName = "Six";
char *newTagName = new char[newName.size() + 1];
std::strcpy(newTagName, newName.c_str());
HF_FaceFeatureIdentity updateIdentity = {0};
updateIdentity.customId = 1;
updateIdentity.tag = newTagName;
updateIdentity.feature = &feature;
ret = HF_FeatureHubFaceUpdate(updateIdentity);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Update failure: %ld", ret);
}
delete[] newTagName;
HF_FaceFeatureIdentity searchIdentity = {0};
// HF_FaceFeature featureSearched = {0};
// searchIdentity.feature = &featureSearched;
HFloat confidence;
ret = HF_FeatureHubFaceSearch(feature, &confidence, &searchIdentity);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Search failure: %ld", ret);
return -1;
}
INSPIRE_LOGD("Search for confidence: %f", confidence);
INSPIRE_LOGD("The matched tag: %s", searchIdentity.tag);
INSPIRE_LOGD("The matched customId: %d", searchIdentity.customId);
// Face Pipeline
ret = HF_MultipleFacePipelineProcess(session, imageSteamHandle, &multipleFaceData, parameter);
if (ret != HSUCCEED) {
INSPIRE_LOGE("pipeline execution failed: %ld", ret);
return -1;
}
HF_RGBLivenessConfidence livenessConfidence = {0};
ret = HF_GetRGBLivenessConfidence(session, &livenessConfidence);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Failed to obtain live data");
return -1;
}
INSPIRE_LOGD("Failed to obtain live data: %f", livenessConfidence.confidence[0]);
HF_FaceMaskConfidence maskConfidence = {0};
ret = HF_GetFaceMaskConfidence(session, &maskConfidence);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Failed to obtain live data");
return -1;
}
INSPIRE_LOGD("Mask wearing confidence: %f", maskConfidence.confidence[0]);
HInt32 faceNum;
ret = HF_FeatureHubGetFaceCount(&faceNum);
if (ret != HSUCCEED) {
INSPIRE_LOGE("fail to get");
}
INSPIRE_LOGD("Number of facial features: %d", faceNum);
HF_FeatureHubViewDBTable();
HF_FaceFeatureIdentity identity;
ret = HF_FeatureHubGetFaceIdentity(100, &identity);
if (ret != HSUCCEED) {
INSPIRE_LOGE("Feature acquisition failure");
}
ret = HF_ReleaseImageStream(imageSteamHandle);
if (ret == HSUCCEED) {
imageSteamHandle = nullptr;
INSPIRE_LOGD("image released");
} else {
INSPIRE_LOGE("image release error: %ld", ret);
}
return 0;
}
int option() {
// HInt32 mask = HF_ENABLE_FACE_RECOGNITION | HF_ENABLE_LIVENESS;
return 0;
}
int main() {
HResult ret;
// {
// // Test ImageStream
// cv::Mat image = cv::imread("test_res/images/kun.jpg");
// HF_ImageData imageData = {0};
// imageData.data = image.data;
// imageData.height = image.rows;
// imageData.width = image.cols;
// imageData.rotation = CAMERA_ROTATION_0;
// imageData.format = STREAM_BGR;
//
// HImageHandle imageSteamHandle;
// ret = HF_CreateImageStream(&imageData, &imageSteamHandle);
// if (ret == HSUCCEED) {
// LOGD("image handle: %ld", (long )imageSteamHandle);
// }
// HF_DeBugImageStreamImShow(imageSteamHandle);
//
// ret = HF_ReleaseImageStream(imageSteamHandle);
// if (ret == HSUCCEED) {
// imageSteamHandle = nullptr;
// LOGD("image released");
// } else {
// LOGE("image release error: %ld", ret);
// }
//
// }
// compare();
search();
option();
}
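Nearly every call in this file follows the same "call, compare against HSUCCEED, log, bail out" shape. A hedged convenience sketch that would compress the pattern (CHECK_HF is hypothetical, not part of the C API):

#include <cstdio>

#define CHECK_HF(call)                                          \
    do {                                                        \
        HResult _ret = (call);                                  \
        if (_ret != HSUCCEED) {                                 \
            std::printf(#call " failed: %ld\n", (long)_ret);    \
            return -1;                                          \
        }                                                       \
    } while (0)

// e.g.: CHECK_HF(HF_CreateImageStream(&imageData, &imageSteamHandle));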


@@ -0,0 +1,97 @@
//
// Created by tunm on 2023/9/15.
//
#include <iostream>
#include "face_context.h"
#include "opencv2/opencv.hpp"
#include "sample/utils/test_helper.h"
using namespace inspire;
int main() {
#ifndef USE_MOBILE_OPENCV_IN_LOCAL
FaceContext ctx;
CustomPipelineParameter param;
param.enable_liveness = true;
param.enable_face_quality = true;
int32_t ret = ctx.Configuration("test_res/pack/Pikachu-t1", DetectMode::DETECT_MODE_VIDEO, 1, param);
if (ret != 0) {
INSPIRE_LOGE("Initialization error");
return -1;
}
cv::VideoCapture cap(0);
if (!cap.isOpened()) {
std::cerr << "Unable to open the camera." << std::endl;
return -1;
}
cv::namedWindow("Webcam", cv::WINDOW_NORMAL);
while (true) {
cv::Mat frame;
cap >> frame;
if (frame.empty()) {
std::cerr << "Unable to obtain images from the camera." << std::endl;
break;
}
CameraStream stream;
stream.SetDataBuffer(frame.data, frame.rows, frame.cols);
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
ctx.FaceDetectAndTrack(stream);
// LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
auto &faces = ctx.GetTrackingFaceList();
for (auto &face: faces) {
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
std::string text = "ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count);
cv::Point text_position(rect.x, rect.y - 10);
const auto& pose_and_quality = face.high_result;
float mean_quality = 0.0f;
for (int i = 0; i < pose_and_quality.lmk_quality.size(); ++i) {
mean_quality += pose_and_quality.lmk_quality[i];
}
mean_quality /= pose_and_quality.lmk_quality.size();
mean_quality = 1 - mean_quality; // lmk_quality appears to store per-landmark error, so invert it
std::string pose_text = "pitch: " + std::to_string(pose_and_quality.pitch) + ", yaw: " + std::to_string(pose_and_quality.yaw) + ", roll: " + std::to_string(pose_and_quality.roll) + ", q: " +
std::to_string(mean_quality);
cv::Point pose_position(rect.x, rect.y + rect.height + 20);
int font_face = cv::FONT_HERSHEY_SIMPLEX;
double font_scale = 0.5;
int font_thickness = 1;
cv::Scalar font_color(255, 255, 255);
cv::putText(frame, text, text_position, font_face, font_scale, font_color, font_thickness);
cv::putText(frame, pose_text, pose_position, font_face, font_scale, font_color, font_thickness);
}
cv::imshow("Webcam", frame);
if (cv::waitKey(1) == 27) {
break;
}
}
cap.release();
cv::destroyAllWindows();
#endif
return 0;
}


@@ -0,0 +1,36 @@
//
// Created by Tunm-Air13 on 2024/4/10.
//
#include <iostream>
#include "inspireface/c_api/inspireface.h"
#include "inspireface/middleware/camera_stream/camera_stream.h"
void non_file_test() {
HResult ret;
HPath path = "test_res/pack/abc"; // Use error path
HF_ContextCustomParameter parameter = {0};
HF_DetectMode detMode = HF_DETECT_MODE_IMAGE;
HContextHandle session;
ret = HF_CreateFaceContextFromResourceFile(path, parameter, detMode, 3, &session);
if (ret != 0) {
std::cout << "wrong" << std::endl;
}
HF_ReleaseFaceContext(session);
}
void camera_test() {
cv::Mat image = cv::imread("test_res/data/bulk/jntm.jpg");
inspire::CameraStream stream;
stream.SetRotationMode(inspire::ROTATION_0);
stream.SetDataFormat(inspire::NV12); // NV12 declared while feeding BGR bytes, presumably to exercise the decode path
stream.SetDataBuffer(image.data, image.rows, image.cols);
auto decode = stream.GetScaledImage(1.0f, true);
}
int main() {
camera_test();
}


@@ -0,0 +1,38 @@
//
// Created by tunm on 2024/4/6.
//
#include <iostream>
#include "track_module/face_track.h"
#include "inspireface/recognition_module/face_feature_extraction.h"
#include "log.h"
using namespace inspire;
int main() {
InspireArchive archive("test_res/pack/Pikachu");
FaceTrack track;
// FaceRecognition recognition(archive, true);
auto ret = track.Configuration(archive);
INSPIRE_LOGD("ret=%d", ret);
auto image = cv::imread("test_res/data/bulk/kun.jpg");
for (int i = 0; i < 10000000; ++i) {
CameraStream stream;
stream.SetDataBuffer(image.data, image.rows, image.cols);
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
track.UpdateStream(stream, true);
}
// InspireModel model;
// ret = archive.LoadModel("mask_detect", model);
// std::cout << ret << std::endl;
//
// archive.PublicPrintSubFiles();
return 0;
}


@@ -0,0 +1,40 @@
//
// Created by tunm on 2023/9/8.
//
#include <iostream>
#include "track_module/face_detect/face_pose.h"
#include "middleware/model_archive/inspire_archive.h"
using namespace inspire;
int main(int argc, char** argv) {
InspireArchive loader;
loader.ReLoad("resource/pack/Pikachu");
Configurable param;
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"ip3_pose", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("input_channel", 1); // Input Gray
param.set<int>("input_image_channel", 1); // BGR 2 Gray
auto m_pose_net_ = std::make_shared<FacePose>();
InspireModel model;
loader.LoadModel("", model);
m_pose_net_->loadData(model);
auto image = cv::imread("resource/images/crop.png");
cv::Mat gray;
cv::resize(image, gray, cv::Size(112, 112));
auto res = (*m_pose_net_)(gray);
INSPIRE_LOGD("%f", res[0]);
INSPIRE_LOGD("%f", res[1]);
INSPIRE_LOGD("%f", res[2]);
return 0;
}


@@ -0,0 +1,180 @@
//
// Created by tunm on 2023/9/10.
//
#include <iostream>
#include "face_context.h"
#include "sample/utils/test_helper.h"
#include "inspireface/recognition_module/extract/alignment.h"
#include "recognition_module/face_feature_extraction.h"
#include "feature_hub/feature_hub.h"
using namespace inspire;
std::string GetFileNameWithoutExtension(const std::string& filePath) {
size_t slashPos = filePath.find_last_of("/\\");
if (slashPos != std::string::npos) {
std::string fileName = filePath.substr(slashPos + 1);
size_t dotPos = fileName.find_last_of('.');
if (dotPos != std::string::npos) {
return fileName.substr(0, dotPos);
} else {
return fileName;
}
}
size_t dotPos = filePath.find_last_of('.');
if (dotPos != std::string::npos) {
return filePath.substr(0, dotPos);
}
return filePath;
}
int comparison1v1(FaceContext &ctx) {
Embedded feature_1;
Embedded feature_2;
{
auto image = cv::imread("");
cv::Mat rot90;
TestUtils::rotate(image, rot90, ROTATION_90);
CameraStream stream;
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_90);
stream.SetDataBuffer(rot90.data, rot90.rows, rot90.cols);
ctx.FaceDetectAndTrack(stream);
const auto &faces = ctx.GetTrackingFaceList();
if (faces.empty()) {
INSPIRE_LOGD("image1 not face");
return -1;
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature_1);
}
{
auto image = cv::imread("");
CameraStream stream;
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
stream.SetDataBuffer(image.data, image.rows, image.cols);
ctx.FaceDetectAndTrack(stream);
const auto &faces = ctx.GetTrackingFaceList();
if (faces.empty()) {
INSPIRE_LOGD("image1 not face");
return -1;
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature_2);
}
float rec;
auto ret = FEATURE_HUB->CosineSimilarity(feature_1, feature_2, rec);
INSPIRE_LOGD("rec: %f", rec);
return 0;
}
int search(FaceContext &ctx) {
// std::shared_ptr<FeatureBlock> block;
// block.reset(FeatureBlock::Create(hyper::MC_OPENCV));
std::vector<String> files_list = {
};
for (int i = 0; i < files_list.size(); ++i) {
auto image = cv::imread(files_list[i]);
CameraStream stream;
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
stream.SetDataBuffer(image.data, image.rows, image.cols);
ctx.FaceDetectAndTrack(stream);
const auto &faces = ctx.GetTrackingFaceList();
if (faces.empty()) {
INSPIRE_LOGD("image1 not face");
return -1;
}
Embedded feature;
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature);
FEATURE_HUB->RegisterFaceFeature(feature, i, GetFileNameWithoutExtension(files_list[i]), 1000 + i);
}
// ctx.FaceRecognitionModule()->PrintMatrix();
// auto ret = block->DeleteFeature(3);
// LOGD("DEL: %d", ret);
// block->PrintMatrix();
FEATURE_HUB->DeleteFaceFeature(2);
INSPIRE_LOGD("Number of faces in the library: %d", FEATURE_HUB->GetFaceFeatureCount());
// Update or insert a face
{
Embedded feature;
auto image = cv::imread("");
CameraStream stream;
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
stream.SetDataBuffer(image.data, image.rows, image.cols);
ctx.FaceDetectAndTrack(stream);
const auto &faces = ctx.GetTrackingFaceList();
if (faces.empty()) {
INSPIRE_LOGD("image1 not face");
return -1;
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature);
// block->UpdateFeature(4, feature);
// block->AddFeature(feature);
}
// Prepare an image to search
{
Embedded feature;
auto image = cv::imread("");
CameraStream stream;
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
stream.SetDataBuffer(image.data, image.rows, image.cols);
ctx.FaceDetectAndTrack(stream);
const auto &faces = ctx.GetTrackingFaceList();
if (faces.empty()) {
INSPIRE_LOGD("image1 not face");
return -1;
}
ctx.FaceRecognitionModule()->FaceExtract(stream, faces[0], feature);
SearchResult result;
auto timeStart = (double) cv::getTickCount();
FEATURE_HUB->SearchFaceFeature(feature, result);
double cost = ((double) cv::getTickCount() - timeStart) / cv::getTickFrequency() * 1000;
INSPIRE_LOGD("Search time: %f", cost);
INSPIRE_LOGD("Top1: %d, %f, %s %d", result.index, result.score, result.tag.c_str(), result.customId);
}
return 0;
}
int main(int argc, char** argv) {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_recognition = true;
int32_t ret = ctx.Configuration("test_res/pack/Pikachu", DetectMode::DETECT_MODE_IMAGE, 1, param);
if (ret != 0) {
INSPIRE_LOGE("Initialization error");
return -1;
}
comparison1v1(ctx);
// search(ctx);
return 0;
}


@@ -0,0 +1,72 @@
//
// Created by tunm on 2023/9/7.
//
#include <iostream>
#include "face_context.h"
#include "opencv2/opencv.hpp"
#include "sample/utils/test_helper.h"
using namespace inspire;
int main(int argc, char** argv) {
FaceContext ctx;
CustomPipelineParameter param;
param.enable_liveness = true;
param.enable_face_quality = true;
int32_t ret = ctx.Configuration("test_res/pack/Pikachu", DetectMode::DETECT_MODE_IMAGE, 1, param);
if (ret != 0) {
INSPIRE_LOGE("Initialization error");
return -1;
}
auto image = cv::imread("test_res/images/kun.jpg");
cv::Mat rot90;
TestUtils::rotate(image, rot90, ROTATION_90);
CameraStream stream;
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_90);
stream.SetDataBuffer(rot90.data, rot90.rows, rot90.cols);
ctx.FaceDetectAndTrack(stream);
std::vector<HyperFaceData> faces;
for (int i = 0; i < ctx.GetNumberOfFacesCurrentlyDetected(); ++i) {
// const ByteArray &byteArray = ctx.GetDetectCache()[i];
HyperFaceData face = {0};
// ret = DeserializeHyperFaceData(byteArray, face);
const FaceBasicData &faceBasic = ctx.GetFaceBasicDataCache()[i];
ret = DeserializeHyperFaceData((char* )faceBasic.data, faceBasic.dataSize, face);
INSPIRE_LOGD("OK!");
if (ret != HSUCCEED) {
return -1;
}
faces.push_back(face);
cv::Rect rect(face.rect.x, face.rect.y, face.rect.width, face.rect.height);
std::cout << rect << std::endl;
cv::rectangle(rot90, rect, cv::Scalar(0, 0, 233), 2);
for (auto &p: face.keyPoints) {
cv::Point2f point(p.x, p.y);
cv::circle(rot90, point, 0, cv::Scalar(0, 0, 255), 5);
}
}
// cv::imshow("wq", rot90);
// cv::waitKey(0);
cv::imwrite("wq.png", rot90);
ret = ctx.FacesProcess(stream, faces, param);
if (ret != HSUCCEED) {
return -1;
}
// view
int32_t index = 0;
INSPIRE_LOGD("liveness: %f", ctx.GetRgbLivenessResultsCache()[index]);
return 0;
}


@@ -0,0 +1,18 @@
//
// Created by Tunm-Air13 on 2023/10/11.
//
#include <iostream>
#include "inspireface/feature_hub/persistence/sqlite_faces_manage.h"
using namespace inspire;
int main() {
SQLiteFaceManage db;
db.OpenDatabase("t.db");
db.ViewTotal();
return 0;
}


@@ -0,0 +1,198 @@
//
// Created by tunm on 2023/8/29.
//
#include <iostream>
#include "inspireface/track_module/face_track.h"
#include "opencv2/opencv.hpp"
using namespace inspire;
int video_test(FaceTrack &ctx, int cam_id) {
#ifndef USE_MOBILE_OPENCV_IN_LOCAL
cv::VideoCapture cap(cam_id);
if (!cap.isOpened()) {
std::cerr << "Unable to open the camera." << std::endl;
return -1;
}
cv::namedWindow("Webcam", cv::WINDOW_NORMAL);
while (true) {
cv::Mat frame;
cap >> frame;
if (frame.empty()) {
std::cerr << "Unable to obtain images from the camera." << std::endl;
break;
}
CameraStream stream;
stream.SetDataBuffer(frame.data, frame.rows, frame.cols);
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
ctx.UpdateStream(stream, false);
INSPIRE_LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
auto const &faces = ctx.trackingFace;
for (auto const &face: faces) {
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
std::string text = "ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count);
cv::Point text_position(rect.x, rect.y - 10);
const auto& pose_and_quality = face.high_result;
std::vector<float> euler = {pose_and_quality.yaw, pose_and_quality.roll, pose_and_quality.pitch};
std::string pose_text = "P: " + std::to_string(euler[0]) + ",Yaw: " + std::to_string(euler[1]) + ",roll:" +std::to_string(euler[2]);
cv::Point pose_position(rect.x, rect.y + rect.height + 20);
int font_face = cv::FONT_HERSHEY_SIMPLEX;
double font_scale = 0.5;
int font_thickness = 1;
cv::Scalar font_color(255, 255, 255);
cv::putText(frame, text, text_position, font_face, font_scale, font_color, font_thickness);
cv::putText(frame, pose_text, pose_position, font_face, font_scale, font_color, font_thickness);
}
cv::imshow("Webcam", frame);
if (cv::waitKey(1) == 27) {
break;
}
}
cap.release();
cv::destroyAllWindows();
#endif
return 0;
}
void video_file_test(FaceTrack& ctx, const std::string& video_filename) {
#ifndef USE_MOBILE_OPENCV_IN_LOCAL
cv::VideoCapture cap(video_filename);
if (!cap.isOpened()) {
std::cerr << "Unable to open the video file: " << video_filename << std::endl;
return;
}
cv::namedWindow("Video", cv::WINDOW_NORMAL);
while (true) {
cv::Mat frame;
cap >> frame;
if (frame.empty()) {
std::cerr << "Unable to get frames from the video file." << std::endl;
break;
}
CameraStream stream;
stream.SetDataBuffer(frame.data, frame.rows, frame.cols);
stream.SetDataFormat(BGR);
stream.SetRotationMode(ROTATION_0);
ctx.UpdateStream(stream, false);
INSPIRE_LOGD("Track Cost: %f", ctx.GetTrackTotalUseTime());
auto const &faces = ctx.trackingFace;
for (auto const &face: faces) {
auto rect = face.GetRect();
int track_id = face.GetTrackingId();
int track_count = face.GetTrackingCount();
cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
auto lmk = face.GetLanmdark();
for (auto & p : lmk) {
cv::circle(frame, p, 0, cv::Scalar(0, 0, 242), 2);
}
std::string text = "ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count);
cv::Point text_position(rect.x, rect.y - 10);
const auto& euler = face.high_result;
std::string pose_text = "pitch: " + std::to_string(euler.pitch) + ",Yaw: " + std::to_string(euler.yaw) + ",roll:" +std::to_string(euler.roll);
cv::Point pose_position(rect.x, rect.y + rect.height + 20);
int font_face = cv::FONT_HERSHEY_SIMPLEX;
double font_scale = 0.5;
int font_thickness = 1;
cv::Scalar font_color(255, 255, 255);
cv::putText(frame, text, text_position, font_face, font_scale, font_color, font_thickness);
cv::putText(frame, pose_text, pose_position, font_face, font_scale, font_color, font_thickness);
}
cv::imshow("Video", frame);
if (cv::waitKey(1) == 27) {
break;
}
}
cap.release();
cv::destroyAllWindows();
#endif
}
int main(int argc, char** argv) {
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <source> <input>" << std::endl;
return 1;
}
INSPIRE_SET_LOG_LEVEL(LogLevel::LOG_NONE);
const std::string source = argv[1];
const std::string input = argv[2];
const std::string folder = "test_res/pack/Pikachu";
INSPIRE_LOGD("%s", folder.c_str());
// ModelLoader loader;
// loader.Reset(folder);
InspireArchive archive;
archive.ReLoad(folder);
    std::cout << "Archive status: " << archive.QueryStatus() << std::endl;
    if (archive.QueryStatus() != SARC_SUCCESS) {
        INSPIRE_LOGE("Failed to load the resource archive: %s", folder.c_str());
        return -1;
}
FaceTrack ctx;
ctx.Configuration(archive);
if (source == "webcam") {
int cam_id = std::stoi(input);
video_test(ctx, cam_id);
} else if (source == "image") {
cv::Mat image = cv::imread(input);
if (!image.empty()) {
// image_test(ctx, image);
} else {
std::cerr << "Unable to open the image file." << std::endl;
}
} else if (source == "video") {
video_file_test(ctx, input);
} else {
std::cerr << "Invalid input source: " << source << std::endl;
return 1;
}
return 0;
}

View File

@@ -0,0 +1,9 @@
//
// Created by Tunm-Air13 on 2024/4/28.
//
int main() {
    // Intentionally leaked allocation: this sample exists to exercise
    // leak-detection tooling such as Valgrind or AddressSanitizer.
    char *n = new char[1024];
    return 0;
}
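// A leak-free counterpart for contrast (sketch, not part of this test):
//   #include <memory>
//   int main() {
//       auto n = std::make_unique<char[]>(1024);  // released automatically
//       return 0;
//   }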

View File

@@ -0,0 +1,116 @@
//
// Created by tunm on 2024/4/20.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc != 4) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <img1_path> <img2_path>\n";
return 1;
}
auto packPath = argv[1];
auto imgPath1 = argv[2];
auto imgPath2 = argv[3];
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Source file Path 1: " << imgPath1 << std::endl;
std::cout << "Source file Path 2: " << imgPath2 << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_IMAGE, 1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;
}
    std::vector<char*> twoImg = {imgPath1, imgPath2};
std::vector<std::vector<float>> vec(2, std::vector<float>(512));
for (int i = 0; i < twoImg.size(); ++i) {
auto image = cv::imread(twoImg[i]);
if (image.empty()) {
std::cout << "Image is empty: " << twoImg[i] << std::endl;
            return 1; // Treat a missing image as an error
}
// Prepare image data for processing
HFImageData imageData = {0};
imageData.data = image.data; // Pointer to the image data
imageData.format = HF_STREAM_BGR; // Image format (BGR in this case)
imageData.height = image.rows; // Image height
imageData.width = image.cols; // Image width
imageData.rotation = HF_CAMERA_ROTATION_0; // Image rotation
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream); // Create an image stream for processing
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
// Execute face tracking on the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData); // Track faces in the image
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) { // Check if any faces were detected
std::cout << "No face was detected: " << twoImg[i] << ret << std::endl;
return ret;
}
// Extract facial features from the first detected face, an interface that uses copy features in a comparison scenario
ret = HFFaceFeatureExtractCpy(session, stream, multipleFaceData.tokens[0], vec[i].data()); // Extract features
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl;
return ret;
}
ret = HFReleaseImageStream(stream);
if (ret != HSUCCEED) {
printf("Release image stream error: %lu\n", ret);
}
}
// Make feature1
HFFaceFeature feature1 = {0};
feature1.data = vec[0].data();
feature1.size = vec[0].size();
// Make feature2
HFFaceFeature feature2 = {0};
feature2.data = vec[1].data();
feature2.size = vec[1].size();
// Run comparison
HFloat similarity;
ret = HFFaceComparison(feature1, feature2, &similarity);
if (ret != HSUCCEED) {
std::cout << "Feature comparison error: " << ret << std::endl;
return ret;
}
std::cout << "Similarity: " << similarity << std::endl;
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release session error: %lu\n", ret);
return ret;
}
    return 0;
}

View File

@@ -0,0 +1,227 @@
#include <iostream>
#include <cassert>
#include <cstring>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check if the correct number of parameters was provided
if (argc != 2) {
std::cerr << "Usage: " << argv[0] << " <pack_path>\n";
return 1;
}
auto packPath = argv[1]; // Path to the resource pack
std::string testDir = "test_res/"; // Directory containing test resources
HResult ret;
// Load resource file, necessary before using any functionality
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Configuration for the feature database
HFFeatureHubConfiguration featureHubConfiguration;
featureHubConfiguration.featureBlockNum = 10; // Number of feature blocks
featureHubConfiguration.enablePersistence = 0; // Persistence not enabled, use in-memory database
featureHubConfiguration.dbPath = ""; // Database path (not used here)
featureHubConfiguration.searchMode = HF_SEARCH_MODE_EAGER; // Search mode configuration
featureHubConfiguration.searchThreshold = 0.48f; // Threshold for search operations
// Enable the global feature database
ret = HFFeatureHubDataEnable(featureHubConfiguration);
if (ret != HSUCCEED) {
std::cout << "An exception occurred while starting FeatureHub: " << ret << std::endl;
return ret;
}
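    // A persistent variant of the configuration above (sketch, left commented
    // out so this sample keeps using the in-memory database; the file name is
    // a hypothetical choice):
    // featureHubConfiguration.enablePersistence = 1; // Store features on disk
    // featureHubConfiguration.dbPath = "feature.db"; // SQLite database path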
// Prepare a list of face photos for testing
std::vector<std::string> photos = {
testDir + "data/bulk/Nathalie_Baye_0002.jpg",
testDir + "data/bulk/jntm.jpg",
testDir + "data/bulk/woman.png",
testDir + "data/bulk/Rob_Lowe_0001.jpg",
};
std::vector<std::string> names = {
"Nathalie Baye",
"JNTM",
"Woman",
"Rob Lowe",
};
assert(photos.size() == names.size()); // Ensure each photo has a corresponding name
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_IMAGE, 1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;
}
// Process each photo, extract features, and add them to the database
for (int i = 0; i < photos.size(); ++i) {
std::cout << "===============================" << std::endl;
// Load the image from the specified file path
const auto& path = photos[i];
const auto& name = names[i];
auto image = cv::imread(path);
if (image.empty()) {
std::cout << "The image is empty: " << path << ret << std::endl;
return ret;
}
// Prepare image data for processing
HFImageData imageData = {0};
imageData.data = image.data; // Pointer to the image data
imageData.format = HF_STREAM_BGR; // Image format (BGR in this case)
imageData.height = image.rows; // Image height
imageData.width = image.cols; // Image width
imageData.rotation = HF_CAMERA_ROTATION_0; // Image rotation
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream); // Create an image stream for processing
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
// Execute face tracking on the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData); // Track faces in the image
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) { // Check if any faces were detected
std::cout << "No face was detected: " << path << ret << std::endl;
return ret;
}
// Extract facial features from the first detected face
HFFaceFeature feature = {0};
ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature); // Extract features
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl;
return ret;
}
// Assign a name to the detected face and insert it into the feature hub
char* cstr = new char[name.size() + 1]; // Dynamically allocate memory for the name
strcpy(cstr, name.c_str()); // Copy the name into the allocated memory
HFFaceFeatureIdentity identity = {0};
identity.feature = &feature; // Assign the extracted feature
identity.customId = i; // Custom identifier for the face
identity.tag = cstr; // Tag the feature with the name
ret = HFFeatureHubInsertFeature(identity); // Insert the feature into the hub
if (ret != HSUCCEED) {
std::cout << "Feature insertion into FeatureHub failed: " << ret << std::endl;
return ret;
}
delete[] cstr; // Clean up the dynamically allocated memory
std::cout << "Insert feature to FeatureHub: " << name << std::endl;
ret = HFReleaseImageStream(stream); // Release the image stream
if (ret != HSUCCEED) {
std::cout << "Release stream failed: " << ret << std::endl;
return ret;
}
}
HInt32 count;
ret = HFFeatureHubGetFaceCount(&count);
assert(count == photos.size());
std::cout << "\nInserted data: " << count << std::endl;
// Process a query image and search for similar faces in the database
auto query = cv::imread(testDir + "data/bulk/kun.jpg");
if (query.empty()) {
std::cout << "The query image is empty: " << ret << std::endl;
return ret;
}
HFImageData imageData = {0};
imageData.data = query.data;
imageData.format = HF_STREAM_BGR;
imageData.height = query.rows;
imageData.width = query.cols;
imageData.rotation = HF_CAMERA_ROTATION_0;
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream);
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) {
std::cout << "No face was detected from target image: " << ret << std::endl;
return ret;
}
// Initialize the feature structure to store extracted face features
HFFaceFeature feature = {0};
// Extract facial features from the detected face using the first token
ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl; // Print error if extraction fails
return ret;
}
// Initialize the structure to store the results of the face search
HFFaceFeatureIdentity searched = {0};
HFloat confidence; // Variable to store the confidence level of the search result
// Search the feature hub for a matching face feature
ret = HFFeatureHubFaceSearch(feature, &confidence, &searched);
if (ret != HSUCCEED) {
std::cout << "Search face feature error: " << ret << std::endl; // Print error if search fails
return ret;
}
if (searched.customId == -1) {
std::cout << "No similar faces were found: " << std::endl; // Notify if no matching face is found
return ret;
}
// Output the details of the found face, including custom ID, associated tag, and confidence level
std::cout << "\nFound similar face: id=" << searched.customId << ", tag=" << searched.tag << ", confidence=" << confidence << std::endl;
std::string name(searched.tag);
// Remove feature
ret = HFFeatureHubFaceRemove(searched.customId);
if (ret != HSUCCEED) {
std::cout << "Remove failed: " << ret << std::endl; // Print error if search fails
return ret;
}
// Remove feature and search again
ret = HFFeatureHubFaceSearch(feature, &confidence, &searched);
if (ret != HSUCCEED) {
std::cout << "Search face feature error: " << ret << std::endl; // Print error if search fails
return ret;
}
if (searched.customId != -1) {
std::cout << "Remove an exception: " << std::endl; // Notify if no matching face is found
return ret;
}
std::cout << "\nSearch again confidence=" << confidence << std::endl;
std::cout << name << " has been removed." << std::endl;
// Clean up and close the session
ret = HFReleaseImageStream(stream);
if (ret != HSUCCEED) {
std::cout << "Release stream error: " << ret << std::endl;
return ret;
}
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
std::cout << "Release session error: " << ret << std::endl;
return ret;
}
return ret; // Return the final result code
}

View File

@@ -0,0 +1,249 @@
#include <iostream>
#include <cassert>
#include <cstring>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check if the correct number of parameters was provided
if (argc != 2) {
std::cerr << "Usage: " << argv[0] << " <pack_path>\n";
return 1;
}
auto packPath = argv[1]; // Path to the resource pack
std::string testDir = "test_res/"; // Directory containing test resources
HResult ret;
// Load resource file, necessary before using any functionality
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Configuration for the feature database
HFFeatureHubConfiguration featureHubConfiguration;
featureHubConfiguration.featureBlockNum = 10; // Number of feature blocks
featureHubConfiguration.enablePersistence = 0; // Persistence not enabled, use in-memory database
featureHubConfiguration.dbPath = ""; // Database path (not used here)
featureHubConfiguration.searchMode = HF_SEARCH_MODE_EAGER; // Search mode configuration
featureHubConfiguration.searchThreshold = 0.48f; // Threshold for search operations
// Enable the global feature database
ret = HFFeatureHubDataEnable(featureHubConfiguration);
if (ret != HSUCCEED) {
std::cout << "An exception occurred while starting FeatureHub: " << ret << std::endl;
return ret;
}
// Prepare a list of face photos for testing
std::vector<std::string> photos = {
testDir + "data/RD/d1.jpeg",
testDir + "data/RD/d2.jpeg",
testDir + "data/RD/d3.jpeg",
testDir + "data/RD/d4.jpeg",
};
std::vector<std::string> names = {
"d1", "d2", "d3", "d4",
};
assert(photos.size() == names.size()); // Ensure each photo has a corresponding name
// Create a session for face recognition
HOption option = HF_ENABLE_FACE_RECOGNITION;
HFSession session;
ret = HFCreateInspireFaceSessionOptional(option, HF_DETECT_MODE_IMAGE, 1, &session);
if (ret != HSUCCEED) {
std::cout << "Create session error: " << ret << std::endl;
return ret;
}
// Process each photo, extract features, and add them to the database
for (int i = 0; i < photos.size(); ++i) {
std::cout << "===============================" << std::endl;
// Load the image from the specified file path
const auto& path = photos[i];
const auto& name = names[i];
auto image = cv::imread(path);
if (image.empty()) {
std::cout << "The image is empty: " << path << ret << std::endl;
return ret;
}
// Prepare image data for processing
HFImageData imageData = {0};
imageData.data = image.data; // Pointer to the image data
imageData.format = HF_STREAM_BGR; // Image format (BGR in this case)
imageData.height = image.rows; // Image height
imageData.width = image.cols; // Image width
imageData.rotation = HF_CAMERA_ROTATION_0; // Image rotation
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream); // Create an image stream for processing
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
// Execute face tracking on the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData); // Track faces in the image
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) { // Check if any faces were detected
std::cout << "No face was detected: " << path << ret << std::endl;
return ret;
}
// Extract facial features from the first detected face
HFFaceFeature feature = {0};
ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature); // Extract features
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl;
return ret;
}
// Assign a name to the detected face and insert it into the feature hub
char* cstr = new char[name.size() + 1]; // Dynamically allocate memory for the name
strcpy(cstr, name.c_str()); // Copy the name into the allocated memory
HFFaceFeatureIdentity identity = {0};
identity.feature = &feature; // Assign the extracted feature
identity.customId = i; // Custom identifier for the face
identity.tag = cstr; // Tag the feature with the name
ret = HFFeatureHubInsertFeature(identity); // Insert the feature into the hub
if (ret != HSUCCEED) {
std::cout << "Feature insertion into FeatureHub failed: " << ret << std::endl;
return ret;
}
delete[] cstr; // Clean up the dynamically allocated memory
std::cout << "Insert feature to FeatureHub: " << name << std::endl;
ret = HFReleaseImageStream(stream); // Release the image stream
if (ret != HSUCCEED) {
std::cout << "Release stream failed: " << ret << std::endl;
return ret;
}
}
HInt32 count;
ret = HFFeatureHubGetFaceCount(&count);
assert(count == photos.size());
std::cout << "\nInserted data: " << count << std::endl;
// Process a query image and search for similar faces in the database
auto query = cv::imread(testDir + "data/RD/d5.jpeg");
if (query.empty()) {
std::cout << "The query image is empty: " << ret << std::endl;
return ret;
}
HFImageData imageData = {0};
imageData.data = query.data;
imageData.format = HF_STREAM_BGR;
imageData.height = query.rows;
imageData.width = query.cols;
imageData.rotation = HF_CAMERA_ROTATION_0;
HFImageStream stream;
ret = HFCreateImageStream(&imageData, &stream);
if (ret != HSUCCEED) {
std::cout << "Create stream error: " << ret << std::endl;
return ret;
}
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, stream, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Run face track error: " << ret << std::endl;
return ret;
}
if (multipleFaceData.detectedNum == 0) {
std::cout << "No face was detected from target image: " << ret << std::endl;
return ret;
}
// Initialize the feature structure to store extracted face features
HFFaceFeature feature = {0};
// Extract facial features from the detected face using the first token
ret = HFFaceFeatureExtract(session, stream, multipleFaceData.tokens[0], &feature);
if (ret != HSUCCEED) {
std::cout << "Extract feature error: " << ret << std::endl; // Print error if extraction fails
return ret;
}
HFSearchTopKResults searched = {0};
// Search the feature hub for a matching face feature
ret = HFFeatureHubFaceSearchTopK(feature, 10, &searched);
if (ret != HSUCCEED) {
std::cout << "Search face feature error: " << ret << std::endl; // Print error if search fails
return ret;
}
if (searched.size == 0) {
std::cout << "No similar faces were found: " << std::endl; // Notify if no matching face is found
return ret;
}
// Output the details of the found face, including custom ID, associated tag, and confidence level
for (int i = 0; i < searched.size; ++i) {
auto id = searched.customIds[i];
auto score = searched.confidence[i];
HFFaceFeatureIdentity identity = {0};
ret = HFFeatureHubGetFaceIdentity(id, &identity);
if (ret != HSUCCEED) {
std::cout << "Get face identity error: " << ret << std::endl; // Print error if search fails
return ret;
}
std::cout << "\nFound similar face: id=" << id << ", tag=" << identity.tag << ", confidence=" << score << std::endl;
}
// Remove feature
if (searched.size > 2) {
ret = HFFeatureHubFaceRemove(searched.customIds[2]);
if (ret != HSUCCEED) {
std::cout << "Remove failed: " << ret << std::endl; // Print error if search fails
return ret;
}
std::cout << "============= Remove id: " << searched.customIds[2] << " ==============" << std::endl;
ret = HFFeatureHubFaceSearchTopK(feature, 10, &searched);
if (ret != HSUCCEED) {
std::cout << "Search face feature error: " << ret << std::endl; // Print error if search fails
return ret;
}
if (searched.size == 0) {
std::cout << "No similar faces were found: " << std::endl; // Notify if no matching face is found
return ret;
}
// Output the details of the found face, including custom ID, associated tag, and confidence level
for (int i = 0; i < searched.size; ++i) {
auto id = searched.customIds[i];
auto score = searched.confidence[i];
HFFaceFeatureIdentity identity = {0};
ret = HFFeatureHubGetFaceIdentity(id, &identity);
if (ret != HSUCCEED) {
std::cout << "Get face identity error: " << ret << std::endl; // Print error if search fails
return ret;
}
std::cout << "\nFound similar face: id=" << id << ", tag=" << identity.tag << ", confidence=" << score << std::endl;
}
}
// Clean up and close the session
ret = HFReleaseImageStream(stream);
if (ret != HSUCCEED) {
std::cout << "Release stream error: " << ret << std::endl;
return ret;
}
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
std::cout << "Release session error: " << ret << std::endl;
return ret;
}
return ret; // Return the final result code
}

View File

@@ -0,0 +1,152 @@
//
// Created by Tunm-Air13 on 2024/4/17.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <source_path>\n";
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Source file Path: " << sourcePath << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
    // For still images (no video or frame sequence), use IMAGE-MODE: every call runs detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
    // Load an image
cv::Mat image = cv::imread(sourcePath);
if (image.empty()) {
std::cout << "The source entered is not a picture or read error." << std::endl;
return 1;
}
// Prepare an image parameter structure for configuration
HFImageData imageParam = {0};
imageParam.data = image.data; // Data buffer
imageParam.width = image.cols; // Target view width
    imageParam.height = image.rows; // Target view height
imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
imageParam.format = HF_STREAM_BGR; // Data source format
// Create an image data stream
HFImageStream imageHandle = {0};
ret = HFCreateImageStream(&imageParam, &imageHandle);
if (ret != HSUCCEED) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
    // Execute HFExecuteFaceTrack to capture face information in the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
return ret;
}
// Print the number of faces detected
auto faceNum = multipleFaceData.detectedNum;
std::cout << "Num of face: " << faceNum << std::endl;
// Copy a new image to draw
cv::Mat draw = image.clone();
for (int index = 0; index < faceNum; ++index) {
std::cout << "========================================" << std::endl;
std::cout << "Process face index: " << index << std::endl;
// Use OpenCV's Rect to receive face bounding boxes
auto rect = cv::Rect(multipleFaceData.rects[index].x, multipleFaceData.rects[index].y,
multipleFaceData.rects[index].width, multipleFaceData.rects[index].height);
cv::rectangle(draw, rect, cv::Scalar(0, 100, 255), 1);
        // Print the face ID: in IMAGE-MODE it changes on every call, while in VIDEO-MODE it stays fixed for a tracked face (though the track may be lost)
std::cout << "FaceID: " << multipleFaceData.trackIds[index] << std::endl;
        // Print the head pose (Euler angles); head angle is a common face-quality cue
        std::cout << "Roll: " << multipleFaceData.angles.roll[index]
                  << ", Yaw: " << multipleFaceData.angles.yaw[index]
                  << ", Pitch: " << multipleFaceData.angles.pitch[index] << std::endl;
}
cv::imwrite("draw_detected.jpg", draw);
// Run pipeline function
    // Select the pipeline functions to execute; they must already have been enabled when the session was created
auto pipelineOption = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
// In this loop, all faces are processed
ret = HFMultipleFacePipelineProcessOptional(session, imageHandle, &multipleFaceData, pipelineOption);
if (ret != HSUCCEED) {
std::cout << "Execute Pipeline error: " << ret << std::endl;
return ret;
}
// Get mask detection results from the pipeline cache
HFFaceMaskConfidence maskConfidence = {0};
ret = HFGetFaceMaskConfidence(session, &maskConfidence);
if (ret != HSUCCEED) {
std::cout << "Get mask detect result error: " << ret << std::endl;
return -1;
}
// Get face quality results from the pipeline cache
HFFaceQualityConfidence qualityConfidence = {0};
ret = HFGetFaceQualityConfidence(session, &qualityConfidence);
if (ret != HSUCCEED) {
std::cout << "Get face quality result error: " << ret << std::endl;
return -1;
}
for (int index = 0; index < faceNum; ++index) {
std::cout << "========================================" << std::endl;
std::cout << "Process face index from pipeline: " << index << std::endl;
std::cout << "Mask detect result: " << maskConfidence.confidence[index] << std::endl;
std::cout << "Quality predict result: " << qualityConfidence.confidence[index] << std::endl;
// We set the threshold of wearing a mask as 0.85. If it exceeds the threshold, it will be judged as wearing a mask.
// The threshold can be adjusted according to the scene
if (maskConfidence.confidence[index] > 0.85) {
std::cout << "Mask" << std::endl;
} else {
std::cout << "Non Mask" << std::endl;
}
}
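    // Liveness was also enabled in this pipeline; a sketch of reading its
    // cached result, with the getter and struct names assumed by analogy
    // with the mask/quality getters above (verify against inspireface.h):
    // HFRGBLivenessConfidence livenessConfidence = {0};
    // ret = HFGetRGBLivenessConfidence(session, &livenessConfidence);
    // if (ret == HSUCCEED && faceNum > 0) {
    //     std::cout << "Liveness confidence: " << livenessConfidence.confidence[0] << std::endl;
    // }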
ret = HFReleaseImageStream(imageHandle);
if (ret != HSUCCEED) {
printf("Release image stream error: %lu\n", ret);
}
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release session error: %lu\n", ret);
return ret;
}
return 0;
}

View File

@@ -0,0 +1,93 @@
//
// Created by Tunm-Air13 on 2024/4/17.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <source_path>\n";
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Source file Path: " << sourcePath << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
    // For still images (no video or frame sequence), use IMAGE-MODE: every call runs detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Handle of the current face SDK algorithm context
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
    // Load an image
cv::Mat image = cv::imread(sourcePath);
if (image.empty()) {
std::cout << "The source entered is not a picture or read error." << std::endl;
return 1;
}
// Prepare an image parameter structure for configuration
HFImageData imageParam = {0};
imageParam.data = image.data; // Data buffer
imageParam.width = image.cols; // Target view width
    imageParam.height = image.rows; // Target view height
imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
imageParam.format = HF_STREAM_BGR; // Data source format
// Create an image data stream
HFImageStream imageHandle = {0};
ret = HFCreateImageStream(&imageParam, &imageHandle);
if (ret != HSUCCEED) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
auto current_time = (double) cv::getTickCount();
    // Execute HFExecuteFaceTrack to capture face information in the image
HFMultipleFaceData multipleFaceData = {0};
ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl;
return ret;
}
auto cost = ((double) cv::getTickCount() - current_time) / cv::getTickFrequency() * 1000;
std::cout << "coes: " << cost << std::endl;
ret = HFReleaseImageStream(imageHandle);
if (ret != HSUCCEED) {
printf("Release image stream error: %lu\n", ret);
}
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release session error: %lu\n", ret);
return ret;
}
return 0;
}

View File

@@ -0,0 +1,98 @@
//
// Created by Tunm-Air13 on 2024/4/17.
//
#include <iostream>
#include "opencv2/opencv.hpp"
#include "inspireface/c_api/inspireface.h"
#include <thread>
void runFaceTrack(HFSession session, HFImageStream imageHandle) {
HFMultipleFaceData multipleFaceData = {0};
auto ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
if (ret != HSUCCEED) {
std::cout << "Thread " << std::this_thread::get_id() << " Execute HFExecuteFaceTrack error: " << ret << std::endl;
} else {
std::cout << "Thread " << std::this_thread::get_id() << " successfully executed HFExecuteFaceTrack.\n";
}
}
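// Note: main() below deliberately shares one session across all threads as a
// stress test; whether HFSession may be shared concurrently is not verified
// here. The conservative alternative is one session per thread, each created
// with HFCreateInspireFaceSessionOptional and released when its thread ends.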
int main(int argc, char* argv[]) {
// Check whether the number of parameters is correct
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <pack_path> <source_path>\n";
return 1;
}
auto packPath = argv[1];
auto sourcePath = argv[2];
std::cout << "Pack file Path: " << packPath << std::endl;
std::cout << "Source file Path: " << sourcePath << std::endl;
HResult ret;
// The resource file must be loaded before it can be used
ret = HFLaunchInspireFace(packPath);
if (ret != HSUCCEED) {
std::cout << "Load Resource error: " << ret << std::endl;
return ret;
}
// Enable the functions in the pipeline: mask detection, live detection, and face quality detection
HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS;
    // For still images (no video or frame sequence), use IMAGE-MODE: every call runs detection without tracking
HFDetectMode detMode = HF_DETECT_MODE_IMAGE;
// Maximum number of faces detected
HInt32 maxDetectNum = 5;
// Handle of the current face SDK algorithm session
HFSession session = {0};
ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, &session);
if (ret != HSUCCEED) {
std::cout << "Create FaceContext error: " << ret << std::endl;
return ret;
}
    // Load an image
cv::Mat image = cv::imread(sourcePath);
if (image.empty()) {
std::cout << "The source entered is not a picture or read error." << std::endl;
return 1;
}
// Prepare an image parameter structure for configuration
HFImageData imageParam = {0};
imageParam.data = image.data; // Data buffer
imageParam.width = image.cols; // Target view width
    imageParam.height = image.rows; // Target view height
imageParam.rotation = HF_CAMERA_ROTATION_0; // Data source rotate
imageParam.format = HF_STREAM_BGR; // Data source format
// Create an image data stream
HFImageStream imageHandle = {0};
ret = HFCreateImageStream(&imageParam, &imageHandle);
if (ret != HSUCCEED) {
std::cout << "Create ImageStream error: " << ret << std::endl;
return ret;
}
// Create and start multiple threads
const size_t numThreads = 10;
std::vector<std::thread> threads;
for (size_t i = 0; i < numThreads; ++i) {
threads.emplace_back(runFaceTrack, session, imageHandle);
}
// Wait for all threads to complete
for (auto& thread : threads) {
if (thread.joinable()) {
thread.join();
}
}
// The memory must be freed at the end of the program
ret = HFReleaseInspireFaceSession(session);
if (ret != HSUCCEED) {
printf("Release FaceContext error: %lu\n", ret);
return ret;
}
return 0;
}

View File

@@ -0,0 +1,176 @@
//
// Created by YH-Mac on 2020/9/12.
//
#ifndef LMKTRACKING_LIB_TEST_FOLDER_HELPER_H
#define LMKTRACKING_LIB_TEST_FOLDER_HELPER_H
#include <dirent.h>
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stack>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vector>
#include <sstream>
#include <sys/types.h>
#include <sys/stat.h>
#include <chrono>
#define MODE (S_IRWXU | S_IRWXG | S_IRWXO)
using namespace std::chrono;
class Timer
{
public:
std::stack<high_resolution_clock::time_point> tictoc_stack;
void tic()
{
high_resolution_clock::time_point t1 = high_resolution_clock::now();
tictoc_stack.push(t1);
}
double toc(std::string msg = "", bool flag = true)
{
double diff = duration_cast<milliseconds>(high_resolution_clock::now() - tictoc_stack.top()).count();
if(msg.size() > 0){
if (flag)
printf("%s time elapsed: %f ms\n", msg.c_str(), diff);
}
if(msg == "ir")
return diff/3;
tictoc_stack.pop();
return diff;
}
void reset()
{
tictoc_stack = std::stack<high_resolution_clock::time_point>();
}
};
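// Typical usage of the Timer above (sketch):
//   Timer t;
//   t.tic();
//   /* ... timed work ... */
//   double ms = t.toc("stage");  // prints "stage time elapsed: ... ms"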
// Note: find_last_of matches the last occurrence of ANY character in the
// argument, so these helpers are only reliable with single-character
// separators such as "/" or ".".
std::string dePrefix(std::string file_name, std::string prefix){
    int n = file_name.find_last_of(prefix);
    std::string de = file_name.substr(n + 1, file_name.size());
    return de;
}
std::string deSuffix(std::string file_name, std::string suffix){
    int n = file_name.find_last_of(suffix);
    std::string de = file_name.substr(0, n);
    return de;
}
bool check_folder(const std::string folder_path, bool is_create) {
    int res = access(folder_path.c_str(), 0);
    if (res != -1) {
        std::cout << folder_path << " exists." << std::endl;
        return true;
    } else if (is_create) {
        std::cout << folder_path << " does not exist." << std::endl;
        int mkdir_res = mkdir(folder_path.c_str(), MODE);
        if (mkdir_res != -1) {
            std::cout << "mkdir succeeded." << std::endl;
        } else {
            std::cout << "mkdir failed." << std::endl;
        }
        return true;
    } else {
        std::cout << folder_path << " does not exist." << std::endl;
        return false;
    }
}
std::vector<float> read_feat_txt(const std::string feat_path){
std::ifstream infile;
infile.open(feat_path);
if (!infile)
std::cout << "file error: " << feat_path << std::endl;
std::vector<float> feat;
float t1;
while (infile >> t1) {
feat.push_back(t1);
}
infile.close();
return feat;
}
void extract_feat_to_txt(const std::string feat_path, std::vector<float> feat) {
std::ofstream txt(feat_path);
for (int i = 0; i < feat.size(); ++i) {
txt << feat[i] << " ";
}
txt.close();
// std::cout << "export feature to " << feat_path << std::endl;
}
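// Round-trip sketch for the two feature helpers above:
//   extract_feat_to_txt("feat.txt", feat);                    // write
//   std::vector<float> restored = read_feat_txt("feat.txt");  // read back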
bool decide_ext(const char *gname, const char *nsuff)
// gname: name of the given file
// nsuff: suffix you need
{
char dot = '.';
char suff[10] = {0};
int c;
int j = 0;
c = strlen(gname);
// std::cout<< c << '\n';
for (int i = 0; i < c; i++) {
if (gname[i] == dot)
j = i;
}
int k = j;
j = c - j - 1;
for (int i = 0; i < j; i++) {
suff[i] = gname[k + i + 1];
}
if (0 == strcmp(suff, nsuff))
return true;
else
return false;
}
std::vector<std::string>
readFileListSuffix(const char *basePath, const char *suffix, std::vector<std::string>& file_names, bool recursive) {
DIR *dir;
struct dirent *ptr;
char base[1000];
std::vector<std::string> file_list;
if ((dir = opendir(basePath)) == NULL) {
perror("Open dir error...");
exit(1);
}
while ((ptr = readdir(dir)) != NULL) {
if (strcmp(ptr->d_name, ".") == 0 || strcmp(ptr->d_name, "..") == 0) ///current dir OR parrent dir
continue;
else if (ptr->d_type == 8) { ///file
// printf("d_name:%s/%s\n", basePath, ptr->d_name);
if (decide_ext(ptr->d_name, suffix)) {
std::stringstream ss;
ss << basePath << "/" << ptr->d_name;
std::string path = ss.str();
file_names.push_back(ptr->d_name);
file_list.push_back(path);
}
} else if (ptr->d_type == 10) { ///link file
// printf("d_name:%s/%s\n", basePath, ptr->d_name);
        } else if (ptr->d_type == 4 && recursive)  /// dir
{
memset(base, '\0', sizeof(base));
strcpy(base, basePath);
strcat(base, "/");
strcat(base, ptr->d_name);
readFileListSuffix(base, suffix, file_names, recursive);
}
}
closedir(dir);
return file_list;
}
#endif //LMKTRACKING_LIB_TEST_FOLDER_HELPER_H

View File

@@ -0,0 +1,122 @@
//
// Created by Jack YU on 2020/6/11.
//
#ifndef ZEUSEESTRACKING_LIB_TEST_HELPER_H
#define ZEUSEESTRACKING_LIB_TEST_HELPER_H
#include <dirent.h>
#include <sys/stat.h>
#include "middleware/camera_stream/camera_stream.h"
namespace TestUtils {
inline uint8_t *rgb2nv21(const cv::Mat &Img) {
    if (Img.empty()) {
        exit(1); // An empty input image is a fatal error in this test helper
    }
int cols = Img.cols;
int rows = Img.rows;
int Yindex = 0;
int UVindex = rows * cols;
    unsigned char *yuvbuff =
            new unsigned char[static_cast<int>(1.5 * rows * cols)];
    // NV21 is scratch space while the Y and VU planes are computed below;
    // only yuvbuff is returned to the caller.
    cv::Mat NV21(rows + rows / 2, cols, CV_8UC1);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
uchar *YPointer = NV21.ptr<uchar>(i);
int B = Img.at<cv::Vec3b>(i, j)[0];
int G = Img.at<cv::Vec3b>(i, j)[1];
int R = Img.at<cv::Vec3b>(i, j)[2];
int Y = (77 * R + 150 * G + 29 * B) >> 8;
YPointer[j] = Y;
yuvbuff[Yindex++] = (Y < 0) ? 0 : ((Y > 255) ? 255 : Y);
uchar *UVPointer = NV21.ptr<uchar>(rows + i / 2);
if (i % 2 == 0 && (j) % 2 == 0) {
int U = ((-44 * R - 87 * G + 131 * B) >> 8) + 128;
int V = ((131 * R - 110 * G - 21 * B) >> 8) + 128;
UVPointer[j] = V;
UVPointer[j + 1] = U;
yuvbuff[UVindex++] = (V < 0) ? 0 : ((V > 255) ? 255 : V);
yuvbuff[UVindex++] = (U < 0) ? 0 : ((U > 255) ? 255 : U);
}
}
}
return yuvbuff;
}
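// Ownership note: the buffer returned above is heap-allocated and never
// freed by this helper. A typical call site (sketch):
//   uint8_t *nv21 = TestUtils::rgb2nv21(bgr);  // 1.5 * rows * cols bytes
//   /* ... hand the NV21 data to a consumer ... */
//   delete[] nv21;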
inline void rotate(const cv::Mat &image, cv::Mat &out,
                   inspire::ROTATION_MODE mode) {
    if (mode == inspire::ROTATION_90) {
        cv::transpose(image, out);
        cv::flip(out, out, 1);    // flip around the y-axis
    } else if (mode == inspire::ROTATION_180) {
        cv::flip(image, out, -1); // flip around both axes
    } else if (mode == inspire::ROTATION_270) {
        cv::transpose(image, out);
        cv::flip(out, out, 0);    // flip around the x-axis
    }
}
void GetFilesInDirectory(std::vector<std::string> &out, const std::string &directory)
{
#ifdef WINDOWS
HANDLE dir;
WIN32_FIND_DATA file_data;
if ((dir = FindFirstFile((directory + "/*").c_str(), &file_data)) == INVALID_HANDLE_VALUE)
return; /* No files found */
do {
        const std::string file_name = file_data.cFileName;
        const std::string full_file_name = directory + "/" + file_name;
const bool is_directory = (file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
if (file_name[0] == '.')
continue;
if (is_directory)
continue;
out.push_back(full_file_name);
} while (FindNextFile(dir, &file_data));
FindClose(dir);
#else
DIR *dir;
class dirent *ent;
class stat st;
    dir = opendir(directory.c_str());
    if (dir == NULL)
        return; /* Directory could not be opened */
    while ((ent = readdir(dir)) != NULL) {
const std::string file_name = ent->d_name;
const std::string full_file_name = directory + "/" + file_name;
if (file_name[0] == '.')
continue;
if (stat(full_file_name.c_str(), &st) == -1)
continue;
const bool is_directory = (st.st_mode & S_IFDIR) != 0;
if (is_directory)
continue;
out.push_back(full_file_name);
}
closedir(dir);
#endif
}
} // namespace TestUtils
#endif // ZEUSEESTRACKING_LIB_TEST_HELPER_H