Add the inspireface project to cpp-package.

This commit is contained in:
JingyuYan
2024-05-02 01:27:29 +08:00
parent e90dacb3cf
commit 08d7e96f79
431 changed files with 370534 additions and 0 deletions

View File

@@ -0,0 +1,108 @@
//
// Created by tunm on 2023/9/23.
//
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "middleware/inference_helper/customized/rknn_adapter.h"
#include "inspireface/feature_hub/simd.h"
#include <memory>
#include "inspireface/recognition_module/extract/extract.h"
#include "middleware/model_archive/inspire_archive.h"
using namespace inspire;
int main() {
std::vector<std::string> names = {
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
};
InspireArchive loader("test_res/pack/test_zip_rec");
{
InspireModel model;
loader.LoadModel("feature", model);
auto net = std::make_shared<RKNNAdapter>();
net->Initialize((unsigned char* )model.buffer, model.bufferSize);
net->setOutputsWantFloat(1);
EmbeddedList list;
for (int i = 0; i < names.size(); ++i) {
cv::Mat image = cv::imread(names[i]);
cv::Mat rgb;
cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
net->SetInputData(0, rgb);
net->RunModel();
auto out = net->GetOutputData(0);
auto dims = net->GetOutputTensorSize(0);
// for (int i = 0; i < dims.size(); ++i) {
// LOGD("%lu", dims[i]);
// }
//
for (int i = 0; i < 512; ++i) {
std::cout << out[i] << ", ";
}
std::cout << std::endl;
Embedded emb;
for (int j = 0; j < 512; ++j) {
emb.push_back(out[j]);
}
list.push_back(emb);
}
for (int i = 0; i < list.size(); ++i) {
auto &embedded = list[i];
float mse = 0.0f;
for (const auto &one: embedded) {
mse += one * one;
}
mse = sqrt(mse);
for (float &one : embedded) {
one /= mse;
}
}
auto cos = simd_dot(list[0].data(), list[1].data(), 512);
LOGD("COS: %f", cos);
}
{
std::shared_ptr<Extract> m_extract_;
Configurable param;
param.set<int>("model_index", 0);
param.set<std::string>("input_layer", "input");
param.set<std::vector<std::string>>("outputs_layers", {"267", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
param.set<bool>("swap_color", true); // RK requires rgb input
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader.LoadModel("feature", model);
m_extract_->loadData(model, InferenceHelper::kRknn);
cv::Mat image = cv::imread(names[0]);
// cv::Mat rgb;
// cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
auto feat = m_extract_->GetFaceFeature(image);
for (int i = 0; i < 512; ++i) {
std::cout << feat[i] << ", ";
}
std::cout << std::endl;
}
LOGD("End");
return 0;
}

View File

@@ -0,0 +1,52 @@
//
// Created by Tunm-Air13 on 2023/9/20.
//
#include "opencv2/opencv.hpp"
//#include "inspireface/middleware/model_loader/model_loader.h"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/middleware/costman.h"
#include "middleware/model_archive/inspire_archive.h"
#include "log.h"
using namespace inspire;
int main() {
auto detModel = "test_res/pack/Gundam_RV1109";
InspireArchive inspireArchive;
auto ret = inspireArchive.ReLoad(detModel);
if (ret != SARC_SUCCESS) {
LOGE("Error load");
return ret;
}
InspireModel model;
ret = inspireArchive.LoadModel("face_detect", model);
if (ret != SARC_SUCCESS) {
LOGE("Error model");
return ret;
}
std::cout << model.Config().toString() << std::endl;
std::shared_ptr<FaceDetect> m_face_detector_;
m_face_detector_ = std::make_shared<FaceDetect>(320);
m_face_detector_->loadData(model, InferenceHelper::kRknn);
// Load a image
cv::Mat image = cv::imread("test_res/images/face_sample.png");
Timer timer;
FaceLocList locs = (*m_face_detector_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("Faces: %ld", locs.size());
for (auto &loc: locs) {
cv::rectangle(image, cv::Point2f(loc.x1, loc.y1), cv::Point2f(loc.x2, loc.y2), cv::Scalar(0, 0, 255), 3);
}
cv::imwrite("det.jpg", image);
return 0;
}

View File

@@ -0,0 +1,89 @@
//
// Created by tunm on 2023/9/21.
//
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "inspireface/feature_hub/face_recognition.h"
#include "inspireface/feature_hub/simd.h"
#include "inspireface/middleware/model_archive/inspire_archive.h"
using namespace inspire;
std::shared_ptr<InspireArchive> loader;
void rec_function() {
std::shared_ptr<Extract> m_extract_;
Configurable param;
// param.set<int>("model_index", ModelIndex::_03_extract);
param.set<std::string>("input_layer", "input");
param.set<std::vector<std::string>>("outputs_layers", {"267", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
param.set<bool>("swap_color", true); // RK requires rgb input
m_extract_ = std::make_shared<Extract>();
InspireModel model;
loader->LoadModel("feature", model);
m_extract_->loadData(model, InferenceHelper::kRknn);
loader.reset();
std::vector<std::string> files = {
"test_res/images/test_data/0.jpg",
"test_res/images/test_data/1.jpg",
"test_res/images/test_data/2.jpg",
};
EmbeddedList embedded_list;
for (int i = 0; i < files.size(); ++i) {
auto warped = cv::imread(files[i]);
Timer timer;
auto emb = (*m_extract_)(warped);
LOGD("耗时: %f", timer.GetCostTimeUpdate());
embedded_list.push_back(emb);
LOGD("%lu", emb.size());
}
float _0v1;
float _0v2;
float _1v2;
FaceRecognition::CosineSimilarity(embedded_list[0], embedded_list[1], _0v1);
FaceRecognition::CosineSimilarity(embedded_list[0], embedded_list[2], _0v2);
FaceRecognition::CosineSimilarity(embedded_list[1], embedded_list[2], _1v2);
LOGD("0 vs 1 : %f", _0v1);
LOGD("0 vs 2 : %f", _0v2);
LOGD("1 vs 2 : %f", _1v2);
// LOGD("size: %lu", embedded_list.size());
// LOGD("num of vector: %lu", embedded_list[2].size());
//
// float _0v1 = simd_dot(embedded_list[0].data(), embedded_list[1].data(), 512);
// float _0v2 = simd_dot(embedded_list[0].data(), embedded_list[2].data(), 512);
// float _1v2 = simd_dot(embedded_list[1].data(), embedded_list[2].data(), 512);
// LOGD("0 vs 1 : %f", _0v1);
// LOGD("0 vs 2 : %f", _0v2);
// LOGD("1 vs 2 : %f", _1v2);
}
// Entry point: initialise the globally shared model archive, then run the
// recognition smoke test (rec_function releases the archive itself).
int main() {
    loader = std::make_shared<InspireArchive>();
    loader->ReLoad("test_res/pack/Gundam_RV1109");
    rec_function();
    return 0;
}

View File

@@ -0,0 +1,316 @@
//
// Created by Tunm-Air13 on 2023/9/21.
//
#include "opencv2/opencv.hpp"
#include "inspireface/track_module/face_detect/all.h"
#include "inspireface/pipeline_module/attribute/mask_predict.h"
#include "inspireface/middleware/costman.h"
#include "inspireface/track_module/quality/face_pose_quality.h"
#include "inspireface/track_module/landmark/face_landmark.h"
#include "inspireface/pipeline_module/liveness/rgb_anti_spoofing.h"
#include "inspireface/face_context.h"
using namespace inspire;
InspireArchive loader;
void test_rnet() {
std::shared_ptr<RNet> m_rnet_;
Configurable param;
// param.set<int>("model_index", ModelIndex::_04_refine_net);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"conv5-1/Softmax", "conv5-2/BiasAdd"});
param.set<std::vector<int>>("input_size", {24, 24});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
InspireModel model;
loader.LoadModel("refine_net", model);
m_rnet_ = std::make_shared<RNet>();
m_rnet_->loadData(model, InferenceHelper::kRknn);
{
// Load a image
cv::Mat image = cv::imread("test_res/images/test_data/hasface.jpg");
Timer timer;
auto score = (*m_rnet_)(image);
LOGD("RNETcost: %f", timer.GetCostTimeUpdate());
LOGD("has face: %f", score);
}
{
// Load a image
cv::Mat image = cv::imread("test_res/images/test_data/noface.jpg");
Timer timer;
auto score = (*m_rnet_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("non face: %f", score);
}
}
void test_mask() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_05_mask);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"activation_1/Softmax",});
param.set<std::vector<int>>("input_size", {96, 96});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<MaskPredict> m_mask_predict_;
m_mask_predict_ = std::make_shared<MaskPredict>();
InspireModel model;
loader.LoadModel("mask_detect", model);
m_mask_predict_->loadData(model, InferenceHelper::kRknn);
{
// Load a image
cv::Mat image = cv::imread("test_res/images/test_data/mask.jpg");
Timer timer;
auto score = (*m_mask_predict_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("mask: %f", score);
}
{
// Load a image
cv::Mat image = cv::imread("test_res/images/test_data/nomask.jpg");
Timer timer;
auto score = (*m_mask_predict_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("maskless: %f", score);
}
}
void test_quality() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_07_pose_q_fp16);
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"fc1", });
param.set<std::vector<int>>("input_size", {96, 96});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", true); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<FacePoseQuality> m_face_quality_;
m_face_quality_ = std::make_shared<FacePoseQuality>();
InspireModel model;
loader.LoadModel("pose_quality", model);
m_face_quality_->loadData(model, InferenceHelper::kRknn);
{
std::vector<std::string> names = {
"test_res/images/test_data/p3.jpg",
// "test_res/images/test_data/p1.jpg",
};
for (int i = 0; i < names.size(); ++i) {
LOGD("Image: %s", names[i].c_str());
cv::Mat image = cv::imread(names[i]);
Timer timer;
auto pose_res = (*m_face_quality_)(image);
LOGD("质量cost: %f", timer.GetCostTimeUpdate());
for (auto &p: pose_res.lmk) {
cv::circle(image, p, 0, cv::Scalar(0, 0, 255), 2);
}
cv::imwrite("pose.jpg", image);
LOGD("pitch: %f", pose_res.pitch);
LOGD("yam: %f", pose_res.yaw);
LOGD("roll: %f", pose_res.roll);
for (auto q: pose_res.lmk_quality) {
std::cout << q << ", ";
}
std::cout << std::endl;
}
}
}
void test_landmark_mnn() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_01_lmk);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"prelu1/add", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {127.5f, 127.5f, 127.5f});
param.set<std::vector<float>>("norm", {0.0078125f, 0.0078125f, 0.0078125f});
std::shared_ptr<FaceLandmark> m_landmark_predictor_;
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model);
cv::Mat image = cv::imread("test_res/images/test_data/crop.png");
cv::resize(image, image, cv::Size(112, 112));
std::vector<float> lmk;
Timer timer;
for (int i = 0; i < 50; ++i) {
lmk = (*m_landmark_predictor_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
}
for (int i = 0; i < FaceLandmark::NUM_OF_LANDMARK; ++i) {
float x = lmk[i * 2 + 0] * 112;
float y = lmk[i * 2 + 1] * 112;
cv::circle(image, cv::Point2f(x, y), 0, cv::Scalar(0, 0, 255), 1);
}
cv::imwrite("lmk.jpg", image);
}
void test_landmark() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_01_lmk);
param.set<std::string>("input_layer", "input_1");
param.set<std::vector<std::string>>("outputs_layers", {"prelu1/add", });
param.set<std::vector<int>>("input_size", {112, 112});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<FaceLandmark> m_landmark_predictor_;
m_landmark_predictor_ = std::make_shared<FaceLandmark>(112);
InspireModel model;
loader.LoadModel("landmark", model);
m_landmark_predictor_->loadData(model, InferenceHelper::kRknn);
cv::Mat image = cv::imread("test_res/images/test_data/0.jpg");
cv::resize(image, image, cv::Size(112, 112));
std::vector<float> lmk;
Timer timer;
for (int i = 0; i < 50; ++i) {
lmk = (*m_landmark_predictor_)(image);
LOGD("LMKcost: %f", timer.GetCostTimeUpdate());
}
for (int i = 0; i < FaceLandmark::NUM_OF_LANDMARK; ++i) {
float x = lmk[i * 2 + 0] * 112;
float y = lmk[i * 2 + 1] * 112;
cv::circle(image, cv::Point2f(x, y), 0, cv::Scalar(0, 0, 255), 1);
}
cv::imwrite("lmk.jpg", image);
}
void test_liveness() {
Configurable param;
// param.set<int>("model_index", ModelIndex::_06_msafa27);
param.set<std::string>("input_layer", "data");
param.set<std::vector<std::string>>("outputs_layers", {"556",});
param.set<std::vector<int>>("input_size", {80, 80});
param.set<std::vector<float>>("mean", {0.0f, 0.0f, 0.0f});
param.set<std::vector<float>>("norm", {1.0f, 1.0f, 1.0f});
param.set<bool>("swap_color", false); // RGB mode
param.set<int>("data_type", InputTensorInfo::kDataTypeImage);
param.set<int>("input_tensor_type", InputTensorInfo::kTensorTypeUint8);
param.set<int>("output_tensor_type", InputTensorInfo::kTensorTypeFp32);
param.set<bool>("nchw", false);
std::shared_ptr<RBGAntiSpoofing> m_rgb_anti_spoofing_;
InspireModel model;
loader.LoadModel("rgb_anti_spoofing", model);
m_rgb_anti_spoofing_ = std::make_shared<RBGAntiSpoofing>(80, true);
m_rgb_anti_spoofing_->loadData(model, InferenceHelper::kRknn);
std::vector<std::string> names = {
"test_res/images/test_data/real.jpg",
"test_res/images/test_data/fake.jpg",
"test_res/images/test_data/live.jpg",
"test_res/images/test_data/ttt.jpg",
"test_res/images/test_data/w.jpg",
"test_res/images/test_data/w2.jpg",
};
for (int i = 0; i < names.size(); ++i) {
auto image = cv::imread(names[i]);
Timer timer;
auto score = (*m_rgb_anti_spoofing_)(image);
LOGD("cost: %f", timer.GetCostTimeUpdate());
LOGD("%s : %f", names[i].c_str(), score);
}
}
// Same anti-spoofing scoring as test_liveness, but driven through a fully
// configured FaceContext with the liveness pipeline stage enabled.
int test_liveness_ctx() {
    CustomPipelineParameter parameter;
    parameter.enable_liveness = true;
    FaceContext ctx;
    ctx.Configuration("test_res/pack/Gundam_RV1109", inspire::DETECT_MODE_IMAGE, 3, parameter);
    const std::vector<std::string> names = {
            "test_res/images/test_data/real.jpg",
            "test_res/images/test_data/fake.jpg",
            "test_res/images/test_data/live.jpg",
            "test_res/images/test_data/ttt.jpg",
            "test_res/images/test_data/w.jpg",
            "test_res/images/test_data/w2.jpg",
            "test_res/images/test_data/bb.png",
    };
    // Score each sample through the context's anti-spoofing module.
    for (const auto &name : names) {
        auto image = cv::imread(name);
        auto score = (*ctx.FacePipelineModule()->getMRgbAntiSpoofing())(image);
        LOGD("%s : %f", name.c_str(), score);
    }
    return 0;
}
// Entry point: load the shared archive once, then run the enabled test
// routines. The remaining routines (test_rnet, test_mask, test_quality,
// test_landmark_mnn, test_landmark) are defined above but currently
// switched off.
int main() {
    loader.ReLoad("test_res/pack/Gundam_RV1109");
    test_liveness();
    (void) test_liveness_ctx();  // return code unused by this harness
    return 0;
}

View File

@@ -0,0 +1,76 @@
//
// Created by Tunm-Air13 on 2023/9/22.
//
#include "opencv2/opencv.hpp"
#include "inspireface/middleware/costman.h"
#include "inspireface/face_context.h"
using namespace inspire;
// Video-tracking smoke test: feeds numbered JPEG frames through the
// FaceContext in video mode, annotates the first tracked face per frame,
// and writes the annotated frames to track_frames/.
int main() {
    FaceContext ctx;
    CustomPipelineParameter param;
    int32_t ret = ctx.Configuration(
            "test_res/pack/Gundam_RV1109",
            DetectMode::DETECT_MODE_VIDEO,
            3,
            param);
    if (ret != HSUCCEED) {
        LOGE("Initiate error");
    }
    const std::string imageFolder = "test_res/video_frames/";
    const auto video_frame_num = 288;
    for (int i = 0; i < video_frame_num; ++i) {
        const auto index = i + 1;  // frame files are numbered from 1
        std::stringstream frameFileName;
        frameFileName << imageFolder << "frame-" << std::setw(4) << std::setfill('0') << index << ".jpg";
        cv::Mat frame = cv::imread(frameFileName.str());
        if (frame.empty()) {
            // Skip unreadable frames rather than handing a null buffer to the SDK.
            LOGE("Missing frame: %s", frameFileName.str().c_str());
            continue;
        }
        CameraStream stream;
        stream.SetRotationMode(ROTATION_0);
        stream.SetDataFormat(BGR);
        stream.SetDataBuffer(frame.data, frame.rows, frame.cols);
        Timer timer;
        ctx.FaceDetectAndTrack(stream);
        LOGD("Cost: %f", timer.GetCostTimeUpdate());
        LOGD("faces: %d", ctx.GetNumberOfFacesCurrentlyDetected());
        // Only annotate when a face is being tracked: the original code
        // indexed GetTrackingFaceList()[0] unconditionally, which is
        // undefined behaviour on frames with no detections.
        if (!ctx.GetTrackingFaceList().empty()) {
            auto &face = ctx.GetTrackingFaceList()[0];
            LOGD("track id: %d", face.GetTrackingId());
            for (auto &p: face.landmark_) {
                cv::circle(frame, p, 0, cv::Scalar(0, 0, 255), 3);
            }
            auto rect = face.GetRect();
            int track_id = face.GetTrackingId();
            int track_count = face.GetTrackingCount();
            cv::rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
            std::string text = "ID: " + std::to_string(track_id) + " Count: " + std::to_string(track_count) + " Cf: " + std::to_string(face.GetConfidence());
            cv::Point text_position(rect.x, rect.y - 10);
            int font_face = cv::FONT_HERSHEY_SIMPLEX;
            double font_scale = 0.5;
            int font_thickness = 1;
            cv::Scalar font_color(255, 255, 255);
            cv::putText(frame, text, text_position, font_face, font_scale, font_color, font_thickness);
        }
        // Always write the frame (annotated or not) so the output sequence
        // keeps the same numbering as the input.
        std::stringstream saveFile;
        saveFile << "track_frames/" << "result-" << std::setw(4) << std::setfill('0') << index << ".jpg";
        cv::imwrite(saveFile.str(), frame);
    }
    return 0;
}