Mirror of https://github.com/deepinsight/insightface.git (synced 2026-03-03 13:40:19 +00:00). Commit "Update" contains the following new example files:
@@ -0,0 +1,84 @@
|
||||
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
#include "inspireface/track_module/landmark/order_of_hyper_landmark.h"

// Demo: detect a face, fetch its dense landmarks, then crop the whole face
// and the left eye via similarity transforms and write debug images to disk.
//
// Usage: <model_path> <image_path>
int main(int argc, char** argv) {
    if (argc != 3) {
        std::cout << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
        return -1;
    }

    std::string model_path = argv[1];
    std::string image_path = argv[2];

    // Global init (only once per process)
    INSPIREFACE_CONTEXT->Reload(model_path);

    // Create image and frame process
    inspirecv::Image image = inspirecv::Image::Create(image_path);
    inspirecv::FrameProcess process =
      inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);

    // Create session with the full pipeline enabled
    inspire::CustomPipelineParameter param;
    param.enable_recognition = true;
    param.enable_liveness = true;
    param.enable_mask_detect = true;
    param.enable_face_attribute = true;
    param.enable_face_quality = true;
    param.enable_interaction_liveness = true;
    std::shared_ptr<inspire::Session> session(inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320));

    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");

    // Detect and track
    std::vector<inspire::FaceTrackWrap> results;
    int32_t ret = session->FaceDetectAndTrack(process, results);
    INSPIREFACE_CHECK_MSG(ret == 0, "FaceDetectAndTrack failed");
    // Fix: guard against an empty result list before indexing results[0];
    // the original only checked the return code, not the number of faces.
    INSPIREFACE_CHECK_MSG(!results.empty(), "No face detected");

    auto first = results[0];
    auto lmk = session->GetFaceDenseLandmark(first);
    std::cout << "lmk: " << lmk.size() << std::endl;
    for (size_t i = 0; i < lmk.size(); i++) {
        image.DrawCircle(lmk[i].As<int>(), 5, inspirecv::Color::Red);
    }

    // Landmarks are in rotated (preview) space; map them back to the
    // original camera space with the inverse of the frame's affine matrix.
    inspirecv::TransformMatrix rotation_mode_affine = process.GetAffineMatrix();

    std::vector<inspirecv::Point2f> stand_lmk = ApplyTransformToPoints(lmk, rotation_mode_affine.GetInverse());

    // Use the full landmark set to build a square face crop (112x112)
    auto rect = inspirecv::MinBoundingRect(stand_lmk);
    auto rect_pts = rect.As<float>().ToFourVertices();
    std::vector<inspirecv::Point2f> dst_pts = {{0, 0}, {112, 0}, {112, 112}, {0, 112}};
    std::vector<inspirecv::Point2f> camera_pts = ApplyTransformToPoints(rect_pts, rotation_mode_affine);

    auto affine = inspirecv::SimilarityTransformEstimate(camera_pts, dst_pts);
    auto image_affine = process.ExecuteImageAffineProcessing(affine, 112, 112);
    image_affine.Write("affine.jpg");

    // image.DrawRect(rect.As<int>(), inspirecv::Color::Red);
    // image.Write("lmk.jpg");

    // Build a left-eye crop (64x64) from the eye landmark subset
    std::vector<inspirecv::Point2i> points;
    for (const auto& idx : inspire::HLMK_LEFT_EYE_POINTS_INDEX) {
        points.emplace_back(stand_lmk[idx].GetX(), stand_lmk[idx].GetY());
    }
    std::cout << "points: " << points.size() << std::endl;
    auto rect_eye = inspirecv::MinBoundingRect(points).Square(1.4f);
    // Draw debug overlay
    image.DrawRect(rect_eye.As<int>(), inspirecv::Color::Red);
    auto rect_pts_eye = rect_eye.As<float>().ToFourVertices();
    std::vector<inspirecv::Point2f> dst_pts_eye = {{0, 0}, {64, 0}, {64, 64}, {0, 64}};
    std::vector<inspirecv::Point2f> camera_pts_eye = ApplyTransformToPoints(rect_pts_eye, rotation_mode_affine);

    auto affine_eye = inspirecv::SimilarityTransformEstimate(camera_pts_eye, dst_pts_eye);
    auto eye_affine = process.ExecuteImageAffineProcessing(affine_eye, 64, 64);
    eye_affine.Write("eye.jpg");
    return 0;
}
|
||||
@@ -0,0 +1,78 @@
|
||||
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>

// Demo: extract features from two face images and compare them, first via
// the regular detect+extract pipeline and then via explicit alignment crops.
//
// Usage: <model_path> <image_path1> <image_path2>
int main(int argc, char** argv) {
    if (argc != 4) {
        std::cout << "Usage: " << argv[0] << " <model_path> <image_path1> <image_path2>" << std::endl;
        return -1;
    }

    std::string model_path = argv[1];
    std::string image_path1 = argv[2];
    std::string image_path2 = argv[3];

    // Global init (only once per process)
    INSPIREFACE_CONTEXT->Reload(model_path);

    // Create images and frame processes
    inspirecv::Image image1 = inspirecv::Image::Create(image_path1);
    inspirecv::Image image2 = inspirecv::Image::Create(image_path2);
    inspirecv::FrameProcess process1 = inspirecv::FrameProcess::Create(image1.Data(), image1.Height(), image1.Width(), inspirecv::BGR, inspirecv::ROTATION_0);
    inspirecv::FrameProcess process2 = inspirecv::FrameProcess::Create(image2.Data(), image2.Height(), image2.Width(), inspirecv::BGR, inspirecv::ROTATION_0);

    // Create session (recognition only)
    inspire::CustomPipelineParameter param;
    param.enable_recognition = true;

    std::shared_ptr<inspire::Session> session(
      inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320));

    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");

    // Detect and track
    std::vector<inspire::FaceTrackWrap> results1;
    std::vector<inspire::FaceTrackWrap> results2;

    session->FaceDetectAndTrack(process1, results1);
    session->FaceDetectAndTrack(process2, results2);

    INSPIREFACE_CHECK_MSG(!results1.empty() && !results2.empty(), "No face detected");

    // Get features for the first face in each image
    inspire::FaceEmbedding feature1;
    inspire::FaceEmbedding feature2;
    session->FaceFeatureExtract(process1, results1[0], feature1);
    session->FaceFeatureExtract(process2, results2[0], feature2);

    // Compare
    // Fix: initialize similarity so it is never read uninitialized should
    // the similarity call fail to write it.
    float similarity = 0.0f;
    INSPIREFACE_FEATURE_HUB->CosineSimilarity(feature1.embedding, feature2.embedding, similarity);
    std::cout << "cosine of similarity: " << similarity << std::endl;
    std::cout << "percentage of similarity: " << SIMILARITY_CONVERTER_RUN(similarity) << std::endl;

    std::cout << "== using alignment image ==" << std::endl;

    // Get face alignment (cropped + warped) images
    inspirecv::Image wrapped1;
    inspirecv::Image wrapped2;
    session->GetFaceAlignmentImage(process1, results1[0], wrapped1);
    session->GetFaceAlignmentImage(process2, results2[0], wrapped2);
    wrapped1.Write("wrapped1.jpg");
    wrapped2.Write("wrapped2.jpg");

    inspire::FaceEmbedding feature1_alignment;
    inspire::FaceEmbedding feature2_alignment;
    session->FaceFeatureExtractWithAlignmentImage(wrapped1, feature1_alignment);
    session->FaceFeatureExtractWithAlignmentImage(wrapped2, feature2_alignment);

    INSPIREFACE_FEATURE_HUB->CosineSimilarity(feature1_alignment.embedding, feature2_alignment.embedding, similarity);
    std::cout << "cosine of similarity: " << similarity << std::endl;
    std::cout << "percentage of similarity: " << SIMILARITY_CONVERTER_RUN(similarity) << std::endl;
    return 0;
}
|
||||
@@ -0,0 +1,80 @@
|
||||
#include <iostream>
#include <cstdio>  // Fix: std::remove was used without including <cstdio>
#include <inspireface/inspireface.hpp>

// Demo: FeatureHub CRUD round-trip — insert a face feature, search for it
// with a second photo of the same person, then remove it and verify the
// search no longer matches.
int main() {
    // Launch InspireFace
    std::string model_path = "test_res/pack/Pikachu";
    INSPIREFACE_CONTEXT->Reload(model_path);
    INSPIREFACE_CHECK_MSG(INSPIREFACE_CONTEXT->isMLoad(), "InspireFace is not loaded");

    // Enable feature hub backed by an on-disk database
    std::string db_path = "case_crud.db";
    // Remove the database file if it exists (failure is expected on the
    // first run, when the file does not exist yet).
    if (std::remove(db_path.c_str()) != 0) {
        std::cerr << "Error removing database file: " << db_path << std::endl;
    }
    inspire::DatabaseConfiguration db_config;
    db_config.enable_persistence = true;
    db_config.persistence_db_path = db_path;
    db_config.search_mode = inspire::SEARCH_MODE_EXHAUSTIVE;
    db_config.recognition_threshold = 0.48f;
    db_config.primary_key_mode = inspire::AUTO_INCREMENT;
    INSPIREFACE_FEATURE_HUB->EnableHub(db_config);

    // Create a session (recognition only)
    auto param = inspire::CustomPipelineParameter();
    param.enable_recognition = true;
    auto session = inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 1, param, 320);
    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not created");

    // Prepare an image for insertion into the hub
    auto image = inspirecv::Image::Create("test_res/data/bulk/kun.jpg");
    auto image_process = inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);

    // Detect and track
    std::vector<inspire::FaceTrackWrap> results;
    session->FaceDetectAndTrack(image_process, results);
    INSPIREFACE_CHECK_MSG(results.size() > 0, "No face detected");

    // Extract face feature
    inspire::FaceEmbedding feature;
    session->FaceFeatureExtract(image_process, results[0], feature);

    // Insert face feature into the hub; because primary_key_mode is
    // AUTO_INCREMENT, the INSPIRE_INVALID_ID input id is ignored and the
    // assigned id is returned in result_id.
    int64_t result_id;
    INSPIREFACE_FEATURE_HUB->FaceFeatureInsert(feature.embedding, INSPIRE_INVALID_ID, result_id);

    // Prepare a photo of the same person for the query
    auto query_image = inspirecv::Image::Create("test_res/data/bulk/jntm.jpg");
    auto query_image_process = inspirecv::FrameProcess::Create(query_image.Data(), query_image.Height(), query_image.Width(), inspirecv::BGR, inspirecv::ROTATION_0);

    // Detect and track
    std::vector<inspire::FaceTrackWrap> query_results;
    session->FaceDetectAndTrack(query_image_process, query_results);
    INSPIREFACE_CHECK_MSG(query_results.size() > 0, "No face detected");

    // Extract face feature
    inspire::FaceEmbedding query_feature;
    session->FaceFeatureExtract(query_image_process, query_results[0], query_feature);

    // Search face feature
    inspire::FaceSearchResult search_result;
    INSPIREFACE_FEATURE_HUB->SearchFaceFeature(query_feature.embedding, search_result, true);
    std::cout << "Search face feature result: " << search_result.id << std::endl;
    std::cout << "Search face feature similarity: " << search_result.similarity << std::endl;

    INSPIREFACE_CHECK_MSG(search_result.id == result_id, "Search face feature result id is not equal to the inserted id");

    // Remove the face feature
    INSPIREFACE_FEATURE_HUB->FaceFeatureRemove(result_id);
    INSPIREFACE_CHECK_MSG(INSPIREFACE_FEATURE_HUB->GetFaceFeatureCount() == 0, "Face feature is not removed");

    std::cout << "Remove face feature successfully" << std::endl;

    // Query again: after removal the search must come back empty
    INSPIREFACE_FEATURE_HUB->SearchFaceFeature(query_feature.embedding, search_result, true);
    INSPIREFACE_CHECK_MSG(search_result.id == INSPIRE_INVALID_ID, "Search face feature result id is not equal to the inserted id");
    std::cout << "Query again, search face feature result: " << search_result.id << std::endl;

    return 0;
}
|
||||
@@ -0,0 +1,56 @@
|
||||
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>

// Demo: detect and track faces on a 90-degree-rotated input stream, print
// per-face quality and transform info, and save an annotated result image.
//
// Usage: <model_path> <image_path>
int main(int argc, char** argv) {
    if (argc != 3) {
        std::cout << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
        return -1;
    }

    std::string model_path = argv[1];
    std::string image_path = argv[2];

    // Global init (only once per process)
    INSPIREFACE_CONTEXT->Reload(model_path);

    // Create image and frame process; the input is treated as rotated 90°
    inspirecv::Image image = inspirecv::Image::Create(image_path);
    inspirecv::FrameProcess process =
      inspirecv::FrameProcess::Create(image.Data(), image.Height(), image.Width(), inspirecv::BGR, inspirecv::ROTATION_90);

    // Create session (up to 100 tracked faces, 640px detect level)
    inspire::CustomPipelineParameter param;
    param.enable_recognition = true;
    param.enable_liveness = true;
    param.enable_mask_detect = true;
    param.enable_face_attribute = true;
    param.enable_face_quality = true;
    std::shared_ptr<inspire::Session> session(inspire::Session::CreatePtr(inspire::DETECT_MODE_ALWAYS_DETECT, 100, param, 640));

    // Fix: validate the session BEFORE dereferencing it; the original
    // called SetTrackPreviewSize ahead of the null check.
    INSPIREFACE_CHECK_MSG(session != nullptr, "Session is not valid");
    session->SetTrackPreviewSize(640);

    // Detect and track
    std::vector<inspire::FaceTrackWrap> results;
    int32_t ret = session->FaceDetectAndTrack(process, results);
    INSPIREFACE_CHECK_MSG(ret == 0, "FaceDetectAndTrack failed");

    for (auto& result : results) {
        std::cout << "result: " << result.trackId << std::endl;
        std::cout << "quality: " << result.quality[0] << ", " << result.quality[1] << ", " << result.quality[2] << ", " << result.quality[3] << ", "
                  << result.quality[4] << std::endl;
        inspirecv::Rect2i rect = inspirecv::Rect2i::Create(result.rect.x, result.rect.y, result.rect.width, result.rect.height);
        std::cout << rect << std::endl;
        image.DrawRect(rect, inspirecv::Color::Red);
        inspirecv::TransformMatrix trans = inspirecv::TransformMatrix::Create(result.trans.m00, result.trans.m01, result.trans.tx, result.trans.m10, result.trans.m11, result.trans.ty);
        std::cout << "trans: " << trans.GetInverse() << std::endl;
    }
    image.Write("result.jpg");

    return 0;
}
|
||||
@@ -0,0 +1,200 @@
|
||||
#include <iostream>
#include <cerrno>  // Fix: errno was used without including <cerrno>
#include <inspirecv/inspirecv.h>
#include <inspireface/inspireface.hpp>
#ifdef _WIN32
#include <direct.h>
#define CREATE_DIR(dir) _mkdir(dir)
#else
#include <sys/stat.h>
#define CREATE_DIR(dir) mkdir(dir, 0777)
#endif

// Demo: a tour of the inspirecv image API — I/O, processing, geometric
// transforms, drawing primitives, and FrameProcess usage. Outputs go to ./cv
int main() {
    // Make output directory
    if (CREATE_DIR("cv") == -1) {
        // If the directory already exists, it is not an error
        if (errno != EEXIST) {
            std::cerr << "Error creating directory" << std::endl;
            return 1;
        }
    }

    /* Image I/O */

    // Load image from file
    // Load with 3 channels (BGR, like opencv)
    inspirecv::Image img = inspirecv::Image::Create("test_res/data/bulk/kun_cartoon_crop.jpg", 3);

    // Load image from buffer
    // uint8_t* buffer = ...; // buffer is a pointer to the image data
    // bool is_alloc_mem = false; // if true, will allocate memory for the image data,
    //                            // false is recommended to point to the original data to avoid copying
    // inspirecv::Image img_buffer = inspirecv::Image::Create(width, height, channel, buffer, is_alloc_mem);

    // Save image to file
    img.Write("cv/output.jpg");

    // Show image, warning: it must depend on opencv
    // img.Show("input");

    // Get pointer to image data (illustrative only, hence unused)
    [[maybe_unused]] const uint8_t* ptr = img.Data();

    /* Image Processing */
    // Convert to grayscale
    inspirecv::Image gray = img.ToGray();
    gray.Write("cv/gray.jpg");

    // Apply Gaussian blur
    inspirecv::Image blurred = img.GaussianBlur(3, 1.0);
    blurred.Write("cv/blurred.jpg");

    // Geometric transformations
    auto scale = 0.35;
    bool use_bilinear = true;
    inspirecv::Image resized = img.Resize(img.Width() * scale, img.Height() * scale, use_bilinear);  // Resize image
    resized.Write("cv/resized.jpg");

    // Rotate 90 degrees clockwise
    inspirecv::Image rotated = img.Rotate90();
    rotated.Write("cv/rotated.jpg");

    // Flip vertically
    inspirecv::Image flipped_vertical = img.FlipVertical();
    flipped_vertical.Write("cv/flipped_vertical.jpg");

    // Flip horizontally
    inspirecv::Image flipped_horizontal = img.FlipHorizontal();
    flipped_horizontal.Write("cv/flipped_horizontal.jpg");

    // Crop for rectangle
    inspirecv::Rect<int> rect = inspirecv::Rect<int>::Create(78, 41, 171, 171);
    inspirecv::Image cropped = img.Crop(rect);
    cropped.Write("cv/cropped.jpg");

    // Image padding
    int top = 50, bottom = 50, left = 50, right = 50;
    inspirecv::Image padded = img.Pad(top, bottom, left, right, inspirecv::Color::Black);
    padded.Write("cv/padded.jpg");

    // Swap red and blue channels
    inspirecv::Image swapped = img.SwapRB();
    swapped.Write("cv/swapped.jpg");

    // Multiply image by scale factor
    double scale_factor = 0.5;
    inspirecv::Image scaled = img.Mul(scale_factor);
    scaled.Write("cv/scaled.jpg");

    // Add value to image
    double value = -175;
    inspirecv::Image added = img.Add(value);
    added.Write("cv/added.jpg");

    // Rotate 90 degrees clockwise (also support 270 and 180)
    inspirecv::Image rotated_90 = img.Rotate90();
    rotated_90.Write("cv/rotated_90.jpg");

    // Affine transform
    /**
     * Create a transform matrix from the following matrix
     * [[a11, a12, tx],
     *  [a21, a22, ty]]
     *
     * Face crop transform matrix
     * [[0.0, -1.37626, 261.127],
     *  [1.37626, 0.0, 85.1831]]
     */
    float a11 = 0.0f;
    float a12 = -1.37626f;
    float a21 = 1.37626f;
    float a22 = 0.0f;
    float b1 = 261.127f;
    float b2 = 85.1831f;

    inspirecv::TransformMatrix trans = inspirecv::TransformMatrix::Create(a11, a12, b1, a21, a22, b2);
    int dst_width = 112;
    int dst_height = 112;
    inspirecv::Image affine = rotated_90.WarpAffine(trans, dst_width, dst_height);
    affine.Write("cv/affine.jpg");

    /* Image Draw */
    inspirecv::Image draw_img = img.Clone();

    // Draw a rectangle
    inspirecv::Rect<int> new_rect = rect.Square(1.1f);  // Square and expand the rect
    int thickness = 3;
    draw_img.DrawRect(new_rect, inspirecv::Color::Green, thickness);
    draw_img.Write("cv/draw_rect.jpg");

    // Draw a circle
    draw_img = img.Clone();
    std::vector<inspirecv::Point<int>> points = new_rect.As<int>().ToFourVertices();
    for (auto& point : points) {
        draw_img.DrawCircle(point, 1, inspirecv::Color::Red, 5);
    }
    draw_img.Write("cv/draw_circle.jpg");

    // Draw a line
    draw_img = img.Clone();
    draw_img.DrawLine(points[0], points[1], inspirecv::Color::Cyan, 2);
    draw_img.DrawLine(points[1], points[2], inspirecv::Color::Magenta, 2);
    draw_img.DrawLine(points[2], points[3], inspirecv::Color::Pink, 2);
    draw_img.DrawLine(points[3], points[0], inspirecv::Color::Yellow, 2);
    draw_img.Write("cv/draw_line.jpg");

    // Fill a rectangle
    draw_img = img.Clone();
    draw_img.Fill(new_rect, inspirecv::Color::Purple);
    draw_img.Write("cv/fill_rect.jpg");

    // Reset the image content to a uniform mid-gray buffer
    std::vector<uint8_t> gray_color(img.Width() * img.Height() * 3, 128);
    img.Reset(img.Width(), img.Height(), 3, gray_color.data());
    img.Write("cv/reset.jpg");

    /** FrameProcess */

    // BGR888 as raw data
    inspirecv::Image raw = inspirecv::Image::Create("test_res/data/bulk/kun_cartoon_crop_r90.jpg", 3);
    const uint8_t* buffer = raw.Data();

    // You can also use other image format, like NV21, NV12, RGBA, RGB, BGR, BGRA
    // const uint8_t* buffer = ...;

    // Create frame process
    auto width = raw.Width();
    auto height = raw.Height();
    auto rotation_mode = inspirecv::ROTATION_90;
    auto data_format = inspirecv::BGR;
    inspirecv::FrameProcess frame_process = inspirecv::FrameProcess::Create(buffer, height, width, data_format, rotation_mode);

    // Set preview size
    frame_process.SetPreviewSize(160);

    // Set preview scale
    // frame_process.SetPreviewScale(0.5f);

    // Get transform image
    inspirecv::Image transform_img = frame_process.ExecutePreviewImageProcessing(true);
    transform_img.Write("cv/transform_img.jpg");

    // ExecuteImageAffineProcessing

    // Face crop transform matrix
    // [[0.0, 0.726607, -61.8946],
    //  [-0.726607, 0.0, 189.737]]
    a11 = 0.0f;
    a12 = 0.726607f;
    a21 = -0.726607f;  // Fix: add the 'f' suffix (was a silent double->float narrowing)
    a22 = 0.0f;
    b1 = -61.8946f;
    b2 = 189.737f;
    inspirecv::TransformMatrix affine_matrix = inspirecv::TransformMatrix::Create(a11, a12, b1, a21, a22, b2);
    dst_width = 112;
    dst_height = 112;
    inspirecv::Image affine_img = frame_process.ExecuteImageAffineProcessing(affine_matrix, dst_width, dst_height);
    affine_img.Write("cv/affine_img.jpg");

    return 0;
}
|
||||
Reference in New Issue
Block a user