Integrating MTCNN in all of the demos.
@@ -442,7 +442,7 @@ int main (int argc, char **argv)
     else
     {
         vector<double> confidences;
-        LandmarkDetector::DetectFacesMTCNN(face_detections, grayscale_image, face_detector_mtcnn, confidences);
+        LandmarkDetector::DetectFacesMTCNN(face_detections, read_image, face_detector_mtcnn, confidences);
     }
     // Detect landmarks around detected faces
     int face_det = 0;
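
The hunk above switches the demo's multi-face detection from the grayscale buffer to the original colour image. A minimal, hypothetical sketch of driving that path end to end is below; the umbrella header, the FaceDetectorMTCNN type name, the input path and the detector model location are assumptions inferred from this commit and the demo code, not guaranteed by it.

// Hypothetical sketch, not part of this commit: multi-face MTCNN detection on a BGR image.
#include <iostream>
#include <vector>
#include <opencv2/imgcodecs.hpp>
#include "LandmarkCoreIncludes.h"   // assumed demo umbrella header

int main()
{
    cv::Mat read_image = cv::imread("sample.jpg");                        // hypothetical input, BGR

    LandmarkDetector::FaceDetectorMTCNN face_detector_mtcnn;              // type name assumed
    face_detector_mtcnn.Read("model/mtcnn_detector/MTCNN_detector.txt");  // hypothetical model location

    std::vector<cv::Rect_<double> > face_detections;
    std::vector<double> confidences;
    LandmarkDetector::DetectFacesMTCNN(face_detections, read_image, face_detector_mtcnn, confidences);

    for (size_t i = 0; i < face_detections.size(); ++i)
    {
        std::cout << "face " << i << ": " << face_detections[i]
                  << " confidence " << confidences[i] << std::endl;
    }
    return 0;
}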
@@ -282,20 +282,8 @@ int main (int argc, char **argv)
     while(!captured_image.empty())
     {
 
-        // Reading the images
-        cv::Mat_<uchar> grayscale_image;
-
-        if(captured_image.channels() == 3)
-        {
-            cv::cvtColor(captured_image, grayscale_image, CV_BGR2GRAY);
-        }
-        else
-        {
-            grayscale_image = captured_image.clone();
-        }
-
         // The actual facial landmark detection / tracking
-        bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, clnf_model, det_parameters);
+        bool detection_success = LandmarkDetector::DetectLandmarksInVideo(captured_image, clnf_model, det_parameters);
 
         // Visualising the results
         // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
@@ -141,7 +141,9 @@ int main (int argc, char **argv)
     LandmarkDetector::CLNF clnf_model(det_parameters[0].model_location);
     clnf_model.face_detector_HAAR.load(det_parameters[0].haar_face_detector_location);
     clnf_model.haar_face_detector_location = det_parameters[0].haar_face_detector_location;
+    clnf_model.face_detector_MTCNN.Read(det_parameters[0].mtcnn_face_detector_location);
+    clnf_model.mtcnn_face_detector_location = det_parameters[0].mtcnn_face_detector_location;
 
     clnf_models.reserve(num_faces_max);
 
     clnf_models.push_back(clnf_model);
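
The hunk above wires the MTCNN detector into the multi-face demo's prototype model. As a rough illustration of the resulting initialisation pattern (a sketch assuming only the member names shown in the diff; the helper function itself is hypothetical), one fully loaded prototype CLNF is copied so each tracked face gets its own state:

// Hypothetical helper, not part of this commit: one prototype CLNF
// (landmark model + HAAR + MTCNN detectors) copied num_faces_max times.
#include <vector>
#include "LandmarkCoreIncludes.h"   // assumed demo umbrella header

void InitFaceModels(std::vector<LandmarkDetector::CLNF>& clnf_models,
                    const LandmarkDetector::FaceModelParameters& params, int num_faces_max)
{
    LandmarkDetector::CLNF clnf_model(params.model_location);
    clnf_model.face_detector_HAAR.load(params.haar_face_detector_location);
    clnf_model.haar_face_detector_location = params.haar_face_detector_location;
    clnf_model.face_detector_MTCNN.Read(params.mtcnn_face_detector_location);
    clnf_model.mtcnn_face_detector_location = params.mtcnn_face_detector_location;

    clnf_models.reserve(num_faces_max);
    for (int i = 0; i < num_faces_max; ++i)
    {
        clnf_models.push_back(clnf_model);   // each copy keeps its own tracking state
    }
}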
@@ -278,7 +280,7 @@ int main (int argc, char **argv)
         else
         {
             vector<double> confidences;
-            LandmarkDetector::DetectFacesMTCNN(face_detections, grayscale_image, clnf_models[0].face_detector_MTCNN, confidences);
+            LandmarkDetector::DetectFacesMTCNN(face_detections, captured_image, clnf_models[0].face_detector_MTCNN, confidences);
         }
     }
 
@@ -523,29 +523,17 @@ int main (int argc, char **argv)
             // if loading images assume 30fps
             time_stamp = (double)frame_count * (1.0 / 30.0);
         }
 
-        // Reading the images
-        cv::Mat_<uchar> grayscale_image;
-
-        if(captured_image.channels() == 3)
-        {
-            cvtColor(captured_image, grayscale_image, CV_BGR2GRAY);
-        }
-        else
-        {
-            grayscale_image = captured_image.clone();
-        }
-
         // The actual facial landmark detection / tracking
         bool detection_success;
 
         if(video_input || images_as_video)
         {
-            detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters);
+            detection_success = LandmarkDetector::DetectLandmarksInVideo(captured_image, face_model, det_parameters);
         }
         else
        {
-            detection_success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_model, det_parameters);
+            detection_success = LandmarkDetector::DetectLandmarksInImage(captured_image, face_model, det_parameters);
        }
 
         // Gaze tracking, absolute gaze direction
@@ -54,16 +54,16 @@ namespace LandmarkDetector
     // Landmark detection in videos, need to provide an image and model parameters (default values work well)
     // Optionally can provide a bounding box from which to start tracking
     //================================================================================================================
-    bool DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params);
-    bool DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);
+    bool DetectLandmarksInVideo(const cv::Mat &image, CLNF& clnf_model, FaceModelParameters& params);
+    bool DetectLandmarksInVideo(const cv::Mat &image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);
 
     //================================================================================================================
     // Landmark detection in image, need to provide an image and optionally CLNF model together with parameters (default values work well)
     // Optionally can provide a bounding box in which detection is performed (this is useful if multiple faces are to be detected in images)
     //================================================================================================================
-    bool DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params);
+    bool DetectLandmarksInImage(const cv::Mat &image, CLNF& clnf_model, FaceModelParameters& params);
     // Providing a bounding box
-    bool DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);
+    bool DetectLandmarksInImage(const cv::Mat &image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);
 
     //================================================================
     // Helper function for getting head pose from CLNF parameters
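
The declarations above are the user-facing change: both entry points now take a plain cv::Mat, so callers can hand over the BGR frame and let the library convert to grayscale internally. A minimal, hypothetical tracking loop under that assumption (the webcam index and the umbrella header are assumptions; model locations are taken from the FaceModelParameters defaults, which are assumed to be valid):

// Hypothetical usage sketch, not part of this commit: tracking with the new cv::Mat overload.
#include <opencv2/videoio.hpp>
#include "LandmarkCoreIncludes.h"   // assumed demo umbrella header

int main()
{
    LandmarkDetector::FaceModelParameters det_parameters;              // default model locations assumed valid
    LandmarkDetector::CLNF clnf_model(det_parameters.model_location);

    cv::VideoCapture capture(0);                                       // hypothetical webcam source
    cv::Mat captured_image;
    while (capture.read(captured_image) && !captured_image.empty())
    {
        // The colour frame goes in directly; no cv::cvtColor at the call site any more.
        bool detection_success =
            LandmarkDetector::DetectLandmarksInVideo(captured_image, clnf_model, det_parameters);

        if (detection_success)
        {
            // clnf_model.detected_landmarks holds the current 2D landmark estimates.
        }
    }
    return 0;
}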
@@ -259,12 +259,23 @@ void CorrectGlobalParametersVideo(const cv::Mat_<uchar> &grayscale_image, CLNF&
 
 }
 
-bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params)
+bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat &image, CLNF& clnf_model, FaceModelParameters& params)
 {
     // First need to decide if the landmarks should be "detected" or "tracked"
     // Detected means running face detection and a larger search area, tracked means initialising from previous step
     // and using a smaller search area
 
+    cv::Mat grayscale_image;
+    if (image.channels() == 3)
+    {
+        cv::cvtColor(image, grayscale_image, CV_BGR2GRAY);
+    }
+    else
+    {
+        grayscale_image = image.clone();
+    }
+
 
     // Indicating that this is a first detection in video sequence or after restart
     bool initial_detection = !clnf_model.tracking_initialised;
 
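
A side note on the hunk above: the same BGR-to-grayscale block is now duplicated here and in both DetectLandmarksInImage overloads further down. A small helper along these lines (purely hypothetical, not something this commit adds) would centralise it; cv::COLOR_BGR2GRAY is the modern spelling of the CV_BGR2GRAY constant used in the diff:

// Hypothetical refactoring sketch, not part of this commit.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

static cv::Mat ToGrayscale(const cv::Mat& image)
{
    cv::Mat grayscale_image;
    if (image.channels() == 3)
    {
        cv::cvtColor(image, grayscale_image, cv::COLOR_BGR2GRAY);   // equivalent to CV_BGR2GRAY
    }
    else
    {
        grayscale_image = image.clone();
    }
    return grayscale_image;
}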
@@ -344,7 +355,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
         else if (params.curr_face_detector == FaceModelParameters::MTCNN_DETECTOR)
         {
             double confidence;
-            face_detection_success = LandmarkDetector::DetectSingleFaceMTCNN(bounding_box, grayscale_image, clnf_model.face_detector_MTCNN, confidence, preference_det);
+            face_detection_success = LandmarkDetector::DetectSingleFaceMTCNN(bounding_box, image, clnf_model.face_detector_MTCNN, confidence, preference_det);
         }
 
         // Attempt to detect landmarks using the detected face (if unseccessful the detection will be ignored)
@@ -411,7 +422,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 
 }
 
-bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
+bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat &image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
 {
     if(bounding_box.width > 0)
     {
@@ -423,7 +434,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
         clnf_model.tracking_initialised = true;
     }
 
-    return DetectLandmarksInVideo(grayscale_image, clnf_model, params);
+    return DetectLandmarksInVideo(image, clnf_model, params);
 
 }
 
@@ -433,9 +444,19 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 //================================================================================================================
 
 // This is the one where the actual work gets done, other DetectLandmarksInImage calls lead to this one
-bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
+bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat &image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
 {
 
+    cv::Mat grayscale_image;
+    if (image.channels() == 3)
+    {
+        cv::cvtColor(image, grayscale_image, CV_BGR2GRAY);
+    }
+    else
+    {
+        grayscale_image = image.clone();
+    }
+
     // Can have multiple hypotheses
     vector<cv::Vec3d> rotation_hypotheses;
 
@@ -531,8 +552,17 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
     return best_success;
 }
 
-bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params)
+bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat &image, CLNF& clnf_model, FaceModelParameters& params)
 {
+    cv::Mat grayscale_image;
+    if (image.channels() == 3)
+    {
+        cv::cvtColor(image, grayscale_image, CV_BGR2GRAY);
+    }
+    else
+    {
+        grayscale_image = image.clone();
+    }
+
     cv::Rect_<double> bounding_box;
 
@@ -556,7 +586,12 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
     }
     else if(params.curr_face_detector == FaceModelParameters::HAAR_DETECTOR)
     {
-        LandmarkDetector::DetectSingleFace(bounding_box, grayscale_image, clnf_model.face_detector_HAAR);
+        LandmarkDetector::DetectSingleFace(bounding_box, image, clnf_model.face_detector_HAAR);
+    }
+    else if (params.curr_face_detector == FaceModelParameters::MTCNN_DETECTOR)
+    {
+        double confidence;
+        LandmarkDetector::DetectSingleFaceMTCNN(bounding_box, image, clnf_model.face_detector_MTCNN, confidence);
     }
 
     if(bounding_box.width == 0)
@@ -565,6 +600,6 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
     }
     else
     {
-        return DetectLandmarksInImage(grayscale_image, bounding_box, clnf_model, params);
+        return DetectLandmarksInImage(image, bounding_box, clnf_model, params);
     }
 }
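
Finally, the image-path hunks above add an MTCNN branch to the single-face detector switch and forward the colour image to the bounding-box overload. A hypothetical end-to-end sketch of that path (the umbrella header and input path are assumptions; the MTCNN model location is assumed to be filled in by the FaceModelParameters defaults):

// Hypothetical usage sketch, not part of this commit: single-image landmark detection with MTCNN.
#include <opencv2/imgcodecs.hpp>
#include "LandmarkCoreIncludes.h"   // assumed demo umbrella header

int main()
{
    LandmarkDetector::FaceModelParameters det_parameters;
    det_parameters.curr_face_detector = LandmarkDetector::FaceModelParameters::MTCNN_DETECTOR;

    LandmarkDetector::CLNF clnf_model(det_parameters.model_location);
    clnf_model.face_detector_MTCNN.Read(det_parameters.mtcnn_face_detector_location);

    cv::Mat image = cv::imread("face.jpg");   // hypothetical input, BGR
    bool detection_success = LandmarkDetector::DetectLandmarksInImage(image, clnf_model, det_parameters);
    return detection_success ? 0 : 1;
}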