diff --git a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
index 297461b9..0ff38aea 100644
--- a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
+++ b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
@@ -130,17 +130,17 @@ int main(int argc, char **argv)
 
 	INFO_STREAM("Device or file opened");
 
-	cv::Mat captured_image = sequence_reader.GetNextFrame();
+	cv::Mat rgb_image = sequence_reader.GetNextFrame();
 
 	INFO_STREAM("Starting tracking");
-	while (!captured_image.empty()) // this is not a for loop as we might also be reading from a webcam
+	while (!rgb_image.empty()) // this is not a for loop as we might also be reading from a webcam
 	{
 
 		// Reading the images
 		cv::Mat_<uchar> grayscale_image = sequence_reader.GetGrayFrame();
 
 		// The actual facial landmark detection / tracking
-		bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters);
+		bool detection_success = LandmarkDetector::DetectLandmarksInVideo(rgb_image, face_model, det_parameters, grayscale_image);
 
 		// Gaze tracking, absolute gaze direction
 		cv::Point3f gazeDirection0(0, 0, -1);
@@ -160,7 +160,7 @@ int main(int argc, char **argv)
 		fps_tracker.AddFrame();
 
 		// Displaying the tracking visualizations
-		visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+		visualizer.SetImage(rgb_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
 		visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.GetVisibilities());
 		visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 		visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
@@ -180,7 +180,7 @@ int main(int argc, char **argv)
 		}
 
 		// Grabbing the next frame in the sequence
-		captured_image = sequence_reader.GetNextFrame();
+		rgb_image = sequence_reader.GetNextFrame();
 
 	}
 
diff --git a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp
index 2d19791f..7fd62185 100644
--- a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp
+++ b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp
@@ -303,7 +303,7 @@ int main(int argc, char **argv)
 			else
 			{
 				// The actual facial landmark detection / tracking
-				detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_models[model], det_parameters[model]);
+				detection_success = LandmarkDetector::DetectLandmarksInVideo(rgb_image, face_models[model], det_parameters[model], grayscale_image);
 			}
 		}
 		//});
diff --git a/lib/local/LandmarkDetector/include/LandmarkDetectorFunc.h b/lib/local/LandmarkDetector/include/LandmarkDetectorFunc.h
index ec7b4dd1..a0dd9638 100644
--- a/lib/local/LandmarkDetector/include/LandmarkDetectorFunc.h
+++ b/lib/local/LandmarkDetector/include/LandmarkDetectorFunc.h
@@ -55,17 +55,17 @@ namespace LandmarkDetector
 	// Optionally can provide a bounding box from which to start tracking
 	// Can also optionally pass a grayscale image if it has already been computed to speed things up a bit
 	//================================================================================================================
-	bool DetectLandmarksInVideo(const cv::Mat &rgb_image, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image = cv::Mat());
-	bool DetectLandmarksInVideo(const cv::Mat &rgb_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image = cv::Mat());
+	bool DetectLandmarksInVideo(const cv::Mat &rgb_image, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image);
+	bool DetectLandmarksInVideo(const cv::Mat &rgb_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image);
 
 	//================================================================================================================
 	// Landmark detection in image, need to provide an image and optionally CLNF model together with parameters (default values work well)
 	// Optionally can provide a bounding box in which detection is performed (this is useful if multiple faces are to be detected in images)
 	// Can also optionally pass a grayscale image if it has already been computed to speed things up a bit
 	//================================================================================================================
-	bool DetectLandmarksInImage(const cv::Mat &rgb_image, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image = cv::Mat());
+	bool DetectLandmarksInImage(const cv::Mat &rgb_image, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image);
 
 	// Providing a bounding box
-	bool DetectLandmarksInImage(const cv::Mat &rgb_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image = cv::Mat());
+	bool DetectLandmarksInImage(const cv::Mat &rgb_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params, cv::Mat &grayscale_image);
 
 	//================================================
 	// Helper function for getting head pose from CLNF parameters
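
Note: after this change DetectLandmarksInVideo / DetectLandmarksInImage take the RGB frame as the primary input and the precomputed grayscale frame as an explicit last argument (the cv::Mat() default is gone). A minimal sketch of the updated calling convention, assuming sequence_reader, face_model and det_parameters are set up as in the existing FaceLandmarkVid.cpp:

	cv::Mat rgb_image = sequence_reader.GetNextFrame();
	while (!rgb_image.empty())
	{
		// Grayscale frame is now passed explicitly instead of being the primary input
		cv::Mat_<uchar> grayscale_image = sequence_reader.GetGrayFrame();

		bool detection_success = LandmarkDetector::DetectLandmarksInVideo(rgb_image, face_model, det_parameters, grayscale_image);

		// ... use face_model.detected_landmarks, pose and gaze estimates as before ...

		rgb_image = sequence_reader.GetNextFrame();
	}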