From bee6d185cb438969701d0e7a2a559e6eda950a14 Mon Sep 17 00:00:00 2001
From: Tadas Baltrusaitis
Date: Sun, 12 Nov 2017 21:40:30 +0000
Subject: [PATCH] Some more bug fixes with visualization and recording.
 Creating a metafile for recorded data.

---
 exe/FeatureExtraction/FeatureExtraction.cpp  |   2 +-
 .../Utilities/include/RecorderOpenFace.h     |   2 +
 lib/local/Utilities/include/Visualizer.h     |   5 +-
 lib/local/Utilities/src/RecorderOpenFace.cpp |  57 ++++++-
 lib/local/Utilities/src/Visualizer.cpp       | 154 +++++++++---------
 .../run_biwi_experiment.m                    |   4 +-
 6 files changed, 142 insertions(+), 82 deletions(-)

diff --git a/exe/FeatureExtraction/FeatureExtraction.cpp b/exe/FeatureExtraction/FeatureExtraction.cpp
index 6201ccf8..00db2db8 100644
--- a/exe/FeatureExtraction/FeatureExtraction.cpp
+++ b/exe/FeatureExtraction/FeatureExtraction.cpp
@@ -206,7 +206,7 @@ int main (int argc, char **argv)
 			visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
 			visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, detection_success);
 			visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
-			visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy));
+			visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
 			visualizer.ShowObservation();
 
 			// Setting up the recorder output
diff --git a/lib/local/Utilities/include/RecorderOpenFace.h b/lib/local/Utilities/include/RecorderOpenFace.h
index 70f75ecc..fe2179b2 100644
--- a/lib/local/Utilities/include/RecorderOpenFace.h
+++ b/lib/local/Utilities/include/RecorderOpenFace.h
@@ -105,9 +105,11 @@ namespace Utilities
 
 		// Keep track of the file and output root location
 		std::string record_root = "processed"; // By default we are writing in the processed directory in the working directory
+		std::string of_filename;
 		std::string filename;
 		std::string csv_filename;
 		std::string aligned_output_directory;
+		std::ofstream metadata_file;
 
 		// The actual output file stream that will be written
 		RecorderCSV csv_recorder;
diff --git a/lib/local/Utilities/include/Visualizer.h b/lib/local/Utilities/include/Visualizer.h
index 3a5d6952..b6e2c7fe 100644
--- a/lib/local/Utilities/include/Visualizer.h
+++ b/lib/local/Utilities/include/Visualizer.h
@@ -72,7 +72,7 @@ namespace Utilities
 		void SetObservationPose(const cv::Vec6d& pose, double confidence);
 
 		// Gaze related observations
-		void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks, const std::vector<cv::Point3d>& eye_landmarks3d);
+		void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks, const std::vector<cv::Point3d>& eye_landmarks3d, double confidence);
 
 		// Face alignment related observations
 		void SetObservationFaceAlign(const cv::Mat& aligned_face);
@@ -88,6 +88,9 @@ namespace Utilities
 		bool vis_track;
 		bool vis_hog;
 		bool vis_align;
+
+		// Can be adjusted to show less confident frames
+		double visualisation_boundary = 0.4;
 
 	private:
 
diff --git a/lib/local/Utilities/src/RecorderOpenFace.cpp b/lib/local/Utilities/src/RecorderOpenFace.cpp
index 16751641..8d6172c3 100644
--- a/lib/local/Utilities/src/RecorderOpenFace.cpp
+++ b/lib/local/Utilities/src/RecorderOpenFace.cpp
@@ -77,32 +77,80 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
 	// From the filename, strip out the name without directory and extension
 	filename = path(in_filename).replace_extension("").filename().string();
 
+	// Consuming the input arguments
+	bool* valid = new bool[arguments.size()];
+
+	for (size_t i = 0; i < arguments.size(); ++i)
+	{
+		valid[i] = true;
+	}
+
+	string record_root;
+	for (size_t i = 0; i < arguments.size(); ++i)
+	{
+		if (arguments[i].compare("-outroot") == 0)
+		{
+			record_root = arguments[i + 1];
+		}
+	}
+
 	// Determine output directory
+	bool output_found = false;
 	for (size_t i = 0; i < arguments.size(); ++i)
 	{
 		if (arguments[i].compare("-out_dir") == 0)
 		{
-			record_root = arguments[i + 1];
+			record_root = (boost::filesystem::path(record_root) / boost::filesystem::path(arguments[i + 1])).string();
+		}
+		else if (!output_found && arguments[i].compare("-of") == 0)
+		{
+			record_root = (boost::filesystem::path(record_root) / boost::filesystem::path(arguments[i + 1])).remove_filename().string();
+			filename = path(boost::filesystem::path(arguments[i + 1])).replace_extension("").filename().string();
+			valid[i] = false;
+			valid[i + 1] = false;
+			i++;
+			output_found = true;
+		}
+	}
+
+	for (int i = (int)arguments.size() - 1; i >= 0; --i)
+	{
+		if (!valid[i])
+		{
+			arguments.erase(arguments.begin() + i);
 		}
 	}
 
 	// Construct the directories required for the output
 	CreateDirectory(record_root);
 
+	// Create the filename for the general output file that contains all of the meta information about the recording
+	path of_det_name(filename);
+	of_det_name = path(record_root) / of_det_name.concat("_of_details.txt");
+
+	// Write in the of file what we are outputing what is the input etc.
+	metadata_file.open(of_det_name.string(), std::ios_base::out);
+
+	// Populate the metadata file
+	metadata_file << "Input:" << in_filename << endl;
+
 	// Create the required individual recorders, CSV, HOG, aligned, video
 	csv_filename = (path(record_root) / path(filename).replace_extension(".csv")).string();
+	metadata_file << "Output csv:" << csv_filename << endl;
 
 	// Consruct HOG recorder here
 	if(params.outputHOG())
 	{
 		std::string hog_filename = (path(record_root) / path(filename).replace_extension(".hog")).string();
 		hog_recorder.Open(hog_filename);
+		metadata_file << "Output HOG:" << csv_filename << endl;
 	}
 
 	// saving the videos
 	if (params.outputTrackedVideo())
 	{
 		this->video_filename = (path(record_root) / path(filename).replace_extension(".avi")).string();
+		metadata_file << "Output video:" << this->video_filename << endl;
 	}
 
 	// Prepare image recording
@@ -110,9 +158,10 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
 	{
 		aligned_output_directory = (path(record_root) / path(filename + "_aligned")).string();
 		CreateDirectory(aligned_output_directory);
+		metadata_file << "Output aligned directory:" << this->aligned_output_directory << endl;
 	}
-	
-	
+
+
 	observation_count = 0;
 }
 
@@ -279,6 +328,8 @@ void RecorderOpenFace::Close()
 	hog_recorder.Close();
 	csv_recorder.Close();
 	video_writer.release();
+	metadata_file.close();
+
 }
 
 
diff --git a/lib/local/Utilities/src/Visualizer.cpp b/lib/local/Utilities/src/Visualizer.cpp
index 51c72879..5c8dc9b8 100644
--- a/lib/local/Utilities/src/Visualizer.cpp
+++ b/lib/local/Utilities/src/Visualizer.cpp
@@ -109,23 +109,26 @@ void Visualizer::SetObservationHOG(const cv::Mat_& hog_descriptor, int n
 
 
 void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities)
 {
-	// Draw 2D landmarks on the image
-	int n = landmarks_2D.rows / 2;
-
-	// Drawing feature points
-	for (int i = 0; i < n; ++i)
+	if(confidence > visualisation_boundary)
 	{
-		if (visibilities.empty() || visibilities.at<int>(i))
+		// Draw 2D landmarks on the image
+		int n = landmarks_2D.rows / 2;
+
+		// Drawing feature points
+		for (int i = 0; i < n; ++i)
 		{
-			cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));
+			if (visibilities.empty() || visibilities.at<int>(i))
+			{
+				cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));
 
-			// A rough heuristic for drawn point size
-			int thickness = (int)std::ceil(3.0* ((double)captured_image.cols) / 640.0);
-			int thickness_2 = (int)std::ceil(1.0* ((double)captured_image.cols) / 640.0);
+				// A rough heuristic for drawn point size
+				int thickness = (int)std::ceil(3.0* ((double)captured_image.cols) / 640.0);
+				int thickness_2 = (int)std::ceil(1.0* ((double)captured_image.cols) / 640.0);
 
-			cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 255), thickness, CV_AA, draw_shiftbits);
-			cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
+				cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 255), thickness, CV_AA, draw_shiftbits);
+				cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
+			}
 		}
 	}
 }
@@ -133,7 +136,7 @@ void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
 {
-	double visualisation_boundary = 0.4;
+
 	// Only draw if the reliability is reasonable, the value is slightly ad-hoc
 	if (confidence > visualisation_boundary)
@@ -154,78 +157,79 @@ void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
 }
 
 
 // Eye gaze infomration drawing, first of eye landmarks then of gaze
-void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d)
+void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d, double confidence)
 {
-
-	if (eye_landmarks2d.size() > 0)
+	if(confidence > visualisation_boundary)
 	{
-		// First draw the eye region landmarks
-		for (size_t i = 0; i < eye_landmarks2d.size(); ++i)
+		if (eye_landmarks2d.size() > 0)
 		{
-			cv::Point featurePoint(cvRound(eye_landmarks2d[i].x * (double)draw_multiplier), cvRound(eye_landmarks2d[i].y * (double)draw_multiplier));
+			// First draw the eye region landmarks
+			for (size_t i = 0; i < eye_landmarks2d.size(); ++i)
+			{
+				cv::Point featurePoint(cvRound(eye_landmarks2d[i].x * (double)draw_multiplier), cvRound(eye_landmarks2d[i].y * (double)draw_multiplier));
 
-			// A rough heuristic for drawn point size
-			int thickness = 1;
-			int thickness_2 = 1;
+				// A rough heuristic for drawn point size
+				int thickness = 1;
+				int thickness_2 = 1;
 
-			size_t next_point = i + 1;
-			if (i == 7)
-				next_point = 0;
-			if (i == 19)
-				next_point = 8;
-			if (i == 27)
-				next_point = 20;
+				size_t next_point = i + 1;
+				if (i == 7)
+					next_point = 0;
+				if (i == 19)
+					next_point = 8;
+				if (i == 27)
+					next_point = 20;
 
-			if (i == 7 + 28)
-				next_point = 0 + 28;
-			if (i == 19 + 28)
-				next_point = 8 + 28;
-			if (i == 27 + 28)
-				next_point = 20 + 28;
+				if (i == 7 + 28)
+					next_point = 0 + 28;
+				if (i == 19 + 28)
+					next_point = 8 + 28;
+				if (i == 27 + 28)
+					next_point = 20 + 28;
 
-			cv::Point nextFeaturePoint(cvRound(eye_landmarks2d[next_point].x * (double)draw_multiplier), cvRound(eye_landmarks2d[next_point].y * (double)draw_multiplier));
-			if ((i < 28 && (i < 8 || i > 19)) || (i >= 28 && (i < 8 + 28 || i > 19 + 28)))
-				cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
-			else
-				cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(0, 0, 255), thickness_2, CV_AA, draw_shiftbits);
+				cv::Point nextFeaturePoint(cvRound(eye_landmarks2d[next_point].x * (double)draw_multiplier), cvRound(eye_landmarks2d[next_point].y * (double)draw_multiplier));
+				if ((i < 28 && (i < 8 || i > 19)) || (i >= 28 && (i < 8 + 28 || i > 19 + 28)))
+					cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
+				else
+					cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(0, 0, 255), thickness_2, CV_AA, draw_shiftbits);
+
+			}
+
+			// Now draw the gaze lines themselves
+			cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
+
+			// Grabbing the pupil location, to draw eye gaze need to know where the pupil is
+			cv::Point3d pupil_left(0, 0, 0);
+			cv::Point3d pupil_right(0, 0, 0);
+			for (size_t i = 0; i < 8; ++i)
+			{
+				pupil_left = pupil_left + eye_landmarks3d[i];
+				pupil_right = pupil_right + eye_landmarks3d[i + eye_landmarks3d.size()/2];
+			}
+			pupil_left = pupil_left / 8;
+			pupil_right = pupil_right / 8;
+
+			std::vector<cv::Point3d> points_left;
+			points_left.push_back(cv::Point3d(pupil_left));
+			points_left.push_back(cv::Point3d(pupil_left + cv::Point3d(gaze_direction0)*50.0));
+
+			std::vector<cv::Point3d> points_right;
+			points_right.push_back(cv::Point3d(pupil_right));
+			points_right.push_back(cv::Point3d(pupil_right + cv::Point3d(gaze_direction1)*50.0));
+
+			cv::Mat_<double> proj_points;
+			cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
+			Project(proj_points, mesh_0, fx, fy, cx, cy);
+			cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
+				cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
+
+			cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
+			Project(proj_points, mesh_1, fx, fy, cx, cy);
+			cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
+				cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
 		}
-
-		// Now draw the gaze lines themselves
-		cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
-
-		// Grabbing the pupil location, to draw eye gaze need to know where the pupil is
-		cv::Point3d pupil_left(0, 0, 0);
-		cv::Point3d pupil_right(0, 0, 0);
-		for (size_t i = 0; i < 8; ++i)
-		{
-			pupil_left = pupil_left + eye_landmarks3d[i];
-			pupil_right = pupil_right + eye_landmarks3d[i + eye_landmarks3d.size()/2];
-		}
-		pupil_left = pupil_left / 8;
-		pupil_right = pupil_right / 8;
-
-		std::vector<cv::Point3d> points_left;
-		points_left.push_back(cv::Point3d(pupil_left));
-		points_left.push_back(cv::Point3d(pupil_left + cv::Point3d(gaze_direction0)*50.0));
-
-		std::vector<cv::Point3d> points_right;
-		points_right.push_back(cv::Point3d(pupil_right));
-		points_right.push_back(cv::Point3d(pupil_right + cv::Point3d(gaze_direction1)*50.0));
-
-		cv::Mat_<double> proj_points;
-		cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
-		Project(proj_points, mesh_0, fx, fy, cx, cy);
-		cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
-			cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
-
-		cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
-		Project(proj_points, mesh_1, fx, fy, cx, cy);
-		cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
-			cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
-
 	}
-
 }
 
 void Visualizer::ShowObservation()
diff --git a/matlab_runners/Head Pose Experiments/run_biwi_experiment.m b/matlab_runners/Head Pose Experiments/run_biwi_experiment.m
index 1c03ea26..b7706eb7 100644
--- a/matlab_runners/Head Pose Experiments/run_biwi_experiment.m
+++ b/matlab_runners/Head Pose Experiments/run_biwi_experiment.m
@@ -14,7 +14,7 @@ dbSeqDir = dbSeqDir(3:end);
 
 output_dir = cat(2, output_dir, '/');
 
-command = sprintf('%s -inroot "%s" -out_dir "%s" -fx 505 -fy 505 -cx 320 -cy 240 -pose -vis-track ', executable, rootDir, output_dir);
+command = sprintf('%s -inroot "%s" -outroot "%s" -fx 505 -fy 505 -cx 320 -cy 240 -pose -vis-track ', executable, rootDir, output_dir);
 
 if(verbose)
     command = cat(2, command, [' -tracked ' outputVideo]);
@@ -26,7 +26,7 @@ end
 
 for i=1:numel(dbSeqDir)
     inputFile = [biwiDir dbSeqDir(i).name '/colour.avi'];
-    command = cat(2, command, sprintf(' -f "%s" ', inputFile));
+    command = sprintf('%s -f "%s" -of "%s" ', command, inputFile, dbSeqDir(i).name);
 end
 
 if(isunix)