A number of changes to bring up master to version 2.0.6 (#581)

- Removed duplicated code and fixed some C++ warnings
- Fixed the snapshot function in HeadPoseLive (tested on PC)
- C++ code cleanup (https://github.com/TadasBaltrusaitis/OpenFace/issues/533, https://github.com/TadasBaltrusaitis/OpenFace/issues/534)
- Adding a Visual Studio 2017 solution file
- Moving to C++ OpenCV convention (thanks @taoyudong)
- Storing models in additional locations (see wiki and scripts) https://github.com/TadasBaltrusaitis/OpenFace/issues/553
This commit is contained in:
Tadas Baltrusaitis
2018-10-06 21:13:18 +01:00
committed by GitHub
parent 795c707314
commit be9b57703c
76 changed files with 650 additions and 671 deletions

View File

@@ -90,7 +90,8 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
@@ -124,7 +125,8 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>

View File

@@ -128,7 +128,8 @@
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;_WINDOWS;_USRDLL;CPPINEROP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\FaceAnalyser\include;$(SolutionDir)lib\local\GazeAnalyser\include;$(SolutionDir)lib\local\Utilities\include;$(SolutionDir)lib\3rdParty\CameraEnumerator;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
@@ -168,7 +169,8 @@
<PreprocessorDefinitions>NDEBUG;_WINDOWS;_USRDLL;CPPINEROP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\FaceAnalyser\include;$(SolutionDir)lib\local\GazeAnalyser\include;$(SolutionDir)lib\local\Utilities\include;$(SolutionDir)lib\3rdParty\CameraEnumerator;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>

View File

@@ -33,8 +33,8 @@
///////////////////////////////////////////////////////////////////////////////
// FaceAnalyser_Interop.h
#ifndef __FACE_ANALYSER_INTEROP_h_
#define __FACE_ANALYSER_INTEROP_h_
#ifndef FACE_ANALYSER_INTEROP_H
#define FACE_ANALYSER_INTEROP_H
#pragma once
@@ -269,32 +269,12 @@ public:
// but not automatically called on explicit Dispose().
// May be called multiple times.
!FaceAnalyserManaged()
{
if (hog_features != nullptr)
{
delete hog_features;
}
if (aligned_face != nullptr)
{
delete aligned_face;
}
if (num_cols != nullptr)
{
delete num_cols;
}
if (num_rows != nullptr)
{
delete num_rows;
}
if (face_analyser != nullptr)
{
delete face_analyser;
}
{
delete hog_features;
delete aligned_face;
delete num_cols;
delete num_rows;
delete face_analyser;
}
// Destructor. Called on explicit Dispose() only.
@@ -306,4 +286,4 @@ public:
};
}
#endif
#endif // FACE_ANALYSER_INTEROP_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __LANDMARK_DETECTOR_UTILS_INTEROP_h_
#define __LANDMARK_DETECTOR_UTILS_INTEROP_h_
#ifndef FACE_DETECTOR_INTEROP_H
#define FACE_DETECTOR_INTEROP_H
#pragma once
@@ -139,18 +139,9 @@ namespace FaceDetectorInterop {
// May be called multiple times.
!FaceDetector()
{
if (face_detector_hog != nullptr)
{
delete face_detector_hog;
}
if (face_detector_mtcnn != nullptr)
{
delete face_detector_mtcnn;
}
if (face_detector_haar != nullptr)
{
delete face_detector_haar;
}
delete face_detector_hog;
delete face_detector_mtcnn;
delete face_detector_haar;
}
// Destructor. Called on explicit Dispose() only.
@@ -163,4 +154,4 @@ namespace FaceDetectorInterop {
}
#endif
#endif // FACE_DETECTOR_INTEROP_H

View File

@@ -33,9 +33,8 @@
//
///////////////////////////////////////////////////////////////////////////////
// FaceAnalyser_Interop.h
#ifndef __GAZE_ANALYSER_INTEROP_h_
#define __GAZE_ANALYSER_INTEROP_h_
#ifndef GAZE_ANALYSER_INTEROP_H
#define GAZE_ANALYSER_INTEROP_H
#pragma once
@@ -147,7 +146,7 @@ namespace GazeAnalyser_Interop {
// Perform manual projection of points
vector<cv::Point2f> imagePoints_left;
for (int i = 0; i < points_left.size(); ++i)
for (size_t i = 0; i < points_left.size(); ++i)
{
float x = points_left[i].x * fx / points_left[i].z + cx;
float y = points_left[i].y * fy / points_left[i].z + cy;
@@ -156,7 +155,7 @@ namespace GazeAnalyser_Interop {
}
vector<cv::Point2f> imagePoints_right;
for (int i = 0; i < points_right.size(); ++i)
for (size_t i = 0; i < points_right.size(); ++i)
{
float x = points_right[i].x * fx / points_right[i].z + cx;
float y = points_right[i].y * fy / points_right[i].z + cy;
@@ -175,31 +174,11 @@ namespace GazeAnalyser_Interop {
// May be called multiple times.
!GazeAnalyserManaged()
{
if (gazeDirection0 != nullptr)
{
delete gazeDirection0;
}
if (gazeDirection1 != nullptr)
{
delete gazeDirection1;
}
if (gazeAngle != nullptr)
{
delete gazeAngle;
}
if (pupil_left != nullptr)
{
delete pupil_left;
}
if (pupil_right != nullptr)
{
delete pupil_right;
}
delete gazeDirection0;
delete gazeDirection1;
delete gazeAngle;
delete pupil_left;
delete pupil_right;
}
// Destructor. Called on explicit Dispose() only.
@@ -211,4 +190,4 @@ namespace GazeAnalyser_Interop {
};
}
#endif
#endif // GAZE_ANALYSER_INTEROP_H

View File

@@ -96,7 +96,7 @@ namespace UtilitiesOF {
std::vector<std::string> image_files_std;
for (size_t i = 0; i < image_files->Count; ++i)
for (int i = 0; i < image_files->Count; ++i)
{
std::string image_file = msclr::interop::marshal_as<std::string>(image_files[i]);
image_files_std.push_back(image_file);
@@ -175,25 +175,10 @@ namespace UtilitiesOF {
// May be called multiple times.
!ImageReader()
{
// Automatically closes capture object before freeing memory.
if (m_image_capture != nullptr)
{
delete m_image_capture;
}
if (m_rgb_frame != nullptr)
{
delete m_rgb_frame;
}
if (m_gray_frame != nullptr)
{
delete m_gray_frame;
}
if (m_is_opened != nullptr)
{
delete m_is_opened;
}
delete m_image_capture;
delete m_rgb_frame;
delete m_gray_frame;
delete m_is_opened;
}
// Destructor. Called on explicit Dispose() only.

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __LANDMARK_DETECTOR_INTEROP_h_
#define __LANDMARK_DETECTOR_INTEROP_h_
#ifndef LANDMARK_DETECTOR_INTEROP_H
#define LANDMARK_DETECTOR_INTEROP_H
#pragma once
@@ -450,4 +450,4 @@ namespace CppInterop {
}
#endif
#endif // LANDMARK_DETECTOR_INTEROP_H

View File

@@ -31,8 +31,6 @@
//
///////////////////////////////////////////////////////////////////////////////
// Camera_Interop.h
#pragma once
#pragma unmanaged
@@ -77,12 +75,7 @@ namespace UtilitiesOF {
!RecorderOpenFaceParameters()
{
// Automatically closes capture object before freeing memory.
if (m_params != nullptr)
{
delete m_params;
}
delete m_params;
}
// Destructor. Called on explicit Dispose() only.
@@ -249,12 +242,7 @@ namespace UtilitiesOF {
// May be called multiple times.
!RecorderOpenFace()
{
// Automatically closes capture object before freeing memory.
if (m_recorder != nullptr)
{
delete m_recorder;
}
delete m_recorder;
}
// Destructor. Called on explicit Dispose() only.

View File

@@ -222,21 +222,9 @@ namespace UtilitiesOF {
// May be called multiple times.
!SequenceReader()
{
// Automatically closes capture object before freeing memory.
if (m_sequence_capture != nullptr)
{
delete m_sequence_capture;
}
if (m_rgb_frame != nullptr)
{
delete m_rgb_frame;
}
if (m_gray_frame != nullptr)
{
delete m_gray_frame;
}
delete m_sequence_capture;
delete m_rgb_frame;
delete m_gray_frame;
}
// Destructor. Called on explicit Dispose() only.

View File

@@ -156,11 +156,7 @@ namespace UtilitiesOF {
!Visualizer()
{
// Automatically closes capture object before freeing memory.
if (m_visualizer != nullptr)
{
delete m_visualizer;
}
delete m_visualizer;
}
// Destructor. Called on explicit Dispose() only.

View File

@@ -118,7 +118,8 @@
<Optimization>Disabled</Optimization>
<SDLCheck>false</SDLCheck>
<AdditionalIncludeDirectories>./include;$(SolutionDir)lib/local/Utilities/include;$(SolutionDir)lib/local/LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</ClCompile>
@@ -167,7 +168,8 @@
<SDLCheck>
</SDLCheck>
<AdditionalIncludeDirectories>./include;$(SolutionDir)lib/local/Utilities/include;$(SolutionDir)lib/local/LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __FACEANALYSER_h_
#define __FACEANALYSER_h_
#ifndef FACEANALYSER_H
#define FACEANALYSER_H
// STL includes
#include <string>
@@ -242,4 +242,4 @@ private:
};
//===========================================================================
}
#endif
#endif // FACEANALYSER_H

View File

@@ -33,8 +33,8 @@
///////////////////////////////////////////////////////////////////////////////
// Parameters of the Face analyser
#ifndef __FACE_ANALYSER_PARAM_H
#define __FACE_ANALYSER_PARAM_H
#ifndef FACE_ANALYSER_PARAM_H
#define FACE_ANALYSER_PARAM_H
#include <vector>
#include <opencv2/core/core.hpp>
@@ -101,4 +101,4 @@ private:
}
#endif // __FACE_ANALYSER_PARAM_H
#endif // FACE_ANALYSER_PARAM_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __FACE_UTILS_h_
#define __FACE_UTILS_h_
#ifndef FACE_UTILS_H
#define FACE_UTILS_H
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
@@ -55,17 +55,6 @@ namespace FaceAnalysis
// The following two methods go hand in hand
void ExtractSummaryStatistics(const cv::Mat_<double>& descriptors, cv::Mat_<double>& sum_stats, bool mean, bool stdev, bool max_min);
void AddDescriptor(cv::Mat_<double>& descriptors, cv::Mat_<double> new_descriptor, int curr_frame, int num_frames_to_keep = 120);
//===========================================================================
// Point set and landmark manipulation functions
//===========================================================================
// Using Kabsch's algorithm for aligning shapes
//This assumes that align_from and align_to are already mean normalised
cv::Matx22f AlignShapesKabsch2D(const cv::Mat_<float>& align_from, const cv::Mat_<float>& align_to);
//=============================================================================
// Basically Kabsch's algorithm but also allows the collection of points to be different in scale from each other
cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst);
//============================================================================
// Matrix reading functionality
@@ -81,4 +70,4 @@ namespace FaceAnalysis
void SkipComments(std::ifstream& stream);
}
#endif
#endif // FACE_UTILS_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __SVMDYNAMICLIN_h_
#define __SVMDYNAMICLIN_h_
#ifndef SVM_DYNAMIC_LIN_H
#define SVM_DYNAMIC_LIN_H
#include <vector>
#include <string>
@@ -84,4 +84,4 @@ private:
};
//===========================================================================
}
#endif
#endif // SVM_DYNAMIC_LIN_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __SVMSTATICLIN_h_
#define __SVMSTATICLIN_h_
#ifndef SVM_STATIC_LIN_H
#define SVM_STATIC_LIN_H
#include <vector>
#include <string>
@@ -84,4 +84,4 @@ private:
};
//===========================================================================
}
#endif
#endif // SVM_STATIC_LIN_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __SVRDYNAMICLINREGRESSORS_h_
#define __SVRDYNAMICLINREGRESSORS_h_
#ifndef SVR_DYNAMIC_LIN_REGRESSORS_H
#define SVR_DYNAMIC_LIN_REGRESSORS_H
#include <vector>
#include <string>
@@ -89,4 +89,4 @@ private:
};
//===========================================================================
}
#endif
#endif // SVR_DYNAMIC_LIN_REGRESSORS_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __SVRSTATICLINREGRESSORS_h_
#define __SVRSTATICLINREGRESSORS_h_
#ifndef SVR_STATIC_LIN_REGRESSORS_H
#define SVR_STATIC_LIN_REGRESSORS_H
#include <vector>
#include <string>
@@ -81,4 +81,4 @@ private:
};
//===========================================================================
}
#endif
#endif // SVR_STATIC_LIN_REGRESSORS_H

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -370,7 +370,7 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
if (aligned_face_for_output.channels() == 3 && out_grayscale)
{
cvtColor(aligned_face_for_output, aligned_face_for_output, CV_BGR2GRAY);
cvtColor(aligned_face_for_output, aligned_face_for_output, cv::COLOR_BGR2GRAY);
}
// Extract HOG descriptor from the frame and convert it to a useable format
@@ -703,7 +703,7 @@ void FaceAnalyser::ExtractAllPredictionsOfflineClass(vector<std::pair<std::strin
// Perform a moving average of 7 frames on classifications
int window_size = 7;
vector<double> au_vals_tmp = au_vals;
if(au_vals.size() > (window_size - 1) / 2)
if((int)au_vals.size() > (window_size - 1) / 2)
{
for (size_t i = (window_size - 1)/2; i < au_vals.size() - (window_size - 1) / 2; ++i)
{

View File

@@ -34,6 +34,8 @@
#include <Face_utils.h>
#include <RotationHelpers.h>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
@@ -131,7 +133,7 @@ namespace FaceAnalysis
extract_rigid_points(source_landmarks, destination_landmarks);
}
cv::Matx22f scale_rot_matrix = AlignShapesWithScale(source_landmarks, destination_landmarks);
cv::Matx22f scale_rot_matrix = Utilities::AlignShapesWithScale(source_landmarks, destination_landmarks);
cv::Matx23f warp_matrix;
warp_matrix(0,0) = scale_rot_matrix(0,0);
@@ -170,7 +172,7 @@ namespace FaceAnalysis
extract_rigid_points(source_landmarks, destination_landmarks);
}
cv::Matx22f scale_rot_matrix = AlignShapesWithScale(source_landmarks, destination_landmarks);
cv::Matx22f scale_rot_matrix = Utilities::AlignShapesWithScale(source_landmarks, destination_landmarks);
cv::Matx23f warp_matrix;
warp_matrix(0,0) = scale_rot_matrix(0,0);
@@ -333,88 +335,6 @@ namespace FaceAnalysis
new_descriptor.copyTo(descriptors.row(row_to_change));
}
//===========================================================================
// Point set and landmark manipulation functions
//===========================================================================
// Using Kabsch's algorithm for aligning shapes
//This assumes that align_from and align_to are already mean normalised
cv::Matx22f AlignShapesKabsch2D(const cv::Mat_<float>& align_from, const cv::Mat_<float>& align_to)
{
cv::SVD svd(align_from.t() * align_to);
// make sure no reflection is there
// corr ensures that we do only rotaitons and not reflections
float d = cv::determinant(svd.vt.t() * svd.u.t());
cv::Matx22f corr = cv::Matx22f::eye();
if (d > 0)
{
corr(1, 1) = 1;
}
else
{
corr(1, 1) = -1;
}
cv::Matx22f R;
cv::Mat(svd.vt.t()*cv::Mat(corr)*svd.u.t()).copyTo(R);
return R;
}
//=============================================================================
// Basically Kabsch's algorithm but also allows the collection of points to be different in scale from each other
cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst)
{
int n = src.rows;
// First we mean normalise both src and dst
float mean_src_x = cv::mean(src.col(0))[0];
float mean_src_y = cv::mean(src.col(1))[0];
float mean_dst_x = cv::mean(dst.col(0))[0];
float mean_dst_y = cv::mean(dst.col(1))[0];
cv::Mat_<float> src_mean_normed = src.clone();
src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;
cv::Mat_<float> dst_mean_normed = dst.clone();
dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;
// Find the scaling factor of each
cv::Mat src_sq;
cv::pow(src_mean_normed, 2, src_sq);
cv::Mat dst_sq;
cv::pow(dst_mean_normed, 2, dst_sq);
float s_src = sqrt(cv::sum(src_sq)[0] / n);
float s_dst = sqrt(cv::sum(dst_sq)[0] / n);
src_mean_normed = src_mean_normed / s_src;
dst_mean_normed = dst_mean_normed / s_dst;
float s = s_dst / s_src;
// Get the rotation
cv::Matx22f R = AlignShapesKabsch2D(src_mean_normed, dst_mean_normed);
cv::Matx22f A;
cv::Mat(s * R).copyTo(A);
cv::Mat_<float> aligned = (cv::Mat(cv::Mat(A) * src.t())).t();
cv::Mat_<float> offset = dst - aligned;
float t_x = cv::mean(offset.col(0))[0];
float t_y = cv::mean(offset.col(1))[0];
return A;
}
//============================================================================
// Matrix reading functionality
//============================================================================

View File

@@ -106,7 +106,8 @@
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;../Utilities/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -139,7 +140,8 @@
<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;../Utilities/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
</ClCompile>
<Link>

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __GAZEESTIMATION_h_
#define __GAZEESTIMATION_h_
#ifndef GAZE_ESTIMATION_H
#define GAZE_ESTIMATION_H
#include "LandmarkDetectorModel.h"
@@ -51,4 +51,4 @@ namespace GazeAnalysis
cv::Point3f GetPupilPosition(cv::Mat_<float> eyeLdmks3d);
}
#endif
#endif // GAZE_ESTIMATION_H

View File

@@ -124,7 +124,8 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.4\classifiers" "$(OutDir)c
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/Zm300 %(AdditionalOptions)</AdditionalOptions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -173,7 +174,8 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.4\classifiers" "$(OutDir)c
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<OpenMPSupport>false</OpenMPSupport>
<AdditionalOptions>/Zm300 %(AdditionalOptions)</AdditionalOptions>

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __CCNF_PATCH_EXPERT_h_
#define __CCNF_PATCH_EXPERT_h_
#ifndef CCNF_PATCH_EXPERT_H
#define CCNF_PATCH_EXPERT_H
#include <opencv2/core/core.hpp>
@@ -126,4 +126,4 @@ public:
};
//===========================================================================
}
#endif
#endif // CCNF_PATCH_EXPERT_H

View File

@@ -33,8 +33,8 @@
///////////////////////////////////////////////////////////////////////////////
#ifndef __CEN_PATCH_EXPERT_h_
#define __CEN_PATCH_EXPERT_h_
#ifndef CEN_PATCH_EXPERT_H
#define CEN_PATCH_EXPERT_H
// system includes
#include <vector>
@@ -88,4 +88,4 @@ namespace LandmarkDetector
void interpolationMatrix(cv::Mat_<float>& mapMatrix, int response_height, int response_width, int input_width, int input_height);
}
#endif
#endif // CEN_PATCH_EXPERT_H

View File

@@ -31,9 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
// Header for all external CLNF/CLM-Z/CLM methods of interest to the user
#ifndef __CNN_UTILS_h_
#define __CNN_UTILS_h_
#ifndef CNN_UTILS_H
#define CNN_UTILS_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -60,4 +59,4 @@ namespace LandmarkDetector
// Convolution using matrix multiplication and OpenBLAS optimization, can also provide a pre-allocated im2col result for faster processing
void convolution_direct_blas(std::vector<cv::Mat_<float> >& outputs, const std::vector<cv::Mat_<float> >& input_maps, const cv::Mat_<float>& weight_matrix, int height_k, int width_k, cv::Mat_<float>& pre_alloc_im2col);
}
#endif
#endif // CNN_UTILS_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __FACE_DETECTOR_MTCNN_h_
#define __FACE_DETECTOR_MTCNN_h_
#ifndef FACE_DETECTOR_MTCNN_H
#define FACE_DETECTOR_MTCNN_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -132,4 +132,4 @@ namespace LandmarkDetector
};
}
#endif
#endif // FACE_DETECTOR_MTCNN_H

View File

@@ -33,12 +33,12 @@
///////////////////////////////////////////////////////////////////////////////
#ifndef __LANDMARK_CORE_INCLUDES_h_
#define __LANDMARK_CORE_INCLUDES_h_
#ifndef LANDMARK_CORE_INCLUDES_H
#define LANDMARK_CORE_INCLUDES_H
#include "LandmarkDetectorModel.h"
#include "LandmarkDetectorFunc.h"
#include "LandmarkDetectorParameters.h"
#include "LandmarkDetectorUtils.h"
#endif
#endif // LANDMARK_CORE_INCLUDES_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __LANDMARK_DETECTION_VALIDATOR_h_
#define __LANDMARK_DETECTION_VALIDATOR_h_
#ifndef LANDMARK_DETECTION_VALIDATOR_H
#define LANDMARK_DETECTION_VALIDATOR_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -114,4 +114,4 @@ private:
};
}
#endif
#endif // LANDMARK_DETECTION_VALIDATOR_H

View File

@@ -32,11 +32,11 @@
//
///////////////////////////////////////////////////////////////////////////////
// Header for all external CLM/CLNF/CLM-Z methods of interest to the user
// Header for all external CLM/CLNF/CE-CLM methods of interest to the user
//
//
#ifndef __LANDMARK_DETECTOR_FUNC_h_
#define __LANDMARK_DETECTOR_FUNC_h_
#ifndef LANDMARK_DETECTOR_FUNC_H
#define LANDMARK_DETECTOR_FUNC_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -78,4 +78,4 @@ namespace LandmarkDetector
// The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
cv::Vec6f GetPoseWRTCamera(const CLNF& clnf_model, float fx, float fy, float cx, float cy);
}
#endif
#endif // LANDMARK_DETECTOR_FUNC_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __LANDMARK_DETECTOR_MODEL_h_
#define __LANDMARK_DETECTOR_MODEL_h_
#ifndef LANDMARK_DETECTOR_MODEL_H
#define LANDMARK_DETECTOR_MODEL_H
// OpenCV dependencies
#include <opencv2/core/core.hpp>
@@ -209,4 +209,4 @@ private:
};
//===========================================================================
}
#endif
#endif // LANDMARK_DETECTOR_MODEL_H

View File

@@ -32,9 +32,9 @@
//
///////////////////////////////////////////////////////////////////////////////
// Parameters of the CLNF, CLM-Z and CLM trackers
#ifndef __LANDMARK_DETECTOR_PARAM_H
#define __LANDMARK_DETECTOR_PARAM_H
// Parameters of the CE-CLM, CLNF, and CLM trackers
#ifndef LANDMARK_DETECTOR_PARAM_H
#define LANDMARK_DETECTOR_PARAM_H
#include <vector>
@@ -120,4 +120,4 @@ struct FaceModelParameters
}
#endif // __LANDMARK_DETECTOR_PARAM_H
#endif // LANDMARK_DETECTOR_PARAM_H

View File

@@ -32,9 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
// Header for all external CLNF/CLM-Z/CLM methods of interest to the user
#ifndef __LANDMARK_DETECTOR_UTILS_h_
#define __LANDMARK_DETECTOR_UTILS_h_
#ifndef LANDMARK_DETECTOR_UTILS_H
#define LANDMARK_DETECTOR_UTILS_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -58,19 +57,6 @@ namespace LandmarkDetector
// templ is the template we are convolving with, templ_dfts it's dfts at varying windows sizes (optional), _result - the output, method the type of convolution
void matchTemplate_m(const cv::Mat_<float>& input_img, cv::Mat_<double>& img_dft, cv::Mat& _integral_img, cv::Mat& _integral_img_sq, const cv::Mat_<float>& templ, map<int, cv::Mat_<double> >& templ_dfts, cv::Mat_<float>& result, int method);
//===========================================================================
// Point set and landmark manipulation functions
//===========================================================================
// Using Kabsch's algorithm for aligning shapes
//This assumes that align_from and align_to are already mean normalised
cv::Matx22d AlignShapesKabsch2D(const cv::Mat_<double>& align_from, const cv::Mat_<double>& align_to);
cv::Matx22f AlignShapesKabsch2D_f(const cv::Mat_<float>& align_from, const cv::Mat_<float>& align_to);
//=============================================================================
// Basically Kabsch's algorithm but also allows the collection of points to be different in scale from each other
cv::Matx22d AlignShapesWithScale(cv::Mat_<double>& src, cv::Mat_<double> dst);
cv::Matx22f AlignShapesWithScale_f(cv::Mat_<float>& src, cv::Mat_<float> dst);
// Useful utility for grabing a bounding box around a set of 2D landmarks (as a 1D 2n x 1 vector of xs followed by doubles or as an n x 2 vector)
void ExtractBoundingBox(const cv::Mat_<float>& landmarks, float &min_x, float &max_x, float &min_y, float &max_y);
@@ -119,4 +105,4 @@ namespace LandmarkDetector
}
#endif
#endif // LANDMARK_DETECTOR_UTILS_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __PAW_h_
#define __PAW_h_
#ifndef PAW_H
#define PAW_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -135,4 +135,4 @@ namespace LandmarkDetector
};
//===========================================================================
}
#endif
#endif // PAW_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __PDM_h_
#define __PDM_h_
#ifndef PDM_H
#define PDM_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -102,4 +102,4 @@ class PDM{
};
//===========================================================================
}
#endif
#endif // PDM_H

View File

@@ -32,8 +32,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __Patch_experts_h_
#define __Patch_experts_h_
#ifndef PATCH_EXPERTS_H
#define PATCH_EXPERTS_H
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -121,4 +121,4 @@ private:
};
}
#endif
#endif // PATCH_EXPERTS_H

View File

@@ -33,8 +33,8 @@
///////////////////////////////////////////////////////////////////////////////
#ifndef __SVR_PATCH_EXPERT_h_
#define __SVR_PATCH_EXPERT_h_
#ifndef SVR_PATCH_EXPERT_H
#define SVR_PATCH_EXPERT_H
// system includes
#include <map>
@@ -111,4 +111,4 @@ class Multi_SVR_patch_expert{
};
}
#endif
#endif // SVR_PATCH_EXPERT_H

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -264,11 +264,11 @@ void CCNF_neuron::Response(const cv::Mat_<float> &im, cv::Mat_<double> &im_dft,
if(neuron_type == 3)
{
// In case of depth we use per area, rather than per patch normalisation
matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF); // the linear multiplication, efficient calc of response
matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, cv::TM_CCOEFF); // the linear multiplication, efficient calc of response
}
else
{
matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF_NORMED); // the linear multiplication, efficient calc of response
matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, cv::TM_CCOEFF_NORMED); // the linear multiplication, efficient calc of response
}
cv::MatIterator_<float> p = resp.begin();

View File

@@ -77,7 +77,7 @@ namespace LandmarkDetector
auto iter = input_output_maps[0].row(k).begin();
float neg_mult = prelu_weights.at<float>(k);
for (size_t i = 0; i < w; ++i)
for (int i = 0; i < w; ++i)
{
float in_val = *iter;
// Apply the PReLU
@@ -105,7 +105,7 @@ namespace LandmarkDetector
cv::Size orig_size = input_maps[0].size();
cv::Mat_<float> input_concat((int)input_maps.size(), input_maps[0].cols * input_maps[0].rows);
for (int in = 0; in < input_maps.size(); ++in)
for (int in = 0; in < (int)input_maps.size(); ++in)
{
cv::Mat_<float> add = input_maps[in];

View File

@@ -354,9 +354,9 @@ void CNN::Read(const string& location)
// Rearrange the flattened kernels into weight matrices for direct convolution computation
cv::Mat_<float> weight_matrix(num_in_maps * kernels_rearr[0][0].rows * kernels_rearr[0][0].cols, num_kernels);
for (size_t k = 0; k < num_kernels; ++k)
for (int k = 0; k < num_kernels; ++k)
{
for (size_t i = 0; i < num_in_maps; ++i)
for (int i = 0; i < num_in_maps; ++i)
{
// Flatten the kernel
cv::Mat_<float> k_flat = kernels_rearr[k][i].t();
@@ -370,7 +370,7 @@ void CNN::Read(const string& location)
// Add a bias term to the weight matrix for efficiency
cv::Mat_<float> W(weight_matrix.rows, weight_matrix.cols + 1, 1.0);
for (size_t k = 0; k < weight_matrix.rows; ++k)
for (int k = 0; k < weight_matrix.rows; ++k)
{
W.at<float>(k, weight_matrix.cols) = biases[k];
}

View File

@@ -263,9 +263,9 @@ void DetectionValidator::Read(string location)
// Rearrange the flattened kernels into weight matrices for direct convolution computation
cv::Mat_<float> weight_matrix(num_in_maps * kernels_rearr[0][0].rows * kernels_rearr[0][0].cols, num_kernels);
for (size_t k = 0; k < num_kernels; ++k)
for (int k = 0; k < num_kernels; ++k)
{
for (size_t i = 0; i < num_in_maps; ++i)
for (int i = 0; i < num_in_maps; ++i)
{
// Flatten the kernel
cv::Mat_<float> k_flat = kernels_rearr[k][i].t();
@@ -279,7 +279,7 @@ void DetectionValidator::Read(string location)
// Add a bias term to the weight matrix for efficiency
cv::Mat_<float> W(weight_matrix.rows, weight_matrix.cols + 1, 1.0);
for (size_t k = 0; k < weight_matrix.rows; ++k)
for (int k = 0; k < weight_matrix.rows; ++k)
{
W.at<float>(k, weight_matrix.cols) = biases[k];
}

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -194,7 +194,7 @@ void CorrectGlobalParametersVideo(const cv::Mat_<uchar> &grayscale_image, CLNF&
// Resizing the template
cv::Mat corr_out;
cv::matchTemplate(image, clnf_model.face_template, corr_out, CV_TM_CCOEFF_NORMED);
cv::matchTemplate(image, clnf_model.face_template, corr_out, cv::TM_CCOEFF_NORMED);
// Actually matching it
//double min, max;
@@ -520,7 +520,7 @@ bool DetectLandmarksInImageMultiHypEarlyTerm(const cv::Mat_<uchar> &grayscale_im
// Setup the parameters accordingly
// Only do the first iteration
for (int i = 1; i < params.window_sizes_current.size(); ++i)
for (size_t i = 1; i < params.window_sizes_current.size(); ++i)
{
params.window_sizes_current[i] = 0;
}

View File

@@ -1128,7 +1128,7 @@ float CLNF::NU_RLMS(cv::Vec6f& final_global, cv::Mat_<float>& final_local, const
// Solve for the parameter update (from Baltrusaitis 2013 based on eq (36) Saragih 2011)
cv::Mat_<float> param_update;
cv::solve(Hessian, J_w_t_m, param_update, CV_CHOLESKY);
cv::solve(Hessian, J_w_t_m, param_update, cv::DECOMP_CHOLESKY);
// update the reference
pdm.UpdateModelParameters(param_update, current_local, current_global);

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -150,11 +150,11 @@ namespace LandmarkDetector
void matchTemplate_m(const cv::Mat_<float>& input_img, cv::Mat_<double>& img_dft, cv::Mat& _integral_img, cv::Mat& _integral_img_sq, const cv::Mat_<float>& templ, map<int, cv::Mat_<double> >& templ_dfts, cv::Mat_<float>& result, int method)
{
int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
bool isNormed = method == CV_TM_CCORR_NORMED ||
method == CV_TM_SQDIFF_NORMED ||
method == CV_TM_CCOEFF_NORMED;
int numType = method == cv::TM_CCORR || method == cv::TM_CCORR_NORMED ? 0 :
method == cv::TM_CCOEFF || method == cv::TM_CCOEFF_NORMED ? 1 : 2;
bool isNormed = method == cv::TM_CCORR_NORMED ||
method == cv::TM_SQDIFF_NORMED ||
method == cv::TM_CCOEFF_NORMED;
// Assume result is defined properly
if (result.empty())
@@ -164,7 +164,7 @@ namespace LandmarkDetector
}
LandmarkDetector::crossCorr_m(input_img, img_dft, templ, templ_dfts, result);
if (method == CV_TM_CCORR)
if (method == cv::TM_CCORR)
return;
double invArea = 1. / ((double)templ.rows * templ.cols);
@@ -174,7 +174,7 @@ namespace LandmarkDetector
double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;
double templNorm = 0, templSum2 = 0;
if (method == CV_TM_CCOEFF)
if (method == cv::TM_CCOEFF)
{
// If it has not been precomputed compute it now
if (_integral_img.empty())
@@ -200,7 +200,7 @@ namespace LandmarkDetector
templNorm = templSdv[0] * templSdv[0] + templSdv[1] * templSdv[1] + templSdv[2] * templSdv[2] + templSdv[3] * templSdv[3];
if (templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED)
if (templNorm < DBL_EPSILON && method == cv::TM_CCOEFF_NORMED)
{
result.setTo(1.0);
return;
@@ -276,7 +276,7 @@ namespace LandmarkDetector
else if (fabs(num) < t*1.125)
num = num > 0 ? 1 : -1;
else
num = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
num = method != cv::TM_SQDIFF_NORMED ? 0 : 1;
}
rrow[j] = (float)num;
@@ -284,163 +284,6 @@ namespace LandmarkDetector
}
}
//===========================================================================
// Point set and landmark manipulation functions
//===========================================================================
// Align two mean-normalised 2D shapes using Kabsch's algorithm.
// Both align_from and align_to must already have zero mean; the returned
// 2x2 matrix is the optimal rotation taking align_from onto align_to.
cv::Matx22d AlignShapesKabsch2D(const cv::Mat_<double>& align_from, const cv::Mat_<double>& align_to)
{
	// SVD of the cross-covariance matrix yields the optimal rotation
	cv::SVD svd(align_from.t() * align_to);

	// Disallow reflections: when det(V * U^T) is negative, flip the sign of
	// the last axis so the result is a pure rotation
	double det_sign = cv::determinant(svd.vt.t() * svd.u.t());
	cv::Matx22d corr = cv::Matx22d::eye();
	corr(1, 1) = (det_sign > 0) ? 1 : -1;

	cv::Matx22d rotation;
	cv::Mat(svd.vt.t() * cv::Mat(corr) * svd.u.t()).copyTo(rotation);

	return rotation;
}
// Single-precision variant of AlignShapesKabsch2D.
// Aligns two mean-normalised 2D shapes using Kabsch's algorithm; returns the
// optimal 2x2 rotation taking align_from onto align_to.
cv::Matx22f AlignShapesKabsch2D_f(const cv::Mat_<float>& align_from, const cv::Mat_<float>& align_to)
{
	cv::SVD svd(align_from.t() * align_to);

	// make sure no reflection is there
	// corr ensures that we do only rotations and not reflections
	// NOTE: cv::determinant returns a double; keep it as double (only the sign
	// is used) instead of implicitly narrowing to float, which raises a warning
	double d = cv::determinant(svd.vt.t() * svd.u.t());
	cv::Matx22f corr = cv::Matx22f::eye();
	if (d > 0)
	{
		corr(1, 1) = 1;
	}
	else
	{
		corr(1, 1) = -1;
	}

	cv::Matx22f R;
	cv::Mat(svd.vt.t()*cv::Mat(corr)*svd.u.t()).copyTo(R);

	return R;
}
//=============================================================================
// Basically Kabsch's algorithm but also allows the collection of points to be different in scale from each other
// Returns the 2x2 scaled-rotation matrix A = s * R that maps src onto dst
// (translation is not included in the returned transform).
cv::Matx22d AlignShapesWithScale(cv::Mat_<double>& src, cv::Mat_<double> dst)
{
	int n = src.rows;

	// First we mean normalise both src and dst
	double mean_src_x = cv::mean(src.col(0))[0];
	double mean_src_y = cv::mean(src.col(1))[0];

	double mean_dst_x = cv::mean(dst.col(0))[0];
	double mean_dst_y = cv::mean(dst.col(1))[0];

	cv::Mat_<double> src_mean_normed = src.clone();
	src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
	src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;

	cv::Mat_<double> dst_mean_normed = dst.clone();
	dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
	dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;

	// Find the scaling factor of each (RMS distance from the centroid)
	cv::Mat src_sq;
	cv::pow(src_mean_normed, 2, src_sq);

	cv::Mat dst_sq;
	cv::pow(dst_mean_normed, 2, dst_sq);

	double s_src = sqrt(cv::sum(src_sq)[0] / n);
	double s_dst = sqrt(cv::sum(dst_sq)[0] / n);

	src_mean_normed = src_mean_normed / s_src;
	dst_mean_normed = dst_mean_normed / s_dst;

	double s = s_dst / s_src;

	// Get the rotation between the normalised shapes
	cv::Matx22d R = AlignShapesKabsch2D(src_mean_normed, dst_mean_normed);

	cv::Matx22d A;
	cv::Mat(s * R).copyTo(A);

	// NOTE: the translation offset (t_x/t_y) is deliberately not computed here;
	// it was dead code that only produced unused-variable warnings and the
	// returned transform never contained it.
	return A;
}
// Single-precision variant of AlignShapesWithScale.
// Returns the 2x2 scaled-rotation matrix A = s * R that maps src onto dst
// (translation is not included in the returned transform).
cv::Matx22f AlignShapesWithScale_f(cv::Mat_<float>& src, cv::Mat_<float> dst)
{
	int n = src.rows;

	// First we mean normalise both src and dst
	// cv::mean returns a cv::Scalar of doubles; cast explicitly to avoid
	// implicit double->float narrowing warnings
	float mean_src_x = (float)cv::mean(src.col(0))[0];
	float mean_src_y = (float)cv::mean(src.col(1))[0];

	float mean_dst_x = (float)cv::mean(dst.col(0))[0];
	float mean_dst_y = (float)cv::mean(dst.col(1))[0];

	cv::Mat_<float> src_mean_normed = src.clone();
	src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
	src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;

	cv::Mat_<float> dst_mean_normed = dst.clone();
	dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
	dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;

	// Find the scaling factor of each (RMS distance from the centroid);
	// sqrt operates on double, so cast the result explicitly
	cv::Mat src_sq;
	cv::pow(src_mean_normed, 2, src_sq);

	cv::Mat dst_sq;
	cv::pow(dst_mean_normed, 2, dst_sq);

	float s_src = (float)sqrt(cv::sum(src_sq)[0] / n);
	float s_dst = (float)sqrt(cv::sum(dst_sq)[0] / n);

	src_mean_normed = src_mean_normed / s_src;
	dst_mean_normed = dst_mean_normed / s_dst;

	float s = s_dst / s_src;

	// Get the rotation between the normalised shapes
	cv::Matx22f R = AlignShapesKabsch2D_f(src_mean_normed, dst_mean_normed);

	cv::Matx22f A = s * R;

	// NOTE: the translation offset (t_x/t_y) is deliberately not computed here;
	// it was dead code that only produced unused-variable warnings and the
	// returned transform never contained it.
	return A;
}
// Useful utility for grabbing a bounding box around a set of 2D landmarks (as a 1D 2n x 1 vector of xs followed by ys, or as an n x 2 vector)
void ExtractBoundingBox(const cv::Mat_<float>& landmarks, float &min_x, float &max_x, float &min_y, float &max_y)
{
@@ -700,8 +543,7 @@ namespace LandmarkDetector
if (min_width != -1)
{
if (region.width < min_width || region.x < ((float)intensity.cols) * roi.x || region.y < ((float)intensity.cols) * roi.y ||
region.x + region.width >((float)intensity.cols) * (roi.x + roi.width) || region.y + region.height >((float)intensity.rows) * (roi.y + roi.height))
if (region.width < min_width || region.x < ((float)intensity.cols) * roi.x || region.y < ((float)intensity.cols) * roi.y || region.x + region.width >((float)intensity.cols) * (roi.x + roi.width) || region.y + region.height >((float)intensity.rows) * (roi.y + roi.height))
continue;
}

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -328,7 +328,7 @@ void PAW::Warp(const cv::Mat& image_to_warp, cv::Mat& destination_image, const c
this->WarpRegion(map_x, map_y);
// Do the actual warp (with bi-linear interpolation)
remap(image_to_warp, destination_image, map_x, map_y, CV_INTER_LINEAR);
remap(image_to_warp, destination_image, map_x, map_y, cv::INTER_LINEAR);
}
@@ -512,4 +512,4 @@ int PAW::findTriangle(const cv::Point_<float>& point, const std::vector<std::vec
}
}
return tri;
}
}

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -654,7 +654,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, cv::Mat_<float>& out_params_l
// Solve for the parameter update (from Baltrusaitis 2013 based on eq (36) Saragih 2011)
cv::Mat_<float> param_update;
cv::solve(Hessian, J_w_t_m, param_update, CV_CHOLESKY);
cv::solve(Hessian, J_w_t_m, param_update, cv::DECOMP_CHOLESKY);
// To not overshoot, have the gradient decent rate a bit smaller
param_update = 0.75 * param_update;

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -36,6 +36,8 @@
#include "Patch_experts.h"
#include "RotationHelpers.h"
// TBB includes
#include <tbb/tbb.h>
@@ -156,7 +158,7 @@ void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, c
cv::Mat_<float> reference_shape_2D = (reference_shape.reshape(1, 2).t());
cv::Mat_<float> image_shape_2D = landmark_locations.reshape(1, 2).t();
sim_img_to_ref = AlignShapesWithScale_f(image_shape_2D, reference_shape_2D);
sim_img_to_ref = Utilities::AlignShapesWithScale(image_shape_2D, reference_shape_2D);
sim_ref_to_img = sim_img_to_ref.inv(cv::DECOMP_LU);
float a1 = sim_ref_to_img(0, 0);
@@ -246,7 +248,7 @@ void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, c
// Extract the region of interest around the current landmark location
cv::Mat_<float> area_of_interest(area_of_interest_height, area_of_interest_width, 0.0f);
cv::warpAffine(grayscale_image, area_of_interest, sim, area_of_interest.size(), cv::WARP_INVERSE_MAP + CV_INTER_LINEAR);
cv::warpAffine(grayscale_image, area_of_interest, sim, area_of_interest.size(), cv::WARP_INVERSE_MAP + cv::INTER_LINEAR);
// Get intensity response either from the SVR, CCNF, or CEN patch experts (prefer CEN as they are the most accurate so far)
if (!cen_expert_intensity.empty())
@@ -280,7 +282,7 @@ void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, c
// Extract the region of interest around the current landmark location
cv::Mat_<float> area_of_interest_r(area_of_interest_height, area_of_interest_width, 0.0f);
cv::warpAffine(grayscale_image, area_of_interest_r, sim_r, area_of_interest_r.size(), cv::WARP_INVERSE_MAP + CV_INTER_LINEAR);
cv::warpAffine(grayscale_image, area_of_interest_r, sim_r, area_of_interest_r.size(), cv::WARP_INVERSE_MAP + cv::INTER_LINEAR);
cv::Mat_<float> prealloc_mat_right = preallocated_im2col[mirror_id][im2col_size];
@@ -461,21 +463,21 @@ bool Patch_experts::Read(vector<string> intensity_svr_expert_locations, vector<s
}
// Reading in weights/biases/cutoffs
for (int i = 0; i < centers[0].size(); ++i)
for (size_t i = 0; i < centers[0].size(); ++i)
{
double weight;
earlyTermFile >> weight;
early_term_weights.push_back(weight);
}
for (int i = 0; i < centers[0].size(); ++i)
for (size_t i = 0; i < centers[0].size(); ++i)
{
double bias;
earlyTermFile >> bias;
early_term_biases.push_back(bias);
}
for (int i = 0; i < centers[0].size(); ++i)
for (size_t i = 0; i < centers[0].size(); ++i)
{
double cutoff;
earlyTermFile >> cutoff;

View File

@@ -14,19 +14,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -179,7 +179,7 @@ void SVR_patch_expert::Response(const cv::Mat_<float>& area_of_interest, cv::Mat
cv::Mat_<float> empty_matrix_2(0,0,0.0);
// Efficient calc of patch expert SVR response across the area of interest
matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, CV_TM_CCOEFF_NORMED);
matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, cv::TM_CCOEFF_NORMED);
response.create(svr_response.size());
cv::MatIterator_<float> p = response.begin();
@@ -246,7 +246,7 @@ void SVR_patch_expert::ResponseDepth(const cv::Mat_<float>& area_of_interest, cv
// Efficient calc of patch expert response across the area of interest
matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, CV_TM_CCOEFF);
matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, cv::TM_CCOEFF);
response.create(svr_response.size());
cv::MatIterator_<float> p = response.begin();

View File

@@ -113,7 +113,8 @@
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
@@ -153,7 +154,8 @@
<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<EnableEnhancedInstructionSet>
</EnableEnhancedInstructionSet>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __IMAGE_CAPTURE_h_
#define __IMAGE_CAPTURE_h_
#ifndef IMAGE_CAPTURE_H
#define IMAGE_CAPTURE_H
// System includes
#include <fstream>
@@ -118,4 +118,4 @@ namespace Utilities
};
}
#endif
#endif // IMAGE_CAPTURE_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __IMAGE_MANIPULATION_HELPERS_h_
#define __IMAGE_MANIPULATION_HELPERS_h_
#ifndef IMAGE_MANIPULATION_HELPERS_H
#define IMAGE_MANIPULATION_HELPERS_H
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
@@ -88,4 +88,4 @@ namespace Utilities
}
#endif
#endif // IMAGE_MANIPULATION_HELPERS_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __RECORDER_CSV_h_
#define __RECORDER_CSV_h_
#ifndef RECORDER_CSV_H
#define RECORDER_CSV_H
// System includes
#include <fstream>
@@ -97,4 +97,4 @@ namespace Utilities
};
}
#endif
#endif // RECORDER_CSV_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __RECORDER_HOG_h_
#define __RECORDER_HOG_h_
#ifndef RECORDER_HOG_H
#define RECORDER_HOG_H
// System includes
#include <vector>
@@ -85,4 +85,4 @@ namespace Utilities
};
}
#endif
#endif // RECORDER_HOG_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __RECORDER_OPENFACE_h_
#define __RECORDER_OPENFACE_h_
#ifndef RECORDER_OPENFACE_H
#define RECORDER_OPENFACE_H
#include "RecorderCSV.h"
#include "RecorderHOG.h"
@@ -196,4 +196,4 @@ namespace Utilities
};
}
#endif
#endif // RECORDER_OPENFACE_H

View File

@@ -32,8 +32,8 @@
///////////////////////////////////////////////////////////////////////////////
// Parameters of the Face analyser
#ifndef __RECORDER_OPENFACE_PARAM_H
#define __RECORDER_OPENFACE_PARAM_H
#ifndef RECORDER_OPENFACE_PARAM_H
#define RECORDER_OPENFACE_PARAM_H
#include <vector>
#include <opencv2/core/core.hpp>
@@ -114,4 +114,4 @@ namespace Utilities
}
#endif // ____RECORDER_OPENFACE_PARAM_H
#endif // RECORDER_OPENFACE_PARAM_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __ROTATION_HELPERS_h_
#define __ROTATION_HELPERS_h_
#ifndef ROTATION_HELPERS_H
#define ROTATION_HELPERS_H
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d.hpp>
@@ -160,5 +160,87 @@ namespace Utilities
}
//===========================================================================
// Point set and landmark manipulation functions
//===========================================================================
// Using Kabsch's algorithm for aligning shapes
//This assumes that align_from and align_to are already mean normalised
// Returns the optimal 2x2 rotation matrix taking align_from onto align_to.
// NOTE(review): declared 'static' in what appears to be a header, so each
// including translation unit gets its own copy; 'inline' may be preferable -
// TODO confirm against the build setup.
static cv::Matx22f AlignShapesKabsch2D(const cv::Mat_<float>& align_from, const cv::Mat_<float>& align_to)
{
// SVD of the cross-covariance matrix yields the optimal rotation
cv::SVD svd(align_from.t() * align_to);

// make sure no reflection is there
// corr ensures that we do only rotations and not reflections
double d = cv::determinant(svd.vt.t() * svd.u.t());

cv::Matx22f corr = cv::Matx22f::eye();
if (d > 0)
{
corr(1, 1) = 1;
}
else
{
corr(1, 1) = -1;
}

cv::Matx22f R;
cv::Mat(svd.vt.t()*cv::Mat(corr)*svd.u.t()).copyTo(R);

return R;
}
//=============================================================================
// Basically Kabsch's algorithm but also allows the collection of points to be different in scale from each other
// Returns the 2x2 scaled-rotation matrix A = s * R mapping src onto dst;
// the translation component is intentionally not part of the result.
static cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst)
{
int n = src.rows;

// First we mean normalise both src and dst
// (cv::mean returns doubles, hence the explicit casts to float)
float mean_src_x = (float)cv::mean(src.col(0))[0];
float mean_src_y = (float)cv::mean(src.col(1))[0];

float mean_dst_x = (float)cv::mean(dst.col(0))[0];
float mean_dst_y = (float)cv::mean(dst.col(1))[0];

cv::Mat_<float> src_mean_normed = src.clone();
src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;

cv::Mat_<float> dst_mean_normed = dst.clone();
dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;

// Find the scaling factor of each (RMS distance from the centroid)
cv::Mat src_sq;
cv::pow(src_mean_normed, 2, src_sq);

cv::Mat dst_sq;
cv::pow(dst_mean_normed, 2, dst_sq);

float s_src = (float)sqrt(cv::sum(src_sq)[0] / n);
float s_dst = (float)sqrt(cv::sum(dst_sq)[0] / n);

src_mean_normed = src_mean_normed / s_src;
dst_mean_normed = dst_mean_normed / s_dst;

float s = s_dst / s_src;

// Get the rotation between the scale-normalised shapes
cv::Matx22f R = AlignShapesKabsch2D(src_mean_normed, dst_mean_normed);

cv::Matx22f A;
cv::Mat(s * R).copyTo(A);

// Translation is not returned; kept commented out to avoid unused-variable warnings
//cv::Mat_<float> aligned = (cv::Mat(cv::Mat(A) * src.t())).t();
//cv::Mat_<float> offset = dst - aligned;

//float t_x = cv::mean(offset.col(0))[0];
//float t_y = cv::mean(offset.col(1))[0];

return A;
}
}
#endif
#endif // ROTATION_HELPERS_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __SEQUENCE_CAPTURE_h_
#define __SEQUENCE_CAPTURE_h_
#ifndef SEQUENCE_CAPTURE_H
#define SEQUENCE_CAPTURE_H
// System includes
#include <fstream>
@@ -162,4 +162,4 @@ namespace Utilities
};
}
#endif
#endif // SEQUENCE_CAPTURE_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __VISUALIZATION_UTILS_h_
#define __VISUALIZATION_UTILS_h_
#ifndef VISUALIZATION_UTILS_H
#define VISUALIZATION_UTILS_H
#include <opencv2/core/core.hpp>
@@ -71,4 +71,4 @@ namespace Utilities
};
}
#endif
#endif // VISUALIZATION_UTILS_H

View File

@@ -31,8 +31,8 @@
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __VISUALIZER_h_
#define __VISUALIZER_h_
#ifndef VISUALIZER_H
#define VISUALIZER_H
// System includes
#include <vector>

View File

@@ -13,19 +13,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -358,7 +358,7 @@ cv::Mat ImageCapture::GetNextImage()
}
// Load the image as an 8 bit RGB
latest_frame = cv::imread(image_files[frame_num], CV_LOAD_IMAGE_COLOR);
latest_frame = cv::imread(image_files[frame_num], cv::IMREAD_COLOR);
if (latest_frame.empty())
{
@@ -420,4 +420,4 @@ double ImageCapture::GetProgress()
cv::Mat_<uchar> ImageCapture::GetGrayFrame()
{
return latest_gray_frame;
}
}

View File

@@ -13,19 +13,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -428,7 +428,7 @@ void RecorderOpenFace::WriteObservationTracked()
std::string output_codec = params.outputCodec();
try
{
video_writer.open(media_filename, CV_FOURCC(output_codec[0], output_codec[1], output_codec[2], output_codec[3]), params.outputFps(), vis_to_out.size(), true);
video_writer.open(media_filename, cv::VideoWriter::fourcc(output_codec[0], output_codec[1], output_codec[2], output_codec[3]), params.outputFps(), vis_to_out.size(), true);
if (!video_writer.isOpened())
{

View File

@@ -13,19 +13,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -232,16 +232,16 @@ bool SequenceCapture::OpenWebcam(int device, int image_width, int image_height,
latest_gray_frame = cv::Mat();
capture.open(device);
capture.set(CV_CAP_PROP_FRAME_WIDTH, image_width);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, image_height);
capture.set(cv::CAP_PROP_FRAME_WIDTH, image_width);
capture.set(cv::CAP_PROP_FRAME_HEIGHT, image_height);
is_webcam = true;
is_image_seq = false;
vid_length = 0;
this->frame_width = (int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
this->frame_height = (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
this->frame_width = (int)capture.get(cv::CAP_PROP_FRAME_WIDTH);
this->frame_height = (int)capture.get(cv::CAP_PROP_FRAME_HEIGHT);
if (!capture.isOpened())
{
@@ -254,7 +254,7 @@ bool SequenceCapture::OpenWebcam(int device, int image_width, int image_height,
std::cout << "Defaulting to " << frame_width << "x" << frame_height << std::endl;
}
this->fps = capture.get(CV_CAP_PROP_FPS);
this->fps = capture.get(cv::CAP_PROP_FPS);
// Check if fps is nan or less than 0
if (fps != fps || fps <= 0)
@@ -319,7 +319,7 @@ bool SequenceCapture::OpenVideoFile(std::string video_file, float fx, float fy,
return false;
}
this->fps = capture.get(CV_CAP_PROP_FPS);
this->fps = capture.get(cv::CAP_PROP_FPS);
// Check if fps is nan or less than 0
if (fps != fps || fps <= 0)
@@ -331,10 +331,10 @@ bool SequenceCapture::OpenVideoFile(std::string video_file, float fx, float fy,
is_webcam = false;
is_image_seq = false;
this->frame_width = (int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
this->frame_height = (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
this->frame_width = (int)capture.get(cv::CAP_PROP_FRAME_WIDTH);
this->frame_height = (int)capture.get(cv::CAP_PROP_FRAME_HEIGHT);
vid_length = (int)capture.get(CV_CAP_PROP_FRAME_COUNT);
vid_length = (int)capture.get(cv::CAP_PROP_FRAME_COUNT);
SetCameraIntrinsics(fx, fy, cx, cy);
@@ -390,7 +390,7 @@ bool SequenceCapture::OpenImageSequence(std::string directory, float fx, float f
}
// Assume all images are same size in an image sequence
cv::Mat tmp = cv::imread(image_files[0], CV_LOAD_IMAGE_COLOR);
cv::Mat tmp = cv::imread(image_files[0], cv::IMREAD_COLOR);
this->frame_height = tmp.size().height;
this->frame_width = tmp.size().width;
@@ -468,7 +468,7 @@ void SequenceCapture::CaptureThread()
}
else if (is_image_seq)
{
if (image_files.empty() || frame_num_int >= image_files.size())
if (image_files.empty() || frame_num_int >= (int)image_files.size())
{
// Indicate lack of success by returning an empty image
tmp_frame = cv::Mat();
@@ -476,7 +476,7 @@ void SequenceCapture::CaptureThread()
}
else
{
tmp_frame = cv::imread(image_files[frame_num_int], CV_LOAD_IMAGE_COLOR);
tmp_frame = cv::imread(image_files[frame_num_int], cv::IMREAD_COLOR);
}
timestamp_curr = 0;
}
@@ -544,4 +544,4 @@ bool SequenceCapture::IsOpened()
cv::Mat_<uchar> SequenceCapture::GetGrayFrame()
{
return latest_gray_frame;
}
}

View File

@@ -13,19 +13,19 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace 2.0: Facial Behavior Analysis Toolkit
// Tadas Baltru<EFBFBD>aitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// Tadas Baltrušaitis, Amir Zadeh, Yao Chong Lim, and Louis-Philippe Morency
// in IEEE International Conference on Automatic Face and Gesture Recognition, 2018
//
// Convolutional experts constrained local model for facial landmark detection.
// A. Zadeh, T. Baltru<EFBFBD>aitis, and Louis-Philippe Morency,
// A. Zadeh, T. Baltrušaitis, and Louis-Philippe Morency,
// in Computer Vision and Pattern Recognition Workshops, 2017.
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltru<EFBFBD>aitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltru<EFBFBD>aitis, Marwa Mahmoud, and Peter Robinson
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
@@ -318,7 +318,7 @@ void Visualizer::SetObservationActionUnits(const std::vector<std::pair<std::stri
cv::rectangle(action_units_image, cv::Point(MARGIN_X, offset),
cv::Point((int)(MARGIN_X + AU_TRACKBAR_LENGTH * intensity / 5.0), offset + AU_TRACKBAR_HEIGHT),
cv::Scalar(128, 128, 128),
CV_FILLED);
cv::FILLED);
}
else
{