Commit by Tadas Baltrusaitis, 2018-04-29 18:51:13 +01:00
16 changed files with 207 additions and 219 deletions

View File

@@ -186,6 +186,7 @@ int main(int argc, char **argv)
// Reset the model, for the next video
face_model.Reset();
sequence_reader.Close();
sequence_number++;
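
This hunk and the two that follow add the same bookkeeping to each tool's main loop: once a sequence has been processed, the tracker is reset, the reader is closed, and a sequence counter is advanced so per-sequence outputs are numbered. A minimal sketch of that loop shape, with illustrative stand-ins for the processing calls:

int sequence_number = 0;
while (sequence_reader.Open(arguments))   // open the next video/webcam/image sequence
{
    // ... per-frame detection, tracking and recording ...
    face_model.Reset();       // clear tracker state before the next video
    sequence_reader.Close();  // release the current input source
    sequence_number++;        // numbers the per-sequence output files
}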

View File

@@ -407,6 +407,8 @@ int main(int argc, char **argv)
active_models[model] = false;
}
sequence_reader.Close();
sequence_number++;
}

View File

@@ -262,6 +262,7 @@ int main(int argc, char **argv)
}
open_face_rec.Close();
sequence_reader.Close();
if (recording_params.outputAUs())
{

View File

@@ -333,6 +333,7 @@ void CCNF_patch_expert::Read(ifstream &stream, std::vector<int> window_sizes, st
}
// In case we are using OpenBLAS, make sure it is not multi-threading as we are multi-threading outside of it
goto_set_num_threads(1);
openblas_set_num_threads(1);
int n_sigmas = window_sizes.size();
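
This hunk, and the matching ones in CEN_patch_expert::Read and CNN::Read below, pins OpenBLAS to a single thread, because the landmark detector multi-threads across patch experts itself and nested BLAS threading would oversubscribe the cores. A minimal sketch of the pattern, assuming OpenBLAS as the BLAS backend (the loader body is illustrative):

#include <fstream>

// OpenBLAS exports both entry points: goto_set_num_threads is the legacy
// GotoBLAS name, openblas_set_num_threads the current one.
extern "C" void goto_set_num_threads(int num_threads);
extern "C" void openblas_set_num_threads(int num_threads);

void ReadExperts(std::ifstream& stream)
{
    // Run BLAS single-threaded: the caller parallelises over patch experts,
    // so BLAS-internal threads would only add contention.
    goto_set_num_threads(1);
    openblas_set_num_threads(1);
    // ... read weight matrices and biases from the stream ...
}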

View File

@@ -95,8 +95,9 @@ void CEN_patch_expert::Read(ifstream &stream)
{
// Setting up OpenBLAS
goto_set_num_threads(1);
openblas_set_num_threads(1);
// Sanity check
int read_type;

View File

@@ -289,6 +289,7 @@ void CNN::ClearPrecomp()
void CNN::Read(const string& location)
{
goto_set_num_threads(1);
openblas_set_num_threads(1);
ifstream cnn_stream(location, ios::in | ios::binary);

View File

@@ -39,6 +39,10 @@
#include <sstream>
#include <vector>
// For speeding up capture
#include "tbb/concurrent_queue.h"
#include "tbb/task_group.h"
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
@@ -109,6 +113,17 @@ namespace Utilities
private:
// For faster input, multi-thread the capture so it is not waiting for processing to be done
// Used to keep track if the recording is still going (for the writing threads)
bool capturing;
// For keeping track of tasks
tbb::task_group capture_threads;
// A thread that will capture input frames, so that the rest of the application does not block waiting on I/O
void CaptureThread();
// Blocking copy and move, as it doesn't make sense to have several readers pointed at the same source, and this would cause issues, especially with webcams
SequenceCapture & operator= (const SequenceCapture& other);
SequenceCapture & operator= (const SequenceCapture&& other);
@@ -122,6 +137,12 @@ namespace Utilities
cv::Mat latest_frame;
cv::Mat_<uchar> latest_gray_frame;
// Storing the captured data queue
const int CAPTURE_CAPACITY = 200; // 200 MB
// Storing capture timestamp, RGB image, gray image
tbb::concurrent_bounded_queue<std::tuple<double, cv::Mat, cv::Mat_<uchar> > > capture_queue;
// Keeping track of frame number and the files in the image sequence
size_t frame_num;
std::vector<std::string> image_files;
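
The members added here turn SequenceCapture into a single-producer, single-consumer pipeline: CaptureThread pushes (timestamp, BGR, gray) tuples into a bounded queue sized from the 200 MB budget, and GetNextFrame pops them, so grabbing frames never waits on tracking. A self-contained sketch of that shape; the Grab() stand-in is an assumption, not the class's real capture call:

#include <tuple>
#include <opencv2/core/core.hpp>
#include "tbb/concurrent_queue.h"
#include "tbb/task_group.h"

tbb::concurrent_bounded_queue<std::tuple<double, cv::Mat, cv::Mat_<uchar> > > capture_queue;
tbb::task_group capture_threads;
bool capturing = false;

void CaptureThread()                        // producer
{
    while (capturing)
    {
        double t = 0; cv::Mat bgr; cv::Mat_<uchar> gray;
        // Grab(t, bgr, gray);              // hypothetical capture call
        capture_queue.push(std::make_tuple(t, bgr, gray)); // blocks while the queue is full
    }
}

cv::Mat GetNextFrame()                      // consumer
{
    std::tuple<double, cv::Mat, cv::Mat_<uchar> > data;
    capture_queue.pop(data);                // blocks until a frame is ready
    return std::get<1>(data);
}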

View File

@@ -82,6 +82,8 @@ void CreateDirectory(std::string output_path)
void RecorderOpenFace::PrepareRecording(const std::string& in_filename)
{
recording = true;
// Construct the directories required for the output
CreateDirectory(record_root);
@@ -129,7 +131,7 @@ void RecorderOpenFace::PrepareRecording(const std::string& in_filename)
hog_filename = (path(record_root) / hog_filename).string();
hog_recorder.Open(hog_filename);
}
// saving the videos
if (params.outputTracked())
{
@@ -146,7 +148,7 @@ void RecorderOpenFace::PrepareRecording(const std::string& in_filename)
metadata_file << "Output image:" << this->media_filename << endl;
this->media_filename = (path(record_root) / this->media_filename).string();
}
// Start the video and image writing thread
writing_threads.run([&] {VideoWritingTask(); });
}
@@ -158,16 +160,13 @@ void RecorderOpenFace::PrepareRecording(const std::string& in_filename)
metadata_file << "Output aligned directory:" << this->aligned_output_directory << endl;
this->aligned_output_directory = (path(record_root) / this->aligned_output_directory).string();
CreateDirectory(aligned_output_directory);
// Start the video and image writing thread
writing_threads.run([&] {AlignedImageWritingTask(); });
}
this->frame_number = 0;
recording = true;
}
RecorderOpenFace::RecorderOpenFace(const std::string in_filename, const RecorderOpenFaceParameters& parameters, std::vector<std::string>& arguments):video_writer(), params(parameters)
@@ -269,10 +268,6 @@ void RecorderOpenFace::SetObservationVisualization(const cv::Mat &vis_track)
{
video_writer.open(media_filename, CV_FOURCC(output_codec[0], output_codec[1], output_codec[2], output_codec[3]), params.outputFps(), vis_track.size(), true);
// Set up the queue for video writing based on output size
int capacity = (1024 * 1024 * TRACKED_QUEUE_CAPACITY) / (vis_track.size().width * vis_track.size().height * vis_track.channels());
vis_to_out_queue.set_capacity(capacity);
if (!video_writer.isOpened())
{
WARN_STREAM("Could not open VideoWriter, OUTPUT FILE WILL NOT BE WRITTEN.");
@@ -292,12 +287,13 @@ void RecorderOpenFace::SetObservationVisualization(const cv::Mat &vis_track)
void RecorderOpenFace::AlignedImageWritingTask()
{
while (recording)
while (recording || !aligned_face_queue.empty())
{
std::pair<std::string, cv::Mat> tracked_data;
if (aligned_face_queue.try_pop(tracked_data))
{
try {
aligned_face_queue.pop(tracked_data);
bool write_success = cv::imwrite(tracked_data.first, tracked_data.second);
if (!write_success)
@@ -305,19 +301,26 @@ void RecorderOpenFace::AlignedImageWritingTask()
WARN_STREAM("Could not output similarity aligned image image");
}
}
std::this_thread::sleep_for(std::chrono::microseconds(1000));
catch (tbb::user_abort&)
{
// The queue was aborted by Close(), meaning recording is done and the thread can exit
}
}
}
void RecorderOpenFace::VideoWritingTask()
{
while(recording)
while(recording || !vis_to_out_queue.empty())
{
std::pair<std::string, cv::Mat> tracked_data;
if(vis_to_out_queue.try_pop(tracked_data))
{
try {
vis_to_out_queue.pop(tracked_data);
if (params.isSequence())
{
if (video_writer.isOpened())
@@ -334,9 +337,13 @@ void RecorderOpenFace::VideoWritingTask()
}
}
}
std::this_thread::sleep_for(std::chrono::microseconds(1000));
catch (tbb::user_abort&)
{
// The queue was aborted by Close(), meaning recording is done and the thread can exit
}
}
}
void RecorderOpenFace::WriteObservation()
@@ -391,6 +398,12 @@ void RecorderOpenFace::WriteObservation()
// Write aligned faces
if (params.outputAlignedFaces())
{
if (frame_number == 1)
{
int capacity = (1024 * 1024 * ALIGNED_QUEUE_CAPACITY) / (aligned_face.size().width * aligned_face.size().height * aligned_face.channels());
aligned_face_queue.set_capacity(capacity);
}
char name[100];
// Filename is based on frame number
@@ -407,12 +420,22 @@ void RecorderOpenFace::WriteObservation()
string out_file = aligned_output_directory + preferredSlash + string(name);
aligned_face_queue.push(std::pair<std::string, cv::Mat>(out_file, aligned_face));
// Clear the image
aligned_face = cv::Mat();
}
if(params.outputTracked())
{
if (frame_number == 1)
{
// Set up the queue for video writing based on output size
int capacity = (1024 * 1024 * TRACKED_QUEUE_CAPACITY) / (vis_to_out.size().width * vis_to_out.size().height * vis_to_out.channels());
vis_to_out_queue.set_capacity(capacity);
}
if (vis_to_out.empty())
{
WARN_STREAM("Output tracked video frame is not set");
@@ -499,8 +522,22 @@ void RecorderOpenFace::Close()
{
recording = false;
// Make sure the recording threads complete
while (!vis_to_out_queue.empty())
{
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
while (!aligned_face_queue.empty())
{
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
// Free the waiting queues
vis_to_out_queue.abort();
aligned_face_queue.abort();
// Wait for the writing threads to finish
writing_threads.wait();
hog_recorder.Close();
csv_recorder.Close();
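
Taken together, the recorder changes swap the old poll-and-sleep writer loops for TBB's queue-abort handshake: each writing task blocks in pop(), and Close() first lets the queues drain, then calls abort() so any still-blocked pop() throws tbb::user_abort, and finally joins the task_group. A condensed sketch of the handshake; Item and Write are illustrative names, not the recorder's API:

#include <atomic>
#include <string>
#include "tbb/concurrent_queue.h"

struct Item { std::string filename; /* image data, ... */ };
void Write(const Item&) { /* cv::imwrite / video_writer.write */ }

tbb::concurrent_bounded_queue<Item> queue;
std::atomic<bool> recording{true};

void WritingTask()
{
    while (recording || !queue.empty())
    {
        Item item;
        try {
            queue.pop(item);                // blocks on an empty queue
            Write(item);
        }
        catch (tbb::user_abort&) {
            // queue.abort() in Close() wakes the blocked pop(); just exit
        }
    }
}
// Close(): recording = false; wait until the queues are empty; queue.abort();
// then writing_threads.wait() to join the tasks.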

View File

@@ -262,12 +262,14 @@ bool SequenceCapture::OpenWebcam(int device, int image_width, int image_height,
INFO_STREAM("FPS of the webcam cannot be determined, assuming 30");
fps = 30;
}
SetCameraIntrinsics(fx, fy, cx, cy);
std::string time = currentDateTime();
this->name = "webcam_" + time;
start_time = cv::getTickCount();
capturing = true;
capture_threads.run([&] {CaptureThread(); });
return true;
@@ -275,8 +277,22 @@ bool SequenceCapture::OpenWebcam(int device, int image_width, int image_height,
void SequenceCapture::Close()
{
// Close the capturing threads
capturing = false;
// In case the queue is full and the thread is blocking, free one element so it can finish
std::tuple<double, cv::Mat, cv::Mat_<uchar> > data;
capture_queue.try_pop(data);
capture_threads.wait();
// Empty the capture queue
capture_queue.clear();
// Release the capture objects
if (capture.isOpened())
capture.release();
}
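
The ordering in Close() matters: push() on a full bounded queue blocks, so the capture thread may be asleep inside push() and unable to re-check the capturing flag. An annotated restatement of the shutdown sequence this hunk adds:

capturing = false;                 // ask CaptureThread to stop
// If the producer is blocked in push() on a full queue, popping one element
// makes room, the pending push() completes, and the capture loop re-checks
// 'capturing' and exits.
std::tuple<double, cv::Mat, cv::Mat_<uchar> > data;
capture_queue.try_pop(data);
capture_threads.wait();            // join the capture task
capture_queue.clear();             // discard any unconsumed frames
if (capture.isOpened())
    capture.release();             // release the OpenCV device/file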
// Destructor that releases the capture
@@ -325,6 +341,8 @@ bool SequenceCapture::OpenVideoFile(std::string video_file, float fx, float fy,
SetCameraIntrinsics(fx, fy, cx, cy);
this->name = video_file;
capturing = true;
capture_threads.run([&] {CaptureThread(); });
return true;
@@ -381,6 +399,8 @@ bool SequenceCapture::OpenImageSequence(std::string directory, float fx, float f
is_webcam = false;
is_image_seq = true;
vid_length = image_files.size();
capturing = true;
capture_threads.run([&] {CaptureThread(); });
return true;
@@ -415,47 +435,70 @@ void SequenceCapture::SetCameraIntrinsics(float fx, float fy, float cx, float cy
}
}
void SequenceCapture::CaptureThread()
{
int capacity = (CAPTURE_CAPACITY * 1024 * 1024) / (4 * frame_width * frame_height);
capture_queue.set_capacity(capacity);
int frame_num_int = 0;
while(capturing)
{
double timestamp_curr = 0;
cv::Mat tmp_frame;
cv::Mat_<uchar> tmp_gray_frame;
if (is_webcam || !is_image_seq)
{
bool success = capture.read(tmp_frame);
if (!success)
{
// Indicate lack of success by returning an empty image
tmp_frame = cv::Mat();
capturing = false;
}
// Recording the timestamp
if (!is_webcam)
{
timestamp_curr = frame_num_int * (1.0 / fps);
}
else
{
timestamp_curr = (cv::getTickCount() - start_time) / cv::getTickFrequency();
}
}
else if (is_image_seq)
{
if (image_files.empty() || frame_num_int >= image_files.size())
{
// Indicate lack of success by returning an empty image
tmp_frame = cv::Mat();
capturing = false;
}
else
{
tmp_frame = cv::imread(image_files[frame_num_int], CV_LOAD_IMAGE_COLOR);
}
timestamp_curr = 0;
}
frame_num_int++;
// Set the grayscale frame
ConvertToGrayscale_8bit(tmp_frame, tmp_gray_frame);
capture_queue.push(std::make_tuple(timestamp_curr, tmp_frame, tmp_gray_frame));
}
}
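
The queue capacity is a fixed memory budget divided by the per-frame footprint: roughly 4 bytes per pixel, i.e. a 3-channel 8-bit BGR image plus its 1-channel gray copy. A worked example with illustrative dimensions:

// CAPTURE_CAPACITY = 200 (MB); per-frame bytes ~= 4 * width * height
// For a 640x480 stream:
//   capacity = (200 * 1024 * 1024) / (4 * 640 * 480)
//            = 209715200 / 1228800
//            = 170 frames buffered before push() starts to block
int capacity = (200 * 1024 * 1024) / (4 * 640 * 480); // == 170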
cv::Mat SequenceCapture::GetNextFrame()
{
if (is_webcam || !is_image_seq)
{
bool success = capture.read(latest_frame);
if (!success)
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
}
// Recording the timestamp
if (!is_webcam)
{
time_stamp = frame_num * (1.0 / fps);
}
else
{
time_stamp = (cv::getTickCount() - start_time) / cv::getTickFrequency();
}
}
else if (is_image_seq)
{
if (image_files.empty() || frame_num >= image_files.size())
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
}
else
{
latest_frame = cv::imread(image_files[frame_num], CV_LOAD_IMAGE_COLOR);
}
time_stamp = 0;
}
// Set the grayscale frame
ConvertToGrayscale_8bit(latest_frame, latest_gray_frame);
std::tuple<double, cv::Mat, cv::Mat_<uchar> > data;
capture_queue.pop(data);
time_stamp = std::get<0>(data);
latest_frame = std::get<1>(data);
latest_gray_frame = std::get<2>(data);
frame_num++;

View File

@@ -2,11 +2,13 @@ function Script_CECLM_cross_data()
addpath(genpath('../'));
[images, detections, labels] = Collect_menpo_imgs('D:\Datasets\menpo/');
[images, detections, labels] = Collect_menpo_imgs('G:\Datasets\menpo/');
%% loading the CE-CLM model and parameters
[patches, pdm, clmParams, early_term_params] = Load_CECLM_general();
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
% As the early termination weights were trained on part of Menpo, turn them
% off to perform a clean cross-data experiment
early_term_params.weights_scale(:) = 1;
@@ -53,18 +55,9 @@ for i=1:numel(images)
bbox = squeeze(detections(i,:));
% have a multi-view version
if(multi_view)
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
[shape,~,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
else
[shape,~,~,lhood,lmark_lhood,view_used] = Fitting_from_bb(image, [], bbox, pdm, patches, clmParams);
end
% The actual work gets done here
[shape,~,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
all_lmark_lhoods(:,i) = lmark_lhood;
all_views_used(i) = view_used;

View File

@@ -3,14 +3,12 @@ function Script_CECLM_menpo_test_frontal()
addpath(genpath('../'));
addpath(genpath('./menpo_challenge_helpers'));
[images, detections] = Collect_menpo_test_frontal('D:\Datasets\menpo\testset\semifrontal/');
[images, detections] = Collect_menpo_test_frontal('G:\Datasets\menpo\testset\semifrontal/');
%% loading the CE-CLM model and parameters
[patches, pdm, clmParams, early_term_params] = Load_CECLM_menpo();
% Use the multi-hypothesis model, as bounding box tells nothing about
% orientation
multi_view = true;
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
%% Setup recording
experiment.params = clmParams;
@@ -53,19 +51,8 @@ for i=1:numel(images)
bbox = squeeze(detections(i,:));
% have a multi-view version
if(multi_view)
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
[shape,~,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
else
[shape,~,~,lhood,lmark_lhood,view_used] = Fitting_from_bb(image, [], bbox, pdm, patches, clmParams);
end
[shape,~,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
shape = shape + 0.5;
[~, name_org, ~] = fileparts(images(i).img);

View File

@@ -3,14 +3,12 @@ function Script_CECLM_menpo_test_profile()
addpath(genpath('../'));
addpath(genpath('./menpo_challenge_helpers'));
[images, detections] = Collect_menpo_test_profile('D:\Datasets\menpo\testset\profile/');
[images, detections] = Collect_menpo_test_profile('G:\Datasets\menpo\testset\profile/');
%% loading the CE-CLM model and parameters
[patches, pdm, clmParams, early_term_params] = Load_CECLM_menpo();
% Use the multi-hypothesis model, as bounding box tells nothing about
% orientation
multi_view = true;
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
%% Setup recording
experiment.params = clmParams;
@@ -41,7 +39,7 @@ if(~exist(out_pts, 'dir'))
mkdir(out_pts);
end
load('../pdm_generation/menpo_pdm/menpo_chin/conversion.mat');
load('menpo_challenge_helpers/conversion.mat');
%%
for i=1:numel(images)
@@ -55,19 +53,8 @@ for i=1:numel(images)
bbox = squeeze(detections(i,:));
% have a multi-view version
if(multi_view)
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
[shape,g_params,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
else
[shape,g_params,~,lhood,lmark_lhood,view_used] = Fitting_from_bb(image, [], bbox, pdm, patches, clmParams);
end
[shape,g_params,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
shape = shape + 0.5;

View File

@@ -2,14 +2,12 @@ function Script_CECLM_menpo_valid()
addpath(genpath('../'));
[images, detections, labels] = Collect_valid_imgs('D:\Datasets\menpo/');
[images, detections, labels] = Collect_valid_imgs('G:\Datasets\menpo/');
%% loading the CE-CLM model and parameters
[patches, pdm, clmParams, early_term_params] = Load_CECLM_menpo();
% Use the multi-hypothesis model, as bounding box tells nothing about
% orientation
multi_view = true;
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
%% Setup recording
experiment.params = clmParams;
@@ -47,18 +45,8 @@ for i=1:numel(images)
bbox = squeeze(detections(i,:));
% have a multi-view version
if(multi_view)
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
[shape,~,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
else
[shape,~,~,lhood,lmark_lhood,view_used] = Fitting_from_bb(image, [], bbox, pdm, patches, clmParams);
end
[shape,~,~,lhood,lmark_lhood,view_used] =...
Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views, early_term_params);
all_lmark_lhoods(:,i) = lmark_lhood;
all_views_used(i) = view_used;

View File

@@ -3,51 +3,24 @@ function Script_CLNF_cross_data_general()
addpath(genpath('../'));
% Replace this with the location of the Menpo dataset
[images, detections, labels] = Collect_menpo_imgs('D:\Datasets\menpo/');
[images, detections, labels] = Collect_menpo_imgs('G:\Datasets\menpo/');
%% loading the patch experts
clmParams = struct;
[ patches, pdm, clmParams ] = Load_CLNF_general();
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
clmParams.window_size = [25,25; 23,23; 21,21; 21,21];
clmParams.numPatchIters = size(clmParams.window_size,1);
[ patches_51, pdm_51, clmParams_51, inds_full, inds_inner ] = Load_CLNF_inner();
[patches] = Load_Patch_Experts( '../models/general/', 'ccnf_patches_*_general.mat', [], [], clmParams);
%% Fitting the model to the provided image
% the default PDM to use
pdmLoc = ['../models/pdm/pdm_68_aligned_wild.mat'];
load(pdmLoc);
pdm = struct;
pdm.M = double(M);
pdm.E = double(E);
pdm.V = double(V);
clmParams.regFactor = [35, 27, 20, 20];
clmParams.sigmaMeanShift = [1.25, 1.375, 1.5, 1.5];
clmParams.tikhonov_factor = [2.5, 5, 7.5, 7.5];
clmParams.startScale = 1;
clmParams.num_RLMS_iter = 10;
clmParams.fTol = 0.01;
clmParams.useMultiScale = true;
clmParams.use_multi_modal = 1;
clmParams.multi_modal_types = patches(1).multi_modal_types;
% Loading the final scale
[clmParams_inner, pdm_inner] = Load_CLM_params_inner();
clmParams_inner.window_size = [17,17;19,19;21,21;23,23];
inds_inner = 18:68;
[patches_inner] = Load_Patch_Experts( '../models/general/', 'ccnf_patches_*general_no_out.mat', [], [], clmParams_inner);
clmParams_inner.multi_modal_types = patches_inner(1).multi_modal_types;
shapes_all = zeros(size(labels,2),size(labels,3), size(labels,1));
labels_all = zeros(size(labels,2),size(labels,3), size(labels,1));
lhoods = zeros(numel(images),1);
% for recording purposes
experiment.params = clmParams;
num_points = numel(M)/3;
num_points = numel(pdm.M)/3;
shapes_all = cell(numel(images), 1);
labels_all = cell(numel(images), 1);
@@ -55,9 +28,6 @@ lhoods = zeros(numel(images),1);
all_lmark_lhoods = zeros(num_points, numel(images));
all_views_used = zeros(numel(images),1);
% Use the multi-hypothesis model, as bounding box tells nothing about
% orientation
multi_view = true;
verbose = false; % set to true to visualise the fitting
tic
@@ -72,55 +42,10 @@ for i=1:numel(images)
bbox = detections(i,:);
% have a multi-view version
if(multi_view)
views = [0,0,0; 0,-30,0; 0,30,0; 0,-55,0; 0,55,0; 0,0,30; 0,0,-30; 0,-90,0; 0,90,0; 0,-70,40; 0,70,-40];
views = views * pi/180;
shapes = zeros(num_points, 2, size(views,1));
ls = zeros(size(views,1),1);
lmark_lhoods = zeros(num_points,size(views,1));
views_used = zeros(size(views,1),1);
% Find the best orientation
for v = 1:size(views,1)
[shapes(:,:,v),~,~,ls(v),lmark_lhoods(:,v),views_used(v)] = Fitting_from_bb(image, [], bbox, pdm, patches, clmParams, 'orientation', views(v,:));
end
[lhood, v_ind] = max(ls);
lmark_lhood = lmark_lhoods(:,v_ind);
shape = shapes(:,:,v_ind);
view_used = views_used(v_ind);
else
[shape,~,~,lhood,lmark_lhood,view_used] = Fitting_from_bb(image, [], bbox, pdm, patches, clmParams);
end
% Perform inner face fitting
shape_inner = shape(inds_inner,:);
[ a, R, T, ~, l_params, err] = fit_PDM_ortho_proj_to_2D_no_reg(pdm_inner.M, pdm_inner.E, pdm_inner.V, shape_inner);
if(a > 0.9)
g_param = [a; Rot2Euler(R)'; T];
bbox_2 = [min(shape_inner(:,1)), min(shape_inner(:,2)), max(shape_inner(:,1)), max(shape_inner(:,2))];
[shape_inner] = Fitting_from_bb(image, [], bbox_2, pdm_inner, patches_inner, clmParams_inner, 'gparam', g_param, 'lparam', l_params);
% Now after detections incorporate the eyes back
% into the face model
shape(inds_inner, :) = shape_inner;
[ ~, ~, ~, ~, ~, ~, shape_fit] = fit_PDM_ortho_proj_to_2D_no_reg(pdm.M, pdm.E, pdm.V, shape);
all_lmark_lhoods(:,i) = lmark_lhood;
all_views_used(i) = view_used;
shape = shape_fit;
end
[shape,~,~,lhood,lmark_lhood,view_used] = Fitting_from_bb_multi_hyp(image, [], bbox, pdm, patches, clmParams, views);
% Perform inner landmark fitting now
[shape, shape_inner] = Fitting_from_bb_hierarch(image, pdm, pdm_51, patches_51, clmParams_51, shape, inds_full, inds_inner);
all_lmark_lhoods(:,i) = lmark_lhood;
all_views_used(i) = view_used;

View File

@@ -89,7 +89,7 @@ function [ shape2D, global_params, local_params, final_lhood, landmark_lhoods, v
local_params = locals{v_ind};
shape2D = shapes(:,:,v_ind);
view_used = views_used(v);
view_used = views_used(v_ind);
end