#include "SMReconstructionWorker.h"

#include "AlgorithmGrayCode.h"
#include "AlgorithmGrayCodeHorzVert.h"
#include "AlgorithmPhaseShiftTwoFreq.h"
#include "AlgorithmPhaseShiftTwoFreqHorzVert.h"
#include "AlgorithmPhaseShiftThreeFreq.h"
#include "AlgorithmPhaseShiftEmbedded.h"
#include "AlgorithmLineShift.h"

#include <QCoreApplication>
#include <QSettings>

#include <iostream>
#include <opencv2/opencv.hpp>

#include "cvtools.h"
#include <opencv2/core/eigen.hpp>

#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/io/pcd_io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/common/transforms.h>

/* Convert everything to debayered floating point frames */
void debayerAndFloat(const std::vector<cv::Mat> &rawFrames, std::vector<cv::Mat> &frames){

    unsigned int nFrames = rawFrames.size();
    frames.resize(nFrames);

    assert(rawFrames[0].type() == CV_8UC1);

    // Debayer and convert to float
    for(unsigned int i=0; i<nFrames; i++){
        cv::cvtColor(rawFrames[i], frames[i], CV_BayerBG2RGB);
        frames[i].convertTo(frames[i], CV_32FC3, 1.0/255.0);
    }
}
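
// Note on mergeHDR below: each exposure j is rescaled by shutterMin/shutters[j]
// so that all exposures share the radiometric scale of the shortest shutter.
// Pixels saturated in any channel (>= 0.99, tested on the first frame of each
// shutter group) are masked out of the accumulation.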

/* Merge exposures of HDR sequence */
void mergeHDR(const std::vector<cv::Mat> &frames, const std::vector<float> &shutters, std::vector<cv::Mat> &hdrFrames){

    unsigned int nShutters = shutters.size();
    unsigned int nFrames = frames.size();
    unsigned int nHDRFrames = nFrames/nShutters;

    assert(nShutters * nHDRFrames == nFrames);

    int nRows = frames[0].rows;
    int nCols = frames[0].cols;

    // Merge into HDR
    std::vector<cv::Mat> outputFrames(nHDRFrames);

    float shutterMin = shutters[0];

    for(unsigned int i=0; i<nHDRFrames; i++){
        outputFrames[i].create(nRows, nCols, CV_32FC3);
        outputFrames[i].setTo(0.0);
    }

    for(unsigned int j=0; j<nShutters; j++){

        std::vector<cv::Mat> frameChannels;
        cv::split(frames[j*nHDRFrames], frameChannels);

        cv::Mat mask = (frameChannels[0] < 0.99) & (frameChannels[1] < 0.99) & (frameChannels[2] < 0.99);

        // Parallelize over the output frames rather than the shutters: each
        // iteration i writes only outputFrames[i], so the accumulation is
        // race-free.
        #pragma omp parallel for
        for(int i=0; i<int(nHDRFrames); i++){
            cv::Mat frameji = frames[j*nHDRFrames + i];
            cv::add((shutterMin/shutters[j]) * frameji, outputFrames[i], outputFrames[i], mask);
        }
    }

    hdrFrames = outputFrames;
}
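
// Reconstruct a point cloud from a single frame sequence: look up the current
// calibration, instantiate the structured light algorithm named by the
// sequence's codec, decode the frames into 3D points with per-point color,
// and emit the result as an SMPointCloud.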
void SMReconstructionWorker::reconstructPointCloud(const SMFrameSequence &frameSequence){

    time.start();

    QSettings settings;

    // Get current calibration
    calibration = settings.value("calibration/parameters").value<SMCalibrationParameters>();

    // Create algorithm
    QString codec = frameSequence.codec;
    unsigned int resX = settings.value("projector/resX").toInt();
    unsigned int resY = settings.value("projector/resY").toInt();

    if(codec == "GrayCode")
        algorithm = new AlgorithmGrayCode(resX, resY);
    else if(codec == "GrayCodeHorzVert")
        algorithm = new AlgorithmGrayCodeHorzVert(resX, resY);
    else if(codec == "PhaseShiftTwoFreq")
        algorithm = new AlgorithmPhaseShiftTwoFreq(resX, resY);
    else if(codec == "PhaseShiftTwoFreqHorzVert")
        algorithm = new AlgorithmPhaseShiftTwoFreqHorzVert(resX, resY);
    else if(codec == "PhaseShiftThreeFreq")
        algorithm = new AlgorithmPhaseShiftThreeFreq(resX, resY);
    else if(codec == "PhaseShiftEmbedded")
        algorithm = new AlgorithmPhaseShiftEmbedded(resX, resY);
    else if(codec == "LineShift")
        algorithm = new AlgorithmLineShift(resX, resY);
    else{
        std::cerr << "SMReconstructionWorker: invalid codec (please set codec in preferences): " << codec.toStdString() << std::endl;
        return; // otherwise we would dereference a null algorithm below; TODO: no default?
    }
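
    // Note: if the previously created algorithm instance is not deleted
    // elsewhere, repeated reconstructions leak it; a smart pointer member
    // would avoid this.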

    // Convert data to floating point and debayer
    unsigned int nFrames = frameSequence.frames0.size();

    std::vector<cv::Mat> frames0, frames1;
    debayerAndFloat(frameSequence.frames0, frames0);
    debayerAndFloat(frameSequence.frames1, frames1);

    // If HDR sequence, merge frames (passing the same vector as input and
    // output is safe: mergeHDR assembles its result in a local buffer first)
    if(frameSequence.shutters.size() > 1){
        mergeHDR(frames0, frameSequence.shutters, frames0);
        mergeHDR(frames1, frameSequence.shutters, frames1);
    }

    assert(frames0.size() == algorithm->getNPatterns());
    assert(frames1.size() == algorithm->getNPatterns());

    // Get 3D points
    std::vector<cv::Point3f> Q;
    std::vector<cv::Vec3f> color;
    algorithm->get3DPoints(calibration, frames0, frames1, Q, color);

    // Convert point cloud to PCL format
    pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr pointCloudPCL(new pcl::PointCloud<pcl::PointXYZRGBNormal>);

    pointCloudPCL->width = Q.size();
    pointCloudPCL->height = 1;
    pointCloudPCL->is_dense = true;

    pointCloudPCL->points.resize(Q.size());

    for(unsigned int i=0; i<Q.size(); i++){
        pcl::PointXYZRGBNormal point;
        point.x = Q[i].x; point.y = Q[i].y; point.z = Q[i].z;
        point.r = 255*color[i][0]; point.g = 255*color[i][1]; point.b = 255*color[i][2];
        pointCloudPCL->points[i] = point;
    }
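
    // The cloud type carries normal fields, but since the normal estimation
    // below is disabled, those fields keep their default values.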

    // Transform point cloud to rotation axis coordinate system
    /*cv::Mat TRCV(3, 4, CV_32F);
    cv::Mat(calibration.Rr).copyTo(TRCV.colRange(0, 3));
    cv::Mat(calibration.Tr).copyTo(TRCV.col(3));
    Eigen::Affine3f TR;
    cv::cv2eigen(TRCV, TR.matrix());
    pcl::transformPointCloud(*pointCloudPCL, *pointCloudPCL, TR);

    // Estimate surface normals (does not produce proper normals...)
    std::cout << "Estimating normals..." << std::endl;
    pcl::PointCloud<pcl::PointXYZ>::Ptr points(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::copyPointCloud(*pointCloudPCL, *points);
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> ne;
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>());
    tree->setInputCloud(points);
    ne.setSearchMethod(tree);
    ne.setRadiusSearch(1.0);
    //ne.setKSearch(50);
    ne.setViewPoint(0.0, 0.0, 0.0);
    ne.setInputCloud(points);
    ne.compute(*normals);
    pcl::copyPointCloud(*normals, *pointCloudPCL);*/

    // Assemble SMPointCloud data structure
    SMPointCloud smPointCloud;
    smPointCloud.id = frameSequence.id;
    smPointCloud.pointCloud = pointCloudPCL;
    smPointCloud.rotationAngle = frameSequence.rotationAngle;
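
    // The calibration maps camera0 coordinates into the rotation axis frame as
    // q_r = Rr*q + Tr. Rotating by R about that frame's y-axis and mapping back
    // gives q' = (Rr^T*R*Rr)*q + (Rr^T*R*Tr - Rr^T*Tr), which is the composite
    // R and T assembled below.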
    // Determine transform in world (camera0) coordinate system
    float angleRadians = frameSequence.rotationAngle/180.0*M_PI;
    cv::Vec3f rot_rvec(0.0, -angleRadians, 0.0);
    cv::Mat R;
    cv::Rodrigues(rot_rvec, R);
    smPointCloud.R = calibration.Rr.t()*cv::Matx33f(R)*calibration.Rr;
    smPointCloud.T = calibration.Rr.t()*cv::Matx33f(R)*calibration.Tr - calibration.Rr.t()*calibration.Tr;

    // Previous version: transform expressed directly in the rotation axis frame
    /*float angleRadians = frameSequence.rotationAngle/180.0*M_PI;
    cv::Vec3f rot_rvec(0.0, -angleRadians, 0.0);
    cv::Mat R;
    cv::Rodrigues(rot_rvec, R);
    smPointCloud.R = cv::Matx33f(R);
    smPointCloud.T = cv::Vec3f(0.0,0.0,0.0);*/

    // Emit result
    emit newPointCloud(smPointCloud);
    std::cout << "SMReconstructionWorker: " << time.elapsed() << "ms" << std::endl;
    std::cout << "SMReconstructionWorker: " << smPointCloud.pointCloud->size() << " Points" << std::endl;
}

void SMReconstructionWorker::reconstructPointClouds(const std::vector<SMFrameSequence> &frameSequences){

    // Process the unreconstructed sequences in parallel. Note that
    // reconstructPointCloud writes the shared members 'algorithm' and
    // 'calibration', which is a potential data race when iterations run
    // concurrently.
    #pragma omp parallel for
    for(unsigned int i=0; i<frameSequences.size(); i++){
        if(!frameSequences[i].reconstructed)
            reconstructPointCloud(frameSequences[i]);
    }
}

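// Triangulate matched image points q0 (camera0) and q1 (camera1) into 3D
// points Q. Camera0 defines the world frame, so its projection matrix is
// P0 = K0*[I|0]; camera1's is P1 = K1*[R1|T1] from the stereo calibration.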
void SMReconstructionWorker::triangulate(const std::vector<cv::Point2f> &q0, const std::vector<cv::Point2f> &q1, std::vector<cv::Point3f> &Q){

    cv::Mat P0(3, 4, CV_32F, cv::Scalar(0.0));
    cv::Mat(calibration.K0).copyTo(P0(cv::Range(0,3), cv::Range(0,3)));

    cv::Mat temp(3, 4, CV_32F);
    cv::Mat(calibration.R1).copyTo(temp(cv::Range(0,3), cv::Range(0,3)));
    cv::Mat(calibration.T1).copyTo(temp(cv::Range(0,3), cv::Range(3,4)));
    cv::Mat P1 = cv::Mat(calibration.K1) * temp;

    cv::Mat QMatHomogenous, QMat;
    cv::triangulatePoints(P0, P1, q0, q1, QMatHomogenous);
    cvtools::convertMatFromHomogeneous(QMatHomogenous, QMat);
    cvtools::matToPoints3f(QMat, Q);
}